From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.13 commit in: /
Date: Wed, 14 Jul 2021 16:16:30
Message-Id: 1626279337.01cc722d694c5ac80294fa112d6cf98ebb5aefd1.mpagano@gentoo
1 commit: 01cc722d694c5ac80294fa112d6cf98ebb5aefd1
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Jul 14 16:15:37 2021 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Jul 14 16:15:37 2021 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=01cc722d
7
8 Linux 5.13.2 and rename 5.13.1 patch properly
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1001_linux-5.13.1.patch => 1000_linux-5.13.1.patch | 0
14 1001_linux-5.13.2.patch | 29932 +++++++++++++++++++
15 3 files changed, 29936 insertions(+)
16
17 diff --git a/0000_README b/0000_README
18 index 57ab4e6..e1ba986 100644
19 --- a/0000_README
20 +++ b/0000_README
21 @@ -47,6 +47,10 @@ Patch: 1000_linux-5.13.1.patch
22 From: http://www.kernel.org
23 Desc: Linux 5.13.1
24
25 +Patch: 1001_linux-5.13.2.patch
26 +From: http://www.kernel.org
27 +Desc: Linux 5.13.2
28 +
29 Patch: 1500_XATTR_USER_PREFIX.patch
30 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
31 Desc: Support for namespace user.pax.* on tmpfs.
32
33 diff --git a/1001_linux-5.13.1.patch b/1000_linux-5.13.1.patch
34 similarity index 100%
35 rename from 1001_linux-5.13.1.patch
36 rename to 1000_linux-5.13.1.patch
37
38 diff --git a/1001_linux-5.13.2.patch b/1001_linux-5.13.2.patch
39 new file mode 100644
40 index 0000000..c6dd58d
41 --- /dev/null
42 +++ b/1001_linux-5.13.2.patch
43 @@ -0,0 +1,29932 @@
44 +diff --git a/Documentation/ABI/testing/evm b/Documentation/ABI/testing/evm
45 +index 3c477ba48a312..2243b72e41107 100644
46 +--- a/Documentation/ABI/testing/evm
47 ++++ b/Documentation/ABI/testing/evm
48 +@@ -49,8 +49,30 @@ Description:
49 + modification of EVM-protected metadata and
50 + disable all further modification of policy
51 +
52 +- Note that once a key has been loaded, it will no longer be
53 +- possible to enable metadata modification.
54 ++ Echoing a value is additive, the new value is added to the
55 ++ existing initialization flags.
56 ++
57 ++ For example, after::
58 ++
59 ++ echo 2 ><securityfs>/evm
60 ++
61 ++ another echo can be performed::
62 ++
63 ++ echo 1 ><securityfs>/evm
64 ++
65 ++ and the resulting value will be 3.
66 ++
67 ++ Note that once an HMAC key has been loaded, it will no longer
68 ++ be possible to enable metadata modification. Signaling that an
69 ++ HMAC key has been loaded will clear the corresponding flag.
70 ++ For example, if the current value is 6 (2 and 4 set)::
71 ++
72 ++ echo 1 ><securityfs>/evm
73 ++
74 ++ will set the new value to 3 (4 cleared).
75 ++
76 ++ Loading an HMAC key is the only way to disable metadata
77 ++ modification.
78 +
79 + Until key loading has been signaled EVM can not create
80 + or validate the 'security.evm' xattr, but returns
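
As an aside, the additive write semantics described in the hunk above reduce to simple bitwise arithmetic. A minimal userspace model (illustrative only; the flag names match the kernel's EVM_INIT_HMAC/EVM_INIT_X509/EVM_ALLOW_METADATA_WRITES bits, but the helper is not the kernel implementation):

    #include <stdio.h>

    #define EVM_INIT_HMAC             0x1  /* "echo 1": HMAC key loaded  */
    #define EVM_INIT_X509             0x2  /* "echo 2": x509 cert loaded */
    #define EVM_ALLOW_METADATA_WRITES 0x4  /* metadata writes permitted  */

    /* Illustrative model only, not the kernel code: the written value is
     * OR-ed into the current flags, and signaling an HMAC key load clears
     * EVM_ALLOW_METADATA_WRITES, matching the 6 -> 3 example above. */
    static unsigned int evm_write_model(unsigned int cur, unsigned int in)
    {
        cur |= in;
        if (in & EVM_INIT_HMAC)
            cur &= ~EVM_ALLOW_METADATA_WRITES;
        return cur;
    }

    int main(void)
    {
        printf("%u\n", evm_write_model(2, 1)); /* 3, as in the first example */
        printf("%u\n", evm_write_model(6, 1)); /* 3: bit 4 cleared           */
        return 0;
    }
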
81 +diff --git a/Documentation/ABI/testing/sysfs-bus-papr-pmem b/Documentation/ABI/testing/sysfs-bus-papr-pmem
82 +index 92e2db0e2d3de..95254cec92bfb 100644
83 +--- a/Documentation/ABI/testing/sysfs-bus-papr-pmem
84 ++++ b/Documentation/ABI/testing/sysfs-bus-papr-pmem
85 +@@ -39,9 +39,11 @@ KernelVersion: v5.9
86 + Contact: linuxppc-dev <linuxppc-dev@lists.ozlabs.org>, nvdimm@lists.linux.dev,
87 + Description:
88 + (RO) Report various performance stats related to papr-scm NVDIMM
89 +- device. Each stat is reported on a new line with each line
90 +- composed of a stat-identifier followed by it value. Below are
91 +- currently known dimm performance stats which are reported:
92 ++ device. This attribute is only available for NVDIMM devices
93 ++ that support reporting NVDIMM performance stats. Each stat is
94 ++ reported on a new line with each line composed of a
95 ++ stat-identifier followed by it value. Below are currently known
96 ++ dimm performance stats which are reported:
97 +
98 + * "CtlResCt" : Controller Reset Count
99 + * "CtlResTm" : Controller Reset Elapsed Time
100 +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
101 +index cb89dbdedc463..995deccc28bcd 100644
102 +--- a/Documentation/admin-guide/kernel-parameters.txt
103 ++++ b/Documentation/admin-guide/kernel-parameters.txt
104 +@@ -581,6 +581,12 @@
105 + loops can be debugged more effectively on production
106 + systems.
107 +
108 ++ clocksource.max_cswd_read_retries= [KNL]
109 ++ Number of clocksource_watchdog() retries due to
110 ++ external delays before the clock will be marked
111 ++ unstable. Defaults to three retries, that is,
112 ++ four attempts to read the clock under test.
113 ++
114 + clearcpuid=BITNUM[,BITNUM...] [X86]
115 + Disable CPUID feature X for the kernel. See
116 + arch/x86/include/asm/cpufeatures.h for the valid bit
117 +diff --git a/Documentation/hwmon/max31790.rst b/Documentation/hwmon/max31790.rst
118 +index f301385d8cef3..7b097c3b9b908 100644
119 +--- a/Documentation/hwmon/max31790.rst
120 ++++ b/Documentation/hwmon/max31790.rst
121 +@@ -38,6 +38,7 @@ Sysfs entries
122 + fan[1-12]_input RO fan tachometer speed in RPM
123 + fan[1-12]_fault RO fan experienced fault
124 + fan[1-6]_target RW desired fan speed in RPM
125 +-pwm[1-6]_enable RW regulator mode, 0=disabled, 1=manual mode, 2=rpm mode
126 +-pwm[1-6] RW fan target duty cycle (0-255)
127 ++pwm[1-6]_enable RW regulator mode, 0=disabled (duty cycle=0%), 1=manual mode, 2=rpm mode
128 ++pwm[1-6] RW read: current pwm duty cycle,
129 ++ write: target pwm duty cycle (0-255)
130 + ================== === =======================================================
131 +diff --git a/Documentation/userspace-api/media/v4l/ext-ctrls-codec.rst b/Documentation/userspace-api/media/v4l/ext-ctrls-codec.rst
132 +index b0de4e6e7ebd1..514b334470eab 100644
133 +--- a/Documentation/userspace-api/media/v4l/ext-ctrls-codec.rst
134 ++++ b/Documentation/userspace-api/media/v4l/ext-ctrls-codec.rst
135 +@@ -3053,7 +3053,7 @@ enum v4l2_mpeg_video_hevc_size_of_length_field -
136 + :stub-columns: 0
137 + :widths: 1 1 2
138 +
139 +- * - ``V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT``
140 ++ * - ``V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT_ENABLED``
141 + - 0x00000001
142 + -
143 + * - ``V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT``
144 +@@ -3277,6 +3277,9 @@ enum v4l2_mpeg_video_hevc_size_of_length_field -
145 + * - ``V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED``
146 + - 0x00000100
147 + -
148 ++ * - ``V4L2_HEVC_SLICE_PARAMS_FLAG_DEPENDENT_SLICE_SEGMENT``
149 ++ - 0x00000200
150 ++ -
151 +
152 + .. raw:: latex
153 +
154 +diff --git a/Documentation/userspace-api/seccomp_filter.rst b/Documentation/userspace-api/seccomp_filter.rst
155 +index 6efb41cc80725..d61219889e494 100644
156 +--- a/Documentation/userspace-api/seccomp_filter.rst
157 ++++ b/Documentation/userspace-api/seccomp_filter.rst
158 +@@ -259,6 +259,18 @@ and ``ioctl(SECCOMP_IOCTL_NOTIF_SEND)`` a response, indicating what should be
159 + returned to userspace. The ``id`` member of ``struct seccomp_notif_resp`` should
160 + be the same ``id`` as in ``struct seccomp_notif``.
161 +
162 ++Userspace can also add file descriptors to the notifying process via
163 ++``ioctl(SECCOMP_IOCTL_NOTIF_ADDFD)``. The ``id`` member of
164 ++``struct seccomp_notif_addfd`` should be the same ``id`` as in
165 ++``struct seccomp_notif``. The ``newfd_flags`` flag may be used to set flags
166 ++like O_EXEC on the file descriptor in the notifying process. If the supervisor
167 ++wants to inject the file descriptor with a specific number, the
168 ++``SECCOMP_ADDFD_FLAG_SETFD`` flag can be used, and set the ``newfd`` member to
169 ++the specific number to use. If that file descriptor is already open in the
170 ++notifying process it will be replaced. The supervisor can also add an FD, and
171 ++respond atomically by using the ``SECCOMP_ADDFD_FLAG_SEND`` flag and the return
172 ++value will be the injected file descriptor number.
173 ++
174 + It is worth noting that ``struct seccomp_data`` contains the values of register
175 + arguments to the syscall, but does not contain pointers to memory. The task's
176 + memory is accessible to suitably privileged traces via ``ptrace()`` or
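
For readers unfamiliar with the interface the hunk above documents, here is a hedged supervisor-side sketch of the ADDFD flow. It assumes a listener fd obtained via SECCOMP_FILTER_FLAG_NEW_LISTENER and a notification already read with SECCOMP_IOCTL_NOTIF_RECV; error handling is elided, and SECCOMP_ADDFD_FLAG_SEND is only available where the installed kernel headers provide it:

    #include <fcntl.h>
    #include <linux/seccomp.h>
    #include <sys/ioctl.h>

    /* Hedged sketch of the supervisor-side flow described above. 'notify_fd'
     * is the listener fd and 'req' a notification already received via
     * SECCOMP_IOCTL_NOTIF_RECV; error handling is elided. Returns the fd
     * number injected into the notifying process (because of ..._SEND). */
    static int inject_fd(int notify_fd, const struct seccomp_notif *req,
                         int srcfd)
    {
        struct seccomp_notif_addfd addfd = {
            .id          = req->id,   /* must match the notification   */
            .srcfd       = srcfd,     /* fd to copy into the target    */
            .newfd       = 0,         /* only used with ..._FLAG_SETFD */
            .newfd_flags = O_CLOEXEC, /* flags set on the new fd       */
            .flags       = SECCOMP_ADDFD_FLAG_SEND,
        };

        return ioctl(notify_fd, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd);
    }
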
177 +diff --git a/Makefile b/Makefile
178 +index 069607cfe2836..31bbcc5255357 100644
179 +--- a/Makefile
180 ++++ b/Makefile
181 +@@ -1,7 +1,7 @@
182 + # SPDX-License-Identifier: GPL-2.0
183 + VERSION = 5
184 + PATCHLEVEL = 13
185 +-SUBLEVEL = 1
186 ++SUBLEVEL = 2
187 + EXTRAVERSION =
188 + NAME = Opossums on Parade
189 +
190 +@@ -1039,7 +1039,7 @@ LDFLAGS_vmlinux += $(call ld-option, -X,)
191 + endif
192 +
193 + ifeq ($(CONFIG_RELR),y)
194 +-LDFLAGS_vmlinux += --pack-dyn-relocs=relr
195 ++LDFLAGS_vmlinux += --pack-dyn-relocs=relr --use-android-relr-tags
196 + endif
197 +
198 + # We never want expected sections to be placed heuristically by the
199 +diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
200 +index f4dd9f3f30010..4b2575f936d46 100644
201 +--- a/arch/alpha/kernel/smp.c
202 ++++ b/arch/alpha/kernel/smp.c
203 +@@ -166,7 +166,6 @@ smp_callin(void)
204 + DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
205 + cpuid, current, current->active_mm));
206 +
207 +- preempt_disable();
208 + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
209 + }
210 +
211 +diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
212 +index 52906d3145371..db0e104d68355 100644
213 +--- a/arch/arc/kernel/smp.c
214 ++++ b/arch/arc/kernel/smp.c
215 +@@ -189,7 +189,6 @@ void start_kernel_secondary(void)
216 + pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
217 +
218 + local_irq_enable();
219 +- preempt_disable();
220 + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
221 + }
222 +
223 +diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
224 +index 05c55875835d5..f70a8528b9598 100644
225 +--- a/arch/arm/boot/dts/sama5d4.dtsi
226 ++++ b/arch/arm/boot/dts/sama5d4.dtsi
227 +@@ -787,7 +787,7 @@
228 + 0xffffffff 0x3ffcfe7c 0x1c010101 /* pioA */
229 + 0x7fffffff 0xfffccc3a 0x3f00cc3a /* pioB */
230 + 0xffffffff 0x3ff83fff 0xff00ffff /* pioC */
231 +- 0x0003ff00 0x8002a800 0x00000000 /* pioD */
232 ++ 0xb003ff00 0x8002a800 0x00000000 /* pioD */
233 + 0xffffffff 0x7fffffff 0x76fff1bf /* pioE */
234 + >;
235 +
236 +diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi
237 +index 83b179692dff7..13d2161929042 100644
238 +--- a/arch/arm/boot/dts/ste-href.dtsi
239 ++++ b/arch/arm/boot/dts/ste-href.dtsi
240 +@@ -4,6 +4,7 @@
241 + */
242 +
243 + #include <dt-bindings/interrupt-controller/irq.h>
244 ++#include <dt-bindings/leds/common.h>
245 + #include "ste-href-family-pinctrl.dtsi"
246 +
247 + / {
248 +@@ -64,17 +65,20 @@
249 + reg = <0>;
250 + led-cur = /bits/ 8 <0x2f>;
251 + max-cur = /bits/ 8 <0x5f>;
252 ++ color = <LED_COLOR_ID_BLUE>;
253 + linux,default-trigger = "heartbeat";
254 + };
255 + chan@1 {
256 + reg = <1>;
257 + led-cur = /bits/ 8 <0x2f>;
258 + max-cur = /bits/ 8 <0x5f>;
259 ++ color = <LED_COLOR_ID_BLUE>;
260 + };
261 + chan@2 {
262 + reg = <2>;
263 + led-cur = /bits/ 8 <0x2f>;
264 + max-cur = /bits/ 8 <0x5f>;
265 ++ color = <LED_COLOR_ID_BLUE>;
266 + };
267 + };
268 + lp5521@34 {
269 +@@ -88,16 +92,19 @@
270 + reg = <0>;
271 + led-cur = /bits/ 8 <0x2f>;
272 + max-cur = /bits/ 8 <0x5f>;
273 ++ color = <LED_COLOR_ID_BLUE>;
274 + };
275 + chan@1 {
276 + reg = <1>;
277 + led-cur = /bits/ 8 <0x2f>;
278 + max-cur = /bits/ 8 <0x5f>;
279 ++ color = <LED_COLOR_ID_BLUE>;
280 + };
281 + chan@2 {
282 + reg = <2>;
283 + led-cur = /bits/ 8 <0x2f>;
284 + max-cur = /bits/ 8 <0x5f>;
285 ++ color = <LED_COLOR_ID_BLUE>;
286 + };
287 + };
288 + bh1780@29 {
289 +diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
290 +index 2924d7910b106..eb2190477da10 100644
291 +--- a/arch/arm/kernel/perf_event_v7.c
292 ++++ b/arch/arm/kernel/perf_event_v7.c
293 +@@ -773,10 +773,10 @@ static inline void armv7pmu_write_counter(struct perf_event *event, u64 value)
294 + pr_err("CPU%u writing wrong counter %d\n",
295 + smp_processor_id(), idx);
296 + } else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
297 +- asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
298 ++ asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" ((u32)value));
299 + } else {
300 + armv7_pmnc_select_counter(idx);
301 +- asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
302 ++ asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" ((u32)value));
303 + }
304 + }
305 +
306 +diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
307 +index 74679240a9d8e..c7bb168b0d97c 100644
308 +--- a/arch/arm/kernel/smp.c
309 ++++ b/arch/arm/kernel/smp.c
310 +@@ -432,7 +432,6 @@ asmlinkage void secondary_start_kernel(void)
311 + #endif
312 + pr_debug("CPU%u: Booted secondary processor\n", cpu);
313 +
314 +- preempt_disable();
315 + trace_hardirqs_off();
316 +
317 + /*
318 +diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
319 +index 456dcd4a7793f..6ffbb099fcac7 100644
320 +--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
321 ++++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
322 +@@ -134,7 +134,7 @@
323 +
324 + uart0: serial@12000 {
325 + compatible = "marvell,armada-3700-uart";
326 +- reg = <0x12000 0x200>;
327 ++ reg = <0x12000 0x18>;
328 + clocks = <&xtalclk>;
329 + interrupts =
330 + <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
331 +diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
332 +index 7cd7d5c8c4bc2..6336b4309114b 100644
333 +--- a/arch/arm64/include/asm/kvm_host.h
334 ++++ b/arch/arm64/include/asm/kvm_host.h
335 +@@ -46,6 +46,7 @@
336 + #define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
337 + #define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3)
338 + #define KVM_REQ_RELOAD_GICv4 KVM_ARCH_REQ(4)
339 ++#define KVM_REQ_RELOAD_PMU KVM_ARCH_REQ(5)
340 +
341 + #define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
342 + KVM_DIRTY_LOG_INITIALLY_SET)
343 +diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
344 +index d3cef91335396..eeb210997149a 100644
345 +--- a/arch/arm64/include/asm/mmu_context.h
346 ++++ b/arch/arm64/include/asm/mmu_context.h
347 +@@ -177,9 +177,9 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
348 + return;
349 +
350 + if (mm == &init_mm)
351 +- ttbr = __pa_symbol(reserved_pg_dir);
352 ++ ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
353 + else
354 +- ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
355 ++ ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48;
356 +
357 + WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
358 + }
359 +diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
360 +index 80e946b2abee2..e83f0982b99c1 100644
361 +--- a/arch/arm64/include/asm/preempt.h
362 ++++ b/arch/arm64/include/asm/preempt.h
363 +@@ -23,7 +23,7 @@ static inline void preempt_count_set(u64 pc)
364 + } while (0)
365 +
366 + #define init_idle_preempt_count(p, cpu) do { \
367 +- task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
368 ++ task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
369 + } while (0)
370 +
371 + static inline void set_preempt_need_resched(void)
372 +diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
373 +index 6cc97730790e7..787c3c83edd7a 100644
374 +--- a/arch/arm64/kernel/Makefile
375 ++++ b/arch/arm64/kernel/Makefile
376 +@@ -14,6 +14,11 @@ CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE)
377 + CFLAGS_REMOVE_syscall.o = -fstack-protector -fstack-protector-strong
378 + CFLAGS_syscall.o += -fno-stack-protector
379 +
380 ++# It's not safe to invoke KCOV when portions of the kernel environment aren't
381 ++# available or are out-of-sync with HW state. Since `noinstr` doesn't always
382 ++# inhibit KCOV instrumentation, disable it for the entire compilation unit.
383 ++KCOV_INSTRUMENT_entry.o := n
384 ++
385 + # Object file lists.
386 + obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
387 + entry-common.o entry-fpsimd.o process.o ptrace.o \
388 +diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
389 +index f594957e29bd1..44b6eda69a81a 100644
390 +--- a/arch/arm64/kernel/perf_event.c
391 ++++ b/arch/arm64/kernel/perf_event.c
392 +@@ -312,7 +312,7 @@ static ssize_t slots_show(struct device *dev, struct device_attribute *attr,
393 + struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
394 + u32 slots = cpu_pmu->reg_pmmir & ARMV8_PMU_SLOTS_MASK;
395 +
396 +- return snprintf(page, PAGE_SIZE, "0x%08x\n", slots);
397 ++ return sysfs_emit(page, "0x%08x\n", slots);
398 + }
399 +
400 + static DEVICE_ATTR_RO(slots);
401 +diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
402 +index 61845c0821d9d..68b30e8c22dbf 100644
403 +--- a/arch/arm64/kernel/setup.c
404 ++++ b/arch/arm64/kernel/setup.c
405 +@@ -381,7 +381,7 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
406 + * faults in case uaccess_enable() is inadvertently called by the init
407 + * thread.
408 + */
409 +- init_task.thread_info.ttbr0 = __pa_symbol(reserved_pg_dir);
410 ++ init_task.thread_info.ttbr0 = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
411 + #endif
412 +
413 + if (boot_args[1] || boot_args[2] || boot_args[3]) {
414 +diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
415 +index dcd7041b2b077..6671000a8b7d7 100644
416 +--- a/arch/arm64/kernel/smp.c
417 ++++ b/arch/arm64/kernel/smp.c
418 +@@ -224,7 +224,6 @@ asmlinkage notrace void secondary_start_kernel(void)
419 + init_gic_priority_masking();
420 +
421 + rcu_cpu_starting(cpu);
422 +- preempt_disable();
423 + trace_hardirqs_off();
424 +
425 + /*
426 +diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
427 +index e720148232a06..facf4d41d32a2 100644
428 +--- a/arch/arm64/kvm/arm.c
429 ++++ b/arch/arm64/kvm/arm.c
430 +@@ -689,6 +689,10 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
431 + vgic_v4_load(vcpu);
432 + preempt_enable();
433 + }
434 ++
435 ++ if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))
436 ++ kvm_pmu_handle_pmcr(vcpu,
437 ++ __vcpu_sys_reg(vcpu, PMCR_EL0));
438 + }
439 + }
440 +
441 +diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
442 +index fd167d4f42157..f33825c995cbb 100644
443 +--- a/arch/arm64/kvm/pmu-emul.c
444 ++++ b/arch/arm64/kvm/pmu-emul.c
445 +@@ -578,6 +578,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
446 + kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
447 +
448 + if (val & ARMV8_PMU_PMCR_P) {
449 ++ mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
450 + for_each_set_bit(i, &mask, 32)
451 + kvm_pmu_set_counter_value(vcpu, i, 0);
452 + }
453 +@@ -850,6 +851,9 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
454 + return -EINVAL;
455 + }
456 +
457 ++ /* One-off reload of the PMU on first run */
458 ++ kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
459 ++
460 + return 0;
461 + }
462 +
463 +diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c
464 +index 0f9f5eef93386..e2993539af8ef 100644
465 +--- a/arch/csky/kernel/smp.c
466 ++++ b/arch/csky/kernel/smp.c
467 +@@ -281,7 +281,6 @@ void csky_start_secondary(void)
468 + pr_info("CPU%u Online: %s...\n", cpu, __func__);
469 +
470 + local_irq_enable();
471 +- preempt_disable();
472 + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
473 + }
474 +
475 +diff --git a/arch/csky/mm/syscache.c b/arch/csky/mm/syscache.c
476 +index 4e51d63850c46..cd847ad62c7ee 100644
477 +--- a/arch/csky/mm/syscache.c
478 ++++ b/arch/csky/mm/syscache.c
479 +@@ -12,15 +12,17 @@ SYSCALL_DEFINE3(cacheflush,
480 + int, cache)
481 + {
482 + switch (cache) {
483 +- case ICACHE:
484 + case BCACHE:
485 +- flush_icache_mm_range(current->mm,
486 +- (unsigned long)addr,
487 +- (unsigned long)addr + bytes);
488 +- fallthrough;
489 + case DCACHE:
490 + dcache_wb_range((unsigned long)addr,
491 + (unsigned long)addr + bytes);
492 ++ if (cache != BCACHE)
493 ++ break;
494 ++ fallthrough;
495 ++ case ICACHE:
496 ++ flush_icache_mm_range(current->mm,
497 ++ (unsigned long)addr,
498 ++ (unsigned long)addr + bytes);
499 + break;
500 + default:
501 + return -EINVAL;
502 +diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
503 +index 36a69b4e61690..5bfc79be4cefe 100644
504 +--- a/arch/ia64/kernel/mca_drv.c
505 ++++ b/arch/ia64/kernel/mca_drv.c
506 +@@ -343,7 +343,7 @@ init_record_index_pools(void)
507 +
508 + /* - 2 - */
509 + sect_min_size = sal_log_sect_min_sizes[0];
510 +- for (i = 1; i < sizeof sal_log_sect_min_sizes/sizeof(size_t); i++)
511 ++ for (i = 1; i < ARRAY_SIZE(sal_log_sect_min_sizes); i++)
512 + if (sect_min_size > sal_log_sect_min_sizes[i])
513 + sect_min_size = sal_log_sect_min_sizes[i];
514 +
515 +diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
516 +index 49b4885809399..d10f780c13b9e 100644
517 +--- a/arch/ia64/kernel/smpboot.c
518 ++++ b/arch/ia64/kernel/smpboot.c
519 +@@ -441,7 +441,6 @@ start_secondary (void *unused)
520 + #endif
521 + efi_map_pal_code();
522 + cpu_init();
523 +- preempt_disable();
524 + smp_callin();
525 +
526 + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
527 +diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine
528 +index 4d59ec2f5b8d6..d964c1f273995 100644
529 +--- a/arch/m68k/Kconfig.machine
530 ++++ b/arch/m68k/Kconfig.machine
531 +@@ -25,6 +25,9 @@ config ATARI
532 + this kernel on an Atari, say Y here and browse the material
533 + available in <file:Documentation/m68k>; otherwise say N.
534 +
535 ++config ATARI_KBD_CORE
536 ++ bool
537 ++
538 + config MAC
539 + bool "Macintosh support"
540 + depends on MMU
541 +diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h
542 +index 292d0425717f3..92a3802100178 100644
543 +--- a/arch/mips/include/asm/highmem.h
544 ++++ b/arch/mips/include/asm/highmem.h
545 +@@ -36,7 +36,7 @@ extern pte_t *pkmap_page_table;
546 + * easily, subsequent pte tables have to be allocated in one physical
547 + * chunk of RAM.
548 + */
549 +-#ifdef CONFIG_PHYS_ADDR_T_64BIT
550 ++#if defined(CONFIG_PHYS_ADDR_T_64BIT) || defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
551 + #define LAST_PKMAP 512
552 + #else
553 + #define LAST_PKMAP 1024
554 +diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
555 +index ef86fbad85460..d542fb7af3ba2 100644
556 +--- a/arch/mips/kernel/smp.c
557 ++++ b/arch/mips/kernel/smp.c
558 +@@ -348,7 +348,6 @@ asmlinkage void start_secondary(void)
559 + */
560 +
561 + calibrate_delay();
562 +- preempt_disable();
563 + cpu = smp_processor_id();
564 + cpu_data[cpu].udelay_val = loops_per_jiffy;
565 +
566 +diff --git a/arch/openrisc/kernel/smp.c b/arch/openrisc/kernel/smp.c
567 +index 48e1092a64de3..415e209732a3d 100644
568 +--- a/arch/openrisc/kernel/smp.c
569 ++++ b/arch/openrisc/kernel/smp.c
570 +@@ -145,8 +145,6 @@ asmlinkage __init void secondary_start_kernel(void)
571 + set_cpu_online(cpu, true);
572 +
573 + local_irq_enable();
574 +-
575 +- preempt_disable();
576 + /*
577 + * OK, it's off to the idle thread for us
578 + */
579 +diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
580 +index 10227f667c8a6..1405b603b91b6 100644
581 +--- a/arch/parisc/kernel/smp.c
582 ++++ b/arch/parisc/kernel/smp.c
583 +@@ -302,7 +302,6 @@ void __init smp_callin(unsigned long pdce_proc)
584 + #endif
585 +
586 + smp_cpu_init(slave_id);
587 +- preempt_disable();
588 +
589 + flush_cache_all_local(); /* start with known state */
590 + flush_tlb_all_local(NULL);
591 +diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
592 +index 98c8bd155bf9d..b167186aaee4a 100644
593 +--- a/arch/powerpc/include/asm/cputhreads.h
594 ++++ b/arch/powerpc/include/asm/cputhreads.h
595 +@@ -98,6 +98,36 @@ static inline int cpu_last_thread_sibling(int cpu)
596 + return cpu | (threads_per_core - 1);
597 + }
598 +
599 ++/*
600 ++ * tlb_thread_siblings are siblings which share a TLB. This is not
601 ++ * architected, is not something a hypervisor could emulate and a future
602 ++ * CPU may change behaviour even in compat mode, so this should only be
603 ++ * used on PowerNV, and only with care.
604 ++ */
605 ++static inline int cpu_first_tlb_thread_sibling(int cpu)
606 ++{
607 ++ if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
608 ++ return cpu & ~0x6; /* Big Core */
609 ++ else
610 ++ return cpu_first_thread_sibling(cpu);
611 ++}
612 ++
613 ++static inline int cpu_last_tlb_thread_sibling(int cpu)
614 ++{
615 ++ if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
616 ++ return cpu | 0x6; /* Big Core */
617 ++ else
618 ++ return cpu_last_thread_sibling(cpu);
619 ++}
620 ++
621 ++static inline int cpu_tlb_thread_sibling_step(void)
622 ++{
623 ++ if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
624 ++ return 2; /* Big Core */
625 ++ else
626 ++ return 1;
627 ++}
628 ++
629 + static inline u32 get_tensr(void)
630 + {
631 + #ifdef CONFIG_BOOKE
632 +diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
633 +index 59f704408d65d..a26aad41ef3e7 100644
634 +--- a/arch/powerpc/include/asm/interrupt.h
635 ++++ b/arch/powerpc/include/asm/interrupt.h
636 +@@ -186,6 +186,7 @@ struct interrupt_nmi_state {
637 + u8 irq_soft_mask;
638 + u8 irq_happened;
639 + u8 ftrace_enabled;
640 ++ u64 softe;
641 + #endif
642 + };
643 +
644 +@@ -211,6 +212,7 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
645 + #ifdef CONFIG_PPC64
646 + state->irq_soft_mask = local_paca->irq_soft_mask;
647 + state->irq_happened = local_paca->irq_happened;
648 ++ state->softe = regs->softe;
649 +
650 + /*
651 + * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
652 +@@ -263,6 +265,7 @@ static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct inter
653 +
654 + /* Check we didn't change the pending interrupt mask. */
655 + WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
656 ++ regs->softe = state->softe;
657 + local_paca->irq_happened = state->irq_happened;
658 + local_paca->irq_soft_mask = state->irq_soft_mask;
659 + #endif
660 +diff --git a/arch/powerpc/include/asm/kvm_guest.h b/arch/powerpc/include/asm/kvm_guest.h
661 +index 2fca299f7e192..c63105d2c9e7c 100644
662 +--- a/arch/powerpc/include/asm/kvm_guest.h
663 ++++ b/arch/powerpc/include/asm/kvm_guest.h
664 +@@ -16,10 +16,10 @@ static inline bool is_kvm_guest(void)
665 + return static_branch_unlikely(&kvm_guest);
666 + }
667 +
668 +-bool check_kvm_guest(void);
669 ++int check_kvm_guest(void);
670 + #else
671 + static inline bool is_kvm_guest(void) { return false; }
672 +-static inline bool check_kvm_guest(void) { return false; }
673 ++static inline int check_kvm_guest(void) { return 0; }
674 + #endif
675 +
676 + #endif /* _ASM_POWERPC_KVM_GUEST_H_ */
677 +diff --git a/arch/powerpc/kernel/firmware.c b/arch/powerpc/kernel/firmware.c
678 +index c9e2819b095ab..c7022c41cc314 100644
679 +--- a/arch/powerpc/kernel/firmware.c
680 ++++ b/arch/powerpc/kernel/firmware.c
681 +@@ -23,18 +23,20 @@ EXPORT_SYMBOL_GPL(powerpc_firmware_features);
682 +
683 + #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_KVM_GUEST)
684 + DEFINE_STATIC_KEY_FALSE(kvm_guest);
685 +-bool check_kvm_guest(void)
686 ++int __init check_kvm_guest(void)
687 + {
688 + struct device_node *hyper_node;
689 +
690 + hyper_node = of_find_node_by_path("/hypervisor");
691 + if (!hyper_node)
692 +- return false;
693 ++ return 0;
694 +
695 + if (!of_device_is_compatible(hyper_node, "linux,kvm"))
696 +- return false;
697 ++ return 0;
698 +
699 + static_branch_enable(&kvm_guest);
700 +- return true;
701 ++
702 ++ return 0;
703 + }
704 ++core_initcall(check_kvm_guest); // before kvm_guest_init()
705 + #endif
706 +diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
707 +index 667104d4c4550..2fff886c549d0 100644
708 +--- a/arch/powerpc/kernel/mce_power.c
709 ++++ b/arch/powerpc/kernel/mce_power.c
710 +@@ -481,12 +481,11 @@ static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr,
711 + return -1;
712 + }
713 +
714 +-static int mce_handle_ierror(struct pt_regs *regs,
715 ++static int mce_handle_ierror(struct pt_regs *regs, unsigned long srr1,
716 + const struct mce_ierror_table table[],
717 + struct mce_error_info *mce_err, uint64_t *addr,
718 + uint64_t *phys_addr)
719 + {
720 +- uint64_t srr1 = regs->msr;
721 + int handled = 0;
722 + int i;
723 +
724 +@@ -695,19 +694,19 @@ static long mce_handle_ue_error(struct pt_regs *regs,
725 + }
726 +
727 + static long mce_handle_error(struct pt_regs *regs,
728 ++ unsigned long srr1,
729 + const struct mce_derror_table dtable[],
730 + const struct mce_ierror_table itable[])
731 + {
732 + struct mce_error_info mce_err = { 0 };
733 + uint64_t addr, phys_addr = ULONG_MAX;
734 +- uint64_t srr1 = regs->msr;
735 + long handled;
736 +
737 + if (SRR1_MC_LOADSTORE(srr1))
738 + handled = mce_handle_derror(regs, dtable, &mce_err, &addr,
739 + &phys_addr);
740 + else
741 +- handled = mce_handle_ierror(regs, itable, &mce_err, &addr,
742 ++ handled = mce_handle_ierror(regs, srr1, itable, &mce_err, &addr,
743 + &phys_addr);
744 +
745 + if (!handled && mce_err.error_type == MCE_ERROR_TYPE_UE)
746 +@@ -723,16 +722,20 @@ long __machine_check_early_realmode_p7(struct pt_regs *regs)
747 + /* P7 DD1 leaves top bits of DSISR undefined */
748 + regs->dsisr &= 0x0000ffff;
749 +
750 +- return mce_handle_error(regs, mce_p7_derror_table, mce_p7_ierror_table);
751 ++ return mce_handle_error(regs, regs->msr,
752 ++ mce_p7_derror_table, mce_p7_ierror_table);
753 + }
754 +
755 + long __machine_check_early_realmode_p8(struct pt_regs *regs)
756 + {
757 +- return mce_handle_error(regs, mce_p8_derror_table, mce_p8_ierror_table);
758 ++ return mce_handle_error(regs, regs->msr,
759 ++ mce_p8_derror_table, mce_p8_ierror_table);
760 + }
761 +
762 + long __machine_check_early_realmode_p9(struct pt_regs *regs)
763 + {
764 ++ unsigned long srr1 = regs->msr;
765 ++
766 + /*
767 + * On POWER9 DD2.1 and below, it's possible to get a machine check
768 + * caused by a paste instruction where only DSISR bit 25 is set. This
769 +@@ -746,10 +749,39 @@ long __machine_check_early_realmode_p9(struct pt_regs *regs)
770 + if (SRR1_MC_LOADSTORE(regs->msr) && regs->dsisr == 0x02000000)
771 + return 1;
772 +
773 +- return mce_handle_error(regs, mce_p9_derror_table, mce_p9_ierror_table);
774 ++ /*
775 ++ * Async machine check due to bad real address from store or foreign
776 ++ * link time out comes with the load/store bit (PPC bit 42) set in
777 ++ * SRR1, but the cause comes in SRR1 not DSISR. Clear bit 42 so we're
778 ++ * directed to the ierror table so it will find the cause (which
779 ++ * describes it correctly as a store error).
780 ++ */
781 ++ if (SRR1_MC_LOADSTORE(srr1) &&
782 ++ ((srr1 & 0x081c0000) == 0x08140000 ||
783 ++ (srr1 & 0x081c0000) == 0x08180000)) {
784 ++ srr1 &= ~PPC_BIT(42);
785 ++ }
786 ++
787 ++ return mce_handle_error(regs, srr1,
788 ++ mce_p9_derror_table, mce_p9_ierror_table);
789 + }
790 +
791 + long __machine_check_early_realmode_p10(struct pt_regs *regs)
792 + {
793 +- return mce_handle_error(regs, mce_p10_derror_table, mce_p10_ierror_table);
794 ++ unsigned long srr1 = regs->msr;
795 ++
796 ++ /*
797 ++ * Async machine check due to bad real address from store comes with
798 ++ * the load/store bit (PPC bit 42) set in SRR1, but the cause comes in
799 ++ * SRR1 not DSISR. Clear bit 42 so we're directed to the ierror table
800 ++ * so it will find the cause (which describes it correctly as a store
801 ++ * error).
802 ++ */
803 ++ if (SRR1_MC_LOADSTORE(srr1) &&
804 ++ (srr1 & 0x081c0000) == 0x08140000) {
805 ++ srr1 &= ~PPC_BIT(42);
806 ++ }
807 ++
808 ++ return mce_handle_error(regs, srr1,
809 ++ mce_p10_derror_table, mce_p10_ierror_table);
810 + }
811 +diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
812 +index 89e34aa273e21..1138f035ce747 100644
813 +--- a/arch/powerpc/kernel/process.c
814 ++++ b/arch/powerpc/kernel/process.c
815 +@@ -1213,6 +1213,19 @@ struct task_struct *__switch_to(struct task_struct *prev,
816 + __flush_tlb_pending(batch);
817 + batch->active = 0;
818 + }
819 ++
820 ++ /*
821 ++ * On POWER9 the copy-paste buffer can only paste into
822 ++ * foreign real addresses, so unprivileged processes can not
823 ++ * see the data or use it in any way unless they have
824 ++ * foreign real mappings. If the new process has the foreign
825 ++ * real address mappings, we must issue a cp_abort to clear
826 ++ * any state and prevent snooping, corruption or a covert
827 ++ * channel. ISA v3.1 supports paste into local memory.
828 ++ */
829 ++ if (new->mm && (cpu_has_feature(CPU_FTR_ARCH_31) ||
830 ++ atomic_read(&new->mm->context.vas_windows)))
831 ++ asm volatile(PPC_CP_ABORT);
832 + #endif /* CONFIG_PPC_BOOK3S_64 */
833 +
834 + #ifdef CONFIG_PPC_ADV_DEBUG_REGS
835 +@@ -1261,30 +1274,33 @@ struct task_struct *__switch_to(struct task_struct *prev,
836 + #endif
837 + last = _switch(old_thread, new_thread);
838 +
839 ++ /*
840 ++ * Nothing after _switch will be run for newly created tasks,
841 ++ * because they switch directly to ret_from_fork/ret_from_kernel_thread
842 ++ * etc. Code added here should have a comment explaining why that is
843 ++ * okay.
844 ++ */
845 ++
846 + #ifdef CONFIG_PPC_BOOK3S_64
847 ++ /*
848 ++ * This applies to a process that was context switched while inside
849 ++ * arch_enter_lazy_mmu_mode(), to re-activate the batch that was
850 ++ * deactivated above, before _switch(). This will never be the case
851 ++ * for new tasks.
852 ++ */
853 + if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
854 + current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
855 + batch = this_cpu_ptr(&ppc64_tlb_batch);
856 + batch->active = 1;
857 + }
858 +
859 +- if (current->thread.regs) {
860 ++ /*
861 ++ * Math facilities are masked out of the child MSR in copy_thread.
862 ++ * A new task does not need to restore_math because it will
863 ++ * demand fault them.
864 ++ */
865 ++ if (current->thread.regs)
866 + restore_math(current->thread.regs);
867 +-
868 +- /*
869 +- * On POWER9 the copy-paste buffer can only paste into
870 +- * foreign real addresses, so unprivileged processes can not
871 +- * see the data or use it in any way unless they have
872 +- * foreign real mappings. If the new process has the foreign
873 +- * real address mappings, we must issue a cp_abort to clear
874 +- * any state and prevent snooping, corruption or a covert
875 +- * channel. ISA v3.1 supports paste into local memory.
876 +- */
877 +- if (current->mm &&
878 +- (cpu_has_feature(CPU_FTR_ARCH_31) ||
879 +- atomic_read(&current->mm->context.vas_windows)))
880 +- asm volatile(PPC_CP_ABORT);
881 +- }
882 + #endif /* CONFIG_PPC_BOOK3S_64 */
883 +
884 + return last;
885 +diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
886 +index 2e05c783440a3..df6b468976d53 100644
887 +--- a/arch/powerpc/kernel/smp.c
888 ++++ b/arch/powerpc/kernel/smp.c
889 +@@ -619,6 +619,8 @@ static void nmi_stop_this_cpu(struct pt_regs *regs)
890 + /*
891 + * IRQs are already hard disabled by the smp_handle_nmi_ipi.
892 + */
893 ++ set_cpu_online(smp_processor_id(), false);
894 ++
895 + spin_begin();
896 + while (1)
897 + spin_cpu_relax();
898 +@@ -634,6 +636,15 @@ void smp_send_stop(void)
899 + static void stop_this_cpu(void *dummy)
900 + {
901 + hard_irq_disable();
902 ++
903 ++ /*
904 ++ * Offlining CPUs in stop_this_cpu can result in scheduler warnings,
905 ++ * (see commit de6e5d38417e), but printk_safe_flush_on_panic() wants
906 ++ * to know other CPUs are offline before it breaks locks to flush
907 ++ * printk buffers, in case we panic()ed while holding the lock.
908 ++ */
909 ++ set_cpu_online(smp_processor_id(), false);
910 ++
911 + spin_begin();
912 + while (1)
913 + spin_cpu_relax();
914 +@@ -1547,7 +1558,6 @@ void start_secondary(void *unused)
915 + smp_store_cpu_info(cpu);
916 + set_dec(tb_ticks_per_jiffy);
917 + rcu_cpu_starting(cpu);
918 +- preempt_disable();
919 + cpu_callin_map[cpu] = 1;
920 +
921 + if (smp_ops->setup_cpu)
922 +diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
923 +index 1deb1bf331ddb..ea0d9c36e177c 100644
924 +--- a/arch/powerpc/kernel/stacktrace.c
925 ++++ b/arch/powerpc/kernel/stacktrace.c
926 +@@ -172,17 +172,31 @@ static void handle_backtrace_ipi(struct pt_regs *regs)
927 +
928 + static void raise_backtrace_ipi(cpumask_t *mask)
929 + {
930 ++ struct paca_struct *p;
931 + unsigned int cpu;
932 ++ u64 delay_us;
933 +
934 + for_each_cpu(cpu, mask) {
935 +- if (cpu == smp_processor_id())
936 ++ if (cpu == smp_processor_id()) {
937 + handle_backtrace_ipi(NULL);
938 +- else
939 +- smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, 5 * USEC_PER_SEC);
940 +- }
941 ++ continue;
942 ++ }
943 +
944 +- for_each_cpu(cpu, mask) {
945 +- struct paca_struct *p = paca_ptrs[cpu];
946 ++ delay_us = 5 * USEC_PER_SEC;
947 ++
948 ++ if (smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, delay_us)) {
949 ++ // Now wait up to 5s for the other CPU to do its backtrace
950 ++ while (cpumask_test_cpu(cpu, mask) && delay_us) {
951 ++ udelay(1);
952 ++ delay_us--;
953 ++ }
954 ++
955 ++ // Other CPU cleared itself from the mask
956 ++ if (delay_us)
957 ++ continue;
958 ++ }
959 ++
960 ++ p = paca_ptrs[cpu];
961 +
962 + cpumask_clear_cpu(cpu, mask);
963 +
964 +diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
965 +index bc08136446660..67cc164c4ac1a 100644
966 +--- a/arch/powerpc/kvm/book3s_hv.c
967 ++++ b/arch/powerpc/kvm/book3s_hv.c
968 +@@ -2657,7 +2657,7 @@ static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
969 + cpumask_t *cpu_in_guest;
970 + int i;
971 +
972 +- cpu = cpu_first_thread_sibling(cpu);
973 ++ cpu = cpu_first_tlb_thread_sibling(cpu);
974 + if (nested) {
975 + cpumask_set_cpu(cpu, &nested->need_tlb_flush);
976 + cpu_in_guest = &nested->cpu_in_guest;
977 +@@ -2671,9 +2671,10 @@ static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
978 + * the other side is the first smp_mb() in kvmppc_run_core().
979 + */
980 + smp_mb();
981 +- for (i = 0; i < threads_per_core; ++i)
982 +- if (cpumask_test_cpu(cpu + i, cpu_in_guest))
983 +- smp_call_function_single(cpu + i, do_nothing, NULL, 1);
984 ++ for (i = cpu; i <= cpu_last_tlb_thread_sibling(cpu);
985 ++ i += cpu_tlb_thread_sibling_step())
986 ++ if (cpumask_test_cpu(i, cpu_in_guest))
987 ++ smp_call_function_single(i, do_nothing, NULL, 1);
988 + }
989 +
990 + static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
991 +@@ -2704,8 +2705,8 @@ static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
992 + */
993 + if (prev_cpu != pcpu) {
994 + if (prev_cpu >= 0 &&
995 +- cpu_first_thread_sibling(prev_cpu) !=
996 +- cpu_first_thread_sibling(pcpu))
997 ++ cpu_first_tlb_thread_sibling(prev_cpu) !=
998 ++ cpu_first_tlb_thread_sibling(pcpu))
999 + radix_flush_cpu(kvm, prev_cpu, vcpu);
1000 + if (nested)
1001 + nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu;
1002 +diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
1003 +index 7a0e33a9c980d..3edc25c890923 100644
1004 +--- a/arch/powerpc/kvm/book3s_hv_builtin.c
1005 ++++ b/arch/powerpc/kvm/book3s_hv_builtin.c
1006 +@@ -800,7 +800,7 @@ void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
1007 + * Thus we make all 4 threads use the same bit.
1008 + */
1009 + if (cpu_has_feature(CPU_FTR_ARCH_300))
1010 +- pcpu = cpu_first_thread_sibling(pcpu);
1011 ++ pcpu = cpu_first_tlb_thread_sibling(pcpu);
1012 +
1013 + if (nested)
1014 + need_tlb_flush = &nested->need_tlb_flush;
1015 +diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
1016 +index 60724f6744219..1b3ff0af12648 100644
1017 +--- a/arch/powerpc/kvm/book3s_hv_nested.c
1018 ++++ b/arch/powerpc/kvm/book3s_hv_nested.c
1019 +@@ -53,7 +53,8 @@ void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
1020 + hr->dawrx1 = vcpu->arch.dawrx1;
1021 + }
1022 +
1023 +-static void byteswap_pt_regs(struct pt_regs *regs)
1024 ++/* Use noinline_for_stack due to https://bugs.llvm.org/show_bug.cgi?id=49610 */
1025 ++static noinline_for_stack void byteswap_pt_regs(struct pt_regs *regs)
1026 + {
1027 + unsigned long *addr = (unsigned long *) regs;
1028 +
1029 +diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
1030 +index 7a0f12404e0ee..502d9ebe3ae47 100644
1031 +--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
1032 ++++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
1033 +@@ -56,7 +56,7 @@ static int global_invalidates(struct kvm *kvm)
1034 + * so use the bit for the first thread to represent the core.
1035 + */
1036 + if (cpu_has_feature(CPU_FTR_ARCH_300))
1037 +- cpu = cpu_first_thread_sibling(cpu);
1038 ++ cpu = cpu_first_tlb_thread_sibling(cpu);
1039 + cpumask_clear_cpu(cpu, &kvm->arch.need_tlb_flush);
1040 + }
1041 +
1042 +diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
1043 +index 96d9aa1640073..ac5720371c0d9 100644
1044 +--- a/arch/powerpc/mm/book3s64/hash_utils.c
1045 ++++ b/arch/powerpc/mm/book3s64/hash_utils.c
1046 +@@ -1522,8 +1522,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
1047 + }
1048 + EXPORT_SYMBOL_GPL(hash_page);
1049 +
1050 +-DECLARE_INTERRUPT_HANDLER_RET(__do_hash_fault);
1051 +-DEFINE_INTERRUPT_HANDLER_RET(__do_hash_fault)
1052 ++DECLARE_INTERRUPT_HANDLER(__do_hash_fault);
1053 ++DEFINE_INTERRUPT_HANDLER(__do_hash_fault)
1054 + {
1055 + unsigned long ea = regs->dar;
1056 + unsigned long dsisr = regs->dsisr;
1057 +@@ -1533,6 +1533,11 @@ DEFINE_INTERRUPT_HANDLER_RET(__do_hash_fault)
1058 + unsigned int region_id;
1059 + long err;
1060 +
1061 ++ if (unlikely(dsisr & (DSISR_BAD_FAULT_64S | DSISR_KEYFAULT))) {
1062 ++ hash__do_page_fault(regs);
1063 ++ return;
1064 ++ }
1065 ++
1066 + region_id = get_region_id(ea);
1067 + if ((region_id == VMALLOC_REGION_ID) || (region_id == IO_REGION_ID))
1068 + mm = &init_mm;
1069 +@@ -1571,9 +1576,10 @@ DEFINE_INTERRUPT_HANDLER_RET(__do_hash_fault)
1070 + bad_page_fault(regs, SIGBUS);
1071 + }
1072 + err = 0;
1073 +- }
1074 +
1075 +- return err;
1076 ++ } else if (err) {
1077 ++ hash__do_page_fault(regs);
1078 ++ }
1079 + }
1080 +
1081 + /*
1082 +@@ -1582,13 +1588,6 @@ DEFINE_INTERRUPT_HANDLER_RET(__do_hash_fault)
1083 + */
1084 + DEFINE_INTERRUPT_HANDLER_RAW(do_hash_fault)
1085 + {
1086 +- unsigned long dsisr = regs->dsisr;
1087 +-
1088 +- if (unlikely(dsisr & (DSISR_BAD_FAULT_64S | DSISR_KEYFAULT))) {
1089 +- hash__do_page_fault(regs);
1090 +- return 0;
1091 +- }
1092 +-
1093 + /*
1094 + * If we are in an "NMI" (e.g., an interrupt when soft-disabled), then
1095 + * don't call hash_page, just fail the fault. This is required to
1096 +@@ -1607,8 +1606,7 @@ DEFINE_INTERRUPT_HANDLER_RAW(do_hash_fault)
1097 + return 0;
1098 + }
1099 +
1100 +- if (__do_hash_fault(regs))
1101 +- hash__do_page_fault(regs);
1102 ++ __do_hash_fault(regs);
1103 +
1104 + return 0;
1105 + }
1106 +diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
1107 +index c855a0aeb49cc..d7ab868aab54a 100644
1108 +--- a/arch/powerpc/platforms/cell/smp.c
1109 ++++ b/arch/powerpc/platforms/cell/smp.c
1110 +@@ -78,9 +78,6 @@ static inline int smp_startup_cpu(unsigned int lcpu)
1111 +
1112 + pcpu = get_hard_smp_processor_id(lcpu);
1113 +
1114 +- /* Fixup atomic count: it exited inside IRQ handler. */
1115 +- task_thread_info(paca_ptrs[lcpu]->__current)->preempt_count = 0;
1116 +-
1117 + /*
1118 + * If the RTAS start-cpu token does not exist then presume the
1119 + * cpu is already spinning.
1120 +diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
1121 +index ef26fe40efb03..d34e6eb4be0d5 100644
1122 +--- a/arch/powerpc/platforms/pseries/papr_scm.c
1123 ++++ b/arch/powerpc/platforms/pseries/papr_scm.c
1124 +@@ -18,6 +18,7 @@
1125 + #include <asm/plpar_wrappers.h>
1126 + #include <asm/papr_pdsm.h>
1127 + #include <asm/mce.h>
1128 ++#include <asm/unaligned.h>
1129 +
1130 + #define BIND_ANY_ADDR (~0ul)
1131 +
1132 +@@ -900,6 +901,20 @@ static ssize_t flags_show(struct device *dev,
1133 + }
1134 + DEVICE_ATTR_RO(flags);
1135 +
1136 ++static umode_t papr_nd_attribute_visible(struct kobject *kobj,
1137 ++ struct attribute *attr, int n)
1138 ++{
1139 ++ struct device *dev = kobj_to_dev(kobj);
1140 ++ struct nvdimm *nvdimm = to_nvdimm(dev);
1141 ++ struct papr_scm_priv *p = nvdimm_provider_data(nvdimm);
1142 ++
1143 ++ /* For if perf-stats not available remove perf_stats sysfs */
1144 ++ if (attr == &dev_attr_perf_stats.attr && p->stat_buffer_len == 0)
1145 ++ return 0;
1146 ++
1147 ++ return attr->mode;
1148 ++}
1149 ++
1150 + /* papr_scm specific dimm attributes */
1151 + static struct attribute *papr_nd_attributes[] = {
1152 + &dev_attr_flags.attr,
1153 +@@ -909,6 +924,7 @@ static struct attribute *papr_nd_attributes[] = {
1154 +
1155 + static struct attribute_group papr_nd_attribute_group = {
1156 + .name = "papr",
1157 ++ .is_visible = papr_nd_attribute_visible,
1158 + .attrs = papr_nd_attributes,
1159 + };
1160 +
1161 +@@ -924,7 +940,6 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
1162 + struct nd_region_desc ndr_desc;
1163 + unsigned long dimm_flags;
1164 + int target_nid, online_nid;
1165 +- ssize_t stat_size;
1166 +
1167 + p->bus_desc.ndctl = papr_scm_ndctl;
1168 + p->bus_desc.module = THIS_MODULE;
1169 +@@ -1009,16 +1024,6 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
1170 + list_add_tail(&p->region_list, &papr_nd_regions);
1171 + mutex_unlock(&papr_ndr_lock);
1172 +
1173 +- /* Try retriving the stat buffer and see if its supported */
1174 +- stat_size = drc_pmem_query_stats(p, NULL, 0);
1175 +- if (stat_size > 0) {
1176 +- p->stat_buffer_len = stat_size;
1177 +- dev_dbg(&p->pdev->dev, "Max perf-stat size %lu-bytes\n",
1178 +- p->stat_buffer_len);
1179 +- } else {
1180 +- dev_info(&p->pdev->dev, "Dimm performance stats unavailable\n");
1181 +- }
1182 +-
1183 + return 0;
1184 +
1185 + err: nvdimm_bus_unregister(p->bus);
1186 +@@ -1094,8 +1099,10 @@ static int papr_scm_probe(struct platform_device *pdev)
1187 + u32 drc_index, metadata_size;
1188 + u64 blocks, block_size;
1189 + struct papr_scm_priv *p;
1190 ++ u8 uuid_raw[UUID_SIZE];
1191 + const char *uuid_str;
1192 +- u64 uuid[2];
1193 ++ ssize_t stat_size;
1194 ++ uuid_t uuid;
1195 + int rc;
1196 +
1197 + /* check we have all the required DT properties */
1198 +@@ -1138,16 +1145,23 @@ static int papr_scm_probe(struct platform_device *pdev)
1199 + p->hcall_flush_required = of_property_read_bool(dn, "ibm,hcall-flush-required");
1200 +
1201 + /* We just need to ensure that set cookies are unique across */
1202 +- uuid_parse(uuid_str, (uuid_t *) uuid);
1203 ++ uuid_parse(uuid_str, &uuid);
1204 ++
1205 + /*
1206 +- * cookie1 and cookie2 are not really little endian
1207 +- * we store a little endian representation of the
1208 +- * uuid str so that we can compare this with the label
1209 +- * area cookie irrespective of the endian config with which
1210 +- * the kernel is built.
1211 ++ * The cookie1 and cookie2 are not really little endian.
1212 ++ * We store a raw buffer representation of the
1213 ++ * uuid string so that we can compare this with the label
1214 ++ * area cookie irrespective of the endian configuration
1215 ++ * with which the kernel is built.
1216 ++ *
1217 ++ * Historically we stored the cookie in the below format.
1218 ++ * for a uuid string 72511b67-0b3b-42fd-8d1d-5be3cae8bcaa
1219 ++ * cookie1 was 0xfd423b0b671b5172
1220 ++ * cookie2 was 0xaabce8cae35b1d8d
1221 + */
1222 +- p->nd_set.cookie1 = cpu_to_le64(uuid[0]);
1223 +- p->nd_set.cookie2 = cpu_to_le64(uuid[1]);
1224 ++ export_uuid(uuid_raw, &uuid);
1225 ++ p->nd_set.cookie1 = get_unaligned_le64(&uuid_raw[0]);
1226 ++ p->nd_set.cookie2 = get_unaligned_le64(&uuid_raw[8]);
1227 +
1228 + /* might be zero */
1229 + p->metadata_size = metadata_size;
1230 +@@ -1172,6 +1186,14 @@ static int papr_scm_probe(struct platform_device *pdev)
1231 + p->res.name = pdev->name;
1232 + p->res.flags = IORESOURCE_MEM;
1233 +
1234 ++ /* Try retrieving the stat buffer and see if its supported */
1235 ++ stat_size = drc_pmem_query_stats(p, NULL, 0);
1236 ++ if (stat_size > 0) {
1237 ++ p->stat_buffer_len = stat_size;
1238 ++ dev_dbg(&p->pdev->dev, "Max perf-stat size %lu-bytes\n",
1239 ++ p->stat_buffer_len);
1240 ++ }
1241 ++
1242 + rc = papr_scm_nvdimm_init(p);
1243 + if (rc)
1244 + goto err2;
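
The historical cookie layout quoted in the hunk above is easy to verify with a few lines of byte arithmetic. A hedged standalone sketch, with the UUID bytes written out by hand rather than going through the kernel's uuid_parse()/export_uuid():

    #include <stdint.h>
    #include <stdio.h>

    /* Check of the cookie layout documented above for the example uuid
     * 72511b67-0b3b-42fd-8d1d-5be3cae8bcaa; the raw bytes are written out
     * by hand instead of using the kernel's uuid_parse()/export_uuid(). */
    static uint64_t le64(const uint8_t *p)
    {
        uint64_t v = 0;

        for (int i = 7; i >= 0; i--)
            v = (v << 8) | p[i];
        return v;
    }

    int main(void)
    {
        const uint8_t uuid_raw[16] = {
            0x72, 0x51, 0x1b, 0x67, 0x0b, 0x3b, 0x42, 0xfd,
            0x8d, 0x1d, 0x5b, 0xe3, 0xca, 0xe8, 0xbc, 0xaa,
        };

        printf("cookie1 0x%016llx\n", (unsigned long long)le64(&uuid_raw[0]));
        printf("cookie2 0x%016llx\n", (unsigned long long)le64(&uuid_raw[8]));
        /* prints 0xfd423b0b671b5172 and 0xaabce8cae35b1d8d, as above */
        return 0;
    }
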
1245 +diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
1246 +index c70b4be9f0a54..f47429323eee9 100644
1247 +--- a/arch/powerpc/platforms/pseries/smp.c
1248 ++++ b/arch/powerpc/platforms/pseries/smp.c
1249 +@@ -105,9 +105,6 @@ static inline int smp_startup_cpu(unsigned int lcpu)
1250 + return 1;
1251 + }
1252 +
1253 +- /* Fixup atomic count: it exited inside IRQ handler. */
1254 +- task_thread_info(paca_ptrs[lcpu]->__current)->preempt_count = 0;
1255 +-
1256 + /*
1257 + * If the RTAS start-cpu token does not exist then presume the
1258 + * cpu is already spinning.
1259 +@@ -211,7 +208,9 @@ static __init void pSeries_smp_probe(void)
1260 + if (!cpu_has_feature(CPU_FTR_SMT))
1261 + return;
1262 +
1263 +- if (check_kvm_guest()) {
1264 ++ check_kvm_guest();
1265 ++
1266 ++ if (is_kvm_guest()) {
1267 + /*
1268 + * KVM emulates doorbells by disabling FSCR[MSGP] so msgsndp
1269 + * faults to the hypervisor which then reads the instruction
1270 +diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
1271 +index 9a408e2942acf..bd82375db51a6 100644
1272 +--- a/arch/riscv/kernel/smpboot.c
1273 ++++ b/arch/riscv/kernel/smpboot.c
1274 +@@ -180,7 +180,6 @@ asmlinkage __visible void smp_callin(void)
1275 + * Disable preemption before enabling interrupts, so we don't try to
1276 + * schedule a CPU that hasn't actually started yet.
1277 + */
1278 +- preempt_disable();
1279 + local_irq_enable();
1280 + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
1281 + }
1282 +diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
1283 +index b4c7c34069f81..93488bbf491b9 100644
1284 +--- a/arch/s390/Kconfig
1285 ++++ b/arch/s390/Kconfig
1286 +@@ -164,6 +164,7 @@ config S390
1287 + select HAVE_FUTEX_CMPXCHG if FUTEX
1288 + select HAVE_GCC_PLUGINS
1289 + select HAVE_GENERIC_VDSO
1290 ++ select HAVE_IOREMAP_PROT if PCI
1291 + select HAVE_IRQ_EXIT_ON_IRQ_STACK
1292 + select HAVE_KERNEL_BZIP2
1293 + select HAVE_KERNEL_GZIP
1294 +@@ -853,7 +854,7 @@ config CMM_IUCV
1295 + config APPLDATA_BASE
1296 + def_bool n
1297 + prompt "Linux - VM Monitor Stream, base infrastructure"
1298 +- depends on PROC_FS
1299 ++ depends on PROC_SYSCTL
1300 + help
1301 + This provides a kernel interface for creating and updating z/VM APPLDATA
1302 + monitor records. The monitor records are updated at certain time
1303 +diff --git a/arch/s390/boot/uv.c b/arch/s390/boot/uv.c
1304 +index 87641dd65ccf9..b3501ea5039e4 100644
1305 +--- a/arch/s390/boot/uv.c
1306 ++++ b/arch/s390/boot/uv.c
1307 +@@ -36,6 +36,7 @@ void uv_query_info(void)
1308 + uv_info.max_sec_stor_addr = ALIGN(uvcb.max_guest_stor_addr, PAGE_SIZE);
1309 + uv_info.max_num_sec_conf = uvcb.max_num_sec_conf;
1310 + uv_info.max_guest_cpu_id = uvcb.max_guest_cpu_id;
1311 ++ uv_info.uv_feature_indications = uvcb.uv_feature_indications;
1312 + }
1313 +
1314 + #ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
1315 +diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
1316 +index 29c7ecd5ad1d5..adea53f69bfd3 100644
1317 +--- a/arch/s390/include/asm/pgtable.h
1318 ++++ b/arch/s390/include/asm/pgtable.h
1319 +@@ -344,8 +344,6 @@ static inline int is_module_addr(void *addr)
1320 + #define PTRS_PER_P4D _CRST_ENTRIES
1321 + #define PTRS_PER_PGD _CRST_ENTRIES
1322 +
1323 +-#define MAX_PTRS_PER_P4D PTRS_PER_P4D
1324 +-
1325 + /*
1326 + * Segment table and region3 table entry encoding
1327 + * (R = read-only, I = invalid, y = young bit):
1328 +@@ -865,6 +863,25 @@ static inline int pte_unused(pte_t pte)
1329 + return pte_val(pte) & _PAGE_UNUSED;
1330 + }
1331 +
1332 ++/*
1333 ++ * Extract the pgprot value from the given pte while at the same time making it
1334 ++ * usable for kernel address space mappings where fault driven dirty and
1335 ++ * young/old accounting is not supported, i.e _PAGE_PROTECT and _PAGE_INVALID
1336 ++ * must not be set.
1337 ++ */
1338 ++static inline pgprot_t pte_pgprot(pte_t pte)
1339 ++{
1340 ++ unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK;
1341 ++
1342 ++ if (pte_write(pte))
1343 ++ pte_flags |= pgprot_val(PAGE_KERNEL);
1344 ++ else
1345 ++ pte_flags |= pgprot_val(PAGE_KERNEL_RO);
1346 ++ pte_flags |= pte_val(pte) & mio_wb_bit_mask;
1347 ++
1348 ++ return __pgprot(pte_flags);
1349 ++}
1350 ++
1351 + /*
1352 + * pgd/pmd/pte modification functions
1353 + */
1354 +diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
1355 +index b49e0492842cc..d9d5350cc3ec3 100644
1356 +--- a/arch/s390/include/asm/preempt.h
1357 ++++ b/arch/s390/include/asm/preempt.h
1358 +@@ -29,12 +29,6 @@ static inline void preempt_count_set(int pc)
1359 + old, new) != old);
1360 + }
1361 +
1362 +-#define init_task_preempt_count(p) do { } while (0)
1363 +-
1364 +-#define init_idle_preempt_count(p, cpu) do { \
1365 +- S390_lowcore.preempt_count = PREEMPT_ENABLED; \
1366 +-} while (0)
1367 +-
1368 + static inline void set_preempt_need_resched(void)
1369 + {
1370 + __atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
1371 +@@ -88,12 +82,6 @@ static inline void preempt_count_set(int pc)
1372 + S390_lowcore.preempt_count = pc;
1373 + }
1374 +
1375 +-#define init_task_preempt_count(p) do { } while (0)
1376 +-
1377 +-#define init_idle_preempt_count(p, cpu) do { \
1378 +- S390_lowcore.preempt_count = PREEMPT_ENABLED; \
1379 +-} while (0)
1380 +-
1381 + static inline void set_preempt_need_resched(void)
1382 + {
1383 + }
1384 +@@ -130,6 +118,10 @@ static inline bool should_resched(int preempt_offset)
1385 +
1386 + #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
1387 +
1388 ++#define init_task_preempt_count(p) do { } while (0)
1389 ++/* Deferred to CPU bringup time */
1390 ++#define init_idle_preempt_count(p, cpu) do { } while (0)
1391 ++
1392 + #ifdef CONFIG_PREEMPTION
1393 + extern void preempt_schedule(void);
1394 + #define __preempt_schedule() preempt_schedule()
1395 +diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
1396 +index 7b98d4caee779..12c5f006c1364 100644
1397 +--- a/arch/s390/include/asm/uv.h
1398 ++++ b/arch/s390/include/asm/uv.h
1399 +@@ -73,6 +73,10 @@ enum uv_cmds_inst {
1400 + BIT_UVC_CMD_UNPIN_PAGE_SHARED = 22,
1401 + };
1402 +
1403 ++enum uv_feat_ind {
1404 ++ BIT_UV_FEAT_MISC = 0,
1405 ++};
1406 ++
1407 + struct uv_cb_header {
1408 + u16 len;
1409 + u16 cmd; /* Command Code */
1410 +@@ -97,7 +101,8 @@ struct uv_cb_qui {
1411 + u64 max_guest_stor_addr;
1412 + u8 reserved88[158 - 136];
1413 + u16 max_guest_cpu_id;
1414 +- u8 reserveda0[200 - 160];
1415 ++ u64 uv_feature_indications;
1416 ++ u8 reserveda0[200 - 168];
1417 + } __packed __aligned(8);
1418 +
1419 + /* Initialize Ultravisor */
1420 +@@ -274,6 +279,7 @@ struct uv_info {
1421 + unsigned long max_sec_stor_addr;
1422 + unsigned int max_num_sec_conf;
1423 + unsigned short max_guest_cpu_id;
1424 ++ unsigned long uv_feature_indications;
1425 + };
1426 +
1427 + extern struct uv_info uv_info;
1428 +diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
1429 +index 5aab59ad56881..382d73da134cf 100644
1430 +--- a/arch/s390/kernel/setup.c
1431 ++++ b/arch/s390/kernel/setup.c
1432 +@@ -466,6 +466,7 @@ static void __init setup_lowcore_dat_off(void)
1433 + lc->br_r1_trampoline = 0x07f1; /* br %r1 */
1434 + lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
1435 + lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
1436 ++ lc->preempt_count = PREEMPT_DISABLED;
1437 +
1438 + set_prefix((u32)(unsigned long) lc);
1439 + lowcore_ptr[0] = lc;
1440 +diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
1441 +index 2fec2b80d35d2..1fb483e06a647 100644
1442 +--- a/arch/s390/kernel/smp.c
1443 ++++ b/arch/s390/kernel/smp.c
1444 +@@ -219,6 +219,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
1445 + lc->br_r1_trampoline = 0x07f1; /* br %r1 */
1446 + lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
1447 + lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
1448 ++ lc->preempt_count = PREEMPT_DISABLED;
1449 + if (nmi_alloc_per_cpu(lc))
1450 + goto out_stack;
1451 + lowcore_ptr[cpu] = lc;
1452 +@@ -878,7 +879,6 @@ static void smp_init_secondary(void)
1453 + restore_access_regs(S390_lowcore.access_regs_save_area);
1454 + cpu_init();
1455 + rcu_cpu_starting(cpu);
1456 +- preempt_disable();
1457 + init_cpu_timer();
1458 + vtime_init();
1459 + vdso_getcpu_init();
1460 +diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
1461 +index 370f664580af5..650b4b7b1e6b0 100644
1462 +--- a/arch/s390/kernel/uv.c
1463 ++++ b/arch/s390/kernel/uv.c
1464 +@@ -364,6 +364,15 @@ static ssize_t uv_query_facilities(struct kobject *kobj,
1465 + static struct kobj_attribute uv_query_facilities_attr =
1466 + __ATTR(facilities, 0444, uv_query_facilities, NULL);
1467 +
1468 ++static ssize_t uv_query_feature_indications(struct kobject *kobj,
1469 ++ struct kobj_attribute *attr, char *buf)
1470 ++{
1471 ++ return sysfs_emit(buf, "%lx\n", uv_info.uv_feature_indications);
1472 ++}
1473 ++
1474 ++static struct kobj_attribute uv_query_feature_indications_attr =
1475 ++ __ATTR(feature_indications, 0444, uv_query_feature_indications, NULL);
1476 ++
1477 + static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
1478 + struct kobj_attribute *attr, char *page)
1479 + {
1480 +@@ -396,6 +405,7 @@ static struct kobj_attribute uv_query_max_guest_addr_attr =
1481 +
1482 + static struct attribute *uv_query_attrs[] = {
1483 + &uv_query_facilities_attr.attr,
1484 ++ &uv_query_feature_indications_attr.attr,
1485 + &uv_query_max_guest_cpus_attr.attr,
1486 + &uv_query_max_guest_vms_attr.attr,
1487 + &uv_query_max_guest_addr_attr.attr,
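The new read-only attribute exposes uv_info.uv_feature_indications in hex
alongside the existing query attributes. Assuming the UV query kobject sits at
/sys/firmware/uv/query/ like its siblings (an assumption, not stated in this
hunk), a minimal user-space reader looks like:

#include <stdio.h>

int main(void)
{
	/* Path assumed from the sibling "facilities" attribute. */
	FILE *f = fopen("/sys/firmware/uv/query/feature_indications", "r");
	unsigned long feat;

	if (!f || fscanf(f, "%lx", &feat) != 1) {
		perror("feature_indications");
		return 1;
	}
	fclose(f);
	printf("UV feature indications: %#lx\n", feat);
	return 0;
}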
1488 +diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
1489 +index 1296fc10f80c8..876fc1f7282a0 100644
1490 +--- a/arch/s390/kvm/kvm-s390.c
1491 ++++ b/arch/s390/kvm/kvm-s390.c
1492 +@@ -329,31 +329,31 @@ static void allow_cpu_feat(unsigned long nr)
1493 +
1494 + static inline int plo_test_bit(unsigned char nr)
1495 + {
1496 +- register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
1497 ++ unsigned long function = (unsigned long)nr | 0x100;
1498 + int cc;
1499 +
1500 + asm volatile(
1501 ++ " lgr 0,%[function]\n"
1502 + /* Parameter registers are ignored for "test bit" */
1503 + " plo 0,0,0,0(0)\n"
1504 + " ipm %0\n"
1505 + " srl %0,28\n"
1506 + : "=d" (cc)
1507 +- : "d" (r0)
1508 +- : "cc");
1509 ++ : [function] "d" (function)
1510 ++ : "cc", "0");
1511 + return cc == 0;
1512 + }
1513 +
1514 + static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
1515 + {
1516 +- register unsigned long r0 asm("0") = 0; /* query function */
1517 +- register unsigned long r1 asm("1") = (unsigned long) query;
1518 +-
1519 + asm volatile(
1520 +- /* Parameter regs are ignored */
1521 ++ " lghi 0,0\n"
1522 ++ " lgr 1,%[query]\n"
1523 ++ /* Parameter registers are ignored */
1524 + " .insn rrf,%[opc] << 16,2,4,6,0\n"
1525 + :
1526 +- : "d" (r0), "a" (r1), [opc] "i" (opcode)
1527 +- : "cc", "memory");
1528 ++ : [query] "d" ((unsigned long)query), [opc] "i" (opcode)
1529 ++ : "cc", "memory", "0", "1");
1530 + }
1531 +
1532 + #define INSN_SORTL 0xb938
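The plo_test_bit()/__insn32_query() rework retires GCC's fragile
"register ... asm("0")" local variables: the fixed registers are now loaded
inside the asm body and named in the clobber list, so the compiler keeps them
free no matter how it inlines or instruments the surrounding function. The same
pattern in a self-contained sketch, using x86-64 AT&T syntax purely for
illustration:

#include <stdio.h>

static inline unsigned long double_it(unsigned long x)
{
	unsigned long out;

	asm volatile(
		"	mov %[in], %%rax\n"	/* load the fixed register in the asm body */
		"	add %%rax, %%rax\n"
		"	mov %%rax, %[res]\n"
		: [res] "=r" (out)
		: [in] "r" (x)
		: "rax", "cc");			/* tell the compiler %rax is written */
	return out;
}

int main(void)
{
	printf("%lu\n", double_it(21));		/* prints 42 */
	return 0;
}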
1533 +diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
1534 +index 826d017773616..f54f6dcd87489 100644
1535 +--- a/arch/s390/mm/fault.c
1536 ++++ b/arch/s390/mm/fault.c
1537 +@@ -792,6 +792,32 @@ void do_secure_storage_access(struct pt_regs *regs)
1538 + struct page *page;
1539 + int rc;
1540 +
1541 ++ /*
1542 ++	 * bit 61 tells us if the address is valid; if it's not, we
1543 ++ * have a major problem and should stop the kernel or send a
1544 ++ * SIGSEGV to the process. Unfortunately bit 61 is not
1545 ++	 * reliable without the misc UV feature, so we need to check
1546 ++ * for that as well.
1547 ++ */
1548 ++ if (test_bit_inv(BIT_UV_FEAT_MISC, &uv_info.uv_feature_indications) &&
1549 ++ !test_bit_inv(61, &regs->int_parm_long)) {
1550 ++ /*
1551 ++ * When this happens, userspace did something that it
1552 ++ * was not supposed to do, e.g. branching into secure
1553 ++ * memory. Trigger a segmentation fault.
1554 ++ */
1555 ++ if (user_mode(regs)) {
1556 ++ send_sig(SIGSEGV, current, 0);
1557 ++ return;
1558 ++ }
1559 ++
1560 ++ /*
1561 ++ * The kernel should never run into this case and we
1562 ++ * have no way out of this situation.
1563 ++ */
1564 ++ panic("Unexpected PGM 0x3d with TEID bit 61=0");
1565 ++ }
1566 ++
1567 + switch (get_fault_type(regs)) {
1568 + case USER_FAULT:
1569 + mm = current->mm;
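The TEID check uses test_bit_inv(), i.e. IBM MSB-0 bit numbering in which bit 0
is the most significant bit of the 64-bit word, so "bit 61" is the mask
1UL << 2. A plain-C stand-in that only mirrors the numbering of the s390
helper:

#include <assert.h>
#include <stdbool.h>

/* MSB-0 numbering: bit n of a 64-bit word is mask 1UL << (63 - n). */
static bool test_bit_inv64(unsigned int nr, unsigned long word)
{
	return (word >> (63 - nr)) & 1UL;
}

int main(void)
{
	unsigned long teid = 1UL << 2;		/* bit 61 (MSB-0) set */

	assert(test_bit_inv64(61, teid));	/* address-valid bit */
	assert(!test_bit_inv64(0, 0UL));	/* BIT_UV_FEAT_MISC clear */
	return 0;
}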
1570 +diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
1571 +index 372acdc9033eb..65924d9ec2459 100644
1572 +--- a/arch/sh/kernel/smp.c
1573 ++++ b/arch/sh/kernel/smp.c
1574 +@@ -186,8 +186,6 @@ asmlinkage void start_secondary(void)
1575 +
1576 + per_cpu_trap_init();
1577 +
1578 +- preempt_disable();
1579 +-
1580 + notify_cpu_starting(cpu);
1581 +
1582 + local_irq_enable();
1583 +diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
1584 +index 50c127ab46d5b..22b148e5a5f88 100644
1585 +--- a/arch/sparc/kernel/smp_32.c
1586 ++++ b/arch/sparc/kernel/smp_32.c
1587 +@@ -348,7 +348,6 @@ static void sparc_start_secondary(void *arg)
1588 + */
1589 + arch_cpu_pre_starting(arg);
1590 +
1591 +- preempt_disable();
1592 + cpu = smp_processor_id();
1593 +
1594 + notify_cpu_starting(cpu);
1595 +diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
1596 +index e38d8bf454e86..ae5faa1d989d2 100644
1597 +--- a/arch/sparc/kernel/smp_64.c
1598 ++++ b/arch/sparc/kernel/smp_64.c
1599 +@@ -138,9 +138,6 @@ void smp_callin(void)
1600 +
1601 + set_cpu_online(cpuid, true);
1602 +
1603 +- /* idle thread is expected to have preempt disabled */
1604 +- preempt_disable();
1605 +-
1606 + local_irq_enable();
1607 +
1608 + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
1609 +diff --git a/arch/x86/crypto/curve25519-x86_64.c b/arch/x86/crypto/curve25519-x86_64.c
1610 +index 6706b6cb1d0fc..38caf61cd5b7d 100644
1611 +--- a/arch/x86/crypto/curve25519-x86_64.c
1612 ++++ b/arch/x86/crypto/curve25519-x86_64.c
1613 +@@ -1500,7 +1500,7 @@ static int __init curve25519_mod_init(void)
1614 + static void __exit curve25519_mod_exit(void)
1615 + {
1616 + if (IS_REACHABLE(CONFIG_CRYPTO_KPP) &&
1617 +- (boot_cpu_has(X86_FEATURE_BMI2) || boot_cpu_has(X86_FEATURE_ADX)))
1618 ++ static_branch_likely(&curve25519_use_bmi2_adx))
1619 + crypto_unregister_kpp(&curve25519_alg);
1620 + }
1621 +
1622 +diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
1623 +index a16a5294d55f6..1886aaf199143 100644
1624 +--- a/arch/x86/entry/entry_64.S
1625 ++++ b/arch/x86/entry/entry_64.S
1626 +@@ -506,7 +506,7 @@ SYM_CODE_START(\asmsym)
1627 +
1628 + movq %rsp, %rdi /* pt_regs pointer */
1629 +
1630 +- call \cfunc
1631 ++ call kernel_\cfunc
1632 +
1633 + /*
1634 + * No need to switch back to the IST stack. The current stack is either
1635 +@@ -517,7 +517,7 @@ SYM_CODE_START(\asmsym)
1636 +
1637 + /* Switch to the regular task stack */
1638 + .Lfrom_usermode_switch_stack_\@:
1639 +- idtentry_body safe_stack_\cfunc, has_error_code=1
1640 ++ idtentry_body user_\cfunc, has_error_code=1
1641 +
1642 + _ASM_NOKPROBE(\asmsym)
1643 + SYM_CODE_END(\asmsym)
1644 +diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
1645 +index 8f71dd72ef95f..1eb45139fcc6e 100644
1646 +--- a/arch/x86/events/core.c
1647 ++++ b/arch/x86/events/core.c
1648 +@@ -1626,6 +1626,8 @@ static void x86_pmu_del(struct perf_event *event, int flags)
1649 + if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
1650 + goto do_del;
1651 +
1652 ++ __set_bit(event->hw.idx, cpuc->dirty);
1653 ++
1654 + /*
1655 + * Not a TXN, therefore cleanup properly.
1656 + */
1657 +@@ -2474,6 +2476,31 @@ static int x86_pmu_event_init(struct perf_event *event)
1658 + return err;
1659 + }
1660 +
1661 ++void perf_clear_dirty_counters(void)
1662 ++{
1663 ++ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1664 ++ int i;
1665 ++
1666 ++ /* Don't need to clear the assigned counter. */
1667 ++ for (i = 0; i < cpuc->n_events; i++)
1668 ++ __clear_bit(cpuc->assign[i], cpuc->dirty);
1669 ++
1670 ++ if (bitmap_empty(cpuc->dirty, X86_PMC_IDX_MAX))
1671 ++ return;
1672 ++
1673 ++ for_each_set_bit(i, cpuc->dirty, X86_PMC_IDX_MAX) {
1674 ++ /* Metrics and fake events don't have corresponding HW counters. */
1675 ++ if (is_metric_idx(i) || (i == INTEL_PMC_IDX_FIXED_VLBR))
1676 ++ continue;
1677 ++ else if (i >= INTEL_PMC_IDX_FIXED)
1678 ++ wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + (i - INTEL_PMC_IDX_FIXED), 0);
1679 ++ else
1680 ++ wrmsrl(x86_pmu_event_addr(i), 0);
1681 ++ }
1682 ++
1683 ++ bitmap_zero(cpuc->dirty, X86_PMC_IDX_MAX);
1684 ++}
1685 ++
1686 + static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
1687 + {
1688 + if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
1689 +@@ -2497,7 +2524,6 @@ static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
1690 +
1691 + static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
1692 + {
1693 +-
1694 + if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
1695 + return;
1696 +
1697 +diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
1698 +index e28892270c580..d76be3bba11e4 100644
1699 +--- a/arch/x86/events/intel/core.c
1700 ++++ b/arch/x86/events/intel/core.c
1701 +@@ -280,6 +280,8 @@ static struct extra_reg intel_spr_extra_regs[] __read_mostly = {
1702 + INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
1703 + INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
1704 + INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
1705 ++ INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
1706 ++ INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
1707 + EVENT_EXTRA_END
1708 + };
1709 +
1710 +@@ -4030,8 +4032,10 @@ spr_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
1711 + * The :ppp indicates the Precise Distribution (PDist) facility, which
1712 + * is only supported on the GP counter 0. If a :ppp event which is not
1713 + * available on the GP counter 0, error out.
1714 ++ * Exception: Instruction PDIR is only available on the fixed counter 0.
1715 + */
1716 +- if (event->attr.precise_ip == 3) {
1717 ++ if ((event->attr.precise_ip == 3) &&
1718 ++ !constraint_match(&fixed0_constraint, event->hw.config)) {
1719 + if (c->idxmsk64 & BIT_ULL(0))
1720 + return &counter0_constraint;
1721 +
1722 +@@ -6157,8 +6161,13 @@ __init int intel_pmu_init(void)
1723 + pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
1724 + pmu->name = "cpu_core";
1725 + pmu->cpu_type = hybrid_big;
1726 +- pmu->num_counters = x86_pmu.num_counters + 2;
1727 +- pmu->num_counters_fixed = x86_pmu.num_counters_fixed + 1;
1728 ++ if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) {
1729 ++ pmu->num_counters = x86_pmu.num_counters + 2;
1730 ++ pmu->num_counters_fixed = x86_pmu.num_counters_fixed + 1;
1731 ++ } else {
1732 ++ pmu->num_counters = x86_pmu.num_counters;
1733 ++ pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
1734 ++ }
1735 + pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters);
1736 + pmu->unconstrained = (struct event_constraint)
1737 + __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
1738 +diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
1739 +index ad87cb36f7c81..2bf1c7ea2758d 100644
1740 +--- a/arch/x86/events/perf_event.h
1741 ++++ b/arch/x86/events/perf_event.h
1742 +@@ -229,6 +229,7 @@ struct cpu_hw_events {
1743 + */
1744 + struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
1745 + unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
1746 ++ unsigned long dirty[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
1747 + int enabled;
1748 +
1749 + int n_events; /* the # of events in the below arrays */
1750 +diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
1751 +index 73d45b0dfff2d..cd9f3e3049449 100644
1752 +--- a/arch/x86/include/asm/idtentry.h
1753 ++++ b/arch/x86/include/asm/idtentry.h
1754 +@@ -312,8 +312,8 @@ static __always_inline void __##func(struct pt_regs *regs)
1755 + */
1756 + #define DECLARE_IDTENTRY_VC(vector, func) \
1757 + DECLARE_IDTENTRY_RAW_ERRORCODE(vector, func); \
1758 +- __visible noinstr void ist_##func(struct pt_regs *regs, unsigned long error_code); \
1759 +- __visible noinstr void safe_stack_##func(struct pt_regs *regs, unsigned long error_code)
1760 ++ __visible noinstr void kernel_##func(struct pt_regs *regs, unsigned long error_code); \
1761 ++ __visible noinstr void user_##func(struct pt_regs *regs, unsigned long error_code)
1762 +
1763 + /**
1764 + * DEFINE_IDTENTRY_IST - Emit code for IST entry points
1765 +@@ -355,33 +355,24 @@ static __always_inline void __##func(struct pt_regs *regs)
1766 + DEFINE_IDTENTRY_RAW_ERRORCODE(func)
1767 +
1768 + /**
1769 +- * DEFINE_IDTENTRY_VC_SAFE_STACK - Emit code for VMM communication handler
1770 +- which runs on a safe stack.
1771 ++ * DEFINE_IDTENTRY_VC_KERNEL - Emit code for VMM communication handler
1772 ++ when raised from kernel mode
1773 + * @func: Function name of the entry point
1774 + *
1775 + * Maps to DEFINE_IDTENTRY_RAW_ERRORCODE
1776 + */
1777 +-#define DEFINE_IDTENTRY_VC_SAFE_STACK(func) \
1778 +- DEFINE_IDTENTRY_RAW_ERRORCODE(safe_stack_##func)
1779 ++#define DEFINE_IDTENTRY_VC_KERNEL(func) \
1780 ++ DEFINE_IDTENTRY_RAW_ERRORCODE(kernel_##func)
1781 +
1782 + /**
1783 +- * DEFINE_IDTENTRY_VC_IST - Emit code for VMM communication handler
1784 +- which runs on the VC fall-back stack
1785 ++ * DEFINE_IDTENTRY_VC_USER - Emit code for VMM communication handler
1786 ++ when raised from user mode
1787 + * @func: Function name of the entry point
1788 + *
1789 + * Maps to DEFINE_IDTENTRY_RAW_ERRORCODE
1790 + */
1791 +-#define DEFINE_IDTENTRY_VC_IST(func) \
1792 +- DEFINE_IDTENTRY_RAW_ERRORCODE(ist_##func)
1793 +-
1794 +-/**
1795 +- * DEFINE_IDTENTRY_VC - Emit code for VMM communication handler
1796 +- * @func: Function name of the entry point
1797 +- *
1798 +- * Maps to DEFINE_IDTENTRY_RAW_ERRORCODE
1799 +- */
1800 +-#define DEFINE_IDTENTRY_VC(func) \
1801 +- DEFINE_IDTENTRY_RAW_ERRORCODE(func)
1802 ++#define DEFINE_IDTENTRY_VC_USER(func) \
1803 ++ DEFINE_IDTENTRY_RAW_ERRORCODE(user_##func)
1804 +
1805 + #else /* CONFIG_X86_64 */
1806 +
1807 +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
1808 +index 682e82956ea5a..fbd55c682d5e7 100644
1809 +--- a/arch/x86/include/asm/kvm_host.h
1810 ++++ b/arch/x86/include/asm/kvm_host.h
1811 +@@ -85,7 +85,7 @@
1812 + #define KVM_REQ_APICV_UPDATE \
1813 + KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
1814 + #define KVM_REQ_TLB_FLUSH_CURRENT KVM_ARCH_REQ(26)
1815 +-#define KVM_REQ_HV_TLB_FLUSH \
1816 ++#define KVM_REQ_TLB_FLUSH_GUEST \
1817 + KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_NO_WAKEUP)
1818 + #define KVM_REQ_APF_READY KVM_ARCH_REQ(28)
1819 + #define KVM_REQ_MSR_FILTER_CHANGED KVM_ARCH_REQ(29)
1820 +@@ -1464,6 +1464,7 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu);
1821 + void kvm_mmu_init_vm(struct kvm *kvm);
1822 + void kvm_mmu_uninit_vm(struct kvm *kvm);
1823 +
1824 ++void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu);
1825 + void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
1826 + void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
1827 + struct kvm_memory_slot *memslot,
1828 +diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
1829 +index 544f41a179fb6..8fc1b5003713f 100644
1830 +--- a/arch/x86/include/asm/perf_event.h
1831 ++++ b/arch/x86/include/asm/perf_event.h
1832 +@@ -478,6 +478,7 @@ struct x86_pmu_lbr {
1833 +
1834 + extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
1835 + extern void perf_check_microcode(void);
1836 ++extern void perf_clear_dirty_counters(void);
1837 + extern int x86_perf_rdpmc_index(struct perf_event *event);
1838 + #else
1839 + static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
1840 +diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
1841 +index f8cb8af4de5ce..fe5efbcba8240 100644
1842 +--- a/arch/x86/include/asm/preempt.h
1843 ++++ b/arch/x86/include/asm/preempt.h
1844 +@@ -44,7 +44,7 @@ static __always_inline void preempt_count_set(int pc)
1845 + #define init_task_preempt_count(p) do { } while (0)
1846 +
1847 + #define init_idle_preempt_count(p, cpu) do { \
1848 +- per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
1849 ++ per_cpu(__preempt_count, (cpu)) = PREEMPT_DISABLED; \
1850 + } while (0)
1851 +
1852 + /*
1853 +diff --git a/arch/x86/include/uapi/asm/hwcap2.h b/arch/x86/include/uapi/asm/hwcap2.h
1854 +index 5fdfcb47000f9..054604aba9f00 100644
1855 +--- a/arch/x86/include/uapi/asm/hwcap2.h
1856 ++++ b/arch/x86/include/uapi/asm/hwcap2.h
1857 +@@ -2,10 +2,12 @@
1858 + #ifndef _ASM_X86_HWCAP2_H
1859 + #define _ASM_X86_HWCAP2_H
1860 +
1861 ++#include <linux/const.h>
1862 ++
1863 + /* MONITOR/MWAIT enabled in Ring 3 */
1864 +-#define HWCAP2_RING3MWAIT (1 << 0)
1865 ++#define HWCAP2_RING3MWAIT _BITUL(0)
1866 +
1867 + /* Kernel allows FSGSBASE instructions available in Ring 3 */
1868 +-#define HWCAP2_FSGSBASE BIT(1)
1869 ++#define HWCAP2_FSGSBASE _BITUL(1)
1870 +
1871 + #endif
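The hwcap2.h change is a UAPI fix: BIT() is a kernel-internal macro, so
HWCAP2_FSGSBASE defined as BIT(1) did not compile in plain user space, while
_BITUL() from <linux/const.h> is exported and also usable from assembly. User
code normally consumes these bits through the auxiliary vector; a
self-contained check, with the masks restated locally instead of pulled from
the header:

#include <stdio.h>
#include <sys/auxv.h>

#define HWCAP2_RING3MWAIT	(1UL << 0)
#define HWCAP2_FSGSBASE		(1UL << 1)

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	printf("ring3 mwait: %s\n", hwcap2 & HWCAP2_RING3MWAIT ? "yes" : "no");
	printf("fsgsbase:    %s\n", hwcap2 & HWCAP2_FSGSBASE ? "yes" : "no");
	return 0;
}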
1872 +diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
1873 +index 22f13343b5da8..4fa0a42808951 100644
1874 +--- a/arch/x86/kernel/cpu/mshyperv.c
1875 ++++ b/arch/x86/kernel/cpu/mshyperv.c
1876 +@@ -236,7 +236,7 @@ static void __init hv_smp_prepare_cpus(unsigned int max_cpus)
1877 + for_each_present_cpu(i) {
1878 + if (i == 0)
1879 + continue;
1880 +- ret = hv_call_add_logical_proc(numa_cpu_node(i), i, cpu_physical_id(i));
1881 ++ ret = hv_call_add_logical_proc(numa_cpu_node(i), i, i);
1882 + BUG_ON(ret);
1883 + }
1884 +
1885 +diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
1886 +index 6edd1e2ee8afa..058aacb423371 100644
1887 +--- a/arch/x86/kernel/early-quirks.c
1888 ++++ b/arch/x86/kernel/early-quirks.c
1889 +@@ -549,6 +549,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
1890 + INTEL_CNL_IDS(&gen9_early_ops),
1891 + INTEL_ICL_11_IDS(&gen11_early_ops),
1892 + INTEL_EHL_IDS(&gen11_early_ops),
1893 ++ INTEL_JSL_IDS(&gen11_early_ops),
1894 + INTEL_TGL_12_IDS(&gen11_early_ops),
1895 + INTEL_RKL_IDS(&gen11_early_ops),
1896 + INTEL_ADLS_IDS(&gen11_early_ops),
1897 +diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
1898 +index 651b81cd648e5..d66a33d24f4f9 100644
1899 +--- a/arch/x86/kernel/sev.c
1900 ++++ b/arch/x86/kernel/sev.c
1901 +@@ -7,12 +7,11 @@
1902 + * Author: Joerg Roedel <jroedel@××××.de>
1903 + */
1904 +
1905 +-#define pr_fmt(fmt) "SEV-ES: " fmt
1906 ++#define pr_fmt(fmt) "SEV: " fmt
1907 +
1908 + #include <linux/sched/debug.h> /* For show_regs() */
1909 + #include <linux/percpu-defs.h>
1910 + #include <linux/mem_encrypt.h>
1911 +-#include <linux/lockdep.h>
1912 + #include <linux/printk.h>
1913 + #include <linux/mm_types.h>
1914 + #include <linux/set_memory.h>
1915 +@@ -192,11 +191,19 @@ void noinstr __sev_es_ist_exit(void)
1916 + this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
1917 + }
1918 +
1919 +-static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
1920 ++/*
1921 ++ * Nothing shall interrupt this code path while holding the per-CPU
1922 ++ * GHCB. The backup GHCB is only for NMIs interrupting this path.
1923 ++ *
1924 ++ * Callers must disable local interrupts around it.
1925 ++ */
1926 ++static noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
1927 + {
1928 + struct sev_es_runtime_data *data;
1929 + struct ghcb *ghcb;
1930 +
1931 ++ WARN_ON(!irqs_disabled());
1932 ++
1933 + data = this_cpu_read(runtime_data);
1934 + ghcb = &data->ghcb_page;
1935 +
1936 +@@ -213,7 +220,9 @@ static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
1937 + data->ghcb_active = false;
1938 + data->backup_ghcb_active = false;
1939 +
1940 ++ instrumentation_begin();
1941 + panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
1942 ++ instrumentation_end();
1943 + }
1944 +
1945 + /* Mark backup_ghcb active before writing to it */
1946 +@@ -479,11 +488,13 @@ static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt
1947 + /* Include code shared with pre-decompression boot stage */
1948 + #include "sev-shared.c"
1949 +
1950 +-static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
1951 ++static noinstr void __sev_put_ghcb(struct ghcb_state *state)
1952 + {
1953 + struct sev_es_runtime_data *data;
1954 + struct ghcb *ghcb;
1955 +
1956 ++ WARN_ON(!irqs_disabled());
1957 ++
1958 + data = this_cpu_read(runtime_data);
1959 + ghcb = &data->ghcb_page;
1960 +
1961 +@@ -507,7 +518,7 @@ void noinstr __sev_es_nmi_complete(void)
1962 + struct ghcb_state state;
1963 + struct ghcb *ghcb;
1964 +
1965 +- ghcb = sev_es_get_ghcb(&state);
1966 ++ ghcb = __sev_get_ghcb(&state);
1967 +
1968 + vc_ghcb_invalidate(ghcb);
1969 + ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
1970 +@@ -517,7 +528,7 @@ void noinstr __sev_es_nmi_complete(void)
1971 + sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
1972 + VMGEXIT();
1973 +
1974 +- sev_es_put_ghcb(&state);
1975 ++ __sev_put_ghcb(&state);
1976 + }
1977 +
1978 + static u64 get_jump_table_addr(void)
1979 +@@ -529,7 +540,7 @@ static u64 get_jump_table_addr(void)
1980 +
1981 + local_irq_save(flags);
1982 +
1983 +- ghcb = sev_es_get_ghcb(&state);
1984 ++ ghcb = __sev_get_ghcb(&state);
1985 +
1986 + vc_ghcb_invalidate(ghcb);
1987 + ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE);
1988 +@@ -543,7 +554,7 @@ static u64 get_jump_table_addr(void)
1989 + ghcb_sw_exit_info_2_is_valid(ghcb))
1990 + ret = ghcb->save.sw_exit_info_2;
1991 +
1992 +- sev_es_put_ghcb(&state);
1993 ++ __sev_put_ghcb(&state);
1994 +
1995 + local_irq_restore(flags);
1996 +
1997 +@@ -668,7 +679,7 @@ static void sev_es_ap_hlt_loop(void)
1998 + struct ghcb_state state;
1999 + struct ghcb *ghcb;
2000 +
2001 +- ghcb = sev_es_get_ghcb(&state);
2002 ++ ghcb = __sev_get_ghcb(&state);
2003 +
2004 + while (true) {
2005 + vc_ghcb_invalidate(ghcb);
2006 +@@ -685,7 +696,7 @@ static void sev_es_ap_hlt_loop(void)
2007 + break;
2008 + }
2009 +
2010 +- sev_es_put_ghcb(&state);
2011 ++ __sev_put_ghcb(&state);
2012 + }
2013 +
2014 + /*
2015 +@@ -775,7 +786,7 @@ void __init sev_es_init_vc_handling(void)
2016 + sev_es_setup_play_dead();
2017 +
2018 + /* Secondary CPUs use the runtime #VC handler */
2019 +- initial_vc_handler = (unsigned long)safe_stack_exc_vmm_communication;
2020 ++ initial_vc_handler = (unsigned long)kernel_exc_vmm_communication;
2021 + }
2022 +
2023 + static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt)
2024 +@@ -1213,14 +1224,6 @@ static enum es_result vc_handle_trap_ac(struct ghcb *ghcb,
2025 + return ES_EXCEPTION;
2026 + }
2027 +
2028 +-static __always_inline void vc_handle_trap_db(struct pt_regs *regs)
2029 +-{
2030 +- if (user_mode(regs))
2031 +- noist_exc_debug(regs);
2032 +- else
2033 +- exc_debug(regs);
2034 +-}
2035 +-
2036 + static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
2037 + struct ghcb *ghcb,
2038 + unsigned long exit_code)
2039 +@@ -1316,44 +1319,15 @@ static __always_inline bool on_vc_fallback_stack(struct pt_regs *regs)
2040 + return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2));
2041 + }
2042 +
2043 +-/*
2044 +- * Main #VC exception handler. It is called when the entry code was able to
2045 +- * switch off the IST to a safe kernel stack.
2046 +- *
2047 +- * With the current implementation it is always possible to switch to a safe
2048 +- * stack because #VC exceptions only happen at known places, like intercepted
2049 +- * instructions or accesses to MMIO areas/IO ports. They can also happen with
2050 +- * code instrumentation when the hypervisor intercepts #DB, but the critical
2051 +- * paths are forbidden to be instrumented, so #DB exceptions currently also
2052 +- * only happen in safe places.
2053 +- */
2054 +-DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
2055 ++static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_code)
2056 + {
2057 +- irqentry_state_t irq_state;
2058 + struct ghcb_state state;
2059 + struct es_em_ctxt ctxt;
2060 + enum es_result result;
2061 + struct ghcb *ghcb;
2062 ++ bool ret = true;
2063 +
2064 +- /*
2065 +- * Handle #DB before calling into !noinstr code to avoid recursive #DB.
2066 +- */
2067 +- if (error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB) {
2068 +- vc_handle_trap_db(regs);
2069 +- return;
2070 +- }
2071 +-
2072 +- irq_state = irqentry_nmi_enter(regs);
2073 +- lockdep_assert_irqs_disabled();
2074 +- instrumentation_begin();
2075 +-
2076 +- /*
2077 +- * This is invoked through an interrupt gate, so IRQs are disabled. The
2078 +- * code below might walk page-tables for user or kernel addresses, so
2079 +- * keep the IRQs disabled to protect us against concurrent TLB flushes.
2080 +- */
2081 +-
2082 +- ghcb = sev_es_get_ghcb(&state);
2083 ++ ghcb = __sev_get_ghcb(&state);
2084 +
2085 + vc_ghcb_invalidate(ghcb);
2086 + result = vc_init_em_ctxt(&ctxt, regs, error_code);
2087 +@@ -1361,7 +1335,7 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
2088 + if (result == ES_OK)
2089 + result = vc_handle_exitcode(&ctxt, ghcb, error_code);
2090 +
2091 +- sev_es_put_ghcb(&state);
2092 ++ __sev_put_ghcb(&state);
2093 +
2094 + /* Done - now check the result */
2095 + switch (result) {
2096 +@@ -1371,15 +1345,18 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
2097 + case ES_UNSUPPORTED:
2098 + pr_err_ratelimited("Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n",
2099 + error_code, regs->ip);
2100 +- goto fail;
2101 ++ ret = false;
2102 ++ break;
2103 + case ES_VMM_ERROR:
2104 + pr_err_ratelimited("Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
2105 + error_code, regs->ip);
2106 +- goto fail;
2107 ++ ret = false;
2108 ++ break;
2109 + case ES_DECODE_FAILED:
2110 + pr_err_ratelimited("Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
2111 + error_code, regs->ip);
2112 +- goto fail;
2113 ++ ret = false;
2114 ++ break;
2115 + case ES_EXCEPTION:
2116 + vc_forward_exception(&ctxt);
2117 + break;
2118 +@@ -1395,24 +1372,52 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
2119 + BUG();
2120 + }
2121 +
2122 +-out:
2123 +- instrumentation_end();
2124 +- irqentry_nmi_exit(regs, irq_state);
2125 ++ return ret;
2126 ++}
2127 +
2128 +- return;
2129 ++static __always_inline bool vc_is_db(unsigned long error_code)
2130 ++{
2131 ++ return error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB;
2132 ++}
2133 +
2134 +-fail:
2135 +- if (user_mode(regs)) {
2136 +- /*
2137 +- * Do not kill the machine if user-space triggered the
2138 +- * exception. Send SIGBUS instead and let user-space deal with
2139 +- * it.
2140 +- */
2141 +- force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
2142 +- } else {
2143 +- pr_emerg("PANIC: Unhandled #VC exception in kernel space (result=%d)\n",
2144 +- result);
2145 ++/*
2146 ++ * Runtime #VC exception handler when raised from kernel mode. Runs in NMI mode
2147 ++ * and will panic when an error happens.
2148 ++ */
2149 ++DEFINE_IDTENTRY_VC_KERNEL(exc_vmm_communication)
2150 ++{
2151 ++ irqentry_state_t irq_state;
2152 +
2153 ++ /*
2154 ++ * With the current implementation it is always possible to switch to a
2155 ++ * safe stack because #VC exceptions only happen at known places, like
2156 ++ * intercepted instructions or accesses to MMIO areas/IO ports. They can
2157 ++ * also happen with code instrumentation when the hypervisor intercepts
2158 ++ * #DB, but the critical paths are forbidden to be instrumented, so #DB
2159 ++ * exceptions currently also only happen in safe places.
2160 ++ *
2161 ++ * But keep this here in case the noinstr annotations are violated due
2162 ++	 * to a bug elsewhere.
2163 ++ */
2164 ++ if (unlikely(on_vc_fallback_stack(regs))) {
2165 ++ instrumentation_begin();
2166 ++ panic("Can't handle #VC exception from unsupported context\n");
2167 ++ instrumentation_end();
2168 ++ }
2169 ++
2170 ++ /*
2171 ++ * Handle #DB before calling into !noinstr code to avoid recursive #DB.
2172 ++ */
2173 ++ if (vc_is_db(error_code)) {
2174 ++ exc_debug(regs);
2175 ++ return;
2176 ++ }
2177 ++
2178 ++ irq_state = irqentry_nmi_enter(regs);
2179 ++
2180 ++ instrumentation_begin();
2181 ++
2182 ++ if (!vc_raw_handle_exception(regs, error_code)) {
2183 + /* Show some debug info */
2184 + show_regs(regs);
2185 +
2186 +@@ -1423,23 +1428,38 @@ fail:
2187 + panic("Returned from Terminate-Request to Hypervisor\n");
2188 + }
2189 +
2190 +- goto out;
2191 ++ instrumentation_end();
2192 ++ irqentry_nmi_exit(regs, irq_state);
2193 + }
2194 +
2195 +-/* This handler runs on the #VC fall-back stack. It can cause further #VC exceptions */
2196 +-DEFINE_IDTENTRY_VC_IST(exc_vmm_communication)
2197 ++/*
2198 ++ * Runtime #VC exception handler when raised from user mode. Runs in IRQ mode
2199 ++ * and will kill the current task with SIGBUS when an error happens.
2200 ++ */
2201 ++DEFINE_IDTENTRY_VC_USER(exc_vmm_communication)
2202 + {
2203 ++ /*
2204 ++ * Handle #DB before calling into !noinstr code to avoid recursive #DB.
2205 ++ */
2206 ++ if (vc_is_db(error_code)) {
2207 ++ noist_exc_debug(regs);
2208 ++ return;
2209 ++ }
2210 ++
2211 ++ irqentry_enter_from_user_mode(regs);
2212 + instrumentation_begin();
2213 +- panic("Can't handle #VC exception from unsupported context\n");
2214 +- instrumentation_end();
2215 +-}
2216 +
2217 +-DEFINE_IDTENTRY_VC(exc_vmm_communication)
2218 +-{
2219 +- if (likely(!on_vc_fallback_stack(regs)))
2220 +- safe_stack_exc_vmm_communication(regs, error_code);
2221 +- else
2222 +- ist_exc_vmm_communication(regs, error_code);
2223 ++ if (!vc_raw_handle_exception(regs, error_code)) {
2224 ++ /*
2225 ++ * Do not kill the machine if user-space triggered the
2226 ++ * exception. Send SIGBUS instead and let user-space deal with
2227 ++ * it.
2228 ++ */
2229 ++ force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
2230 ++ }
2231 ++
2232 ++ instrumentation_end();
2233 ++ irqentry_exit_to_user_mode(regs);
2234 + }
2235 +
2236 + bool __init handle_vc_boot_ghcb(struct pt_regs *regs)
2237 +diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
2238 +index 7770245cc7fa7..ec2d64aa21631 100644
2239 +--- a/arch/x86/kernel/smpboot.c
2240 ++++ b/arch/x86/kernel/smpboot.c
2241 +@@ -236,7 +236,6 @@ static void notrace start_secondary(void *unused)
2242 + cpu_init();
2243 + rcu_cpu_starting(raw_smp_processor_id());
2244 + x86_cpuinit.early_percpu_clock_init();
2245 +- preempt_disable();
2246 + smp_callin();
2247 +
2248 + enable_start_cpu0 = 0;
2249 +diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
2250 +index 57ec011921805..6eb1b097e97eb 100644
2251 +--- a/arch/x86/kernel/tsc.c
2252 ++++ b/arch/x86/kernel/tsc.c
2253 +@@ -1152,7 +1152,8 @@ static struct clocksource clocksource_tsc = {
2254 + .mask = CLOCKSOURCE_MASK(64),
2255 + .flags = CLOCK_SOURCE_IS_CONTINUOUS |
2256 + CLOCK_SOURCE_VALID_FOR_HRES |
2257 +- CLOCK_SOURCE_MUST_VERIFY,
2258 ++ CLOCK_SOURCE_MUST_VERIFY |
2259 ++ CLOCK_SOURCE_VERIFY_PERCPU,
2260 + .vdso_clock_mode = VDSO_CLOCKMODE_TSC,
2261 + .enable = tsc_cs_enable,
2262 + .resume = tsc_resume,
2263 +diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
2264 +index b4da665bb8923..c42613cfb5ba6 100644
2265 +--- a/arch/x86/kvm/cpuid.c
2266 ++++ b/arch/x86/kvm/cpuid.c
2267 +@@ -202,10 +202,10 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
2268 + static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu);
2269 +
2270 + /*
2271 +- * Except for the MMU, which needs to be reset after any vendor
2272 +- * specific adjustments to the reserved GPA bits.
2273 ++	 * Except for the MMU, which needs to do its thing after any vendor
2274 ++	 * specific adjustments to the reserved GPA bits.
2275 + */
2276 +- kvm_mmu_reset_context(vcpu);
2277 ++ kvm_mmu_after_set_cpuid(vcpu);
2278 + }
2279 +
2280 + static int is_efer_nx(void)
2281 +diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
2282 +index f00830e5202fe..fdd1eca717fd6 100644
2283 +--- a/arch/x86/kvm/hyperv.c
2284 ++++ b/arch/x86/kvm/hyperv.c
2285 +@@ -1704,7 +1704,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, u64 ingpa, u16 rep_cnt, bool
2286 + * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
2287 + * analyze it here, flush TLB regardless of the specified address space.
2288 + */
2289 +- kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH,
2290 ++ kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH_GUEST,
2291 + NULL, vcpu_mask, &hv_vcpu->tlb_flush);
2292 +
2293 + ret_success:
2294 +diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
2295 +index a54f72c31be90..99afc6f1eed02 100644
2296 +--- a/arch/x86/kvm/mmu/mmu.c
2297 ++++ b/arch/x86/kvm/mmu/mmu.c
2298 +@@ -4168,7 +4168,15 @@ static inline u64 reserved_hpa_bits(void)
2299 + void
2300 + reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
2301 + {
2302 +- bool uses_nx = context->nx ||
2303 ++ /*
2304 ++ * KVM uses NX when TDP is disabled to handle a variety of scenarios,
2305 ++ * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
2306 ++ * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
2307 ++ * The iTLB multi-hit workaround can be toggled at any time, so assume
2308 ++ * NX can be used by any non-nested shadow MMU to avoid having to reset
2309 ++ * MMU contexts. Note, KVM forces EFER.NX=1 when TDP is disabled.
2310 ++ */
2311 ++ bool uses_nx = context->nx || !tdp_enabled ||
2312 + context->mmu_role.base.smep_andnot_wp;
2313 + struct rsvd_bits_validate *shadow_zero_check;
2314 + int i;
2315 +@@ -4851,6 +4859,18 @@ kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
2316 + return role.base;
2317 + }
2318 +
2319 ++void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
2320 ++{
2321 ++ /*
2322 ++ * Invalidate all MMU roles to force them to reinitialize as CPUID
2323 ++ * information is factored into reserved bit calculations.
2324 ++ */
2325 ++ vcpu->arch.root_mmu.mmu_role.ext.valid = 0;
2326 ++ vcpu->arch.guest_mmu.mmu_role.ext.valid = 0;
2327 ++ vcpu->arch.nested_mmu.mmu_role.ext.valid = 0;
2328 ++ kvm_mmu_reset_context(vcpu);
2329 ++}
2330 ++
2331 + void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
2332 + {
2333 + kvm_mmu_unload(vcpu);
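kvm_mmu_after_set_cpuid() clears the valid bit of every cached mmu_role so each
MMU context is rebuilt even when the recomputed role would compare equal,
forcing reserved-bit masks and friends to be rederived from the new guest
CPUID. The idea in miniature, as a toy cache with an explicit valid flag (not
KVM's real structures):

#include <stdbool.h>
#include <stdio.h>

struct mmu { unsigned int maxphyaddr; unsigned long rsvd_mask; bool valid; };

static void reinit_if_needed(struct mmu *m)
{
	if (m->valid)
		return;
	m->rsvd_mask = ~0UL << m->maxphyaddr;	/* derived from "CPUID" */
	m->valid = true;
}

int main(void)
{
	struct mmu m = { .maxphyaddr = 48 };

	reinit_if_needed(&m);
	printf("rsvd mask %#lx\n", m.rsvd_mask);

	m.maxphyaddr = 52;	/* guest CPUID changed */
	m.valid = false;	/* like zeroing mmu_role.ext.valid */
	reinit_if_needed(&m);
	printf("rsvd mask %#lx\n", m.rsvd_mask);
	return 0;
}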
2334 +diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
2335 +index 823a5919f9fa0..52fffd68b5229 100644
2336 +--- a/arch/x86/kvm/mmu/paging_tmpl.h
2337 ++++ b/arch/x86/kvm/mmu/paging_tmpl.h
2338 +@@ -471,8 +471,7 @@ retry_walk:
2339 +
2340 + error:
2341 + errcode |= write_fault | user_fault;
2342 +- if (fetch_fault && (mmu->nx ||
2343 +- kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
2344 ++ if (fetch_fault && (mmu->nx || mmu->mmu_role.ext.cr4_smep))
2345 + errcode |= PFERR_FETCH_MASK;
2346 +
2347 + walker->fault.vector = PF_VECTOR;
2348 +diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
2349 +index 66d43cec0c31a..8e8e8da740a07 100644
2350 +--- a/arch/x86/kvm/mmu/spte.c
2351 ++++ b/arch/x86/kvm/mmu/spte.c
2352 +@@ -102,13 +102,6 @@ int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
2353 + else if (kvm_vcpu_ad_need_write_protect(vcpu))
2354 + spte |= SPTE_TDP_AD_WRPROT_ONLY_MASK;
2355 +
2356 +- /*
2357 +- * Bits 62:52 of PAE SPTEs are reserved. WARN if said bits are set
2358 +- * if PAE paging may be employed (shadow paging or any 32-bit KVM).
2359 +- */
2360 +- WARN_ON_ONCE((!tdp_enabled || !IS_ENABLED(CONFIG_X86_64)) &&
2361 +- (spte & SPTE_TDP_AD_MASK));
2362 +-
2363 + /*
2364 + * For the EPT case, shadow_present_mask is 0 if hardware
2365 + * supports exec-only page table entries. In that case,
2366 +diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
2367 +index 237317b1eddda..8773bd5287da8 100644
2368 +--- a/arch/x86/kvm/mmu/tdp_mmu.c
2369 ++++ b/arch/x86/kvm/mmu/tdp_mmu.c
2370 +@@ -912,7 +912,7 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
2371 + kvm_pfn_t pfn, bool prefault)
2372 + {
2373 + u64 new_spte;
2374 +- int ret = 0;
2375 ++ int ret = RET_PF_FIXED;
2376 + int make_spte_ret = 0;
2377 +
2378 + if (unlikely(is_noslot_pfn(pfn)))
2379 +@@ -949,7 +949,11 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
2380 + rcu_dereference(iter->sptep));
2381 + }
2382 +
2383 +- if (!prefault)
2384 ++ /*
2385 ++ * Increase pf_fixed in both RET_PF_EMULATE and RET_PF_FIXED to be
2386 ++ * consistent with legacy MMU behavior.
2387 ++ */
2388 ++ if (ret != RET_PF_SPURIOUS)
2389 + vcpu->stat.pf_fixed++;
2390 +
2391 + return ret;
2392 +diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
2393 +index 6058a65a6ede6..2e63171864a74 100644
2394 +--- a/arch/x86/kvm/vmx/nested.c
2395 ++++ b/arch/x86/kvm/vmx/nested.c
2396 +@@ -1127,12 +1127,19 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne
2397 +
2398 + /*
2399 + * Unconditionally skip the TLB flush on fast CR3 switch, all TLB
2400 +- * flushes are handled by nested_vmx_transition_tlb_flush(). See
2401 +- * nested_vmx_transition_mmu_sync for details on skipping the MMU sync.
2402 ++ * flushes are handled by nested_vmx_transition_tlb_flush().
2403 + */
2404 +- if (!nested_ept)
2405 +- kvm_mmu_new_pgd(vcpu, cr3, true,
2406 +- !nested_vmx_transition_mmu_sync(vcpu));
2407 ++ if (!nested_ept) {
2408 ++ kvm_mmu_new_pgd(vcpu, cr3, true, true);
2409 ++
2410 ++ /*
2411 ++ * A TLB flush on VM-Enter/VM-Exit flushes all linear mappings
2412 ++ * across all PCIDs, i.e. all PGDs need to be synchronized.
2413 ++ * See nested_vmx_transition_mmu_sync() for more details.
2414 ++ */
2415 ++ if (nested_vmx_transition_mmu_sync(vcpu))
2416 ++ kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
2417 ++ }
2418 +
2419 + vcpu->arch.cr3 = cr3;
2420 + kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
2421 +@@ -3682,7 +3689,7 @@ void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
2422 + }
2423 + }
2424 +
2425 +-static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
2426 ++static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
2427 + {
2428 + struct vcpu_vmx *vmx = to_vmx(vcpu);
2429 + int max_irr;
2430 +@@ -3690,17 +3697,17 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
2431 + u16 status;
2432 +
2433 + if (!vmx->nested.pi_desc || !vmx->nested.pi_pending)
2434 +- return;
2435 ++ return 0;
2436 +
2437 + vmx->nested.pi_pending = false;
2438 + if (!pi_test_and_clear_on(vmx->nested.pi_desc))
2439 +- return;
2440 ++ return 0;
2441 +
2442 + max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
2443 + if (max_irr != 256) {
2444 + vapic_page = vmx->nested.virtual_apic_map.hva;
2445 + if (!vapic_page)
2446 +- return;
2447 ++ return 0;
2448 +
2449 + __kvm_apic_update_irr(vmx->nested.pi_desc->pir,
2450 + vapic_page, &max_irr);
2451 +@@ -3713,6 +3720,7 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
2452 + }
2453 +
2454 + nested_mark_vmcs12_pages_dirty(vcpu);
2455 ++ return 0;
2456 + }
2457 +
2458 + static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
2459 +@@ -3887,8 +3895,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
2460 + }
2461 +
2462 + no_vmexit:
2463 +- vmx_complete_nested_posted_interrupt(vcpu);
2464 +- return 0;
2465 ++ return vmx_complete_nested_posted_interrupt(vcpu);
2466 + }
2467 +
2468 + static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
2469 +@@ -5481,8 +5488,6 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
2470 + {
2471 + u32 index = kvm_rcx_read(vcpu);
2472 + u64 new_eptp;
2473 +- bool accessed_dirty;
2474 +- struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
2475 +
2476 + if (!nested_cpu_has_eptp_switching(vmcs12) ||
2477 + !nested_cpu_has_ept(vmcs12))
2478 +@@ -5491,13 +5496,10 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
2479 + if (index >= VMFUNC_EPTP_ENTRIES)
2480 + return 1;
2481 +
2482 +-
2483 + if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
2484 + &new_eptp, index * 8, 8))
2485 + return 1;
2486 +
2487 +- accessed_dirty = !!(new_eptp & VMX_EPTP_AD_ENABLE_BIT);
2488 +-
2489 + /*
2490 + * If the (L2) guest does a vmfunc to the currently
2491 + * active ept pointer, we don't have to do anything else
2492 +@@ -5506,8 +5508,6 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
2493 + if (!nested_vmx_check_eptp(vcpu, new_eptp))
2494 + return 1;
2495 +
2496 +- mmu->ept_ad = accessed_dirty;
2497 +- mmu->mmu_role.base.ad_disabled = !accessed_dirty;
2498 + vmcs12->ept_pointer = new_eptp;
2499 +
2500 + kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
2501 +@@ -5533,7 +5533,7 @@ static int handle_vmfunc(struct kvm_vcpu *vcpu)
2502 + }
2503 +
2504 + vmcs12 = get_vmcs12(vcpu);
2505 +- if ((vmcs12->vm_function_control & (1 << function)) == 0)
2506 ++ if (!(vmcs12->vm_function_control & BIT_ULL(function)))
2507 + goto fail;
2508 +
2509 + switch (function) {
2510 +@@ -5806,6 +5806,9 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
2511 + else if (is_breakpoint(intr_info) &&
2512 + vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
2513 + return true;
2514 ++ else if (is_alignment_check(intr_info) &&
2515 ++ !vmx_guest_inject_ac(vcpu))
2516 ++ return true;
2517 + return false;
2518 + case EXIT_REASON_EXTERNAL_INTERRUPT:
2519 + return true;
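One small fix above is easy to miss: the vm_function_control test switches from
(1 << function) to BIT_ULL(function). The field is 64 bits wide and function is
guest-controlled, so a 32-bit int shift would be undefined behaviour for
function >= 31. A stand-alone illustration, with BIT_ULL restated locally:

#include <stdio.h>

#define BIT_ULL(n)	(1ULL << (n))

int main(void)
{
	unsigned int n = 40;

	/* (1 << n) would be undefined behaviour here: int is 32-bit. */
	printf("%#llx\n", BIT_ULL(n));	/* 0x10000000000 */
	return 0;
}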
2520 +diff --git a/arch/x86/kvm/vmx/vmcs.h b/arch/x86/kvm/vmx/vmcs.h
2521 +index 1472c6c376f74..571d9ad80a59e 100644
2522 +--- a/arch/x86/kvm/vmx/vmcs.h
2523 ++++ b/arch/x86/kvm/vmx/vmcs.h
2524 +@@ -117,6 +117,11 @@ static inline bool is_gp_fault(u32 intr_info)
2525 + return is_exception_n(intr_info, GP_VECTOR);
2526 + }
2527 +
2528 ++static inline bool is_alignment_check(u32 intr_info)
2529 ++{
2530 ++ return is_exception_n(intr_info, AC_VECTOR);
2531 ++}
2532 ++
2533 + static inline bool is_machine_check(u32 intr_info)
2534 + {
2535 + return is_exception_n(intr_info, MC_VECTOR);
2536 +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
2537 +index c2a779b688e64..dcd4f43c23de5 100644
2538 +--- a/arch/x86/kvm/vmx/vmx.c
2539 ++++ b/arch/x86/kvm/vmx/vmx.c
2540 +@@ -4829,7 +4829,7 @@ static int handle_machine_check(struct kvm_vcpu *vcpu)
2541 + * - Guest has #AC detection enabled in CR0
2542 + * - Guest EFLAGS has AC bit set
2543 + */
2544 +-static inline bool guest_inject_ac(struct kvm_vcpu *vcpu)
2545 ++bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu)
2546 + {
2547 + if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
2548 + return true;
2549 +@@ -4937,7 +4937,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
2550 + kvm_run->debug.arch.exception = ex_no;
2551 + break;
2552 + case AC_VECTOR:
2553 +- if (guest_inject_ac(vcpu)) {
2554 ++ if (vmx_guest_inject_ac(vcpu)) {
2555 + kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
2556 + return 1;
2557 + }
2558 +diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
2559 +index 16e4e457ba23c..d91869c8c1fc2 100644
2560 +--- a/arch/x86/kvm/vmx/vmx.h
2561 ++++ b/arch/x86/kvm/vmx/vmx.h
2562 +@@ -387,6 +387,7 @@ void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
2563 + void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
2564 + u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
2565 +
2566 ++bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu);
2567 + void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
2568 + void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
2569 + bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
2570 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
2571 +index e0f4a46649d75..dad282fe0dac2 100644
2572 +--- a/arch/x86/kvm/x86.c
2573 ++++ b/arch/x86/kvm/x86.c
2574 +@@ -9171,7 +9171,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
2575 + }
2576 + if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
2577 + kvm_vcpu_flush_tlb_current(vcpu);
2578 +- if (kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu))
2579 ++ if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
2580 + kvm_vcpu_flush_tlb_guest(vcpu);
2581 +
2582 + if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
2583 +@@ -10454,6 +10454,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
2584 +
2585 + void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
2586 + {
2587 ++ unsigned long old_cr0 = kvm_read_cr0(vcpu);
2588 ++
2589 + kvm_lapic_reset(vcpu, init_event);
2590 +
2591 + vcpu->arch.hflags = 0;
2592 +@@ -10522,6 +10524,17 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
2593 + vcpu->arch.ia32_xss = 0;
2594 +
2595 + static_call(kvm_x86_vcpu_reset)(vcpu, init_event);
2596 ++
2597 ++ /*
2598 ++ * Reset the MMU context if paging was enabled prior to INIT (which is
2599 ++ * implied if CR0.PG=1 as CR0 will be '0' prior to RESET). Unlike the
2600 ++ * standard CR0/CR4/EFER modification paths, only CR0.PG needs to be
2601 ++ * checked because it is unconditionally cleared on INIT and all other
2602 ++ * paging related bits are ignored if paging is disabled, i.e. CR0.WP,
2603 ++ * CR4, and EFER changes are all irrelevant if CR0.PG was '0'.
2604 ++ */
2605 ++ if (old_cr0 & X86_CR0_PG)
2606 ++ kvm_mmu_reset_context(vcpu);
2607 + }
2608 +
2609 + void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
2610 +diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
2611 +index 78804680e9231..cfe6b1e85fa61 100644
2612 +--- a/arch/x86/mm/tlb.c
2613 ++++ b/arch/x86/mm/tlb.c
2614 +@@ -14,6 +14,7 @@
2615 + #include <asm/nospec-branch.h>
2616 + #include <asm/cache.h>
2617 + #include <asm/apic.h>
2618 ++#include <asm/perf_event.h>
2619 +
2620 + #include "mm_internal.h"
2621 +
2622 +@@ -404,9 +405,14 @@ static inline void cr4_update_pce_mm(struct mm_struct *mm)
2623 + {
2624 + if (static_branch_unlikely(&rdpmc_always_available_key) ||
2625 + (!static_branch_unlikely(&rdpmc_never_available_key) &&
2626 +- atomic_read(&mm->context.perf_rdpmc_allowed)))
2627 ++ atomic_read(&mm->context.perf_rdpmc_allowed))) {
2628 ++ /*
2629 ++		 * Clear the existing dirty counters to prevent
2630 ++		 * leaking counter values to an RDPMC task.
2631 ++ */
2632 ++ perf_clear_dirty_counters();
2633 + cr4_set_bits_irqsoff(X86_CR4_PCE);
2634 +- else
2635 ++ } else
2636 + cr4_clear_bits_irqsoff(X86_CR4_PCE);
2637 + }
2638 +
2639 +diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
2640 +index 2a2e290fa5d83..a3d867f221531 100644
2641 +--- a/arch/x86/net/bpf_jit_comp.c
2642 ++++ b/arch/x86/net/bpf_jit_comp.c
2643 +@@ -1297,7 +1297,7 @@ st: if (is_imm8(insn->off))
2644 + emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
2645 + if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
2646 + struct exception_table_entry *ex;
2647 +- u8 *_insn = image + proglen;
2648 ++ u8 *_insn = image + proglen + (start_of_ldx - temp);
2649 + s64 delta;
2650 +
2651 + /* populate jmp_offset for JMP above */
2652 +diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
2653 +index cd85a7a2722ba..1254da07ead1f 100644
2654 +--- a/arch/xtensa/kernel/smp.c
2655 ++++ b/arch/xtensa/kernel/smp.c
2656 +@@ -145,7 +145,6 @@ void secondary_start_kernel(void)
2657 + cpumask_set_cpu(cpu, mm_cpumask(mm));
2658 + enter_lazy_tlb(mm, current);
2659 +
2660 +- preempt_disable();
2661 + trace_hardirqs_off();
2662 +
2663 + calibrate_delay();
2664 +diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
2665 +index acd1f881273e0..eccbe2aed7c3f 100644
2666 +--- a/block/bfq-iosched.c
2667 ++++ b/block/bfq-iosched.c
2668 +@@ -2695,9 +2695,15 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
2669 + * costly and complicated.
2670 + */
2671 + if (unlikely(!bfqd->nonrot_with_queueing)) {
2672 +- if (bic->stable_merge_bfqq &&
2673 ++		 * Also make sure that bfqq is sync, because
2674 ++ * Make sure also that bfqq is sync, because
2675 ++ * bic->stable_merge_bfqq may point to some queue (for
2676 ++		 * stable merging) even if bic is associated with a
2677 ++ * sync queue, but this bfqq is async
2678 ++ */
2679 ++ if (bfq_bfqq_sync(bfqq) && bic->stable_merge_bfqq &&
2680 + !bfq_bfqq_just_created(bfqq) &&
2681 +- time_is_after_jiffies(bfqq->split_time +
2682 ++ time_is_before_jiffies(bfqq->split_time +
2683 + msecs_to_jiffies(200))) {
2684 + struct bfq_queue *stable_merge_bfqq =
2685 + bic->stable_merge_bfqq;
2686 +@@ -6129,11 +6135,13 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
2687 + * of other queues. But a false waker will unjustly steal
2688 + * bandwidth to its supposedly woken queue. So considering
2689 + * also shared queues in the waking mechanism may cause more
2690 +- * control troubles than throughput benefits. Then do not set
2691 +- * last_completed_rq_bfqq to bfqq if bfqq is a shared queue.
2692 ++ * control troubles than throughput benefits. Then reset
2693 ++ * last_completed_rq_bfqq if bfqq is a shared queue.
2694 + */
2695 + if (!bfq_bfqq_coop(bfqq))
2696 + bfqd->last_completed_rq_bfqq = bfqq;
2697 ++ else
2698 ++ bfqd->last_completed_rq_bfqq = NULL;
2699 +
2700 + /*
2701 + * If we are waiting to discover whether the request pattern
2702 +diff --git a/block/bio.c b/block/bio.c
2703 +index 44205dfb6b60a..1fab762e079be 100644
2704 +--- a/block/bio.c
2705 ++++ b/block/bio.c
2706 +@@ -1375,8 +1375,7 @@ static inline bool bio_remaining_done(struct bio *bio)
2707 + *
2708 + * bio_endio() can be called several times on a bio that has been chained
2709 + * using bio_chain(). The ->bi_end_io() function will only be called the
2710 +- * last time. At this point the BLK_TA_COMPLETE tracing event will be
2711 +- * generated if BIO_TRACE_COMPLETION is set.
2712 ++ * last time.
2713 + **/
2714 + void bio_endio(struct bio *bio)
2715 + {
2716 +@@ -1389,6 +1388,11 @@ again:
2717 + if (bio->bi_bdev)
2718 + rq_qos_done_bio(bio->bi_bdev->bd_disk->queue, bio);
2719 +
2720 ++ if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
2721 ++ trace_block_bio_complete(bio->bi_bdev->bd_disk->queue, bio);
2722 ++ bio_clear_flag(bio, BIO_TRACE_COMPLETION);
2723 ++ }
2724 ++
2725 + /*
2726 + * Need to have a real endio function for chained bios, otherwise
2727 + * various corner cases will break (like stacking block devices that
2728 +@@ -1402,11 +1406,6 @@ again:
2729 + goto again;
2730 + }
2731 +
2732 +- if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
2733 +- trace_block_bio_complete(bio->bi_bdev->bd_disk->queue, bio);
2734 +- bio_clear_flag(bio, BIO_TRACE_COMPLETION);
2735 +- }
2736 +-
2737 + blk_throtl_bio_endio(bio);
2738 + /* release cgroup info */
2739 + bio_uninit(bio);
2740 +diff --git a/block/blk-flush.c b/block/blk-flush.c
2741 +index 7942ca6ed3211..1002f6c581816 100644
2742 +--- a/block/blk-flush.c
2743 ++++ b/block/blk-flush.c
2744 +@@ -219,8 +219,6 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
2745 + unsigned long flags = 0;
2746 + struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
2747 +
2748 +- blk_account_io_flush(flush_rq);
2749 +-
2750 + /* release the tag's ownership to the req cloned from */
2751 + spin_lock_irqsave(&fq->mq_flush_lock, flags);
2752 +
2753 +@@ -230,6 +228,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
2754 + return;
2755 + }
2756 +
2757 ++ blk_account_io_flush(flush_rq);
2758 + /*
2759 + * Flush request has to be marked as IDLE when it is really ended
2760 + * because its .end_io() is called from timeout code path too for
2761 +diff --git a/block/blk-merge.c b/block/blk-merge.c
2762 +index 4d97fb6dd2267..bcdff1879c346 100644
2763 +--- a/block/blk-merge.c
2764 ++++ b/block/blk-merge.c
2765 +@@ -559,10 +559,14 @@ static inline unsigned int blk_rq_get_max_segments(struct request *rq)
2766 + static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
2767 + unsigned int nr_phys_segs)
2768 + {
2769 +- if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
2770 ++ if (blk_integrity_merge_bio(req->q, req, bio) == false)
2771 + goto no_merge;
2772 +
2773 +- if (blk_integrity_merge_bio(req->q, req, bio) == false)
2774 ++ /* discard request merge won't add new segment */
2775 ++ if (req_op(req) == REQ_OP_DISCARD)
2776 ++ return 1;
2777 ++
2778 ++ if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
2779 + goto no_merge;
2780 +
2781 + /*
2782 +diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
2783 +index 2a37731e8244b..1671dae43030b 100644
2784 +--- a/block/blk-mq-tag.c
2785 ++++ b/block/blk-mq-tag.c
2786 +@@ -199,6 +199,20 @@ struct bt_iter_data {
2787 + bool reserved;
2788 + };
2789 +
2790 ++static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
2791 ++ unsigned int bitnr)
2792 ++{
2793 ++ struct request *rq;
2794 ++ unsigned long flags;
2795 ++
2796 ++ spin_lock_irqsave(&tags->lock, flags);
2797 ++ rq = tags->rqs[bitnr];
2798 ++ if (!rq || !refcount_inc_not_zero(&rq->ref))
2799 ++ rq = NULL;
2800 ++ spin_unlock_irqrestore(&tags->lock, flags);
2801 ++ return rq;
2802 ++}
2803 ++
2804 + static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
2805 + {
2806 + struct bt_iter_data *iter_data = data;
2807 +@@ -206,18 +220,22 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
2808 + struct blk_mq_tags *tags = hctx->tags;
2809 + bool reserved = iter_data->reserved;
2810 + struct request *rq;
2811 ++ bool ret = true;
2812 +
2813 + if (!reserved)
2814 + bitnr += tags->nr_reserved_tags;
2815 +- rq = tags->rqs[bitnr];
2816 +-
2817 + /*
2818 + * We can hit rq == NULL here, because the tagging functions
2819 + * test and set the bit before assigning ->rqs[].
2820 + */
2821 +- if (rq && rq->q == hctx->queue && rq->mq_hctx == hctx)
2822 +- return iter_data->fn(hctx, rq, iter_data->data, reserved);
2823 +- return true;
2824 ++ rq = blk_mq_find_and_get_req(tags, bitnr);
2825 ++ if (!rq)
2826 ++ return true;
2827 ++
2828 ++ if (rq->q == hctx->queue && rq->mq_hctx == hctx)
2829 ++ ret = iter_data->fn(hctx, rq, iter_data->data, reserved);
2830 ++ blk_mq_put_rq_ref(rq);
2831 ++ return ret;
2832 + }
2833 +
2834 + /**
2835 +@@ -264,6 +282,8 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
2836 + struct blk_mq_tags *tags = iter_data->tags;
2837 + bool reserved = iter_data->flags & BT_TAG_ITER_RESERVED;
2838 + struct request *rq;
2839 ++ bool ret = true;
2840 ++ bool iter_static_rqs = !!(iter_data->flags & BT_TAG_ITER_STATIC_RQS);
2841 +
2842 + if (!reserved)
2843 + bitnr += tags->nr_reserved_tags;
2844 +@@ -272,16 +292,19 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
2845 + * We can hit rq == NULL here, because the tagging functions
2846 + * test and set the bit before assigning ->rqs[].
2847 + */
2848 +- if (iter_data->flags & BT_TAG_ITER_STATIC_RQS)
2849 ++ if (iter_static_rqs)
2850 + rq = tags->static_rqs[bitnr];
2851 + else
2852 +- rq = tags->rqs[bitnr];
2853 ++ rq = blk_mq_find_and_get_req(tags, bitnr);
2854 + if (!rq)
2855 + return true;
2856 +- if ((iter_data->flags & BT_TAG_ITER_STARTED) &&
2857 +- !blk_mq_request_started(rq))
2858 +- return true;
2859 +- return iter_data->fn(rq, iter_data->data, reserved);
2860 ++
2861 ++ if (!(iter_data->flags & BT_TAG_ITER_STARTED) ||
2862 ++ blk_mq_request_started(rq))
2863 ++ ret = iter_data->fn(rq, iter_data->data, reserved);
2864 ++ if (!iter_static_rqs)
2865 ++ blk_mq_put_rq_ref(rq);
2866 ++ return ret;
2867 + }
2868 +
2869 + /**
2870 +@@ -348,6 +371,9 @@ void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
2871 + * indicates whether or not @rq is a reserved request. Return
2872 + * true to continue iterating tags, false to stop.
2873 + * @priv: Will be passed as second argument to @fn.
2874 ++ *
2875 ++ * We grab one request reference before calling @fn and release it after
2876 ++ * @fn returns.
2877 + */
2878 + void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
2879 + busy_tag_iter_fn *fn, void *priv)
2880 +@@ -516,6 +542,7 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
2881 +
2882 + tags->nr_tags = total_tags;
2883 + tags->nr_reserved_tags = reserved_tags;
2884 ++ spin_lock_init(&tags->lock);
2885 +
2886 + if (blk_mq_is_sbitmap_shared(flags))
2887 + return tags;
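blk_mq_find_and_get_req() closes a use-after-free race in tag iteration: the
iterator can observe a request that is concurrently being freed, so it now
takes tags->lock and only hands the request to the callback when
refcount_inc_not_zero() succeeds, dropping the reference again via
blk_mq_put_rq_ref(). The try-get primitive in a self-contained C11 model (the
kernel's refcount_t adds overflow saturation that this sketch omits):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct req { atomic_uint ref; };

/* Take a reference only if the object is not already on its way out. */
static bool refcount_inc_not_zero(atomic_uint *r)
{
	unsigned int old = atomic_load(r);

	while (old != 0)
		if (atomic_compare_exchange_weak(r, &old, old + 1))
			return true;
	return false;
}

int main(void)
{
	struct req live = { 1 }, dying = { 0 };

	printf("live:  %d\n", refcount_inc_not_zero(&live.ref));	/* 1 */
	printf("dying: %d\n", refcount_inc_not_zero(&dying.ref));	/* 0 */
	return 0;
}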
2888 +diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
2889 +index 7d3e6b333a4a9..f887988e5ef60 100644
2890 +--- a/block/blk-mq-tag.h
2891 ++++ b/block/blk-mq-tag.h
2892 +@@ -20,6 +20,12 @@ struct blk_mq_tags {
2893 + struct request **rqs;
2894 + struct request **static_rqs;
2895 + struct list_head page_list;
2896 ++
2897 ++ /*
2898 ++ * used to clear request reference in rqs[] before freeing one
2899 ++	 * Used to clear request references in rqs[] before freeing one
2900 ++	 * request pool.
2901 ++ spinlock_t lock;
2902 + };
2903 +
2904 + extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
2905 +diff --git a/block/blk-mq.c b/block/blk-mq.c
2906 +index c86c01bfecdbe..c732aa581124f 100644
2907 +--- a/block/blk-mq.c
2908 ++++ b/block/blk-mq.c
2909 +@@ -909,6 +909,14 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
2910 + return false;
2911 + }
2912 +
2913 ++void blk_mq_put_rq_ref(struct request *rq)
2914 ++{
2915 ++ if (is_flush_rq(rq, rq->mq_hctx))
2916 ++ rq->end_io(rq, 0);
2917 ++ else if (refcount_dec_and_test(&rq->ref))
2918 ++ __blk_mq_free_request(rq);
2919 ++}
2920 ++
2921 + static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
2922 + struct request *rq, void *priv, bool reserved)
2923 + {
2924 +@@ -942,11 +950,7 @@ static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
2925 + if (blk_mq_req_expired(rq, next))
2926 + blk_mq_rq_timed_out(rq, reserved);
2927 +
2928 +- if (is_flush_rq(rq, hctx))
2929 +- rq->end_io(rq, 0);
2930 +- else if (refcount_dec_and_test(&rq->ref))
2931 +- __blk_mq_free_request(rq);
2932 +-
2933 ++ blk_mq_put_rq_ref(rq);
2934 + return true;
2935 + }
2936 +
2937 +@@ -1220,9 +1224,6 @@ static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
2938 + {
2939 + unsigned int ewma;
2940 +
2941 +- if (hctx->queue->elevator)
2942 +- return;
2943 +-
2944 + ewma = hctx->dispatch_busy;
2945 +
2946 + if (!ewma && !busy)
2947 +@@ -2303,6 +2304,45 @@ queue_exit:
2948 + return BLK_QC_T_NONE;
2949 + }
2950 +
2951 ++static size_t order_to_size(unsigned int order)
2952 ++{
2953 ++ return (size_t)PAGE_SIZE << order;
2954 ++}
2955 ++
2956 ++/* called before freeing the request pool in @tags */
2957 ++static void blk_mq_clear_rq_mapping(struct blk_mq_tag_set *set,
2958 ++ struct blk_mq_tags *tags, unsigned int hctx_idx)
2959 ++{
2960 ++ struct blk_mq_tags *drv_tags = set->tags[hctx_idx];
2961 ++ struct page *page;
2962 ++ unsigned long flags;
2963 ++
2964 ++ list_for_each_entry(page, &tags->page_list, lru) {
2965 ++ unsigned long start = (unsigned long)page_address(page);
2966 ++ unsigned long end = start + order_to_size(page->private);
2967 ++ int i;
2968 ++
2969 ++ for (i = 0; i < set->queue_depth; i++) {
2970 ++ struct request *rq = drv_tags->rqs[i];
2971 ++ unsigned long rq_addr = (unsigned long)rq;
2972 ++
2973 ++ if (rq_addr >= start && rq_addr < end) {
2974 ++ WARN_ON_ONCE(refcount_read(&rq->ref) != 0);
2975 ++ cmpxchg(&drv_tags->rqs[i], rq, NULL);
2976 ++ }
2977 ++ }
2978 ++ }
2979 ++
2980 ++ /*
2981 ++ * Wait until all pending iterations are done.
2982 ++ *
2983 ++ * The request references were cleared above, and that clearing is
2984 ++ * guaranteed to be observed once the ->lock has been released.
2985 ++ */
2986 ++ spin_lock_irqsave(&drv_tags->lock, flags);
2987 ++ spin_unlock_irqrestore(&drv_tags->lock, flags);
2988 ++}
2989 ++
2990 + void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
2991 + unsigned int hctx_idx)
2992 + {
2993 +@@ -2321,6 +2361,8 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
2994 + }
2995 + }
2996 +
2997 ++ blk_mq_clear_rq_mapping(set, tags, hctx_idx);
2998 ++
2999 + while (!list_empty(&tags->page_list)) {
3000 + page = list_first_entry(&tags->page_list, struct page, lru);
3001 + list_del_init(&page->lru);
3002 +@@ -2380,11 +2422,6 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
3003 + return tags;
3004 + }
3005 +
3006 +-static size_t order_to_size(unsigned int order)
3007 +-{
3008 +- return (size_t)PAGE_SIZE << order;
3009 +-}
3010 +-
3011 + static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
3012 + unsigned int hctx_idx, int node)
3013 + {
3014 +diff --git a/block/blk-mq.h b/block/blk-mq.h
3015 +index 9ce64bc4a6c8f..556368d2c5b69 100644
3016 +--- a/block/blk-mq.h
3017 ++++ b/block/blk-mq.h
3018 +@@ -47,6 +47,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
3019 + void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
3020 + struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
3021 + struct blk_mq_ctx *start);
3022 ++void blk_mq_put_rq_ref(struct request *rq);
3023 +
3024 + /*
3025 + * Internal helpers for allocating/freeing the request map
3026 +diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
3027 +index 2bc43e94f4c40..2bcb3495e376b 100644
3028 +--- a/block/blk-rq-qos.h
3029 ++++ b/block/blk-rq-qos.h
3030 +@@ -7,6 +7,7 @@
3031 + #include <linux/blk_types.h>
3032 + #include <linux/atomic.h>
3033 + #include <linux/wait.h>
3034 ++#include <linux/blk-mq.h>
3035 +
3036 + #include "blk-mq-debugfs.h"
3037 +
3038 +@@ -99,8 +100,21 @@ static inline void rq_wait_init(struct rq_wait *rq_wait)
3039 +
3040 + static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
3041 + {
3042 ++ /*
3043 ++ * No IO may be in flight while an rqos is being added, so freeze
3044 ++ * the queue; this is fine since rq_qos is only supported on blk-mq queues.
3045 ++ *
3046 ++ * Reuse ->queue_lock to protect against concurrent rq_qos
3047 ++ * additions and deletions.
3048 ++ */
3049 ++ blk_mq_freeze_queue(q);
3050 ++
3051 ++ spin_lock_irq(&q->queue_lock);
3052 + rqos->next = q->rq_qos;
3053 + q->rq_qos = rqos;
3054 ++ spin_unlock_irq(&q->queue_lock);
3055 ++
3056 ++ blk_mq_unfreeze_queue(q);
3057 +
3058 + if (rqos->ops->debugfs_attrs)
3059 + blk_mq_debugfs_register_rqos(rqos);
3060 +@@ -110,12 +124,22 @@ static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
3061 + {
3062 + struct rq_qos **cur;
3063 +
3064 ++ /*
3065 ++ * See comment in rq_qos_add() about freezing queue & using
3066 ++ * ->queue_lock.
3067 ++ */
3068 ++ blk_mq_freeze_queue(q);
3069 ++
3070 ++ spin_lock_irq(&q->queue_lock);
3071 + for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
3072 + if (*cur == rqos) {
3073 + *cur = rqos->next;
3074 + break;
3075 + }
3076 + }
3077 ++ spin_unlock_irq(&q->queue_lock);
3078 ++
3079 ++ blk_mq_unfreeze_queue(q);
3080 +
3081 + blk_mq_debugfs_unregister_rqos(rqos);
3082 + }
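
After these two hunks, a policy is only ever linked or unlinked while the queue is frozen (no requests in flight) and while ->queue_lock is held (no concurrent add/del). A hypothetical policy attach path relying on those guarantees, with made-up myqos_* names:

	static struct rq_qos_ops myqos_ops;	/* hypothetical ops table */

	static int myqos_init(struct request_queue *q)
	{
		struct rq_qos *rqos = kzalloc(sizeof(*rqos), GFP_KERNEL);

		if (!rqos)
			return -ENOMEM;
		rqos->q = q;
		rqos->ops = &myqos_ops;
		rq_qos_add(q, rqos);	/* freezes q and takes ->queue_lock */
		return 0;
	}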
3083 +diff --git a/block/blk-wbt.c b/block/blk-wbt.c
3084 +index 42aed0160f86a..f5e5ac915bf7c 100644
3085 +--- a/block/blk-wbt.c
3086 ++++ b/block/blk-wbt.c
3087 +@@ -77,7 +77,8 @@ enum {
3088 +
3089 + static inline bool rwb_enabled(struct rq_wb *rwb)
3090 + {
3091 +- return rwb && rwb->wb_normal != 0;
3092 ++ return rwb && rwb->enable_state != WBT_STATE_OFF_DEFAULT &&
3093 ++ rwb->wb_normal != 0;
3094 + }
3095 +
3096 + static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
3097 +@@ -636,9 +637,13 @@ void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
3098 + void wbt_enable_default(struct request_queue *q)
3099 + {
3100 + struct rq_qos *rqos = wbt_rq_qos(q);
3101 ++
3102 + /* Throttling already enabled? */
3103 +- if (rqos)
3104 ++ if (rqos) {
3105 ++ if (RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT)
3106 ++ RQWB(rqos)->enable_state = WBT_STATE_ON_DEFAULT;
3107 + return;
3108 ++ }
3109 +
3110 + /* Queue not registered? Maybe shutting down... */
3111 + if (!blk_queue_registered(q))
3112 +@@ -702,7 +707,7 @@ void wbt_disable_default(struct request_queue *q)
3113 + rwb = RQWB(rqos);
3114 + if (rwb->enable_state == WBT_STATE_ON_DEFAULT) {
3115 + blk_stat_deactivate(rwb->cb);
3116 +- rwb->wb_normal = 0;
3117 ++ rwb->enable_state = WBT_STATE_OFF_DEFAULT;
3118 + }
3119 + }
3120 + EXPORT_SYMBOL_GPL(wbt_disable_default);
3121 +diff --git a/block/blk-wbt.h b/block/blk-wbt.h
3122 +index 16bdc85b8df92..2eb01becde8c4 100644
3123 +--- a/block/blk-wbt.h
3124 ++++ b/block/blk-wbt.h
3125 +@@ -34,6 +34,7 @@ enum {
3126 + enum {
3127 + WBT_STATE_ON_DEFAULT = 1,
3128 + WBT_STATE_ON_MANUAL = 2,
3129 ++ WBT_STATE_OFF_DEFAULT
3130 + };
3131 +
3132 + struct rq_wb {
3133 +diff --git a/crypto/ecdh.c b/crypto/ecdh.c
3134 +index 04a427b8c9564..e2c4808590244 100644
3135 +--- a/crypto/ecdh.c
3136 ++++ b/crypto/ecdh.c
3137 +@@ -179,10 +179,20 @@ static int ecdh_init(void)
3138 + {
3139 + int ret;
3140 +
3141 ++ /* NIST p192 will fail to register in FIPS mode */
3142 + ret = crypto_register_kpp(&ecdh_nist_p192);
3143 + ecdh_nist_p192_registered = ret == 0;
3144 +
3145 +- return crypto_register_kpp(&ecdh_nist_p256);
3146 ++ ret = crypto_register_kpp(&ecdh_nist_p256);
3147 ++ if (ret)
3148 ++ goto nist_p256_error;
3149 ++
3150 ++ return 0;
3151 ++
3152 ++nist_p256_error:
3153 ++ if (ecdh_nist_p192_registered)
3154 ++ crypto_unregister_kpp(&ecdh_nist_p192);
3155 ++ return ret;
3156 + }
3157 +
3158 + static void ecdh_exit(void)
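
The hunk is truncated at ecdh_exit(); to mirror the init path it presumably unregisters only what actually registered, along these lines (a sketch, not patch text — p192 registration is expected to fail in FIPS mode, which is what ecdh_nist_p192_registered tracks):

	static void ecdh_exit(void)
	{
		if (ecdh_nist_p192_registered)
			crypto_unregister_kpp(&ecdh_nist_p192);
		crypto_unregister_kpp(&ecdh_nist_p256);
	}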
3159 +diff --git a/crypto/shash.c b/crypto/shash.c
3160 +index 2e3433ad97629..0a0a50cb694f0 100644
3161 +--- a/crypto/shash.c
3162 ++++ b/crypto/shash.c
3163 +@@ -20,12 +20,24 @@
3164 +
3165 + static const struct crypto_type crypto_shash_type;
3166 +
3167 +-int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
3168 +- unsigned int keylen)
3169 ++static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
3170 ++ unsigned int keylen)
3171 + {
3172 + return -ENOSYS;
3173 + }
3174 +-EXPORT_SYMBOL_GPL(shash_no_setkey);
3175 ++
3176 ++/*
3177 ++ * Check whether an shash algorithm has a setkey function.
3178 ++ *
3179 ++ * For CFI compatibility, this must not be an inline function. This is because
3180 ++ * when CFI is enabled, modules won't get the same address for shash_no_setkey
3181 ++ * (if it were exported, which inlining would require) as the core kernel will.
3182 ++ */
3183 ++bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
3184 ++{
3185 ++ return alg->setkey != shash_no_setkey;
3186 ++}
3187 ++EXPORT_SYMBOL_GPL(crypto_shash_alg_has_setkey);
3188 +
3189 + static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
3190 + unsigned int keylen)
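
A hypothetical caller of the new helper is sketched below; comparing alg->setkey against shash_no_setkey directly is exactly what breaks under CFI, because a module and the core kernel would see different jump-table addresses for the same function:

	/* Sketch: reject a keyed use of an unkeyed hash (tfm is assumed
	 * to be a struct crypto_shash * obtained by the caller). */
	struct shash_alg *salg = crypto_shash_alg(tfm);

	if (!crypto_shash_alg_has_setkey(salg))
		return -EINVAL;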
3191 +diff --git a/crypto/sm2.c b/crypto/sm2.c
3192 +index b21addc3ac06a..db8a4a265669d 100644
3193 +--- a/crypto/sm2.c
3194 ++++ b/crypto/sm2.c
3195 +@@ -79,10 +79,17 @@ static int sm2_ec_ctx_init(struct mpi_ec_ctx *ec)
3196 + goto free;
3197 +
3198 + rc = -ENOMEM;
3199 ++
3200 ++ ec->Q = mpi_point_new(0);
3201 ++ if (!ec->Q)
3202 ++ goto free;
3203 ++
3204 + /* mpi_ec_setup_elliptic_curve */
3205 + ec->G = mpi_point_new(0);
3206 +- if (!ec->G)
3207 ++ if (!ec->G) {
3208 ++ mpi_point_release(ec->Q);
3209 + goto free;
3210 ++ }
3211 +
3212 + mpi_set(ec->G->x, x);
3213 + mpi_set(ec->G->y, y);
3214 +@@ -91,6 +98,7 @@ static int sm2_ec_ctx_init(struct mpi_ec_ctx *ec)
3215 + rc = -EINVAL;
3216 + ec->n = mpi_scanval(ecp->n);
3217 + if (!ec->n) {
3218 ++ mpi_point_release(ec->Q);
3219 + mpi_point_release(ec->G);
3220 + goto free;
3221 + }
3222 +@@ -386,27 +394,15 @@ static int sm2_set_pub_key(struct crypto_akcipher *tfm,
3223 + MPI a;
3224 + int rc;
3225 +
3226 +- ec->Q = mpi_point_new(0);
3227 +- if (!ec->Q)
3228 +- return -ENOMEM;
3229 +-
3230 + /* include the uncompressed flag '0x04' */
3231 +- rc = -ENOMEM;
3232 + a = mpi_read_raw_data(key, keylen);
3233 + if (!a)
3234 +- goto error;
3235 ++ return -ENOMEM;
3236 +
3237 + mpi_normalize(a);
3238 + rc = sm2_ecc_os2ec(ec->Q, a);
3239 + mpi_free(a);
3240 +- if (rc)
3241 +- goto error;
3242 +-
3243 +- return 0;
3244 +
3245 +-error:
3246 +- mpi_point_release(ec->Q);
3247 +- ec->Q = NULL;
3248 + return rc;
3249 + }
3250 +
3251 +diff --git a/crypto/testmgr.c b/crypto/testmgr.c
3252 +index 10c5b3b01ec47..26e40dba9ad29 100644
3253 +--- a/crypto/testmgr.c
3254 ++++ b/crypto/testmgr.c
3255 +@@ -4899,15 +4899,12 @@ static const struct alg_test_desc alg_test_descs[] = {
3256 + }
3257 + }, {
3258 + #endif
3259 +-#ifndef CONFIG_CRYPTO_FIPS
3260 + .alg = "ecdh-nist-p192",
3261 + .test = alg_test_kpp,
3262 +- .fips_allowed = 1,
3263 + .suite = {
3264 + .kpp = __VECS(ecdh_p192_tv_template)
3265 + }
3266 + }, {
3267 +-#endif
3268 + .alg = "ecdh-nist-p256",
3269 + .test = alg_test_kpp,
3270 + .fips_allowed = 1,
3271 +diff --git a/crypto/testmgr.h b/crypto/testmgr.h
3272 +index 34e4a3db39917..b9cf5b815532a 100644
3273 +--- a/crypto/testmgr.h
3274 ++++ b/crypto/testmgr.h
3275 +@@ -2685,7 +2685,6 @@ static const struct kpp_testvec curve25519_tv_template[] = {
3276 + }
3277 + };
3278 +
3279 +-#ifndef CONFIG_CRYPTO_FIPS
3280 + static const struct kpp_testvec ecdh_p192_tv_template[] = {
3281 + {
3282 + .secret =
3283 +@@ -2719,13 +2718,12 @@ static const struct kpp_testvec ecdh_p192_tv_template[] = {
3284 + "\xf4\x57\xcc\x4f\x1f\x4e\x31\xcc"
3285 + "\xe3\x40\x60\xc8\x06\x93\xc6\x2e"
3286 + "\x99\x80\x81\x28\xaf\xc5\x51\x74",
3287 +- .secret_size = 32,
3288 ++ .secret_size = 30,
3289 + .b_public_size = 48,
3290 + .expected_a_public_size = 48,
3291 + .expected_ss_size = 24
3292 + }
3293 + };
3294 +-#endif
3295 +
3296 + static const struct kpp_testvec ecdh_p256_tv_template[] = {
3297 + {
3298 +@@ -2766,7 +2764,7 @@ static const struct kpp_testvec ecdh_p256_tv_template[] = {
3299 + "\x9f\x4a\x38\xcc\xc0\x2c\x49\x2f"
3300 + "\xb1\x32\xbb\xaf\x22\x61\xda\xcb"
3301 + "\x6f\xdb\xa9\xaa\xfc\x77\x81\xf3",
3302 +- .secret_size = 40,
3303 ++ .secret_size = 38,
3304 + .b_public_size = 64,
3305 + .expected_a_public_size = 64,
3306 + .expected_ss_size = 32
3307 +@@ -2804,8 +2802,8 @@ static const struct kpp_testvec ecdh_p256_tv_template[] = {
3308 + "\x37\x08\xcc\x40\x5e\x7a\xfd\x6a"
3309 + "\x6a\x02\x6e\x41\x87\x68\x38\x77"
3310 + "\xfa\xa9\x44\x43\x2d\xef\x09\xdf",
3311 +- .secret_size = 8,
3312 +- .b_secret_size = 40,
3313 ++ .secret_size = 6,
3314 ++ .b_secret_size = 38,
3315 + .b_public_size = 64,
3316 + .expected_a_public_size = 64,
3317 + .expected_ss_size = 32,
3318 +diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
3319 +index 700b41adf2db6..9aa82d5272720 100644
3320 +--- a/drivers/acpi/Makefile
3321 ++++ b/drivers/acpi/Makefile
3322 +@@ -8,6 +8,11 @@ ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
3323 + #
3324 + # ACPI Boot-Time Table Parsing
3325 + #
3326 ++ifeq ($(CONFIG_ACPI_CUSTOM_DSDT),y)
3327 ++tables.o: $(src)/../../include/$(subst $\",,$(CONFIG_ACPI_CUSTOM_DSDT_FILE)) ;
3328 ++
3329 ++endif
3330 ++
3331 + obj-$(CONFIG_ACPI) += tables.o
3332 + obj-$(CONFIG_X86) += blacklist.o
3333 +
3334 +diff --git a/drivers/acpi/acpi_fpdt.c b/drivers/acpi/acpi_fpdt.c
3335 +index a89a806a7a2a9..4ee2ad234e3d6 100644
3336 +--- a/drivers/acpi/acpi_fpdt.c
3337 ++++ b/drivers/acpi/acpi_fpdt.c
3338 +@@ -240,8 +240,10 @@ static int __init acpi_init_fpdt(void)
3339 + return 0;
3340 +
3341 + fpdt_kobj = kobject_create_and_add("fpdt", acpi_kobj);
3342 +- if (!fpdt_kobj)
3343 ++ if (!fpdt_kobj) {
3344 ++ acpi_put_table(header);
3345 + return -ENOMEM;
3346 ++ }
3347 +
3348 + while (offset < header->length) {
3349 + subtable = (void *)header + offset;
3350 +diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
3351 +index 14b71b41e8453..38e10ab976e67 100644
3352 +--- a/drivers/acpi/acpica/nsrepair2.c
3353 ++++ b/drivers/acpi/acpica/nsrepair2.c
3354 +@@ -379,6 +379,13 @@ acpi_ns_repair_CID(struct acpi_evaluate_info *info,
3355 +
3356 + (*element_ptr)->common.reference_count =
3357 + original_ref_count;
3358 ++
3359 ++ /*
3360 ++ * The original_element holds a reference from the package object
3361 ++ * that represents _HID. Since a new element was created by _HID,
3362 ++ * remove the reference from the _CID package.
3363 ++ */
3364 ++ acpi_ut_remove_reference(original_element);
3365 + }
3366 +
3367 + element_ptr++;
3368 +diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
3369 +index fce7ade2aba92..0c8330ed1ffd5 100644
3370 +--- a/drivers/acpi/apei/ghes.c
3371 ++++ b/drivers/acpi/apei/ghes.c
3372 +@@ -441,28 +441,35 @@ static void ghes_kick_task_work(struct callback_head *head)
3373 + gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, node_len);
3374 + }
3375 +
3376 +-static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
3377 +- int sev)
3378 ++static bool ghes_do_memory_failure(u64 physical_addr, int flags)
3379 + {
3380 + unsigned long pfn;
3381 +- int flags = -1;
3382 +- int sec_sev = ghes_severity(gdata->error_severity);
3383 +- struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
3384 +
3385 + if (!IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
3386 + return false;
3387 +
3388 +- if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
3389 +- return false;
3390 +-
3391 +- pfn = mem_err->physical_addr >> PAGE_SHIFT;
3392 ++ pfn = PHYS_PFN(physical_addr);
3393 + if (!pfn_valid(pfn)) {
3394 + pr_warn_ratelimited(FW_WARN GHES_PFX
3395 + "Invalid address in generic error data: %#llx\n",
3396 +- mem_err->physical_addr);
3397 ++ physical_addr);
3398 + return false;
3399 + }
3400 +
3401 ++ memory_failure_queue(pfn, flags);
3402 ++ return true;
3403 ++}
3404 ++
3405 ++static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
3406 ++ int sev)
3407 ++{
3408 ++ int flags = -1;
3409 ++ int sec_sev = ghes_severity(gdata->error_severity);
3410 ++ struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
3411 ++
3412 ++ if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
3413 ++ return false;
3414 ++
3415 + /* iff following two events can be handled properly by now */
3416 + if (sec_sev == GHES_SEV_CORRECTED &&
3417 + (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED))
3418 +@@ -470,14 +477,56 @@ static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
3419 + if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE)
3420 + flags = 0;
3421 +
3422 +- if (flags != -1) {
3423 +- memory_failure_queue(pfn, flags);
3424 +- return true;
3425 +- }
3426 ++ if (flags != -1)
3427 ++ return ghes_do_memory_failure(mem_err->physical_addr, flags);
3428 +
3429 + return false;
3430 + }
3431 +
3432 ++static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata, int sev)
3433 ++{
3434 ++ struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);
3435 ++ bool queued = false;
3436 ++ int sec_sev, i;
3437 ++ char *p;
3438 ++
3439 ++ log_arm_hw_error(err);
3440 ++
3441 ++ sec_sev = ghes_severity(gdata->error_severity);
3442 ++ if (sev != GHES_SEV_RECOVERABLE || sec_sev != GHES_SEV_RECOVERABLE)
3443 ++ return false;
3444 ++
3445 ++ p = (char *)(err + 1);
3446 ++ for (i = 0; i < err->err_info_num; i++) {
3447 ++ struct cper_arm_err_info *err_info = (struct cper_arm_err_info *)p;
3448 ++ bool is_cache = (err_info->type == CPER_ARM_CACHE_ERROR);
3449 ++ bool has_pa = (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR);
3450 ++ const char *error_type = "unknown error";
3451 ++
3452 ++ /*
3453 ++ * The field (err_info->error_info & BIT(26)) is always set to 1
3454 ++ * by some old HiSilicon Kunpeng920 firmware. We assume that
3455 ++ * firmware won't mix corrected errors into an uncorrected section,
3456 ++ * so corrected errors are not filtered out here.
3457 ++ */
3458 ++ if (is_cache && has_pa) {
3459 ++ queued = ghes_do_memory_failure(err_info->physical_fault_addr, 0);
3460 ++ p += err_info->length;
3461 ++ continue;
3462 ++ }
3463 ++
3464 ++ if (err_info->type < ARRAY_SIZE(cper_proc_error_type_strs))
3465 ++ error_type = cper_proc_error_type_strs[err_info->type];
3466 ++
3467 ++ pr_warn_ratelimited(FW_WARN GHES_PFX
3468 ++ "Unhandled processor error type: %s\n",
3469 ++ error_type);
3470 ++ p += err_info->length;
3471 ++ }
3472 ++
3473 ++ return queued;
3474 ++}
3475 ++
3476 + /*
3477 + * PCIe AER errors need to be sent to the AER driver for reporting and
3478 + * recovery. The GHES severities map to the following AER severities and
3479 +@@ -605,9 +654,7 @@ static bool ghes_do_proc(struct ghes *ghes,
3480 + ghes_handle_aer(gdata);
3481 + }
3482 + else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
3483 +- struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);
3484 +-
3485 +- log_arm_hw_error(err);
3486 ++ queued = ghes_handle_arm_hw_error(gdata, sev);
3487 + } else {
3488 + void *err = acpi_hest_get_payload(gdata);
3489 +
3490 +diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
3491 +index 19bb7f870204c..e0d14017706ea 100644
3492 +--- a/drivers/acpi/bgrt.c
3493 ++++ b/drivers/acpi/bgrt.c
3494 +@@ -15,40 +15,19 @@
3495 + static void *bgrt_image;
3496 + static struct kobject *bgrt_kobj;
3497 +
3498 +-static ssize_t version_show(struct device *dev,
3499 +- struct device_attribute *attr, char *buf)
3500 +-{
3501 +- return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.version);
3502 +-}
3503 +-static DEVICE_ATTR_RO(version);
3504 +-
3505 +-static ssize_t status_show(struct device *dev,
3506 +- struct device_attribute *attr, char *buf)
3507 +-{
3508 +- return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.status);
3509 +-}
3510 +-static DEVICE_ATTR_RO(status);
3511 +-
3512 +-static ssize_t type_show(struct device *dev,
3513 +- struct device_attribute *attr, char *buf)
3514 +-{
3515 +- return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_type);
3516 +-}
3517 +-static DEVICE_ATTR_RO(type);
3518 +-
3519 +-static ssize_t xoffset_show(struct device *dev,
3520 +- struct device_attribute *attr, char *buf)
3521 +-{
3522 +- return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_offset_x);
3523 +-}
3524 +-static DEVICE_ATTR_RO(xoffset);
3525 +-
3526 +-static ssize_t yoffset_show(struct device *dev,
3527 +- struct device_attribute *attr, char *buf)
3528 +-{
3529 +- return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_offset_y);
3530 +-}
3531 +-static DEVICE_ATTR_RO(yoffset);
3532 ++#define BGRT_SHOW(_name, _member) \
3533 ++ static ssize_t _name##_show(struct kobject *kobj, \
3534 ++ struct kobj_attribute *attr, char *buf) \
3535 ++ { \
3536 ++ return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab._member); \
3537 ++ } \
3538 ++ struct kobj_attribute bgrt_attr_##_name = __ATTR_RO(_name)
3539 ++
3540 ++BGRT_SHOW(version, version);
3541 ++BGRT_SHOW(status, status);
3542 ++BGRT_SHOW(type, image_type);
3543 ++BGRT_SHOW(xoffset, image_offset_x);
3544 ++BGRT_SHOW(yoffset, image_offset_y);
3545 +
3546 + static ssize_t image_read(struct file *file, struct kobject *kobj,
3547 + struct bin_attribute *attr, char *buf, loff_t off, size_t count)
3548 +@@ -60,11 +39,11 @@ static ssize_t image_read(struct file *file, struct kobject *kobj,
3549 + static BIN_ATTR_RO(image, 0); /* size gets filled in later */
3550 +
3551 + static struct attribute *bgrt_attributes[] = {
3552 +- &dev_attr_version.attr,
3553 +- &dev_attr_status.attr,
3554 +- &dev_attr_type.attr,
3555 +- &dev_attr_xoffset.attr,
3556 +- &dev_attr_yoffset.attr,
3557 ++ &bgrt_attr_version.attr,
3558 ++ &bgrt_attr_status.attr,
3559 ++ &bgrt_attr_type.attr,
3560 ++ &bgrt_attr_xoffset.attr,
3561 ++ &bgrt_attr_yoffset.attr,
3562 + NULL,
3563 + };
3564 +
3565 +diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
3566 +index a4bd673934c0a..44b4f02e2c6d7 100644
3567 +--- a/drivers/acpi/bus.c
3568 ++++ b/drivers/acpi/bus.c
3569 +@@ -1321,6 +1321,7 @@ static int __init acpi_init(void)
3570 +
3571 + result = acpi_bus_init();
3572 + if (result) {
3573 ++ kobject_put(acpi_kobj);
3574 + disable_acpi();
3575 + return result;
3576 + }
3577 +diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
3578 +index d260bc1f3e6e7..9d2d3b9bb8b59 100644
3579 +--- a/drivers/acpi/device_pm.c
3580 ++++ b/drivers/acpi/device_pm.c
3581 +@@ -20,6 +20,7 @@
3582 + #include <linux/pm_runtime.h>
3583 + #include <linux/suspend.h>
3584 +
3585 ++#include "fan.h"
3586 + #include "internal.h"
3587 +
3588 + /**
3589 +@@ -1310,10 +1311,7 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
3590 + * with the generic ACPI PM domain.
3591 + */
3592 + static const struct acpi_device_id special_pm_ids[] = {
3593 +- {"PNP0C0B", }, /* Generic ACPI fan */
3594 +- {"INT3404", }, /* Fan */
3595 +- {"INTC1044", }, /* Fan for Tiger Lake generation */
3596 +- {"INTC1048", }, /* Fan for Alder Lake generation */
3597 ++ ACPI_FAN_DEVICE_IDS,
3598 + {}
3599 + };
3600 + struct acpi_device *adev = ACPI_COMPANION(dev);
3601 +diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
3602 +index fa2c1c93072cf..a393e0e09381d 100644
3603 +--- a/drivers/acpi/device_sysfs.c
3604 ++++ b/drivers/acpi/device_sysfs.c
3605 +@@ -448,7 +448,7 @@ static ssize_t description_show(struct device *dev,
3606 + (wchar_t *)acpi_dev->pnp.str_obj->buffer.pointer,
3607 + acpi_dev->pnp.str_obj->buffer.length,
3608 + UTF16_LITTLE_ENDIAN, buf,
3609 +- PAGE_SIZE);
3610 ++ PAGE_SIZE - 1);
3611 +
3612 + buf[result++] = '\n';
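
The surrounding context shows the off-by-one being closed: utf16s_to_utf8s() may fill the entire output buffer, and the next statement appends a newline. Capping the conversion at PAGE_SIZE - 1 keeps that byte in bounds. In isolation (str and len standing in for the ACPI string buffer fields above):

	result = utf16s_to_utf8s(str, len, UTF16_LITTLE_ENDIAN,
				 buf, PAGE_SIZE - 1);	/* result <= PAGE_SIZE - 1 */
	buf[result++] = '\n';				/* always within the page */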
3613 +
3614 +diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
3615 +index 13565629ce0a8..87c3b4a099b94 100644
3616 +--- a/drivers/acpi/ec.c
3617 ++++ b/drivers/acpi/ec.c
3618 +@@ -183,6 +183,7 @@ static struct workqueue_struct *ec_query_wq;
3619 +
3620 + static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */
3621 + static int EC_FLAGS_IGNORE_DSDT_GPE; /* Needs ECDT GPE as correction setting */
3622 ++static int EC_FLAGS_TRUST_DSDT_GPE; /* Needs DSDT GPE as correction setting */
3623 + static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
3624 +
3625 + /* --------------------------------------------------------------------------
3626 +@@ -1593,7 +1594,8 @@ static int acpi_ec_add(struct acpi_device *device)
3627 + }
3628 +
3629 + if (boot_ec && ec->command_addr == boot_ec->command_addr &&
3630 +- ec->data_addr == boot_ec->data_addr) {
3631 ++ ec->data_addr == boot_ec->data_addr &&
3632 ++ !EC_FLAGS_TRUST_DSDT_GPE) {
3633 + /*
3634 + * Trust PNP0C09 namespace location rather than
3635 + * ECDT ID. But trust ECDT GPE rather than _GPE
3636 +@@ -1816,6 +1818,18 @@ static int ec_correct_ecdt(const struct dmi_system_id *id)
3637 + return 0;
3638 + }
3639 +
3640 ++/*
3641 ++ * Some ECDTs contain the wrong GPE setting but share their port addresses
3642 ++ * with the DSDT EC; in that case, don't duplicate the DSDT EC as an ECDT EC.
3643 ++ * https://bugzilla.kernel.org/show_bug.cgi?id=209989
3644 ++ */
3645 ++static int ec_honor_dsdt_gpe(const struct dmi_system_id *id)
3646 ++{
3647 ++ pr_debug("Detected system needing DSDT GPE setting.\n");
3648 ++ EC_FLAGS_TRUST_DSDT_GPE = 1;
3649 ++ return 0;
3650 ++}
3651 ++
3652 + /*
3653 + * Some DSDTs contain wrong GPE setting.
3654 + * Asus FX502VD/VE, GL702VMK, X550VXK, X580VD
3655 +@@ -1846,6 +1860,22 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = {
3656 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
3657 + DMI_MATCH(DMI_PRODUCT_NAME, "GL702VMK"),}, NULL},
3658 + {
3659 ++ ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X505BA", {
3660 ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
3661 ++ DMI_MATCH(DMI_PRODUCT_NAME, "X505BA"),}, NULL},
3662 ++ {
3663 ++ ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X505BP", {
3664 ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
3665 ++ DMI_MATCH(DMI_PRODUCT_NAME, "X505BP"),}, NULL},
3666 ++ {
3667 ++ ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X542BA", {
3668 ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
3669 ++ DMI_MATCH(DMI_PRODUCT_NAME, "X542BA"),}, NULL},
3670 ++ {
3671 ++ ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X542BP", {
3672 ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
3673 ++ DMI_MATCH(DMI_PRODUCT_NAME, "X542BP"),}, NULL},
3674 ++ {
3675 + ec_honor_ecdt_gpe, "ASUS X550VXK", {
3676 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
3677 + DMI_MATCH(DMI_PRODUCT_NAME, "X550VXK"),}, NULL},
3678 +@@ -1854,6 +1884,11 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = {
3679 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
3680 + DMI_MATCH(DMI_PRODUCT_NAME, "X580VD"),}, NULL},
3681 + {
3682 ++ /* https://bugzilla.kernel.org/show_bug.cgi?id=209989 */
3683 ++ ec_honor_dsdt_gpe, "HP Pavilion Gaming Laptop 15-cx0xxx", {
3684 ++ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
3685 ++ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-cx0xxx"),}, NULL},
3686 ++ {
3687 + ec_clear_on_resume, "Samsung hardware", {
3688 + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
3689 + {},
3690 +diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
3691 +index 66c3983f0ccca..5cd0ceb50bc8a 100644
3692 +--- a/drivers/acpi/fan.c
3693 ++++ b/drivers/acpi/fan.c
3694 +@@ -16,6 +16,8 @@
3695 + #include <linux/platform_device.h>
3696 + #include <linux/sort.h>
3697 +
3698 ++#include "fan.h"
3699 ++
3700 + MODULE_AUTHOR("Paul Diefenbaugh");
3701 + MODULE_DESCRIPTION("ACPI Fan Driver");
3702 + MODULE_LICENSE("GPL");
3703 +@@ -24,10 +26,7 @@ static int acpi_fan_probe(struct platform_device *pdev);
3704 + static int acpi_fan_remove(struct platform_device *pdev);
3705 +
3706 + static const struct acpi_device_id fan_device_ids[] = {
3707 +- {"PNP0C0B", 0},
3708 +- {"INT3404", 0},
3709 +- {"INTC1044", 0},
3710 +- {"INTC1048", 0},
3711 ++ ACPI_FAN_DEVICE_IDS,
3712 + {"", 0},
3713 + };
3714 + MODULE_DEVICE_TABLE(acpi, fan_device_ids);
3715 +diff --git a/drivers/acpi/fan.h b/drivers/acpi/fan.h
3716 +new file mode 100644
3717 +index 0000000000000..dc9a6efa514b0
3718 +--- /dev/null
3719 ++++ b/drivers/acpi/fan.h
3720 +@@ -0,0 +1,13 @@
3721 ++/* SPDX-License-Identifier: GPL-2.0-only */
3722 ++
3723 ++/*
3724 ++ * ACPI fan device IDs are shared between the fan driver and the device power
3725 ++ * management code.
3726 ++ *
3727 ++ * Add new device IDs before the generic ACPI fan one.
3728 ++ */
3729 ++#define ACPI_FAN_DEVICE_IDS \
3730 ++ {"INT3404", }, /* Fan */ \
3731 ++ {"INTC1044", }, /* Fan for Tiger Lake generation */ \
3732 ++ {"INTC1048", }, /* Fan for Alder Lake generation */ \
3733 ++ {"PNP0C0B", } /* Generic ACPI fan */
3734 +diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
3735 +index 45a019619e4a5..095c8aca141eb 100644
3736 +--- a/drivers/acpi/processor_idle.c
3737 ++++ b/drivers/acpi/processor_idle.c
3738 +@@ -16,6 +16,7 @@
3739 + #include <linux/acpi.h>
3740 + #include <linux/dmi.h>
3741 + #include <linux/sched.h> /* need_resched() */
3742 ++#include <linux/sort.h>
3743 + #include <linux/tick.h>
3744 + #include <linux/cpuidle.h>
3745 + #include <linux/cpu.h>
3746 +@@ -384,10 +385,37 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
3747 + return;
3748 + }
3749 +
3750 ++static int acpi_cst_latency_cmp(const void *a, const void *b)
3751 ++{
3752 ++ const struct acpi_processor_cx *x = a, *y = b;
3753 ++
3754 ++ if (!(x->valid && y->valid))
3755 ++ return 0;
3756 ++ if (x->latency > y->latency)
3757 ++ return 1;
3758 ++ if (x->latency < y->latency)
3759 ++ return -1;
3760 ++ return 0;
3761 ++}
3762 ++static void acpi_cst_latency_swap(void *a, void *b, int n)
3763 ++{
3764 ++ struct acpi_processor_cx *x = a, *y = b;
3765 ++ u32 tmp;
3766 ++
3767 ++ if (!(x->valid && y->valid))
3768 ++ return;
3769 ++ tmp = x->latency;
3770 ++ x->latency = y->latency;
3771 ++ y->latency = tmp;
3772 ++}
3773 ++
3774 + static int acpi_processor_power_verify(struct acpi_processor *pr)
3775 + {
3776 + unsigned int i;
3777 + unsigned int working = 0;
3778 ++ unsigned int last_latency = 0;
3779 ++ unsigned int last_type = 0;
3780 ++ bool buggy_latency = false;
3781 +
3782 + pr->power.timer_broadcast_on_state = INT_MAX;
3783 +
3784 +@@ -411,12 +439,24 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
3785 + }
3786 + if (!cx->valid)
3787 + continue;
3788 ++ if (cx->type >= last_type && cx->latency < last_latency)
3789 ++ buggy_latency = true;
3790 ++ last_latency = cx->latency;
3791 ++ last_type = cx->type;
3792 +
3793 + lapic_timer_check_state(i, pr, cx);
3794 + tsc_check_state(cx->type);
3795 + working++;
3796 + }
3797 +
3798 ++ if (buggy_latency) {
3799 ++ pr_notice("FW issue: working around C-state latencies out of order\n");
3800 ++ sort(&pr->power.states[1], max_cstate,
3801 ++ sizeof(struct acpi_processor_cx),
3802 ++ acpi_cst_latency_cmp,
3803 ++ acpi_cst_latency_swap);
3804 ++ }
3805 ++
3806 + lapic_timer_propagate_broadcast(pr);
3807 +
3808 + return (working);
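
The workaround relies on the kernel's generic sort() from lib/sort.c, hence the new linux/sort.h include. Note that acpi_cst_latency_swap() exchanges only the latency fields, so each state keeps its slot and type while the latencies are put back in non-decreasing order. A condensed example of the same API against a made-up u32 array:

	#include <linux/sort.h>

	static int u32_cmp(const void *a, const void *b)
	{
		u32 x = *(const u32 *)a, y = *(const u32 *)b;

		return x < y ? -1 : x > y;	/* <0, 0, >0, like memcmp */
	}

	/* A NULL swap_func makes sort() fall back to its generic swap. */
	sort(latencies, nr, sizeof(*latencies), u32_cmp, NULL);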
3809 +diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
3810 +index ee78a210c6068..dc01fb550b28d 100644
3811 +--- a/drivers/acpi/resource.c
3812 ++++ b/drivers/acpi/resource.c
3813 +@@ -423,6 +423,13 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
3814 + }
3815 + }
3816 +
3817 ++static bool irq_is_legacy(struct acpi_resource_irq *irq)
3818 ++{
3819 ++ return irq->triggering == ACPI_EDGE_SENSITIVE &&
3820 ++ irq->polarity == ACPI_ACTIVE_HIGH &&
3821 ++ irq->shareable == ACPI_EXCLUSIVE;
3822 ++}
3823 ++
3824 + /**
3825 + * acpi_dev_resource_interrupt - Extract ACPI interrupt resource information.
3826 + * @ares: Input ACPI resource object.
3827 +@@ -461,7 +468,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
3828 + }
3829 + acpi_dev_get_irqresource(res, irq->interrupts[index],
3830 + irq->triggering, irq->polarity,
3831 +- irq->shareable, true);
3832 ++ irq->shareable, irq_is_legacy(irq));
3833 + break;
3834 + case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
3835 + ext_irq = &ares->data.extended_irq;
3836 +diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
3837 +index e10d38ac7cf28..438df8da6d122 100644
3838 +--- a/drivers/acpi/scan.c
3839 ++++ b/drivers/acpi/scan.c
3840 +@@ -1671,8 +1671,20 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
3841 + device_initialize(&device->dev);
3842 + dev_set_uevent_suppress(&device->dev, true);
3843 + acpi_init_coherency(device);
3844 +- /* Assume there are unmet deps to start with. */
3845 +- device->dep_unmet = 1;
3846 ++}
3847 ++
3848 ++static void acpi_scan_dep_init(struct acpi_device *adev)
3849 ++{
3850 ++ struct acpi_dep_data *dep;
3851 ++
3852 ++ mutex_lock(&acpi_dep_list_lock);
3853 ++
3854 ++ list_for_each_entry(dep, &acpi_dep_list, node) {
3855 ++ if (dep->consumer == adev->handle)
3856 ++ adev->dep_unmet++;
3857 ++ }
3858 ++
3859 ++ mutex_unlock(&acpi_dep_list_lock);
3860 + }
3861 +
3862 + void acpi_device_add_finalize(struct acpi_device *device)
3863 +@@ -1688,7 +1700,7 @@ static void acpi_scan_init_status(struct acpi_device *adev)
3864 + }
3865 +
3866 + static int acpi_add_single_object(struct acpi_device **child,
3867 +- acpi_handle handle, int type)
3868 ++ acpi_handle handle, int type, bool dep_init)
3869 + {
3870 + struct acpi_device *device;
3871 + int result;
3872 +@@ -1703,8 +1715,12 @@ static int acpi_add_single_object(struct acpi_device **child,
3873 + * acpi_bus_get_status() and use its quirk handling. Note that
3874 + * this must be done before the get power-/wakeup_dev-flags calls.
3875 + */
3876 +- if (type == ACPI_BUS_TYPE_DEVICE || type == ACPI_BUS_TYPE_PROCESSOR)
3877 ++ if (type == ACPI_BUS_TYPE_DEVICE || type == ACPI_BUS_TYPE_PROCESSOR) {
3878 ++ if (dep_init)
3879 ++ acpi_scan_dep_init(device);
3880 ++
3881 + acpi_scan_init_status(device);
3882 ++ }
3883 +
3884 + acpi_bus_get_power_flags(device);
3885 + acpi_bus_get_wakeup_device_flags(device);
3886 +@@ -1886,22 +1902,6 @@ static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep)
3887 + return count;
3888 + }
3889 +
3890 +-static void acpi_scan_dep_init(struct acpi_device *adev)
3891 +-{
3892 +- struct acpi_dep_data *dep;
3893 +-
3894 +- adev->dep_unmet = 0;
3895 +-
3896 +- mutex_lock(&acpi_dep_list_lock);
3897 +-
3898 +- list_for_each_entry(dep, &acpi_dep_list, node) {
3899 +- if (dep->consumer == adev->handle)
3900 +- adev->dep_unmet++;
3901 +- }
3902 +-
3903 +- mutex_unlock(&acpi_dep_list_lock);
3904 +-}
3905 +-
3906 + static bool acpi_bus_scan_second_pass;
3907 +
3908 + static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep,
3909 +@@ -1949,19 +1949,15 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep,
3910 + return AE_OK;
3911 + }
3912 +
3913 +- acpi_add_single_object(&device, handle, type);
3914 +- if (!device)
3915 +- return AE_CTRL_DEPTH;
3916 +-
3917 +- acpi_scan_init_hotplug(device);
3918 + /*
3919 + * If check_dep is true at this point, the device has no dependencies,
3920 + * or the creation of the device object would have been postponed above.
3921 + */
3922 +- if (check_dep)
3923 +- device->dep_unmet = 0;
3924 +- else
3925 +- acpi_scan_dep_init(device);
3926 ++ acpi_add_single_object(&device, handle, type, !check_dep);
3927 ++ if (!device)
3928 ++ return AE_CTRL_DEPTH;
3929 ++
3930 ++ acpi_scan_init_hotplug(device);
3931 +
3932 + out:
3933 + if (!*adev_p)
3934 +@@ -2223,7 +2219,7 @@ int acpi_bus_register_early_device(int type)
3935 + struct acpi_device *device = NULL;
3936 + int result;
3937 +
3938 +- result = acpi_add_single_object(&device, NULL, type);
3939 ++ result = acpi_add_single_object(&device, NULL, type, false);
3940 + if (result)
3941 + return result;
3942 +
3943 +@@ -2243,7 +2239,7 @@ static int acpi_bus_scan_fixed(void)
3944 + struct acpi_device *device = NULL;
3945 +
3946 + result = acpi_add_single_object(&device, NULL,
3947 +- ACPI_BUS_TYPE_POWER_BUTTON);
3948 ++ ACPI_BUS_TYPE_POWER_BUTTON, false);
3949 + if (result)
3950 + return result;
3951 +
3952 +@@ -2259,7 +2255,7 @@ static int acpi_bus_scan_fixed(void)
3953 + struct acpi_device *device = NULL;
3954 +
3955 + result = acpi_add_single_object(&device, NULL,
3956 +- ACPI_BUS_TYPE_SLEEP_BUTTON);
3957 ++ ACPI_BUS_TYPE_SLEEP_BUTTON, false);
3958 + if (result)
3959 + return result;
3960 +
3961 +diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
3962 +index 2b69536cdccba..2d7ddb8a8cb65 100644
3963 +--- a/drivers/acpi/x86/s2idle.c
3964 ++++ b/drivers/acpi/x86/s2idle.c
3965 +@@ -42,6 +42,8 @@ static const struct acpi_device_id lps0_device_ids[] = {
3966 +
3967 + /* AMD */
3968 + #define ACPI_LPS0_DSM_UUID_AMD "e3f32452-febc-43ce-9039-932122d37721"
3969 ++#define ACPI_LPS0_ENTRY_AMD 2
3970 ++#define ACPI_LPS0_EXIT_AMD 3
3971 + #define ACPI_LPS0_SCREEN_OFF_AMD 4
3972 + #define ACPI_LPS0_SCREEN_ON_AMD 5
3973 +
3974 +@@ -408,6 +410,7 @@ int acpi_s2idle_prepare_late(void)
3975 +
3976 + if (acpi_s2idle_vendor_amd()) {
3977 + acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF_AMD);
3978 ++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY_AMD);
3979 + } else {
3980 + acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF);
3981 + acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY);
3982 +@@ -422,6 +425,7 @@ void acpi_s2idle_restore_early(void)
3983 + return;
3984 +
3985 + if (acpi_s2idle_vendor_amd()) {
3986 ++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT_AMD);
3987 + acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON_AMD);
3988 + } else {
3989 + acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT);
3990 +diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
3991 +index badab67088935..46208ececbb6a 100644
3992 +--- a/drivers/ata/pata_ep93xx.c
3993 ++++ b/drivers/ata/pata_ep93xx.c
3994 +@@ -928,7 +928,7 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
3995 + /* INT[3] (IRQ_EP93XX_EXT3) line connected as pull down */
3996 + irq = platform_get_irq(pdev, 0);
3997 + if (irq < 0) {
3998 +- err = -ENXIO;
3999 ++ err = irq;
4000 + goto err_rel_gpio;
4001 + }
4002 +
4003 +diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
4004 +index bd87476ab4813..b5a3f710d76de 100644
4005 +--- a/drivers/ata/pata_octeon_cf.c
4006 ++++ b/drivers/ata/pata_octeon_cf.c
4007 +@@ -898,10 +898,11 @@ static int octeon_cf_probe(struct platform_device *pdev)
4008 + return -EINVAL;
4009 + }
4010 +
4011 +- irq_handler = octeon_cf_interrupt;
4012 + i = platform_get_irq(dma_dev, 0);
4013 +- if (i > 0)
4014 ++ if (i > 0) {
4015 + irq = i;
4016 ++ irq_handler = octeon_cf_interrupt;
4017 ++ }
4018 + }
4019 + of_node_put(dma_node);
4020 + }
4021 +diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
4022 +index 479c4b29b8562..303f8c375b3af 100644
4023 +--- a/drivers/ata/pata_rb532_cf.c
4024 ++++ b/drivers/ata/pata_rb532_cf.c
4025 +@@ -115,10 +115,12 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
4026 + }
4027 +
4028 + irq = platform_get_irq(pdev, 0);
4029 +- if (irq <= 0) {
4030 ++ if (irq < 0) {
4031 + dev_err(&pdev->dev, "no IRQ resource found\n");
4032 +- return -ENOENT;
4033 ++ return irq;
4034 + }
4035 ++ if (!irq)
4036 ++ return -EINVAL;
4037 +
4038 + gpiod = devm_gpiod_get(&pdev->dev, NULL, GPIOD_IN);
4039 + if (IS_ERR(gpiod)) {
4040 +diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
4041 +index 64b2ef15ec191..8440203e835ed 100644
4042 +--- a/drivers/ata/sata_highbank.c
4043 ++++ b/drivers/ata/sata_highbank.c
4044 +@@ -469,10 +469,12 @@ static int ahci_highbank_probe(struct platform_device *pdev)
4045 + }
4046 +
4047 + irq = platform_get_irq(pdev, 0);
4048 +- if (irq <= 0) {
4049 ++ if (irq < 0) {
4050 + dev_err(dev, "no irq\n");
4051 +- return -EINVAL;
4052 ++ return irq;
4053 + }
4054 ++ if (!irq)
4055 ++ return -EINVAL;
4056 +
4057 + hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
4058 + if (!hpriv) {
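
Three drivers in a row receive the same treatment here. The convention being applied, in the fuller two-step form used by the rb532 and highbank hunks (platform_get_irq() returns a negative errno on failure, and 0 is not a valid Linux IRQ number):

	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* keep the real errno, e.g. -EPROBE_DEFER */
	if (!irq)
		return -EINVAL;	/* IRQ 0 is reserved/invalid */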
4059 +diff --git a/drivers/block/loop.c b/drivers/block/loop.c
4060 +index 76e12f3482a91..8271df1251535 100644
4061 +--- a/drivers/block/loop.c
4062 ++++ b/drivers/block/loop.c
4063 +@@ -1154,6 +1154,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
4064 + blk_queue_physical_block_size(lo->lo_queue, bsize);
4065 + blk_queue_io_min(lo->lo_queue, bsize);
4066 +
4067 ++ loop_config_discard(lo);
4068 + loop_update_rotational(lo);
4069 + loop_update_dio(lo);
4070 + loop_sysfs_init(lo);
4071 +diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
4072 +index 25114f0d13199..bd71dfc9c9748 100644
4073 +--- a/drivers/bluetooth/btqca.c
4074 ++++ b/drivers/bluetooth/btqca.c
4075 +@@ -183,7 +183,7 @@ int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
4076 + EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);
4077 +
4078 + static void qca_tlv_check_data(struct qca_fw_config *config,
4079 +- const struct firmware *fw, enum qca_btsoc_type soc_type)
4080 ++ u8 *fw_data, enum qca_btsoc_type soc_type)
4081 + {
4082 + const u8 *data;
4083 + u32 type_len;
4084 +@@ -194,7 +194,7 @@ static void qca_tlv_check_data(struct qca_fw_config *config,
4085 + struct tlv_type_nvm *tlv_nvm;
4086 + uint8_t nvm_baud_rate = config->user_baud_rate;
4087 +
4088 +- tlv = (struct tlv_type_hdr *)fw->data;
4089 ++ tlv = (struct tlv_type_hdr *)fw_data;
4090 +
4091 + type_len = le32_to_cpu(tlv->type_len);
4092 + length = (type_len >> 8) & 0x00ffffff;
4093 +@@ -390,8 +390,9 @@ static int qca_download_firmware(struct hci_dev *hdev,
4094 + enum qca_btsoc_type soc_type)
4095 + {
4096 + const struct firmware *fw;
4097 ++ u8 *data;
4098 + const u8 *segment;
4099 +- int ret, remain, i = 0;
4100 ++ int ret, size, remain, i = 0;
4101 +
4102 + bt_dev_info(hdev, "QCA Downloading %s", config->fwname);
4103 +
4104 +@@ -402,10 +403,22 @@ static int qca_download_firmware(struct hci_dev *hdev,
4105 + return ret;
4106 + }
4107 +
4108 +- qca_tlv_check_data(config, fw, soc_type);
4109 ++ size = fw->size;
4110 ++ data = vmalloc(fw->size);
4111 ++ if (!data) {
4112 ++ bt_dev_err(hdev, "QCA Failed to allocate memory for file: %s",
4113 ++ config->fwname);
4114 ++ release_firmware(fw);
4115 ++ return -ENOMEM;
4116 ++ }
4117 ++
4118 ++ memcpy(data, fw->data, size);
4119 ++ release_firmware(fw);
4120 ++
4121 ++ qca_tlv_check_data(config, data, soc_type);
4122 +
4123 +- segment = fw->data;
4124 +- remain = fw->size;
4125 ++ segment = data;
4126 ++ remain = size;
4127 + while (remain > 0) {
4128 + int segsize = min(MAX_SIZE_PER_TLV_SEGMENT, remain);
4129 +
4130 +@@ -435,7 +448,7 @@ static int qca_download_firmware(struct hci_dev *hdev,
4131 + ret = qca_inject_cmd_complete_event(hdev);
4132 +
4133 + out:
4134 +- release_firmware(fw);
4135 ++ vfree(data);
4136 +
4137 + return ret;
4138 + }
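
The motivation, as far as the context shows, is that qca_tlv_check_data() patches the TLV blob in place (note the nvm_baud_rate local above), while fw->data from request_firmware() is const and may be shared with the firmware cache. The resulting copy-before-patch shape, reduced to its essentials:

	u8 *data = vmalloc(fw->size);

	if (!data) {
		release_firmware(fw);
		return -ENOMEM;
	}
	memcpy(data, fw->data, fw->size);
	release_firmware(fw);	/* only the private copy is touched from here */
	/* ... patch and download 'data' ... */
	vfree(data);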
4139 +diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
4140 +index 0a0056912d51e..dc6551d65912f 100644
4141 +--- a/drivers/bluetooth/hci_qca.c
4142 ++++ b/drivers/bluetooth/hci_qca.c
4143 +@@ -1835,8 +1835,6 @@ static void qca_power_shutdown(struct hci_uart *hu)
4144 + unsigned long flags;
4145 + enum qca_btsoc_type soc_type = qca_soc_type(hu);
4146 +
4147 +- qcadev = serdev_device_get_drvdata(hu->serdev);
4148 +-
4149 + /* From this point we go into power off state. But serial port is
4150 + * still open, stop queueing the IBS data and flush all the buffered
4151 + * data in skb's.
4152 +@@ -1852,6 +1850,8 @@ static void qca_power_shutdown(struct hci_uart *hu)
4153 + if (!hu->serdev)
4154 + return;
4155 +
4156 ++ qcadev = serdev_device_get_drvdata(hu->serdev);
4157 ++
4158 + if (qca_is_wcn399x(soc_type)) {
4159 + host_set_baudrate(hu, 2400);
4160 + qca_send_power_pulse(hu, false);
4161 +diff --git a/drivers/bluetooth/virtio_bt.c b/drivers/bluetooth/virtio_bt.c
4162 +index c804db7e90f8f..57908ce4fae85 100644
4163 +--- a/drivers/bluetooth/virtio_bt.c
4164 ++++ b/drivers/bluetooth/virtio_bt.c
4165 +@@ -34,6 +34,9 @@ static int virtbt_add_inbuf(struct virtio_bluetooth *vbt)
4166 + int err;
4167 +
4168 + skb = alloc_skb(1000, GFP_KERNEL);
4169 ++ if (!skb)
4170 ++ return -ENOMEM;
4171 ++
4172 + sg_init_one(sg, skb->data, 1000);
4173 +
4174 + err = virtqueue_add_inbuf(vq, sg, 1, skb, GFP_KERNEL);
4175 +diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
4176 +index e2e59a341fef6..bbf6cd04861eb 100644
4177 +--- a/drivers/bus/mhi/core/pm.c
4178 ++++ b/drivers/bus/mhi/core/pm.c
4179 +@@ -465,23 +465,15 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
4180 +
4181 + /* Trigger MHI RESET so that the device will not access host memory */
4182 + if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
4183 +- u32 in_reset = -1;
4184 +- unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);
4185 +-
4186 + dev_dbg(dev, "Triggering MHI Reset in device\n");
4187 + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
4188 +
4189 + /* Wait for the reset bit to be cleared by the device */
4190 +- ret = wait_event_timeout(mhi_cntrl->state_event,
4191 +- mhi_read_reg_field(mhi_cntrl,
4192 +- mhi_cntrl->regs,
4193 +- MHICTRL,
4194 +- MHICTRL_RESET_MASK,
4195 +- MHICTRL_RESET_SHIFT,
4196 +- &in_reset) ||
4197 +- !in_reset, timeout);
4198 +- if (!ret || in_reset)
4199 +- dev_err(dev, "Device failed to exit MHI Reset state\n");
4200 ++ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
4201 ++ MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 0,
4202 ++ 25000);
4203 ++ if (ret)
4204 ++ dev_err(dev, "Device failed to clear MHI Reset\n");
4205 +
4206 + /*
4207 + * Device will clear BHI_INTVEC as a part of RESET processing,
4208 +@@ -934,6 +926,7 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
4209 +
4210 + ret = wait_event_timeout(mhi_cntrl->state_event,
4211 + mhi_cntrl->dev_state == MHI_STATE_M0 ||
4212 ++ mhi_cntrl->dev_state == MHI_STATE_M2 ||
4213 + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
4214 + msecs_to_jiffies(mhi_cntrl->timeout_ms));
4215 +
4216 +diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c
4217 +index b3357a8a2fdbc..ca3bc40427f85 100644
4218 +--- a/drivers/bus/mhi/pci_generic.c
4219 ++++ b/drivers/bus/mhi/pci_generic.c
4220 +@@ -665,7 +665,7 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
4221 +
4222 + err = mhi_register_controller(mhi_cntrl, mhi_cntrl_config);
4223 + if (err)
4224 +- return err;
4225 ++ goto err_disable_reporting;
4226 +
4227 + /* MHI bus does not power up the controller by default */
4228 + err = mhi_prepare_for_power_up(mhi_cntrl);
4229 +@@ -699,6 +699,8 @@ err_unprepare:
4230 + mhi_unprepare_after_power_down(mhi_cntrl);
4231 + err_unregister:
4232 + mhi_unregister_controller(mhi_cntrl);
4233 ++err_disable_reporting:
4234 ++ pci_disable_pcie_error_reporting(pdev);
4235 +
4236 + return err;
4237 + }
4238 +@@ -721,6 +723,7 @@ static void mhi_pci_remove(struct pci_dev *pdev)
4239 + pm_runtime_get_noresume(&pdev->dev);
4240 +
4241 + mhi_unregister_controller(mhi_cntrl);
4242 ++ pci_disable_pcie_error_reporting(pdev);
4243 + }
4244 +
4245 + static void mhi_pci_shutdown(struct pci_dev *pdev)
4246 +diff --git a/drivers/char/hw_random/exynos-trng.c b/drivers/char/hw_random/exynos-trng.c
4247 +index 8e1fe3f8dd2df..c8db62bc5ff72 100644
4248 +--- a/drivers/char/hw_random/exynos-trng.c
4249 ++++ b/drivers/char/hw_random/exynos-trng.c
4250 +@@ -132,7 +132,7 @@ static int exynos_trng_probe(struct platform_device *pdev)
4251 + return PTR_ERR(trng->mem);
4252 +
4253 + pm_runtime_enable(&pdev->dev);
4254 +- ret = pm_runtime_get_sync(&pdev->dev);
4255 ++ ret = pm_runtime_resume_and_get(&pdev->dev);
4256 + if (ret < 0) {
4257 + dev_err(&pdev->dev, "Could not get runtime PM.\n");
4258 + goto err_pm_get;
4259 +@@ -165,7 +165,7 @@ err_register:
4260 + clk_disable_unprepare(trng->clk);
4261 +
4262 + err_clock:
4263 +- pm_runtime_put_sync(&pdev->dev);
4264 ++ pm_runtime_put_noidle(&pdev->dev);
4265 +
4266 + err_pm_get:
4267 + pm_runtime_disable(&pdev->dev);
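
Unlike pm_runtime_get_sync(), pm_runtime_resume_and_get() drops the usage count itself when the resume fails, which is why the err_pm_get path performs no put; the later error path switches to pm_runtime_put_noidle() so the count stays balanced without forcing an idle transition right before pm_runtime_disable(). The success-path contract, in sketch form:

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;	/* usage count already dropped by the helper */
	/* ... on a later failure: pm_runtime_put_noidle(dev); */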
4268 +diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
4269 +index 89681f07bc787..9468e9520cee0 100644
4270 +--- a/drivers/char/pcmcia/cm4000_cs.c
4271 ++++ b/drivers/char/pcmcia/cm4000_cs.c
4272 +@@ -544,6 +544,10 @@ static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq)
4273 + io_read_num_rec_bytes(iobase, &num_bytes_read);
4274 + if (num_bytes_read >= 4) {
4275 + DEBUGP(2, dev, "NumRecBytes = %i\n", num_bytes_read);
4276 ++ if (num_bytes_read > 4) {
4277 ++ rc = -EIO;
4278 ++ goto exit_setprotocol;
4279 ++ }
4280 + break;
4281 + }
4282 + usleep_range(10000, 11000);
4283 +diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
4284 +index 55b9d3965ae1b..69579efb247b3 100644
4285 +--- a/drivers/char/tpm/tpm_tis_core.c
4286 ++++ b/drivers/char/tpm/tpm_tis_core.c
4287 +@@ -196,13 +196,24 @@ static u8 tpm_tis_status(struct tpm_chip *chip)
4288 + return 0;
4289 +
4290 + if (unlikely((status & TPM_STS_READ_ZERO) != 0)) {
4291 +- /*
4292 +- * If this trips, the chances are the read is
4293 +- * returning 0xff because the locality hasn't been
4294 +- * acquired. Usually because tpm_try_get_ops() hasn't
4295 +- * been called before doing a TPM operation.
4296 +- */
4297 +- WARN_ONCE(1, "TPM returned invalid status\n");
4298 ++ if (!test_and_set_bit(TPM_TIS_INVALID_STATUS, &priv->flags)) {
4299 ++ /*
4300 ++ * If this trips, the chances are the read is
4301 ++ * returning 0xff because the locality hasn't been
4302 ++ * acquired. Usually because tpm_try_get_ops() hasn't
4303 ++ * been called before doing a TPM operation.
4304 ++ */
4305 ++ dev_err(&chip->dev, "invalid TPM_STS.x 0x%02x, dumping stack for forensics\n",
4306 ++ status);
4307 ++
4308 ++ /*
4309 ++ * Dump stack for forensics, as invalid TPM_STS.x could be
4310 ++ * potentially triggered by impaired tpm_try_get_ops() or
4311 ++ * tpm_find_get_ops().
4312 ++ */
4313 ++ dump_stack();
4314 ++ }
4315 ++
4316 + return 0;
4317 + }
4318 +
4319 +diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
4320 +index 9b2d32a59f670..b2a3c6c72882d 100644
4321 +--- a/drivers/char/tpm/tpm_tis_core.h
4322 ++++ b/drivers/char/tpm/tpm_tis_core.h
4323 +@@ -83,6 +83,7 @@ enum tis_defaults {
4324 +
4325 + enum tpm_tis_flags {
4326 + TPM_TIS_ITPM_WORKAROUND = BIT(0),
4327 ++ TPM_TIS_INVALID_STATUS = BIT(1),
4328 + };
4329 +
4330 + struct tpm_tis_data {
4331 +@@ -90,7 +91,7 @@ struct tpm_tis_data {
4332 + int locality;
4333 + int irq;
4334 + bool irq_tested;
4335 +- unsigned int flags;
4336 ++ unsigned long flags;
4337 + void __iomem *ilb_base_addr;
4338 + u16 clkrun_enabled;
4339 + wait_queue_head_t int_queue;
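
Widening tpm_tis_data::flags to unsigned long is what makes the test_and_set_bit() call above legal: the atomic bitops operate on unsigned long bitmaps. The resulting one-shot logging idiom, in isolation:

	/* The first caller wins and logs; later callers find the bit set. */
	if (!test_and_set_bit(TPM_TIS_INVALID_STATUS, &priv->flags)) {
		dev_err(&chip->dev, "invalid TPM_STS.x 0x%02x\n", status);
		dump_stack();	/* forensics for the broken-locality case */
	}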
4340 +diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c
4341 +index 3856f6ebcb34f..de4209003a448 100644
4342 +--- a/drivers/char/tpm/tpm_tis_spi_main.c
4343 ++++ b/drivers/char/tpm/tpm_tis_spi_main.c
4344 +@@ -260,6 +260,8 @@ static int tpm_tis_spi_remove(struct spi_device *dev)
4345 + }
4346 +
4347 + static const struct spi_device_id tpm_tis_spi_id[] = {
4348 ++ { "st33htpm-spi", (unsigned long)tpm_tis_spi_probe },
4349 ++ { "slb9670", (unsigned long)tpm_tis_spi_probe },
4350 + { "tpm_tis_spi", (unsigned long)tpm_tis_spi_probe },
4351 + { "cr50", (unsigned long)cr50_spi_probe },
4352 + {}
4353 +diff --git a/drivers/clk/actions/owl-s500.c b/drivers/clk/actions/owl-s500.c
4354 +index 61bb224f63309..cbeb51c804eb5 100644
4355 +--- a/drivers/clk/actions/owl-s500.c
4356 ++++ b/drivers/clk/actions/owl-s500.c
4357 +@@ -127,8 +127,7 @@ static struct clk_factor_table sd_factor_table[] = {
4358 + { 12, 1, 13 }, { 13, 1, 14 }, { 14, 1, 15 }, { 15, 1, 16 },
4359 + { 16, 1, 17 }, { 17, 1, 18 }, { 18, 1, 19 }, { 19, 1, 20 },
4360 + { 20, 1, 21 }, { 21, 1, 22 }, { 22, 1, 23 }, { 23, 1, 24 },
4361 +- { 24, 1, 25 }, { 25, 1, 26 }, { 26, 1, 27 }, { 27, 1, 28 },
4362 +- { 28, 1, 29 }, { 29, 1, 30 }, { 30, 1, 31 }, { 31, 1, 32 },
4363 ++ { 24, 1, 25 },
4364 +
4365 + /* bit8: /128 */
4366 + { 256, 1, 1 * 128 }, { 257, 1, 2 * 128 }, { 258, 1, 3 * 128 }, { 259, 1, 4 * 128 },
4367 +@@ -137,19 +136,20 @@ static struct clk_factor_table sd_factor_table[] = {
4368 + { 268, 1, 13 * 128 }, { 269, 1, 14 * 128 }, { 270, 1, 15 * 128 }, { 271, 1, 16 * 128 },
4369 + { 272, 1, 17 * 128 }, { 273, 1, 18 * 128 }, { 274, 1, 19 * 128 }, { 275, 1, 20 * 128 },
4370 + { 276, 1, 21 * 128 }, { 277, 1, 22 * 128 }, { 278, 1, 23 * 128 }, { 279, 1, 24 * 128 },
4371 +- { 280, 1, 25 * 128 }, { 281, 1, 26 * 128 }, { 282, 1, 27 * 128 }, { 283, 1, 28 * 128 },
4372 +- { 284, 1, 29 * 128 }, { 285, 1, 30 * 128 }, { 286, 1, 31 * 128 }, { 287, 1, 32 * 128 },
4373 ++ { 280, 1, 25 * 128 },
4374 + { 0, 0, 0 },
4375 + };
4376 +
4377 +-static struct clk_factor_table bisp_factor_table[] = {
4378 +- { 0, 1, 1 }, { 1, 1, 2 }, { 2, 1, 3 }, { 3, 1, 4 },
4379 +- { 4, 1, 5 }, { 5, 1, 6 }, { 6, 1, 7 }, { 7, 1, 8 },
4380 ++static struct clk_factor_table de_factor_table[] = {
4381 ++ { 0, 1, 1 }, { 1, 2, 3 }, { 2, 1, 2 }, { 3, 2, 5 },
4382 ++ { 4, 1, 3 }, { 5, 1, 4 }, { 6, 1, 6 }, { 7, 1, 8 },
4383 ++ { 8, 1, 12 },
4384 + { 0, 0, 0 },
4385 + };
4386 +
4387 +-static struct clk_factor_table ahb_factor_table[] = {
4388 +- { 1, 1, 2 }, { 2, 1, 3 },
4389 ++static struct clk_factor_table hde_factor_table[] = {
4390 ++ { 0, 1, 1 }, { 1, 2, 3 }, { 2, 1, 2 }, { 3, 2, 5 },
4391 ++ { 4, 1, 3 }, { 5, 1, 4 }, { 6, 1, 6 }, { 7, 1, 8 },
4392 + { 0, 0, 0 },
4393 + };
4394 +
4395 +@@ -158,6 +158,13 @@ static struct clk_div_table rmii_ref_div_table[] = {
4396 + { 0, 0 },
4397 + };
4398 +
4399 ++static struct clk_div_table std12rate_div_table[] = {
4400 ++ { 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
4401 ++ { 4, 5 }, { 5, 6 }, { 6, 7 }, { 7, 8 },
4402 ++ { 8, 9 }, { 9, 10 }, { 10, 11 }, { 11, 12 },
4403 ++ { 0, 0 },
4404 ++};
4405 ++
4406 + static struct clk_div_table i2s_div_table[] = {
4407 + { 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
4408 + { 4, 6 }, { 5, 8 }, { 6, 12 }, { 7, 16 },
4409 +@@ -174,7 +181,6 @@ static struct clk_div_table nand_div_table[] = {
4410 +
4411 + /* mux clock */
4412 + static OWL_MUX(dev_clk, "dev_clk", dev_clk_mux_p, CMU_DEVPLL, 12, 1, CLK_SET_RATE_PARENT);
4413 +-static OWL_MUX(ahbprediv_clk, "ahbprediv_clk", ahbprediv_clk_mux_p, CMU_BUSCLK1, 8, 3, CLK_SET_RATE_PARENT);
4414 +
4415 + /* gate clocks */
4416 + static OWL_GATE(gpio_clk, "gpio_clk", "apb_clk", CMU_DEVCLKEN0, 18, 0, 0);
4417 +@@ -187,45 +193,54 @@ static OWL_GATE(timer_clk, "timer_clk", "hosc", CMU_DEVCLKEN1, 27, 0, 0);
4418 + static OWL_GATE(hdmi_clk, "hdmi_clk", "hosc", CMU_DEVCLKEN1, 3, 0, 0);
4419 +
4420 + /* divider clocks */
4421 +-static OWL_DIVIDER(h_clk, "h_clk", "ahbprediv_clk", CMU_BUSCLK1, 12, 2, NULL, 0, 0);
4422 ++static OWL_DIVIDER(h_clk, "h_clk", "ahbprediv_clk", CMU_BUSCLK1, 2, 2, NULL, 0, 0);
4423 + static OWL_DIVIDER(apb_clk, "apb_clk", "ahb_clk", CMU_BUSCLK1, 14, 2, NULL, 0, 0);
4424 + static OWL_DIVIDER(rmii_ref_clk, "rmii_ref_clk", "ethernet_pll_clk", CMU_ETHERNETPLL, 1, 1, rmii_ref_div_table, 0, 0);
4425 +
4426 + /* factor clocks */
4427 +-static OWL_FACTOR(ahb_clk, "ahb_clk", "h_clk", CMU_BUSCLK1, 2, 2, ahb_factor_table, 0, 0);
4428 +-static OWL_FACTOR(de1_clk, "de_clk1", "de_clk", CMU_DECLK, 0, 3, bisp_factor_table, 0, 0);
4429 +-static OWL_FACTOR(de2_clk, "de_clk2", "de_clk", CMU_DECLK, 4, 3, bisp_factor_table, 0, 0);
4430 ++static OWL_FACTOR(de1_clk, "de_clk1", "de_clk", CMU_DECLK, 0, 4, de_factor_table, 0, 0);
4431 ++static OWL_FACTOR(de2_clk, "de_clk2", "de_clk", CMU_DECLK, 4, 4, de_factor_table, 0, 0);
4432 +
4433 + /* composite clocks */
4434 ++static OWL_COMP_DIV(ahbprediv_clk, "ahbprediv_clk", ahbprediv_clk_mux_p,
4435 ++ OWL_MUX_HW(CMU_BUSCLK1, 8, 3),
4436 ++ { 0 },
4437 ++ OWL_DIVIDER_HW(CMU_BUSCLK1, 12, 2, 0, NULL),
4438 ++ CLK_SET_RATE_PARENT);
4439 ++
4440 ++static OWL_COMP_FIXED_FACTOR(ahb_clk, "ahb_clk", "h_clk",
4441 ++ { 0 },
4442 ++ 1, 1, 0);
4443 ++
4444 + static OWL_COMP_FACTOR(vce_clk, "vce_clk", hde_clk_mux_p,
4445 + OWL_MUX_HW(CMU_VCECLK, 4, 2),
4446 + OWL_GATE_HW(CMU_DEVCLKEN0, 26, 0),
4447 +- OWL_FACTOR_HW(CMU_VCECLK, 0, 3, 0, bisp_factor_table),
4448 ++ OWL_FACTOR_HW(CMU_VCECLK, 0, 3, 0, hde_factor_table),
4449 + 0);
4450 +
4451 + static OWL_COMP_FACTOR(vde_clk, "vde_clk", hde_clk_mux_p,
4452 + OWL_MUX_HW(CMU_VDECLK, 4, 2),
4453 + OWL_GATE_HW(CMU_DEVCLKEN0, 25, 0),
4454 +- OWL_FACTOR_HW(CMU_VDECLK, 0, 3, 0, bisp_factor_table),
4455 ++ OWL_FACTOR_HW(CMU_VDECLK, 0, 3, 0, hde_factor_table),
4456 + 0);
4457 +
4458 +-static OWL_COMP_FACTOR(bisp_clk, "bisp_clk", bisp_clk_mux_p,
4459 ++static OWL_COMP_DIV(bisp_clk, "bisp_clk", bisp_clk_mux_p,
4460 + OWL_MUX_HW(CMU_BISPCLK, 4, 1),
4461 + OWL_GATE_HW(CMU_DEVCLKEN0, 14, 0),
4462 +- OWL_FACTOR_HW(CMU_BISPCLK, 0, 3, 0, bisp_factor_table),
4463 ++ OWL_DIVIDER_HW(CMU_BISPCLK, 0, 4, 0, std12rate_div_table),
4464 + 0);
4465 +
4466 +-static OWL_COMP_FACTOR(sensor0_clk, "sensor0_clk", sensor_clk_mux_p,
4467 ++static OWL_COMP_DIV(sensor0_clk, "sensor0_clk", sensor_clk_mux_p,
4468 + OWL_MUX_HW(CMU_SENSORCLK, 4, 1),
4469 + OWL_GATE_HW(CMU_DEVCLKEN0, 14, 0),
4470 +- OWL_FACTOR_HW(CMU_SENSORCLK, 0, 3, 0, bisp_factor_table),
4471 +- CLK_IGNORE_UNUSED);
4472 ++ OWL_DIVIDER_HW(CMU_SENSORCLK, 0, 4, 0, std12rate_div_table),
4473 ++ 0);
4474 +
4475 +-static OWL_COMP_FACTOR(sensor1_clk, "sensor1_clk", sensor_clk_mux_p,
4476 ++static OWL_COMP_DIV(sensor1_clk, "sensor1_clk", sensor_clk_mux_p,
4477 + OWL_MUX_HW(CMU_SENSORCLK, 4, 1),
4478 + OWL_GATE_HW(CMU_DEVCLKEN0, 14, 0),
4479 +- OWL_FACTOR_HW(CMU_SENSORCLK, 8, 3, 0, bisp_factor_table),
4480 +- CLK_IGNORE_UNUSED);
4481 ++ OWL_DIVIDER_HW(CMU_SENSORCLK, 8, 4, 0, std12rate_div_table),
4482 ++ 0);
4483 +
4484 + static OWL_COMP_FACTOR(sd0_clk, "sd0_clk", sd_clk_mux_p,
4485 + OWL_MUX_HW(CMU_SD0CLK, 9, 1),
4486 +@@ -305,7 +320,7 @@ static OWL_COMP_FIXED_FACTOR(i2c3_clk, "i2c3_clk", "ethernet_pll_clk",
4487 + static OWL_COMP_DIV(uart0_clk, "uart0_clk", uart_clk_mux_p,
4488 + OWL_MUX_HW(CMU_UART0CLK, 16, 1),
4489 + OWL_GATE_HW(CMU_DEVCLKEN1, 6, 0),
4490 +- OWL_DIVIDER_HW(CMU_UART1CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
4491 ++ OWL_DIVIDER_HW(CMU_UART0CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
4492 + CLK_IGNORE_UNUSED);
4493 +
4494 + static OWL_COMP_DIV(uart1_clk, "uart1_clk", uart_clk_mux_p,
4495 +@@ -317,31 +332,31 @@ static OWL_COMP_DIV(uart1_clk, "uart1_clk", uart_clk_mux_p,
4496 + static OWL_COMP_DIV(uart2_clk, "uart2_clk", uart_clk_mux_p,
4497 + OWL_MUX_HW(CMU_UART2CLK, 16, 1),
4498 + OWL_GATE_HW(CMU_DEVCLKEN1, 8, 0),
4499 +- OWL_DIVIDER_HW(CMU_UART1CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
4500 ++ OWL_DIVIDER_HW(CMU_UART2CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
4501 + CLK_IGNORE_UNUSED);
4502 +
4503 + static OWL_COMP_DIV(uart3_clk, "uart3_clk", uart_clk_mux_p,
4504 + OWL_MUX_HW(CMU_UART3CLK, 16, 1),
4505 + OWL_GATE_HW(CMU_DEVCLKEN1, 19, 0),
4506 +- OWL_DIVIDER_HW(CMU_UART1CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
4507 ++ OWL_DIVIDER_HW(CMU_UART3CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
4508 + CLK_IGNORE_UNUSED);
4509 +
4510 + static OWL_COMP_DIV(uart4_clk, "uart4_clk", uart_clk_mux_p,
4511 + OWL_MUX_HW(CMU_UART4CLK, 16, 1),
4512 + OWL_GATE_HW(CMU_DEVCLKEN1, 20, 0),
4513 +- OWL_DIVIDER_HW(CMU_UART1CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
4514 ++ OWL_DIVIDER_HW(CMU_UART4CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
4515 + CLK_IGNORE_UNUSED);
4516 +
4517 + static OWL_COMP_DIV(uart5_clk, "uart5_clk", uart_clk_mux_p,
4518 + OWL_MUX_HW(CMU_UART5CLK, 16, 1),
4519 + OWL_GATE_HW(CMU_DEVCLKEN1, 21, 0),
4520 +- OWL_DIVIDER_HW(CMU_UART1CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
4521 ++ OWL_DIVIDER_HW(CMU_UART5CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
4522 + CLK_IGNORE_UNUSED);
4523 +
4524 + static OWL_COMP_DIV(uart6_clk, "uart6_clk", uart_clk_mux_p,
4525 + OWL_MUX_HW(CMU_UART6CLK, 16, 1),
4526 + OWL_GATE_HW(CMU_DEVCLKEN1, 18, 0),
4527 +- OWL_DIVIDER_HW(CMU_UART1CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
4528 ++ OWL_DIVIDER_HW(CMU_UART6CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
4529 + CLK_IGNORE_UNUSED);
4530 +
4531 + static OWL_COMP_DIV(i2srx_clk, "i2srx_clk", i2s_clk_mux_p,
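
A note on the owl-s500 changes above: each UART divider now points at its own CMU_UARTxCLK register instead of the copy-pasted CMU_UART1CLK, and the new std12rate_div_table follows the common clock framework's struct clk_div_table convention, where each entry maps a register field value to a divisor (field value N selects divide-by-N+1 here) and an all-zero entry terminates the table. A minimal sketch of registering a divider from such a table; the device, register offset, and clock names are hypothetical, not taken from this patch:

    #include <linux/clk-provider.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);

    /* field value N selects divide-by-(N + 1); the zero entry is the sentinel */
    static const struct clk_div_table example_div_table[] = {
            { .val = 0, .div = 1 },
            { .val = 1, .div = 2 },
            { .val = 11, .div = 12 },
            { /* sentinel */ }
    };

    static struct clk_hw *example_register_div(void __iomem *base)
    {
            /* 4-bit divider field at bits [3:0] of a hypothetical register */
            return clk_hw_register_divider_table(NULL, "example_div",
                            "example_parent", 0, base + 0x100, 0, 4, 0,
                            example_div_table, &example_lock);
    }
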
4532 +diff --git a/drivers/clk/clk-k210.c b/drivers/clk/clk-k210.c
4533 +index 6c84abf5b2e36..67a7cb3503c36 100644
4534 +--- a/drivers/clk/clk-k210.c
4535 ++++ b/drivers/clk/clk-k210.c
4536 +@@ -722,6 +722,7 @@ static int k210_clk_set_parent(struct clk_hw *hw, u8 index)
4537 + reg |= BIT(cfg->mux_bit);
4538 + else
4539 + reg &= ~BIT(cfg->mux_bit);
4540 ++ writel(reg, ksc->regs + cfg->mux_reg);
4541 + spin_unlock_irqrestore(&ksc->clk_lock, flags);
4542 +
4543 + return 0;
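
The clk-k210.c fix above is a one-line read-modify-write omission: the mux bit was set or cleared in a local copy of the register, but without the added writel() the result never reached the hardware, so set_parent silently did nothing. A minimal sketch of the complete pattern under a spinlock, with hypothetical names:

    #include <linux/bits.h>
    #include <linux/io.h>
    #include <linux/spinlock.h>

    static int example_set_mux(void __iomem *reg, unsigned int bit, bool on,
                               spinlock_t *lock)
    {
            unsigned long flags;
            u32 val;

            spin_lock_irqsave(lock, flags);
            val = readl(reg);                /* read */
            if (on)
                    val |= BIT(bit);         /* modify */
            else
                    val &= ~BIT(bit);
            writel(val, reg);                /* write back: the missing step */
            spin_unlock_irqrestore(lock, flags);

            return 0;
    }
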
4544 +diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c
4545 +index e0446e66fa645..eb22f4fdbc6b4 100644
4546 +--- a/drivers/clk/clk-si5341.c
4547 ++++ b/drivers/clk/clk-si5341.c
4548 +@@ -92,12 +92,22 @@ struct clk_si5341_output_config {
4549 + #define SI5341_PN_BASE 0x0002
4550 + #define SI5341_DEVICE_REV 0x0005
4551 + #define SI5341_STATUS 0x000C
4552 ++#define SI5341_LOS 0x000D
4553 ++#define SI5341_STATUS_STICKY 0x0011
4554 ++#define SI5341_LOS_STICKY 0x0012
4555 + #define SI5341_SOFT_RST 0x001C
4556 + #define SI5341_IN_SEL 0x0021
4557 ++#define SI5341_DEVICE_READY 0x00FE
4558 + #define SI5341_XAXB_CFG 0x090E
4559 + #define SI5341_IN_EN 0x0949
4560 + #define SI5341_INX_TO_PFD_EN 0x094A
4561 +
4562 ++/* Status bits */
4563 ++#define SI5341_STATUS_SYSINCAL BIT(0)
4564 ++#define SI5341_STATUS_LOSXAXB BIT(1)
4565 ++#define SI5341_STATUS_LOSREF BIT(2)
4566 ++#define SI5341_STATUS_LOL BIT(3)
4567 ++
4568 + /* Input selection */
4569 + #define SI5341_IN_SEL_MASK 0x06
4570 + #define SI5341_IN_SEL_SHIFT 1
4571 +@@ -340,6 +350,8 @@ static const struct si5341_reg_default si5341_reg_defaults[] = {
4572 + { 0x094A, 0x00 }, /* INx_TO_PFD_EN (disabled) */
4573 + { 0x0A02, 0x00 }, /* Not in datasheet */
4574 + { 0x0B44, 0x0F }, /* PDIV_ENB (datasheet does not mention what it is) */
4575 ++ { 0x0B57, 0x10 }, /* VCO_RESET_CALCODE (not described in datasheet) */
4576 ++ { 0x0B58, 0x05 }, /* VCO_RESET_CALCODE (not described in datasheet) */
4577 + };
4578 +
4579 + /* Read and interpret a 44-bit followed by a 32-bit value in the regmap */
4580 +@@ -623,6 +635,9 @@ static unsigned long si5341_synth_clk_recalc_rate(struct clk_hw *hw,
4581 + SI5341_SYNTH_N_NUM(synth->index), &n_num, &n_den);
4582 + if (err < 0)
4583 + return err;
4584 ++ /* Check for bogus/uninitialized settings */
4585 ++ if (!n_num || !n_den)
4586 ++ return 0;
4587 +
4588 + /*
4589 + * n_num and n_den are shifted left as much as possible, so to prevent
4590 +@@ -806,6 +821,9 @@ static long si5341_output_clk_round_rate(struct clk_hw *hw, unsigned long rate,
4591 + {
4592 + unsigned long r;
4593 +
4594 ++ if (!rate)
4595 ++ return 0;
4596 ++
4597 + r = *parent_rate >> 1;
4598 +
4599 + /* If rate is an even divisor, no changes to parent required */
4600 +@@ -834,11 +852,16 @@ static int si5341_output_clk_set_rate(struct clk_hw *hw, unsigned long rate,
4601 + unsigned long parent_rate)
4602 + {
4603 + struct clk_si5341_output *output = to_clk_si5341_output(hw);
4604 +- /* Frequency divider is (r_div + 1) * 2 */
4605 +- u32 r_div = (parent_rate / rate) >> 1;
4606 ++ u32 r_div;
4607 + int err;
4608 + u8 r[3];
4609 +
4610 ++ if (!rate)
4611 ++ return -EINVAL;
4612 ++
4613 ++ /* Frequency divider is (r_div + 1) * 2 */
4614 ++ r_div = (parent_rate / rate) >> 1;
4615 ++
4616 + if (r_div <= 1)
4617 + r_div = 0;
4618 + else if (r_div >= BIT(24))
4619 +@@ -1083,7 +1106,7 @@ static const struct si5341_reg_default si5341_preamble[] = {
4620 + { 0x0B25, 0x00 },
4621 + { 0x0502, 0x01 },
4622 + { 0x0505, 0x03 },
4623 +- { 0x0957, 0x1F },
4624 ++ { 0x0957, 0x17 },
4625 + { 0x0B4E, 0x1A },
4626 + };
4627 +
4628 +@@ -1189,6 +1212,32 @@ static const struct regmap_range_cfg si5341_regmap_ranges[] = {
4629 + },
4630 + };
4631 +
4632 ++static int si5341_wait_device_ready(struct i2c_client *client)
4633 ++{
4634 ++ int count;
4635 ++
4636 ++ /* Datasheet warns: Any attempt to read or write any register other
4637 ++ * than DEVICE_READY before DEVICE_READY reads as 0x0F may corrupt the
4638 ++ * NVM programming and may corrupt the register contents, as they are
4639 ++ * read from NVM. Note that this includes accesses to the PAGE register.
4640 ++ * Also: DEVICE_READY is available on every register page, so no page
4641 ++ * change is needed to read it.
4642 ++ * Do this outside regmap to avoid automatic PAGE register access.
4643 ++ * May take up to 300ms to complete.
4644 ++ */
4645 ++ for (count = 0; count < 15; ++count) {
4646 ++ s32 result = i2c_smbus_read_byte_data(client,
4647 ++ SI5341_DEVICE_READY);
4648 ++ if (result < 0)
4649 ++ return result;
4650 ++ if (result == 0x0F)
4651 ++ return 0;
4652 ++ msleep(20);
4653 ++ }
4654 ++ dev_err(&client->dev, "timeout waiting for DEVICE_READY\n");
4655 ++ return -EIO;
4656 ++}
4657 ++
4658 + static const struct regmap_config si5341_regmap_config = {
4659 + .reg_bits = 8,
4660 + .val_bits = 8,
4661 +@@ -1378,6 +1427,7 @@ static int si5341_probe(struct i2c_client *client,
4662 + unsigned int i;
4663 + struct clk_si5341_output_config config[SI5341_MAX_NUM_OUTPUTS];
4664 + bool initialization_required;
4665 ++ u32 status;
4666 +
4667 + data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
4668 + if (!data)
4669 +@@ -1385,6 +1435,11 @@ static int si5341_probe(struct i2c_client *client,
4670 +
4671 + data->i2c_client = client;
4672 +
4673 ++ /* Must be done before otherwise touching hardware */
4674 ++ err = si5341_wait_device_ready(client);
4675 ++ if (err)
4676 ++ return err;
4677 ++
4678 + for (i = 0; i < SI5341_NUM_INPUTS; ++i) {
4679 + input = devm_clk_get(&client->dev, si5341_input_clock_names[i]);
4680 + if (IS_ERR(input)) {
4681 +@@ -1540,6 +1595,22 @@ static int si5341_probe(struct i2c_client *client,
4682 + return err;
4683 + }
4684 +
4685 ++ /* wait for device to report input clock present and PLL lock */
4686 ++ err = regmap_read_poll_timeout(data->regmap, SI5341_STATUS, status,
4687 ++ !(status & (SI5341_STATUS_LOSREF | SI5341_STATUS_LOL)),
4688 ++ 10000, 250000);
4689 ++ if (err) {
4690 ++ dev_err(&client->dev, "Error waiting for input clock or PLL lock\n");
4691 ++ return err;
4692 ++ }
4693 ++
4694 ++ /* clear sticky alarm bits from initialization */
4695 ++ err = regmap_write(data->regmap, SI5341_STATUS_STICKY, 0);
4696 ++ if (err) {
4697 ++ dev_err(&client->dev, "unable to clear sticky status\n");
4698 ++ return err;
4699 ++ }
4700 ++
4701 + /* Free the names, clk framework makes copies */
4702 + for (i = 0; i < data->num_synth; ++i)
4703 + devm_kfree(&client->dev, (void *)synth_clock_names[i]);
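
Three separate hardening measures land in clk-si5341.c above. The DEVICE_READY poll deliberately uses raw i2c_smbus_read_byte_data() rather than regmap, because the driver's regmap is paged and any access through it may first write the PAGE register, which is exactly the traffic the datasheet forbids before the device is ready. After configuration, regmap_read_poll_timeout(map, reg, val, cond, sleep_us, timeout_us) waits for input clock and PLL lock, here checking SI5341_STATUS every 10 ms for up to 250 ms, and the sticky alarm bits accumulated during bring-up are then cleared. A minimal sketch of the same two-stage bring-up for a hypothetical paged I2C chip (register addresses and magic values are illustrative only):

    #include <linux/bits.h>
    #include <linux/delay.h>
    #include <linux/i2c.h>
    #include <linux/regmap.h>

    #define EXAMPLE_READY_REG   0xFE   /* hypothetical; readable on any page */
    #define EXAMPLE_READY_MAGIC 0x0F
    #define EXAMPLE_STATUS_REG  0x0C   /* hypothetical status register */
    #define EXAMPLE_LOL         BIT(3) /* loss of lock */

    static int example_bringup(struct i2c_client *client, struct regmap *map)
    {
            unsigned int status;
            s32 ret = -EIO;
            int i;

            /* 1) raw SMBus access only, until the chip reports ready */
            for (i = 0; i < 15; i++) {
                    ret = i2c_smbus_read_byte_data(client, EXAMPLE_READY_REG);
                    if (ret < 0)
                            return ret;
                    if (ret == EXAMPLE_READY_MAGIC)
                            break;
                    msleep(20);
            }
            if (ret != EXAMPLE_READY_MAGIC)
                    return -EIO;

            /* 2) regmap is safe to use now; wait for PLL lock */
            return regmap_read_poll_timeout(map, EXAMPLE_STATUS_REG, status,
                                            !(status & EXAMPLE_LOL),
                                            10000, 250000);
    }
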
4704 +diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c
4705 +index 344cd6c611887..3c737742c2a92 100644
4706 +--- a/drivers/clk/clk-versaclock5.c
4707 ++++ b/drivers/clk/clk-versaclock5.c
4708 +@@ -69,7 +69,10 @@
4709 + #define VC5_FEEDBACK_FRAC_DIV(n) (0x19 + (n))
4710 + #define VC5_RC_CONTROL0 0x1e
4711 + #define VC5_RC_CONTROL1 0x1f
4712 +-/* Register 0x20 is factory reserved */
4713 ++
4714 ++/* These registers are named "Unused Factory Reserved Registers" */
4715 ++#define VC5_RESERVED_X0(idx) (0x20 + ((idx) * 0x10))
4716 ++#define VC5_RESERVED_X0_BYPASS_SYNC BIT(7) /* bypass_sync<idx> bit */
4717 +
4718 + /* Output divider control for divider 1,2,3,4 */
4719 + #define VC5_OUT_DIV_CONTROL(idx) (0x21 + ((idx) * 0x10))
4720 +@@ -87,7 +90,6 @@
4721 + #define VC5_OUT_DIV_SKEW_INT(idx, n) (0x2b + ((idx) * 0x10) + (n))
4722 + #define VC5_OUT_DIV_INT(idx, n) (0x2d + ((idx) * 0x10) + (n))
4723 + #define VC5_OUT_DIV_SKEW_FRAC(idx) (0x2f + ((idx) * 0x10))
4724 +-/* Registers 0x30, 0x40, 0x50 are factory reserved */
4725 +
4726 + /* Clock control register for clock 1,2 */
4727 + #define VC5_CLK_OUTPUT_CFG(idx, n) (0x60 + ((idx) * 0x2) + (n))
4728 +@@ -140,6 +142,8 @@
4729 + #define VC5_HAS_INTERNAL_XTAL BIT(0)
4730 + /* chip has PFD frequency doubler */
4731 + #define VC5_HAS_PFD_FREQ_DBL BIT(1)
4732 ++/* chip has bits to disable FOD sync */
4733 ++#define VC5_HAS_BYPASS_SYNC_BIT BIT(2)
4734 +
4735 + /* Supported IDT VC5 models. */
4736 + enum vc5_model {
4737 +@@ -581,6 +585,23 @@ static int vc5_clk_out_prepare(struct clk_hw *hw)
4738 + unsigned int src;
4739 + int ret;
4740 +
4741 ++ /*
4742 ++ * When enabling a FOD, all currently enabled FODs are briefly
4743 ++ * stopped in order to synchronize all of them. This causes a clock
4744 ++ * disruption to any unrelated chips that might be already using
4745 ++ * other clock outputs. To avoid the issue, bypass the sync
4746 ++ * feature; the VersaClock 6E family allows this via otherwise
4747 ++ * reserved registers.
4748 ++ */
4749 ++ if (vc5->chip_info->flags & VC5_HAS_BYPASS_SYNC_BIT) {
4750 ++ ret = regmap_update_bits(vc5->regmap,
4751 ++ VC5_RESERVED_X0(hwdata->num),
4752 ++ VC5_RESERVED_X0_BYPASS_SYNC,
4753 ++ VC5_RESERVED_X0_BYPASS_SYNC);
4754 ++ if (ret)
4755 ++ return ret;
4756 ++ }
4757 ++
4758 + /*
4759 + * If the input mux is disabled, enable it first and
4760 + * select source from matching FOD.
4761 +@@ -1166,7 +1187,7 @@ static const struct vc5_chip_info idt_5p49v6965_info = {
4762 + .model = IDT_VC6_5P49V6965,
4763 + .clk_fod_cnt = 4,
4764 + .clk_out_cnt = 5,
4765 +- .flags = 0,
4766 ++ .flags = VC5_HAS_BYPASS_SYNC_BIT,
4767 + };
4768 +
4769 + static const struct i2c_device_id vc5_id[] = {
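
The versaclock changes show a tidy way to ship a chip-specific quirk: the FOD sync bypass is described by a capability flag in the per-model chip_info (only the 5P49V6965 sets VC5_HAS_BYPASS_SYNC_BIT), and the prepare path tests the flag rather than the model enum, so future parts only need to set the bit. A minimal sketch of the pattern, with hypothetical names:

    #include <linux/bits.h>
    #include <linux/regmap.h>

    #define EXAMPLE_HAS_QUIRK BIT(2)   /* capability flag, hypothetical */
    #define EXAMPLE_QUIRK_REG 0x20     /* reserved register, hypothetical */
    #define EXAMPLE_QUIRK_BIT BIT(7)

    struct example_chip_info {
            unsigned int flags;
    };

    static int example_prepare(struct regmap *map,
                               const struct example_chip_info *info)
    {
            /* only touch the reserved register on parts that support it */
            if (!(info->flags & EXAMPLE_HAS_QUIRK))
                    return 0;

            return regmap_update_bits(map, EXAMPLE_QUIRK_REG,
                                      EXAMPLE_QUIRK_BIT, EXAMPLE_QUIRK_BIT);
    }
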
4770 +diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
4771 +index b08019e1faf99..c491bc9c61ce7 100644
4772 +--- a/drivers/clk/imx/clk-imx8mq.c
4773 ++++ b/drivers/clk/imx/clk-imx8mq.c
4774 +@@ -358,46 +358,26 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
4775 + hws[IMX8MQ_VIDEO2_PLL_OUT] = imx_clk_hw_sscg_pll("video2_pll_out", video2_pll_out_sels, ARRAY_SIZE(video2_pll_out_sels), 0, 0, 0, base + 0x54, 0);
4776 +
4777 + /* SYS PLL1 fixed output */
4778 +- hws[IMX8MQ_SYS1_PLL_40M_CG] = imx_clk_hw_gate("sys1_pll_40m_cg", "sys1_pll_out", base + 0x30, 9);
4779 +- hws[IMX8MQ_SYS1_PLL_80M_CG] = imx_clk_hw_gate("sys1_pll_80m_cg", "sys1_pll_out", base + 0x30, 11);
4780 +- hws[IMX8MQ_SYS1_PLL_100M_CG] = imx_clk_hw_gate("sys1_pll_100m_cg", "sys1_pll_out", base + 0x30, 13);
4781 +- hws[IMX8MQ_SYS1_PLL_133M_CG] = imx_clk_hw_gate("sys1_pll_133m_cg", "sys1_pll_out", base + 0x30, 15);
4782 +- hws[IMX8MQ_SYS1_PLL_160M_CG] = imx_clk_hw_gate("sys1_pll_160m_cg", "sys1_pll_out", base + 0x30, 17);
4783 +- hws[IMX8MQ_SYS1_PLL_200M_CG] = imx_clk_hw_gate("sys1_pll_200m_cg", "sys1_pll_out", base + 0x30, 19);
4784 +- hws[IMX8MQ_SYS1_PLL_266M_CG] = imx_clk_hw_gate("sys1_pll_266m_cg", "sys1_pll_out", base + 0x30, 21);
4785 +- hws[IMX8MQ_SYS1_PLL_400M_CG] = imx_clk_hw_gate("sys1_pll_400m_cg", "sys1_pll_out", base + 0x30, 23);
4786 +- hws[IMX8MQ_SYS1_PLL_800M_CG] = imx_clk_hw_gate("sys1_pll_800m_cg", "sys1_pll_out", base + 0x30, 25);
4787 +-
4788 +- hws[IMX8MQ_SYS1_PLL_40M] = imx_clk_hw_fixed_factor("sys1_pll_40m", "sys1_pll_40m_cg", 1, 20);
4789 +- hws[IMX8MQ_SYS1_PLL_80M] = imx_clk_hw_fixed_factor("sys1_pll_80m", "sys1_pll_80m_cg", 1, 10);
4790 +- hws[IMX8MQ_SYS1_PLL_100M] = imx_clk_hw_fixed_factor("sys1_pll_100m", "sys1_pll_100m_cg", 1, 8);
4791 +- hws[IMX8MQ_SYS1_PLL_133M] = imx_clk_hw_fixed_factor("sys1_pll_133m", "sys1_pll_133m_cg", 1, 6);
4792 +- hws[IMX8MQ_SYS1_PLL_160M] = imx_clk_hw_fixed_factor("sys1_pll_160m", "sys1_pll_160m_cg", 1, 5);
4793 +- hws[IMX8MQ_SYS1_PLL_200M] = imx_clk_hw_fixed_factor("sys1_pll_200m", "sys1_pll_200m_cg", 1, 4);
4794 +- hws[IMX8MQ_SYS1_PLL_266M] = imx_clk_hw_fixed_factor("sys1_pll_266m", "sys1_pll_266m_cg", 1, 3);
4795 +- hws[IMX8MQ_SYS1_PLL_400M] = imx_clk_hw_fixed_factor("sys1_pll_400m", "sys1_pll_400m_cg", 1, 2);
4796 +- hws[IMX8MQ_SYS1_PLL_800M] = imx_clk_hw_fixed_factor("sys1_pll_800m", "sys1_pll_800m_cg", 1, 1);
4797 ++ hws[IMX8MQ_SYS1_PLL_40M] = imx_clk_hw_fixed_factor("sys1_pll_40m", "sys1_pll_out", 1, 20);
4798 ++ hws[IMX8MQ_SYS1_PLL_80M] = imx_clk_hw_fixed_factor("sys1_pll_80m", "sys1_pll_out", 1, 10);
4799 ++ hws[IMX8MQ_SYS1_PLL_100M] = imx_clk_hw_fixed_factor("sys1_pll_100m", "sys1_pll_out", 1, 8);
4800 ++ hws[IMX8MQ_SYS1_PLL_133M] = imx_clk_hw_fixed_factor("sys1_pll_133m", "sys1_pll_out", 1, 6);
4801 ++ hws[IMX8MQ_SYS1_PLL_160M] = imx_clk_hw_fixed_factor("sys1_pll_160m", "sys1_pll_out", 1, 5);
4802 ++ hws[IMX8MQ_SYS1_PLL_200M] = imx_clk_hw_fixed_factor("sys1_pll_200m", "sys1_pll_out", 1, 4);
4803 ++ hws[IMX8MQ_SYS1_PLL_266M] = imx_clk_hw_fixed_factor("sys1_pll_266m", "sys1_pll_out", 1, 3);
4804 ++ hws[IMX8MQ_SYS1_PLL_400M] = imx_clk_hw_fixed_factor("sys1_pll_400m", "sys1_pll_out", 1, 2);
4805 ++ hws[IMX8MQ_SYS1_PLL_800M] = imx_clk_hw_fixed_factor("sys1_pll_800m", "sys1_pll_out", 1, 1);
4806 +
4807 + /* SYS PLL2 fixed output */
4808 +- hws[IMX8MQ_SYS2_PLL_50M_CG] = imx_clk_hw_gate("sys2_pll_50m_cg", "sys2_pll_out", base + 0x3c, 9);
4809 +- hws[IMX8MQ_SYS2_PLL_100M_CG] = imx_clk_hw_gate("sys2_pll_100m_cg", "sys2_pll_out", base + 0x3c, 11);
4810 +- hws[IMX8MQ_SYS2_PLL_125M_CG] = imx_clk_hw_gate("sys2_pll_125m_cg", "sys2_pll_out", base + 0x3c, 13);
4811 +- hws[IMX8MQ_SYS2_PLL_166M_CG] = imx_clk_hw_gate("sys2_pll_166m_cg", "sys2_pll_out", base + 0x3c, 15);
4812 +- hws[IMX8MQ_SYS2_PLL_200M_CG] = imx_clk_hw_gate("sys2_pll_200m_cg", "sys2_pll_out", base + 0x3c, 17);
4813 +- hws[IMX8MQ_SYS2_PLL_250M_CG] = imx_clk_hw_gate("sys2_pll_250m_cg", "sys2_pll_out", base + 0x3c, 19);
4814 +- hws[IMX8MQ_SYS2_PLL_333M_CG] = imx_clk_hw_gate("sys2_pll_333m_cg", "sys2_pll_out", base + 0x3c, 21);
4815 +- hws[IMX8MQ_SYS2_PLL_500M_CG] = imx_clk_hw_gate("sys2_pll_500m_cg", "sys2_pll_out", base + 0x3c, 23);
4816 +- hws[IMX8MQ_SYS2_PLL_1000M_CG] = imx_clk_hw_gate("sys2_pll_1000m_cg", "sys2_pll_out", base + 0x3c, 25);
4817 +-
4818 +- hws[IMX8MQ_SYS2_PLL_50M] = imx_clk_hw_fixed_factor("sys2_pll_50m", "sys2_pll_50m_cg", 1, 20);
4819 +- hws[IMX8MQ_SYS2_PLL_100M] = imx_clk_hw_fixed_factor("sys2_pll_100m", "sys2_pll_100m_cg", 1, 10);
4820 +- hws[IMX8MQ_SYS2_PLL_125M] = imx_clk_hw_fixed_factor("sys2_pll_125m", "sys2_pll_125m_cg", 1, 8);
4821 +- hws[IMX8MQ_SYS2_PLL_166M] = imx_clk_hw_fixed_factor("sys2_pll_166m", "sys2_pll_166m_cg", 1, 6);
4822 +- hws[IMX8MQ_SYS2_PLL_200M] = imx_clk_hw_fixed_factor("sys2_pll_200m", "sys2_pll_200m_cg", 1, 5);
4823 +- hws[IMX8MQ_SYS2_PLL_250M] = imx_clk_hw_fixed_factor("sys2_pll_250m", "sys2_pll_250m_cg", 1, 4);
4824 +- hws[IMX8MQ_SYS2_PLL_333M] = imx_clk_hw_fixed_factor("sys2_pll_333m", "sys2_pll_333m_cg", 1, 3);
4825 +- hws[IMX8MQ_SYS2_PLL_500M] = imx_clk_hw_fixed_factor("sys2_pll_500m", "sys2_pll_500m_cg", 1, 2);
4826 +- hws[IMX8MQ_SYS2_PLL_1000M] = imx_clk_hw_fixed_factor("sys2_pll_1000m", "sys2_pll_1000m_cg", 1, 1);
4827 ++ hws[IMX8MQ_SYS2_PLL_50M] = imx_clk_hw_fixed_factor("sys2_pll_50m", "sys2_pll_out", 1, 20);
4828 ++ hws[IMX8MQ_SYS2_PLL_100M] = imx_clk_hw_fixed_factor("sys2_pll_100m", "sys2_pll_out", 1, 10);
4829 ++ hws[IMX8MQ_SYS2_PLL_125M] = imx_clk_hw_fixed_factor("sys2_pll_125m", "sys2_pll_out", 1, 8);
4830 ++ hws[IMX8MQ_SYS2_PLL_166M] = imx_clk_hw_fixed_factor("sys2_pll_166m", "sys2_pll_out", 1, 6);
4831 ++ hws[IMX8MQ_SYS2_PLL_200M] = imx_clk_hw_fixed_factor("sys2_pll_200m", "sys2_pll_out", 1, 5);
4832 ++ hws[IMX8MQ_SYS2_PLL_250M] = imx_clk_hw_fixed_factor("sys2_pll_250m", "sys2_pll_out", 1, 4);
4833 ++ hws[IMX8MQ_SYS2_PLL_333M] = imx_clk_hw_fixed_factor("sys2_pll_333m", "sys2_pll_out", 1, 3);
4834 ++ hws[IMX8MQ_SYS2_PLL_500M] = imx_clk_hw_fixed_factor("sys2_pll_500m", "sys2_pll_out", 1, 2);
4835 ++ hws[IMX8MQ_SYS2_PLL_1000M] = imx_clk_hw_fixed_factor("sys2_pll_1000m", "sys2_pll_out", 1, 1);
4836 +
4837 + hws[IMX8MQ_CLK_MON_AUDIO_PLL1_DIV] = imx_clk_hw_divider("audio_pll1_out_monitor", "audio_pll1_bypass", base + 0x78, 0, 3);
4838 + hws[IMX8MQ_CLK_MON_AUDIO_PLL2_DIV] = imx_clk_hw_divider("audio_pll2_out_monitor", "audio_pll2_bypass", base + 0x78, 4, 3);
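
The imx8mq hunk above changes topology, not rates: imx_clk_hw_fixed_factor(name, parent, mult, div) models rate = parent_rate * mult / div, so with SYS1 PLL at its nominal 800 MHz the 1/20 factor still yields 40 MHz and the 1/1 factor 800 MHz, and likewise for the nominal 1000 MHz SYS2 PLL. What disappears is the layer of *_cg gate clocks between the PLL output and the fixed factors; the fixed-factor clocks are reparented directly to sys1_pll_out/sys2_pll_out, and the gate bits at offsets 0x30/0x3c are simply no longer modeled.
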
4839 +diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
4840 +index b080359b4645e..a805bac93c113 100644
4841 +--- a/drivers/clk/meson/g12a.c
4842 ++++ b/drivers/clk/meson/g12a.c
4843 +@@ -1603,7 +1603,7 @@ static struct clk_regmap g12b_cpub_clk_trace = {
4844 + };
4845 +
4846 + static const struct pll_mult_range g12a_gp0_pll_mult_range = {
4847 +- .min = 55,
4848 ++ .min = 125,
4849 + .max = 255,
4850 + };
4851 +
4852 +diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
4853 +index c6eb99169ddc7..6f8f0bbc5ab5b 100644
4854 +--- a/drivers/clk/qcom/clk-alpha-pll.c
4855 ++++ b/drivers/clk/qcom/clk-alpha-pll.c
4856 +@@ -1234,7 +1234,7 @@ static int alpha_pll_fabia_prepare(struct clk_hw *hw)
4857 + return ret;
4858 +
4859 + /* Setup PLL for calibration frequency */
4860 +- regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), cal_l);
4861 ++ regmap_write(pll->clkr.regmap, PLL_CAL_L_VAL(pll), cal_l);
4862 +
4863 + /* Bringup the PLL at calibration frequency */
4864 + ret = clk_alpha_pll_enable(hw);
4865 +diff --git a/drivers/clk/qcom/gcc-sc7280.c b/drivers/clk/qcom/gcc-sc7280.c
4866 +index ef734db316df8..6cefcdc869905 100644
4867 +--- a/drivers/clk/qcom/gcc-sc7280.c
4868 ++++ b/drivers/clk/qcom/gcc-sc7280.c
4869 +@@ -716,6 +716,7 @@ static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s2_clk_src[] = {
4870 + F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
4871 + F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
4872 + F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
4873 ++ F(52174000, P_GCC_GPLL0_OUT_MAIN, 1, 2, 23),
4874 + F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
4875 + F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
4876 + F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
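
For the new gcc-sc7280 row, Qualcomm's F(freq, src, pre_div, m, n) table entries encode rate = source / pre_div * m / n. Assuming GPLL0's customary 600 MHz main output on this family, 600 MHz / 1 * 2 / 23 ≈ 52.174 MHz, matching the 52174000 in the added entry; the existing rows decode the same way against the 300 MHz even output (for example 300 MHz * 4 / 25 = 48 MHz).
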
4877 +diff --git a/drivers/clk/rockchip/clk-rk3568.c b/drivers/clk/rockchip/clk-rk3568.c
4878 +index 946ea2f45bf3b..75ca855e720df 100644
4879 +--- a/drivers/clk/rockchip/clk-rk3568.c
4880 ++++ b/drivers/clk/rockchip/clk-rk3568.c
4881 +@@ -454,17 +454,17 @@ static struct rockchip_clk_branch rk3568_clk_branches[] __initdata = {
4882 + COMPOSITE_NOMUX(CPLL_125M, "cpll_125m", "cpll", CLK_IGNORE_UNUSED,
4883 + RK3568_CLKSEL_CON(80), 0, 5, DFLAGS,
4884 + RK3568_CLKGATE_CON(35), 10, GFLAGS),
4885 ++ COMPOSITE_NOMUX(CPLL_100M, "cpll_100m", "cpll", CLK_IGNORE_UNUSED,
4886 ++ RK3568_CLKSEL_CON(82), 0, 5, DFLAGS,
4887 ++ RK3568_CLKGATE_CON(35), 11, GFLAGS),
4888 + COMPOSITE_NOMUX(CPLL_62P5M, "cpll_62p5", "cpll", CLK_IGNORE_UNUSED,
4889 + RK3568_CLKSEL_CON(80), 8, 5, DFLAGS,
4890 +- RK3568_CLKGATE_CON(35), 11, GFLAGS),
4891 ++ RK3568_CLKGATE_CON(35), 12, GFLAGS),
4892 + COMPOSITE_NOMUX(CPLL_50M, "cpll_50m", "cpll", CLK_IGNORE_UNUSED,
4893 + RK3568_CLKSEL_CON(81), 0, 5, DFLAGS,
4894 +- RK3568_CLKGATE_CON(35), 12, GFLAGS),
4895 ++ RK3568_CLKGATE_CON(35), 13, GFLAGS),
4896 + COMPOSITE_NOMUX(CPLL_25M, "cpll_25m", "cpll", CLK_IGNORE_UNUSED,
4897 + RK3568_CLKSEL_CON(81), 8, 6, DFLAGS,
4898 +- RK3568_CLKGATE_CON(35), 13, GFLAGS),
4899 +- COMPOSITE_NOMUX(CPLL_100M, "cpll_100m", "cpll", CLK_IGNORE_UNUSED,
4900 +- RK3568_CLKSEL_CON(82), 0, 5, DFLAGS,
4901 + RK3568_CLKGATE_CON(35), 14, GFLAGS),
4902 + COMPOSITE_NOMUX(0, "clk_osc0_div_750k", "xin24m", CLK_IGNORE_UNUSED,
4903 + RK3568_CLKSEL_CON(82), 8, 6, DFLAGS,
4904 +diff --git a/drivers/clk/socfpga/clk-agilex.c b/drivers/clk/socfpga/clk-agilex.c
4905 +index 92a6d740a799d..1cb21ea79c640 100644
4906 +--- a/drivers/clk/socfpga/clk-agilex.c
4907 ++++ b/drivers/clk/socfpga/clk-agilex.c
4908 +@@ -177,6 +177,8 @@ static const struct clk_parent_data emac_mux[] = {
4909 + .name = "emaca_free_clk", },
4910 + { .fw_name = "emacb_free_clk",
4911 + .name = "emacb_free_clk", },
4912 ++ { .fw_name = "boot_clk",
4913 ++ .name = "boot_clk", },
4914 + };
4915 +
4916 + static const struct clk_parent_data noc_mux[] = {
4917 +@@ -186,6 +188,41 @@ static const struct clk_parent_data noc_mux[] = {
4918 + .name = "boot_clk", },
4919 + };
4920 +
4921 ++static const struct clk_parent_data sdmmc_mux[] = {
4922 ++ { .fw_name = "sdmmc_free_clk",
4923 ++ .name = "sdmmc_free_clk", },
4924 ++ { .fw_name = "boot_clk",
4925 ++ .name = "boot_clk", },
4926 ++};
4927 ++
4928 ++static const struct clk_parent_data s2f_user1_mux[] = {
4929 ++ { .fw_name = "s2f_user1_free_clk",
4930 ++ .name = "s2f_user1_free_clk", },
4931 ++ { .fw_name = "boot_clk",
4932 ++ .name = "boot_clk", },
4933 ++};
4934 ++
4935 ++static const struct clk_parent_data psi_mux[] = {
4936 ++ { .fw_name = "psi_ref_free_clk",
4937 ++ .name = "psi_ref_free_clk", },
4938 ++ { .fw_name = "boot_clk",
4939 ++ .name = "boot_clk", },
4940 ++};
4941 ++
4942 ++static const struct clk_parent_data gpio_db_mux[] = {
4943 ++ { .fw_name = "gpio_db_free_clk",
4944 ++ .name = "gpio_db_free_clk", },
4945 ++ { .fw_name = "boot_clk",
4946 ++ .name = "boot_clk", },
4947 ++};
4948 ++
4949 ++static const struct clk_parent_data emac_ptp_mux[] = {
4950 ++ { .fw_name = "emac_ptp_free_clk",
4951 ++ .name = "emac_ptp_free_clk", },
4952 ++ { .fw_name = "boot_clk",
4953 ++ .name = "boot_clk", },
4954 ++};
4955 ++
4956 + /* clocks in AO (always on) controller */
4957 + static const struct stratix10_pll_clock agilex_pll_clks[] = {
4958 + { AGILEX_BOOT_CLK, "boot_clk", boot_mux, ARRAY_SIZE(boot_mux), 0,
4959 +@@ -222,11 +259,9 @@ static const struct stratix10_perip_cnt_clock agilex_main_perip_cnt_clks[] = {
4960 + { AGILEX_MPU_FREE_CLK, "mpu_free_clk", NULL, mpu_free_mux, ARRAY_SIZE(mpu_free_mux),
4961 + 0, 0x3C, 0, 0, 0},
4962 + { AGILEX_NOC_FREE_CLK, "noc_free_clk", NULL, noc_free_mux, ARRAY_SIZE(noc_free_mux),
4963 +- 0, 0x40, 0, 0, 1},
4964 +- { AGILEX_L4_SYS_FREE_CLK, "l4_sys_free_clk", "noc_free_clk", NULL, 1, 0,
4965 +- 0, 4, 0, 0},
4966 +- { AGILEX_NOC_CLK, "noc_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux),
4967 +- 0, 0, 0, 0x30, 1},
4968 ++ 0, 0x40, 0, 0, 0},
4969 ++ { AGILEX_L4_SYS_FREE_CLK, "l4_sys_free_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0,
4970 ++ 0, 4, 0x30, 1},
4971 + { AGILEX_EMAC_A_FREE_CLK, "emaca_free_clk", NULL, emaca_free_mux, ARRAY_SIZE(emaca_free_mux),
4972 + 0, 0xD4, 0, 0x88, 0},
4973 + { AGILEX_EMAC_B_FREE_CLK, "emacb_free_clk", NULL, emacb_free_mux, ARRAY_SIZE(emacb_free_mux),
4974 +@@ -236,7 +271,7 @@ static const struct stratix10_perip_cnt_clock agilex_main_perip_cnt_clks[] = {
4975 + { AGILEX_GPIO_DB_FREE_CLK, "gpio_db_free_clk", NULL, gpio_db_free_mux,
4976 + ARRAY_SIZE(gpio_db_free_mux), 0, 0xE0, 0, 0x88, 3},
4977 + { AGILEX_SDMMC_FREE_CLK, "sdmmc_free_clk", NULL, sdmmc_free_mux,
4978 +- ARRAY_SIZE(sdmmc_free_mux), 0, 0xE4, 0, 0x88, 4},
4979 ++ ARRAY_SIZE(sdmmc_free_mux), 0, 0xE4, 0, 0, 0},
4980 + { AGILEX_S2F_USER0_FREE_CLK, "s2f_user0_free_clk", NULL, s2f_usr0_free_mux,
4981 + ARRAY_SIZE(s2f_usr0_free_mux), 0, 0xE8, 0, 0, 0},
4982 + { AGILEX_S2F_USER1_FREE_CLK, "s2f_user1_free_clk", NULL, s2f_usr1_free_mux,
4983 +@@ -252,24 +287,24 @@ static const struct stratix10_gate_clock agilex_gate_clks[] = {
4984 + 0, 0, 0, 0, 0, 0, 4},
4985 + { AGILEX_MPU_CCU_CLK, "mpu_ccu_clk", "mpu_clk", NULL, 1, 0, 0x24,
4986 + 0, 0, 0, 0, 0, 0, 2},
4987 +- { AGILEX_L4_MAIN_CLK, "l4_main_clk", "noc_clk", NULL, 1, 0, 0x24,
4988 +- 1, 0x44, 0, 2, 0, 0, 0},
4989 +- { AGILEX_L4_MP_CLK, "l4_mp_clk", "noc_clk", NULL, 1, 0, 0x24,
4990 +- 2, 0x44, 8, 2, 0, 0, 0},
4991 ++ { AGILEX_L4_MAIN_CLK, "l4_main_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0x24,
4992 ++ 1, 0x44, 0, 2, 0x30, 1, 0},
4993 ++ { AGILEX_L4_MP_CLK, "l4_mp_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0x24,
4994 ++ 2, 0x44, 8, 2, 0x30, 1, 0},
4995 + /*
4996 + * The l4_sp_clk feeds a 100 MHz clock to various peripherals, one of them
4997 + * being the SP timers, and thus cannot be gated.
4998 + */
4999 +- { AGILEX_L4_SP_CLK, "l4_sp_clk", "noc_clk", NULL, 1, CLK_IS_CRITICAL, 0x24,
5000 +- 3, 0x44, 16, 2, 0, 0, 0},
5001 +- { AGILEX_CS_AT_CLK, "cs_at_clk", "noc_clk", NULL, 1, 0, 0x24,
5002 +- 4, 0x44, 24, 2, 0, 0, 0},
5003 +- { AGILEX_CS_TRACE_CLK, "cs_trace_clk", "noc_clk", NULL, 1, 0, 0x24,
5004 +- 4, 0x44, 26, 2, 0, 0, 0},
5005 ++ { AGILEX_L4_SP_CLK, "l4_sp_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), CLK_IS_CRITICAL, 0x24,
5006 ++ 3, 0x44, 16, 2, 0x30, 1, 0},
5007 ++ { AGILEX_CS_AT_CLK, "cs_at_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0x24,
5008 ++ 4, 0x44, 24, 2, 0x30, 1, 0},
5009 ++ { AGILEX_CS_TRACE_CLK, "cs_trace_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0x24,
5010 ++ 4, 0x44, 26, 2, 0x30, 1, 0},
5011 + { AGILEX_CS_PDBG_CLK, "cs_pdbg_clk", "cs_at_clk", NULL, 1, 0, 0x24,
5012 + 4, 0x44, 28, 1, 0, 0, 0},
5013 +- { AGILEX_CS_TIMER_CLK, "cs_timer_clk", "noc_clk", NULL, 1, 0, 0x24,
5014 +- 5, 0, 0, 0, 0, 0, 0},
5015 ++ { AGILEX_CS_TIMER_CLK, "cs_timer_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0x24,
5016 ++ 5, 0, 0, 0, 0x30, 1, 0},
5017 + { AGILEX_S2F_USER0_CLK, "s2f_user0_clk", NULL, s2f_usr0_mux, ARRAY_SIZE(s2f_usr0_mux), 0, 0x24,
5018 + 6, 0, 0, 0, 0, 0, 0},
5019 + { AGILEX_EMAC0_CLK, "emac0_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0x7C,
5020 +@@ -278,16 +313,16 @@ static const struct stratix10_gate_clock agilex_gate_clks[] = {
5021 + 1, 0, 0, 0, 0x94, 27, 0},
5022 + { AGILEX_EMAC2_CLK, "emac2_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0x7C,
5023 + 2, 0, 0, 0, 0x94, 28, 0},
5024 +- { AGILEX_EMAC_PTP_CLK, "emac_ptp_clk", "emac_ptp_free_clk", NULL, 1, 0, 0x7C,
5025 +- 3, 0, 0, 0, 0, 0, 0},
5026 +- { AGILEX_GPIO_DB_CLK, "gpio_db_clk", "gpio_db_free_clk", NULL, 1, 0, 0x7C,
5027 +- 4, 0x98, 0, 16, 0, 0, 0},
5028 +- { AGILEX_SDMMC_CLK, "sdmmc_clk", "sdmmc_free_clk", NULL, 1, 0, 0x7C,
5029 +- 5, 0, 0, 0, 0, 0, 4},
5030 +- { AGILEX_S2F_USER1_CLK, "s2f_user1_clk", "s2f_user1_free_clk", NULL, 1, 0, 0x7C,
5031 +- 6, 0, 0, 0, 0, 0, 0},
5032 +- { AGILEX_PSI_REF_CLK, "psi_ref_clk", "psi_ref_free_clk", NULL, 1, 0, 0x7C,
5033 +- 7, 0, 0, 0, 0, 0, 0},
5034 ++ { AGILEX_EMAC_PTP_CLK, "emac_ptp_clk", NULL, emac_ptp_mux, ARRAY_SIZE(emac_ptp_mux), 0, 0x7C,
5035 ++ 3, 0, 0, 0, 0x88, 2, 0},
5036 ++ { AGILEX_GPIO_DB_CLK, "gpio_db_clk", NULL, gpio_db_mux, ARRAY_SIZE(gpio_db_mux), 0, 0x7C,
5037 ++ 4, 0x98, 0, 16, 0x88, 3, 0},
5038 ++ { AGILEX_SDMMC_CLK, "sdmmc_clk", NULL, sdmmc_mux, ARRAY_SIZE(sdmmc_mux), 0, 0x7C,
5039 ++ 5, 0, 0, 0, 0x88, 4, 4},
5040 ++ { AGILEX_S2F_USER1_CLK, "s2f_user1_clk", NULL, s2f_user1_mux, ARRAY_SIZE(s2f_user1_mux), 0, 0x7C,
5041 ++ 6, 0, 0, 0, 0x88, 5, 0},
5042 ++ { AGILEX_PSI_REF_CLK, "psi_ref_clk", NULL, psi_mux, ARRAY_SIZE(psi_mux), 0, 0x7C,
5043 ++ 7, 0, 0, 0, 0x88, 6, 0},
5044 + { AGILEX_USB_CLK, "usb_clk", "l4_mp_clk", NULL, 1, 0, 0x7C,
5045 + 8, 0, 0, 0, 0, 0, 0},
5046 + { AGILEX_SPI_M_CLK, "spi_m_clk", "l4_mp_clk", NULL, 1, 0, 0x7C,
5047 +@@ -366,7 +401,7 @@ static int agilex_clk_register_gate(const struct stratix10_gate_clock *clks,
5048 + int i;
5049 +
5050 + for (i = 0; i < nums; i++) {
5051 +- hw_clk = s10_register_gate(&clks[i], base);
5052 ++ hw_clk = agilex_register_gate(&clks[i], base);
5053 + if (IS_ERR(hw_clk)) {
5054 + pr_err("%s: failed to register clock %s\n",
5055 + __func__, clks[i].name);
5056 +diff --git a/drivers/clk/socfpga/clk-gate-s10.c b/drivers/clk/socfpga/clk-gate-s10.c
5057 +index b84f2627551e1..32567795765fb 100644
5058 +--- a/drivers/clk/socfpga/clk-gate-s10.c
5059 ++++ b/drivers/clk/socfpga/clk-gate-s10.c
5060 +@@ -11,6 +11,13 @@
5061 + #define SOCFPGA_CS_PDBG_CLK "cs_pdbg_clk"
5062 + #define to_socfpga_gate_clk(p) container_of(p, struct socfpga_gate_clk, hw.hw)
5063 +
5064 ++#define SOCFPGA_EMAC0_CLK "emac0_clk"
5065 ++#define SOCFPGA_EMAC1_CLK "emac1_clk"
5066 ++#define SOCFPGA_EMAC2_CLK "emac2_clk"
5067 ++#define AGILEX_BYPASS_OFFSET 0xC
5068 ++#define STRATIX10_BYPASS_OFFSET 0x2C
5069 ++#define BOOTCLK_BYPASS 2
5070 ++
5071 + static unsigned long socfpga_gate_clk_recalc_rate(struct clk_hw *hwclk,
5072 + unsigned long parent_rate)
5073 + {
5074 +@@ -44,14 +51,61 @@ static unsigned long socfpga_dbg_clk_recalc_rate(struct clk_hw *hwclk,
5075 + static u8 socfpga_gate_get_parent(struct clk_hw *hwclk)
5076 + {
5077 + struct socfpga_gate_clk *socfpgaclk = to_socfpga_gate_clk(hwclk);
5078 +- u32 mask;
5079 ++ u32 mask, second_bypass;
5080 ++ u8 parent = 0;
5081 ++ const char *name = clk_hw_get_name(hwclk);
5082 ++
5083 ++ if (socfpgaclk->bypass_reg) {
5084 ++ mask = (0x1 << socfpgaclk->bypass_shift);
5085 ++ parent = ((readl(socfpgaclk->bypass_reg) & mask) >>
5086 ++ socfpgaclk->bypass_shift);
5087 ++ }
5088 ++
5089 ++ if (streq(name, SOCFPGA_EMAC0_CLK) ||
5090 ++ streq(name, SOCFPGA_EMAC1_CLK) ||
5091 ++ streq(name, SOCFPGA_EMAC2_CLK)) {
5092 ++ second_bypass = readl(socfpgaclk->bypass_reg -
5093 ++ STRATIX10_BYPASS_OFFSET);
5094 ++ /* EMACA bypass to bootclk @0xB0 offset */
5095 ++ if (second_bypass & 0x1)
5096 ++ if (parent == 0) /* only applicable if parent is maca */
5097 ++ parent = BOOTCLK_BYPASS;
5098 ++
5099 ++ if (second_bypass & 0x2)
5100 ++ if (parent == 1) /* only applicable if parent is macb */
5101 ++ parent = BOOTCLK_BYPASS;
5102 ++ }
5103 ++ return parent;
5104 ++}
5105 ++
5106 ++static u8 socfpga_agilex_gate_get_parent(struct clk_hw *hwclk)
5107 ++{
5108 ++ struct socfpga_gate_clk *socfpgaclk = to_socfpga_gate_clk(hwclk);
5109 ++ u32 mask, second_bypass;
5110 + u8 parent = 0;
5111 ++ const char *name = clk_hw_get_name(hwclk);
5112 +
5113 + if (socfpgaclk->bypass_reg) {
5114 + mask = (0x1 << socfpgaclk->bypass_shift);
5115 + parent = ((readl(socfpgaclk->bypass_reg) & mask) >>
5116 + socfpgaclk->bypass_shift);
5117 + }
5118 ++
5119 ++ if (streq(name, SOCFPGA_EMAC0_CLK) ||
5120 ++ streq(name, SOCFPGA_EMAC1_CLK) ||
5121 ++ streq(name, SOCFPGA_EMAC2_CLK)) {
5122 ++ second_bypass = readl(socfpgaclk->bypass_reg -
5123 ++ AGILEX_BYPASS_OFFSET);
5124 ++ /* EMACA bypass to bootclk @0x88 offset */
5125 ++ if (second_bypass & 0x1)
5126 ++ if (parent == 0) /* only applicable if parent is maca */
5127 ++ parent = BOOTCLK_BYPASS;
5128 ++
5129 ++ if (second_bypass & 0x2)
5130 ++ if (parent == 1) /* only applicable if parent is macb */
5131 ++ parent = BOOTCLK_BYPASS;
5132 ++ }
5133 ++
5134 + return parent;
5135 + }
5136 +
5137 +@@ -60,6 +114,11 @@ static struct clk_ops gateclk_ops = {
5138 + .get_parent = socfpga_gate_get_parent,
5139 + };
5140 +
5141 ++static const struct clk_ops agilex_gateclk_ops = {
5142 ++ .recalc_rate = socfpga_gate_clk_recalc_rate,
5143 ++ .get_parent = socfpga_agilex_gate_get_parent,
5144 ++};
5145 ++
5146 + static const struct clk_ops dbgclk_ops = {
5147 + .recalc_rate = socfpga_dbg_clk_recalc_rate,
5148 + .get_parent = socfpga_gate_get_parent,
5149 +@@ -122,3 +181,61 @@ struct clk_hw *s10_register_gate(const struct stratix10_gate_clock *clks, void _
5150 + }
5151 + return hw_clk;
5152 + }
5153 ++
5154 ++struct clk_hw *agilex_register_gate(const struct stratix10_gate_clock *clks, void __iomem *regbase)
5155 ++{
5156 ++ struct clk_hw *hw_clk;
5157 ++ struct socfpga_gate_clk *socfpga_clk;
5158 ++ struct clk_init_data init;
5159 ++ const char *parent_name = clks->parent_name;
5160 ++ int ret;
5161 ++
5162 ++ socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL);
5163 ++ if (!socfpga_clk)
5164 ++ return NULL;
5165 ++
5166 ++ socfpga_clk->hw.reg = regbase + clks->gate_reg;
5167 ++ socfpga_clk->hw.bit_idx = clks->gate_idx;
5168 ++
5169 ++ gateclk_ops.enable = clk_gate_ops.enable;
5170 ++ gateclk_ops.disable = clk_gate_ops.disable;
5171 ++
5172 ++ socfpga_clk->fixed_div = clks->fixed_div;
5173 ++
5174 ++ if (clks->div_reg)
5175 ++ socfpga_clk->div_reg = regbase + clks->div_reg;
5176 ++ else
5177 ++ socfpga_clk->div_reg = NULL;
5178 ++
5179 ++ socfpga_clk->width = clks->div_width;
5180 ++ socfpga_clk->shift = clks->div_offset;
5181 ++
5182 ++ if (clks->bypass_reg)
5183 ++ socfpga_clk->bypass_reg = regbase + clks->bypass_reg;
5184 ++ else
5185 ++ socfpga_clk->bypass_reg = NULL;
5186 ++ socfpga_clk->bypass_shift = clks->bypass_shift;
5187 ++
5188 ++ if (streq(clks->name, "cs_pdbg_clk"))
5189 ++ init.ops = &dbgclk_ops;
5190 ++ else
5191 ++ init.ops = &agilex_gateclk_ops;
5192 ++
5193 ++ init.name = clks->name;
5194 ++ init.flags = clks->flags;
5195 ++
5196 ++ init.num_parents = clks->num_parents;
5197 ++ init.parent_names = parent_name ? &parent_name : NULL;
5198 ++ if (init.parent_names == NULL)
5199 ++ init.parent_data = clks->parent_data;
5200 ++ socfpga_clk->hw.hw.init = &init;
5201 ++
5202 ++ hw_clk = &socfpga_clk->hw.hw;
5203 ++
5204 ++ ret = clk_hw_register(NULL, &socfpga_clk->hw.hw);
5205 ++ if (ret) {
5206 ++ kfree(socfpga_clk);
5207 ++ return ERR_PTR(ret);
5208 ++ }
5209 ++ return hw_clk;
5210 ++}
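
The two get_parent variants above exist because Stratix10 and Agilex place the secondary bypass register at different distances below the per-clock bypass register (0x2C vs 0xC). Both resolve the same three-way choice that the new mux tables in clk-s10.c and clk-agilex.c describe: index 0 or 1 comes from the per-clock bypass bit, overridden to index 2 (BOOTCLK_BYPASS, i.e. boot_clk) when the global EMAC bypass bits are set. A condensed sketch of the decision, with the register values passed in rather than read from hypothetical MMIO:

    #include <linux/bits.h>
    #include <linux/types.h>

    #define BOOTCLK_BYPASS 2   /* index of boot_clk in the 3-entry mux table */

    static u8 example_get_parent(u32 bypass, unsigned int shift, u32 second)
    {
            u8 parent = (bypass >> shift) & 0x1;

            /* global bits can redirect either EMAC group to boot_clk */
            if ((second & BIT(0)) && parent == 0)  /* emaca -> boot_clk */
                    parent = BOOTCLK_BYPASS;
            if ((second & BIT(1)) && parent == 1)  /* emacb -> boot_clk */
                    parent = BOOTCLK_BYPASS;

            return parent;
    }
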
5211 +diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c
5212 +index e5a5fef76df70..cbabde2b476bf 100644
5213 +--- a/drivers/clk/socfpga/clk-periph-s10.c
5214 ++++ b/drivers/clk/socfpga/clk-periph-s10.c
5215 +@@ -64,16 +64,21 @@ static u8 clk_periclk_get_parent(struct clk_hw *hwclk)
5216 + {
5217 + struct socfpga_periph_clk *socfpgaclk = to_periph_clk(hwclk);
5218 + u32 clk_src, mask;
5219 +- u8 parent;
5220 ++ u8 parent = 0;
5221 +
5222 ++ /* handle the bypass first */
5223 + if (socfpgaclk->bypass_reg) {
5224 + mask = (0x1 << socfpgaclk->bypass_shift);
5225 + parent = ((readl(socfpgaclk->bypass_reg) & mask) >>
5226 + socfpgaclk->bypass_shift);
5227 +- } else {
5228 ++ if (parent)
5229 ++ return parent;
5230 ++ }
5231 ++
5232 ++ if (socfpgaclk->hw.reg) {
5233 + clk_src = readl(socfpgaclk->hw.reg);
5234 + parent = (clk_src >> CLK_MGR_FREE_SHIFT) &
5235 +- CLK_MGR_FREE_MASK;
5236 ++ CLK_MGR_FREE_MASK;
5237 + }
5238 + return parent;
5239 + }
5240 +diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c
5241 +index f0bd77138ecb4..b532d51faaee5 100644
5242 +--- a/drivers/clk/socfpga/clk-s10.c
5243 ++++ b/drivers/clk/socfpga/clk-s10.c
5244 +@@ -144,6 +144,41 @@ static const struct clk_parent_data mpu_free_mux[] = {
5245 + .name = "f2s-free-clk", },
5246 + };
5247 +
5248 ++static const struct clk_parent_data sdmmc_mux[] = {
5249 ++ { .fw_name = "sdmmc_free_clk",
5250 ++ .name = "sdmmc_free_clk", },
5251 ++ { .fw_name = "boot_clk",
5252 ++ .name = "boot_clk", },
5253 ++};
5254 ++
5255 ++static const struct clk_parent_data s2f_user1_mux[] = {
5256 ++ { .fw_name = "s2f_user1_free_clk",
5257 ++ .name = "s2f_user1_free_clk", },
5258 ++ { .fw_name = "boot_clk",
5259 ++ .name = "boot_clk", },
5260 ++};
5261 ++
5262 ++static const struct clk_parent_data psi_mux[] = {
5263 ++ { .fw_name = "psi_ref_free_clk",
5264 ++ .name = "psi_ref_free_clk", },
5265 ++ { .fw_name = "boot_clk",
5266 ++ .name = "boot_clk", },
5267 ++};
5268 ++
5269 ++static const struct clk_parent_data gpio_db_mux[] = {
5270 ++ { .fw_name = "gpio_db_free_clk",
5271 ++ .name = "gpio_db_free_clk", },
5272 ++ { .fw_name = "boot_clk",
5273 ++ .name = "boot_clk", },
5274 ++};
5275 ++
5276 ++static const struct clk_parent_data emac_ptp_mux[] = {
5277 ++ { .fw_name = "emac_ptp_free_clk",
5278 ++ .name = "emac_ptp_free_clk", },
5279 ++ { .fw_name = "boot_clk",
5280 ++ .name = "boot_clk", },
5281 ++};
5282 ++
5283 + /* clocks in AO (always on) controller */
5284 + static const struct stratix10_pll_clock s10_pll_clks[] = {
5285 + { STRATIX10_BOOT_CLK, "boot_clk", boot_mux, ARRAY_SIZE(boot_mux), 0,
5286 +@@ -167,7 +202,7 @@ static const struct stratix10_perip_cnt_clock s10_main_perip_cnt_clks[] = {
5287 + { STRATIX10_MPU_FREE_CLK, "mpu_free_clk", NULL, mpu_free_mux, ARRAY_SIZE(mpu_free_mux),
5288 + 0, 0x48, 0, 0, 0},
5289 + { STRATIX10_NOC_FREE_CLK, "noc_free_clk", NULL, noc_free_mux, ARRAY_SIZE(noc_free_mux),
5290 +- 0, 0x4C, 0, 0, 0},
5291 ++ 0, 0x4C, 0, 0x3C, 1},
5292 + { STRATIX10_MAIN_EMACA_CLK, "main_emaca_clk", "main_noc_base_clk", NULL, 1, 0,
5293 + 0x50, 0, 0, 0},
5294 + { STRATIX10_MAIN_EMACB_CLK, "main_emacb_clk", "main_noc_base_clk", NULL, 1, 0,
5295 +@@ -200,10 +235,8 @@ static const struct stratix10_perip_cnt_clock s10_main_perip_cnt_clks[] = {
5296 + 0, 0xD4, 0, 0, 0},
5297 + { STRATIX10_PERI_PSI_REF_CLK, "peri_psi_ref_clk", "peri_noc_base_clk", NULL, 1, 0,
5298 + 0xD8, 0, 0, 0},
5299 +- { STRATIX10_L4_SYS_FREE_CLK, "l4_sys_free_clk", "noc_free_clk", NULL, 1, 0,
5300 +- 0, 4, 0, 0},
5301 +- { STRATIX10_NOC_CLK, "noc_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux),
5302 +- 0, 0, 0, 0x3C, 1},
5303 ++ { STRATIX10_L4_SYS_FREE_CLK, "l4_sys_free_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0,
5304 ++ 0, 4, 0x3C, 1},
5305 + { STRATIX10_EMAC_A_FREE_CLK, "emaca_free_clk", NULL, emaca_free_mux, ARRAY_SIZE(emaca_free_mux),
5306 + 0, 0, 2, 0xB0, 0},
5307 + { STRATIX10_EMAC_B_FREE_CLK, "emacb_free_clk", NULL, emacb_free_mux, ARRAY_SIZE(emacb_free_mux),
5308 +@@ -227,20 +260,20 @@ static const struct stratix10_gate_clock s10_gate_clks[] = {
5309 + 0, 0, 0, 0, 0, 0, 4},
5310 + { STRATIX10_MPU_L2RAM_CLK, "mpu_l2ram_clk", "mpu_clk", NULL, 1, 0, 0x30,
5311 + 0, 0, 0, 0, 0, 0, 2},
5312 +- { STRATIX10_L4_MAIN_CLK, "l4_main_clk", "noc_clk", NULL, 1, 0, 0x30,
5313 +- 1, 0x70, 0, 2, 0, 0, 0},
5314 +- { STRATIX10_L4_MP_CLK, "l4_mp_clk", "noc_clk", NULL, 1, 0, 0x30,
5315 +- 2, 0x70, 8, 2, 0, 0, 0},
5316 +- { STRATIX10_L4_SP_CLK, "l4_sp_clk", "noc_clk", NULL, 1, CLK_IS_CRITICAL, 0x30,
5317 +- 3, 0x70, 16, 2, 0, 0, 0},
5318 +- { STRATIX10_CS_AT_CLK, "cs_at_clk", "noc_clk", NULL, 1, 0, 0x30,
5319 +- 4, 0x70, 24, 2, 0, 0, 0},
5320 +- { STRATIX10_CS_TRACE_CLK, "cs_trace_clk", "noc_clk", NULL, 1, 0, 0x30,
5321 +- 4, 0x70, 26, 2, 0, 0, 0},
5322 ++ { STRATIX10_L4_MAIN_CLK, "l4_main_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0x30,
5323 ++ 1, 0x70, 0, 2, 0x3C, 1, 0},
5324 ++ { STRATIX10_L4_MP_CLK, "l4_mp_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0x30,
5325 ++ 2, 0x70, 8, 2, 0x3C, 1, 0},
5326 ++ { STRATIX10_L4_SP_CLK, "l4_sp_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), CLK_IS_CRITICAL, 0x30,
5327 ++ 3, 0x70, 16, 2, 0x3C, 1, 0},
5328 ++ { STRATIX10_CS_AT_CLK, "cs_at_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0x30,
5329 ++ 4, 0x70, 24, 2, 0x3C, 1, 0},
5330 ++ { STRATIX10_CS_TRACE_CLK, "cs_trace_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0x30,
5331 ++ 4, 0x70, 26, 2, 0x3C, 1, 0},
5332 + { STRATIX10_CS_PDBG_CLK, "cs_pdbg_clk", "cs_at_clk", NULL, 1, 0, 0x30,
5333 + 4, 0x70, 28, 1, 0, 0, 0},
5334 +- { STRATIX10_CS_TIMER_CLK, "cs_timer_clk", "noc_clk", NULL, 1, 0, 0x30,
5335 +- 5, 0, 0, 0, 0, 0, 0},
5336 ++ { STRATIX10_CS_TIMER_CLK, "cs_timer_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0x30,
5337 ++ 5, 0, 0, 0, 0x3C, 1, 0},
5338 + { STRATIX10_S2F_USER0_CLK, "s2f_user0_clk", NULL, s2f_usr0_mux, ARRAY_SIZE(s2f_usr0_mux), 0, 0x30,
5339 + 6, 0, 0, 0, 0, 0, 0},
5340 + { STRATIX10_EMAC0_CLK, "emac0_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0xA4,
5341 +@@ -249,16 +282,16 @@ static const struct stratix10_gate_clock s10_gate_clks[] = {
5342 + 1, 0, 0, 0, 0xDC, 27, 0},
5343 + { STRATIX10_EMAC2_CLK, "emac2_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0xA4,
5344 + 2, 0, 0, 0, 0xDC, 28, 0},
5345 +- { STRATIX10_EMAC_PTP_CLK, "emac_ptp_clk", "emac_ptp_free_clk", NULL, 1, 0, 0xA4,
5346 +- 3, 0, 0, 0, 0, 0, 0},
5347 +- { STRATIX10_GPIO_DB_CLK, "gpio_db_clk", "gpio_db_free_clk", NULL, 1, 0, 0xA4,
5348 +- 4, 0xE0, 0, 16, 0, 0, 0},
5349 +- { STRATIX10_SDMMC_CLK, "sdmmc_clk", "sdmmc_free_clk", NULL, 1, 0, 0xA4,
5350 +- 5, 0, 0, 0, 0, 0, 4},
5351 +- { STRATIX10_S2F_USER1_CLK, "s2f_user1_clk", "s2f_user1_free_clk", NULL, 1, 0, 0xA4,
5352 +- 6, 0, 0, 0, 0, 0, 0},
5353 +- { STRATIX10_PSI_REF_CLK, "psi_ref_clk", "psi_ref_free_clk", NULL, 1, 0, 0xA4,
5354 +- 7, 0, 0, 0, 0, 0, 0},
5355 ++ { STRATIX10_EMAC_PTP_CLK, "emac_ptp_clk", NULL, emac_ptp_mux, ARRAY_SIZE(emac_ptp_mux), 0, 0xA4,
5356 ++ 3, 0, 0, 0, 0xB0, 2, 0},
5357 ++ { STRATIX10_GPIO_DB_CLK, "gpio_db_clk", NULL, gpio_db_mux, ARRAY_SIZE(gpio_db_mux), 0, 0xA4,
5358 ++ 4, 0xE0, 0, 16, 0xB0, 3, 0},
5359 ++ { STRATIX10_SDMMC_CLK, "sdmmc_clk", NULL, sdmmc_mux, ARRAY_SIZE(sdmmc_mux), 0, 0xA4,
5360 ++ 5, 0, 0, 0, 0xB0, 4, 4},
5361 ++ { STRATIX10_S2F_USER1_CLK, "s2f_user1_clk", NULL, s2f_user1_mux, ARRAY_SIZE(s2f_user1_mux), 0, 0xA4,
5362 ++ 6, 0, 0, 0, 0xB0, 5, 0},
5363 ++ { STRATIX10_PSI_REF_CLK, "psi_ref_clk", NULL, psi_mux, ARRAY_SIZE(psi_mux), 0, 0xA4,
5364 ++ 7, 0, 0, 0, 0xB0, 6, 0},
5365 + { STRATIX10_USB_CLK, "usb_clk", "l4_mp_clk", NULL, 1, 0, 0xA4,
5366 + 8, 0, 0, 0, 0, 0, 0},
5367 + { STRATIX10_SPI_M_CLK, "spi_m_clk", "l4_mp_clk", NULL, 1, 0, 0xA4,
5368 +diff --git a/drivers/clk/socfpga/stratix10-clk.h b/drivers/clk/socfpga/stratix10-clk.h
5369 +index 61eaf3a41fbb8..75234e0783e1c 100644
5370 +--- a/drivers/clk/socfpga/stratix10-clk.h
5371 ++++ b/drivers/clk/socfpga/stratix10-clk.h
5372 +@@ -85,4 +85,6 @@ struct clk_hw *s10_register_cnt_periph(const struct stratix10_perip_cnt_clock *c
5373 + void __iomem *reg);
5374 + struct clk_hw *s10_register_gate(const struct stratix10_gate_clock *clks,
5375 + void __iomem *reg);
5376 ++struct clk_hw *agilex_register_gate(const struct stratix10_gate_clock *clks,
5377 ++ void __iomem *reg);
5378 + #endif /* __STRATIX10_CLK_H */
5379 +diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
5380 +index a774942cb153a..f49724a22540e 100644
5381 +--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
5382 ++++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
5383 +@@ -817,10 +817,10 @@ static void __init sun8i_v3_v3s_ccu_init(struct device_node *node,
5384 + return;
5385 + }
5386 +
5387 +- /* Force the PLL-Audio-1x divider to 4 */
5388 ++ /* Force the PLL-Audio-1x divider to 1 */
5389 + val = readl(reg + SUN8I_V3S_PLL_AUDIO_REG);
5390 + val &= ~GENMASK(19, 16);
5391 +- writel(val | (3 << 16), reg + SUN8I_V3S_PLL_AUDIO_REG);
5392 ++ writel(val, reg + SUN8I_V3S_PLL_AUDIO_REG);
5393 +
5394 + sunxi_ccu_probe(node, reg, ccu_desc);
5395 + }
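
In the sun8i-v3s hunk, the PLL-Audio post-divider field in bits [19:16] encodes the divisor minus one: the old code wrote 3 << 16 to force divide-by-4, and the replacement clears the field to force divide-by-1, with the comment updated to match.
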
5396 +diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
5397 +index 16dbf83d2f62a..a33688b2359e5 100644
5398 +--- a/drivers/clk/tegra/clk-tegra30.c
5399 ++++ b/drivers/clk/tegra/clk-tegra30.c
5400 +@@ -1245,7 +1245,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
5401 + { TEGRA30_CLK_GR3D, TEGRA30_CLK_PLL_C, 300000000, 0 },
5402 + { TEGRA30_CLK_GR3D2, TEGRA30_CLK_PLL_C, 300000000, 0 },
5403 + { TEGRA30_CLK_PLL_U, TEGRA30_CLK_CLK_MAX, 480000000, 0 },
5404 +- { TEGRA30_CLK_VDE, TEGRA30_CLK_PLL_C, 600000000, 0 },
5405 ++ { TEGRA30_CLK_VDE, TEGRA30_CLK_PLL_C, 300000000, 0 },
5406 + { TEGRA30_CLK_SPDIF_IN_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
5407 + { TEGRA30_CLK_I2S0_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
5408 + { TEGRA30_CLK_I2S1_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
5409 +diff --git a/drivers/clk/zynqmp/clk-mux-zynqmp.c b/drivers/clk/zynqmp/clk-mux-zynqmp.c
5410 +index 06194149be831..d576c900dee06 100644
5411 +--- a/drivers/clk/zynqmp/clk-mux-zynqmp.c
5412 ++++ b/drivers/clk/zynqmp/clk-mux-zynqmp.c
5413 +@@ -38,7 +38,7 @@ struct zynqmp_clk_mux {
5414 + * zynqmp_clk_mux_get_parent() - Get parent of clock
5415 + * @hw: handle between common and hardware-specific interfaces
5416 + *
5417 +- * Return: Parent index
5418 ++ * Return: Parent index on success or number of parents in case of error
5419 + */
5420 + static u8 zynqmp_clk_mux_get_parent(struct clk_hw *hw)
5421 + {
5422 +@@ -50,9 +50,15 @@ static u8 zynqmp_clk_mux_get_parent(struct clk_hw *hw)
5423 +
5424 + ret = zynqmp_pm_clock_getparent(clk_id, &val);
5425 +
5426 +- if (ret)
5427 ++ if (ret) {
5428 + pr_warn_once("%s() getparent failed for clock: %s, ret = %d\n",
5429 + __func__, clk_name, ret);
5430 ++ /*
5431 ++ * clk_core_get_parent_by_index() treats num_parents as an invalid
5432 ++ * index, which is exactly what we want to return here
5433 ++ */
5434 ++ return clk_hw_get_num_parents(hw);
5435 ++ }
5436 +
5437 + return val;
5438 + }
5439 +diff --git a/drivers/clk/zynqmp/pll.c b/drivers/clk/zynqmp/pll.c
5440 +index abe6afbf3407b..e025581f0d54a 100644
5441 +--- a/drivers/clk/zynqmp/pll.c
5442 ++++ b/drivers/clk/zynqmp/pll.c
5443 +@@ -31,8 +31,9 @@ struct zynqmp_pll {
5444 + #define PS_PLL_VCO_MAX 3000000000UL
5445 +
5446 + enum pll_mode {
5447 +- PLL_MODE_INT,
5448 +- PLL_MODE_FRAC,
5449 ++ PLL_MODE_INT = 0,
5450 ++ PLL_MODE_FRAC = 1,
5451 ++ PLL_MODE_ERROR = 2,
5452 + };
5453 +
5454 + #define FRAC_OFFSET 0x8
5455 +@@ -54,9 +55,11 @@ static inline enum pll_mode zynqmp_pll_get_mode(struct clk_hw *hw)
5456 + int ret;
5457 +
5458 + ret = zynqmp_pm_get_pll_frac_mode(clk_id, ret_payload);
5459 +- if (ret)
5460 ++ if (ret) {
5461 + pr_warn_once("%s() PLL get frac mode failed for %s, ret = %d\n",
5462 + __func__, clk_name, ret);
5463 ++ return PLL_MODE_ERROR;
5464 ++ }
5465 +
5466 + return ret_payload[1];
5467 + }
5468 +@@ -126,7 +129,7 @@ static long zynqmp_pll_round_rate(struct clk_hw *hw, unsigned long rate,
5469 + * @hw: Handle between common and hardware-specific interfaces
5470 + * @parent_rate: Clock frequency of parent clock
5471 + *
5472 +- * Return: Current clock frequency
5473 ++ * Return: Current clock frequency or 0 in case of error
5474 + */
5475 + static unsigned long zynqmp_pll_recalc_rate(struct clk_hw *hw,
5476 + unsigned long parent_rate)
5477 +@@ -138,14 +141,21 @@ static unsigned long zynqmp_pll_recalc_rate(struct clk_hw *hw,
5478 + unsigned long rate, frac;
5479 + u32 ret_payload[PAYLOAD_ARG_CNT];
5480 + int ret;
5481 ++ enum pll_mode mode;
5482 +
5483 + ret = zynqmp_pm_clock_getdivider(clk_id, &fbdiv);
5484 +- if (ret)
5485 ++ if (ret) {
5486 + pr_warn_once("%s() get divider failed for %s, ret = %d\n",
5487 + __func__, clk_name, ret);
5488 ++ return 0ul;
5489 ++ }
5490 ++
5491 ++ mode = zynqmp_pll_get_mode(hw);
5492 ++ if (mode == PLL_MODE_ERROR)
5493 ++ return 0ul;
5494 +
5495 + rate = parent_rate * fbdiv;
5496 +- if (zynqmp_pll_get_mode(hw) == PLL_MODE_FRAC) {
5497 ++ if (mode == PLL_MODE_FRAC) {
5498 + zynqmp_pm_get_pll_frac_data(clk_id, ret_payload);
5499 + data = ret_payload[1];
5500 + frac = (parent_rate * data) / FRAC_DIV;
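
The zynqmp pll.c changes make firmware failures visible instead of silent: PLL_MODE_ERROR joins the mode enum as an explicit sentinel (with the existing values pinned to 0 and 1), zynqmp_pll_get_mode() returns it when the EEMI call fails, and recalc_rate() then reports 0, which the clock framework treats as an unknown rate, rather than computing a rate from an uninitialized payload. The rate math itself is unchanged: rate = parent_rate * fbdiv, plus parent_rate * frac_data / FRAC_DIV in fractional mode.
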
5501 +diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
5502 +index 33eeabf9c3d12..e5c631f1b5cbe 100644
5503 +--- a/drivers/clocksource/timer-ti-dm.c
5504 ++++ b/drivers/clocksource/timer-ti-dm.c
5505 +@@ -78,6 +78,9 @@ static void omap_dm_timer_write_reg(struct omap_dm_timer *timer, u32 reg,
5506 +
5507 + static void omap_timer_restore_context(struct omap_dm_timer *timer)
5508 + {
5509 ++ __omap_dm_timer_write(timer, OMAP_TIMER_OCP_CFG_OFFSET,
5510 ++ timer->context.ocp_cfg, 0);
5511 ++
5512 + omap_dm_timer_write_reg(timer, OMAP_TIMER_WAKEUP_EN_REG,
5513 + timer->context.twer);
5514 + omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG,
5515 +@@ -95,6 +98,9 @@ static void omap_timer_restore_context(struct omap_dm_timer *timer)
5516 +
5517 + static void omap_timer_save_context(struct omap_dm_timer *timer)
5518 + {
5519 ++ timer->context.ocp_cfg =
5520 ++ __omap_dm_timer_read(timer, OMAP_TIMER_OCP_CFG_OFFSET, 0);
5521 ++
5522 + timer->context.tclr =
5523 + omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
5524 + timer->context.twer =
5525 +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
5526 +index 802abc925b2ae..cbab834c37a03 100644
5527 +--- a/drivers/cpufreq/cpufreq.c
5528 ++++ b/drivers/cpufreq/cpufreq.c
5529 +@@ -1367,9 +1367,14 @@ static int cpufreq_online(unsigned int cpu)
5530 + goto out_free_policy;
5531 + }
5532 +
5533 ++ /*
5534 ++ * The initialization has succeeded and the policy is online.
5535 ++ * If there is a problem with its frequency table, take it
5536 ++ * offline and drop it.
5537 ++ */
5538 + ret = cpufreq_table_validate_and_sort(policy);
5539 + if (ret)
5540 +- goto out_exit_policy;
5541 ++ goto out_offline_policy;
5542 +
5543 + /* related_cpus should at least include policy->cpus. */
5544 + cpumask_copy(policy->related_cpus, policy->cpus);
5545 +@@ -1515,6 +1520,10 @@ out_destroy_policy:
5546 +
5547 + up_write(&policy->rwsem);
5548 +
5549 ++out_offline_policy:
5550 ++ if (cpufreq_driver->offline)
5551 ++ cpufreq_driver->offline(policy);
5552 ++
5553 + out_exit_policy:
5554 + if (cpufreq_driver->exit)
5555 + cpufreq_driver->exit(policy);
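
The cpufreq fix is about unwind ordering: once the driver's ->online (or ->init) has succeeded, a later failure, here frequency table validation, must take the policy back through ->offline before ->exit, so the new out_offline_policy label sits above out_exit_policy and control falls through the labels in reverse order of setup. A minimal sketch of the goto-ladder idiom, with hypothetical step functions:

    struct example;
    int step_a(struct example *ex);
    int step_b(struct example *ex);
    int step_c(struct example *ex);
    void undo_step_b(struct example *ex);
    void undo_step_a(struct example *ex);

    static int example_setup(struct example *ex)
    {
            int ret;

            ret = step_a(ex);          /* e.g. driver ->init */
            if (ret)
                    return ret;

            ret = step_b(ex);          /* e.g. driver ->online */
            if (ret)
                    goto undo_a;

            ret = step_c(ex);          /* e.g. table validation */
            if (ret)
                    goto undo_b;       /* not undo_a: b must be undone too */

            return 0;

    undo_b:
            undo_step_b(ex);           /* mirrors ->offline */
    undo_a:
            undo_step_a(ex);           /* mirrors ->exit */
            return ret;
    }
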
5556 +diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.c b/drivers/crypto/cavium/nitrox/nitrox_isr.c
5557 +index c288c4b51783d..f19e520da6d0c 100644
5558 +--- a/drivers/crypto/cavium/nitrox/nitrox_isr.c
5559 ++++ b/drivers/crypto/cavium/nitrox/nitrox_isr.c
5560 +@@ -307,6 +307,10 @@ int nitrox_register_interrupts(struct nitrox_device *ndev)
5561 + * Entry 192: NPS_CORE_INT_ACTIVE
5562 + */
5563 + nr_vecs = pci_msix_vec_count(pdev);
5564 ++ if (nr_vecs < 0) {
5565 ++ dev_err(DEV(ndev), "Error in getting vec count %d\n", nr_vecs);
5566 ++ return nr_vecs;
5567 ++ }
5568 +
5569 + /* Enable MSI-X */
5570 + ret = pci_alloc_irq_vectors(pdev, nr_vecs, nr_vecs, PCI_IRQ_MSIX);
5571 +diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
5572 +index 3506b2050fb86..91808402e0bf2 100644
5573 +--- a/drivers/crypto/ccp/sev-dev.c
5574 ++++ b/drivers/crypto/ccp/sev-dev.c
5575 +@@ -43,6 +43,10 @@ static int psp_probe_timeout = 5;
5576 + module_param(psp_probe_timeout, int, 0644);
5577 + MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");
5578 +
5579 ++MODULE_FIRMWARE("amd/amd_sev_fam17h_model0xh.sbin"); /* 1st gen EPYC */
5580 ++MODULE_FIRMWARE("amd/amd_sev_fam17h_model3xh.sbin"); /* 2nd gen EPYC */
5581 ++MODULE_FIRMWARE("amd/amd_sev_fam19h_model0xh.sbin"); /* 3rd gen EPYC */
5582 ++
5583 + static bool psp_dead;
5584 + static int psp_timeout;
5585 +
5586 +diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
5587 +index f468594ef8afa..6fb6ba35f89d4 100644
5588 +--- a/drivers/crypto/ccp/sp-pci.c
5589 ++++ b/drivers/crypto/ccp/sp-pci.c
5590 +@@ -222,7 +222,7 @@ static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5591 + if (ret) {
5592 + dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
5593 + ret);
5594 +- goto e_err;
5595 ++ goto free_irqs;
5596 + }
5597 + }
5598 +
5599 +@@ -230,10 +230,12 @@ static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5600 +
5601 + ret = sp_init(sp);
5602 + if (ret)
5603 +- goto e_err;
5604 ++ goto free_irqs;
5605 +
5606 + return 0;
5607 +
5608 ++free_irqs:
5609 ++ sp_free_irqs(sp);
5610 + e_err:
5611 + dev_notice(dev, "initialization failed\n");
5612 + return ret;
5613 +diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
5614 +index a380087c83f77..782ddffa5d904 100644
5615 +--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
5616 ++++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
5617 +@@ -298,6 +298,8 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
5618 + dma_addr_t tmp;
5619 +
5620 + tmp = le64_to_cpu(sqe->in);
5621 ++ if (unlikely(dma_mapping_error(dev, tmp)))
5622 ++ return;
5623 +
5624 + if (src) {
5625 + if (req->src)
5626 +@@ -307,6 +309,8 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
5627 + }
5628 +
5629 + tmp = le64_to_cpu(sqe->out);
5630 ++ if (unlikely(dma_mapping_error(dev, tmp)))
5631 ++ return;
5632 +
5633 + if (req->dst) {
5634 + if (dst)
5635 +@@ -524,6 +528,8 @@ static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
5636 + msg->key = cpu_to_le64(ctx->dh.dma_xa_p);
5637 + }
5638 +
5639 ++ msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
5640 ++ msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
5641 + msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT);
5642 + msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
5643 + h_req->ctx = ctx;
5644 +@@ -1372,11 +1378,15 @@ static void hpre_ecdh_hw_data_clr_all(struct hpre_ctx *ctx,
5645 + dma_addr_t dma;
5646 +
5647 + dma = le64_to_cpu(sqe->in);
5648 ++ if (unlikely(dma_mapping_error(dev, dma)))
5649 ++ return;
5650 +
5651 + if (src && req->src)
5652 + dma_free_coherent(dev, ctx->key_sz << 2, req->src, dma);
5653 +
5654 + dma = le64_to_cpu(sqe->out);
5655 ++ if (unlikely(dma_mapping_error(dev, dma)))
5656 ++ return;
5657 +
5658 + if (req->dst)
5659 + dma_free_coherent(dev, ctx->key_sz << 1, req->dst, dma);
5660 +@@ -1431,6 +1441,8 @@ static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx,
5661 + h_req->areq.ecdh = req;
5662 + msg = &h_req->req;
5663 + memset(msg, 0, sizeof(*msg));
5664 ++ msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
5665 ++ msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
5666 + msg->key = cpu_to_le64(ctx->ecdh.dma_p);
5667 +
5668 + msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
5669 +@@ -1667,11 +1679,15 @@ static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx,
5670 + dma_addr_t dma;
5671 +
5672 + dma = le64_to_cpu(sqe->in);
5673 ++ if (unlikely(dma_mapping_error(dev, dma)))
5674 ++ return;
5675 +
5676 + if (src && req->src)
5677 + dma_free_coherent(dev, ctx->key_sz, req->src, dma);
5678 +
5679 + dma = le64_to_cpu(sqe->out);
5680 ++ if (unlikely(dma_mapping_error(dev, dma)))
5681 ++ return;
5682 +
5683 + if (req->dst)
5684 + dma_free_coherent(dev, ctx->key_sz, req->dst, dma);
5685 +@@ -1722,6 +1738,8 @@ static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx,
5686 + h_req->areq.curve25519 = req;
5687 + msg = &h_req->req;
5688 + memset(msg, 0, sizeof(*msg));
5689 ++ msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
5690 ++ msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
5691 + msg->key = cpu_to_le64(ctx->curve25519.dma_p);
5692 +
5693 + msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
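
The hpre changes adopt DMA_MAPPING_ERROR as an "unmapped" sentinel: every request pre-fills its in/out address fields with it, and the hw_data_clr_all() cleanup paths bail out early when dma_mapping_error() says a field never held a real mapping, which makes cleanup safe on partially initialized requests. A minimal sketch of the idiom, with a hypothetical message structure:

    #include <linux/dma-mapping.h>

    struct example_msg {
            dma_addr_t in;
            dma_addr_t out;
    };

    static void example_msg_init(struct example_msg *msg)
    {
            /* sentinel meaning "no mapping yet"; cleanup can run any time */
            msg->in = DMA_MAPPING_ERROR;
            msg->out = DMA_MAPPING_ERROR;
    }

    static void example_msg_cleanup(struct device *dev, struct example_msg *msg,
                                    void *in_buf, void *out_buf, size_t len)
    {
            if (in_buf && !dma_mapping_error(dev, msg->in))
                    dma_free_coherent(dev, len, in_buf, msg->in);
            if (out_buf && !dma_mapping_error(dev, msg->out))
                    dma_free_coherent(dev, len, out_buf, msg->out);
    }
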
5694 +diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
5695 +index 133aede8bf078..b43fad8b9e8d4 100644
5696 +--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
5697 ++++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
5698 +@@ -1541,11 +1541,11 @@ static struct skcipher_alg sec_skciphers[] = {
5699 + AES_BLOCK_SIZE, AES_BLOCK_SIZE)
5700 +
5701 + SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
5702 +- SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
5703 ++ SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE,
5704 + DES3_EDE_BLOCK_SIZE, 0)
5705 +
5706 + SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
5707 +- SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
5708 ++ SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE,
5709 + DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)
5710 +
5711 + SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
5712 +diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
5713 +index 0616e369522e9..f577ee4afd06f 100644
5714 +--- a/drivers/crypto/ixp4xx_crypto.c
5715 ++++ b/drivers/crypto/ixp4xx_crypto.c
5716 +@@ -149,6 +149,8 @@ struct crypt_ctl {
5717 + struct ablk_ctx {
5718 + struct buffer_desc *src;
5719 + struct buffer_desc *dst;
5720 ++ u8 iv[MAX_IVLEN];
5721 ++ bool encrypt;
5722 + };
5723 +
5724 + struct aead_ctx {
5725 +@@ -330,7 +332,7 @@ static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
5726 +
5727 + buf1 = buf->next;
5728 + phys1 = buf->phys_next;
5729 +- dma_unmap_single(dev, buf->phys_next, buf->buf_len, buf->dir);
5730 ++ dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
5731 + dma_pool_free(buffer_pool, buf, phys);
5732 + buf = buf1;
5733 + phys = phys1;
5734 +@@ -381,6 +383,20 @@ static void one_packet(dma_addr_t phys)
5735 + case CTL_FLAG_PERFORM_ABLK: {
5736 + struct skcipher_request *req = crypt->data.ablk_req;
5737 + struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
5738 ++ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
5739 ++ unsigned int ivsize = crypto_skcipher_ivsize(tfm);
5740 ++ unsigned int offset;
5741 ++
5742 ++ if (ivsize > 0) {
5743 ++ offset = req->cryptlen - ivsize;
5744 ++ if (req_ctx->encrypt) {
5745 ++ scatterwalk_map_and_copy(req->iv, req->dst,
5746 ++ offset, ivsize, 0);
5747 ++ } else {
5748 ++ memcpy(req->iv, req_ctx->iv, ivsize);
5749 ++ memzero_explicit(req_ctx->iv, ivsize);
5750 ++ }
5751 ++ }
5752 +
5753 + if (req_ctx->dst) {
5754 + free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
5755 +@@ -876,6 +892,7 @@ static int ablk_perform(struct skcipher_request *req, int encrypt)
5756 + struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
5757 + struct buffer_desc src_hook;
5758 + struct device *dev = &pdev->dev;
5759 ++ unsigned int offset;
5760 + gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
5761 + GFP_KERNEL : GFP_ATOMIC;
5762 +
5763 +@@ -885,6 +902,7 @@ static int ablk_perform(struct skcipher_request *req, int encrypt)
5764 + return -EAGAIN;
5765 +
5766 + dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
5767 ++ req_ctx->encrypt = encrypt;
5768 +
5769 + crypt = get_crypt_desc();
5770 + if (!crypt)
5771 +@@ -900,6 +918,10 @@ static int ablk_perform(struct skcipher_request *req, int encrypt)
5772 +
5773 + BUG_ON(ivsize && !req->iv);
5774 + memcpy(crypt->iv, req->iv, ivsize);
5775 ++ if (ivsize > 0 && !encrypt) {
5776 ++ offset = req->cryptlen - ivsize;
5777 ++ scatterwalk_map_and_copy(req_ctx->iv, req->src, offset, ivsize, 0);
5778 ++ }
5779 + if (req->src != req->dst) {
5780 + struct buffer_desc dst_hook;
5781 + crypt->mode |= NPE_OP_NOT_IN_PLACE;
5782 +diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c
5783 +index cc8dd3072b8b7..9b2417ebc95a0 100644
5784 +--- a/drivers/crypto/nx/nx-842-pseries.c
5785 ++++ b/drivers/crypto/nx/nx-842-pseries.c
5786 +@@ -538,13 +538,15 @@ static int nx842_OF_set_defaults(struct nx842_devdata *devdata)
5787 + * The status field indicates if the device is enabled when the status
5788 + * is 'okay'. Otherwise the device driver will be disabled.
5789 + *
5790 +- * @prop - struct property point containing the maxsyncop for the update
5791 ++ * @devdata: struct nx842_devdata to use for dev_info
5792 ++ * @prop: struct property pointer containing the maxsyncop for the update
5793 + *
5794 + * Returns:
5795 + * 0 - Device is available
5796 + * -ENODEV - Device is not available
5797 + */
5798 +-static int nx842_OF_upd_status(struct property *prop)
5799 ++static int nx842_OF_upd_status(struct nx842_devdata *devdata,
5800 ++ struct property *prop)
5801 + {
5802 + const char *status = (const char *)prop->value;
5803 +
5804 +@@ -758,7 +760,7 @@ static int nx842_OF_upd(struct property *new_prop)
5805 + goto out;
5806 +
5807 + /* Perform property updates */
5808 +- ret = nx842_OF_upd_status(status);
5809 ++ ret = nx842_OF_upd_status(new_devdata, status);
5810 + if (ret)
5811 + goto error_out;
5812 +
5813 +@@ -1069,6 +1071,7 @@ static const struct vio_device_id nx842_vio_driver_ids[] = {
5814 + {"ibm,compression-v1", "ibm,compression"},
5815 + {"", ""},
5816 + };
5817 ++MODULE_DEVICE_TABLE(vio, nx842_vio_driver_ids);
5818 +
5819 + static struct vio_driver nx842_vio_driver = {
5820 + .name = KBUILD_MODNAME,
5821 +diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
5822 +index 13f518802343d..6120e350ff71d 100644
5823 +--- a/drivers/crypto/nx/nx-aes-ctr.c
5824 ++++ b/drivers/crypto/nx/nx-aes-ctr.c
5825 +@@ -118,7 +118,7 @@ static int ctr3686_aes_nx_crypt(struct skcipher_request *req)
5826 + struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
5827 + u8 iv[16];
5828 +
5829 +- memcpy(iv, nx_ctx->priv.ctr.nonce, CTR_RFC3686_IV_SIZE);
5830 ++ memcpy(iv, nx_ctx->priv.ctr.nonce, CTR_RFC3686_NONCE_SIZE);
5831 + memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->iv, CTR_RFC3686_IV_SIZE);
5832 + iv[12] = iv[13] = iv[14] = 0;
5833 + iv[15] = 1;
5834 +diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
5835 +index ae0d320d3c60d..dd53ad9987b0d 100644
5836 +--- a/drivers/crypto/omap-sham.c
5837 ++++ b/drivers/crypto/omap-sham.c
5838 +@@ -372,7 +372,7 @@ static int omap_sham_hw_init(struct omap_sham_dev *dd)
5839 + {
5840 + int err;
5841 +
5842 +- err = pm_runtime_get_sync(dd->dev);
5843 ++ err = pm_runtime_resume_and_get(dd->dev);
5844 + if (err < 0) {
5845 + dev_err(dd->dev, "failed to get sync: %d\n", err);
5846 + return err;
5847 +@@ -2244,7 +2244,7 @@ static int omap_sham_suspend(struct device *dev)
5848 +
5849 + static int omap_sham_resume(struct device *dev)
5850 + {
5851 +- int err = pm_runtime_get_sync(dev);
5852 ++ int err = pm_runtime_resume_and_get(dev);
5853 + if (err < 0) {
5854 + dev_err(dev, "failed to get sync: %d\n", err);
5855 + return err;
5856 +diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
5857 +index bd3028126cbe6..069f51621f0e8 100644
5858 +--- a/drivers/crypto/qat/qat_common/qat_hal.c
5859 ++++ b/drivers/crypto/qat/qat_common/qat_hal.c
5860 +@@ -1417,7 +1417,11 @@ static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
5861 + pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr);
5862 + return -EINVAL;
5863 + }
5864 +- qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval);
5865 ++ status = qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval);
5866 ++ if (status) {
5867 ++ pr_err("QAT: failed to read register\n");
5868 ++ return status;
5869 ++ }
5870 + gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum);
5871 + data16low = 0xffff & data;
5872 + data16hi = 0xffff & (data >> 0x10);
5873 +diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
5874 +index 1fb5fc852f6b8..6d95160e451e5 100644
5875 +--- a/drivers/crypto/qat/qat_common/qat_uclo.c
5876 ++++ b/drivers/crypto/qat/qat_common/qat_uclo.c
5877 +@@ -342,7 +342,6 @@ static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
5878 + return 0;
5879 + }
5880 +
5881 +-#define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000
5882 + static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
5883 + struct icp_qat_uof_initmem *init_mem)
5884 + {
5885 +diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
5886 +index c0a0d8c4fce19..8ff10928f581d 100644
5887 +--- a/drivers/crypto/qce/skcipher.c
5888 ++++ b/drivers/crypto/qce/skcipher.c
5889 +@@ -72,7 +72,7 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
5890 + struct scatterlist *sg;
5891 + bool diff_dst;
5892 + gfp_t gfp;
5893 +- int ret;
5894 ++ int dst_nents, src_nents, ret;
5895 +
5896 + rctx->iv = req->iv;
5897 + rctx->ivsize = crypto_skcipher_ivsize(skcipher);
5898 +@@ -123,21 +123,26 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
5899 + sg_mark_end(sg);
5900 + rctx->dst_sg = rctx->dst_tbl.sgl;
5901 +
5902 +- ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
5903 +- if (ret < 0)
5904 ++ dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
5905 ++ if (dst_nents < 0) {
5906 ++ ret = dst_nents;
5907 + goto error_free;
5908 ++ }
5909 +
5910 + if (diff_dst) {
5911 +- ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
5912 +- if (ret < 0)
5913 ++ src_nents = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
5914 ++ if (src_nents < 0) {
5915 ++ ret = src_nents;
5916 + goto error_unmap_dst;
5917 ++ }
5918 + rctx->src_sg = req->src;
5919 + } else {
5920 + rctx->src_sg = rctx->dst_sg;
5921 ++ src_nents = dst_nents - 1;
5922 + }
5923 +
5924 +- ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
5925 +- rctx->dst_sg, rctx->dst_nents,
5926 ++ ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, src_nents,
5927 ++ rctx->dst_sg, dst_nents,
5928 + qce_skcipher_done, async_req);
5929 + if (ret)
5930 + goto error_unmap_src;
5931 +diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
5932 +index 1c6929fb3a131..9f077ec9dbb7f 100644
5933 +--- a/drivers/crypto/sa2ul.c
5934 ++++ b/drivers/crypto/sa2ul.c
5935 +@@ -2300,9 +2300,9 @@ static int sa_dma_init(struct sa_crypto_data *dd)
5936 +
5937 + dd->dma_rx2 = dma_request_chan(dd->dev, "rx2");
5938 + if (IS_ERR(dd->dma_rx2)) {
5939 +- dma_release_channel(dd->dma_rx1);
5940 +- return dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx2),
5941 +- "Unable to request rx2 DMA channel\n");
5942 ++ ret = dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx2),
5943 ++ "Unable to request rx2 DMA channel\n");
5944 ++ goto err_dma_rx2;
5945 + }
5946 +
5947 + dd->dma_tx = dma_request_chan(dd->dev, "tx");
5948 +@@ -2323,28 +2323,31 @@ static int sa_dma_init(struct sa_crypto_data *dd)
5949 + if (ret) {
5950 + dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
5951 + ret);
5952 +- return ret;
5953 ++ goto err_dma_config;
5954 + }
5955 +
5956 + ret = dmaengine_slave_config(dd->dma_rx2, &cfg);
5957 + if (ret) {
5958 + dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
5959 + ret);
5960 +- return ret;
5961 ++ goto err_dma_config;
5962 + }
5963 +
5964 + ret = dmaengine_slave_config(dd->dma_tx, &cfg);
5965 + if (ret) {
5966 + dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
5967 + ret);
5968 +- return ret;
5969 ++ goto err_dma_config;
5970 + }
5971 +
5972 + return 0;
5973 +
5974 ++err_dma_config:
5975 ++ dma_release_channel(dd->dma_tx);
5976 + err_dma_tx:
5977 +- dma_release_channel(dd->dma_rx1);
5978 + dma_release_channel(dd->dma_rx2);
5979 ++err_dma_rx2:
5980 ++ dma_release_channel(dd->dma_rx1);
5981 +
5982 + return ret;
5983 + }
5984 +@@ -2385,7 +2388,6 @@ MODULE_DEVICE_TABLE(of, of_match);
5985 +
5986 + static int sa_ul_probe(struct platform_device *pdev)
5987 + {
5988 +- const struct of_device_id *match;
5989 + struct device *dev = &pdev->dev;
5990 + struct device_node *node = dev->of_node;
5991 + struct resource *res;
5992 +@@ -2397,6 +2399,10 @@ static int sa_ul_probe(struct platform_device *pdev)
5993 + if (!dev_data)
5994 + return -ENOMEM;
5995 +
5996 ++ dev_data->match_data = of_device_get_match_data(dev);
5997 ++ if (!dev_data->match_data)
5998 ++ return -ENODEV;
5999 ++
6000 + sa_k3_dev = dev;
6001 + dev_data->dev = dev;
6002 + dev_data->pdev = pdev;
6003 +@@ -2408,20 +2414,14 @@ static int sa_ul_probe(struct platform_device *pdev)
6004 + if (ret < 0) {
6005 + dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
6006 + ret);
6007 ++ pm_runtime_disable(dev);
6008 + return ret;
6009 + }
6010 +
6011 + sa_init_mem(dev_data);
6012 + ret = sa_dma_init(dev_data);
6013 + if (ret)
6014 +- goto disable_pm_runtime;
6015 +-
6016 +- match = of_match_node(of_match, dev->of_node);
6017 +- if (!match) {
6018 +- dev_err(dev, "No compatible match found\n");
6019 +- return -ENODEV;
6020 +- }
6021 +- dev_data->match_data = match->data;
6022 ++ goto destroy_dma_pool;
6023 +
6024 + spin_lock_init(&dev_data->scid_lock);
6025 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
6026 +@@ -2454,9 +2454,9 @@ release_dma:
6027 + dma_release_channel(dev_data->dma_rx1);
6028 + dma_release_channel(dev_data->dma_tx);
6029 +
6030 ++destroy_dma_pool:
6031 + dma_pool_destroy(dev_data->sc_pool);
6032 +
6033 +-disable_pm_runtime:
6034 + pm_runtime_put_sync(&pdev->dev);
6035 + pm_runtime_disable(&pdev->dev);
6036 +
6037 +diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
6038 +index ecb7412e84e3e..51a6e1a424349 100644
6039 +--- a/drivers/crypto/ux500/hash/hash_core.c
6040 ++++ b/drivers/crypto/ux500/hash/hash_core.c
6041 +@@ -1011,6 +1011,7 @@ static int hash_hw_final(struct ahash_request *req)
6042 + goto out;
6043 + }
6044 + } else if (req->nbytes == 0 && ctx->keylen > 0) {
6045 ++ ret = -EPERM;
6046 + dev_err(device_data->dev, "%s: Empty message with keylength > 0, NOT supported\n",
6047 + __func__);
6048 + goto out;
6049 +diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
6050 +index fe08c46642f7c..28f3e0ba6cdd9 100644
6051 +--- a/drivers/devfreq/devfreq.c
6052 ++++ b/drivers/devfreq/devfreq.c
6053 +@@ -823,6 +823,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
6054 + if (devfreq->profile->timer < 0
6055 + || devfreq->profile->timer >= DEVFREQ_TIMER_NUM) {
6056 + mutex_unlock(&devfreq->lock);
6057 ++ err = -EINVAL;
6058 + goto err_dev;
6059 + }
6060 +
6061 +diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c
6062 +index b094132bd20b3..fc09324a03e03 100644
6063 +--- a/drivers/devfreq/governor_passive.c
6064 ++++ b/drivers/devfreq/governor_passive.c
6065 +@@ -65,7 +65,7 @@ static int devfreq_passive_get_target_freq(struct devfreq *devfreq,
6066 + dev_pm_opp_put(p_opp);
6067 +
6068 + if (IS_ERR(opp))
6069 +- return PTR_ERR(opp);
6070 ++ goto no_required_opp;
6071 +
6072 + *freq = dev_pm_opp_get_freq(opp);
6073 + dev_pm_opp_put(opp);
6074 +@@ -73,6 +73,7 @@ static int devfreq_passive_get_target_freq(struct devfreq *devfreq,
6075 + return 0;
6076 + }
6077 +
6078 ++no_required_opp:
6079 + /*
6080 + * Get the OPP table's index of decided frequency by governor
6081 + * of parent device.
6082 +diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
6083 +index 1e836e320edd9..91164c5f0757d 100644
6084 +--- a/drivers/edac/Kconfig
6085 ++++ b/drivers/edac/Kconfig
6086 +@@ -270,7 +270,8 @@ config EDAC_PND2
6087 +
6088 + config EDAC_IGEN6
6089 + tristate "Intel client SoC Integrated MC"
6090 +- depends on PCI && X86_64 && PCI_MMCONFIG && ARCH_HAVE_NMI_SAFE_CMPXCHG
6091 ++ depends on PCI && PCI_MMCONFIG && ARCH_HAVE_NMI_SAFE_CMPXCHG
5092 ++ depends on X86_64 && X86_MCE_INTEL
6093 + help
6094 + Support for error detection and correction on the Intel
6095 + client SoC Integrated Memory Controller using In-Band ECC IP.
6096 +diff --git a/drivers/edac/aspeed_edac.c b/drivers/edac/aspeed_edac.c
6097 +index a46da56d6d544..6bd5f88159193 100644
6098 +--- a/drivers/edac/aspeed_edac.c
6099 ++++ b/drivers/edac/aspeed_edac.c
6100 +@@ -254,8 +254,8 @@ static int init_csrows(struct mem_ctl_info *mci)
6101 + return rc;
6102 + }
6103 +
6104 +- dev_dbg(mci->pdev, "dt: /memory node resources: first page r.start=0x%x, resource_size=0x%x, PAGE_SHIFT macro=0x%x\n",
6105 +- r.start, resource_size(&r), PAGE_SHIFT);
6106 ++ dev_dbg(mci->pdev, "dt: /memory node resources: first page %pR, PAGE_SHIFT macro=0x%x\n",
6107 ++ &r, PAGE_SHIFT);
6108 +
6109 + csrow->first_page = r.start >> PAGE_SHIFT;
6110 + nr_pages = resource_size(&r) >> PAGE_SHIFT;
6111 +diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
6112 +index 238a4ad1e526e..37b4e875420e4 100644
6113 +--- a/drivers/edac/i10nm_base.c
6114 ++++ b/drivers/edac/i10nm_base.c
6115 +@@ -278,6 +278,9 @@ static int __init i10nm_init(void)
6116 + if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
6117 + return -EBUSY;
6118 +
6119 ++ if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
6120 ++ return -ENODEV;
6121 ++
6122 + id = x86_match_cpu(i10nm_cpuids);
6123 + if (!id)
6124 + return -ENODEV;
6125 +diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
6126 +index 928f63a374c78..c94ca1f790c43 100644
6127 +--- a/drivers/edac/pnd2_edac.c
6128 ++++ b/drivers/edac/pnd2_edac.c
6129 +@@ -1554,6 +1554,9 @@ static int __init pnd2_init(void)
6130 + if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
6131 + return -EBUSY;
6132 +
6133 ++ if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
6134 ++ return -ENODEV;
6135 ++
6136 + id = x86_match_cpu(pnd2_cpuids);
6137 + if (!id)
6138 + return -ENODEV;
6139 +diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
6140 +index 93daa4297f2e0..4c626fcd4dcbb 100644
6141 +--- a/drivers/edac/sb_edac.c
6142 ++++ b/drivers/edac/sb_edac.c
6143 +@@ -3510,6 +3510,9 @@ static int __init sbridge_init(void)
6144 + if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
6145 + return -EBUSY;
6146 +
6147 ++ if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
6148 ++ return -ENODEV;
6149 ++
6150 + id = x86_match_cpu(sbridge_cpuids);
6151 + if (!id)
6152 + return -ENODEV;
6153 +diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
6154 +index 6a4f0b27c6545..4dbd46575bfb4 100644
6155 +--- a/drivers/edac/skx_base.c
6156 ++++ b/drivers/edac/skx_base.c
6157 +@@ -656,6 +656,9 @@ static int __init skx_init(void)
6158 + if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
6159 + return -EBUSY;
6160 +
6161 ++ if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
6162 ++ return -ENODEV;
6163 ++
6164 + id = x86_match_cpu(skx_cpuids);
6165 + if (!id)
6166 + return -ENODEV;
6167 +diff --git a/drivers/edac/ti_edac.c b/drivers/edac/ti_edac.c
6168 +index e7eae20f83d1d..169f96e51c293 100644
6169 +--- a/drivers/edac/ti_edac.c
6170 ++++ b/drivers/edac/ti_edac.c
6171 +@@ -197,6 +197,7 @@ static const struct of_device_id ti_edac_of_match[] = {
6172 + { .compatible = "ti,emif-dra7xx", .data = (void *)EMIF_TYPE_DRA7 },
6173 + {},
6174 + };
6175 ++MODULE_DEVICE_TABLE(of, ti_edac_of_match);
6176 +
6177 + static int _emif_get_id(struct device_node *node)
6178 + {
6179 +diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
6180 +index e1408075ef7d6..5c3cdb725514d 100644
6181 +--- a/drivers/extcon/extcon-max8997.c
6182 ++++ b/drivers/extcon/extcon-max8997.c
6183 +@@ -733,7 +733,7 @@ static int max8997_muic_probe(struct platform_device *pdev)
6184 + 2, info->status);
6185 + if (ret) {
6186 + dev_err(info->dev, "failed to read MUIC register\n");
6187 +- return ret;
6188 ++ goto err_irq;
6189 + }
6190 + cable_type = max8997_muic_get_cable_type(info,
6191 + MAX8997_CABLE_GROUP_ADC, &attached);
6192 +@@ -788,3 +788,4 @@ module_platform_driver(max8997_muic_driver);
6193 + MODULE_DESCRIPTION("Maxim MAX8997 Extcon driver");
6194 + MODULE_AUTHOR("Donggeun Kim <dg77.kim@×××××××.com>");
6195 + MODULE_LICENSE("GPL");
6196 ++MODULE_ALIAS("platform:max8997-muic");
6197 +diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
6198 +index db41d1c58efd5..c3e4b220e66fa 100644
6199 +--- a/drivers/extcon/extcon-sm5502.c
6200 ++++ b/drivers/extcon/extcon-sm5502.c
6201 +@@ -88,7 +88,6 @@ static struct reg_data sm5502_reg_data[] = {
6202 + | SM5502_REG_INTM2_MHL_MASK,
6203 + .invert = true,
6204 + },
6205 +- { }
6206 + };
6207 +
6208 + /* List of detectable cables */
6209 +diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
6210 +index 3aa489dba30a7..2a7687911c097 100644
6211 +--- a/drivers/firmware/stratix10-svc.c
6212 ++++ b/drivers/firmware/stratix10-svc.c
6213 +@@ -1034,24 +1034,32 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
6214 +
6215 + /* add svc client device(s) */
6216 + svc = devm_kzalloc(dev, sizeof(*svc), GFP_KERNEL);
6217 +- if (!svc)
6218 +- return -ENOMEM;
6219 ++ if (!svc) {
6220 ++ ret = -ENOMEM;
6221 ++ goto err_free_kfifo;
6222 ++ }
6223 +
6224 + svc->stratix10_svc_rsu = platform_device_alloc(STRATIX10_RSU, 0);
6225 + if (!svc->stratix10_svc_rsu) {
6226 + dev_err(dev, "failed to allocate %s device\n", STRATIX10_RSU);
6227 +- return -ENOMEM;
6228 ++ ret = -ENOMEM;
6229 ++ goto err_free_kfifo;
6230 + }
6231 +
6232 + ret = platform_device_add(svc->stratix10_svc_rsu);
6233 +- if (ret) {
6234 +- platform_device_put(svc->stratix10_svc_rsu);
6235 +- return ret;
6236 +- }
6237 ++ if (ret)
6238 ++ goto err_put_device;
6239 ++
6240 + dev_set_drvdata(dev, svc);
6241 +
6242 + pr_info("Intel Service Layer Driver Initialized\n");
6243 +
6244 ++ return 0;
6245 ++
6246 ++err_put_device:
6247 ++ platform_device_put(svc->stratix10_svc_rsu);
6248 ++err_free_kfifo:
6249 ++ kfifo_free(&controller->svc_fifo);
6250 + return ret;
6251 + }
6252 +
6253 +diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
6254 +index 4e60e84cd17a5..59ddc9fd5bca4 100644
6255 +--- a/drivers/fsi/fsi-core.c
6256 ++++ b/drivers/fsi/fsi-core.c
6257 +@@ -724,7 +724,7 @@ static ssize_t cfam_read(struct file *filep, char __user *buf, size_t count,
6258 + rc = count;
6259 + fail:
6260 + *offset = off;
6261 +- return count;
6262 ++ return rc;
6263 + }
6264 +
6265 + static ssize_t cfam_write(struct file *filep, const char __user *buf,
6266 +@@ -761,7 +761,7 @@ static ssize_t cfam_write(struct file *filep, const char __user *buf,
6267 + rc = count;
6268 + fail:
6269 + *offset = off;
6270 +- return count;
6271 ++ return rc;
6272 + }
6273 +
6274 + static loff_t cfam_llseek(struct file *file, loff_t offset, int whence)
6275 +diff --git a/drivers/fsi/fsi-occ.c b/drivers/fsi/fsi-occ.c
6276 +index 10ca2e290655b..cb05b6dacc9d5 100644
6277 +--- a/drivers/fsi/fsi-occ.c
6278 ++++ b/drivers/fsi/fsi-occ.c
6279 +@@ -495,6 +495,7 @@ int fsi_occ_submit(struct device *dev, const void *request, size_t req_len,
6280 + goto done;
6281 +
6282 + if (resp->return_status == OCC_RESP_CMD_IN_PRG ||
6283 ++ resp->return_status == OCC_RESP_CRIT_INIT ||
6284 + resp->seq_no != seq_no) {
6285 + rc = -ETIMEDOUT;
6286 +
6287 +diff --git a/drivers/fsi/fsi-sbefifo.c b/drivers/fsi/fsi-sbefifo.c
6288 +index bfd5e5da80209..84cb965bfed5c 100644
6289 +--- a/drivers/fsi/fsi-sbefifo.c
6290 ++++ b/drivers/fsi/fsi-sbefifo.c
6291 +@@ -325,7 +325,8 @@ static int sbefifo_up_write(struct sbefifo *sbefifo, __be32 word)
6292 + static int sbefifo_request_reset(struct sbefifo *sbefifo)
6293 + {
6294 + struct device *dev = &sbefifo->fsi_dev->dev;
6295 +- u32 status, timeout;
6296 ++ unsigned long end_time;
6297 ++ u32 status;
6298 + int rc;
6299 +
6300 + dev_dbg(dev, "Requesting FIFO reset\n");
6301 +@@ -341,7 +342,8 @@ static int sbefifo_request_reset(struct sbefifo *sbefifo)
6302 + }
6303 +
6304 + /* Wait for it to complete */
6305 +- for (timeout = 0; timeout < SBEFIFO_RESET_TIMEOUT; timeout++) {
6306 ++ end_time = jiffies + msecs_to_jiffies(SBEFIFO_RESET_TIMEOUT);
6307 ++ while (!time_after(jiffies, end_time)) {
6308 + rc = sbefifo_regr(sbefifo, SBEFIFO_UP | SBEFIFO_STS, &status);
6309 + if (rc) {
6310 + dev_err(dev, "Failed to read UP fifo status during reset"
6311 +@@ -355,7 +357,7 @@ static int sbefifo_request_reset(struct sbefifo *sbefifo)
6312 + return 0;
6313 + }
6314 +
6315 +- msleep(1);
6316 ++ cond_resched();
6317 + }
6318 + dev_err(dev, "FIFO reset timed out\n");
6319 +
6320 +@@ -400,7 +402,7 @@ static int sbefifo_cleanup_hw(struct sbefifo *sbefifo)
6321 + /* The FIFO already contains a reset request from the SBE ? */
6322 + if (down_status & SBEFIFO_STS_RESET_REQ) {
6323 + dev_info(dev, "Cleanup: FIFO reset request set, resetting\n");
6324 +- rc = sbefifo_regw(sbefifo, SBEFIFO_UP, SBEFIFO_PERFORM_RESET);
6325 ++ rc = sbefifo_regw(sbefifo, SBEFIFO_DOWN, SBEFIFO_PERFORM_RESET);
6326 + if (rc) {
6327 + sbefifo->broken = true;
6328 + dev_err(dev, "Cleanup: Reset reg write failed, rc=%d\n", rc);
6329 +diff --git a/drivers/fsi/fsi-scom.c b/drivers/fsi/fsi-scom.c
6330 +index b45bfab7b7f55..75d1389e2626d 100644
6331 +--- a/drivers/fsi/fsi-scom.c
6332 ++++ b/drivers/fsi/fsi-scom.c
6333 +@@ -38,9 +38,10 @@
6334 + #define SCOM_STATUS_PIB_RESP_MASK 0x00007000
6335 + #define SCOM_STATUS_PIB_RESP_SHIFT 12
6336 +
6337 +-#define SCOM_STATUS_ANY_ERR (SCOM_STATUS_PROTECTION | \
6338 +- SCOM_STATUS_PARITY | \
6339 +- SCOM_STATUS_PIB_ABORT | \
6340 ++#define SCOM_STATUS_FSI2PIB_ERROR (SCOM_STATUS_PROTECTION | \
6341 ++ SCOM_STATUS_PARITY | \
6342 ++ SCOM_STATUS_PIB_ABORT)
6343 ++#define SCOM_STATUS_ANY_ERR (SCOM_STATUS_FSI2PIB_ERROR | \
6344 + SCOM_STATUS_PIB_RESP_MASK)
6345 + /* SCOM address encodings */
6346 + #define XSCOM_ADDR_IND_FLAG BIT_ULL(63)
6347 +@@ -240,13 +241,14 @@ static int handle_fsi2pib_status(struct scom_device *scom, uint32_t status)
6348 + {
6349 + uint32_t dummy = -1;
6350 +
6351 +- if (status & SCOM_STATUS_PROTECTION)
6352 +- return -EPERM;
6353 +- if (status & SCOM_STATUS_PARITY) {
6354 ++ if (status & SCOM_STATUS_FSI2PIB_ERROR)
6355 + fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy,
6356 + sizeof(uint32_t));
6357 ++
6358 ++ if (status & SCOM_STATUS_PROTECTION)
6359 ++ return -EPERM;
6360 ++ if (status & SCOM_STATUS_PARITY)
6361 + return -EIO;
6362 +- }
6363 + /* Return -EBUSY on PIB abort to force a retry */
6364 + if (status & SCOM_STATUS_PIB_ABORT)
6365 + return -EBUSY;
6366 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
6367 +index 652cc1a0e450f..2b2d7b9f26f16 100644
6368 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
6369 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
6370 +@@ -28,6 +28,7 @@
6371 +
6372 + #include "dm_services_types.h"
6373 + #include "dc.h"
6374 ++#include "dc_link_dp.h"
6375 + #include "dc/inc/core_types.h"
6376 + #include "dal_asic_id.h"
6377 + #include "dmub/dmub_srv.h"
6378 +@@ -2696,6 +2697,7 @@ static void handle_hpd_rx_irq(void *param)
6379 + enum dc_connection_type new_connection_type = dc_connection_none;
6380 + struct amdgpu_device *adev = drm_to_adev(dev);
6381 + union hpd_irq_data hpd_irq_data;
6382 ++ bool lock_flag = false;
6383 +
6384 + memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
6385 +
6386 +@@ -2726,13 +2728,28 @@ static void handle_hpd_rx_irq(void *param)
6387 + }
6388 + }
6389 +
6390 +- mutex_lock(&adev->dm.dc_lock);
6391 ++ /*
6392 ++ * TODO: We need the lock to avoid touching DC state while it's being
6393 ++ * modified during automated compliance testing, or when link loss
6394 ++ * happens. While this should be split into subhandlers and proper
6395 ++ * interfaces to avoid having to conditionally lock like this in the
6396 ++ * outer layer, we need this workaround temporarily to allow MST
6397 ++ * lightup in some scenarios to avoid timeout.
6398 ++ */
6399 ++ if (!amdgpu_in_reset(adev) &&
6400 ++ (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
6401 ++ hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
6402 ++ mutex_lock(&adev->dm.dc_lock);
6403 ++ lock_flag = true;
6404 ++ }
6405 ++
6406 + #ifdef CONFIG_DRM_AMD_DC_HDCP
6407 + result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
6408 + #else
6409 + result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
6410 + #endif
6411 +- mutex_unlock(&adev->dm.dc_lock);
6412 ++ if (!amdgpu_in_reset(adev) && lock_flag)
6413 ++ mutex_unlock(&adev->dm.dc_lock);
6414 +
6415 + out:
6416 + if (result && !is_mst_root_connector) {
6417 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
6418 +index 9b221db526dc9..d62460b69d954 100644
6419 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
6420 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
6421 +@@ -278,6 +278,9 @@ dm_dp_mst_detect(struct drm_connector *connector,
6422 + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6423 + struct amdgpu_dm_connector *master = aconnector->mst_port;
6424 +
6425 ++ if (drm_connector_is_unregistered(connector))
6426 ++ return connector_status_disconnected;
6427 ++
6428 + return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
6429 + aconnector->port);
6430 + }
6431 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
6432 +index 3ff3d9e909837..72bd7bc681a81 100644
6433 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
6434 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
6435 +@@ -1976,7 +1976,7 @@ enum dc_status read_hpd_rx_irq_data(
6436 + return retval;
6437 + }
6438 +
6439 +-static bool hpd_rx_irq_check_link_loss_status(
6440 ++bool hpd_rx_irq_check_link_loss_status(
6441 + struct dc_link *link,
6442 + union hpd_irq_data *hpd_irq_dpcd_data)
6443 + {
6444 +diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
6445 +index 3ae05c96d5572..a9c0c7f7a55dc 100644
6446 +--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
6447 ++++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
6448 +@@ -67,6 +67,10 @@ bool perform_link_training_with_retries(
6449 + struct pipe_ctx *pipe_ctx,
6450 + enum signal_type signal);
6451 +
6452 ++bool hpd_rx_irq_check_link_loss_status(
6453 ++ struct dc_link *link,
6454 ++ union hpd_irq_data *hpd_irq_dpcd_data);
6455 ++
6456 + bool is_mst_supported(struct dc_link *link);
6457 +
6458 + bool detect_dp_sink_caps(struct dc_link *link);
6459 +diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
6460 +index 0ac3c2039c4b1..c29cc7f19863a 100644
6461 +--- a/drivers/gpu/drm/ast/ast_main.c
6462 ++++ b/drivers/gpu/drm/ast/ast_main.c
6463 +@@ -413,7 +413,7 @@ struct ast_private *ast_device_create(const struct drm_driver *drv,
6464 +
6465 + pci_set_drvdata(pdev, dev);
6466 +
6467 +- ast->regs = pci_iomap(pdev, 1, 0);
6468 ++ ast->regs = pcim_iomap(pdev, 1, 0);
6469 + if (!ast->regs)
6470 + return ERR_PTR(-EIO);
6471 +
6472 +@@ -429,7 +429,7 @@ struct ast_private *ast_device_create(const struct drm_driver *drv,
6473 +
6474 + /* "map" IO regs if the above hasn't done so already */
6475 + if (!ast->ioregs) {
6476 +- ast->ioregs = pci_iomap(pdev, 2, 0);
6477 ++ ast->ioregs = pcim_iomap(pdev, 2, 0);
6478 + if (!ast->ioregs)
6479 + return ERR_PTR(-EIO);
6480 + }
6481 +diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
6482 +index 400193e38d298..9ce8438fb58b9 100644
6483 +--- a/drivers/gpu/drm/bridge/Kconfig
6484 ++++ b/drivers/gpu/drm/bridge/Kconfig
6485 +@@ -68,6 +68,7 @@ config DRM_LONTIUM_LT8912B
6486 + select DRM_KMS_HELPER
6487 + select DRM_MIPI_DSI
6488 + select REGMAP_I2C
6489 ++ select VIDEOMODE_HELPERS
6490 + help
6491 + Driver for Lontium LT8912B DSI to HDMI bridge
6492 + chip driver.
6493 +@@ -172,7 +173,7 @@ config DRM_SIL_SII8620
6494 + tristate "Silicon Image SII8620 HDMI/MHL bridge"
6495 + depends on OF
6496 + select DRM_KMS_HELPER
6497 +- imply EXTCON
6498 ++ select EXTCON
6499 + depends on RC_CORE || !RC_CORE
6500 + help
6501 + Silicon Image SII8620 HDMI/MHL bridge chip driver.
6502 +diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
6503 +index 23283ba0c4f93..b4e349ca38fe3 100644
6504 +--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
6505 ++++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
6506 +@@ -893,7 +893,7 @@ static void anx7625_power_on(struct anx7625_data *ctx)
6507 + usleep_range(2000, 2100);
6508 + }
6509 +
6510 +- usleep_range(4000, 4100);
6511 ++ usleep_range(11000, 12000);
6512 +
6513 + /* Power on pin enable */
6514 + gpiod_set_value(ctx->pdata.gpio_p_on, 1);
6515 +diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
6516 +index 64f0effb52ac1..044acd07c1538 100644
6517 +--- a/drivers/gpu/drm/drm_bridge.c
6518 ++++ b/drivers/gpu/drm/drm_bridge.c
6519 +@@ -522,6 +522,9 @@ void drm_bridge_chain_pre_enable(struct drm_bridge *bridge)
6520 + list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
6521 + if (iter->funcs->pre_enable)
6522 + iter->funcs->pre_enable(iter);
6523 ++
6524 ++ if (iter == bridge)
6525 ++ break;
6526 + }
6527 + }
6528 + EXPORT_SYMBOL(drm_bridge_chain_pre_enable);
6529 +diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
6530 +index 7ffd7b570b54d..538682f882b16 100644
6531 +--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
6532 ++++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
6533 +@@ -1082,7 +1082,6 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
6534 + struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
6535 + const struct drm_framebuffer *fb = plane_state->hw.fb;
6536 + unsigned int rotation = plane_state->hw.rotation;
6537 +- struct drm_format_name_buf format_name;
6538 +
6539 + if (!fb)
6540 + return 0;
6541 +@@ -1130,9 +1129,8 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
6542 + case DRM_FORMAT_XVYU12_16161616:
6543 + case DRM_FORMAT_XVYU16161616:
6544 + drm_dbg_kms(&dev_priv->drm,
6545 +- "Unsupported pixel format %s for 90/270!\n",
6546 +- drm_get_format_name(fb->format->format,
6547 +- &format_name));
6548 ++ "Unsupported pixel format %p4cc for 90/270!\n",
6549 ++ &fb->format->format);
6550 + return -EINVAL;
6551 + default:
6552 + break;
6553 +diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
6554 +index 1081cd36a2bd3..1e5d59a776b81 100644
6555 +--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
6556 ++++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
6557 +@@ -551,6 +551,32 @@ static int live_pin_rewind(void *arg)
6558 + return err;
6559 + }
6560 +
6561 ++static int engine_lock_reset_tasklet(struct intel_engine_cs *engine)
6562 ++{
6563 ++ tasklet_disable(&engine->execlists.tasklet);
6564 ++ local_bh_disable();
6565 ++
6566 ++ if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
6567 ++ &engine->gt->reset.flags)) {
6568 ++ local_bh_enable();
6569 ++ tasklet_enable(&engine->execlists.tasklet);
6570 ++
6571 ++ intel_gt_set_wedged(engine->gt);
6572 ++ return -EBUSY;
6573 ++ }
6574 ++
6575 ++ return 0;
6576 ++}
6577 ++
6578 ++static void engine_unlock_reset_tasklet(struct intel_engine_cs *engine)
6579 ++{
6580 ++ clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
6581 ++ &engine->gt->reset.flags);
6582 ++
6583 ++ local_bh_enable();
6584 ++ tasklet_enable(&engine->execlists.tasklet);
6585 ++}
6586 ++
6587 + static int live_hold_reset(void *arg)
6588 + {
6589 + struct intel_gt *gt = arg;
6590 +@@ -598,15 +624,9 @@ static int live_hold_reset(void *arg)
6591 +
6592 + /* We have our request executing, now remove it and reset */
6593 +
6594 +- local_bh_disable();
6595 +- if (test_and_set_bit(I915_RESET_ENGINE + id,
6596 +- &gt->reset.flags)) {
6597 +- local_bh_enable();
6598 +- intel_gt_set_wedged(gt);
6599 +- err = -EBUSY;
6600 ++ err = engine_lock_reset_tasklet(engine);
6601 ++ if (err)
6602 + goto out;
6603 +- }
6604 +- tasklet_disable(&engine->execlists.tasklet);
6605 +
6606 + engine->execlists.tasklet.callback(&engine->execlists.tasklet);
6607 + GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
6608 +@@ -618,10 +638,7 @@ static int live_hold_reset(void *arg)
6609 + __intel_engine_reset_bh(engine, NULL);
6610 + GEM_BUG_ON(rq->fence.error != -EIO);
6611 +
6612 +- tasklet_enable(&engine->execlists.tasklet);
6613 +- clear_and_wake_up_bit(I915_RESET_ENGINE + id,
6614 +- &gt->reset.flags);
6615 +- local_bh_enable();
6616 ++ engine_unlock_reset_tasklet(engine);
6617 +
6618 + /* Check that we do not resubmit the held request */
6619 + if (!i915_request_wait(rq, 0, HZ / 5)) {
6620 +@@ -4585,15 +4602,9 @@ static int reset_virtual_engine(struct intel_gt *gt,
6621 + GEM_BUG_ON(engine == ve->engine);
6622 +
6623 + /* Take ownership of the reset and tasklet */
6624 +- local_bh_disable();
6625 +- if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
6626 +- &gt->reset.flags)) {
6627 +- local_bh_enable();
6628 +- intel_gt_set_wedged(gt);
6629 +- err = -EBUSY;
6630 ++ err = engine_lock_reset_tasklet(engine);
6631 ++ if (err)
6632 + goto out_heartbeat;
6633 +- }
6634 +- tasklet_disable(&engine->execlists.tasklet);
6635 +
6636 + engine->execlists.tasklet.callback(&engine->execlists.tasklet);
6637 + GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
6638 +@@ -4612,9 +4623,7 @@ static int reset_virtual_engine(struct intel_gt *gt,
6639 + GEM_BUG_ON(rq->fence.error != -EIO);
6640 +
6641 + /* Release our grasp on the engine, letting CS flow again */
6642 +- tasklet_enable(&engine->execlists.tasklet);
6643 +- clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags);
6644 +- local_bh_enable();
6645 ++ engine_unlock_reset_tasklet(engine);
6646 +
6647 + /* Check that we do not resubmit the held request */
6648 + i915_request_get(rq);
6649 +diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
6650 +index fa5009705365e..233310712deb0 100644
6651 +--- a/drivers/gpu/drm/imx/ipuv3-plane.c
6652 ++++ b/drivers/gpu/drm/imx/ipuv3-plane.c
6653 +@@ -35,7 +35,7 @@ static inline struct ipu_plane *to_ipu_plane(struct drm_plane *p)
6654 + return container_of(p, struct ipu_plane, base);
6655 + }
6656 +
6657 +-static const uint32_t ipu_plane_formats[] = {
6658 ++static const uint32_t ipu_plane_all_formats[] = {
6659 + DRM_FORMAT_ARGB1555,
6660 + DRM_FORMAT_XRGB1555,
6661 + DRM_FORMAT_ABGR1555,
6662 +@@ -72,6 +72,31 @@ static const uint32_t ipu_plane_formats[] = {
6663 + DRM_FORMAT_BGRX8888_A8,
6664 + };
6665 +
6666 ++static const uint32_t ipu_plane_rgb_formats[] = {
6667 ++ DRM_FORMAT_ARGB1555,
6668 ++ DRM_FORMAT_XRGB1555,
6669 ++ DRM_FORMAT_ABGR1555,
6670 ++ DRM_FORMAT_XBGR1555,
6671 ++ DRM_FORMAT_RGBA5551,
6672 ++ DRM_FORMAT_BGRA5551,
6673 ++ DRM_FORMAT_ARGB4444,
6674 ++ DRM_FORMAT_ARGB8888,
6675 ++ DRM_FORMAT_XRGB8888,
6676 ++ DRM_FORMAT_ABGR8888,
6677 ++ DRM_FORMAT_XBGR8888,
6678 ++ DRM_FORMAT_RGBA8888,
6679 ++ DRM_FORMAT_RGBX8888,
6680 ++ DRM_FORMAT_BGRA8888,
6681 ++ DRM_FORMAT_BGRX8888,
6682 ++ DRM_FORMAT_RGB565,
6683 ++ DRM_FORMAT_RGB565_A8,
6684 ++ DRM_FORMAT_BGR565_A8,
6685 ++ DRM_FORMAT_RGB888_A8,
6686 ++ DRM_FORMAT_BGR888_A8,
6687 ++ DRM_FORMAT_RGBX8888_A8,
6688 ++ DRM_FORMAT_BGRX8888_A8,
6689 ++};
6690 ++
6691 + static const uint64_t ipu_format_modifiers[] = {
6692 + DRM_FORMAT_MOD_LINEAR,
6693 + DRM_FORMAT_MOD_INVALID
6694 +@@ -320,10 +345,11 @@ static bool ipu_plane_format_mod_supported(struct drm_plane *plane,
6695 + if (modifier == DRM_FORMAT_MOD_LINEAR)
6696 + return true;
6697 +
6698 +- /* without a PRG there are no supported modifiers */
6699 +- if (!ipu_prg_present(ipu))
6700 +- return false;
6701 +-
6702 ++ /*
6703 ++ * Without a PRG the possible modifiers list only includes the linear
6704 ++ * modifier, so we always take the early return from this function and
6705 ++ * only end up here if the PRG is present.
6706 ++ */
6707 + return ipu_prg_format_supported(ipu, format, modifier);
6708 + }
6709 +
6710 +@@ -830,16 +856,28 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
6711 + struct ipu_plane *ipu_plane;
6712 + const uint64_t *modifiers = ipu_format_modifiers;
6713 + unsigned int zpos = (type == DRM_PLANE_TYPE_PRIMARY) ? 0 : 1;
6714 ++ unsigned int format_count;
6715 ++ const uint32_t *formats;
6716 + int ret;
6717 +
6718 + DRM_DEBUG_KMS("channel %d, dp flow %d, possible_crtcs=0x%x\n",
6719 + dma, dp, possible_crtcs);
6720 +
6721 ++ if (dp == IPU_DP_FLOW_SYNC_BG || dp == IPU_DP_FLOW_SYNC_FG) {
6722 ++ formats = ipu_plane_all_formats;
6723 ++ format_count = ARRAY_SIZE(ipu_plane_all_formats);
6724 ++ } else {
6725 ++ formats = ipu_plane_rgb_formats;
6726 ++ format_count = ARRAY_SIZE(ipu_plane_rgb_formats);
6727 ++ }
6728 ++
6729 ++ if (ipu_prg_present(ipu))
6730 ++ modifiers = pre_format_modifiers;
6731 ++
6732 + ipu_plane = drmm_universal_plane_alloc(dev, struct ipu_plane, base,
6733 + possible_crtcs, &ipu_plane_funcs,
6734 +- ipu_plane_formats,
6735 +- ARRAY_SIZE(ipu_plane_formats),
6736 +- modifiers, type, NULL);
6737 ++ formats, format_count, modifiers,
6738 ++ type, NULL);
6739 + if (IS_ERR(ipu_plane)) {
6740 + DRM_ERROR("failed to allocate and initialize %s plane\n",
6741 + zpos ? "overlay" : "primary");
6742 +@@ -850,9 +888,6 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
6743 + ipu_plane->dma = dma;
6744 + ipu_plane->dp_flow = dp;
6745 +
6746 +- if (ipu_prg_present(ipu))
6747 +- modifiers = pre_format_modifiers;
6748 +-
6749 + drm_plane_helper_add(&ipu_plane->base, &ipu_plane_helper_funcs);
6750 +
6751 + if (dp == IPU_DP_FLOW_SYNC_BG || dp == IPU_DP_FLOW_SYNC_FG)
6752 +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
6753 +index 18bc76b7f1a33..4523d6ba891b5 100644
6754 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
6755 ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
6756 +@@ -407,9 +407,6 @@ static void dpu_crtc_frame_event_work(struct kthread_work *work)
6757 + fevent->event);
6758 + }
6759 +
6760 +- if (fevent->event & DPU_ENCODER_FRAME_EVENT_DONE)
6761 +- dpu_core_perf_crtc_update(crtc, 0, false);
6762 +-
6763 + if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
6764 + | DPU_ENCODER_FRAME_EVENT_ERROR))
6765 + frame_done = true;
6766 +@@ -477,6 +474,7 @@ static void dpu_crtc_frame_event_cb(void *data, u32 event)
6767 + void dpu_crtc_complete_commit(struct drm_crtc *crtc)
6768 + {
6769 + trace_dpu_crtc_complete_commit(DRMID(crtc));
6770 ++ dpu_core_perf_crtc_update(crtc, 0, false);
6771 + _dpu_crtc_complete_flip(crtc);
6772 + }
6773 +
6774 +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
6775 +index 06b56fec04e04..6b0a7bc87eb75 100644
6776 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
6777 ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
6778 +@@ -225,7 +225,7 @@ int dpu_mdss_init(struct drm_device *dev)
6779 + struct msm_drm_private *priv = dev->dev_private;
6780 + struct dpu_mdss *dpu_mdss;
6781 + struct dss_module_power *mp;
6782 +- int ret = 0;
6783 ++ int ret;
6784 + int irq;
6785 +
6786 + dpu_mdss = devm_kzalloc(dev->dev, sizeof(*dpu_mdss), GFP_KERNEL);
6787 +@@ -253,8 +253,10 @@ int dpu_mdss_init(struct drm_device *dev)
6788 + goto irq_domain_error;
6789 +
6790 + irq = platform_get_irq(pdev, 0);
6791 +- if (irq < 0)
6792 ++ if (irq < 0) {
6793 ++ ret = irq;
6794 + goto irq_error;
6795 ++ }
6796 +
6797 + irq_set_chained_handler_and_data(irq, dpu_mdss_irq,
6798 + dpu_mdss);
6799 +@@ -263,7 +265,7 @@ int dpu_mdss_init(struct drm_device *dev)
6800 +
6801 + pm_runtime_enable(dev->dev);
6802 +
6803 +- return ret;
6804 ++ return 0;
6805 +
6806 + irq_error:
6807 + _dpu_mdss_irq_domain_fini(dpu_mdss);
6808 +diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
6809 +index b1a9b1b98f5f6..f4f53f23e331e 100644
6810 +--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
6811 ++++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
6812 +@@ -582,10 +582,9 @@ void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog)
6813 +
6814 + u32 reftimer = dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER);
6815 +
6816 +- /* enable HPD interrupts */
6817 ++ /* enable HPD plug and unplug interrupts */
6818 + dp_catalog_hpd_config_intr(dp_catalog,
6819 +- DP_DP_HPD_PLUG_INT_MASK | DP_DP_IRQ_HPD_INT_MASK
6820 +- | DP_DP_HPD_UNPLUG_INT_MASK | DP_DP_HPD_REPLUG_INT_MASK, true);
6821 ++ DP_DP_HPD_PLUG_INT_MASK | DP_DP_HPD_UNPLUG_INT_MASK, true);
6822 +
6823 + /* Configure REFTIMER and enable it */
6824 + reftimer |= DP_DP_HPD_REFTIMER_ENABLE;
6825 +diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
6826 +index 1390f3547fde4..2a8955ca70d1a 100644
6827 +--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
6828 ++++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
6829 +@@ -1809,6 +1809,61 @@ end:
6830 + return ret;
6831 + }
6832 +
6833 ++int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl)
6834 ++{
6835 ++ struct dp_ctrl_private *ctrl;
6836 ++ struct dp_io *dp_io;
6837 ++ struct phy *phy;
6838 ++ int ret;
6839 ++
6840 ++ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
6841 ++ dp_io = &ctrl->parser->io;
6842 ++ phy = dp_io->phy;
6843 ++
6844 ++ /* set dongle to D3 (power off) mode */
6845 ++ dp_link_psm_config(ctrl->link, &ctrl->panel->link_info, true);
6846 ++
6847 ++ dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
6848 ++
6849 ++ ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, false);
6850 ++ if (ret) {
6851 ++ DRM_ERROR("Failed to disable pixel clocks. ret=%d\n", ret);
6852 ++ return ret;
6853 ++ }
6854 ++
6855 ++ ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false);
6856 ++ if (ret) {
6857 ++ DRM_ERROR("Failed to disable link clocks. ret=%d\n", ret);
6858 ++ return ret;
6859 ++ }
6860 ++
6861 ++ phy_power_off(phy);
6862 ++
6863 ++ /* aux channel down, reinit phy */
6864 ++ phy_exit(phy);
6865 ++ phy_init(phy);
6866 ++
6867 ++ DRM_DEBUG_DP("DP off link/stream done\n");
6868 ++ return ret;
6869 ++}
6870 ++
6871 ++void dp_ctrl_off_phy(struct dp_ctrl *dp_ctrl)
6872 ++{
6873 ++ struct dp_ctrl_private *ctrl;
6874 ++ struct dp_io *dp_io;
6875 ++ struct phy *phy;
6876 ++
6877 ++ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
6878 ++ dp_io = &ctrl->parser->io;
6879 ++ phy = dp_io->phy;
6880 ++
6881 ++ dp_catalog_ctrl_reset(ctrl->catalog);
6882 ++
6883 ++ phy_exit(phy);
6884 ++
6885 ++ DRM_DEBUG_DP("DP off phy done\n");
6886 ++}
6887 ++
6888 + int dp_ctrl_off(struct dp_ctrl *dp_ctrl)
6889 + {
6890 + struct dp_ctrl_private *ctrl;
6891 +diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
6892 +index a836bd358447c..25e4f75122522 100644
6893 +--- a/drivers/gpu/drm/msm/dp/dp_ctrl.h
6894 ++++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
6895 +@@ -23,6 +23,8 @@ int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip, bool reset);
6896 + void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl);
6897 + int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl);
6898 + int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl);
6899 ++int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl);
6900 ++void dp_ctrl_off_phy(struct dp_ctrl *dp_ctrl);
6901 + int dp_ctrl_off(struct dp_ctrl *dp_ctrl);
6902 + void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl);
6903 + void dp_ctrl_isr(struct dp_ctrl *dp_ctrl);
6904 +diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
6905 +index 1784e119269b7..cdec0a367a2cb 100644
6906 +--- a/drivers/gpu/drm/msm/dp/dp_display.c
6907 ++++ b/drivers/gpu/drm/msm/dp/dp_display.c
6908 +@@ -346,6 +346,12 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp)
6909 + dp->dp_display.max_pclk_khz = DP_MAX_PIXEL_CLK_KHZ;
6910 + dp->dp_display.max_dp_lanes = dp->parser->max_dp_lanes;
6911 +
6912 ++ /*
6913 ++ * set sink to normal operation mode -- D0
6914 ++ * before dpcd read
6915 ++ */
6916 ++ dp_link_psm_config(dp->link, &dp->panel->link_info, false);
6917 ++
6918 + dp_link_reset_phy_params_vx_px(dp->link);
6919 + rc = dp_ctrl_on_link(dp->ctrl);
6920 + if (rc) {
6921 +@@ -414,11 +420,6 @@ static int dp_display_usbpd_configure_cb(struct device *dev)
6922 +
6923 + dp_display_host_init(dp, false);
6924 +
6925 +- /*
6926 +- * set sink to normal operation mode -- D0
6927 +- * before dpcd read
6928 +- */
6929 +- dp_link_psm_config(dp->link, &dp->panel->link_info, false);
6930 + rc = dp_display_process_hpd_high(dp);
6931 + end:
6932 + return rc;
6933 +@@ -579,6 +580,10 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
6934 + dp_add_event(dp, EV_CONNECT_PENDING_TIMEOUT, 0, tout);
6935 + }
6936 +
6937 ++ /* enable HPD irq_hpd/replug interrupts */
6938 ++ dp_catalog_hpd_config_intr(dp->catalog,
6939 ++ DP_DP_IRQ_HPD_INT_MASK | DP_DP_HPD_REPLUG_INT_MASK, true);
6940 ++
6941 + mutex_unlock(&dp->event_mutex);
6942 +
6943 + /* uevent will complete connection part */
6944 +@@ -628,7 +633,26 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
6945 + mutex_lock(&dp->event_mutex);
6946 +
6947 + state = dp->hpd_state;
6948 +- if (state == ST_DISCONNECT_PENDING || state == ST_DISCONNECTED) {
6949 ++
6950 ++ /* disable irq_hpd/replug interrupts */
6951 ++ dp_catalog_hpd_config_intr(dp->catalog,
6952 ++ DP_DP_IRQ_HPD_INT_MASK | DP_DP_HPD_REPLUG_INT_MASK, false);
6953 ++
6954 ++ /* unplugged, no more irq_hpd handling */
6955 ++ dp_del_event(dp, EV_IRQ_HPD_INT);
6956 ++
6957 ++ if (state == ST_DISCONNECTED) {
6958 ++ /* triggered by irq_hpd with sink_count = 0 */
6959 ++ if (dp->link->sink_count == 0) {
6960 ++ dp_ctrl_off_phy(dp->ctrl);
6961 ++ hpd->hpd_high = 0;
6962 ++ dp->core_initialized = false;
6963 ++ }
6964 ++ mutex_unlock(&dp->event_mutex);
6965 ++ return 0;
6966 ++ }
6967 ++
6968 ++ if (state == ST_DISCONNECT_PENDING) {
6969 + mutex_unlock(&dp->event_mutex);
6970 + return 0;
6971 + }
6972 +@@ -642,9 +666,8 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
6973 +
6974 + dp->hpd_state = ST_DISCONNECT_PENDING;
6975 +
6976 +- /* disable HPD plug interrupt until disconnect is done */
6977 +- dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK
6978 +- | DP_DP_IRQ_HPD_INT_MASK, false);
6979 ++ /* disable HPD plug interrupts */
6980 ++ dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK, false);
6981 +
6982 + hpd->hpd_high = 0;
6983 +
6984 +@@ -660,8 +683,8 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
6985 + /* signal the disconnect event early to ensure proper teardown */
6986 + dp_display_handle_plugged_change(g_dp_display, false);
6987 +
6988 +- dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK |
6989 +- DP_DP_IRQ_HPD_INT_MASK, true);
6990 ++ /* enable HPD plug interrupt to prepare for the next plug-in */
6991 ++ dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK, true);
6992 +
6993 + /* uevent will complete disconnection part */
6994 + mutex_unlock(&dp->event_mutex);
6995 +@@ -692,7 +715,7 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
6996 +
6997 + /* irq_hpd can happen at either connected or disconnected state */
6998 + state = dp->hpd_state;
6999 +- if (state == ST_DISPLAY_OFF) {
7000 ++ if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) {
7001 + mutex_unlock(&dp->event_mutex);
7002 + return 0;
7003 + }
7004 +@@ -910,9 +933,13 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
7005 +
7006 + dp_display->audio_enabled = false;
7007 +
7008 +- dp_ctrl_off(dp->ctrl);
7009 +-
7010 +- dp->core_initialized = false;
7011 ++ /* triggered by irq_hpd with sink_count = 0 */
7012 ++ if (dp->link->sink_count == 0) {
7013 ++ dp_ctrl_off_link_stream(dp->ctrl);
7014 ++ } else {
7015 ++ dp_ctrl_off(dp->ctrl);
7016 ++ dp->core_initialized = false;
7017 ++ }
7018 +
7019 + dp_display->power_on = false;
7020 +
7021 +diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
7022 +index fe7d17cd35ecd..afd555b0c105e 100644
7023 +--- a/drivers/gpu/drm/msm/msm_drv.c
7024 ++++ b/drivers/gpu/drm/msm/msm_drv.c
7025 +@@ -523,6 +523,7 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
7026 + priv->event_thread[i].worker = kthread_create_worker(0,
7027 + "crtc_event:%d", priv->event_thread[i].crtc_id);
7028 + if (IS_ERR(priv->event_thread[i].worker)) {
7029 ++ ret = PTR_ERR(priv->event_thread[i].worker);
7030 + DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
7031 + goto err_msm_uninit;
7032 + }
7033 +diff --git a/drivers/gpu/drm/pl111/Kconfig b/drivers/gpu/drm/pl111/Kconfig
7034 +index 80f6748055e36..3aae387a96af2 100644
7035 +--- a/drivers/gpu/drm/pl111/Kconfig
7036 ++++ b/drivers/gpu/drm/pl111/Kconfig
7037 +@@ -3,6 +3,7 @@ config DRM_PL111
7038 + tristate "DRM Support for PL111 CLCD Controller"
7039 + depends on DRM
7040 + depends on ARM || ARM64 || COMPILE_TEST
7041 ++ depends on VEXPRESS_CONFIG || VEXPRESS_CONFIG=n
7042 + depends on COMMON_CLK
7043 + select DRM_KMS_HELPER
7044 + select DRM_KMS_CMA_HELPER
7045 +diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
7046 +index 48a58ba1db965..686485b19d0f2 100644
7047 +--- a/drivers/gpu/drm/qxl/qxl_dumb.c
7048 ++++ b/drivers/gpu/drm/qxl/qxl_dumb.c
7049 +@@ -58,6 +58,8 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
7050 + surf.height = args->height;
7051 + surf.stride = pitch;
7052 + surf.format = format;
7053 ++ surf.data = 0;
7054 ++
7055 + r = qxl_gem_object_create_with_handle(qdev, file_priv,
7056 + QXL_GEM_DOMAIN_CPU,
7057 + args->size, &surf, &qobj,
7058 +diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
7059 +index a4a45daf93f2b..6802d9b65f828 100644
7060 +--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
7061 ++++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
7062 +@@ -73,6 +73,7 @@ static int cdn_dp_grf_write(struct cdn_dp_device *dp,
7063 + ret = regmap_write(dp->grf, reg, val);
7064 + if (ret) {
7065 + DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret);
7066 ++ clk_disable_unprepare(dp->grf_clk);
7067 + return ret;
7068 + }
7069 +
7070 +diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
7071 +index 9d2163ef4d6e2..33fb4d05c5065 100644
7072 +--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c
7073 ++++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
7074 +@@ -658,7 +658,7 @@ int cdn_dp_config_video(struct cdn_dp_device *dp)
7075 + */
7076 + do {
7077 + tu_size_reg += 2;
7078 +- symbol = tu_size_reg * mode->clock * bit_per_pix;
7079 ++ symbol = (u64)tu_size_reg * mode->clock * bit_per_pix;
7080 + do_div(symbol, dp->max_lanes * link_rate * 8);
7081 + rem = do_div(symbol, 1000);
7082 + if (tu_size_reg > 64) {
7083 +diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
7084 +index 24a71091759cc..d8c47ee3cad37 100644
7085 +--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
7086 ++++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
7087 +@@ -692,13 +692,8 @@ static const struct dw_mipi_dsi_phy_ops dw_mipi_dsi_rockchip_phy_ops = {
7088 + .get_timing = dw_mipi_dsi_phy_get_timing,
7089 + };
7090 +
7091 +-static void dw_mipi_dsi_rockchip_config(struct dw_mipi_dsi_rockchip *dsi,
7092 +- int mux)
7093 ++static void dw_mipi_dsi_rockchip_config(struct dw_mipi_dsi_rockchip *dsi)
7094 + {
7095 +- if (dsi->cdata->lcdsel_grf_reg)
7096 +- regmap_write(dsi->grf_regmap, dsi->cdata->lcdsel_grf_reg,
7097 +- mux ? dsi->cdata->lcdsel_lit : dsi->cdata->lcdsel_big);
7098 +-
7099 + if (dsi->cdata->lanecfg1_grf_reg)
7100 + regmap_write(dsi->grf_regmap, dsi->cdata->lanecfg1_grf_reg,
7101 + dsi->cdata->lanecfg1);
7102 +@@ -712,6 +707,13 @@ static void dw_mipi_dsi_rockchip_config(struct dw_mipi_dsi_rockchip *dsi,
7103 + dsi->cdata->enable);
7104 + }
7105 +
7106 ++static void dw_mipi_dsi_rockchip_set_lcdsel(struct dw_mipi_dsi_rockchip *dsi,
7107 ++ int mux)
7108 ++{
7109 ++ regmap_write(dsi->grf_regmap, dsi->cdata->lcdsel_grf_reg,
7110 ++ mux ? dsi->cdata->lcdsel_lit : dsi->cdata->lcdsel_big);
7111 ++}
7112 ++
7113 + static int
7114 + dw_mipi_dsi_encoder_atomic_check(struct drm_encoder *encoder,
7115 + struct drm_crtc_state *crtc_state,
7116 +@@ -767,9 +769,9 @@ static void dw_mipi_dsi_encoder_enable(struct drm_encoder *encoder)
7117 + return;
7118 + }
7119 +
7120 +- dw_mipi_dsi_rockchip_config(dsi, mux);
7121 ++ dw_mipi_dsi_rockchip_set_lcdsel(dsi, mux);
7122 + if (dsi->slave)
7123 +- dw_mipi_dsi_rockchip_config(dsi->slave, mux);
7124 ++ dw_mipi_dsi_rockchip_set_lcdsel(dsi->slave, mux);
7125 +
7126 + clk_disable_unprepare(dsi->grf_clk);
7127 + }
7128 +@@ -923,6 +925,24 @@ static int dw_mipi_dsi_rockchip_bind(struct device *dev,
7129 + return ret;
7130 + }
7131 +
7132 ++ /*
7133 ++ * With the GRF clock running, write lane and dual-mode configurations
7134 ++ * that won't change immediately. If we waited until enable() to do
7135 ++ * this, things like panel preparation would not be able to send
7136 ++ * commands over DSI.
7137 ++ */
7138 ++ ret = clk_prepare_enable(dsi->grf_clk);
7139 ++ if (ret) {
7140 ++ DRM_DEV_ERROR(dsi->dev, "Failed to enable grf_clk: %d\n", ret);
7141 ++ return ret;
7142 ++ }
7143 ++
7144 ++ dw_mipi_dsi_rockchip_config(dsi);
7145 ++ if (dsi->slave)
7146 ++ dw_mipi_dsi_rockchip_config(dsi->slave);
7147 ++
7148 ++ clk_disable_unprepare(dsi->grf_clk);
7149 ++
7150 + ret = rockchip_dsi_drm_create_encoder(dsi, drm_dev);
7151 + if (ret) {
7152 + DRM_DEV_ERROR(dev, "Failed to create drm encoder\n");
7153 +diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
7154 +index 64469439ddf2f..f5b9028a16a38 100644
7155 +--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
7156 ++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
7157 +@@ -1022,6 +1022,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
7158 + VOP_WIN_SET(vop, win, alpha_en, 1);
7159 + } else {
7160 + VOP_WIN_SET(vop, win, src_alpha_ctl, SRC_ALPHA_EN(0));
7161 ++ VOP_WIN_SET(vop, win, alpha_en, 0);
7162 + }
7163 +
7164 + VOP_WIN_SET(vop, win, enable, 1);
7165 +diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
7166 +index bd5ba10822c24..489d63c05c0d9 100644
7167 +--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
7168 ++++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
7169 +@@ -499,11 +499,11 @@ static int px30_lvds_probe(struct platform_device *pdev,
7170 + if (IS_ERR(lvds->dphy))
7171 + return PTR_ERR(lvds->dphy);
7172 +
7173 +- phy_init(lvds->dphy);
7174 ++ ret = phy_init(lvds->dphy);
7175 + if (ret)
7176 + return ret;
7177 +
7178 +- phy_set_mode(lvds->dphy, PHY_MODE_LVDS);
7179 ++ ret = phy_set_mode(lvds->dphy, PHY_MODE_LVDS);
7180 + if (ret)
7181 + return ret;
7182 +
7183 +diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
7184 +index 76657dcdf9b00..1f36b67cd6ce9 100644
7185 +--- a/drivers/gpu/drm/vc4/vc4_crtc.c
7186 ++++ b/drivers/gpu/drm/vc4/vc4_crtc.c
7187 +@@ -279,14 +279,22 @@ static u32 vc4_crtc_get_fifo_full_level_bits(struct vc4_crtc *vc4_crtc,
7188 + * allows drivers to push pixels to more than one encoder from the
7189 + * same CRTC.
7190 + */
7191 +-static struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc)
7192 ++static struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc,
7193 ++ struct drm_atomic_state *state,
7194 ++ struct drm_connector_state *(*get_state)(struct drm_atomic_state *state,
7195 ++ struct drm_connector *connector))
7196 + {
7197 + struct drm_connector *connector;
7198 + struct drm_connector_list_iter conn_iter;
7199 +
7200 + drm_connector_list_iter_begin(crtc->dev, &conn_iter);
7201 + drm_for_each_connector_iter(connector, &conn_iter) {
7202 +- if (connector->state->crtc == crtc) {
7203 ++ struct drm_connector_state *conn_state = get_state(state, connector);
7204 ++
7205 ++ if (!conn_state)
7206 ++ continue;
7207 ++
7208 ++ if (conn_state->crtc == crtc) {
7209 + drm_connector_list_iter_end(&conn_iter);
7210 + return connector->encoder;
7211 + }
7212 +@@ -305,16 +313,17 @@ static void vc4_crtc_pixelvalve_reset(struct drm_crtc *crtc)
7213 + CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) | PV_CONTROL_FIFO_CLR);
7214 + }
7215 +
7216 +-static void vc4_crtc_config_pv(struct drm_crtc *crtc)
7217 ++static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_atomic_state *state)
7218 + {
7219 + struct drm_device *dev = crtc->dev;
7220 + struct vc4_dev *vc4 = to_vc4_dev(dev);
7221 +- struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc);
7222 ++ struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc, state,
7223 ++ drm_atomic_get_new_connector_state);
7224 + struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
7225 + struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
7226 + const struct vc4_pv_data *pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc);
7227 +- struct drm_crtc_state *state = crtc->state;
7228 +- struct drm_display_mode *mode = &state->adjusted_mode;
7229 ++ struct drm_crtc_state *crtc_state = crtc->state;
7230 ++ struct drm_display_mode *mode = &crtc_state->adjusted_mode;
7231 + bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE;
7232 + u32 pixel_rep = (mode->flags & DRM_MODE_FLAG_DBLCLK) ? 2 : 1;
7233 + bool is_dsi = (vc4_encoder->type == VC4_ENCODER_TYPE_DSI0 ||
7234 +@@ -421,10 +430,10 @@ static void require_hvs_enabled(struct drm_device *dev)
7235 + }
7236 +
7237 + static int vc4_crtc_disable(struct drm_crtc *crtc,
7238 ++ struct drm_encoder *encoder,
7239 + struct drm_atomic_state *state,
7240 + unsigned int channel)
7241 + {
7242 +- struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc);
7243 + struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
7244 + struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
7245 + struct drm_device *dev = crtc->dev;
7246 +@@ -465,10 +474,29 @@ static int vc4_crtc_disable(struct drm_crtc *crtc,
7247 + return 0;
7248 + }
7249 +
7250 ++static struct drm_encoder *vc4_crtc_get_encoder_by_type(struct drm_crtc *crtc,
7251 ++ enum vc4_encoder_type type)
7252 ++{
7253 ++ struct drm_encoder *encoder;
7254 ++
7255 ++ drm_for_each_encoder(encoder, crtc->dev) {
7256 ++ struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
7257 ++
7258 ++ if (vc4_encoder->type == type)
7259 ++ return encoder;
7260 ++ }
7261 ++
7262 ++ return NULL;
7263 ++}
7264 ++
7265 + int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
7266 + {
7267 + struct drm_device *drm = crtc->dev;
7268 + struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
7269 ++ enum vc4_encoder_type encoder_type;
7270 ++ const struct vc4_pv_data *pv_data;
7271 ++ struct drm_encoder *encoder;
7272 ++ unsigned encoder_sel;
7273 + int channel;
7274 +
7275 + if (!(of_device_is_compatible(vc4_crtc->pdev->dev.of_node,
7276 +@@ -487,7 +515,17 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
7277 + if (channel < 0)
7278 + return 0;
7279 +
7280 +- return vc4_crtc_disable(crtc, NULL, channel);
7281 ++ encoder_sel = VC4_GET_FIELD(CRTC_READ(PV_CONTROL), PV_CONTROL_CLK_SELECT);
7282 ++ if (WARN_ON(encoder_sel != 0))
7283 ++ return 0;
7284 ++
7285 ++ pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc);
7286 ++ encoder_type = pv_data->encoder_types[encoder_sel];
7287 ++ encoder = vc4_crtc_get_encoder_by_type(crtc, encoder_type);
7288 ++ if (WARN_ON(!encoder))
7289 ++ return 0;
7290 ++
7291 ++ return vc4_crtc_disable(crtc, encoder, NULL, channel);
7292 + }
7293 +
7294 + static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
7295 +@@ -496,6 +534,8 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
7296 + struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
7297 + crtc);
7298 + struct vc4_crtc_state *old_vc4_state = to_vc4_crtc_state(old_state);
7299 ++ struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc, state,
7300 ++ drm_atomic_get_old_connector_state);
7301 + struct drm_device *dev = crtc->dev;
7302 +
7303 + require_hvs_enabled(dev);
7304 +@@ -503,7 +543,7 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
7305 + /* Disable vblank irq handling before crtc is disabled. */
7306 + drm_crtc_vblank_off(crtc);
7307 +
7308 +- vc4_crtc_disable(crtc, state, old_vc4_state->assigned_channel);
7309 ++ vc4_crtc_disable(crtc, encoder, state, old_vc4_state->assigned_channel);
7310 +
7311 + /*
7312 + * Make sure we issue a vblank event after disabling the CRTC if
7313 +@@ -524,7 +564,8 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
7314 + {
7315 + struct drm_device *dev = crtc->dev;
7316 + struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
7317 +- struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc);
7318 ++ struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc, state,
7319 ++ drm_atomic_get_new_connector_state);
7320 + struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
7321 +
7322 + require_hvs_enabled(dev);
7323 +@@ -539,7 +580,7 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
7324 + if (vc4_encoder->pre_crtc_configure)
7325 + vc4_encoder->pre_crtc_configure(encoder, state);
7326 +
7327 +- vc4_crtc_config_pv(crtc);
7328 ++ vc4_crtc_config_pv(crtc, state);
7329 +
7330 + CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) | PV_CONTROL_EN);
7331 +
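
Threading the atomic state and a state accessor through vc4_get_crtc_encoder() lets the disable path match connectors against the old state and the enable path against the new one, while keeping a single lookup helper. A self-contained sketch of the accessor pattern, with made-up types and names:

    #include <stdio.h>

    struct conn_state { int crtc; };
    struct atomic_state { struct conn_state old, new; };

    static struct conn_state *get_old(struct atomic_state *s) { return &s->old; }
    static struct conn_state *get_new(struct atomic_state *s) { return &s->new; }

    /* written once; callers choose which state generation to match */
    static int connected_to(struct atomic_state *s, int crtc,
                            struct conn_state *(*get)(struct atomic_state *))
    {
        struct conn_state *cs = get(s);
        return cs && cs->crtc == crtc;
    }

    int main(void)
    {
        struct atomic_state s = { .old = { .crtc = 1 }, .new = { .crtc = 2 } };

        /* disable paths pass get_old, enable paths pass get_new */
        printf("old on crtc1: %d\n", connected_to(&s, 1, get_old)); /* 1 */
        printf("new on crtc1: %d\n", connected_to(&s, 1, get_new)); /* 0 */
        return 0;
    }
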
7332 +diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
7333 +index 8106b5634fe10..e94730beb15b7 100644
7334 +--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
7335 ++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
7336 +@@ -2000,7 +2000,7 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
7337 + &hpd_gpio_flags);
7338 + if (vc4_hdmi->hpd_gpio < 0) {
7339 + ret = vc4_hdmi->hpd_gpio;
7340 +- goto err_unprepare_hsm;
7341 ++ goto err_put_ddc;
7342 + }
7343 +
7344 + vc4_hdmi->hpd_active_low = hpd_gpio_flags & OF_GPIO_ACTIVE_LOW;
7345 +@@ -2041,8 +2041,8 @@ err_destroy_conn:
7346 + vc4_hdmi_connector_destroy(&vc4_hdmi->connector);
7347 + err_destroy_encoder:
7348 + drm_encoder_cleanup(encoder);
7349 +-err_unprepare_hsm:
7350 + pm_runtime_disable(dev);
7351 ++err_put_ddc:
7352 + put_device(&vc4_hdmi->ddc->dev);
7353 +
7354 + return ret;
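
The label change above follows the usual unwind-in-reverse-order rule for error paths: a goto may only jump to cleanup for resources already held, and here the HPD GPIO lookup fails before pm_runtime is enabled. A compilable sketch of the shape, with stub functions and illustrative names only (not the driver's real call sequence):

    #include <stdio.h>

    static int get_ddc(void)        { puts("get ddc");    return 0; }
    static void put_ddc(void)       { puts("put ddc"); }
    static int get_hpd_gpio(int f)  { puts("get hpd");    return f ? -1 : 0; }
    static int enable_pm(void)      { puts("enable pm");  return 0; }
    static void disable_pm(void)    { puts("disable pm"); }
    static int register_rest(void)  { puts("register");   return 0; }

    static int bind(int hpd_fails)
    {
        int ret = get_ddc();
        if (ret)
            return ret;

        ret = get_hpd_gpio(hpd_fails);
        if (ret)
            goto err_put_ddc;       /* PM not enabled yet: skip its teardown */

        ret = enable_pm();
        if (ret)
            goto err_put_ddc;

        ret = register_rest();
        if (ret)
            goto err_disable_pm;    /* PM is held from here on */

        return 0;

    err_disable_pm:
        disable_pm();
    err_put_ddc:
        put_ddc();
        return ret;
    }

    int main(void)
    {
        return bind(1) ? 0 : 1;     /* exercise the early-failure path */
    }
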
7355 +diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
7356 +index 4db25bd9fa22d..127eaf0a0a580 100644
7357 +--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
7358 ++++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
7359 +@@ -1467,6 +1467,7 @@ struct svga3dsurface_cache {
7360 +
7361 + /**
7362 + * struct svga3dsurface_loc - Surface location
7363 ++ * @sheet: The multisample sheet.
7364 + * @sub_resource: Surface subresource. Defined as layer * num_mip_levels +
7365 + * mip_level.
7366 + * @x: X coordinate.
7367 +@@ -1474,6 +1475,7 @@ struct svga3dsurface_cache {
7368 + * @z: Z coordinate.
7369 + */
7370 + struct svga3dsurface_loc {
7371 ++ u32 sheet;
7372 + u32 sub_resource;
7373 + u32 x, y, z;
7374 + };
7375 +@@ -1566,8 +1568,8 @@ svga3dsurface_get_loc(const struct svga3dsurface_cache *cache,
7376 + u32 layer;
7377 + int i;
7378 +
7379 +- if (offset >= cache->sheet_bytes)
7380 +- offset %= cache->sheet_bytes;
7381 ++ loc->sheet = offset / cache->sheet_bytes;
7382 ++ offset -= loc->sheet * cache->sheet_bytes;
7383 +
7384 + layer = offset / cache->mip_chain_bytes;
7385 + offset -= layer * cache->mip_chain_bytes;
7386 +@@ -1631,6 +1633,7 @@ svga3dsurface_min_loc(const struct svga3dsurface_cache *cache,
7387 + u32 sub_resource,
7388 + struct svga3dsurface_loc *loc)
7389 + {
7390 ++ loc->sheet = 0;
7391 + loc->sub_resource = sub_resource;
7392 + loc->x = loc->y = loc->z = 0;
7393 + }
7394 +@@ -1652,6 +1655,7 @@ svga3dsurface_max_loc(const struct svga3dsurface_cache *cache,
7395 + const struct drm_vmw_size *size;
7396 + u32 mip;
7397 +
7398 ++ loc->sheet = 0;
7399 + loc->sub_resource = sub_resource + 1;
7400 + mip = sub_resource % cache->num_mip_levels;
7401 + size = &cache->mip[mip].size;
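
The svga3dsurface_get_loc() change matters because the old modulo folded offsets from higher multisample sheets back into sheet 0, discarding the sheet index; recording the quotient keeps it, so the dirty-tracking code above can tell when a range crosses sheets. A standalone sketch of the decomposition, with made-up sizes:

    #include <assert.h>
    #include <stdio.h>

    struct loc { unsigned sheet, layer, rest; };

    static struct loc get_loc(unsigned offset, unsigned sheet_bytes,
                              unsigned mip_chain_bytes)
    {
        struct loc l;
        l.sheet = offset / sheet_bytes;   /* was: offset %= sheet_bytes, */
        offset -= l.sheet * sheet_bytes;  /* which dropped the quotient  */
        l.layer = offset / mip_chain_bytes;
        l.rest  = offset - l.layer * mip_chain_bytes;
        return l;
    }

    int main(void)
    {
        struct loc l = get_loc(2600, 1000, 400);
        assert(l.sheet == 2 && l.layer == 1 && l.rest == 200);
        printf("sheet=%u layer=%u rest=%u\n", l.sheet, l.layer, l.rest);
        return 0;
    }
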
7402 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
7403 +index 7a24196f92c38..d6a6d8a3387a9 100644
7404 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
7405 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
7406 +@@ -2763,12 +2763,24 @@ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
7407 + {
7408 + VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
7409 + container_of(header, typeof(*cmd), header);
7410 +- struct vmw_resource *ret;
7411 ++ struct vmw_resource *view;
7412 ++ struct vmw_res_cache_entry *rcache;
7413 +
7414 +- ret = vmw_view_id_val_add(sw_context, vmw_view_sr,
7415 +- cmd->body.shaderResourceViewId);
7416 ++ view = vmw_view_id_val_add(sw_context, vmw_view_sr,
7417 ++ cmd->body.shaderResourceViewId);
7418 ++ if (IS_ERR(view))
7419 ++ return PTR_ERR(view);
7420 +
7421 +- return PTR_ERR_OR_ZERO(ret);
7422 ++ /*
7423 ++ * Normally the shader-resource view is not gpu-dirtying, but for
7424 ++ * this particular command it is...
7425 ++ * So mark the last looked-up surface, which is the surface
7426 ++ * the view points to, gpu-dirty.
7427 ++ */
7428 ++ rcache = &sw_context->res_cache[vmw_res_surface];
7429 ++ vmw_validation_res_set_dirty(sw_context->ctx, rcache->private,
7430 ++ VMW_RES_DIRTY_SET);
7431 ++ return 0;
7432 + }
7433 +
7434 + /**
7435 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
7436 +index c3e55c1376eb8..beab3e19d8e21 100644
7437 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
7438 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
7439 +@@ -1804,6 +1804,19 @@ static void vmw_surface_tex_dirty_range_add(struct vmw_resource *res,
7440 + svga3dsurface_get_loc(cache, &loc2, end - 1);
7441 + svga3dsurface_inc_loc(cache, &loc2);
7442 +
7443 ++ if (loc1.sheet != loc2.sheet) {
7444 ++ u32 sub_res;
7445 ++
7446 ++ /*
7447 ++ * The dirty range spans multiple multisample sheets. Handling
7448 ++ * that optimally would mean computing the dirty region of each
7449 ++ * sheet and taking the union. Since this is not a common case,
7450 ++ * just dirty the whole surface.
7451 ++ */
7452 ++ for (sub_res = 0; sub_res < dirty->num_subres; ++sub_res)
7453 ++ vmw_subres_dirty_full(dirty, sub_res);
7454 ++ return;
7455 ++ }
7456 + if (loc1.sub_resource + 1 == loc2.sub_resource) {
7457 + /* Dirty range covers a single sub-resource */
7458 + vmw_subres_dirty_add(dirty, &loc1, &loc2);
7459 +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
7460 +index 0de2788b9814c..7db332139f7d5 100644
7461 +--- a/drivers/hid/hid-core.c
7462 ++++ b/drivers/hid/hid-core.c
7463 +@@ -2306,12 +2306,8 @@ static int hid_device_remove(struct device *dev)
7464 + {
7465 + struct hid_device *hdev = to_hid_device(dev);
7466 + struct hid_driver *hdrv;
7467 +- int ret = 0;
7468 +
7469 +- if (down_interruptible(&hdev->driver_input_lock)) {
7470 +- ret = -EINTR;
7471 +- goto end;
7472 +- }
7473 ++ down(&hdev->driver_input_lock);
7474 + hdev->io_started = false;
7475 +
7476 + hdrv = hdev->driver;
7477 +@@ -2326,8 +2322,8 @@ static int hid_device_remove(struct device *dev)
7478 +
7479 + if (!hdev->io_started)
7480 + up(&hdev->driver_input_lock);
7481 +-end:
7482 +- return ret;
7483 ++
7484 ++ return 0;
7485 + }
7486 +
7487 + static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
7488 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
7489 +index b84a0a11e05bf..63ca5959dc679 100644
7490 +--- a/drivers/hid/hid-ids.h
7491 ++++ b/drivers/hid/hid-ids.h
7492 +@@ -396,6 +396,7 @@
7493 + #define USB_DEVICE_ID_HP_X2_10_COVER 0x0755
7494 + #define I2C_DEVICE_ID_HP_SPECTRE_X360_15 0x2817
7495 + #define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706
7496 ++#define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN 0x261A
7497 +
7498 + #define USB_VENDOR_ID_ELECOM 0x056e
7499 + #define USB_DEVICE_ID_ELECOM_BM084 0x0061
7500 +diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
7501 +index abbfa91e73e43..68c8644234a4a 100644
7502 +--- a/drivers/hid/hid-input.c
7503 ++++ b/drivers/hid/hid-input.c
7504 +@@ -326,6 +326,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
7505 + HID_BATTERY_QUIRK_IGNORE },
7506 + { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_15),
7507 + HID_BATTERY_QUIRK_IGNORE },
7508 ++ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN),
7509 ++ HID_BATTERY_QUIRK_IGNORE },
7510 + {}
7511 + };
7512 +
7513 +diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
7514 +index 8319b0ce385a5..b3722c51ec78a 100644
7515 +--- a/drivers/hid/hid-sony.c
7516 ++++ b/drivers/hid/hid-sony.c
7517 +@@ -597,9 +597,8 @@ struct sony_sc {
7518 + /* DS4 calibration data */
7519 + struct ds4_calibration_data ds4_calib_data[6];
7520 + /* GH Live */
7521 ++ struct urb *ghl_urb;
7522 + struct timer_list ghl_poke_timer;
7523 +- struct usb_ctrlrequest *ghl_cr;
7524 +- u8 *ghl_databuf;
7525 + };
7526 +
7527 + static void sony_set_leds(struct sony_sc *sc);
7528 +@@ -625,66 +624,54 @@ static inline void sony_schedule_work(struct sony_sc *sc,
7529 +
7530 + static void ghl_magic_poke_cb(struct urb *urb)
7531 + {
7532 +- if (urb) {
7533 +- /* Free sc->ghl_cr and sc->ghl_databuf allocated in
7534 +- * ghl_magic_poke()
7535 +- */
7536 +- kfree(urb->setup_packet);
7537 +- kfree(urb->transfer_buffer);
7538 +- }
7539 ++ struct sony_sc *sc = urb->context;
7540 ++
7541 ++ if (urb->status < 0)
7542 ++ hid_err(sc->hdev, "URB transfer failed: %d", urb->status);
7543 ++
7544 ++ mod_timer(&sc->ghl_poke_timer, jiffies + GHL_GUITAR_POKE_INTERVAL*HZ);
7545 + }
7546 +
7547 + static void ghl_magic_poke(struct timer_list *t)
7548 + {
7549 ++ int ret;
7550 + struct sony_sc *sc = from_timer(sc, t, ghl_poke_timer);
7551 +
7552 +- int ret;
7553 ++ ret = usb_submit_urb(sc->ghl_urb, GFP_ATOMIC);
7554 ++ if (ret < 0)
7555 ++ hid_err(sc->hdev, "usb_submit_urb failed: %d", ret);
7556 ++}
7557 ++
7558 ++static int ghl_init_urb(struct sony_sc *sc, struct usb_device *usbdev)
7559 ++{
7560 ++ struct usb_ctrlrequest *cr;
7561 ++ u16 poke_size;
7562 ++ u8 *databuf;
7563 + unsigned int pipe;
7564 +- struct urb *urb;
7565 +- struct usb_device *usbdev = to_usb_device(sc->hdev->dev.parent->parent);
7566 +- const u16 poke_size =
7567 +- ARRAY_SIZE(ghl_ps3wiiu_magic_data);
7568 +
7569 ++ poke_size = ARRAY_SIZE(ghl_ps3wiiu_magic_data);
7570 + pipe = usb_sndctrlpipe(usbdev, 0);
7571 +
7572 +- if (!sc->ghl_cr) {
7573 +- sc->ghl_cr = kzalloc(sizeof(*sc->ghl_cr), GFP_ATOMIC);
7574 +- if (!sc->ghl_cr)
7575 +- goto resched;
7576 +- }
7577 +-
7578 +- if (!sc->ghl_databuf) {
7579 +- sc->ghl_databuf = kzalloc(poke_size, GFP_ATOMIC);
7580 +- if (!sc->ghl_databuf)
7581 +- goto resched;
7582 +- }
7583 ++ cr = devm_kzalloc(&sc->hdev->dev, sizeof(*cr), GFP_ATOMIC);
7584 ++ if (cr == NULL)
7585 ++ return -ENOMEM;
7586 +
7587 +- urb = usb_alloc_urb(0, GFP_ATOMIC);
7588 +- if (!urb)
7589 +- goto resched;
7590 ++ databuf = devm_kzalloc(&sc->hdev->dev, poke_size, GFP_ATOMIC);
7591 ++ if (databuf == NULL)
7592 ++ return -ENOMEM;
7593 +
7594 +- sc->ghl_cr->bRequestType =
7595 ++ cr->bRequestType =
7596 + USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT;
7597 +- sc->ghl_cr->bRequest = USB_REQ_SET_CONFIGURATION;
7598 +- sc->ghl_cr->wValue = cpu_to_le16(ghl_ps3wiiu_magic_value);
7599 +- sc->ghl_cr->wIndex = 0;
7600 +- sc->ghl_cr->wLength = cpu_to_le16(poke_size);
7601 +- memcpy(sc->ghl_databuf, ghl_ps3wiiu_magic_data, poke_size);
7602 +-
7603 ++ cr->bRequest = USB_REQ_SET_CONFIGURATION;
7604 ++ cr->wValue = cpu_to_le16(ghl_ps3wiiu_magic_value);
7605 ++ cr->wIndex = 0;
7606 ++ cr->wLength = cpu_to_le16(poke_size);
7607 ++ memcpy(databuf, ghl_ps3wiiu_magic_data, poke_size);
7608 + usb_fill_control_urb(
7609 +- urb, usbdev, pipe,
7610 +- (unsigned char *) sc->ghl_cr, sc->ghl_databuf,
7611 +- poke_size, ghl_magic_poke_cb, NULL);
7612 +- ret = usb_submit_urb(urb, GFP_ATOMIC);
7613 +- if (ret < 0) {
7614 +- kfree(sc->ghl_databuf);
7615 +- kfree(sc->ghl_cr);
7616 +- }
7617 +- usb_free_urb(urb);
7618 +-
7619 +-resched:
7620 +- /* Reschedule for next time */
7621 +- mod_timer(&sc->ghl_poke_timer, jiffies + GHL_GUITAR_POKE_INTERVAL*HZ);
7622 ++ sc->ghl_urb, usbdev, pipe,
7623 ++ (unsigned char *) cr, databuf, poke_size,
7624 ++ ghl_magic_poke_cb, sc);
7625 ++ return 0;
7626 + }
7627 +
7628 + static int guitar_mapping(struct hid_device *hdev, struct hid_input *hi,
7629 +@@ -2981,6 +2968,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
7630 + int ret;
7631 + unsigned long quirks = id->driver_data;
7632 + struct sony_sc *sc;
7633 ++ struct usb_device *usbdev;
7634 + unsigned int connect_mask = HID_CONNECT_DEFAULT;
7635 +
7636 + if (!strcmp(hdev->name, "FutureMax Dance Mat"))
7637 +@@ -3000,6 +2988,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
7638 + sc->quirks = quirks;
7639 + hid_set_drvdata(hdev, sc);
7640 + sc->hdev = hdev;
7641 ++ usbdev = to_usb_device(sc->hdev->dev.parent->parent);
7642 +
7643 + ret = hid_parse(hdev);
7644 + if (ret) {
7645 +@@ -3042,6 +3031,15 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
7646 + }
7647 +
7648 + if (sc->quirks & GHL_GUITAR_PS3WIIU) {
7649 ++ sc->ghl_urb = usb_alloc_urb(0, GFP_ATOMIC);
7650 ++ if (!sc->ghl_urb)
7651 ++ return -ENOMEM;
7652 ++ ret = ghl_init_urb(sc, usbdev);
7653 ++ if (ret) {
7654 ++ hid_err(hdev, "error preparing URB\n");
7655 ++ return ret;
7656 ++ }
7657 ++
7658 + timer_setup(&sc->ghl_poke_timer, ghl_magic_poke, 0);
7659 + mod_timer(&sc->ghl_poke_timer,
7660 + jiffies + GHL_GUITAR_POKE_INTERVAL*HZ);
7661 +@@ -3054,8 +3052,10 @@ static void sony_remove(struct hid_device *hdev)
7662 + {
7663 + struct sony_sc *sc = hid_get_drvdata(hdev);
7664 +
7665 +- if (sc->quirks & GHL_GUITAR_PS3WIIU)
7666 ++ if (sc->quirks & GHL_GUITAR_PS3WIIU) {
7667 + del_timer_sync(&sc->ghl_poke_timer);
7668 ++ usb_free_urb(sc->ghl_urb);
7669 ++ }
7670 +
7671 + hid_hw_close(hdev);
7672 +
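
The shape of the hid-sony fix above: the old code allocated a setup packet, a data buffer and a URB on every timer tick and freed them in the completion callback, leaking them whenever submission failed. Allocating once at probe and reusing the URB removes the churn and gives remove() a single place to free it. A stubbed, illustrative sketch of that ownership pattern (names are made up; real kernel calls appear only in comments):

    #include <stdio.h>

    struct urb_stub { int prepared; };
    static struct urb_stub ghl_urb;      /* allocated once, like sc->ghl_urb */

    static int probe(void)
    {
        ghl_urb.prepared = 1;            /* usb_alloc_urb() + fill, once */
        return 0;
    }

    static void poke_timer_tick(void)
    {
        if (ghl_urb.prepared)
            puts("resubmit the same URB");  /* usb_submit_urb(); no alloc/free */
    }

    static void teardown(void)
    {
        puts("del_timer_sync(); usb_free_urb()");
        ghl_urb.prepared = 0;            /* single owner, single free */
    }

    int main(void)
    {
        probe();
        poke_timer_tick();
        poke_timer_tick();
        teardown();
        return 0;
    }
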
7673 +diff --git a/drivers/hid/surface-hid/surface_hid.c b/drivers/hid/surface-hid/surface_hid.c
7674 +index 3477b31611ae1..a3a70e4f3f6c9 100644
7675 +--- a/drivers/hid/surface-hid/surface_hid.c
7676 ++++ b/drivers/hid/surface-hid/surface_hid.c
7677 +@@ -143,7 +143,7 @@ static int ssam_hid_get_raw_report(struct surface_hid_device *shid, u8 rprt_id,
7678 + rqst.target_id = shid->uid.target;
7679 + rqst.instance_id = shid->uid.instance;
7680 + rqst.command_id = SURFACE_HID_CID_GET_FEATURE_REPORT;
7681 +- rqst.flags = 0;
7682 ++ rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
7683 + rqst.length = sizeof(rprt_id);
7684 + rqst.payload = &rprt_id;
7685 +
7686 +diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
7687 +index 71c886245dbf2..8f16654eca098 100644
7688 +--- a/drivers/hid/wacom_wac.h
7689 ++++ b/drivers/hid/wacom_wac.h
7690 +@@ -122,7 +122,7 @@
7691 + #define WACOM_HID_WD_TOUCHONOFF (WACOM_HID_UP_WACOMDIGITIZER | 0x0454)
7692 + #define WACOM_HID_WD_BATTERY_LEVEL (WACOM_HID_UP_WACOMDIGITIZER | 0x043b)
7693 + #define WACOM_HID_WD_EXPRESSKEY00 (WACOM_HID_UP_WACOMDIGITIZER | 0x0910)
7694 +-#define WACOM_HID_WD_EXPRESSKEYCAP00 (WACOM_HID_UP_WACOMDIGITIZER | 0x0950)
7695 ++#define WACOM_HID_WD_EXPRESSKEYCAP00 (WACOM_HID_UP_WACOMDIGITIZER | 0x0940)
7696 + #define WACOM_HID_WD_MODE_CHANGE (WACOM_HID_UP_WACOMDIGITIZER | 0x0980)
7697 + #define WACOM_HID_WD_MUTE_DEVICE (WACOM_HID_UP_WACOMDIGITIZER | 0x0981)
7698 + #define WACOM_HID_WD_CONTROLPANEL (WACOM_HID_UP_WACOMDIGITIZER | 0x0982)
7699 +diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
7700 +index 311cd005b3be6..5e479d54918cf 100644
7701 +--- a/drivers/hv/connection.c
7702 ++++ b/drivers/hv/connection.c
7703 +@@ -232,8 +232,10 @@ int vmbus_connect(void)
7704 + */
7705 +
7706 + for (i = 0; ; i++) {
7707 +- if (i == ARRAY_SIZE(vmbus_versions))
7708 ++ if (i == ARRAY_SIZE(vmbus_versions)) {
7709 ++ ret = -EDOM;
7710 + goto cleanup;
7711 ++ }
7712 +
7713 + version = vmbus_versions[i];
7714 + if (version > max_version)
7715 +diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
7716 +index e4aefeb330daf..136576cba26f5 100644
7717 +--- a/drivers/hv/hv_util.c
7718 ++++ b/drivers/hv/hv_util.c
7719 +@@ -750,8 +750,8 @@ static int hv_timesync_init(struct hv_util_service *srv)
7720 + */
7721 + hv_ptp_clock = ptp_clock_register(&ptp_hyperv_info, NULL);
7722 + if (IS_ERR_OR_NULL(hv_ptp_clock)) {
7723 +- pr_err("cannot register PTP clock: %ld\n",
7724 +- PTR_ERR(hv_ptp_clock));
7725 ++ pr_err("cannot register PTP clock: %d\n",
7726 ++ PTR_ERR_OR_ZERO(hv_ptp_clock));
7727 + hv_ptp_clock = NULL;
7728 + }
7729 +
7730 +diff --git a/drivers/hwmon/lm70.c b/drivers/hwmon/lm70.c
7731 +index 40eab3349904b..6b884ea009877 100644
7732 +--- a/drivers/hwmon/lm70.c
7733 ++++ b/drivers/hwmon/lm70.c
7734 +@@ -22,10 +22,10 @@
7735 + #include <linux/hwmon.h>
7736 + #include <linux/mutex.h>
7737 + #include <linux/mod_devicetable.h>
7738 ++#include <linux/of.h>
7739 + #include <linux/property.h>
7740 + #include <linux/spi/spi.h>
7741 + #include <linux/slab.h>
7742 +-#include <linux/acpi.h>
7743 +
7744 + #define DRVNAME "lm70"
7745 +
7746 +@@ -148,29 +148,6 @@ static const struct of_device_id lm70_of_ids[] = {
7747 + MODULE_DEVICE_TABLE(of, lm70_of_ids);
7748 + #endif
7749 +
7750 +-#ifdef CONFIG_ACPI
7751 +-static const struct acpi_device_id lm70_acpi_ids[] = {
7752 +- {
7753 +- .id = "LM000070",
7754 +- .driver_data = LM70_CHIP_LM70,
7755 +- },
7756 +- {
7757 +- .id = "TMP00121",
7758 +- .driver_data = LM70_CHIP_TMP121,
7759 +- },
7760 +- {
7761 +- .id = "LM000071",
7762 +- .driver_data = LM70_CHIP_LM71,
7763 +- },
7764 +- {
7765 +- .id = "LM000074",
7766 +- .driver_data = LM70_CHIP_LM74,
7767 +- },
7768 +- {},
7769 +-};
7770 +-MODULE_DEVICE_TABLE(acpi, lm70_acpi_ids);
7771 +-#endif
7772 +-
7773 + static int lm70_probe(struct spi_device *spi)
7774 + {
7775 + struct device *hwmon_dev;
7776 +@@ -217,7 +194,6 @@ static struct spi_driver lm70_driver = {
7777 + .driver = {
7778 + .name = "lm70",
7779 + .of_match_table = of_match_ptr(lm70_of_ids),
7780 +- .acpi_match_table = ACPI_PTR(lm70_acpi_ids),
7781 + },
7782 + .id_table = lm70_ids,
7783 + .probe = lm70_probe,
7784 +diff --git a/drivers/hwmon/max31722.c b/drivers/hwmon/max31722.c
7785 +index 062eceb7be0db..613338cbcb170 100644
7786 +--- a/drivers/hwmon/max31722.c
7787 ++++ b/drivers/hwmon/max31722.c
7788 +@@ -6,7 +6,6 @@
7789 + * Copyright (c) 2016, Intel Corporation.
7790 + */
7791 +
7792 +-#include <linux/acpi.h>
7793 + #include <linux/hwmon.h>
7794 + #include <linux/hwmon-sysfs.h>
7795 + #include <linux/kernel.h>
7796 +@@ -133,20 +132,12 @@ static const struct spi_device_id max31722_spi_id[] = {
7797 + {"max31723", 0},
7798 + {}
7799 + };
7800 +-
7801 +-static const struct acpi_device_id __maybe_unused max31722_acpi_id[] = {
7802 +- {"MAX31722", 0},
7803 +- {"MAX31723", 0},
7804 +- {}
7805 +-};
7806 +-
7807 + MODULE_DEVICE_TABLE(spi, max31722_spi_id);
7808 +
7809 + static struct spi_driver max31722_driver = {
7810 + .driver = {
7811 + .name = "max31722",
7812 + .pm = &max31722_pm_ops,
7813 +- .acpi_match_table = ACPI_PTR(max31722_acpi_id),
7814 + },
7815 + .probe = max31722_probe,
7816 + .remove = max31722_remove,
7817 +diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c
7818 +index 86e6c71db685c..67677c4377687 100644
7819 +--- a/drivers/hwmon/max31790.c
7820 ++++ b/drivers/hwmon/max31790.c
7821 +@@ -27,6 +27,7 @@
7822 +
7823 + /* Fan Config register bits */
7824 + #define MAX31790_FAN_CFG_RPM_MODE 0x80
7825 ++#define MAX31790_FAN_CFG_CTRL_MON 0x10
7826 + #define MAX31790_FAN_CFG_TACH_INPUT_EN 0x08
7827 + #define MAX31790_FAN_CFG_TACH_INPUT 0x01
7828 +
7829 +@@ -104,7 +105,7 @@ static struct max31790_data *max31790_update_device(struct device *dev)
7830 + data->tach[NR_CHANNEL + i] = rv;
7831 + } else {
7832 + rv = i2c_smbus_read_word_swapped(client,
7833 +- MAX31790_REG_PWMOUT(i));
7834 ++ MAX31790_REG_PWM_DUTY_CYCLE(i));
7835 + if (rv < 0)
7836 + goto abort;
7837 + data->pwm[i] = rv;
7838 +@@ -170,7 +171,7 @@ static int max31790_read_fan(struct device *dev, u32 attr, int channel,
7839 +
7840 + switch (attr) {
7841 + case hwmon_fan_input:
7842 +- sr = get_tach_period(data->fan_dynamics[channel]);
7843 ++ sr = get_tach_period(data->fan_dynamics[channel % NR_CHANNEL]);
7844 + rpm = RPM_FROM_REG(data->tach[channel], sr);
7845 + *val = rpm;
7846 + return 0;
7847 +@@ -271,12 +272,12 @@ static int max31790_read_pwm(struct device *dev, u32 attr, int channel,
7848 + *val = data->pwm[channel] >> 8;
7849 + return 0;
7850 + case hwmon_pwm_enable:
7851 +- if (fan_config & MAX31790_FAN_CFG_RPM_MODE)
7852 ++ if (fan_config & MAX31790_FAN_CFG_CTRL_MON)
7853 ++ *val = 0;
7854 ++ else if (fan_config & MAX31790_FAN_CFG_RPM_MODE)
7855 + *val = 2;
7856 +- else if (fan_config & MAX31790_FAN_CFG_TACH_INPUT_EN)
7857 +- *val = 1;
7858 + else
7859 +- *val = 0;
7860 ++ *val = 1;
7861 + return 0;
7862 + default:
7863 + return -EOPNOTSUPP;
7864 +@@ -299,31 +300,41 @@ static int max31790_write_pwm(struct device *dev, u32 attr, int channel,
7865 + err = -EINVAL;
7866 + break;
7867 + }
7868 +- data->pwm[channel] = val << 8;
7869 ++ data->valid = false;
7870 + err = i2c_smbus_write_word_swapped(client,
7871 + MAX31790_REG_PWMOUT(channel),
7872 +- data->pwm[channel]);
7873 ++ val << 8);
7874 + break;
7875 + case hwmon_pwm_enable:
7876 + fan_config = data->fan_config[channel];
7877 + if (val == 0) {
7878 +- fan_config &= ~(MAX31790_FAN_CFG_TACH_INPUT_EN |
7879 +- MAX31790_FAN_CFG_RPM_MODE);
7880 ++ fan_config |= MAX31790_FAN_CFG_CTRL_MON;
7881 ++ /*
7882 ++ * Disable RPM mode; otherwise disabling fan speed
7883 ++ * monitoring is not possible.
7884 ++ */
7885 ++ fan_config &= ~MAX31790_FAN_CFG_RPM_MODE;
7886 + } else if (val == 1) {
7887 +- fan_config = (fan_config |
7888 +- MAX31790_FAN_CFG_TACH_INPUT_EN) &
7889 +- ~MAX31790_FAN_CFG_RPM_MODE;
7890 ++ fan_config &= ~(MAX31790_FAN_CFG_CTRL_MON | MAX31790_FAN_CFG_RPM_MODE);
7891 + } else if (val == 2) {
7892 +- fan_config |= MAX31790_FAN_CFG_TACH_INPUT_EN |
7893 +- MAX31790_FAN_CFG_RPM_MODE;
7894 ++ fan_config &= ~MAX31790_FAN_CFG_CTRL_MON;
7895 ++ /*
7896 ++ * The chip sets MAX31790_FAN_CFG_TACH_INPUT_EN on its
7897 ++ * own if MAX31790_FAN_CFG_RPM_MODE is set.
7898 ++ * Do it here as well to reflect the actual register
7899 ++ * value in the cache.
7900 ++ */
7901 ++ fan_config |= (MAX31790_FAN_CFG_RPM_MODE | MAX31790_FAN_CFG_TACH_INPUT_EN);
7902 + } else {
7903 + err = -EINVAL;
7904 + break;
7905 + }
7906 +- data->fan_config[channel] = fan_config;
7907 +- err = i2c_smbus_write_byte_data(client,
7908 +- MAX31790_REG_FAN_CONFIG(channel),
7909 +- fan_config);
7910 ++ if (fan_config != data->fan_config[channel]) {
7911 ++ err = i2c_smbus_write_byte_data(client, MAX31790_REG_FAN_CONFIG(channel),
7912 ++ fan_config);
7913 ++ if (!err)
7914 ++ data->fan_config[channel] = fan_config;
7915 ++ }
7916 + break;
7917 + default:
7918 + err = -EOPNOTSUPP;
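
The reworked max31790 read path above maps the fan-config bits to pwm_enable as: 0 (monitoring disabled) when CTRL_MON is set, 2 (closed-loop RPM control) when RPM_MODE is set, and 1 (direct PWM duty-cycle control) otherwise. A pure-logic sketch using the same bit values; this is a simplification for illustration, not a drop-in for the driver:

    #include <assert.h>

    #define MAX31790_FAN_CFG_RPM_MODE 0x80
    #define MAX31790_FAN_CFG_CTRL_MON 0x10

    static int pwm_enable_from_config(unsigned char fan_config)
    {
        if (fan_config & MAX31790_FAN_CFG_CTRL_MON)
            return 0;           /* fan-speed monitoring disabled */
        if (fan_config & MAX31790_FAN_CFG_RPM_MODE)
            return 2;           /* closed-loop RPM control */
        return 1;               /* direct PWM duty-cycle control */
    }

    int main(void)
    {
        assert(pwm_enable_from_config(MAX31790_FAN_CFG_CTRL_MON) == 0);
        assert(pwm_enable_from_config(0) == 1);
        assert(pwm_enable_from_config(MAX31790_FAN_CFG_RPM_MODE) == 2);
        return 0;
    }
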
7919 +diff --git a/drivers/hwmon/pmbus/bpa-rs600.c b/drivers/hwmon/pmbus/bpa-rs600.c
7920 +index f6558ee9dec36..2be69fedfa361 100644
7921 +--- a/drivers/hwmon/pmbus/bpa-rs600.c
7922 ++++ b/drivers/hwmon/pmbus/bpa-rs600.c
7923 +@@ -46,6 +46,32 @@ static int bpa_rs600_read_byte_data(struct i2c_client *client, int page, int reg
7924 + return ret;
7925 + }
7926 +
7927 ++/*
7928 ++ * The BPA-RS600 violates the PMBus spec. Specifically, it treats the
7929 ++ * mantissa as unsigned. Deal with this here to allow the PMBus core
7930 ++ * to work with correctly encoded data.
7931 ++ */
7932 ++static int bpa_rs600_read_vin(struct i2c_client *client)
7933 ++{
7934 ++ int ret, exponent, mantissa;
7935 ++
7936 ++ ret = pmbus_read_word_data(client, 0, 0xff, PMBUS_READ_VIN);
7937 ++ if (ret < 0)
7938 ++ return ret;
7939 ++
7940 ++ if (ret & BIT(10)) {
7941 ++ exponent = ret >> 11;
7942 ++ mantissa = ret & 0x7ff;
7943 ++
7944 ++ exponent++;
7945 ++ mantissa >>= 1;
7946 ++
7947 ++ ret = (exponent << 11) | mantissa;
7948 ++ }
7949 ++
7950 ++ return ret;
7951 ++}
7952 ++
7953 + static int bpa_rs600_read_word_data(struct i2c_client *client, int page, int phase, int reg)
7954 + {
7955 + int ret;
7956 +@@ -85,6 +111,9 @@ static int bpa_rs600_read_word_data(struct i2c_client *client, int page, int pha
7957 + /* These commands return data but it is invalid/un-documented */
7958 + ret = -ENXIO;
7959 + break;
7960 ++ case PMBUS_READ_VIN:
7961 ++ ret = bpa_rs600_read_vin(client);
7962 ++ break;
7963 + default:
7964 + if (reg >= PMBUS_VIRT_BASE)
7965 + ret = -ENXIO;
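
The VIN fix above is easiest to see with numbers. PMBus Linear11 packs a 5-bit signed exponent over an 11-bit signed mantissa, value = mantissa * 2^exponent; because the BPA-RS600 fills the mantissa as an unsigned count, any reading with bit 10 set would decode as negative. Halving the mantissa and bumping the exponent keeps the value (to within one LSB) while clearing bit 10. A standalone worked example with made-up readings (it assumes arithmetic right shift for negatives, as GCC provides):

    #include <assert.h>
    #include <stdio.h>

    static int decode_linear11(unsigned raw)
    {
        int exponent = (int)(raw >> 11);
        int mantissa = (int)(raw & 0x7ff);

        if (exponent > 15)
            exponent -= 32;     /* sign-extend 5 bits */
        if (mantissa > 1023)
            mantissa -= 2048;   /* sign-extend 11 bits */
        return exponent >= 0 ? mantissa << exponent : mantissa >> -exponent;
    }

    int main(void)
    {
        /* device means 1200 * 2^-2 = 300, but bit 10 of the mantissa is set */
        unsigned raw = ((unsigned)(-2 & 0x1f) << 11) | 1200;
        assert(decode_linear11(raw) < 0);        /* compliant decode: bogus */

        /* the driver's rewrite: mantissa >>= 1, exponent++ */
        unsigned fixed = ((unsigned)(-1 & 0x1f) << 11) | (1200 >> 1);
        assert(decode_linear11(fixed) == 300);   /* 600 * 2^-1 */

        printf("raw=%d fixed=%d\n", decode_linear11(raw), decode_linear11(fixed));
        return 0;
    }
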
7966 +diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
7967 +index 6c68d34d956e8..4ddf3d2338443 100644
7968 +--- a/drivers/hwtracing/coresight/coresight-core.c
7969 ++++ b/drivers/hwtracing/coresight/coresight-core.c
7970 +@@ -608,7 +608,7 @@ static struct coresight_device *
7971 + coresight_find_enabled_sink(struct coresight_device *csdev)
7972 + {
7973 + int i;
7974 +- struct coresight_device *sink;
7975 ++ struct coresight_device *sink = NULL;
7976 +
7977 + if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
7978 + csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) &&
7979 +diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
7980 +index dcca9c2396db1..6d5014ebaab5e 100644
7981 +--- a/drivers/i2c/busses/i2c-mpc.c
7982 ++++ b/drivers/i2c/busses/i2c-mpc.c
7983 +@@ -635,6 +635,8 @@ static irqreturn_t mpc_i2c_isr(int irq, void *dev_id)
7984 +
7985 + status = readb(i2c->base + MPC_I2C_SR);
7986 + if (status & CSR_MIF) {
7987 ++ /* Read again to allow register to stabilise */
7988 ++ status = readb(i2c->base + MPC_I2C_SR);
7989 + writeb(0, i2c->base + MPC_I2C_SR);
7990 + mpc_i2c_do_intr(i2c, status);
7991 + return IRQ_HANDLED;
7992 +diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
7993 +index b8a7469cdae41..b8cea42fca1a1 100644
7994 +--- a/drivers/iio/accel/bma180.c
7995 ++++ b/drivers/iio/accel/bma180.c
7996 +@@ -55,7 +55,7 @@ struct bma180_part_info {
7997 +
7998 + u8 int_reset_reg, int_reset_mask;
7999 + u8 sleep_reg, sleep_mask;
8000 +- u8 bw_reg, bw_mask;
8001 ++ u8 bw_reg, bw_mask, bw_offset;
8002 + u8 scale_reg, scale_mask;
8003 + u8 power_reg, power_mask, lowpower_val;
8004 + u8 int_enable_reg, int_enable_mask;
8005 +@@ -127,6 +127,7 @@ struct bma180_part_info {
8006 +
8007 + #define BMA250_RANGE_MASK GENMASK(3, 0) /* Range of accel values */
8008 + #define BMA250_BW_MASK GENMASK(4, 0) /* Accel bandwidth */
8009 ++#define BMA250_BW_OFFSET 8
8010 + #define BMA250_SUSPEND_MASK BIT(7) /* chip will sleep */
8011 + #define BMA250_LOWPOWER_MASK BIT(6)
8012 + #define BMA250_DATA_INTEN_MASK BIT(4)
8013 +@@ -143,6 +144,7 @@ struct bma180_part_info {
8014 +
8015 + #define BMA254_RANGE_MASK GENMASK(3, 0) /* Range of accel values */
8016 + #define BMA254_BW_MASK GENMASK(4, 0) /* Accel bandwidth */
8017 ++#define BMA254_BW_OFFSET 8
8018 + #define BMA254_SUSPEND_MASK BIT(7) /* chip will sleep */
8019 + #define BMA254_LOWPOWER_MASK BIT(6)
8020 + #define BMA254_DATA_INTEN_MASK BIT(4)
8021 +@@ -162,7 +164,11 @@ struct bma180_data {
8022 + int scale;
8023 + int bw;
8024 + bool pmode;
8025 +- u8 buff[16]; /* 3x 16-bit + 8-bit + padding + timestamp */
8026 ++ /* Ensure timestamp is naturally aligned */
8027 ++ struct {
8028 ++ s16 chan[4];
8029 ++ s64 timestamp __aligned(8);
8030 ++ } scan;
8031 + };
8032 +
8033 + enum bma180_chan {
8034 +@@ -283,7 +289,8 @@ static int bma180_set_bw(struct bma180_data *data, int val)
8035 + for (i = 0; i < data->part_info->num_bw; ++i) {
8036 + if (data->part_info->bw_table[i] == val) {
8037 + ret = bma180_set_bits(data, data->part_info->bw_reg,
8038 +- data->part_info->bw_mask, i);
8039 ++ data->part_info->bw_mask,
8040 ++ i + data->part_info->bw_offset);
8041 + if (ret) {
8042 + dev_err(&data->client->dev,
8043 + "failed to set bandwidth\n");
8044 +@@ -876,6 +883,7 @@ static const struct bma180_part_info bma180_part_info[] = {
8045 + .sleep_mask = BMA250_SUSPEND_MASK,
8046 + .bw_reg = BMA250_BW_REG,
8047 + .bw_mask = BMA250_BW_MASK,
8048 ++ .bw_offset = BMA250_BW_OFFSET,
8049 + .scale_reg = BMA250_RANGE_REG,
8050 + .scale_mask = BMA250_RANGE_MASK,
8051 + .power_reg = BMA250_POWER_REG,
8052 +@@ -905,6 +913,7 @@ static const struct bma180_part_info bma180_part_info[] = {
8053 + .sleep_mask = BMA254_SUSPEND_MASK,
8054 + .bw_reg = BMA254_BW_REG,
8055 + .bw_mask = BMA254_BW_MASK,
8056 ++ .bw_offset = BMA254_BW_OFFSET,
8057 + .scale_reg = BMA254_RANGE_REG,
8058 + .scale_mask = BMA254_RANGE_MASK,
8059 + .power_reg = BMA254_POWER_REG,
8060 +@@ -938,12 +947,12 @@ static irqreturn_t bma180_trigger_handler(int irq, void *p)
8061 + mutex_unlock(&data->mutex);
8062 + goto err;
8063 + }
8064 +- ((s16 *)data->buff)[i++] = ret;
8065 ++ data->scan.chan[i++] = ret;
8066 + }
8067 +
8068 + mutex_unlock(&data->mutex);
8069 +
8070 +- iio_push_to_buffers_with_timestamp(indio_dev, data->buff, time_ns);
8071 ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, time_ns);
8072 + err:
8073 + iio_trigger_notify_done(indio_dev->trig);
8074 +
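
This bma180 conversion, like the analogous scan-struct changes in the drivers below, exists because iio_push_to_buffers_with_timestamp() expects the s64 timestamp slot at the end of the scan to be naturally (8-byte) aligned. A bare channel array only happens to satisfy that on 64-bit ABIs; on i386, for example, s64 is 4-byte aligned, so the explicit attribute is what guarantees the layout. A standalone demonstration, locally defining the kernel's __aligned() macro (a GCC/Clang extension):

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    #define __aligned(x) __attribute__((aligned(x)))

    struct scan_3x16 {
        int16_t chans[3];                 /* 6 bytes of channel data */
        int64_t timestamp __aligned(8);   /* padded out to offset 8 */
    };

    struct scan_3x8 {
        int8_t chans[3];                  /* 3 bytes of channel data */
        int64_t timestamp __aligned(8);   /* still lands at offset 8 */
    };

    int main(void)
    {
        printf("3x s16: ts at %zu\n", offsetof(struct scan_3x16, timestamp));
        printf("3x s8:  ts at %zu\n", offsetof(struct scan_3x8, timestamp));
        return 0;
    }
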
8075 +diff --git a/drivers/iio/accel/bma220_spi.c b/drivers/iio/accel/bma220_spi.c
8076 +index 36fc9876dbcaf..0622c79364994 100644
8077 +--- a/drivers/iio/accel/bma220_spi.c
8078 ++++ b/drivers/iio/accel/bma220_spi.c
8079 +@@ -63,7 +63,11 @@ static const int bma220_scale_table[][2] = {
8080 + struct bma220_data {
8081 + struct spi_device *spi_device;
8082 + struct mutex lock;
8083 +- s8 buffer[16]; /* 3x8-bit channels + 5x8 padding + 8x8 timestamp */
8084 ++ struct {
8085 ++ s8 chans[3];
8086 ++ /* Ensure timestamp is naturally aligned. */
8087 ++ s64 timestamp __aligned(8);
8088 ++ } scan;
8089 + u8 tx_buf[2] ____cacheline_aligned;
8090 + };
8091 +
8092 +@@ -94,12 +98,12 @@ static irqreturn_t bma220_trigger_handler(int irq, void *p)
8093 +
8094 + mutex_lock(&data->lock);
8095 + data->tx_buf[0] = BMA220_REG_ACCEL_X | BMA220_READ_MASK;
8096 +- ret = spi_write_then_read(spi, data->tx_buf, 1, data->buffer,
8097 ++ ret = spi_write_then_read(spi, data->tx_buf, 1, &data->scan.chans,
8098 + ARRAY_SIZE(bma220_channels) - 1);
8099 + if (ret < 0)
8100 + goto err;
8101 +
8102 +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
8103 ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
8104 + pf->timestamp);
8105 + err:
8106 + mutex_unlock(&data->lock);
8107 +diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
8108 +index 04d85ce34e9f5..5d58b5533cb82 100644
8109 +--- a/drivers/iio/accel/bmc150-accel-core.c
8110 ++++ b/drivers/iio/accel/bmc150-accel-core.c
8111 +@@ -1177,11 +1177,12 @@ static const struct bmc150_accel_chip_info bmc150_accel_chip_info_tbl[] = {
8112 + /*
8113 + * The datasheet page 17 says:
8114 + * 15.6, 31.3, 62.5 and 125 mg per LSB.
8115 ++ * IIO unit is m/s^2 so multiply by g = 9.80665 m/s^2.
8116 + */
8117 +- .scale_table = { {156000, BMC150_ACCEL_DEF_RANGE_2G},
8118 +- {313000, BMC150_ACCEL_DEF_RANGE_4G},
8119 +- {625000, BMC150_ACCEL_DEF_RANGE_8G},
8120 +- {1250000, BMC150_ACCEL_DEF_RANGE_16G} },
8121 ++ .scale_table = { {152984, BMC150_ACCEL_DEF_RANGE_2G},
8122 ++ {306948, BMC150_ACCEL_DEF_RANGE_4G},
8123 ++ {612916, BMC150_ACCEL_DEF_RANGE_8G},
8124 ++ {1225831, BMC150_ACCEL_DEF_RANGE_16G} },
8125 + },
8126 + [bma222e] = {
8127 + .name = "BMA222E",
8128 +@@ -1809,21 +1810,17 @@ EXPORT_SYMBOL_GPL(bmc150_accel_core_probe);
8129 +
8130 + struct i2c_client *bmc150_get_second_device(struct i2c_client *client)
8131 + {
8132 +- struct bmc150_accel_data *data = i2c_get_clientdata(client);
8133 +-
8134 +- if (!data)
8135 +- return NULL;
8136 ++ struct bmc150_accel_data *data = iio_priv(i2c_get_clientdata(client));
8137 +
8138 + return data->second_device;
8139 + }
8140 + EXPORT_SYMBOL_GPL(bmc150_get_second_device);
8141 +
8142 +-void bmc150_set_second_device(struct i2c_client *client)
8143 ++void bmc150_set_second_device(struct i2c_client *client, struct i2c_client *second_dev)
8144 + {
8145 +- struct bmc150_accel_data *data = i2c_get_clientdata(client);
8146 ++ struct bmc150_accel_data *data = iio_priv(i2c_get_clientdata(client));
8147 +
8148 +- if (data)
8149 +- data->second_device = client;
8150 ++ data->second_device = second_dev;
8151 + }
8152 + EXPORT_SYMBOL_GPL(bmc150_set_second_device);
8153 +
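
Where the new bma222e scale numbers come from: the datasheet gives mg per LSB, and the table entries are in micro-(m/s^2) per LSB, so each value is mg/1000 * 9.80665 * 1e6. For 15.6 mg/LSB that is 0.0156 * 9.80665 = 0.152984 m/s^2, i.e. 152984. A few lines of C reproduce the whole table:

    #include <stdio.h>

    int main(void)
    {
        const double g = 9.80665;          /* m/s^2 per unit of gravity */
        const double mg_per_lsb[] = { 15.6, 31.3, 62.5, 125.0 };

        for (int i = 0; i < 4; i++)
            printf("%.1f mg/LSB -> %.0f\n", mg_per_lsb[i],
                   mg_per_lsb[i] / 1000.0 * g * 1e6);
        /* prints 152984, 306948, 612916, 1225831 -- the table values */
        return 0;
    }
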
8154 +diff --git a/drivers/iio/accel/bmc150-accel-i2c.c b/drivers/iio/accel/bmc150-accel-i2c.c
8155 +index 69f709319484f..2afaae0294eef 100644
8156 +--- a/drivers/iio/accel/bmc150-accel-i2c.c
8157 ++++ b/drivers/iio/accel/bmc150-accel-i2c.c
8158 +@@ -70,7 +70,7 @@ static int bmc150_accel_probe(struct i2c_client *client,
8159 +
8160 + second_dev = i2c_acpi_new_device(&client->dev, 1, &board_info);
8161 + if (!IS_ERR(second_dev))
8162 +- bmc150_set_second_device(second_dev);
8163 ++ bmc150_set_second_device(client, second_dev);
8164 + }
8165 + #endif
8166 +
8167 +diff --git a/drivers/iio/accel/bmc150-accel.h b/drivers/iio/accel/bmc150-accel.h
8168 +index 6024f15b97004..e30c1698f6fbd 100644
8169 +--- a/drivers/iio/accel/bmc150-accel.h
8170 ++++ b/drivers/iio/accel/bmc150-accel.h
8171 +@@ -18,7 +18,7 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
8172 + const char *name, bool block_supported);
8173 + int bmc150_accel_core_remove(struct device *dev);
8174 + struct i2c_client *bmc150_get_second_device(struct i2c_client *second_device);
8175 +-void bmc150_set_second_device(struct i2c_client *second_device);
8176 ++void bmc150_set_second_device(struct i2c_client *client, struct i2c_client *second_dev);
8177 + extern const struct dev_pm_ops bmc150_accel_pm_ops;
8178 + extern const struct regmap_config bmc150_regmap_conf;
8179 +
8180 +diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
8181 +index 2f9465cb382ff..27f47e1c251e9 100644
8182 +--- a/drivers/iio/accel/hid-sensor-accel-3d.c
8183 ++++ b/drivers/iio/accel/hid-sensor-accel-3d.c
8184 +@@ -28,8 +28,11 @@ struct accel_3d_state {
8185 + struct hid_sensor_hub_callbacks callbacks;
8186 + struct hid_sensor_common common_attributes;
8187 + struct hid_sensor_hub_attribute_info accel[ACCEL_3D_CHANNEL_MAX];
8188 +- /* Reserve for 3 channels + padding + timestamp */
8189 +- u32 accel_val[ACCEL_3D_CHANNEL_MAX + 3];
8190 ++ /* Ensure timestamp is naturally aligned */
8191 ++ struct {
8192 ++ u32 accel_val[3];
8193 ++ s64 timestamp __aligned(8);
8194 ++ } scan;
8195 + int scale_pre_decml;
8196 + int scale_post_decml;
8197 + int scale_precision;
8198 +@@ -245,8 +248,8 @@ static int accel_3d_proc_event(struct hid_sensor_hub_device *hsdev,
8199 + accel_state->timestamp = iio_get_time_ns(indio_dev);
8200 +
8201 + hid_sensor_push_data(indio_dev,
8202 +- accel_state->accel_val,
8203 +- sizeof(accel_state->accel_val),
8204 ++ &accel_state->scan,
8205 ++ sizeof(accel_state->scan),
8206 + accel_state->timestamp);
8207 +
8208 + accel_state->timestamp = 0;
8209 +@@ -271,7 +274,7 @@ static int accel_3d_capture_sample(struct hid_sensor_hub_device *hsdev,
8210 + case HID_USAGE_SENSOR_ACCEL_Y_AXIS:
8211 + case HID_USAGE_SENSOR_ACCEL_Z_AXIS:
8212 + offset = usage_id - HID_USAGE_SENSOR_ACCEL_X_AXIS;
8213 +- accel_state->accel_val[CHANNEL_SCAN_INDEX_X + offset] =
8214 ++ accel_state->scan.accel_val[CHANNEL_SCAN_INDEX_X + offset] =
8215 + *(u32 *)raw_data;
8216 + ret = 0;
8217 + break;
8218 +diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
8219 +index ff724bc17a458..f6720dbba0aa3 100644
8220 +--- a/drivers/iio/accel/kxcjk-1013.c
8221 ++++ b/drivers/iio/accel/kxcjk-1013.c
8222 +@@ -133,6 +133,13 @@ enum kx_acpi_type {
8223 + ACPI_KIOX010A,
8224 + };
8225 +
8226 ++enum kxcjk1013_axis {
8227 ++ AXIS_X,
8228 ++ AXIS_Y,
8229 ++ AXIS_Z,
8230 ++ AXIS_MAX
8231 ++};
8232 ++
8233 + struct kxcjk1013_data {
8234 + struct regulator_bulk_data regulators[2];
8235 + struct i2c_client *client;
8236 +@@ -140,7 +147,11 @@ struct kxcjk1013_data {
8237 + struct iio_trigger *motion_trig;
8238 + struct iio_mount_matrix orientation;
8239 + struct mutex mutex;
8240 +- s16 buffer[8];
8241 ++ /* Ensure timestamp naturally aligned */
8242 ++ struct {
8243 ++ s16 chans[AXIS_MAX];
8244 ++ s64 timestamp __aligned(8);
8245 ++ } scan;
8246 + u8 odr_bits;
8247 + u8 range;
8248 + int wake_thres;
8249 +@@ -154,13 +165,6 @@ struct kxcjk1013_data {
8250 + enum kx_acpi_type acpi_type;
8251 + };
8252 +
8253 +-enum kxcjk1013_axis {
8254 +- AXIS_X,
8255 +- AXIS_Y,
8256 +- AXIS_Z,
8257 +- AXIS_MAX,
8258 +-};
8259 +-
8260 + enum kxcjk1013_mode {
8261 + STANDBY,
8262 + OPERATION,
8263 +@@ -1094,12 +1098,12 @@ static irqreturn_t kxcjk1013_trigger_handler(int irq, void *p)
8264 + ret = i2c_smbus_read_i2c_block_data_or_emulated(data->client,
8265 + KXCJK1013_REG_XOUT_L,
8266 + AXIS_MAX * 2,
8267 +- (u8 *)data->buffer);
8268 ++ (u8 *)data->scan.chans);
8269 + mutex_unlock(&data->mutex);
8270 + if (ret < 0)
8271 + goto err;
8272 +
8273 +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
8274 ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
8275 + data->timestamp);
8276 + err:
8277 + iio_trigger_notify_done(indio_dev->trig);
8278 +diff --git a/drivers/iio/accel/mxc4005.c b/drivers/iio/accel/mxc4005.c
8279 +index fb3cbaa62bd87..0f90e6ec01e17 100644
8280 +--- a/drivers/iio/accel/mxc4005.c
8281 ++++ b/drivers/iio/accel/mxc4005.c
8282 +@@ -56,7 +56,11 @@ struct mxc4005_data {
8283 + struct mutex mutex;
8284 + struct regmap *regmap;
8285 + struct iio_trigger *dready_trig;
8286 +- __be16 buffer[8];
8287 ++ /* Ensure timestamp is naturally aligned */
8288 ++ struct {
8289 ++ __be16 chans[3];
8290 ++ s64 timestamp __aligned(8);
8291 ++ } scan;
8292 + bool trigger_enabled;
8293 + };
8294 +
8295 +@@ -135,7 +139,7 @@ static int mxc4005_read_xyz(struct mxc4005_data *data)
8296 + int ret;
8297 +
8298 + ret = regmap_bulk_read(data->regmap, MXC4005_REG_XOUT_UPPER,
8299 +- data->buffer, sizeof(data->buffer));
8300 ++ data->scan.chans, sizeof(data->scan.chans));
8301 + if (ret < 0) {
8302 + dev_err(data->dev, "failed to read axes\n");
8303 + return ret;
8304 +@@ -301,7 +305,7 @@ static irqreturn_t mxc4005_trigger_handler(int irq, void *private)
8305 + if (ret < 0)
8306 + goto err;
8307 +
8308 +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
8309 ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
8310 + pf->timestamp);
8311 +
8312 + err:
8313 +diff --git a/drivers/iio/accel/stk8312.c b/drivers/iio/accel/stk8312.c
8314 +index 157d8faefb9e4..ba571f0f5c985 100644
8315 +--- a/drivers/iio/accel/stk8312.c
8316 ++++ b/drivers/iio/accel/stk8312.c
8317 +@@ -103,7 +103,11 @@ struct stk8312_data {
8318 + u8 mode;
8319 + struct iio_trigger *dready_trig;
8320 + bool dready_trigger_on;
8321 +- s8 buffer[16]; /* 3x8-bit channels + 5x8 padding + 64-bit timestamp */
8322 ++ /* Ensure timestamp is naturally aligned */
8323 ++ struct {
8324 ++ s8 chans[3];
8325 ++ s64 timestamp __aligned(8);
8326 ++ } scan;
8327 + };
8328 +
8329 + static IIO_CONST_ATTR(in_accel_scale_available, STK8312_SCALE_AVAIL);
8330 +@@ -438,7 +442,7 @@ static irqreturn_t stk8312_trigger_handler(int irq, void *p)
8331 + ret = i2c_smbus_read_i2c_block_data(data->client,
8332 + STK8312_REG_XOUT,
8333 + STK8312_ALL_CHANNEL_SIZE,
8334 +- data->buffer);
8335 ++ data->scan.chans);
8336 + if (ret < STK8312_ALL_CHANNEL_SIZE) {
8337 + dev_err(&data->client->dev, "register read failed\n");
8338 + mutex_unlock(&data->lock);
8339 +@@ -452,12 +456,12 @@ static irqreturn_t stk8312_trigger_handler(int irq, void *p)
8340 + mutex_unlock(&data->lock);
8341 + goto err;
8342 + }
8343 +- data->buffer[i++] = ret;
8344 ++ data->scan.chans[i++] = ret;
8345 + }
8346 + }
8347 + mutex_unlock(&data->lock);
8348 +
8349 +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
8350 ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
8351 + pf->timestamp);
8352 + err:
8353 + iio_trigger_notify_done(indio_dev->trig);
8354 +diff --git a/drivers/iio/accel/stk8ba50.c b/drivers/iio/accel/stk8ba50.c
8355 +index 7cf9cb7e86667..eb9daa4e623a8 100644
8356 +--- a/drivers/iio/accel/stk8ba50.c
8357 ++++ b/drivers/iio/accel/stk8ba50.c
8358 +@@ -91,12 +91,11 @@ struct stk8ba50_data {
8359 + u8 sample_rate_idx;
8360 + struct iio_trigger *dready_trig;
8361 + bool dready_trigger_on;
8362 +- /*
8363 +- * 3 x 16-bit channels (10-bit data, 6-bit padding) +
8364 +- * 1 x 16 padding +
8365 +- * 4 x 16 64-bit timestamp
8366 +- */
8367 +- s16 buffer[8];
8368 ++ /* Ensure timestamp is naturally aligned */
8369 ++ struct {
8370 ++ s16 chans[3];
8371 ++ s64 timestamp __aligned(8);
8372 ++ } scan;
8373 + };
8374 +
8375 + #define STK8BA50_ACCEL_CHANNEL(index, reg, axis) { \
8376 +@@ -324,7 +323,7 @@ static irqreturn_t stk8ba50_trigger_handler(int irq, void *p)
8377 + ret = i2c_smbus_read_i2c_block_data(data->client,
8378 + STK8BA50_REG_XOUT,
8379 + STK8BA50_ALL_CHANNEL_SIZE,
8380 +- (u8 *)data->buffer);
8381 ++ (u8 *)data->scan.chans);
8382 + if (ret < STK8BA50_ALL_CHANNEL_SIZE) {
8383 + dev_err(&data->client->dev, "register read failed\n");
8384 + goto err;
8385 +@@ -337,10 +336,10 @@ static irqreturn_t stk8ba50_trigger_handler(int irq, void *p)
8386 + if (ret < 0)
8387 + goto err;
8388 +
8389 +- data->buffer[i++] = ret;
8390 ++ data->scan.chans[i++] = ret;
8391 + }
8392 + }
8393 +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
8394 ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
8395 + pf->timestamp);
8396 + err:
8397 + mutex_unlock(&data->lock);
8398 +diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
8399 +index a7826f097b95c..d356b515df090 100644
8400 +--- a/drivers/iio/adc/at91-sama5d2_adc.c
8401 ++++ b/drivers/iio/adc/at91-sama5d2_adc.c
8402 +@@ -403,7 +403,8 @@ struct at91_adc_state {
8403 + struct at91_adc_dma dma_st;
8404 + struct at91_adc_touch touch_st;
8405 + struct iio_dev *indio_dev;
8406 +- u16 buffer[AT91_BUFFER_MAX_HWORDS];
8407 ++ /* Ensure naturally aligned timestamp */
8408 ++ u16 buffer[AT91_BUFFER_MAX_HWORDS] __aligned(8);
8409 + /*
8410 + * lock to prevent concurrent 'single conversion' requests through
8411 + * sysfs.
8412 +diff --git a/drivers/iio/adc/hx711.c b/drivers/iio/adc/hx711.c
8413 +index 6a173531d355b..f7ee856a6b8b6 100644
8414 +--- a/drivers/iio/adc/hx711.c
8415 ++++ b/drivers/iio/adc/hx711.c
8416 +@@ -86,9 +86,9 @@ struct hx711_data {
8417 + struct mutex lock;
8418 + /*
8419 + * triggered buffer
8420 +- * 2x32-bit channel + 64-bit timestamp
8421 ++ * 2x32-bit channel + 64-bit naturally aligned timestamp
8422 + */
8423 +- u32 buffer[4];
8424 ++ u32 buffer[4] __aligned(8);
8425 + /*
8426 + * delay after a rising edge on SCK until the data is ready DOUT
8427 + * this is dependent on the hx711 where the datasheet tells a
8428 +diff --git a/drivers/iio/adc/mxs-lradc-adc.c b/drivers/iio/adc/mxs-lradc-adc.c
8429 +index 30e29f44ebd2e..c480cb489c1a3 100644
8430 +--- a/drivers/iio/adc/mxs-lradc-adc.c
8431 ++++ b/drivers/iio/adc/mxs-lradc-adc.c
8432 +@@ -115,7 +115,8 @@ struct mxs_lradc_adc {
8433 + struct device *dev;
8434 +
8435 + void __iomem *base;
8436 +- u32 buffer[10];
8437 ++ /* Maximum of 8 channels + 8-byte timestamp */
8438 ++ u32 buffer[10] __aligned(8);
8439 + struct iio_trigger *trig;
8440 + struct completion completion;
8441 + spinlock_t lock;
8442 +diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
8443 +index 9fef39bcf997b..5b828428be77c 100644
8444 +--- a/drivers/iio/adc/ti-ads1015.c
8445 ++++ b/drivers/iio/adc/ti-ads1015.c
8446 +@@ -395,10 +395,14 @@ static irqreturn_t ads1015_trigger_handler(int irq, void *p)
8447 + struct iio_poll_func *pf = p;
8448 + struct iio_dev *indio_dev = pf->indio_dev;
8449 + struct ads1015_data *data = iio_priv(indio_dev);
8450 +- s16 buf[8]; /* 1x s16 ADC val + 3x s16 padding + 4x s16 timestamp */
8451 ++ /* Ensure natural alignment of timestamp */
8452 ++ struct {
8453 ++ s16 chan;
8454 ++ s64 timestamp __aligned(8);
8455 ++ } scan;
8456 + int chan, ret, res;
8457 +
8458 +- memset(buf, 0, sizeof(buf));
8459 ++ memset(&scan, 0, sizeof(scan));
8460 +
8461 + mutex_lock(&data->lock);
8462 + chan = find_first_bit(indio_dev->active_scan_mask,
8463 +@@ -409,10 +413,10 @@ static irqreturn_t ads1015_trigger_handler(int irq, void *p)
8464 + goto err;
8465 + }
8466 +
8467 +- buf[0] = res;
8468 ++ scan.chan = res;
8469 + mutex_unlock(&data->lock);
8470 +
8471 +- iio_push_to_buffers_with_timestamp(indio_dev, buf,
8472 ++ iio_push_to_buffers_with_timestamp(indio_dev, &scan,
8473 + iio_get_time_ns(indio_dev));
8474 +
8475 + err:
8476 +diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
8477 +index 16bcb37eebb72..79c803537dc42 100644
8478 +--- a/drivers/iio/adc/ti-ads8688.c
8479 ++++ b/drivers/iio/adc/ti-ads8688.c
8480 +@@ -383,7 +383,8 @@ static irqreturn_t ads8688_trigger_handler(int irq, void *p)
8481 + {
8482 + struct iio_poll_func *pf = p;
8483 + struct iio_dev *indio_dev = pf->indio_dev;
8484 +- u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)];
8485 ++ /* Ensure naturally aligned timestamp */
8486 ++ u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)] __aligned(8);
8487 + int i, j = 0;
8488 +
8489 + for (i = 0; i < indio_dev->masklength; i++) {
8490 +diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
8491 +index 1d794cf3e3f13..fd57fc43e8e5c 100644
8492 +--- a/drivers/iio/adc/vf610_adc.c
8493 ++++ b/drivers/iio/adc/vf610_adc.c
8494 +@@ -167,7 +167,11 @@ struct vf610_adc {
8495 + u32 sample_freq_avail[5];
8496 +
8497 + struct completion completion;
8498 +- u16 buffer[8];
8499 ++ /* Ensure the timestamp is naturally aligned */
8500 ++ struct {
8501 ++ u16 chan;
8502 ++ s64 timestamp __aligned(8);
8503 ++ } scan;
8504 + };
8505 +
8506 + static const u32 vf610_hw_avgs[] = { 1, 4, 8, 16, 32 };
8507 +@@ -579,9 +583,9 @@ static irqreturn_t vf610_adc_isr(int irq, void *dev_id)
8508 + if (coco & VF610_ADC_HS_COCO0) {
8509 + info->value = vf610_adc_read_data(info);
8510 + if (iio_buffer_enabled(indio_dev)) {
8511 +- info->buffer[0] = info->value;
8512 ++ info->scan.chan = info->value;
8513 + iio_push_to_buffers_with_timestamp(indio_dev,
8514 +- info->buffer,
8515 ++ &info->scan,
8516 + iio_get_time_ns(indio_dev));
8517 + iio_trigger_notify_done(indio_dev->trig);
8518 + } else
8519 +diff --git a/drivers/iio/chemical/atlas-sensor.c b/drivers/iio/chemical/atlas-sensor.c
8520 +index 56ba6c82b501f..6795722c68b25 100644
8521 +--- a/drivers/iio/chemical/atlas-sensor.c
8522 ++++ b/drivers/iio/chemical/atlas-sensor.c
8523 +@@ -91,8 +91,8 @@ struct atlas_data {
8524 + struct regmap *regmap;
8525 + struct irq_work work;
8526 + unsigned int interrupt_enabled;
8527 +-
8528 +- __be32 buffer[6]; /* 96-bit data + 32-bit pad + 64-bit timestamp */
8529 ++ /* 96-bit data + 32-bit pad + 64-bit timestamp */
8530 ++ __be32 buffer[6] __aligned(8);
8531 + };
8532 +
8533 + static const struct regmap_config atlas_regmap_config = {
8534 +diff --git a/drivers/iio/dummy/Kconfig b/drivers/iio/dummy/Kconfig
8535 +index 5c5c2f8c55f36..1f46cb9e51b74 100644
8536 +--- a/drivers/iio/dummy/Kconfig
8537 ++++ b/drivers/iio/dummy/Kconfig
8538 +@@ -34,6 +34,7 @@ config IIO_SIMPLE_DUMMY_BUFFER
8539 + select IIO_BUFFER
8540 + select IIO_TRIGGER
8541 + select IIO_KFIFO_BUF
8542 ++ select IIO_TRIGGERED_BUFFER
8543 + help
8544 + Add buffered data capture to the simple dummy driver.
8545 +
8546 +diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
8547 +index 1462a6a5bc6da..3d9eba716b691 100644
8548 +--- a/drivers/iio/frequency/adf4350.c
8549 ++++ b/drivers/iio/frequency/adf4350.c
8550 +@@ -563,8 +563,10 @@ static int adf4350_probe(struct spi_device *spi)
8551 +
8552 + st->lock_detect_gpiod = devm_gpiod_get_optional(&spi->dev, NULL,
8553 + GPIOD_IN);
8554 +- if (IS_ERR(st->lock_detect_gpiod))
8555 +- return PTR_ERR(st->lock_detect_gpiod);
8556 ++ if (IS_ERR(st->lock_detect_gpiod)) {
8557 ++ ret = PTR_ERR(st->lock_detect_gpiod);
8558 ++ goto error_disable_reg;
8559 ++ }
8560 +
8561 + if (pdata->power_up_frequency) {
8562 + ret = adf4350_set_freq(st, pdata->power_up_frequency);
8563 +diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
8564 +index b11ebd9bb7a41..7bc13ff2c3ac0 100644
8565 +--- a/drivers/iio/gyro/bmg160_core.c
8566 ++++ b/drivers/iio/gyro/bmg160_core.c
8567 +@@ -98,7 +98,11 @@ struct bmg160_data {
8568 + struct iio_trigger *motion_trig;
8569 + struct iio_mount_matrix orientation;
8570 + struct mutex mutex;
8571 +- s16 buffer[8];
8572 ++ /* Ensure naturally aligned timestamp */
8573 ++ struct {
8574 ++ s16 chans[3];
8575 ++ s64 timestamp __aligned(8);
8576 ++ } scan;
8577 + u32 dps_range;
8578 + int ev_enable_state;
8579 + int slope_thres;
8580 +@@ -882,12 +886,12 @@ static irqreturn_t bmg160_trigger_handler(int irq, void *p)
8581 +
8582 + mutex_lock(&data->mutex);
8583 + ret = regmap_bulk_read(data->regmap, BMG160_REG_XOUT_L,
8584 +- data->buffer, AXIS_MAX * 2);
8585 ++ data->scan.chans, AXIS_MAX * 2);
8586 + mutex_unlock(&data->mutex);
8587 + if (ret < 0)
8588 + goto err;
8589 +
8590 +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
8591 ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
8592 + pf->timestamp);
8593 + err:
8594 + iio_trigger_notify_done(indio_dev->trig);
8595 +diff --git a/drivers/iio/humidity/am2315.c b/drivers/iio/humidity/am2315.c
8596 +index 23bc9c784ef4b..248d0f262d601 100644
8597 +--- a/drivers/iio/humidity/am2315.c
8598 ++++ b/drivers/iio/humidity/am2315.c
8599 +@@ -33,7 +33,11 @@
8600 + struct am2315_data {
8601 + struct i2c_client *client;
8602 + struct mutex lock;
8603 +- s16 buffer[8]; /* 2x16-bit channels + 2x16 padding + 4x16 timestamp */
8604 ++ /* Ensure timestamp is naturally aligned */
8605 ++ struct {
8606 ++ s16 chans[2];
8607 ++ s64 timestamp __aligned(8);
8608 ++ } scan;
8609 + };
8610 +
8611 + struct am2315_sensor_data {
8612 +@@ -167,20 +171,20 @@ static irqreturn_t am2315_trigger_handler(int irq, void *p)
8613 +
8614 + mutex_lock(&data->lock);
8615 + if (*(indio_dev->active_scan_mask) == AM2315_ALL_CHANNEL_MASK) {
8616 +- data->buffer[0] = sensor_data.hum_data;
8617 +- data->buffer[1] = sensor_data.temp_data;
8618 ++ data->scan.chans[0] = sensor_data.hum_data;
8619 ++ data->scan.chans[1] = sensor_data.temp_data;
8620 + } else {
8621 + i = 0;
8622 + for_each_set_bit(bit, indio_dev->active_scan_mask,
8623 + indio_dev->masklength) {
8624 +- data->buffer[i] = (bit ? sensor_data.temp_data :
8625 +- sensor_data.hum_data);
8626 ++ data->scan.chans[i] = (bit ? sensor_data.temp_data :
8627 ++ sensor_data.hum_data);
8628 + i++;
8629 + }
8630 + }
8631 + mutex_unlock(&data->lock);
8632 +
8633 +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
8634 ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
8635 + pf->timestamp);
8636 + err:
8637 + iio_trigger_notify_done(indio_dev->trig);
8638 +diff --git a/drivers/iio/imu/adis16400.c b/drivers/iio/imu/adis16400.c
8639 +index 768aa493a1a60..b2f92b55b910c 100644
8640 +--- a/drivers/iio/imu/adis16400.c
8641 ++++ b/drivers/iio/imu/adis16400.c
8642 +@@ -645,9 +645,6 @@ static irqreturn_t adis16400_trigger_handler(int irq, void *p)
8643 + void *buffer;
8644 + int ret;
8645 +
8646 +- if (!adis->buffer)
8647 +- return -ENOMEM;
8648 +-
8649 + if (!(st->variant->flags & ADIS16400_NO_BURST) &&
8650 + st->adis.spi->max_speed_hz > ADIS16400_SPI_BURST) {
8651 + st->adis.spi->max_speed_hz = ADIS16400_SPI_BURST;
8652 +diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c
8653 +index 1de62fc79e0fc..51b76444db0b9 100644
8654 +--- a/drivers/iio/imu/adis16475.c
8655 ++++ b/drivers/iio/imu/adis16475.c
8656 +@@ -1068,7 +1068,7 @@ static irqreturn_t adis16475_trigger_handler(int irq, void *p)
8657 +
8658 + ret = spi_sync(adis->spi, &adis->msg);
8659 + if (ret)
8660 +- return ret;
8661 ++ goto check_burst32;
8662 +
8663 + adis->spi->max_speed_hz = cached_spi_speed_hz;
8664 + buffer = adis->buffer;
8665 +diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c
8666 +index ac354321f63a3..175af154e4437 100644
8667 +--- a/drivers/iio/imu/adis_buffer.c
8668 ++++ b/drivers/iio/imu/adis_buffer.c
8669 +@@ -129,9 +129,6 @@ static irqreturn_t adis_trigger_handler(int irq, void *p)
8670 + struct adis *adis = iio_device_get_drvdata(indio_dev);
8671 + int ret;
8672 +
8673 +- if (!adis->buffer)
8674 +- return -ENOMEM;
8675 +-
8676 + if (adis->data->has_paging) {
8677 + mutex_lock(&adis->state_lock);
8678 + if (adis->current_page != 0) {
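
The adis16400 and adis_buffer hunks above delete a dead !adis->buffer check from functions declared irqreturn_t: returning -ENOMEM there is silently reinterpreted as an irqreturn_t value rather than reporting an error, and the buffer is guaranteed to exist once the trigger can fire. The adis16475 hunk likewise turns an early "return ret;" after a failed spi_sync() into a goto, so the handler still restores its state and signals trigger completion. A hedged sketch of the convention (the do_read() helper is hypothetical):

    static irqreturn_t example_trigger_handler(int irq, void *p)
    {
            struct iio_poll_func *pf = p;
            struct iio_dev *indio_dev = pf->indio_dev;

            if (do_read(indio_dev) < 0)
                    goto out;       /* never "return ret;" from an IRQ thread */
            /* push samples to buffers here */
    out:
            iio_trigger_notify_done(indio_dev->trig);
            return IRQ_HANDLED;
    }
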
8679 +diff --git a/drivers/iio/light/isl29125.c b/drivers/iio/light/isl29125.c
8680 +index b93b85dbc3a6a..ba53b50d711a1 100644
8681 +--- a/drivers/iio/light/isl29125.c
8682 ++++ b/drivers/iio/light/isl29125.c
8683 +@@ -51,7 +51,11 @@
8684 + struct isl29125_data {
8685 + struct i2c_client *client;
8686 + u8 conf1;
8687 +- u16 buffer[8]; /* 3x 16-bit, padding, 8 bytes timestamp */
8688 ++ /* Ensure timestamp is naturally aligned */
8689 ++ struct {
8690 ++ u16 chans[3];
8691 ++ s64 timestamp __aligned(8);
8692 ++ } scan;
8693 + };
8694 +
8695 + #define ISL29125_CHANNEL(_color, _si) { \
8696 +@@ -184,10 +188,10 @@ static irqreturn_t isl29125_trigger_handler(int irq, void *p)
8697 + if (ret < 0)
8698 + goto done;
8699 +
8700 +- data->buffer[j++] = ret;
8701 ++ data->scan.chans[j++] = ret;
8702 + }
8703 +
8704 +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
8705 ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
8706 + iio_get_time_ns(indio_dev));
8707 +
8708 + done:
8709 +diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
8710 +index b4323d2db0b19..74ed2d88a3ed3 100644
8711 +--- a/drivers/iio/light/ltr501.c
8712 ++++ b/drivers/iio/light/ltr501.c
8713 +@@ -32,9 +32,12 @@
8714 + #define LTR501_PART_ID 0x86
8715 + #define LTR501_MANUFAC_ID 0x87
8716 + #define LTR501_ALS_DATA1 0x88 /* 16-bit, little endian */
8717 ++#define LTR501_ALS_DATA1_UPPER 0x89 /* upper 8 bits of LTR501_ALS_DATA1 */
8718 + #define LTR501_ALS_DATA0 0x8a /* 16-bit, little endian */
8719 ++#define LTR501_ALS_DATA0_UPPER 0x8b /* upper 8 bits of LTR501_ALS_DATA0 */
8720 + #define LTR501_ALS_PS_STATUS 0x8c
8721 + #define LTR501_PS_DATA 0x8d /* 16-bit, little endian */
8722 ++#define LTR501_PS_DATA_UPPER 0x8e /* upper 8 bits of LTR501_PS_DATA */
8723 + #define LTR501_INTR 0x8f /* output mode, polarity, mode */
8724 + #define LTR501_PS_THRESH_UP 0x90 /* 11 bit, ps upper threshold */
8725 + #define LTR501_PS_THRESH_LOW 0x92 /* 11 bit, ps lower threshold */
8726 +@@ -406,18 +409,19 @@ static int ltr501_read_als(const struct ltr501_data *data, __le16 buf[2])
8727 +
8728 + static int ltr501_read_ps(const struct ltr501_data *data)
8729 + {
8730 +- int ret, status;
8731 ++ __le16 status;
8732 ++ int ret;
8733 +
8734 + ret = ltr501_drdy(data, LTR501_STATUS_PS_RDY);
8735 + if (ret < 0)
8736 + return ret;
8737 +
8738 + ret = regmap_bulk_read(data->regmap, LTR501_PS_DATA,
8739 +- &status, 2);
8740 ++ &status, sizeof(status));
8741 + if (ret < 0)
8742 + return ret;
8743 +
8744 +- return status;
8745 ++ return le16_to_cpu(status);
8746 + }
8747 +
8748 + static int ltr501_read_intr_prst(const struct ltr501_data *data,
8749 +@@ -1205,7 +1209,7 @@ static struct ltr501_chip_info ltr501_chip_info_tbl[] = {
8750 + .als_gain_tbl_size = ARRAY_SIZE(ltr559_als_gain_tbl),
8751 + .ps_gain = ltr559_ps_gain_tbl,
8752 + .ps_gain_tbl_size = ARRAY_SIZE(ltr559_ps_gain_tbl),
8753 +- .als_mode_active = BIT(1),
8754 ++ .als_mode_active = BIT(0),
8755 + .als_gain_mask = BIT(2) | BIT(3) | BIT(4),
8756 + .als_gain_shift = 2,
8757 + .info = &ltr501_info,
8758 +@@ -1354,9 +1358,12 @@ static bool ltr501_is_volatile_reg(struct device *dev, unsigned int reg)
8759 + {
8760 + switch (reg) {
8761 + case LTR501_ALS_DATA1:
8762 ++ case LTR501_ALS_DATA1_UPPER:
8763 + case LTR501_ALS_DATA0:
8764 ++ case LTR501_ALS_DATA0_UPPER:
8765 + case LTR501_ALS_PS_STATUS:
8766 + case LTR501_PS_DATA:
8767 ++ case LTR501_PS_DATA_UPPER:
8768 + return true;
8769 + default:
8770 + return false;
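
Three separate fixes land in the ltr501 hunk above: the ltr559 ALS-active bit moves from BIT(1) to BIT(0); the upper bytes of the three 16-bit data registers gain their own volatile entries so the regmap cache can never serve one stale half of a sample; and ltr501_read_ps() stops bulk-reading two wire-order bytes into a host-endian int, which depended on byte order and left the int's upper bytes uninitialized. The last fix follows the standard idiom for little-endian registers, sketched here (register name hypothetical):

    __le16 raw;
    int ret;

    ret = regmap_bulk_read(data->regmap, EXAMPLE_DATA_REG,
                           &raw, sizeof(raw));
    if (ret < 0)
            return ret;
    return le16_to_cpu(raw);        /* convert once, at the boundary */
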
8771 +diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c
8772 +index 6fe5d46f80d40..0593abd600ec2 100644
8773 +--- a/drivers/iio/light/tcs3414.c
8774 ++++ b/drivers/iio/light/tcs3414.c
8775 +@@ -53,7 +53,11 @@ struct tcs3414_data {
8776 + u8 control;
8777 + u8 gain;
8778 + u8 timing;
8779 +- u16 buffer[8]; /* 4x 16-bit + 8 bytes timestamp */
8780 ++ /* Ensure timestamp is naturally aligned */
8781 ++ struct {
8782 ++ u16 chans[4];
8783 ++ s64 timestamp __aligned(8);
8784 ++ } scan;
8785 + };
8786 +
8787 + #define TCS3414_CHANNEL(_color, _si, _addr) { \
8788 +@@ -209,10 +213,10 @@ static irqreturn_t tcs3414_trigger_handler(int irq, void *p)
8789 + if (ret < 0)
8790 + goto done;
8791 +
8792 +- data->buffer[j++] = ret;
8793 ++ data->scan.chans[j++] = ret;
8794 + }
8795 +
8796 +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
8797 ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
8798 + iio_get_time_ns(indio_dev));
8799 +
8800 + done:
8801 +diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c
8802 +index a0dc447aeb68b..371c6a39a1654 100644
8803 +--- a/drivers/iio/light/tcs3472.c
8804 ++++ b/drivers/iio/light/tcs3472.c
8805 +@@ -64,7 +64,11 @@ struct tcs3472_data {
8806 + u8 control;
8807 + u8 atime;
8808 + u8 apers;
8809 +- u16 buffer[8]; /* 4 16-bit channels + 64-bit timestamp */
8810 ++ /* Ensure timestamp is naturally aligned */
8811 ++ struct {
8812 ++ u16 chans[4];
8813 ++ s64 timestamp __aligned(8);
8814 ++ } scan;
8815 + };
8816 +
8817 + static const struct iio_event_spec tcs3472_events[] = {
8818 +@@ -386,10 +390,10 @@ static irqreturn_t tcs3472_trigger_handler(int irq, void *p)
8819 + if (ret < 0)
8820 + goto done;
8821 +
8822 +- data->buffer[j++] = ret;
8823 ++ data->scan.chans[j++] = ret;
8824 + }
8825 +
8826 +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
8827 ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
8828 + iio_get_time_ns(indio_dev));
8829 +
8830 + done:
8831 +@@ -531,7 +535,8 @@ static int tcs3472_probe(struct i2c_client *client,
8832 + return 0;
8833 +
8834 + free_irq:
8835 +- free_irq(client->irq, indio_dev);
8836 ++ if (client->irq)
8837 ++ free_irq(client->irq, indio_dev);
8838 + buffer_cleanup:
8839 + iio_triggered_buffer_cleanup(indio_dev);
8840 + return ret;
8841 +@@ -559,7 +564,8 @@ static int tcs3472_remove(struct i2c_client *client)
8842 + struct iio_dev *indio_dev = i2c_get_clientdata(client);
8843 +
8844 + iio_device_unregister(indio_dev);
8845 +- free_irq(client->irq, indio_dev);
8846 ++ if (client->irq)
8847 ++ free_irq(client->irq, indio_dev);
8848 + iio_triggered_buffer_cleanup(indio_dev);
8849 + tcs3472_powerdown(iio_priv(indio_dev));
8850 +
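
The tcs3472 probe-error and remove hunks guard free_irq() behind client->irq so that teardown mirrors setup: when no interrupt line is described, probe never requests one, and free_irq(0, ...) would complain about freeing an IRQ that was never allocated. The symmetric shape, roughly (handler name hypothetical, flags assumed):

    /* probe: request only when an interrupt line exists */
    if (client->irq) {
            ret = request_threaded_irq(client->irq, NULL, example_handler,
                                       IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                       "tcs3472", indio_dev);
            if (ret)
                    goto buffer_cleanup;
    }

    /* remove, and every probe unwind path: repeat the same test */
    if (client->irq)
            free_irq(client->irq, indio_dev);
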
8851 +diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
8852 +index 2f7916f95689e..3b5e27053ef29 100644
8853 +--- a/drivers/iio/light/vcnl4000.c
8854 ++++ b/drivers/iio/light/vcnl4000.c
8855 +@@ -910,7 +910,7 @@ static irqreturn_t vcnl4010_trigger_handler(int irq, void *p)
8856 + struct iio_dev *indio_dev = pf->indio_dev;
8857 + struct vcnl4000_data *data = iio_priv(indio_dev);
8858 + const unsigned long *active_scan_mask = indio_dev->active_scan_mask;
8859 +- u16 buffer[8] = {0}; /* 1x16-bit + ts */
8860 ++ u16 buffer[8] __aligned(8) = {0}; /* 1x16-bit + naturally aligned ts */
8861 + bool data_read = false;
8862 + unsigned long isr;
8863 + int val = 0;
8864 +diff --git a/drivers/iio/light/vcnl4035.c b/drivers/iio/light/vcnl4035.c
8865 +index ae87740d9cef2..bc07774117124 100644
8866 +--- a/drivers/iio/light/vcnl4035.c
8867 ++++ b/drivers/iio/light/vcnl4035.c
8868 +@@ -102,7 +102,8 @@ static irqreturn_t vcnl4035_trigger_consumer_handler(int irq, void *p)
8869 + struct iio_poll_func *pf = p;
8870 + struct iio_dev *indio_dev = pf->indio_dev;
8871 + struct vcnl4035_data *data = iio_priv(indio_dev);
8872 +- u8 buffer[ALIGN(sizeof(u16), sizeof(s64)) + sizeof(s64)];
8873 ++ /* Ensure naturally aligned timestamp */
8874 ++ u8 buffer[ALIGN(sizeof(u16), sizeof(s64)) + sizeof(s64)] __aligned(8);
8875 + int ret;
8876 +
8877 + ret = regmap_read(data->regmap, VCNL4035_ALS_DATA, (int *)buffer);
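
In the vcnl4010 and vcnl4035 handlers above the scan buffer lives on the stack rather than in iio_priv() data, so the fix is an __aligned(8) on the array itself; the rm3100 and lmp91000 state-struct buffers and the isl29501 on-stack buffer below get the same treatment. The vcnl4035 declaration also carries the sizing arithmetic worked out: ALIGN(sizeof(u16), sizeof(s64)) rounds the 2 channel bytes up to the next 8-byte boundary, and adding sizeof(s64) reserves the timestamp slot:

    /* 2 channel bytes -> padded to 8, plus 8 timestamp bytes = 16 total */
    u8 buffer[ALIGN(sizeof(u16), sizeof(s64)) + sizeof(s64)] __aligned(8);
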
8878 +diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
8879 +index 00f9766bad5c5..d534f4f3909eb 100644
8880 +--- a/drivers/iio/magnetometer/bmc150_magn.c
8881 ++++ b/drivers/iio/magnetometer/bmc150_magn.c
8882 +@@ -138,8 +138,11 @@ struct bmc150_magn_data {
8883 + struct regmap *regmap;
8884 + struct regulator_bulk_data regulators[2];
8885 + struct iio_mount_matrix orientation;
8886 +- /* 4 x 32 bits for x, y z, 4 bytes align, 64 bits timestamp */
8887 +- s32 buffer[6];
8888 ++ /* Ensure timestamp is naturally aligned */
8889 ++ struct {
8890 ++ s32 chans[3];
8891 ++ s64 timestamp __aligned(8);
8892 ++ } scan;
8893 + struct iio_trigger *dready_trig;
8894 + bool dready_trigger_on;
8895 + int max_odr;
8896 +@@ -675,11 +678,11 @@ static irqreturn_t bmc150_magn_trigger_handler(int irq, void *p)
8897 + int ret;
8898 +
8899 + mutex_lock(&data->mutex);
8900 +- ret = bmc150_magn_read_xyz(data, data->buffer);
8901 ++ ret = bmc150_magn_read_xyz(data, data->scan.chans);
8902 + if (ret < 0)
8903 + goto err;
8904 +
8905 +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
8906 ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
8907 + pf->timestamp);
8908 +
8909 + err:
8910 +diff --git a/drivers/iio/magnetometer/hmc5843.h b/drivers/iio/magnetometer/hmc5843.h
8911 +index 3f6c0b6629415..242f742f2643a 100644
8912 +--- a/drivers/iio/magnetometer/hmc5843.h
8913 ++++ b/drivers/iio/magnetometer/hmc5843.h
8914 +@@ -33,7 +33,8 @@ enum hmc5843_ids {
8915 + * @lock: update and read regmap data
8916 + * @regmap: hardware access register maps
8917 + * @variant: describe chip variants
8918 +- * @buffer: 3x 16-bit channels + padding + 64-bit timestamp
8919 ++ * @scan: buffer to pack data for passing to
8920 ++ * iio_push_to_buffers_with_timestamp()
8921 + */
8922 + struct hmc5843_data {
8923 + struct device *dev;
8924 +@@ -41,7 +42,10 @@ struct hmc5843_data {
8925 + struct regmap *regmap;
8926 + const struct hmc5843_chip_info *variant;
8927 + struct iio_mount_matrix orientation;
8928 +- __be16 buffer[8];
8929 ++ struct {
8930 ++ __be16 chans[3];
8931 ++ s64 timestamp __aligned(8);
8932 ++ } scan;
8933 + };
8934 +
8935 + int hmc5843_common_probe(struct device *dev, struct regmap *regmap,
8936 +diff --git a/drivers/iio/magnetometer/hmc5843_core.c b/drivers/iio/magnetometer/hmc5843_core.c
8937 +index 780faea61d82e..221563e0c18fd 100644
8938 +--- a/drivers/iio/magnetometer/hmc5843_core.c
8939 ++++ b/drivers/iio/magnetometer/hmc5843_core.c
8940 +@@ -446,13 +446,13 @@ static irqreturn_t hmc5843_trigger_handler(int irq, void *p)
8941 + }
8942 +
8943 + ret = regmap_bulk_read(data->regmap, HMC5843_DATA_OUT_MSB_REGS,
8944 +- data->buffer, 3 * sizeof(__be16));
8945 ++ data->scan.chans, sizeof(data->scan.chans));
8946 +
8947 + mutex_unlock(&data->lock);
8948 + if (ret < 0)
8949 + goto done;
8950 +
8951 +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
8952 ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
8953 + iio_get_time_ns(indio_dev));
8954 +
8955 + done:
8956 +diff --git a/drivers/iio/magnetometer/rm3100-core.c b/drivers/iio/magnetometer/rm3100-core.c
8957 +index dd811da9cb6db..934da20781bba 100644
8958 +--- a/drivers/iio/magnetometer/rm3100-core.c
8959 ++++ b/drivers/iio/magnetometer/rm3100-core.c
8960 +@@ -78,7 +78,8 @@ struct rm3100_data {
8961 + bool use_interrupt;
8962 + int conversion_time;
8963 + int scale;
8964 +- u8 buffer[RM3100_SCAN_BYTES];
8965 ++ /* Ensure naturally aligned timestamp */
8966 ++ u8 buffer[RM3100_SCAN_BYTES] __aligned(8);
8967 + struct iio_trigger *drdy_trig;
8968 +
8969 + /*
8970 +diff --git a/drivers/iio/potentiostat/lmp91000.c b/drivers/iio/potentiostat/lmp91000.c
8971 +index 8a9c576616ee5..ff39ba975da70 100644
8972 +--- a/drivers/iio/potentiostat/lmp91000.c
8973 ++++ b/drivers/iio/potentiostat/lmp91000.c
8974 +@@ -71,8 +71,8 @@ struct lmp91000_data {
8975 +
8976 + struct completion completion;
8977 + u8 chan_select;
8978 +-
8979 +- u32 buffer[4]; /* 64-bit data + 64-bit timestamp */
8980 ++ /* 64-bit data + 64-bit naturally aligned timestamp */
8981 ++ u32 buffer[4] __aligned(8);
8982 + };
8983 +
8984 + static const struct iio_chan_spec lmp91000_channels[] = {
8985 +diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
8986 +index edc4a35ae66d1..1d5ace2bde44d 100644
8987 +--- a/drivers/iio/proximity/as3935.c
8988 ++++ b/drivers/iio/proximity/as3935.c
8989 +@@ -59,7 +59,11 @@ struct as3935_state {
8990 + unsigned long noise_tripped;
8991 + u32 tune_cap;
8992 + u32 nflwdth_reg;
8993 +- u8 buffer[16]; /* 8-bit data + 56-bit padding + 64-bit timestamp */
8994 ++ /* Ensure timestamp is naturally aligned */
8995 ++ struct {
8996 ++ u8 chan;
8997 ++ s64 timestamp __aligned(8);
8998 ++ } scan;
8999 + u8 buf[2] ____cacheline_aligned;
9000 + };
9001 +
9002 +@@ -225,8 +229,8 @@ static irqreturn_t as3935_trigger_handler(int irq, void *private)
9003 + if (ret)
9004 + goto err_read;
9005 +
9006 +- st->buffer[0] = val & AS3935_DATA_MASK;
9007 +- iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer,
9008 ++ st->scan.chan = val & AS3935_DATA_MASK;
9009 ++ iio_push_to_buffers_with_timestamp(indio_dev, &st->scan,
9010 + iio_get_time_ns(indio_dev));
9011 + err_read:
9012 + iio_trigger_notify_done(indio_dev->trig);
9013 +diff --git a/drivers/iio/proximity/isl29501.c b/drivers/iio/proximity/isl29501.c
9014 +index 90e76451c972a..5b6ea783795d9 100644
9015 +--- a/drivers/iio/proximity/isl29501.c
9016 ++++ b/drivers/iio/proximity/isl29501.c
9017 +@@ -938,7 +938,7 @@ static irqreturn_t isl29501_trigger_handler(int irq, void *p)
9018 + struct iio_dev *indio_dev = pf->indio_dev;
9019 + struct isl29501_private *isl29501 = iio_priv(indio_dev);
9020 + const unsigned long *active_mask = indio_dev->active_scan_mask;
9021 +- u32 buffer[4] = {}; /* 1x16-bit + ts */
9022 ++ u32 buffer[4] __aligned(8) = {}; /* 1x16-bit + naturally aligned ts */
9023 +
9024 + if (test_bit(ISL29501_DISTANCE_SCAN_INDEX, active_mask))
9025 + isl29501_register_read(isl29501, REG_DISTANCE, buffer);
9026 +diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
9027 +index cc206bfa09c78..d854b8d5fbbaf 100644
9028 +--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
9029 ++++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
9030 +@@ -44,7 +44,11 @@ struct lidar_data {
9031 + int (*xfer)(struct lidar_data *data, u8 reg, u8 *val, int len);
9032 + int i2c_enabled;
9033 +
9034 +- u16 buffer[8]; /* 2 byte distance + 8 byte timestamp */
9035 ++ /* Ensure timestamp is naturally aligned */
9036 ++ struct {
9037 ++ u16 chan;
9038 ++ s64 timestamp __aligned(8);
9039 ++ } scan;
9040 + };
9041 +
9042 + static const struct iio_chan_spec lidar_channels[] = {
9043 +@@ -230,9 +234,9 @@ static irqreturn_t lidar_trigger_handler(int irq, void *private)
9044 + struct lidar_data *data = iio_priv(indio_dev);
9045 + int ret;
9046 +
9047 +- ret = lidar_get_measurement(data, data->buffer);
9048 ++ ret = lidar_get_measurement(data, &data->scan.chan);
9049 + if (!ret) {
9050 +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
9051 ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
9052 + iio_get_time_ns(indio_dev));
9053 + } else if (ret != -EINVAL) {
9054 + dev_err(&data->client->dev, "cannot read LIDAR measurement");
9055 +diff --git a/drivers/iio/proximity/srf08.c b/drivers/iio/proximity/srf08.c
9056 +index 70beac5c9c1df..9b0886760f76d 100644
9057 +--- a/drivers/iio/proximity/srf08.c
9058 ++++ b/drivers/iio/proximity/srf08.c
9059 +@@ -63,11 +63,11 @@ struct srf08_data {
9060 + int range_mm;
9061 + struct mutex lock;
9062 +
9063 +- /*
9064 +- * triggered buffer
9065 +- * 1x16-bit channel + 3x16 padding + 4x16 timestamp
9066 +- */
9067 +- s16 buffer[8];
9068 ++ /* Ensure timestamp is naturally aligned */
9069 ++ struct {
9070 ++ s16 chan;
9071 ++ s64 timestamp __aligned(8);
9072 ++ } scan;
9073 +
9074 + /* Sensor-Type */
9075 + enum srf08_sensor_type sensor_type;
9076 +@@ -190,9 +190,9 @@ static irqreturn_t srf08_trigger_handler(int irq, void *p)
9077 +
9078 + mutex_lock(&data->lock);
9079 +
9080 +- data->buffer[0] = sensor_data;
9081 ++ data->scan.chan = sensor_data;
9082 + iio_push_to_buffers_with_timestamp(indio_dev,
9083 +- data->buffer, pf->timestamp);
9084 ++ &data->scan, pf->timestamp);
9085 +
9086 + mutex_unlock(&data->lock);
9087 + err:
9088 +diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
9089 +index 0ead0d2231540..81d832646d27a 100644
9090 +--- a/drivers/infiniband/core/cm.c
9091 ++++ b/drivers/infiniband/core/cm.c
9092 +@@ -121,8 +121,6 @@ static struct ib_cm {
9093 + __be32 random_id_operand;
9094 + struct list_head timewait_list;
9095 + struct workqueue_struct *wq;
9096 +- /* Sync on cm change port state */
9097 +- spinlock_t state_lock;
9098 + } cm;
9099 +
9100 + /* Counter indexes ordered by attribute ID */
9101 +@@ -203,8 +201,6 @@ struct cm_port {
9102 + struct cm_device *cm_dev;
9103 + struct ib_mad_agent *mad_agent;
9104 + u32 port_num;
9105 +- struct list_head cm_priv_prim_list;
9106 +- struct list_head cm_priv_altr_list;
9107 + struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
9108 + };
9109 +
9110 +@@ -285,12 +281,6 @@ struct cm_id_private {
9111 + u8 service_timeout;
9112 + u8 target_ack_delay;
9113 +
9114 +- struct list_head prim_list;
9115 +- struct list_head altr_list;
9116 +- /* Indicates that the send port mad is registered and av is set */
9117 +- int prim_send_port_not_ready;
9118 +- int altr_send_port_not_ready;
9119 +-
9120 + struct list_head work_list;
9121 + atomic_t work_count;
9122 +
9123 +@@ -305,53 +295,25 @@ static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
9124 + complete(&cm_id_priv->comp);
9125 + }
9126 +
9127 +-static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
9128 +- struct ib_mad_send_buf **msg)
9129 ++static struct ib_mad_send_buf *cm_alloc_msg(struct cm_id_private *cm_id_priv)
9130 + {
9131 + struct ib_mad_agent *mad_agent;
9132 + struct ib_mad_send_buf *m;
9133 + struct ib_ah *ah;
9134 +- struct cm_av *av;
9135 +- unsigned long flags, flags2;
9136 +- int ret = 0;
9137 +
9138 +- /* don't let the port to be released till the agent is down */
9139 +- spin_lock_irqsave(&cm.state_lock, flags2);
9140 +- spin_lock_irqsave(&cm.lock, flags);
9141 +- if (!cm_id_priv->prim_send_port_not_ready)
9142 +- av = &cm_id_priv->av;
9143 +- else if (!cm_id_priv->altr_send_port_not_ready &&
9144 +- (cm_id_priv->alt_av.port))
9145 +- av = &cm_id_priv->alt_av;
9146 +- else {
9147 +- pr_info("%s: not valid CM id\n", __func__);
9148 +- ret = -ENODEV;
9149 +- spin_unlock_irqrestore(&cm.lock, flags);
9150 +- goto out;
9151 +- }
9152 +- spin_unlock_irqrestore(&cm.lock, flags);
9153 +- /* Make sure the port haven't released the mad yet */
9154 + mad_agent = cm_id_priv->av.port->mad_agent;
9155 +- if (!mad_agent) {
9156 +- pr_info("%s: not a valid MAD agent\n", __func__);
9157 +- ret = -ENODEV;
9158 +- goto out;
9159 +- }
9160 +- ah = rdma_create_ah(mad_agent->qp->pd, &av->ah_attr, 0);
9161 +- if (IS_ERR(ah)) {
9162 +- ret = PTR_ERR(ah);
9163 +- goto out;
9164 +- }
9165 ++ ah = rdma_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr, 0);
9166 ++ if (IS_ERR(ah))
9167 ++ return (void *)ah;
9168 +
9169 + m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
9170 +- av->pkey_index,
9171 ++ cm_id_priv->av.pkey_index,
9172 + 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
9173 + GFP_ATOMIC,
9174 + IB_MGMT_BASE_VERSION);
9175 + if (IS_ERR(m)) {
9176 + rdma_destroy_ah(ah, 0);
9177 +- ret = PTR_ERR(m);
9178 +- goto out;
9179 ++ return m;
9180 + }
9181 +
9182 + /* Timeout set by caller if response is expected. */
9183 +@@ -360,11 +322,36 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
9184 +
9185 + refcount_inc(&cm_id_priv->refcount);
9186 + m->context[0] = cm_id_priv;
9187 +- *msg = m;
9188 ++ return m;
9189 ++}
9190 +
9191 +-out:
9192 +- spin_unlock_irqrestore(&cm.state_lock, flags2);
9193 +- return ret;
9194 ++static struct ib_mad_send_buf *
9195 ++cm_alloc_priv_msg(struct cm_id_private *cm_id_priv)
9196 ++{
9197 ++ struct ib_mad_send_buf *msg;
9198 ++
9199 ++ lockdep_assert_held(&cm_id_priv->lock);
9200 ++
9201 ++ msg = cm_alloc_msg(cm_id_priv);
9202 ++ if (IS_ERR(msg))
9203 ++ return msg;
9204 ++ cm_id_priv->msg = msg;
9205 ++ return msg;
9206 ++}
9207 ++
9208 ++static void cm_free_priv_msg(struct ib_mad_send_buf *msg)
9209 ++{
9210 ++ struct cm_id_private *cm_id_priv = msg->context[0];
9211 ++
9212 ++ lockdep_assert_held(&cm_id_priv->lock);
9213 ++
9214 ++ if (!WARN_ON(cm_id_priv->msg != msg))
9215 ++ cm_id_priv->msg = NULL;
9216 ++
9217 ++ if (msg->ah)
9218 ++ rdma_destroy_ah(msg->ah, 0);
9219 ++ cm_deref_id(cm_id_priv);
9220 ++ ib_free_send_mad(msg);
9221 + }
9222 +
9223 + static struct ib_mad_send_buf *cm_alloc_response_msg_no_ah(struct cm_port *port,
9224 +@@ -413,7 +400,7 @@ static int cm_alloc_response_msg(struct cm_port *port,
9225 +
9226 + ret = cm_create_response_msg_ah(port, mad_recv_wc, m);
9227 + if (ret) {
9228 +- cm_free_msg(m);
9229 ++ ib_free_send_mad(m);
9230 + return ret;
9231 + }
9232 +
9233 +@@ -421,6 +408,13 @@ static int cm_alloc_response_msg(struct cm_port *port,
9234 + return 0;
9235 + }
9236 +
9237 ++static void cm_free_response_msg(struct ib_mad_send_buf *msg)
9238 ++{
9239 ++ if (msg->ah)
9240 ++ rdma_destroy_ah(msg->ah, 0);
9241 ++ ib_free_send_mad(msg);
9242 ++}
9243 ++
9244 + static void *cm_copy_private_data(const void *private_data, u8 private_data_len)
9245 + {
9246 + void *data;
9247 +@@ -445,30 +439,12 @@ static void cm_set_private_data(struct cm_id_private *cm_id_priv,
9248 + cm_id_priv->private_data_len = private_data_len;
9249 + }
9250 +
9251 +-static int cm_init_av_for_lap(struct cm_port *port, struct ib_wc *wc,
9252 +- struct ib_grh *grh, struct cm_av *av)
9253 ++static void cm_init_av_for_lap(struct cm_port *port, struct ib_wc *wc,
9254 ++ struct rdma_ah_attr *ah_attr, struct cm_av *av)
9255 + {
9256 +- struct rdma_ah_attr new_ah_attr;
9257 +- int ret;
9258 +-
9259 + av->port = port;
9260 + av->pkey_index = wc->pkey_index;
9261 +-
9262 +- /*
9263 +- * av->ah_attr might be initialized based on past wc during incoming
9264 +- * connect request or while sending out connect request. So initialize
9265 +- * a new ah_attr on stack. If initialization fails, old ah_attr is
9266 +- * used for sending any responses. If initialization is successful,
9267 +- * than new ah_attr is used by overwriting old one.
9268 +- */
9269 +- ret = ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
9270 +- port->port_num, wc,
9271 +- grh, &new_ah_attr);
9272 +- if (ret)
9273 +- return ret;
9274 +-
9275 +- rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
9276 +- return 0;
9277 ++ rdma_move_ah_attr(&av->ah_attr, ah_attr);
9278 + }
9279 +
9280 + static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
9281 +@@ -481,21 +457,6 @@ static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
9282 + grh, &av->ah_attr);
9283 + }
9284 +
9285 +-static void add_cm_id_to_port_list(struct cm_id_private *cm_id_priv,
9286 +- struct cm_av *av, struct cm_port *port)
9287 +-{
9288 +- unsigned long flags;
9289 +-
9290 +- spin_lock_irqsave(&cm.lock, flags);
9291 +- if (&cm_id_priv->av == av)
9292 +- list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
9293 +- else if (&cm_id_priv->alt_av == av)
9294 +- list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
9295 +- else
9296 +- WARN_ON(true);
9297 +- spin_unlock_irqrestore(&cm.lock, flags);
9298 +-}
9299 +-
9300 + static struct cm_port *
9301 + get_cm_port_from_path(struct sa_path_rec *path, const struct ib_gid_attr *attr)
9302 + {
9303 +@@ -539,8 +500,7 @@ get_cm_port_from_path(struct sa_path_rec *path, const struct ib_gid_attr *attr)
9304 +
9305 + static int cm_init_av_by_path(struct sa_path_rec *path,
9306 + const struct ib_gid_attr *sgid_attr,
9307 +- struct cm_av *av,
9308 +- struct cm_id_private *cm_id_priv)
9309 ++ struct cm_av *av)
9310 + {
9311 + struct rdma_ah_attr new_ah_attr;
9312 + struct cm_device *cm_dev;
9313 +@@ -574,11 +534,24 @@ static int cm_init_av_by_path(struct sa_path_rec *path,
9314 + return ret;
9315 +
9316 + av->timeout = path->packet_life_time + 1;
9317 +- add_cm_id_to_port_list(cm_id_priv, av, port);
9318 + rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
9319 + return 0;
9320 + }
9321 +
9322 ++/* Move av created by cm_init_av_by_path(), so av.dgid is not moved */
9323 ++static void cm_move_av_from_path(struct cm_av *dest, struct cm_av *src)
9324 ++{
9325 ++ dest->port = src->port;
9326 ++ dest->pkey_index = src->pkey_index;
9327 ++ rdma_move_ah_attr(&dest->ah_attr, &src->ah_attr);
9328 ++ dest->timeout = src->timeout;
9329 ++}
9330 ++
9331 ++static void cm_destroy_av(struct cm_av *av)
9332 ++{
9333 ++ rdma_destroy_ah_attr(&av->ah_attr);
9334 ++}
9335 ++
9336 + static u32 cm_local_id(__be32 local_id)
9337 + {
9338 + return (__force u32) (local_id ^ cm.random_id_operand);
9339 +@@ -854,8 +827,6 @@ static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device,
9340 + spin_lock_init(&cm_id_priv->lock);
9341 + init_completion(&cm_id_priv->comp);
9342 + INIT_LIST_HEAD(&cm_id_priv->work_list);
9343 +- INIT_LIST_HEAD(&cm_id_priv->prim_list);
9344 +- INIT_LIST_HEAD(&cm_id_priv->altr_list);
9345 + atomic_set(&cm_id_priv->work_count, -1);
9346 + refcount_set(&cm_id_priv->refcount, 1);
9347 +
9348 +@@ -1156,12 +1127,7 @@ retest:
9349 + kfree(cm_id_priv->timewait_info);
9350 + cm_id_priv->timewait_info = NULL;
9351 + }
9352 +- if (!list_empty(&cm_id_priv->altr_list) &&
9353 +- (!cm_id_priv->altr_send_port_not_ready))
9354 +- list_del(&cm_id_priv->altr_list);
9355 +- if (!list_empty(&cm_id_priv->prim_list) &&
9356 +- (!cm_id_priv->prim_send_port_not_ready))
9357 +- list_del(&cm_id_priv->prim_list);
9358 ++
9359 + WARN_ON(cm_id_priv->listen_sharecount);
9360 + WARN_ON(!RB_EMPTY_NODE(&cm_id_priv->service_node));
9361 + if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
9362 +@@ -1175,8 +1141,8 @@ retest:
9363 + while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
9364 + cm_free_work(work);
9365 +
9366 +- rdma_destroy_ah_attr(&cm_id_priv->av.ah_attr);
9367 +- rdma_destroy_ah_attr(&cm_id_priv->alt_av.ah_attr);
9368 ++ cm_destroy_av(&cm_id_priv->av);
9369 ++ cm_destroy_av(&cm_id_priv->alt_av);
9370 + kfree(cm_id_priv->private_data);
9371 + kfree_rcu(cm_id_priv, rcu);
9372 + }
9373 +@@ -1500,7 +1466,9 @@ static int cm_validate_req_param(struct ib_cm_req_param *param)
9374 + int ib_send_cm_req(struct ib_cm_id *cm_id,
9375 + struct ib_cm_req_param *param)
9376 + {
9377 ++ struct cm_av av = {}, alt_av = {};
9378 + struct cm_id_private *cm_id_priv;
9379 ++ struct ib_mad_send_buf *msg;
9380 + struct cm_req_msg *req_msg;
9381 + unsigned long flags;
9382 + int ret;
9383 +@@ -1514,8 +1482,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
9384 + spin_lock_irqsave(&cm_id_priv->lock, flags);
9385 + if (cm_id->state != IB_CM_IDLE || WARN_ON(cm_id_priv->timewait_info)) {
9386 + spin_unlock_irqrestore(&cm_id_priv->lock, flags);
9387 +- ret = -EINVAL;
9388 +- goto out;
9389 ++ return -EINVAL;
9390 + }
9391 + spin_unlock_irqrestore(&cm_id_priv->lock, flags);
9392 +
9393 +@@ -1524,19 +1491,20 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
9394 + if (IS_ERR(cm_id_priv->timewait_info)) {
9395 + ret = PTR_ERR(cm_id_priv->timewait_info);
9396 + cm_id_priv->timewait_info = NULL;
9397 +- goto out;
9398 ++ return ret;
9399 + }
9400 +
9401 + ret = cm_init_av_by_path(param->primary_path,
9402 +- param->ppath_sgid_attr, &cm_id_priv->av,
9403 +- cm_id_priv);
9404 ++ param->ppath_sgid_attr, &av);
9405 + if (ret)
9406 +- goto out;
9407 ++ return ret;
9408 + if (param->alternate_path) {
9409 + ret = cm_init_av_by_path(param->alternate_path, NULL,
9410 +- &cm_id_priv->alt_av, cm_id_priv);
9411 +- if (ret)
9412 +- goto out;
9413 ++ &alt_av);
9414 ++ if (ret) {
9415 ++ cm_destroy_av(&av);
9416 ++ return ret;
9417 ++ }
9418 + }
9419 + cm_id->service_id = param->service_id;
9420 + cm_id->service_mask = ~cpu_to_be64(0);
9421 +@@ -1552,33 +1520,40 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
9422 + cm_id_priv->pkey = param->primary_path->pkey;
9423 + cm_id_priv->qp_type = param->qp_type;
9424 +
9425 +- ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
9426 +- if (ret)
9427 +- goto out;
9428 ++ spin_lock_irqsave(&cm_id_priv->lock, flags);
9429 ++
9430 ++ cm_move_av_from_path(&cm_id_priv->av, &av);
9431 ++ if (param->alternate_path)
9432 ++ cm_move_av_from_path(&cm_id_priv->alt_av, &alt_av);
9433 ++
9434 ++ msg = cm_alloc_priv_msg(cm_id_priv);
9435 ++ if (IS_ERR(msg)) {
9436 ++ ret = PTR_ERR(msg);
9437 ++ goto out_unlock;
9438 ++ }
9439 +
9440 +- req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
9441 ++ req_msg = (struct cm_req_msg *)msg->mad;
9442 + cm_format_req(req_msg, cm_id_priv, param);
9443 + cm_id_priv->tid = req_msg->hdr.tid;
9444 +- cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
9445 +- cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;
9446 ++ msg->timeout_ms = cm_id_priv->timeout_ms;
9447 ++ msg->context[1] = (void *)(unsigned long)IB_CM_REQ_SENT;
9448 +
9449 + cm_id_priv->local_qpn = cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
9450 + cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
9451 +
9452 + trace_icm_send_req(&cm_id_priv->id);
9453 +- spin_lock_irqsave(&cm_id_priv->lock, flags);
9454 +- ret = ib_post_send_mad(cm_id_priv->msg, NULL);
9455 +- if (ret) {
9456 +- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
9457 +- goto error2;
9458 +- }
9459 ++ ret = ib_post_send_mad(msg, NULL);
9460 ++ if (ret)
9461 ++ goto out_free;
9462 + BUG_ON(cm_id->state != IB_CM_IDLE);
9463 + cm_id->state = IB_CM_REQ_SENT;
9464 + spin_unlock_irqrestore(&cm_id_priv->lock, flags);
9465 + return 0;
9466 +-
9467 +-error2: cm_free_msg(cm_id_priv->msg);
9468 +-out: return ret;
9469 ++out_free:
9470 ++ cm_free_priv_msg(msg);
9471 ++out_unlock:
9472 ++ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
9473 ++ return ret;
9474 + }
9475 + EXPORT_SYMBOL(ib_send_cm_req);
9476 +
9477 +@@ -1618,7 +1593,7 @@ static int cm_issue_rej(struct cm_port *port,
9478 + IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg));
9479 + ret = ib_post_send_mad(msg, NULL);
9480 + if (ret)
9481 +- cm_free_msg(msg);
9482 ++ cm_free_response_msg(msg);
9483 +
9484 + return ret;
9485 + }
9486 +@@ -1974,7 +1949,7 @@ static void cm_dup_req_handler(struct cm_work *work,
9487 + return;
9488 +
9489 + unlock: spin_unlock_irq(&cm_id_priv->lock);
9490 +-free: cm_free_msg(msg);
9491 ++free: cm_free_response_msg(msg);
9492 + }
9493 +
9494 + static struct cm_id_private *cm_match_req(struct cm_work *work,
9495 +@@ -2163,8 +2138,7 @@ static int cm_req_handler(struct cm_work *work)
9496 + sa_path_set_dmac(&work->path[0],
9497 + cm_id_priv->av.ah_attr.roce.dmac);
9498 + work->path[0].hop_limit = grh->hop_limit;
9499 +- ret = cm_init_av_by_path(&work->path[0], gid_attr, &cm_id_priv->av,
9500 +- cm_id_priv);
9501 ++ ret = cm_init_av_by_path(&work->path[0], gid_attr, &cm_id_priv->av);
9502 + if (ret) {
9503 + int err;
9504 +
9505 +@@ -2183,7 +2157,7 @@ static int cm_req_handler(struct cm_work *work)
9506 + }
9507 + if (cm_req_has_alt_path(req_msg)) {
9508 + ret = cm_init_av_by_path(&work->path[1], NULL,
9509 +- &cm_id_priv->alt_av, cm_id_priv);
9510 ++ &cm_id_priv->alt_av);
9511 + if (ret) {
9512 + ib_send_cm_rej(&cm_id_priv->id,
9513 + IB_CM_REJ_INVALID_ALT_GID,
9514 +@@ -2283,9 +2257,11 @@ int ib_send_cm_rep(struct ib_cm_id *cm_id,
9515 + goto out;
9516 + }
9517 +
9518 +- ret = cm_alloc_msg(cm_id_priv, &msg);
9519 +- if (ret)
9520 ++ msg = cm_alloc_priv_msg(cm_id_priv);
9521 ++ if (IS_ERR(msg)) {
9522 ++ ret = PTR_ERR(msg);
9523 + goto out;
9524 ++ }
9525 +
9526 + rep_msg = (struct cm_rep_msg *) msg->mad;
9527 + cm_format_rep(rep_msg, cm_id_priv, param);
9528 +@@ -2294,14 +2270,10 @@ int ib_send_cm_rep(struct ib_cm_id *cm_id,
9529 +
9530 + trace_icm_send_rep(cm_id);
9531 + ret = ib_post_send_mad(msg, NULL);
9532 +- if (ret) {
9533 +- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
9534 +- cm_free_msg(msg);
9535 +- return ret;
9536 +- }
9537 ++ if (ret)
9538 ++ goto out_free;
9539 +
9540 + cm_id->state = IB_CM_REP_SENT;
9541 +- cm_id_priv->msg = msg;
9542 + cm_id_priv->initiator_depth = param->initiator_depth;
9543 + cm_id_priv->responder_resources = param->responder_resources;
9544 + cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
9545 +@@ -2309,8 +2281,13 @@ int ib_send_cm_rep(struct ib_cm_id *cm_id,
9546 + "IBTA declares QPN to be 24 bits, but it is 0x%X\n",
9547 + param->qp_num);
9548 + cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
9549 ++ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
9550 ++ return 0;
9551 +
9552 +-out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
9553 ++out_free:
9554 ++ cm_free_priv_msg(msg);
9555 ++out:
9556 ++ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
9557 + return ret;
9558 + }
9559 + EXPORT_SYMBOL(ib_send_cm_rep);
9560 +@@ -2357,9 +2334,11 @@ int ib_send_cm_rtu(struct ib_cm_id *cm_id,
9561 + goto error;
9562 + }
9563 +
9564 +- ret = cm_alloc_msg(cm_id_priv, &msg);
9565 +- if (ret)
9566 ++ msg = cm_alloc_msg(cm_id_priv);
9567 ++ if (IS_ERR(msg)) {
9568 ++ ret = PTR_ERR(msg);
9569 + goto error;
9570 ++ }
9571 +
9572 + cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
9573 + private_data, private_data_len);
9574 +@@ -2453,7 +2432,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
9575 + goto deref;
9576 +
9577 + unlock: spin_unlock_irq(&cm_id_priv->lock);
9578 +-free: cm_free_msg(msg);
9579 ++free: cm_free_response_msg(msg);
9580 + deref: cm_deref_id(cm_id_priv);
9581 + }
9582 +
9583 +@@ -2657,10 +2636,10 @@ static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
9584 + cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
9585 + ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
9586 +
9587 +- ret = cm_alloc_msg(cm_id_priv, &msg);
9588 +- if (ret) {
9589 ++ msg = cm_alloc_priv_msg(cm_id_priv);
9590 ++ if (IS_ERR(msg)) {
9591 + cm_enter_timewait(cm_id_priv);
9592 +- return ret;
9593 ++ return PTR_ERR(msg);
9594 + }
9595 +
9596 + cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
9597 +@@ -2672,12 +2651,11 @@ static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
9598 + ret = ib_post_send_mad(msg, NULL);
9599 + if (ret) {
9600 + cm_enter_timewait(cm_id_priv);
9601 +- cm_free_msg(msg);
9602 ++ cm_free_priv_msg(msg);
9603 + return ret;
9604 + }
9605 +
9606 + cm_id_priv->id.state = IB_CM_DREQ_SENT;
9607 +- cm_id_priv->msg = msg;
9608 + return 0;
9609 + }
9610 +
9611 +@@ -2732,9 +2710,9 @@ static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
9612 + cm_set_private_data(cm_id_priv, private_data, private_data_len);
9613 + cm_enter_timewait(cm_id_priv);
9614 +
9615 +- ret = cm_alloc_msg(cm_id_priv, &msg);
9616 +- if (ret)
9617 +- return ret;
9618 ++ msg = cm_alloc_msg(cm_id_priv);
9619 ++ if (IS_ERR(msg))
9620 ++ return PTR_ERR(msg);
9621 +
9622 + cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
9623 + private_data, private_data_len);
9624 +@@ -2794,7 +2772,7 @@ static int cm_issue_drep(struct cm_port *port,
9625 + IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
9626 + ret = ib_post_send_mad(msg, NULL);
9627 + if (ret)
9628 +- cm_free_msg(msg);
9629 ++ cm_free_response_msg(msg);
9630 +
9631 + return ret;
9632 + }
9633 +@@ -2853,7 +2831,7 @@ static int cm_dreq_handler(struct cm_work *work)
9634 +
9635 + if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
9636 + ib_post_send_mad(msg, NULL))
9637 +- cm_free_msg(msg);
9638 ++ cm_free_response_msg(msg);
9639 + goto deref;
9640 + case IB_CM_DREQ_RCVD:
9641 + atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
9642 +@@ -2927,9 +2905,9 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
9643 + case IB_CM_REP_RCVD:
9644 + case IB_CM_MRA_REP_SENT:
9645 + cm_reset_to_idle(cm_id_priv);
9646 +- ret = cm_alloc_msg(cm_id_priv, &msg);
9647 +- if (ret)
9648 +- return ret;
9649 ++ msg = cm_alloc_msg(cm_id_priv);
9650 ++ if (IS_ERR(msg))
9651 ++ return PTR_ERR(msg);
9652 + cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
9653 + ari, ari_length, private_data, private_data_len,
9654 + state);
9655 +@@ -2937,9 +2915,9 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
9656 + case IB_CM_REP_SENT:
9657 + case IB_CM_MRA_REP_RCVD:
9658 + cm_enter_timewait(cm_id_priv);
9659 +- ret = cm_alloc_msg(cm_id_priv, &msg);
9660 +- if (ret)
9661 +- return ret;
9662 ++ msg = cm_alloc_msg(cm_id_priv);
9663 ++ if (IS_ERR(msg))
9664 ++ return PTR_ERR(msg);
9665 + cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
9666 + ari, ari_length, private_data, private_data_len,
9667 + state);
9668 +@@ -3117,13 +3095,15 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
9669 + default:
9670 + trace_icm_send_mra_unknown_err(&cm_id_priv->id);
9671 + ret = -EINVAL;
9672 +- goto error1;
9673 ++ goto error_unlock;
9674 + }
9675 +
9676 + if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
9677 +- ret = cm_alloc_msg(cm_id_priv, &msg);
9678 +- if (ret)
9679 +- goto error1;
9680 ++ msg = cm_alloc_msg(cm_id_priv);
9681 ++ if (IS_ERR(msg)) {
9682 ++ ret = PTR_ERR(msg);
9683 ++ goto error_unlock;
9684 ++ }
9685 +
9686 + cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
9687 + msg_response, service_timeout,
9688 +@@ -3131,7 +3111,7 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
9689 + trace_icm_send_mra(cm_id);
9690 + ret = ib_post_send_mad(msg, NULL);
9691 + if (ret)
9692 +- goto error2;
9693 ++ goto error_free_msg;
9694 + }
9695 +
9696 + cm_id->state = cm_state;
9697 +@@ -3141,13 +3121,11 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
9698 + spin_unlock_irqrestore(&cm_id_priv->lock, flags);
9699 + return 0;
9700 +
9701 +-error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
9702 +- kfree(data);
9703 +- return ret;
9704 +-
9705 +-error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
9706 +- kfree(data);
9707 ++error_free_msg:
9708 + cm_free_msg(msg);
9709 ++error_unlock:
9710 ++ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
9711 ++ kfree(data);
9712 + return ret;
9713 + }
9714 + EXPORT_SYMBOL(ib_send_cm_mra);
9715 +@@ -3291,6 +3269,8 @@ static int cm_lap_handler(struct cm_work *work)
9716 + struct cm_lap_msg *lap_msg;
9717 + struct ib_cm_lap_event_param *param;
9718 + struct ib_mad_send_buf *msg = NULL;
9719 ++ struct rdma_ah_attr ah_attr;
9720 ++ struct cm_av alt_av = {};
9721 + int ret;
9722 +
9723 + /* Currently Alternate path messages are not supported for
9724 +@@ -3319,7 +3299,25 @@ static int cm_lap_handler(struct cm_work *work)
9725 + work->cm_event.private_data =
9726 + IBA_GET_MEM_PTR(CM_LAP_PRIVATE_DATA, lap_msg);
9727 +
9728 ++ ret = ib_init_ah_attr_from_wc(work->port->cm_dev->ib_device,
9729 ++ work->port->port_num,
9730 ++ work->mad_recv_wc->wc,
9731 ++ work->mad_recv_wc->recv_buf.grh,
9732 ++ &ah_attr);
9733 ++ if (ret)
9734 ++ goto deref;
9735 ++
9736 ++ ret = cm_init_av_by_path(param->alternate_path, NULL, &alt_av);
9737 ++ if (ret) {
9738 ++ rdma_destroy_ah_attr(&ah_attr);
9739 ++ return -EINVAL;
9740 ++ }
9741 ++
9742 + spin_lock_irq(&cm_id_priv->lock);
9743 ++ cm_init_av_for_lap(work->port, work->mad_recv_wc->wc,
9744 ++ &ah_attr, &cm_id_priv->av);
9745 ++ cm_move_av_from_path(&cm_id_priv->alt_av, &alt_av);
9746 ++
9747 + if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
9748 + goto unlock;
9749 +
9750 +@@ -3343,7 +3341,7 @@ static int cm_lap_handler(struct cm_work *work)
9751 +
9752 + if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
9753 + ib_post_send_mad(msg, NULL))
9754 +- cm_free_msg(msg);
9755 ++ cm_free_response_msg(msg);
9756 + goto deref;
9757 + case IB_CM_LAP_RCVD:
9758 + atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
9759 +@@ -3353,17 +3351,6 @@ static int cm_lap_handler(struct cm_work *work)
9760 + goto unlock;
9761 + }
9762 +
9763 +- ret = cm_init_av_for_lap(work->port, work->mad_recv_wc->wc,
9764 +- work->mad_recv_wc->recv_buf.grh,
9765 +- &cm_id_priv->av);
9766 +- if (ret)
9767 +- goto unlock;
9768 +-
9769 +- ret = cm_init_av_by_path(param->alternate_path, NULL,
9770 +- &cm_id_priv->alt_av, cm_id_priv);
9771 +- if (ret)
9772 +- goto unlock;
9773 +-
9774 + cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
9775 + cm_id_priv->tid = lap_msg->hdr.tid;
9776 + cm_queue_work_unlock(cm_id_priv, work);
9777 +@@ -3471,6 +3458,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
9778 + {
9779 + struct cm_id_private *cm_id_priv;
9780 + struct ib_mad_send_buf *msg;
9781 ++ struct cm_av av = {};
9782 + unsigned long flags;
9783 + int ret;
9784 +
9785 +@@ -3479,42 +3467,43 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
9786 + return -EINVAL;
9787 +
9788 + cm_id_priv = container_of(cm_id, struct cm_id_private, id);
9789 +- ret = cm_init_av_by_path(param->path, param->sgid_attr,
9790 +- &cm_id_priv->av,
9791 +- cm_id_priv);
9792 ++ ret = cm_init_av_by_path(param->path, param->sgid_attr, &av);
9793 + if (ret)
9794 +- goto out;
9795 ++ return ret;
9796 +
9797 ++ spin_lock_irqsave(&cm_id_priv->lock, flags);
9798 ++ cm_move_av_from_path(&cm_id_priv->av, &av);
9799 + cm_id->service_id = param->service_id;
9800 + cm_id->service_mask = ~cpu_to_be64(0);
9801 + cm_id_priv->timeout_ms = param->timeout_ms;
9802 + cm_id_priv->max_cm_retries = param->max_cm_retries;
9803 +- ret = cm_alloc_msg(cm_id_priv, &msg);
9804 +- if (ret)
9805 +- goto out;
9806 +-
9807 +- cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
9808 +- param);
9809 +- msg->timeout_ms = cm_id_priv->timeout_ms;
9810 +- msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
9811 +-
9812 +- spin_lock_irqsave(&cm_id_priv->lock, flags);
9813 +- if (cm_id->state == IB_CM_IDLE) {
9814 +- trace_icm_send_sidr_req(&cm_id_priv->id);
9815 +- ret = ib_post_send_mad(msg, NULL);
9816 +- } else {
9817 ++ if (cm_id->state != IB_CM_IDLE) {
9818 + ret = -EINVAL;
9819 ++ goto out_unlock;
9820 + }
9821 +
9822 +- if (ret) {
9823 +- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
9824 +- cm_free_msg(msg);
9825 +- goto out;
9826 ++ msg = cm_alloc_priv_msg(cm_id_priv);
9827 ++ if (IS_ERR(msg)) {
9828 ++ ret = PTR_ERR(msg);
9829 ++ goto out_unlock;
9830 + }
9831 ++
9832 ++ cm_format_sidr_req((struct cm_sidr_req_msg *)msg->mad, cm_id_priv,
9833 ++ param);
9834 ++ msg->timeout_ms = cm_id_priv->timeout_ms;
9835 ++ msg->context[1] = (void *)(unsigned long)IB_CM_SIDR_REQ_SENT;
9836 ++
9837 ++ trace_icm_send_sidr_req(&cm_id_priv->id);
9838 ++ ret = ib_post_send_mad(msg, NULL);
9839 ++ if (ret)
9840 ++ goto out_free;
9841 + cm_id->state = IB_CM_SIDR_REQ_SENT;
9842 +- cm_id_priv->msg = msg;
9843 + spin_unlock_irqrestore(&cm_id_priv->lock, flags);
9844 +-out:
9845 ++ return 0;
9846 ++out_free:
9847 ++ cm_free_priv_msg(msg);
9848 ++out_unlock:
9849 ++ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
9850 + return ret;
9851 + }
9852 + EXPORT_SYMBOL(ib_send_cm_sidr_req);
9853 +@@ -3661,9 +3650,9 @@ static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
9854 + if (cm_id_priv->id.state != IB_CM_SIDR_REQ_RCVD)
9855 + return -EINVAL;
9856 +
9857 +- ret = cm_alloc_msg(cm_id_priv, &msg);
9858 +- if (ret)
9859 +- return ret;
9860 ++ msg = cm_alloc_msg(cm_id_priv);
9861 ++ if (IS_ERR(msg))
9862 ++ return PTR_ERR(msg);
9863 +
9864 + cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
9865 + param);
9866 +@@ -3963,9 +3952,7 @@ out:
9867 + static int cm_migrate(struct ib_cm_id *cm_id)
9868 + {
9869 + struct cm_id_private *cm_id_priv;
9870 +- struct cm_av tmp_av;
9871 + unsigned long flags;
9872 +- int tmp_send_port_not_ready;
9873 + int ret = 0;
9874 +
9875 + cm_id_priv = container_of(cm_id, struct cm_id_private, id);
9876 +@@ -3974,14 +3961,7 @@ static int cm_migrate(struct ib_cm_id *cm_id)
9877 + (cm_id->lap_state == IB_CM_LAP_UNINIT ||
9878 + cm_id->lap_state == IB_CM_LAP_IDLE)) {
9879 + cm_id->lap_state = IB_CM_LAP_IDLE;
9880 +- /* Swap address vector */
9881 +- tmp_av = cm_id_priv->av;
9882 + cm_id_priv->av = cm_id_priv->alt_av;
9883 +- cm_id_priv->alt_av = tmp_av;
9884 +- /* Swap port send ready state */
9885 +- tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
9886 +- cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
9887 +- cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
9888 + } else
9889 + ret = -EINVAL;
9890 + spin_unlock_irqrestore(&cm_id_priv->lock, flags);
9891 +@@ -4356,9 +4336,6 @@ static int cm_add_one(struct ib_device *ib_device)
9892 + port->cm_dev = cm_dev;
9893 + port->port_num = i;
9894 +
9895 +- INIT_LIST_HEAD(&port->cm_priv_prim_list);
9896 +- INIT_LIST_HEAD(&port->cm_priv_altr_list);
9897 +-
9898 + ret = cm_create_port_fs(port);
9899 + if (ret)
9900 + goto error1;
9901 +@@ -4422,8 +4399,6 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
9902 + {
9903 + struct cm_device *cm_dev = client_data;
9904 + struct cm_port *port;
9905 +- struct cm_id_private *cm_id_priv;
9906 +- struct ib_mad_agent *cur_mad_agent;
9907 + struct ib_port_modify port_modify = {
9908 + .clr_port_cap_mask = IB_PORT_CM_SUP
9909 + };
9910 +@@ -4444,24 +4419,13 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
9911 +
9912 + port = cm_dev->port[i-1];
9913 + ib_modify_port(ib_device, port->port_num, 0, &port_modify);
9914 +- /* Mark all the cm_id's as not valid */
9915 +- spin_lock_irq(&cm.lock);
9916 +- list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
9917 +- cm_id_priv->altr_send_port_not_ready = 1;
9918 +- list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
9919 +- cm_id_priv->prim_send_port_not_ready = 1;
9920 +- spin_unlock_irq(&cm.lock);
9921 + /*
9922 + * We flush the queue here after the going_down set, this
9923 + * verify that no new works will be queued in the recv handler,
9924 + * after that we can call the unregister_mad_agent
9925 + */
9926 + flush_workqueue(cm.wq);
9927 +- spin_lock_irq(&cm.state_lock);
9928 +- cur_mad_agent = port->mad_agent;
9929 +- port->mad_agent = NULL;
9930 +- spin_unlock_irq(&cm.state_lock);
9931 +- ib_unregister_mad_agent(cur_mad_agent);
9932 ++ ib_unregister_mad_agent(port->mad_agent);
9933 + cm_remove_port_fs(port);
9934 + kfree(port);
9935 + }
9936 +@@ -4476,7 +4440,6 @@ static int __init ib_cm_init(void)
9937 + INIT_LIST_HEAD(&cm.device_list);
9938 + rwlock_init(&cm.device_lock);
9939 + spin_lock_init(&cm.lock);
9940 +- spin_lock_init(&cm.state_lock);
9941 + cm.listen_service_table = RB_ROOT;
9942 + cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
9943 + cm.remote_id_table = RB_ROOT;
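
The long cm.c rework above converts cm_alloc_msg() from an int-returning function with an out-parameter into one that returns the struct ib_mad_send_buf * directly, encoding failure in the pointer itself. That, together with the cm_alloc_priv_msg()/cm_free_priv_msg() pair managed under cm_id_priv->lock and a dedicated cm_free_response_msg() for response MADs that hold no cm_id reference, is what lets the per-port readiness lists, the prim/altr bookkeeping and cm.state_lock all be deleted. A self-contained userspace model of the <linux/err.h> convention involved (illustrative only):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Errors -1..-4095 are encoded in the pointer value itself. */
    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    /* A pointer-returning allocator in the style of the reworked
     * cm_alloc_msg(): no out-parameter, the pointer carries the errno. */
    static int *alloc_value(int fail)
    {
            int *p;

            if (fail)
                    return ERR_PTR(-ENOMEM);
            p = malloc(sizeof(*p));
            if (!p)
                    return ERR_PTR(-ENOMEM);
            *p = 42;
            return p;
    }

    int main(void)
    {
            int *v = alloc_value(0);

            if (IS_ERR(v))
                    return (int)-PTR_ERR(v);  /* recover the errno */
            printf("got %d\n", *v);
            free(v);
            return 0;
    }
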
9944 +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
9945 +index ab148a696c0ce..ad9a9ba5f00d1 100644
9946 +--- a/drivers/infiniband/core/cma.c
9947 ++++ b/drivers/infiniband/core/cma.c
9948 +@@ -1852,6 +1852,7 @@ static void _destroy_id(struct rdma_id_private *id_priv,
9949 + {
9950 + cma_cancel_operation(id_priv, state);
9951 +
9952 ++ rdma_restrack_del(&id_priv->res);
9953 + if (id_priv->cma_dev) {
9954 + if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
9955 + if (id_priv->cm_id.ib)
9956 +@@ -1861,7 +1862,6 @@ static void _destroy_id(struct rdma_id_private *id_priv,
9957 + iw_destroy_cm_id(id_priv->cm_id.iw);
9958 + }
9959 + cma_leave_mc_groups(id_priv);
9960 +- rdma_restrack_del(&id_priv->res);
9961 + cma_release_dev(id_priv);
9962 + }
9963 +
9964 +@@ -2472,8 +2472,10 @@ static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
9965 + if (IS_ERR(id))
9966 + return PTR_ERR(id);
9967 +
9968 ++ mutex_lock(&id_priv->qp_mutex);
9969 + id->tos = id_priv->tos;
9970 + id->tos_set = id_priv->tos_set;
9971 ++ mutex_unlock(&id_priv->qp_mutex);
9972 + id->afonly = id_priv->afonly;
9973 + id_priv->cm_id.iw = id;
9974 +
9975 +@@ -2534,8 +2536,10 @@ static int cma_listen_on_dev(struct rdma_id_private *id_priv,
9976 + cma_id_get(id_priv);
9977 + dev_id_priv->internal_id = 1;
9978 + dev_id_priv->afonly = id_priv->afonly;
9979 ++ mutex_lock(&id_priv->qp_mutex);
9980 + dev_id_priv->tos_set = id_priv->tos_set;
9981 + dev_id_priv->tos = id_priv->tos;
9982 ++ mutex_unlock(&id_priv->qp_mutex);
9983 +
9984 + ret = rdma_listen(&dev_id_priv->id, id_priv->backlog);
9985 + if (ret)
9986 +@@ -2582,8 +2586,10 @@ void rdma_set_service_type(struct rdma_cm_id *id, int tos)
9987 + struct rdma_id_private *id_priv;
9988 +
9989 + id_priv = container_of(id, struct rdma_id_private, id);
9990 ++ mutex_lock(&id_priv->qp_mutex);
9991 + id_priv->tos = (u8) tos;
9992 + id_priv->tos_set = true;
9993 ++ mutex_unlock(&id_priv->qp_mutex);
9994 + }
9995 + EXPORT_SYMBOL(rdma_set_service_type);
9996 +
9997 +@@ -2610,8 +2616,10 @@ int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout)
9998 + return -EINVAL;
9999 +
10000 + id_priv = container_of(id, struct rdma_id_private, id);
10001 ++ mutex_lock(&id_priv->qp_mutex);
10002 + id_priv->timeout = timeout;
10003 + id_priv->timeout_set = true;
10004 ++ mutex_unlock(&id_priv->qp_mutex);
10005 +
10006 + return 0;
10007 + }
10008 +@@ -2647,8 +2655,10 @@ int rdma_set_min_rnr_timer(struct rdma_cm_id *id, u8 min_rnr_timer)
10009 + return -EINVAL;
10010 +
10011 + id_priv = container_of(id, struct rdma_id_private, id);
10012 ++ mutex_lock(&id_priv->qp_mutex);
10013 + id_priv->min_rnr_timer = min_rnr_timer;
10014 + id_priv->min_rnr_timer_set = true;
10015 ++ mutex_unlock(&id_priv->qp_mutex);
10016 +
10017 + return 0;
10018 + }
10019 +@@ -3034,8 +3044,11 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
10020 +
10021 + u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num -
10022 + rdma_start_port(id_priv->cma_dev->device)];
10023 +- u8 tos = id_priv->tos_set ? id_priv->tos : default_roce_tos;
10024 ++ u8 tos;
10025 +
10026 ++ mutex_lock(&id_priv->qp_mutex);
10027 ++ tos = id_priv->tos_set ? id_priv->tos : default_roce_tos;
10028 ++ mutex_unlock(&id_priv->qp_mutex);
10029 +
10030 + work = kzalloc(sizeof *work, GFP_KERNEL);
10031 + if (!work)
10032 +@@ -3082,8 +3095,12 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
10033 + * PacketLifeTime = local ACK timeout/2
10034 + * as a reasonable approximation for RoCE networks.
10035 + */
10036 +- route->path_rec->packet_life_time = id_priv->timeout_set ?
10037 +- id_priv->timeout - 1 : CMA_IBOE_PACKET_LIFETIME;
10038 ++ mutex_lock(&id_priv->qp_mutex);
10039 ++ if (id_priv->timeout_set && id_priv->timeout)
10040 ++ route->path_rec->packet_life_time = id_priv->timeout - 1;
10041 ++ else
10042 ++ route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
10043 ++ mutex_unlock(&id_priv->qp_mutex);
10044 +
10045 + if (!route->path_rec->mtu) {
10046 + ret = -EINVAL;
10047 +@@ -4107,8 +4124,11 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
10048 + if (IS_ERR(cm_id))
10049 + return PTR_ERR(cm_id);
10050 +
10051 ++ mutex_lock(&id_priv->qp_mutex);
10052 + cm_id->tos = id_priv->tos;
10053 + cm_id->tos_set = id_priv->tos_set;
10054 ++ mutex_unlock(&id_priv->qp_mutex);
10055 ++
10056 + id_priv->cm_id.iw = cm_id;
10057 +
10058 + memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
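
The cma.c hunks above serialize every access to the paired fields tos/tos_set, timeout/timeout_set and min_rnr_timer/min_rnr_timer_set under id_priv->qp_mutex, so a reader can no longer observe a freshly written flag combined with a stale value, or the reverse. The packet-lifetime reader additionally treats a timeout of 0 as unset, falling back to CMA_IBOE_PACKET_LIFETIME instead of computing a bogus timeout - 1. The guarded read, in brief:

    mutex_lock(&id_priv->qp_mutex);
    tos = id_priv->tos_set ? id_priv->tos : default_roce_tos;
    mutex_unlock(&id_priv->qp_mutex);
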
10059 +diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
10060 +index 64e4be1cbec7c..a1d1deca7c063 100644
10061 +--- a/drivers/infiniband/core/uverbs_cmd.c
10062 ++++ b/drivers/infiniband/core/uverbs_cmd.c
10063 +@@ -3034,12 +3034,29 @@ static int ib_uverbs_ex_modify_wq(struct uverbs_attr_bundle *attrs)
10064 + if (!wq)
10065 + return -EINVAL;
10066 +
10067 +- wq_attr.curr_wq_state = cmd.curr_wq_state;
10068 +- wq_attr.wq_state = cmd.wq_state;
10069 + if (cmd.attr_mask & IB_WQ_FLAGS) {
10070 + wq_attr.flags = cmd.flags;
10071 + wq_attr.flags_mask = cmd.flags_mask;
10072 + }
10073 ++
10074 ++ if (cmd.attr_mask & IB_WQ_CUR_STATE) {
10075 ++ if (cmd.curr_wq_state > IB_WQS_ERR)
10076 ++ return -EINVAL;
10077 ++
10078 ++ wq_attr.curr_wq_state = cmd.curr_wq_state;
10079 ++ } else {
10080 ++ wq_attr.curr_wq_state = wq->state;
10081 ++ }
10082 ++
10083 ++ if (cmd.attr_mask & IB_WQ_STATE) {
10084 ++ if (cmd.wq_state > IB_WQS_ERR)
10085 ++ return -EINVAL;
10086 ++
10087 ++ wq_attr.wq_state = cmd.wq_state;
10088 ++ } else {
10089 ++ wq_attr.wq_state = wq_attr.curr_wq_state;
10090 ++ }
10091 ++
10092 + ret = wq->device->ops.modify_wq(wq, &wq_attr, cmd.attr_mask,
10093 + &attrs->driver_udata);
10094 + rdma_lookup_put_uobject(&wq->uobject->uevent.uobject,
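
The ib_uverbs_ex_modify_wq() hunk above hardens a userspace-facing path: cmd.curr_wq_state and cmd.wq_state arrive from the user's command buffer, so each is now range-checked against IB_WQS_ERR (the last ib_wq_state value) before being handed to the driver, and each is only honoured when its attr_mask bit is actually set, defaulting to the WQ's current state otherwise. The shape of the check:

    if (cmd.attr_mask & IB_WQ_STATE) {
            if (cmd.wq_state > IB_WQS_ERR)  /* reject out-of-range enums */
                    return -EINVAL;
            wq_attr.wq_state = cmd.wq_state;
    } else {
            wq_attr.wq_state = wq_attr.curr_wq_state;  /* sane default */
    }
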
10095 +diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
10096 +index 7652dafe32eca..dcbe5e28a4f7a 100644
10097 +--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
10098 ++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
10099 +@@ -274,8 +274,6 @@ static int set_rc_inl(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
10100 +
10101 + dseg += sizeof(struct hns_roce_v2_rc_send_wqe);
10102 +
10103 +- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S, 1);
10104 +-
10105 + if (msg_len <= HNS_ROCE_V2_MAX_RC_INL_INN_SZ) {
10106 + roce_set_bit(rc_sq_wqe->byte_20,
10107 + V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S, 0);
10108 +@@ -320,6 +318,8 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
10109 + V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
10110 + (*sge_ind) & (qp->sge.sge_cnt - 1));
10111 +
10112 ++ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
10113 ++ !!(wr->send_flags & IB_SEND_INLINE));
10114 + if (wr->send_flags & IB_SEND_INLINE)
10115 + return set_rc_inl(qp, wr, rc_sq_wqe, sge_ind);
10116 +
10117 +@@ -791,8 +791,7 @@ out:
10118 + qp->sq.head += nreq;
10119 + qp->next_sge = sge_idx;
10120 +
10121 +- if (nreq == 1 && qp->sq.head == qp->sq.tail + 1 &&
10122 +- (qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE))
10123 ++ if (nreq == 1 && (qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE))
10124 + write_dwqe(hr_dev, qp, wqe);
10125 + else
10126 + update_sq_db(hr_dev, qp);
10127 +@@ -1620,6 +1619,22 @@ static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
10128 + }
10129 + }
10130 +
10131 ++static int hns_roce_clear_extdb_list_info(struct hns_roce_dev *hr_dev)
10132 ++{
10133 ++ struct hns_roce_cmq_desc desc;
10134 ++ int ret;
10135 ++
10136 ++ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLEAR_EXTDB_LIST_INFO,
10137 ++ false);
10138 ++ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
10139 ++ if (ret)
10140 ++ ibdev_err(&hr_dev->ib_dev,
10141 ++ "failed to clear extended doorbell info, ret = %d.\n",
10142 ++ ret);
10143 ++
10144 ++ return ret;
10145 ++}
10146 ++
10147 + static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
10148 + {
10149 + struct hns_roce_query_fw_info *resp;
10150 +@@ -2093,12 +2108,6 @@ static void set_hem_page_size(struct hns_roce_dev *hr_dev)
10151 + calc_pg_sz(caps->max_cqes, caps->cqe_sz, caps->cqe_hop_num,
10152 + 1, &caps->cqe_buf_pg_sz, &caps->cqe_ba_pg_sz, HEM_TYPE_CQE);
10153 +
10154 +- if (caps->cqc_timer_entry_sz)
10155 +- calc_pg_sz(caps->num_cqc_timer, caps->cqc_timer_entry_sz,
10156 +- caps->cqc_timer_hop_num, caps->cqc_timer_bt_num,
10157 +- &caps->cqc_timer_buf_pg_sz,
10158 +- &caps->cqc_timer_ba_pg_sz, HEM_TYPE_CQC_TIMER);
10159 +-
10160 + /* SRQ */
10161 + if (caps->flags & HNS_ROCE_CAP_FLAG_SRQ) {
10162 + calc_pg_sz(caps->num_srqs, caps->srqc_entry_sz,
10163 +@@ -2739,6 +2748,11 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
10164 + struct hns_roce_v2_priv *priv = hr_dev->priv;
10165 + int ret;
10166 +
10167 ++ /* The hns ROCEE requires the extdb info to be cleared before using */
10168 ++ ret = hns_roce_clear_extdb_list_info(hr_dev);
10169 ++ if (ret)
10170 ++ return ret;
10171 ++
10172 + ret = get_hem_table(hr_dev);
10173 + if (ret)
10174 + return ret;
10175 +@@ -4485,12 +4499,13 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
10176 + struct ib_device *ibdev = &hr_dev->ib_dev;
10177 + dma_addr_t trrl_ba;
10178 + dma_addr_t irrl_ba;
10179 +- enum ib_mtu mtu;
10180 ++ enum ib_mtu ib_mtu;
10181 + u8 lp_pktn_ini;
10182 + u64 *mtts;
10183 + u8 *dmac;
10184 + u8 *smac;
10185 + u32 port;
10186 ++ int mtu;
10187 + int ret;
10188 +
10189 + ret = config_qp_rq_buf(hr_dev, hr_qp, context, qpc_mask);
10190 +@@ -4574,19 +4589,23 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
10191 + roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
10192 + V2_QPC_BYTE_52_DMAC_S, 0);
10193 +
10194 +- mtu = get_mtu(ibqp, attr);
10195 +- hr_qp->path_mtu = mtu;
10196 ++ ib_mtu = get_mtu(ibqp, attr);
10197 ++ hr_qp->path_mtu = ib_mtu;
10198 ++
10199 ++ mtu = ib_mtu_enum_to_int(ib_mtu);
10200 ++ if (WARN_ON(mtu < 0))
10201 ++ return -EINVAL;
10202 +
10203 + if (attr_mask & IB_QP_PATH_MTU) {
10204 + roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
10205 +- V2_QPC_BYTE_24_MTU_S, mtu);
10206 ++ V2_QPC_BYTE_24_MTU_S, ib_mtu);
10207 + roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
10208 + V2_QPC_BYTE_24_MTU_S, 0);
10209 + }
10210 +
10211 + #define MAX_LP_MSG_LEN 65536
10212 + /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 64KB */
10213 +- lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / ib_mtu_enum_to_int(mtu));
10214 ++ lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu);
10215 +
10216 + roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
10217 + V2_QPC_BYTE_56_LP_PKTN_INI_S, lp_pktn_ini);
10218 +@@ -4758,6 +4777,11 @@ enum {
10219 + DIP_VALID,
10220 + };
10221 +
10222 ++enum {
10223 ++ WND_LIMIT,
10224 ++ WND_UNLIMIT,
10225 ++};
10226 ++
10227 + static int check_cong_type(struct ib_qp *ibqp,
10228 + struct hns_roce_congestion_algorithm *cong_alg)
10229 + {
10230 +@@ -4769,21 +4793,25 @@ static int check_cong_type(struct ib_qp *ibqp,
10231 + cong_alg->alg_sel = CONG_DCQCN;
10232 + cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
10233 + cong_alg->dip_vld = DIP_INVALID;
10234 ++ cong_alg->wnd_mode_sel = WND_LIMIT;
10235 + break;
10236 + case CONG_TYPE_LDCP:
10237 + cong_alg->alg_sel = CONG_WINDOW;
10238 + cong_alg->alg_sub_sel = CONG_LDCP;
10239 + cong_alg->dip_vld = DIP_INVALID;
10240 ++ cong_alg->wnd_mode_sel = WND_UNLIMIT;
10241 + break;
10242 + case CONG_TYPE_HC3:
10243 + cong_alg->alg_sel = CONG_WINDOW;
10244 + cong_alg->alg_sub_sel = CONG_HC3;
10245 + cong_alg->dip_vld = DIP_INVALID;
10246 ++ cong_alg->wnd_mode_sel = WND_LIMIT;
10247 + break;
10248 + case CONG_TYPE_DIP:
10249 + cong_alg->alg_sel = CONG_DCQCN;
10250 + cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
10251 + cong_alg->dip_vld = DIP_VALID;
10252 ++ cong_alg->wnd_mode_sel = WND_LIMIT;
10253 + break;
10254 + default:
10255 + ibdev_err(&hr_dev->ib_dev,
10256 +@@ -4824,6 +4852,9 @@ static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
10257 + hr_reg_write(&qpc_mask->ext, QPCEX_CONG_ALG_SUB_SEL, 0);
10258 + hr_reg_write(&context->ext, QPCEX_DIP_CTX_IDX_VLD, cong_field.dip_vld);
10259 + hr_reg_write(&qpc_mask->ext, QPCEX_DIP_CTX_IDX_VLD, 0);
10260 ++ hr_reg_write(&context->ext, QPCEX_SQ_RQ_NOT_FORBID_EN,
10261 ++ cong_field.wnd_mode_sel);
10262 ++ hr_reg_clear(&qpc_mask->ext, QPCEX_SQ_RQ_NOT_FORBID_EN);
10263 +
10264 + /* if dip is disabled, there is no need to set dip idx */
10265 + if (cong_field.dip_vld == 0)
10266 +diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
10267 +index a2100a629859a..23cf2f6bc7a54 100644
10268 +--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
10269 ++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
10270 +@@ -248,6 +248,7 @@ enum hns_roce_opcode_type {
10271 + HNS_ROCE_OPC_CLR_SCCC = 0x8509,
10272 + HNS_ROCE_OPC_QUERY_SCCC = 0x850a,
10273 + HNS_ROCE_OPC_RESET_SCCC = 0x850b,
10274 ++ HNS_ROCE_OPC_CLEAR_EXTDB_LIST_INFO = 0x850d,
10275 + HNS_ROCE_OPC_QUERY_VF_RES = 0x850e,
10276 + HNS_ROCE_OPC_CFG_GMV_TBL = 0x850f,
10277 + HNS_ROCE_OPC_CFG_GMV_BT = 0x8510,
10278 +@@ -963,6 +964,7 @@ struct hns_roce_v2_qp_context {
10279 + #define QPCEX_CONG_ALG_SUB_SEL QPCEX_FIELD_LOC(1, 1)
10280 + #define QPCEX_DIP_CTX_IDX_VLD QPCEX_FIELD_LOC(2, 2)
10281 + #define QPCEX_DIP_CTX_IDX QPCEX_FIELD_LOC(22, 3)
10282 ++#define QPCEX_SQ_RQ_NOT_FORBID_EN QPCEX_FIELD_LOC(23, 23)
10283 + #define QPCEX_STASH QPCEX_FIELD_LOC(82, 82)
10284 +
10285 + #define V2_QP_RWE_S 1 /* rdma write enable */
10286 +@@ -1642,6 +1644,7 @@ struct hns_roce_congestion_algorithm {
10287 + u8 alg_sel;
10288 + u8 alg_sub_sel;
10289 + u8 dip_vld;
10290 ++ u8 wnd_mode_sel;
10291 + };
10292 +
10293 + #define V2_QUERY_PF_CAPS_D_CEQ_DEPTH_S 0
10294 +diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
10295 +index 79b3c3023fe7a..b8454dcb03183 100644
10296 +--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
10297 ++++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
10298 +@@ -776,7 +776,7 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
10299 + struct ib_device *ibdev = &hr_dev->ib_dev;
10300 + struct hns_roce_buf_region *r;
10301 + unsigned int i, mapped_cnt;
10302 +- int ret;
10303 ++ int ret = 0;
10304 +
10305 + /*
10306 + * Only use the first page address as root ba when hopnum is 0, this
10307 +diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
10308 +index 92ddbcc00eb2a..2ae22bf50016a 100644
10309 +--- a/drivers/infiniband/hw/mlx4/qp.c
10310 ++++ b/drivers/infiniband/hw/mlx4/qp.c
10311 +@@ -4251,13 +4251,8 @@ int mlx4_ib_modify_wq(struct ib_wq *ibwq, struct ib_wq_attr *wq_attr,
10312 + if (wq_attr_mask & IB_WQ_FLAGS)
10313 + return -EOPNOTSUPP;
10314 +
10315 +- cur_state = wq_attr_mask & IB_WQ_CUR_STATE ? wq_attr->curr_wq_state :
10316 +- ibwq->state;
10317 +- new_state = wq_attr_mask & IB_WQ_STATE ? wq_attr->wq_state : cur_state;
10318 +-
10319 +- if (cur_state < IB_WQS_RESET || cur_state > IB_WQS_ERR ||
10320 +- new_state < IB_WQS_RESET || new_state > IB_WQS_ERR)
10321 +- return -EINVAL;
10322 ++ cur_state = wq_attr->curr_wq_state;
10323 ++ new_state = wq_attr->wq_state;
10324 +
10325 + if ((new_state == IB_WQS_RDY) && (cur_state == IB_WQS_ERR))
10326 + return -EINVAL;
10327 +diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
10328 +index 644d5d0ac5442..cca7296b12d01 100644
10329 +--- a/drivers/infiniband/hw/mlx5/main.c
10330 ++++ b/drivers/infiniband/hw/mlx5/main.c
10331 +@@ -3178,8 +3178,6 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
10332 +
10333 + port->mp.mpi = NULL;
10334 +
10335 +- list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
10336 +-
10337 + spin_unlock(&port->mp.mpi_lock);
10338 +
10339 + err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);
10340 +@@ -3327,7 +3325,10 @@ static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
10341 + } else {
10342 + mlx5_ib_dbg(dev, "unbinding port_num: %u\n",
10343 + i + 1);
10344 +- mlx5_ib_unbind_slave_port(dev, dev->port[i].mp.mpi);
10345 ++ list_add_tail(&dev->port[i].mp.mpi->list,
10346 ++ &mlx5_ib_unaffiliated_port_list);
10347 ++ mlx5_ib_unbind_slave_port(dev,
10348 ++ dev->port[i].mp.mpi);
10349 + }
10350 + }
10351 + }
10352 +diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
10353 +index 9282eb10bfaed..5851486c0d930 100644
10354 +--- a/drivers/infiniband/hw/mlx5/qp.c
10355 ++++ b/drivers/infiniband/hw/mlx5/qp.c
10356 +@@ -5309,10 +5309,8 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
10357 +
10358 + rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
10359 +
10360 +- curr_wq_state = (wq_attr_mask & IB_WQ_CUR_STATE) ?
10361 +- wq_attr->curr_wq_state : wq->state;
10362 +- wq_state = (wq_attr_mask & IB_WQ_STATE) ?
10363 +- wq_attr->wq_state : curr_wq_state;
10364 ++ curr_wq_state = wq_attr->curr_wq_state;
10365 ++ wq_state = wq_attr->wq_state;
10366 + if (curr_wq_state == IB_WQS_ERR)
10367 + curr_wq_state = MLX5_RQC_STATE_ERR;
10368 + if (wq_state == IB_WQS_ERR)
10369 +diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
10370 +index 01662727dca08..fc1ba49042792 100644
10371 +--- a/drivers/infiniband/sw/rxe/rxe_net.c
10372 ++++ b/drivers/infiniband/sw/rxe/rxe_net.c
10373 +@@ -207,10 +207,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
10374 +
10375 + /* Create UDP socket */
10376 + err = udp_sock_create(net, &udp_cfg, &sock);
10377 +- if (err < 0) {
10378 +- pr_err("failed to create udp socket. err = %d\n", err);
10379 ++ if (err < 0)
10380 + return ERR_PTR(err);
10381 +- }
10382 +
10383 + tnl_cfg.encap_type = 1;
10384 + tnl_cfg.encap_rcv = rxe_udp_encap_recv;
10385 +@@ -619,6 +617,12 @@ static int rxe_net_ipv6_init(void)
10386 +
10387 + recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
10388 + htons(ROCE_V2_UDP_DPORT), true);
10389 ++ if (PTR_ERR(recv_sockets.sk6) == -EAFNOSUPPORT) {
10390 ++ recv_sockets.sk6 = NULL;
10391 ++ pr_warn("IPv6 is not supported, can not create a UDPv6 socket\n");
10392 ++ return 0;
10393 ++ }
10394 ++
10395 + if (IS_ERR(recv_sockets.sk6)) {
10396 + recv_sockets.sk6 = NULL;
10397 + pr_err("Failed to create IPv6 UDP tunnel\n");
10398 +diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
10399 +index b0f350d674fdb..93a41ebda1a85 100644
10400 +--- a/drivers/infiniband/sw/rxe/rxe_qp.c
10401 ++++ b/drivers/infiniband/sw/rxe/rxe_qp.c
10402 +@@ -136,7 +136,6 @@ static void free_rd_atomic_resources(struct rxe_qp *qp)
10403 + void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
10404 + {
10405 + if (res->type == RXE_ATOMIC_MASK) {
10406 +- rxe_drop_ref(qp);
10407 + kfree_skb(res->atomic.skb);
10408 + } else if (res->type == RXE_READ_MASK) {
10409 + if (res->read.mr)
10410 +diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
10411 +index 2b220659bddbf..39dc39be586ec 100644
10412 +--- a/drivers/infiniband/sw/rxe/rxe_resp.c
10413 ++++ b/drivers/infiniband/sw/rxe/rxe_resp.c
10414 +@@ -966,8 +966,6 @@ static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
10415 + goto out;
10416 + }
10417 +
10418 +- rxe_add_ref(qp);
10419 +-
10420 + res = &qp->resp.resources[qp->resp.res_head];
10421 + free_rd_atomic_resource(qp, res);
10422 + rxe_advance_resp_resource(qp);
10423 +diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
10424 +index 8fcaa1136f2cd..776e46ee95dad 100644
10425 +--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
10426 ++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
10427 +@@ -506,6 +506,7 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
10428 + iser_conn->iscsi_conn = conn;
10429 +
10430 + out:
10431 ++ iscsi_put_endpoint(ep);
10432 + mutex_unlock(&iser_conn->state_mutex);
10433 + return error;
10434 + }
10435 +@@ -1002,6 +1003,7 @@ static struct iscsi_transport iscsi_iser_transport = {
10436 + /* connection management */
10437 + .create_conn = iscsi_iser_conn_create,
10438 + .bind_conn = iscsi_iser_conn_bind,
10439 ++ .unbind_conn = iscsi_conn_unbind,
10440 + .destroy_conn = iscsi_conn_teardown,
10441 + .attr_is_visible = iser_attr_is_visible,
10442 + .set_param = iscsi_iser_set_param,
10443 +diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
10444 +index 0a794d748a7a6..ed7cf25a65c27 100644
10445 +--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
10446 ++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
10447 +@@ -814,6 +814,9 @@ static struct rtrs_clt_sess *get_next_path_min_inflight(struct path_it *it)
10448 + int inflight;
10449 +
10450 + list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) {
10451 ++ if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED))
10452 ++ continue;
10453 ++
10454 + if (unlikely(!list_empty(raw_cpu_ptr(sess->mp_skip_entry))))
10455 + continue;
10456 +
10457 +@@ -1788,7 +1791,19 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
10458 + queue_depth);
10459 + return -ECONNRESET;
10460 + }
10461 +- if (!sess->rbufs || sess->queue_depth < queue_depth) {
10462 ++ if (sess->queue_depth > 0 && queue_depth != sess->queue_depth) {
10463 ++ rtrs_err(clt, "Error: queue depth changed\n");
10464 ++
10465 ++ /*
10466 ++ * Stop any more reconnection attempts
10467 ++ */
10468 ++ sess->reconnect_attempts = -1;
10469 ++ rtrs_err(clt,
10470 ++ "Disabling auto-reconnect. Trigger a manual reconnect after issue is resolved\n");
10471 ++ return -ECONNRESET;
10472 ++ }
10473 ++
10474 ++ if (!sess->rbufs) {
10475 + kfree(sess->rbufs);
10476 + sess->rbufs = kcalloc(queue_depth, sizeof(*sess->rbufs),
10477 + GFP_KERNEL);
10478 +@@ -1802,7 +1817,7 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
10479 + sess->chunk_size = sess->max_io_size + sess->max_hdr_size;
10480 +
10481 + /*
10482 +- * Global queue depth and IO size is always a minimum.
10483 ++ * Global IO size is always a minimum.
10484 + * If while a reconnection server sends us a value a bit
10485 + * higher - client does not care and uses cached minimum.
10486 + *
10487 +@@ -1810,8 +1825,7 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
10488 + * connections in parallel, use lock.
10489 + */
10490 + mutex_lock(&clt->paths_mutex);
10491 +- clt->queue_depth = min_not_zero(sess->queue_depth,
10492 +- clt->queue_depth);
10493 ++ clt->queue_depth = sess->queue_depth;
10494 + clt->max_io_size = min_not_zero(sess->max_io_size,
10495 + clt->max_io_size);
10496 + mutex_unlock(&clt->paths_mutex);
10497 +@@ -2762,6 +2776,8 @@ struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
10498 + if (err) {
10499 + list_del_rcu(&sess->s.entry);
10500 + rtrs_clt_close_conns(sess, true);
10501 ++ free_percpu(sess->stats->pcpu_stats);
10502 ++ kfree(sess->stats);
10503 + free_sess(sess);
10504 + goto close_all_sess;
10505 + }
10506 +@@ -2770,6 +2786,8 @@ struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
10507 + if (err) {
10508 + list_del_rcu(&sess->s.entry);
10509 + rtrs_clt_close_conns(sess, true);
10510 ++ free_percpu(sess->stats->pcpu_stats);
10511 ++ kfree(sess->stats);
10512 + free_sess(sess);
10513 + goto close_all_sess;
10514 + }
10515 +@@ -3052,6 +3070,8 @@ int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
10516 + close_sess:
10517 + rtrs_clt_remove_path_from_arr(sess);
10518 + rtrs_clt_close_conns(sess, true);
10519 ++ free_percpu(sess->stats->pcpu_stats);
10520 ++ kfree(sess->stats);
10521 + free_sess(sess);
10522 +
10523 + return err;
10524 +diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
10525 +index a9288175fbb54..20efd44297fbb 100644
10526 +--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
10527 ++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
10528 +@@ -208,6 +208,7 @@ rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
10529 + device_del(&srv->dev);
10530 + put_device(&srv->dev);
10531 + } else {
10532 ++ put_device(&srv->dev);
10533 + mutex_unlock(&srv->paths_mutex);
10534 + }
10535 + }
10536 +diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
10537 +index 0fa116cabc445..8a9099684b8e3 100644
10538 +--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
10539 ++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
10540 +@@ -1481,6 +1481,7 @@ static void free_sess(struct rtrs_srv_sess *sess)
10541 + kobject_del(&sess->kobj);
10542 + kobject_put(&sess->kobj);
10543 + } else {
10544 ++ kfree(sess->stats);
10545 + kfree(sess);
10546 + }
10547 + }
10548 +@@ -1604,7 +1605,7 @@ static int create_con(struct rtrs_srv_sess *sess,
10549 + struct rtrs_sess *s = &sess->s;
10550 + struct rtrs_srv_con *con;
10551 +
10552 +- u32 cq_size, wr_queue_size;
10553 ++ u32 cq_size, max_send_wr, max_recv_wr, wr_limit;
10554 + int err, cq_vector;
10555 +
10556 + con = kzalloc(sizeof(*con), GFP_KERNEL);
10557 +@@ -1625,30 +1626,42 @@ static int create_con(struct rtrs_srv_sess *sess,
10558 + * All receive and all send (each requiring invalidate)
10559 + * + 2 for drain and heartbeat
10560 + */
10561 +- wr_queue_size = SERVICE_CON_QUEUE_DEPTH * 3 + 2;
10562 +- cq_size = wr_queue_size;
10563 ++ max_send_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
10564 ++ max_recv_wr = SERVICE_CON_QUEUE_DEPTH + 2;
10565 ++ cq_size = max_send_wr + max_recv_wr;
10566 + } else {
10567 +- /*
10568 +- * If we have all receive requests posted and
10569 +- * all write requests posted and each read request
10570 +- * requires an invalidate request + drain
10571 +- * and qp gets into error state.
10572 +- */
10573 +- cq_size = srv->queue_depth * 3 + 1;
10574 + /*
10575 + * In theory we might have queue_depth * 32
10576 + * outstanding requests if an unsafe global key is used
10577 + * and we have queue_depth read requests each consisting
10578 + * of 32 different addresses. div 3 for mlx5.
10579 + */
10580 +- wr_queue_size = sess->s.dev->ib_dev->attrs.max_qp_wr / 3;
10581 ++ wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr / 3;
10582 ++ /* when always_invalidate is enabled, we need linv+rinv+mr+imm */
10583 ++ if (always_invalidate)
10584 ++ max_send_wr =
10585 ++ min_t(int, wr_limit,
10586 ++ srv->queue_depth * (1 + 4) + 1);
10587 ++ else
10588 ++ max_send_wr =
10589 ++ min_t(int, wr_limit,
10590 ++ srv->queue_depth * (1 + 2) + 1);
10591 ++
10592 ++ max_recv_wr = srv->queue_depth + 1;
10593 ++ /*
10594 ++ * Size the CQ for the worst case: all receive requests
10595 ++ * and all write requests posted, plus an invalidate
10596 ++ * request per read request and a drain, in case the
10597 ++ * qp goes into the error state.
10598 ++ */
10599 ++ cq_size = max_send_wr + max_recv_wr;
10600 + }
10601 +- atomic_set(&con->sq_wr_avail, wr_queue_size);
10602 ++ atomic_set(&con->sq_wr_avail, max_send_wr);
10603 + cq_vector = rtrs_srv_get_next_cq_vector(sess);
10604 +
10605 + /* TODO: SOFTIRQ can be faster, but be careful with softirq context */
10606 + err = rtrs_cq_qp_create(&sess->s, &con->c, 1, cq_vector, cq_size,
10607 +- wr_queue_size, wr_queue_size,
10608 ++ max_send_wr, max_recv_wr,
10609 + IB_POLL_WORKQUEUE);
10610 + if (err) {
10611 + rtrs_err(s, "rtrs_cq_qp_create(), err: %d\n", err);
10612 +diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
10613 +index a7847282a2ebf..4e602e40f623b 100644
10614 +--- a/drivers/infiniband/ulp/rtrs/rtrs.c
10615 ++++ b/drivers/infiniband/ulp/rtrs/rtrs.c
10616 +@@ -376,7 +376,6 @@ void rtrs_stop_hb(struct rtrs_sess *sess)
10617 + {
10618 + cancel_delayed_work_sync(&sess->hb_dwork);
10619 + sess->hb_missed_cnt = 0;
10620 +- sess->hb_missed_max = 0;
10621 + }
10622 + EXPORT_SYMBOL_GPL(rtrs_stop_hb);
10623 +
10624 +diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
10625 +index 31f8aa2c40ed8..168705c88e2fa 100644
10626 +--- a/drivers/infiniband/ulp/srp/ib_srp.c
10627 ++++ b/drivers/infiniband/ulp/srp/ib_srp.c
10628 +@@ -998,7 +998,6 @@ static int srp_alloc_req_data(struct srp_rdma_ch *ch)
10629 + struct srp_device *srp_dev = target->srp_host->srp_dev;
10630 + struct ib_device *ibdev = srp_dev->dev;
10631 + struct srp_request *req;
10632 +- void *mr_list;
10633 + dma_addr_t dma_addr;
10634 + int i, ret = -ENOMEM;
10635 +
10636 +@@ -1009,12 +1008,12 @@ static int srp_alloc_req_data(struct srp_rdma_ch *ch)
10637 +
10638 + for (i = 0; i < target->req_ring_size; ++i) {
10639 + req = &ch->req_ring[i];
10640 +- mr_list = kmalloc_array(target->mr_per_cmd, sizeof(void *),
10641 +- GFP_KERNEL);
10642 +- if (!mr_list)
10643 +- goto out;
10644 +- if (srp_dev->use_fast_reg)
10645 +- req->fr_list = mr_list;
10646 ++ if (srp_dev->use_fast_reg) {
10647 ++ req->fr_list = kmalloc_array(target->mr_per_cmd,
10648 ++ sizeof(void *), GFP_KERNEL);
10649 ++ if (!req->fr_list)
10650 ++ goto out;
10651 ++ }
10652 + req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
10653 + if (!req->indirect_desc)
10654 + goto out;
10655 +diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
10656 +index da8963a9f044c..947d440a3be63 100644
10657 +--- a/drivers/input/joydev.c
10658 ++++ b/drivers/input/joydev.c
10659 +@@ -499,7 +499,7 @@ static int joydev_handle_JSIOCSBTNMAP(struct joydev *joydev,
10660 + memcpy(joydev->keypam, keypam, len);
10661 +
10662 + for (i = 0; i < joydev->nkey; i++)
10663 +- joydev->keymap[keypam[i] - BTN_MISC] = i;
10664 ++ joydev->keymap[joydev->keypam[i] - BTN_MISC] = i;
10665 +
10666 + out:
10667 + kfree(keypam);
10668 +diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
10669 +index 32d15809ae586..40a070a2e7f5b 100644
10670 +--- a/drivers/input/keyboard/Kconfig
10671 ++++ b/drivers/input/keyboard/Kconfig
10672 +@@ -67,9 +67,6 @@ config KEYBOARD_AMIGA
10673 + To compile this driver as a module, choose M here: the
10674 + module will be called amikbd.
10675 +
10676 +-config ATARI_KBD_CORE
10677 +- bool
10678 +-
10679 + config KEYBOARD_APPLESPI
10680 + tristate "Apple SPI keyboard and trackpad"
10681 + depends on ACPI && EFI
10682 +diff --git a/drivers/input/keyboard/hil_kbd.c b/drivers/input/keyboard/hil_kbd.c
10683 +index bb29a7c9a1c0c..54afb38601b9f 100644
10684 +--- a/drivers/input/keyboard/hil_kbd.c
10685 ++++ b/drivers/input/keyboard/hil_kbd.c
10686 +@@ -512,6 +512,7 @@ static int hil_dev_connect(struct serio *serio, struct serio_driver *drv)
10687 + HIL_IDD_NUM_AXES_PER_SET(*idd)) {
10688 + printk(KERN_INFO PREFIX
10689 + "combo devices are not supported.\n");
10690 ++ error = -EINVAL;
10691 + goto bail1;
10692 + }
10693 +
10694 +diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
10695 +index 17540bdb1eaf7..0f9e3ec99aae1 100644
10696 +--- a/drivers/input/touchscreen/elants_i2c.c
10697 ++++ b/drivers/input/touchscreen/elants_i2c.c
10698 +@@ -1396,7 +1396,7 @@ static int elants_i2c_probe(struct i2c_client *client,
10699 + init_completion(&ts->cmd_done);
10700 +
10701 + ts->client = client;
10702 +- ts->chip_id = (enum elants_chip_id)id->driver_data;
10703 ++ ts->chip_id = (enum elants_chip_id)(uintptr_t)device_get_match_data(&client->dev);
10704 + i2c_set_clientdata(client, ts);
10705 +
10706 + ts->vcc33 = devm_regulator_get(&client->dev, "vcc33");
10707 +@@ -1636,8 +1636,8 @@ MODULE_DEVICE_TABLE(acpi, elants_acpi_id);
10708 +
10709 + #ifdef CONFIG_OF
10710 + static const struct of_device_id elants_of_match[] = {
10711 +- { .compatible = "elan,ekth3500" },
10712 +- { .compatible = "elan,ektf3624" },
10713 ++ { .compatible = "elan,ekth3500", .data = (void *)EKTH3500 },
10714 ++ { .compatible = "elan,ektf3624", .data = (void *)EKTF3624 },
10715 + { /* sentinel */ }
10716 + };
10717 + MODULE_DEVICE_TABLE(of, elants_of_match);
10718 +diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
10719 +index c682b028f0a29..4f53d3c57e698 100644
10720 +--- a/drivers/input/touchscreen/goodix.c
10721 ++++ b/drivers/input/touchscreen/goodix.c
10722 +@@ -178,51 +178,6 @@ static const unsigned long goodix_irq_flags[] = {
10723 + IRQ_TYPE_LEVEL_HIGH,
10724 + };
10725 +
10726 +-/*
10727 +- * Those tablets have their coordinates origin at the bottom right
10728 +- * of the tablet, as if rotated 180 degrees
10729 +- */
10730 +-static const struct dmi_system_id rotated_screen[] = {
10731 +-#if defined(CONFIG_DMI) && defined(CONFIG_X86)
10732 +- {
10733 +- .ident = "Teclast X89",
10734 +- .matches = {
10735 +- /* tPAD is too generic, also match on bios date */
10736 +- DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
10737 +- DMI_MATCH(DMI_BOARD_NAME, "tPAD"),
10738 +- DMI_MATCH(DMI_BIOS_DATE, "12/19/2014"),
10739 +- },
10740 +- },
10741 +- {
10742 +- .ident = "Teclast X98 Pro",
10743 +- .matches = {
10744 +- /*
10745 +- * Only match BIOS date, because the manufacturers
10746 +- * BIOS does not report the board name at all
10747 +- * (sometimes)...
10748 +- */
10749 +- DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
10750 +- DMI_MATCH(DMI_BIOS_DATE, "10/28/2015"),
10751 +- },
10752 +- },
10753 +- {
10754 +- .ident = "WinBook TW100",
10755 +- .matches = {
10756 +- DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
10757 +- DMI_MATCH(DMI_PRODUCT_NAME, "TW100")
10758 +- }
10759 +- },
10760 +- {
10761 +- .ident = "WinBook TW700",
10762 +- .matches = {
10763 +- DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
10764 +- DMI_MATCH(DMI_PRODUCT_NAME, "TW700")
10765 +- },
10766 +- },
10767 +-#endif
10768 +- {}
10769 +-};
10770 +-
10771 + static const struct dmi_system_id nine_bytes_report[] = {
10772 + #if defined(CONFIG_DMI) && defined(CONFIG_X86)
10773 + {
10774 +@@ -1123,13 +1078,6 @@ static int goodix_configure_dev(struct goodix_ts_data *ts)
10775 + ABS_MT_POSITION_Y, ts->prop.max_y);
10776 + }
10777 +
10778 +- if (dmi_check_system(rotated_screen)) {
10779 +- ts->prop.invert_x = true;
10780 +- ts->prop.invert_y = true;
10781 +- dev_dbg(&ts->client->dev,
10782 +- "Applying '180 degrees rotated screen' quirk\n");
10783 +- }
10784 +-
10785 + if (dmi_check_system(nine_bytes_report)) {
10786 + ts->contact_size = 9;
10787 +
10788 +diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
10789 +index c847453a03c26..43c521f50c851 100644
10790 +--- a/drivers/input/touchscreen/usbtouchscreen.c
10791 ++++ b/drivers/input/touchscreen/usbtouchscreen.c
10792 +@@ -251,7 +251,7 @@ static int e2i_init(struct usbtouch_usb *usbtouch)
10793 + int ret;
10794 + struct usb_device *udev = interface_to_usbdev(usbtouch->interface);
10795 +
10796 +- ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
10797 ++ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
10798 + 0x01, 0x02, 0x0000, 0x0081,
10799 + NULL, 0, USB_CTRL_SET_TIMEOUT);
10800 +
10801 +@@ -531,7 +531,7 @@ static int mtouch_init(struct usbtouch_usb *usbtouch)
10802 + if (ret)
10803 + return ret;
10804 +
10805 +- ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
10806 ++ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
10807 + MTOUCHUSB_RESET,
10808 + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
10809 + 1, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
10810 +@@ -543,7 +543,7 @@ static int mtouch_init(struct usbtouch_usb *usbtouch)
10811 + msleep(150);
10812 +
10813 + for (i = 0; i < 3; i++) {
10814 +- ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
10815 ++ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
10816 + MTOUCHUSB_ASYNC_REPORT,
10817 + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
10818 + 1, 1, NULL, 0, USB_CTRL_SET_TIMEOUT);
10819 +@@ -722,7 +722,7 @@ static int dmc_tsc10_init(struct usbtouch_usb *usbtouch)
10820 + }
10821 +
10822 + /* start sending data */
10823 +- ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0),
10824 ++ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
10825 + TSC10_CMD_DATA1,
10826 + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
10827 + 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
10828 +diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
10829 +index 55dd38d814d92..416815a525d67 100644
10830 +--- a/drivers/iommu/amd/amd_iommu.h
10831 ++++ b/drivers/iommu/amd/amd_iommu.h
10832 +@@ -11,8 +11,6 @@
10833 +
10834 + #include "amd_iommu_types.h"
10835 +
10836 +-extern int amd_iommu_init_dma_ops(void);
10837 +-extern int amd_iommu_init_passthrough(void);
10838 + extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
10839 + extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
10840 + extern void amd_iommu_apply_erratum_63(u16 devid);
10841 +diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
10842 +index d006724f4dc21..5ff7e5364ef44 100644
10843 +--- a/drivers/iommu/amd/init.c
10844 ++++ b/drivers/iommu/amd/init.c
10845 +@@ -231,7 +231,6 @@ enum iommu_init_state {
10846 + IOMMU_ENABLED,
10847 + IOMMU_PCI_INIT,
10848 + IOMMU_INTERRUPTS_EN,
10849 +- IOMMU_DMA_OPS,
10850 + IOMMU_INITIALIZED,
10851 + IOMMU_NOT_FOUND,
10852 + IOMMU_INIT_ERROR,
10853 +@@ -1908,8 +1907,8 @@ static void print_iommu_info(void)
10854 + pci_info(pdev, "Found IOMMU cap 0x%x\n", iommu->cap_ptr);
10855 +
10856 + if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
10857 +- pci_info(pdev, "Extended features (%#llx):",
10858 +- iommu->features);
10859 ++ pr_info("Extended features (%#llx):", iommu->features);
10860 ++
10861 + for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
10862 + if (iommu_feature(iommu, (1ULL << i)))
10863 + pr_cont(" %s", feat_str[i]);
10864 +@@ -2895,10 +2894,6 @@ static int __init state_next(void)
10865 + init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
10866 + break;
10867 + case IOMMU_INTERRUPTS_EN:
10868 +- ret = amd_iommu_init_dma_ops();
10869 +- init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
10870 +- break;
10871 +- case IOMMU_DMA_OPS:
10872 + init_state = IOMMU_INITIALIZED;
10873 + break;
10874 + case IOMMU_INITIALIZED:
10875 +diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
10876 +index 3ac42bbdefc63..c46dde88a132b 100644
10877 +--- a/drivers/iommu/amd/iommu.c
10878 ++++ b/drivers/iommu/amd/iommu.c
10879 +@@ -30,7 +30,6 @@
10880 + #include <linux/msi.h>
10881 + #include <linux/irqdomain.h>
10882 + #include <linux/percpu.h>
10883 +-#include <linux/iova.h>
10884 + #include <linux/io-pgtable.h>
10885 + #include <asm/irq_remapping.h>
10886 + #include <asm/io_apic.h>
10887 +@@ -1773,13 +1772,22 @@ void amd_iommu_domain_update(struct protection_domain *domain)
10888 + amd_iommu_domain_flush_complete(domain);
10889 + }
10890 +
10891 ++static void __init amd_iommu_init_dma_ops(void)
10892 ++{
10893 ++ swiotlb = (iommu_default_passthrough() || sme_me_mask) ? 1 : 0;
10894 ++
10895 ++ if (amd_iommu_unmap_flush)
10896 ++ pr_info("IO/TLB flush on unmap enabled\n");
10897 ++ else
10898 ++ pr_info("Lazy IO/TLB flushing enabled\n");
10899 ++ iommu_set_dma_strict(amd_iommu_unmap_flush);
10900 ++}
10901 ++
10902 + int __init amd_iommu_init_api(void)
10903 + {
10904 +- int ret, err = 0;
10905 ++ int err = 0;
10906 +
10907 +- ret = iova_cache_get();
10908 +- if (ret)
10909 +- return ret;
10910 ++ amd_iommu_init_dma_ops();
10911 +
10912 + err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
10913 + if (err)
10914 +@@ -1796,19 +1804,6 @@ int __init amd_iommu_init_api(void)
10915 + return 0;
10916 + }
10917 +
10918 +-int __init amd_iommu_init_dma_ops(void)
10919 +-{
10920 +- swiotlb = (iommu_default_passthrough() || sme_me_mask) ? 1 : 0;
10921 +-
10922 +- if (amd_iommu_unmap_flush)
10923 +- pr_info("IO/TLB flush on unmap enabled\n");
10924 +- else
10925 +- pr_info("Lazy IO/TLB flushing enabled\n");
10926 +- iommu_set_dma_strict(amd_iommu_unmap_flush);
10927 +- return 0;
10928 +-
10929 +-}
10930 +-
10931 + /*****************************************************************************
10932 + *
10933 + * The following functions belong to the exported interface of AMD IOMMU
10934 +diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
10935 +index 7bcdd12055358..5d96fcc45feca 100644
10936 +--- a/drivers/iommu/dma-iommu.c
10937 ++++ b/drivers/iommu/dma-iommu.c
10938 +@@ -243,9 +243,11 @@ resv_iova:
10939 + lo = iova_pfn(iovad, start);
10940 + hi = iova_pfn(iovad, end);
10941 + reserve_iova(iovad, lo, hi);
10942 +- } else {
10943 ++ } else if (end < start) {
10944 + /* dma_ranges list should be sorted */
10945 +- dev_err(&dev->dev, "Failed to reserve IOVA\n");
10946 ++ dev_err(&dev->dev,
10947 ++ "Failed to reserve IOVA [%pa-%pa]\n",
10948 ++ &start, &end);
10949 + return -EINVAL;
10950 + }
10951 +
10952 +diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
10953 +index 49d99cb084dbd..c81b1e60953c1 100644
10954 +--- a/drivers/leds/Kconfig
10955 ++++ b/drivers/leds/Kconfig
10956 +@@ -199,6 +199,7 @@ config LEDS_LM3530
10957 +
10958 + config LEDS_LM3532
10959 + tristate "LCD Backlight driver for LM3532"
10960 ++ select REGMAP_I2C
10961 + depends on LEDS_CLASS
10962 + depends on I2C
10963 + help
10964 +diff --git a/drivers/leds/blink/leds-lgm-sso.c b/drivers/leds/blink/leds-lgm-sso.c
10965 +index 6a63846d10b5e..7d5f0bf2817ad 100644
10966 +--- a/drivers/leds/blink/leds-lgm-sso.c
10967 ++++ b/drivers/leds/blink/leds-lgm-sso.c
10968 +@@ -132,8 +132,7 @@ struct sso_led_priv {
10969 + struct regmap *mmap;
10970 + struct device *dev;
10971 + struct platform_device *pdev;
10972 +- struct clk *gclk;
10973 +- struct clk *fpid_clk;
10974 ++ struct clk_bulk_data clocks[2];
10975 + u32 fpid_clkrate;
10976 + u32 gptc_clkrate;
10977 + u32 freq[MAX_FREQ_RANK];
10978 +@@ -763,12 +762,11 @@ static int sso_probe_gpios(struct sso_led_priv *priv)
10979 + return sso_gpio_gc_init(dev, priv);
10980 + }
10981 +
10982 +-static void sso_clk_disable(void *data)
10983 ++static void sso_clock_disable_unprepare(void *data)
10984 + {
10985 + struct sso_led_priv *priv = data;
10986 +
10987 +- clk_disable_unprepare(priv->fpid_clk);
10988 +- clk_disable_unprepare(priv->gclk);
10989 ++ clk_bulk_disable_unprepare(ARRAY_SIZE(priv->clocks), priv->clocks);
10990 + }
10991 +
10992 + static int intel_sso_led_probe(struct platform_device *pdev)
10993 +@@ -785,36 +783,30 @@ static int intel_sso_led_probe(struct platform_device *pdev)
10994 + priv->dev = dev;
10995 +
10996 + /* gate clock */
10997 +- priv->gclk = devm_clk_get(dev, "sso");
10998 +- if (IS_ERR(priv->gclk)) {
10999 +- dev_err(dev, "get sso gate clock failed!\n");
11000 +- return PTR_ERR(priv->gclk);
11001 +- }
11002 ++ priv->clocks[0].id = "sso";
11003 ++
11004 ++ /* fpid clock */
11005 ++ priv->clocks[1].id = "fpid";
11006 +
11007 +- ret = clk_prepare_enable(priv->gclk);
11008 ++ ret = devm_clk_bulk_get(dev, ARRAY_SIZE(priv->clocks), priv->clocks);
11009 + if (ret) {
11010 +- dev_err(dev, "Failed to prepare/enable sso gate clock!\n");
11011 ++ dev_err(dev, "Getting clocks failed!\n");
11012 + return ret;
11013 + }
11014 +
11015 +- priv->fpid_clk = devm_clk_get(dev, "fpid");
11016 +- if (IS_ERR(priv->fpid_clk)) {
11017 +- dev_err(dev, "Failed to get fpid clock!\n");
11018 +- return PTR_ERR(priv->fpid_clk);
11019 +- }
11020 +-
11021 +- ret = clk_prepare_enable(priv->fpid_clk);
11022 ++ ret = clk_bulk_prepare_enable(ARRAY_SIZE(priv->clocks), priv->clocks);
11023 + if (ret) {
11024 +- dev_err(dev, "Failed to prepare/enable fpid clock!\n");
11025 ++ dev_err(dev, "Failed to prepare and enable clocks!\n");
11026 + return ret;
11027 + }
11028 +- priv->fpid_clkrate = clk_get_rate(priv->fpid_clk);
11029 +
11030 +- ret = devm_add_action_or_reset(dev, sso_clk_disable, priv);
11031 +- if (ret) {
11032 +- dev_err(dev, "Failed to devm_add_action_or_reset, %d\n", ret);
11033 ++ ret = devm_add_action_or_reset(dev, sso_clock_disable_unprepare, priv);
11034 ++ if (ret)
11035 + return ret;
11036 +- }
11037 ++
11038 ++ priv->fpid_clkrate = clk_get_rate(priv->clocks[1].clk);
11039 ++
11040 ++ priv->mmap = syscon_node_to_regmap(dev->of_node);
11041 +
11042 + priv->mmap = syscon_node_to_regmap(dev->of_node);
11043 + if (IS_ERR(priv->mmap)) {
11044 +@@ -859,8 +851,6 @@ static int intel_sso_led_remove(struct platform_device *pdev)
11045 + sso_led_shutdown(led);
11046 + }
11047 +
11048 +- clk_disable_unprepare(priv->fpid_clk);
11049 +- clk_disable_unprepare(priv->gclk);
11050 + regmap_exit(priv->mmap);
11051 +
11052 + return 0;
11053 +diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
11054 +index 2e495ff678562..fa3f5f504ff7d 100644
11055 +--- a/drivers/leds/led-class.c
11056 ++++ b/drivers/leds/led-class.c
11057 +@@ -285,10 +285,6 @@ struct led_classdev *__must_check devm_of_led_get(struct device *dev,
11058 + if (!dev)
11059 + return ERR_PTR(-EINVAL);
11060 +
11061 +- /* Not using device tree? */
11062 +- if (!IS_ENABLED(CONFIG_OF) || !dev->of_node)
11063 +- return ERR_PTR(-ENOTSUPP);
11064 +-
11065 + led = of_led_get(dev->of_node, index);
11066 + if (IS_ERR(led))
11067 + return led;
11068 +diff --git a/drivers/leds/leds-as3645a.c b/drivers/leds/leds-as3645a.c
11069 +index e8922fa033796..80411d41e802d 100644
11070 +--- a/drivers/leds/leds-as3645a.c
11071 ++++ b/drivers/leds/leds-as3645a.c
11072 +@@ -545,6 +545,7 @@ static int as3645a_parse_node(struct as3645a *flash,
11073 + if (!flash->indicator_node) {
11074 + dev_warn(&flash->client->dev,
11075 + "can't find indicator node\n");
11076 ++ rval = -ENODEV;
11077 + goto out_err;
11078 + }
11079 +
11080 +diff --git a/drivers/leds/leds-ktd2692.c b/drivers/leds/leds-ktd2692.c
11081 +index 632f10db4b3ff..f341da1503a49 100644
11082 +--- a/drivers/leds/leds-ktd2692.c
11083 ++++ b/drivers/leds/leds-ktd2692.c
11084 +@@ -256,6 +256,17 @@ static void ktd2692_setup(struct ktd2692_context *led)
11085 + | KTD2692_REG_FLASH_CURRENT_BASE);
11086 + }
11087 +
11088 ++static void regulator_disable_action(void *_data)
11089 ++{
11090 ++ struct device *dev = _data;
11091 ++ struct ktd2692_context *led = dev_get_drvdata(dev);
11092 ++ int ret;
11093 ++
11094 ++ ret = regulator_disable(led->regulator);
11095 ++ if (ret)
11096 ++ dev_err(dev, "Failed to disable supply: %d\n", ret);
11097 ++}
11098 ++
11099 + static int ktd2692_parse_dt(struct ktd2692_context *led, struct device *dev,
11100 + struct ktd2692_led_config_data *cfg)
11101 + {
11102 +@@ -286,8 +297,14 @@ static int ktd2692_parse_dt(struct ktd2692_context *led, struct device *dev,
11103 +
11104 + if (led->regulator) {
11105 + ret = regulator_enable(led->regulator);
11106 +- if (ret)
11107 ++ if (ret) {
11108 + dev_err(dev, "Failed to enable supply: %d\n", ret);
11109 ++ } else {
11110 ++ ret = devm_add_action_or_reset(dev,
11111 ++ regulator_disable_action, dev);
11112 ++ if (ret)
11113 ++ return ret;
11114 ++ }
11115 + }
11116 +
11117 + child_node = of_get_next_available_child(np, NULL);
11118 +@@ -377,17 +394,9 @@ static int ktd2692_probe(struct platform_device *pdev)
11119 + static int ktd2692_remove(struct platform_device *pdev)
11120 + {
11121 + struct ktd2692_context *led = platform_get_drvdata(pdev);
11122 +- int ret;
11123 +
11124 + led_classdev_flash_unregister(&led->fled_cdev);
11125 +
11126 +- if (led->regulator) {
11127 +- ret = regulator_disable(led->regulator);
11128 +- if (ret)
11129 +- dev_err(&pdev->dev,
11130 +- "Failed to disable supply: %d\n", ret);
11131 +- }
11132 +-
11133 + mutex_destroy(&led->lock);
11134 +
11135 + return 0;
11136 +diff --git a/drivers/leds/leds-lm36274.c b/drivers/leds/leds-lm36274.c
11137 +index aadb03468a40a..a23a9424c2f38 100644
11138 +--- a/drivers/leds/leds-lm36274.c
11139 ++++ b/drivers/leds/leds-lm36274.c
11140 +@@ -127,6 +127,7 @@ static int lm36274_probe(struct platform_device *pdev)
11141 +
11142 + ret = lm36274_init(chip);
11143 + if (ret) {
11144 ++ fwnode_handle_put(init_data.fwnode);
11145 + dev_err(chip->dev, "Failed to init the device\n");
11146 + return ret;
11147 + }
11148 +diff --git a/drivers/leds/leds-lm3692x.c b/drivers/leds/leds-lm3692x.c
11149 +index e945de45388ca..55e6443997ec9 100644
11150 +--- a/drivers/leds/leds-lm3692x.c
11151 ++++ b/drivers/leds/leds-lm3692x.c
11152 +@@ -435,6 +435,7 @@ static int lm3692x_probe_dt(struct lm3692x_led *led)
11153 +
11154 + ret = fwnode_property_read_u32(child, "reg", &led->led_enable);
11155 + if (ret) {
11156 ++ fwnode_handle_put(child);
11157 + dev_err(&led->client->dev, "reg DT property missing\n");
11158 + return ret;
11159 + }
11160 +@@ -449,12 +450,11 @@ static int lm3692x_probe_dt(struct lm3692x_led *led)
11161 +
11162 + ret = devm_led_classdev_register_ext(&led->client->dev, &led->led_dev,
11163 + &init_data);
11164 +- if (ret) {
11165 ++ if (ret)
11166 + dev_err(&led->client->dev, "led register err: %d\n", ret);
11167 +- return ret;
11168 +- }
11169 +
11170 +- return 0;
11171 ++ fwnode_handle_put(init_data.fwnode);
11172 ++ return ret;
11173 + }
11174 +
11175 + static int lm3692x_probe(struct i2c_client *client,
11176 +diff --git a/drivers/leds/leds-lm3697.c b/drivers/leds/leds-lm3697.c
11177 +index 7d216cdb91a8a..912e8bb22a995 100644
11178 +--- a/drivers/leds/leds-lm3697.c
11179 ++++ b/drivers/leds/leds-lm3697.c
11180 +@@ -203,11 +203,9 @@ static int lm3697_probe_dt(struct lm3697 *priv)
11181 +
11182 + priv->enable_gpio = devm_gpiod_get_optional(dev, "enable",
11183 + GPIOD_OUT_LOW);
11184 +- if (IS_ERR(priv->enable_gpio)) {
11185 +- ret = PTR_ERR(priv->enable_gpio);
11186 +- dev_err(dev, "Failed to get enable gpio: %d\n", ret);
11187 +- return ret;
11188 +- }
11189 ++ if (IS_ERR(priv->enable_gpio))
11190 ++ return dev_err_probe(dev, PTR_ERR(priv->enable_gpio),
11191 ++ "Failed to get enable GPIO\n");
11192 +
11193 + priv->regulator = devm_regulator_get(dev, "vled");
11194 + if (IS_ERR(priv->regulator))
11195 +diff --git a/drivers/leds/leds-lp50xx.c b/drivers/leds/leds-lp50xx.c
11196 +index 06230614fdc56..401df1e2e05d0 100644
11197 +--- a/drivers/leds/leds-lp50xx.c
11198 ++++ b/drivers/leds/leds-lp50xx.c
11199 +@@ -490,6 +490,7 @@ static int lp50xx_probe_dt(struct lp50xx *priv)
11200 + ret = fwnode_property_read_u32(led_node, "color",
11201 + &color_id);
11202 + if (ret) {
11203 ++ fwnode_handle_put(led_node);
11204 + dev_err(priv->dev, "Cannot read color\n");
11205 + goto child_out;
11206 + }
11207 +@@ -512,7 +513,6 @@ static int lp50xx_probe_dt(struct lp50xx *priv)
11208 + goto child_out;
11209 + }
11210 + i++;
11211 +- fwnode_handle_put(child);
11212 + }
11213 +
11214 + return 0;
11215 +diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
11216 +index f25324d03842e..15236d7296258 100644
11217 +--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
11218 ++++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
11219 +@@ -132,7 +132,7 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
11220 + if (apcs_data->clk_name) {
11221 + apcs->clk = platform_device_register_data(&pdev->dev,
11222 + apcs_data->clk_name,
11223 +- PLATFORM_DEVID_NONE,
11224 ++ PLATFORM_DEVID_AUTO,
11225 + NULL, 0);
11226 + if (IS_ERR(apcs->clk))
11227 + dev_err(&pdev->dev, "failed to register APCS clk\n");
11228 +diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c
11229 +index 2d13c72944c6f..584700cd15855 100644
11230 +--- a/drivers/mailbox/qcom-ipcc.c
11231 ++++ b/drivers/mailbox/qcom-ipcc.c
11232 +@@ -155,6 +155,11 @@ static int qcom_ipcc_mbox_send_data(struct mbox_chan *chan, void *data)
11233 + return 0;
11234 + }
11235 +
11236 ++static void qcom_ipcc_mbox_shutdown(struct mbox_chan *chan)
11237 ++{
11238 ++ chan->con_priv = NULL;
11239 ++}
11240 ++
11241 + static struct mbox_chan *qcom_ipcc_mbox_xlate(struct mbox_controller *mbox,
11242 + const struct of_phandle_args *ph)
11243 + {
11244 +@@ -184,6 +189,7 @@ static struct mbox_chan *qcom_ipcc_mbox_xlate(struct mbox_controller *mbox,
11245 +
11246 + static const struct mbox_chan_ops ipcc_mbox_chan_ops = {
11247 + .send_data = qcom_ipcc_mbox_send_data,
11248 ++ .shutdown = qcom_ipcc_mbox_shutdown,
11249 + };
11250 +
11251 + static int qcom_ipcc_setup_mbox(struct qcom_ipcc *ipcc)
11252 +diff --git a/drivers/md/md.c b/drivers/md/md.c
11253 +index 49f897fbb89ba..7ba00e4c862d7 100644
11254 +--- a/drivers/md/md.c
11255 ++++ b/drivers/md/md.c
11256 +@@ -441,30 +441,6 @@ check_suspended:
11257 + }
11258 + EXPORT_SYMBOL(md_handle_request);
11259 +
11260 +-struct md_io {
11261 +- struct mddev *mddev;
11262 +- bio_end_io_t *orig_bi_end_io;
11263 +- void *orig_bi_private;
11264 +- struct block_device *orig_bi_bdev;
11265 +- unsigned long start_time;
11266 +-};
11267 +-
11268 +-static void md_end_io(struct bio *bio)
11269 +-{
11270 +- struct md_io *md_io = bio->bi_private;
11271 +- struct mddev *mddev = md_io->mddev;
11272 +-
11273 +- bio_end_io_acct_remapped(bio, md_io->start_time, md_io->orig_bi_bdev);
11274 +-
11275 +- bio->bi_end_io = md_io->orig_bi_end_io;
11276 +- bio->bi_private = md_io->orig_bi_private;
11277 +-
11278 +- mempool_free(md_io, &mddev->md_io_pool);
11279 +-
11280 +- if (bio->bi_end_io)
11281 +- bio->bi_end_io(bio);
11282 +-}
11283 +-
11284 + static blk_qc_t md_submit_bio(struct bio *bio)
11285 + {
11286 + const int rw = bio_data_dir(bio);
11287 +@@ -489,21 +465,6 @@ static blk_qc_t md_submit_bio(struct bio *bio)
11288 + return BLK_QC_T_NONE;
11289 + }
11290 +
11291 +- if (bio->bi_end_io != md_end_io) {
11292 +- struct md_io *md_io;
11293 +-
11294 +- md_io = mempool_alloc(&mddev->md_io_pool, GFP_NOIO);
11295 +- md_io->mddev = mddev;
11296 +- md_io->orig_bi_end_io = bio->bi_end_io;
11297 +- md_io->orig_bi_private = bio->bi_private;
11298 +- md_io->orig_bi_bdev = bio->bi_bdev;
11299 +-
11300 +- bio->bi_end_io = md_end_io;
11301 +- bio->bi_private = md_io;
11302 +-
11303 +- md_io->start_time = bio_start_io_acct(bio);
11304 +- }
11305 +-
11306 + /* bio could be mergeable after passing to underlayer */
11307 + bio->bi_opf &= ~REQ_NOMERGE;
11308 +
11309 +@@ -5608,7 +5569,6 @@ static void md_free(struct kobject *ko)
11310 +
11311 + bioset_exit(&mddev->bio_set);
11312 + bioset_exit(&mddev->sync_set);
11313 +- mempool_exit(&mddev->md_io_pool);
11314 + kfree(mddev);
11315 + }
11316 +
11317 +@@ -5705,11 +5665,6 @@ static int md_alloc(dev_t dev, char *name)
11318 + */
11319 + mddev->hold_active = UNTIL_STOP;
11320 +
11321 +- error = mempool_init_kmalloc_pool(&mddev->md_io_pool, BIO_POOL_SIZE,
11322 +- sizeof(struct md_io));
11323 +- if (error)
11324 +- goto abort;
11325 +-
11326 + error = -ENOMEM;
11327 + mddev->queue = blk_alloc_queue(NUMA_NO_NODE);
11328 + if (!mddev->queue)
11329 +diff --git a/drivers/md/md.h b/drivers/md/md.h
11330 +index fb7eab58cfd51..4da240ffe2c5e 100644
11331 +--- a/drivers/md/md.h
11332 ++++ b/drivers/md/md.h
11333 +@@ -487,7 +487,6 @@ struct mddev {
11334 + struct bio_set sync_set; /* for sync operations like
11335 + * metadata and bitmap writes
11336 + */
11337 +- mempool_t md_io_pool;
11338 +
11339 + /* Generic flush handling.
11340 + * The last to finish preflush schedules a worker to submit
11341 +diff --git a/drivers/media/cec/platform/s5p/s5p_cec.c b/drivers/media/cec/platform/s5p/s5p_cec.c
11342 +index 2a3e7ffefe0a2..028a09a7531ef 100644
11343 +--- a/drivers/media/cec/platform/s5p/s5p_cec.c
11344 ++++ b/drivers/media/cec/platform/s5p/s5p_cec.c
11345 +@@ -35,10 +35,13 @@ MODULE_PARM_DESC(debug, "debug level (0-2)");
11346 +
11347 + static int s5p_cec_adap_enable(struct cec_adapter *adap, bool enable)
11348 + {
11349 ++ int ret;
11350 + struct s5p_cec_dev *cec = cec_get_drvdata(adap);
11351 +
11352 + if (enable) {
11353 +- pm_runtime_get_sync(cec->dev);
11354 ++ ret = pm_runtime_resume_and_get(cec->dev);
11355 ++ if (ret < 0)
11356 ++ return ret;
11357 +
11358 + s5p_cec_reset(cec);
11359 +
11360 +@@ -51,7 +54,7 @@ static int s5p_cec_adap_enable(struct cec_adapter *adap, bool enable)
11361 + } else {
11362 + s5p_cec_mask_tx_interrupts(cec);
11363 + s5p_cec_mask_rx_interrupts(cec);
11364 +- pm_runtime_disable(cec->dev);
11365 ++ pm_runtime_put(cec->dev);
11366 + }
11367 +
11368 + return 0;
11369 +diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c
11370 +index 410cc3ac6f948..bceaf91faa15f 100644
11371 +--- a/drivers/media/common/siano/smscoreapi.c
11372 ++++ b/drivers/media/common/siano/smscoreapi.c
11373 +@@ -908,7 +908,7 @@ static int smscore_load_firmware_family2(struct smscore_device_t *coredev,
11374 + void *buffer, size_t size)
11375 + {
11376 + struct sms_firmware *firmware = (struct sms_firmware *) buffer;
11377 +- struct sms_msg_data4 *msg;
11378 ++ struct sms_msg_data5 *msg;
11379 + u32 mem_address, calc_checksum = 0;
11380 + u32 i, *ptr;
11381 + u8 *payload = firmware->payload;
11382 +@@ -989,24 +989,20 @@ static int smscore_load_firmware_family2(struct smscore_device_t *coredev,
11383 + goto exit_fw_download;
11384 +
11385 + if (coredev->mode == DEVICE_MODE_NONE) {
11386 +- struct sms_msg_data *trigger_msg =
11387 +- (struct sms_msg_data *) msg;
11388 +-
11389 + pr_debug("sending MSG_SMS_SWDOWNLOAD_TRIGGER_REQ\n");
11390 + SMS_INIT_MSG(&msg->x_msg_header,
11391 + MSG_SMS_SWDOWNLOAD_TRIGGER_REQ,
11392 +- sizeof(struct sms_msg_hdr) +
11393 +- sizeof(u32) * 5);
11394 ++ sizeof(*msg));
11395 +
11396 +- trigger_msg->msg_data[0] = firmware->start_address;
11397 ++ msg->msg_data[0] = firmware->start_address;
11398 + /* Entry point */
11399 +- trigger_msg->msg_data[1] = 6; /* Priority */
11400 +- trigger_msg->msg_data[2] = 0x200; /* Stack size */
11401 +- trigger_msg->msg_data[3] = 0; /* Parameter */
11402 +- trigger_msg->msg_data[4] = 4; /* Task ID */
11403 ++ msg->msg_data[1] = 6; /* Priority */
11404 ++ msg->msg_data[2] = 0x200; /* Stack size */
11405 ++ msg->msg_data[3] = 0; /* Parameter */
11406 ++ msg->msg_data[4] = 4; /* Task ID */
11407 +
11408 +- rc = smscore_sendrequest_and_wait(coredev, trigger_msg,
11409 +- trigger_msg->x_msg_header.msg_length,
11410 ++ rc = smscore_sendrequest_and_wait(coredev, msg,
11411 ++ msg->x_msg_header.msg_length,
11412 + &coredev->trigger_done);
11413 + } else {
11414 + SMS_INIT_MSG(&msg->x_msg_header, MSG_SW_RELOAD_EXEC_REQ,
11415 +diff --git a/drivers/media/common/siano/smscoreapi.h b/drivers/media/common/siano/smscoreapi.h
11416 +index 4a6b9f4c44ace..f8789ee0d554e 100644
11417 +--- a/drivers/media/common/siano/smscoreapi.h
11418 ++++ b/drivers/media/common/siano/smscoreapi.h
11419 +@@ -624,9 +624,9 @@ struct sms_msg_data2 {
11420 + u32 msg_data[2];
11421 + };
11422 +
11423 +-struct sms_msg_data4 {
11424 ++struct sms_msg_data5 {
11425 + struct sms_msg_hdr x_msg_header;
11426 +- u32 msg_data[4];
11427 ++ u32 msg_data[5];
11428 + };
11429 +
11430 + struct sms_data_download {
11431 +diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c
11432 +index cd5bafe9a3aca..7e4100263381c 100644
11433 +--- a/drivers/media/common/siano/smsdvb-main.c
11434 ++++ b/drivers/media/common/siano/smsdvb-main.c
11435 +@@ -1212,6 +1212,10 @@ static int smsdvb_hotplug(struct smscore_device_t *coredev,
11436 + return 0;
11437 +
11438 + media_graph_error:
11439 ++ mutex_lock(&g_smsdvb_clientslock);
11440 ++ list_del(&client->entry);
11441 ++ mutex_unlock(&g_smsdvb_clientslock);
11442 ++
11443 + smsdvb_debugfs_release(client);
11444 +
11445 + client_error:
11446 +diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
11447 +index 89620da983bab..dddebea644bb8 100644
11448 +--- a/drivers/media/dvb-core/dvb_net.c
11449 ++++ b/drivers/media/dvb-core/dvb_net.c
11450 +@@ -45,6 +45,7 @@
11451 + #include <linux/module.h>
11452 + #include <linux/kernel.h>
11453 + #include <linux/netdevice.h>
11454 ++#include <linux/nospec.h>
11455 + #include <linux/etherdevice.h>
11456 + #include <linux/dvb/net.h>
11457 + #include <linux/uio.h>
11458 +@@ -1462,14 +1463,20 @@ static int dvb_net_do_ioctl(struct file *file,
11459 + struct net_device *netdev;
11460 + struct dvb_net_priv *priv_data;
11461 + struct dvb_net_if *dvbnetif = parg;
11462 ++ int if_num = dvbnetif->if_num;
11463 +
11464 +- if (dvbnetif->if_num >= DVB_NET_DEVICES_MAX ||
11465 +- !dvbnet->state[dvbnetif->if_num]) {
11466 ++ if (if_num >= DVB_NET_DEVICES_MAX) {
11467 + ret = -EINVAL;
11468 + goto ioctl_error;
11469 + }
11470 ++ if_num = array_index_nospec(if_num, DVB_NET_DEVICES_MAX);
11471 +
11472 +- netdev = dvbnet->device[dvbnetif->if_num];
11473 ++ if (!dvbnet->state[if_num]) {
11474 ++ ret = -EINVAL;
11475 ++ goto ioctl_error;
11476 ++ }
11477 ++
11478 ++ netdev = dvbnet->device[if_num];
11479 +
11480 + priv_data = netdev_priv(netdev);
11481 + dvbnetif->pid=priv_data->pid;
11482 +@@ -1522,14 +1529,20 @@ static int dvb_net_do_ioctl(struct file *file,
11483 + struct net_device *netdev;
11484 + struct dvb_net_priv *priv_data;
11485 + struct __dvb_net_if_old *dvbnetif = parg;
11486 ++ int if_num = dvbnetif->if_num;
11487 ++
11488 ++ if (if_num >= DVB_NET_DEVICES_MAX) {
11489 ++ ret = -EINVAL;
11490 ++ goto ioctl_error;
11491 ++ }
11492 ++ if_num = array_index_nospec(if_num, DVB_NET_DEVICES_MAX);
11493 +
11494 +- if (dvbnetif->if_num >= DVB_NET_DEVICES_MAX ||
11495 +- !dvbnet->state[dvbnetif->if_num]) {
11496 ++ if (!dvbnet->state[if_num]) {
11497 + ret = -EINVAL;
11498 + goto ioctl_error;
11499 + }
11500 +
11501 +- netdev = dvbnet->device[dvbnetif->if_num];
11502 ++ netdev = dvbnet->device[if_num];
11503 +
11504 + priv_data = netdev_priv(netdev);
11505 + dvbnetif->pid=priv_data->pid;
11506 +diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
11507 +index 3862ddc86ec48..795d9bfaba5cf 100644
11508 +--- a/drivers/media/dvb-core/dvbdev.c
11509 ++++ b/drivers/media/dvb-core/dvbdev.c
11510 +@@ -506,6 +506,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
11511 + break;
11512 +
11513 + if (minor == MAX_DVB_MINORS) {
11514 ++ list_del (&dvbdev->list_head);
11515 + kfree(dvbdevfops);
11516 + kfree(dvbdev);
11517 + up_write(&minor_rwsem);
11518 +@@ -526,6 +527,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
11519 + __func__);
11520 +
11521 + dvb_media_device_free(dvbdev);
11522 ++ list_del (&dvbdev->list_head);
11523 + kfree(dvbdevfops);
11524 + kfree(dvbdev);
11525 + mutex_unlock(&dvbdev_register_lock);
11526 +@@ -541,6 +543,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
11527 + pr_err("%s: failed to create device dvb%d.%s%d (%ld)\n",
11528 + __func__, adap->num, dnames[type], id, PTR_ERR(clsdev));
11529 + dvb_media_device_free(dvbdev);
11530 ++ list_del (&dvbdev->list_head);
11531 + kfree(dvbdevfops);
11532 + kfree(dvbdev);
11533 + return PTR_ERR(clsdev);
11534 +diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
11535 +index 9dc3f45da3dcd..b05f409014b2f 100644
11536 +--- a/drivers/media/i2c/ccs/ccs-core.c
11537 ++++ b/drivers/media/i2c/ccs/ccs-core.c
11538 +@@ -3093,7 +3093,7 @@ static int __maybe_unused ccs_suspend(struct device *dev)
11539 + if (rval < 0) {
11540 + pm_runtime_put_noidle(dev);
11541 +
11542 +- return -EAGAIN;
11543 ++ return rval;
11544 + }
11545 +
11546 + if (sensor->streaming)
11547 +diff --git a/drivers/media/i2c/imx334.c b/drivers/media/i2c/imx334.c
11548 +index 047aa7658d217..23f28606e570f 100644
11549 +--- a/drivers/media/i2c/imx334.c
11550 ++++ b/drivers/media/i2c/imx334.c
11551 +@@ -717,9 +717,9 @@ static int imx334_set_stream(struct v4l2_subdev *sd, int enable)
11552 + }
11553 +
11554 + if (enable) {
11555 +- ret = pm_runtime_get_sync(imx334->dev);
11556 +- if (ret)
11557 +- goto error_power_off;
11558 ++ ret = pm_runtime_resume_and_get(imx334->dev);
11559 ++ if (ret < 0)
11560 ++ goto error_unlock;
11561 +
11562 + ret = imx334_start_streaming(imx334);
11563 + if (ret)
11564 +@@ -737,6 +737,7 @@ static int imx334_set_stream(struct v4l2_subdev *sd, int enable)
11565 +
11566 + error_power_off:
11567 + pm_runtime_put(imx334->dev);
11568 ++error_unlock:
11569 + mutex_unlock(&imx334->mutex);
11570 +
11571 + return ret;
11572 +diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
11573 +index e8119ad0bc71d..92376592455ee 100644
11574 +--- a/drivers/media/i2c/ir-kbd-i2c.c
11575 ++++ b/drivers/media/i2c/ir-kbd-i2c.c
11576 +@@ -678,8 +678,8 @@ static int zilog_tx(struct rc_dev *rcdev, unsigned int *txbuf,
11577 + goto out_unlock;
11578 + }
11579 +
11580 +- i = i2c_master_recv(ir->tx_c, buf, 1);
11581 +- if (i != 1) {
11582 ++ ret = i2c_master_recv(ir->tx_c, buf, 1);
11583 ++ if (ret != 1) {
11584 + dev_err(&ir->rc->dev, "i2c_master_recv failed with %d\n", ret);
11585 + ret = -EIO;
11586 + goto out_unlock;
11587 +diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
11588 +index 42f64175a6dff..fb78a1cedc03b 100644
11589 +--- a/drivers/media/i2c/ov2659.c
11590 ++++ b/drivers/media/i2c/ov2659.c
11591 +@@ -204,6 +204,7 @@ struct ov2659 {
11592 + struct i2c_client *client;
11593 + struct v4l2_ctrl_handler ctrls;
11594 + struct v4l2_ctrl *link_frequency;
11595 ++ struct clk *clk;
11596 + const struct ov2659_framesize *frame_size;
11597 + struct sensor_register *format_ctrl_regs;
11598 + struct ov2659_pll_ctrl pll;
11599 +@@ -1270,6 +1271,8 @@ static int ov2659_power_off(struct device *dev)
11600 +
11601 + gpiod_set_value(ov2659->pwdn_gpio, 1);
11602 +
11603 ++ clk_disable_unprepare(ov2659->clk);
11604 ++
11605 + return 0;
11606 + }
11607 +
11608 +@@ -1278,9 +1281,17 @@ static int ov2659_power_on(struct device *dev)
11609 + struct i2c_client *client = to_i2c_client(dev);
11610 + struct v4l2_subdev *sd = i2c_get_clientdata(client);
11611 + struct ov2659 *ov2659 = to_ov2659(sd);
11612 ++ int ret;
11613 +
11614 + dev_dbg(&client->dev, "%s:\n", __func__);
11615 +
11616 ++ ret = clk_prepare_enable(ov2659->clk);
11617 ++ if (ret) {
11618 ++ dev_err(&client->dev, "%s: failed to enable clock\n",
11619 ++ __func__);
11620 ++ return ret;
11621 ++ }
11622 ++
11623 + gpiod_set_value(ov2659->pwdn_gpio, 0);
11624 +
11625 + if (ov2659->resetb_gpio) {
11626 +@@ -1425,7 +1436,6 @@ static int ov2659_probe(struct i2c_client *client)
11627 + const struct ov2659_platform_data *pdata = ov2659_get_pdata(client);
11628 + struct v4l2_subdev *sd;
11629 + struct ov2659 *ov2659;
11630 +- struct clk *clk;
11631 + int ret;
11632 +
11633 + if (!pdata) {
11634 +@@ -1440,11 +1450,11 @@ static int ov2659_probe(struct i2c_client *client)
11635 + ov2659->pdata = pdata;
11636 + ov2659->client = client;
11637 +
11638 +- clk = devm_clk_get(&client->dev, "xvclk");
11639 +- if (IS_ERR(clk))
11640 +- return PTR_ERR(clk);
11641 ++ ov2659->clk = devm_clk_get(&client->dev, "xvclk");
11642 ++ if (IS_ERR(ov2659->clk))
11643 ++ return PTR_ERR(ov2659->clk);
11644 +
11645 +- ov2659->xvclk_frequency = clk_get_rate(clk);
11646 ++ ov2659->xvclk_frequency = clk_get_rate(ov2659->clk);
11647 + if (ov2659->xvclk_frequency < 6000000 ||
11648 + ov2659->xvclk_frequency > 27000000)
11649 + return -EINVAL;
11650 +@@ -1506,7 +1516,9 @@ static int ov2659_probe(struct i2c_client *client)
11651 + ov2659->frame_size = &ov2659_framesizes[2];
11652 + ov2659->format_ctrl_regs = ov2659_formats[0].format_ctrl_regs;
11653 +
11654 +- ov2659_power_on(&client->dev);
11655 ++ ret = ov2659_power_on(&client->dev);
11656 ++ if (ret < 0)
11657 ++ goto error;
11658 +
11659 + ret = ov2659_detect(sd);
11660 + if (ret < 0)
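
The ov2659 change stores the devm-acquired clock in the driver state so the power callbacks can gate it, pairing clk_prepare_enable() in power-on with clk_disable_unprepare() in power-off. A hedged sketch of that pairing (struct and function names are placeholders, not the driver's actual code):

#include <linux/clk.h>
#include <linux/device.h>

/* Hypothetical state; keeping the clk handle here is the point of the fix. */
struct my_cam {
	struct clk *clk;
};

static int my_cam_power_on(struct device *dev, struct my_cam *cam)
{
	int ret;

	ret = clk_prepare_enable(cam->clk);
	if (ret) {
		dev_err(dev, "failed to enable clock\n");
		return ret;
	}

	/* ...GPIO power-up / reset sequencing would follow here... */
	return 0;
}

static int my_cam_power_off(struct my_cam *cam)
{
	/* ...GPIO power-down first, then balance the enable above. */
	clk_disable_unprepare(cam->clk);
	return 0;
}
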
11661 +diff --git a/drivers/media/i2c/rdacm21.c b/drivers/media/i2c/rdacm21.c
11662 +index 179d107f494ca..50e2af5227603 100644
11663 +--- a/drivers/media/i2c/rdacm21.c
11664 ++++ b/drivers/media/i2c/rdacm21.c
11665 +@@ -69,6 +69,7 @@
11666 + #define OV490_ISP_VSIZE_LOW 0x80820062
11667 + #define OV490_ISP_VSIZE_HIGH 0x80820063
11668 +
11669 ++#define OV10640_PID_TIMEOUT 20
11670 + #define OV10640_ID_HIGH 0xa6
11671 + #define OV10640_CHIP_ID 0x300a
11672 + #define OV10640_PIXEL_RATE 55000000
11673 +@@ -329,30 +330,51 @@ static const struct v4l2_subdev_ops rdacm21_subdev_ops = {
11674 + .pad = &rdacm21_subdev_pad_ops,
11675 + };
11676 +
11677 +-static int ov10640_initialize(struct rdacm21_device *dev)
11678 ++static void ov10640_power_up(struct rdacm21_device *dev)
11679 + {
11680 +- u8 val;
11681 +-
11682 +- /* Power-up OV10640 by setting RESETB and PWDNB pins high. */
11683 ++ /* Enable GPIO0#0 (reset) and GPIO1#0 (pwdn) as output lines. */
11684 + ov490_write_reg(dev, OV490_GPIO_SEL0, OV490_GPIO0);
11685 + ov490_write_reg(dev, OV490_GPIO_SEL1, OV490_SPWDN0);
11686 + ov490_write_reg(dev, OV490_GPIO_DIRECTION0, OV490_GPIO0);
11687 + ov490_write_reg(dev, OV490_GPIO_DIRECTION1, OV490_SPWDN0);
11688 ++
11689 ++ /* Power up OV10640 and then reset it. */
11690 ++ ov490_write_reg(dev, OV490_GPIO_OUTPUT_VALUE1, OV490_SPWDN0);
11691 ++ usleep_range(1500, 3000);
11692 ++
11693 ++ ov490_write_reg(dev, OV490_GPIO_OUTPUT_VALUE0, 0x00);
11694 ++ usleep_range(1500, 3000);
11695 + ov490_write_reg(dev, OV490_GPIO_OUTPUT_VALUE0, OV490_GPIO0);
11696 +- ov490_write_reg(dev, OV490_GPIO_OUTPUT_VALUE0, OV490_SPWDN0);
11697 + usleep_range(3000, 5000);
11698 ++}
11699 +
11700 +- /* Read OV10640 ID to test communications. */
11701 +- ov490_write_reg(dev, OV490_SCCB_SLAVE0_DIR, OV490_SCCB_SLAVE_READ);
11702 +- ov490_write_reg(dev, OV490_SCCB_SLAVE0_ADDR_HIGH, OV10640_CHIP_ID >> 8);
11703 +- ov490_write_reg(dev, OV490_SCCB_SLAVE0_ADDR_LOW, OV10640_CHIP_ID & 0xff);
11704 +-
11705 +- /* Trigger SCCB slave transaction and give it some time to complete. */
11706 +- ov490_write_reg(dev, OV490_HOST_CMD, OV490_HOST_CMD_TRIGGER);
11707 +- usleep_range(1000, 1500);
11708 ++static int ov10640_check_id(struct rdacm21_device *dev)
11709 ++{
11710 ++ unsigned int i;
11711 ++ u8 val;
11712 +
11713 +- ov490_read_reg(dev, OV490_SCCB_SLAVE0_DIR, &val);
11714 +- if (val != OV10640_ID_HIGH) {
11715 ++ /* Read OV10640 ID to test communications. */
11716 ++ for (i = 0; i < OV10640_PID_TIMEOUT; ++i) {
11717 ++ ov490_write_reg(dev, OV490_SCCB_SLAVE0_DIR,
11718 ++ OV490_SCCB_SLAVE_READ);
11719 ++ ov490_write_reg(dev, OV490_SCCB_SLAVE0_ADDR_HIGH,
11720 ++ OV10640_CHIP_ID >> 8);
11721 ++ ov490_write_reg(dev, OV490_SCCB_SLAVE0_ADDR_LOW,
11722 ++ OV10640_CHIP_ID & 0xff);
11723 ++
11724 ++ /*
11725 ++ * Trigger SCCB slave transaction and give it some time
11726 ++ * to complete.
11727 ++ */
11728 ++ ov490_write_reg(dev, OV490_HOST_CMD, OV490_HOST_CMD_TRIGGER);
11729 ++ usleep_range(1000, 1500);
11730 ++
11731 ++ ov490_read_reg(dev, OV490_SCCB_SLAVE0_DIR, &val);
11732 ++ if (val == OV10640_ID_HIGH)
11733 ++ break;
11734 ++ usleep_range(1000, 1500);
11735 ++ }
11736 ++ if (i == OV10640_PID_TIMEOUT) {
11737 + dev_err(dev->dev, "OV10640 ID mismatch: (0x%02x)\n", val);
11738 + return -ENODEV;
11739 + }
11740 +@@ -368,6 +390,8 @@ static int ov490_initialize(struct rdacm21_device *dev)
11741 + unsigned int i;
11742 + int ret;
11743 +
11744 ++ ov10640_power_up(dev);
11745 ++
11746 + /*
11747 + * Read OV490 Id to test communications. Give it up to 40msec to
11748 + * exit from reset.
11749 +@@ -405,7 +429,7 @@ static int ov490_initialize(struct rdacm21_device *dev)
11750 + return -ENODEV;
11751 + }
11752 +
11753 +- ret = ov10640_initialize(dev);
11754 ++ ret = ov10640_check_id(dev);
11755 + if (ret)
11756 + return ret;
11757 +
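
The rdacm21 rework splits the power-up sequencing out of the ID check and retries the ID read up to OV10640_PID_TIMEOUT times instead of reading once. A generic sketch of that bounded-retry idiom (my_read_id and the constants are placeholders):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

#define MY_ID_RETRIES	20	/* mirrors OV10640_PID_TIMEOUT */
#define MY_CHIP_ID	0xa6

struct my_chip;
u8 my_read_id(struct my_chip *chip);	/* placeholder register read */

static int my_check_id(struct my_chip *chip)
{
	unsigned int i;
	u8 val = 0;

	for (i = 0; i < MY_ID_RETRIES; i++) {
		val = my_read_id(chip);
		if (val == MY_CHIP_ID)
			break;
		usleep_range(1000, 1500);
	}

	/* Fell through the loop: the chip never answered with its ID. */
	if (i == MY_ID_RETRIES)
		return -ENODEV;

	return 0;
}
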
11758 +diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
11759 +index 5b4c4a3547c93..71804a70bc6d7 100644
11760 +--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
11761 ++++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
11762 +@@ -1386,7 +1386,7 @@ static int __s5c73m3_power_on(struct s5c73m3 *state)
11763 + s5c73m3_gpio_deassert(state, STBY);
11764 + usleep_range(100, 200);
11765 +
11766 +- s5c73m3_gpio_deassert(state, RST);
11767 ++ s5c73m3_gpio_deassert(state, RSET);
11768 + usleep_range(50, 100);
11769 +
11770 + return 0;
11771 +@@ -1401,7 +1401,7 @@ static int __s5c73m3_power_off(struct s5c73m3 *state)
11772 + {
11773 + int i, ret;
11774 +
11775 +- if (s5c73m3_gpio_assert(state, RST))
11776 ++ if (s5c73m3_gpio_assert(state, RSET))
11777 + usleep_range(10, 50);
11778 +
11779 + if (s5c73m3_gpio_assert(state, STBY))
11780 +@@ -1606,7 +1606,7 @@ static int s5c73m3_get_platform_data(struct s5c73m3 *state)
11781 +
11782 + state->mclk_frequency = pdata->mclk_frequency;
11783 + state->gpio[STBY] = pdata->gpio_stby;
11784 +- state->gpio[RST] = pdata->gpio_reset;
11785 ++ state->gpio[RSET] = pdata->gpio_reset;
11786 + return 0;
11787 + }
11788 +
11789 +diff --git a/drivers/media/i2c/s5c73m3/s5c73m3.h b/drivers/media/i2c/s5c73m3/s5c73m3.h
11790 +index ef7e85b34263b..c3fcfdd3ea66d 100644
11791 +--- a/drivers/media/i2c/s5c73m3/s5c73m3.h
11792 ++++ b/drivers/media/i2c/s5c73m3/s5c73m3.h
11793 +@@ -353,7 +353,7 @@ struct s5c73m3_ctrls {
11794 +
11795 + enum s5c73m3_gpio_id {
11796 + STBY,
11797 +- RST,
11798 ++ RSET,
11799 + GPIO_NUM,
11800 + };
11801 +
11802 +diff --git a/drivers/media/i2c/s5k4ecgx.c b/drivers/media/i2c/s5k4ecgx.c
11803 +index b2d53417badf6..4e97309a67f41 100644
11804 +--- a/drivers/media/i2c/s5k4ecgx.c
11805 ++++ b/drivers/media/i2c/s5k4ecgx.c
11806 +@@ -173,7 +173,7 @@ static const char * const s5k4ecgx_supply_names[] = {
11807 +
11808 + enum s5k4ecgx_gpio_id {
11809 + STBY,
11810 +- RST,
11811 ++ RSET,
11812 + GPIO_NUM,
11813 + };
11814 +
11815 +@@ -476,7 +476,7 @@ static int __s5k4ecgx_power_on(struct s5k4ecgx *priv)
11816 + if (s5k4ecgx_gpio_set_value(priv, STBY, priv->gpio[STBY].level))
11817 + usleep_range(30, 50);
11818 +
11819 +- if (s5k4ecgx_gpio_set_value(priv, RST, priv->gpio[RST].level))
11820 ++ if (s5k4ecgx_gpio_set_value(priv, RSET, priv->gpio[RSET].level))
11821 + usleep_range(30, 50);
11822 +
11823 + return 0;
11824 +@@ -484,7 +484,7 @@ static int __s5k4ecgx_power_on(struct s5k4ecgx *priv)
11825 +
11826 + static int __s5k4ecgx_power_off(struct s5k4ecgx *priv)
11827 + {
11828 +- if (s5k4ecgx_gpio_set_value(priv, RST, !priv->gpio[RST].level))
11829 ++ if (s5k4ecgx_gpio_set_value(priv, RSET, !priv->gpio[RSET].level))
11830 + usleep_range(30, 50);
11831 +
11832 + if (s5k4ecgx_gpio_set_value(priv, STBY, !priv->gpio[STBY].level))
11833 +@@ -872,7 +872,7 @@ static int s5k4ecgx_config_gpios(struct s5k4ecgx *priv,
11834 + int ret;
11835 +
11836 + priv->gpio[STBY].gpio = -EINVAL;
11837 +- priv->gpio[RST].gpio = -EINVAL;
11838 ++ priv->gpio[RSET].gpio = -EINVAL;
11839 +
11840 + ret = s5k4ecgx_config_gpio(gpio->gpio, gpio->level, "S5K4ECGX_STBY");
11841 +
11842 +@@ -891,7 +891,7 @@ static int s5k4ecgx_config_gpios(struct s5k4ecgx *priv,
11843 + s5k4ecgx_free_gpios(priv);
11844 + return ret;
11845 + }
11846 +- priv->gpio[RST] = *gpio;
11847 ++ priv->gpio[RSET] = *gpio;
11848 + if (gpio_is_valid(gpio->gpio))
11849 + gpio_set_value(gpio->gpio, 0);
11850 +
11851 +diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c
11852 +index 6e702b57c37da..bc560817e5046 100644
11853 +--- a/drivers/media/i2c/s5k5baf.c
11854 ++++ b/drivers/media/i2c/s5k5baf.c
11855 +@@ -235,7 +235,7 @@ struct s5k5baf_gpio {
11856 +
11857 + enum s5k5baf_gpio_id {
11858 + STBY,
11859 +- RST,
11860 ++ RSET,
11861 + NUM_GPIOS,
11862 + };
11863 +
11864 +@@ -969,7 +969,7 @@ static int s5k5baf_power_on(struct s5k5baf *state)
11865 +
11866 + s5k5baf_gpio_deassert(state, STBY);
11867 + usleep_range(50, 100);
11868 +- s5k5baf_gpio_deassert(state, RST);
11869 ++ s5k5baf_gpio_deassert(state, RSET);
11870 + return 0;
11871 +
11872 + err_reg_dis:
11873 +@@ -987,7 +987,7 @@ static int s5k5baf_power_off(struct s5k5baf *state)
11874 + state->apply_cfg = 0;
11875 + state->apply_crop = 0;
11876 +
11877 +- s5k5baf_gpio_assert(state, RST);
11878 ++ s5k5baf_gpio_assert(state, RSET);
11879 + s5k5baf_gpio_assert(state, STBY);
11880 +
11881 + if (!IS_ERR(state->clock))
11882 +diff --git a/drivers/media/i2c/s5k6aa.c b/drivers/media/i2c/s5k6aa.c
11883 +index 038e385007601..e9be7323a22e9 100644
11884 +--- a/drivers/media/i2c/s5k6aa.c
11885 ++++ b/drivers/media/i2c/s5k6aa.c
11886 +@@ -177,7 +177,7 @@ static const char * const s5k6aa_supply_names[] = {
11887 +
11888 + enum s5k6aa_gpio_id {
11889 + STBY,
11890 +- RST,
11891 ++ RSET,
11892 + GPIO_NUM,
11893 + };
11894 +
11895 +@@ -841,7 +841,7 @@ static int __s5k6aa_power_on(struct s5k6aa *s5k6aa)
11896 + ret = s5k6aa->s_power(1);
11897 + usleep_range(4000, 5000);
11898 +
11899 +- if (s5k6aa_gpio_deassert(s5k6aa, RST))
11900 ++ if (s5k6aa_gpio_deassert(s5k6aa, RSET))
11901 + msleep(20);
11902 +
11903 + return ret;
11904 +@@ -851,7 +851,7 @@ static int __s5k6aa_power_off(struct s5k6aa *s5k6aa)
11905 + {
11906 + int ret;
11907 +
11908 +- if (s5k6aa_gpio_assert(s5k6aa, RST))
11909 ++ if (s5k6aa_gpio_assert(s5k6aa, RSET))
11910 + usleep_range(100, 150);
11911 +
11912 + if (s5k6aa->s_power) {
11913 +@@ -1510,7 +1510,7 @@ static int s5k6aa_configure_gpios(struct s5k6aa *s5k6aa,
11914 + int ret;
11915 +
11916 + s5k6aa->gpio[STBY].gpio = -EINVAL;
11917 +- s5k6aa->gpio[RST].gpio = -EINVAL;
11918 ++ s5k6aa->gpio[RSET].gpio = -EINVAL;
11919 +
11920 + gpio = &pdata->gpio_stby;
11921 + if (gpio_is_valid(gpio->gpio)) {
11922 +@@ -1533,7 +1533,7 @@ static int s5k6aa_configure_gpios(struct s5k6aa *s5k6aa,
11923 + if (ret < 0)
11924 + return ret;
11925 +
11926 +- s5k6aa->gpio[RST] = *gpio;
11927 ++ s5k6aa->gpio[RSET] = *gpio;
11928 + }
11929 +
11930 + return 0;
11931 +diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
11932 +index 1b309bb743c7b..f21da11caf224 100644
11933 +--- a/drivers/media/i2c/tc358743.c
11934 ++++ b/drivers/media/i2c/tc358743.c
11935 +@@ -1974,6 +1974,7 @@ static int tc358743_probe_of(struct tc358743_state *state)
11936 + bps_pr_lane = 2 * endpoint.link_frequencies[0];
11937 + if (bps_pr_lane < 62500000U || bps_pr_lane > 1000000000U) {
11938 + dev_err(dev, "unsupported bps per lane: %u bps\n", bps_pr_lane);
11939 ++ ret = -EINVAL;
11940 + goto disable_clk;
11941 + }
11942 +
11943 +diff --git a/drivers/media/mc/Makefile b/drivers/media/mc/Makefile
11944 +index 119037f0e686d..2b7af42ba59c1 100644
11945 +--- a/drivers/media/mc/Makefile
11946 ++++ b/drivers/media/mc/Makefile
11947 +@@ -3,7 +3,7 @@
11948 + mc-objs := mc-device.o mc-devnode.o mc-entity.o \
11949 + mc-request.o
11950 +
11951 +-ifeq ($(CONFIG_USB),y)
11952 ++ifneq ($(CONFIG_USB),)
11953 + mc-objs += mc-dev-allocator.o
11954 + endif
11955 +
11956 +diff --git a/drivers/media/pci/bt8xx/bt878.c b/drivers/media/pci/bt8xx/bt878.c
11957 +index 78dd35c9b65d7..90972d6952f1c 100644
11958 +--- a/drivers/media/pci/bt8xx/bt878.c
11959 ++++ b/drivers/media/pci/bt8xx/bt878.c
11960 +@@ -300,7 +300,8 @@ static irqreturn_t bt878_irq(int irq, void *dev_id)
11961 + }
11962 + if (astat & BT878_ARISCI) {
11963 + bt->finished_block = (stat & BT878_ARISCS) >> 28;
11964 +- tasklet_schedule(&bt->tasklet);
11965 ++ if (bt->tasklet.callback)
11966 ++ tasklet_schedule(&bt->tasklet);
11967 + break;
11968 + }
11969 + count++;
11970 +@@ -477,6 +478,9 @@ static int bt878_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
11971 + btwrite(0, BT878_AINT_MASK);
11972 + bt878_num++;
11973 +
11974 ++ if (!bt->tasklet.func)
11975 ++ tasklet_disable(&bt->tasklet);
11976 ++
11977 + return 0;
11978 +
11979 + fail2:
11980 +diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
11981 +index 839503e654f46..16af58f2f93cc 100644
11982 +--- a/drivers/media/pci/cobalt/cobalt-driver.c
11983 ++++ b/drivers/media/pci/cobalt/cobalt-driver.c
11984 +@@ -667,6 +667,7 @@ static int cobalt_probe(struct pci_dev *pci_dev,
11985 + return -ENOMEM;
11986 + cobalt->pci_dev = pci_dev;
11987 + cobalt->instance = i;
11988 ++ mutex_init(&cobalt->pci_lock);
11989 +
11990 + retval = v4l2_device_register(&pci_dev->dev, &cobalt->v4l2_dev);
11991 + if (retval) {
11992 +diff --git a/drivers/media/pci/cobalt/cobalt-driver.h b/drivers/media/pci/cobalt/cobalt-driver.h
11993 +index bca68572b3242..12c33e035904c 100644
11994 +--- a/drivers/media/pci/cobalt/cobalt-driver.h
11995 ++++ b/drivers/media/pci/cobalt/cobalt-driver.h
11996 +@@ -251,6 +251,8 @@ struct cobalt {
11997 + int instance;
11998 + struct pci_dev *pci_dev;
11999 + struct v4l2_device v4l2_dev;
12000 ++ /* serialize PCI access in cobalt_s_bit_sysctrl() */
12001 ++ struct mutex pci_lock;
12002 +
12003 + void __iomem *bar0, *bar1;
12004 +
12005 +@@ -320,10 +322,13 @@ static inline u32 cobalt_g_sysctrl(struct cobalt *cobalt)
12006 + static inline void cobalt_s_bit_sysctrl(struct cobalt *cobalt,
12007 + int bit, int val)
12008 + {
12009 +- u32 ctrl = cobalt_read_bar1(cobalt, COBALT_SYS_CTRL_BASE);
12010 ++ u32 ctrl;
12011 +
12012 ++ mutex_lock(&cobalt->pci_lock);
12013 ++ ctrl = cobalt_read_bar1(cobalt, COBALT_SYS_CTRL_BASE);
12014 + cobalt_write_bar1(cobalt, COBALT_SYS_CTRL_BASE,
12015 + (ctrl & ~(1UL << bit)) | (val << bit));
12016 ++ mutex_unlock(&cobalt->pci_lock);
12017 + }
12018 +
12019 + static inline u32 cobalt_g_sysstat(struct cobalt *cobalt)
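
The cobalt fix serializes a read-modify-write of a shared control register behind a new mutex, since two concurrent callers could otherwise lose each other's bit updates. A minimal sketch of the hazard and the fix (the shadow field stands in for real MMIO access):

#include <linux/mutex.h>
#include <linux/types.h>

struct my_hw {
	struct mutex reg_lock;	/* serializes ctrl-register updates */
	u32 ctrl;		/* stands in for the real MMIO register */
};

static void my_set_ctrl_bit(struct my_hw *hw, int bit, int val)
{
	u32 ctrl;

	/*
	 * Read-modify-write: without the lock, two concurrent callers
	 * could both read the old value and one write would silently
	 * undo the other's bit change.
	 */
	mutex_lock(&hw->reg_lock);
	ctrl = hw->ctrl;
	hw->ctrl = (ctrl & ~(1UL << bit)) | ((u32)val << bit);
	mutex_unlock(&hw->reg_lock);
}
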
12020 +diff --git a/drivers/media/pci/intel/ipu3/cio2-bridge.c b/drivers/media/pci/intel/ipu3/cio2-bridge.c
12021 +index e8511787c1e43..4657e99df0339 100644
12022 +--- a/drivers/media/pci/intel/ipu3/cio2-bridge.c
12023 ++++ b/drivers/media/pci/intel/ipu3/cio2-bridge.c
12024 +@@ -173,14 +173,15 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
12025 + int ret;
12026 +
12027 + for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) {
12028 +- if (!adev->status.enabled)
12029 ++ if (!adev->status.enabled) {
12030 ++ acpi_dev_put(adev);
12031 + continue;
12032 ++ }
12033 +
12034 + if (bridge->n_sensors >= CIO2_NUM_PORTS) {
12035 ++ acpi_dev_put(adev);
12036 + dev_err(&cio2->dev, "Exceeded available CIO2 ports\n");
12037 +- cio2_bridge_unregister_sensors(bridge);
12038 +- ret = -EINVAL;
12039 +- goto err_out;
12040 ++ return -EINVAL;
12041 + }
12042 +
12043 + sensor = &bridge->sensors[bridge->n_sensors];
12044 +@@ -228,7 +229,6 @@ err_free_swnodes:
12045 + software_node_unregister_nodes(sensor->swnodes);
12046 + err_put_adev:
12047 + acpi_dev_put(sensor->adev);
12048 +-err_out:
12049 + return ret;
12050 + }
12051 +
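
The cio2-bridge hunk makes each early exit from the sensor loop drop the device reference the iteration is holding; otherwise a skipped or over-limit device leaks a reference. A generic sketch of the rule, deliberately not tied to the ACPI iterator's exact semantics (all helpers here are hypothetical):

#include <linux/errno.h>
#include <linux/types.h>

#define MY_MAX_PORTS	4

struct my_obj;
struct my_obj *my_get(unsigned int i);	/* returns with a reference held */
void my_put(struct my_obj *obj);
bool my_enabled(struct my_obj *obj);
unsigned int my_count(void);

static int my_collect(struct my_obj **slots)
{
	unsigned int i, n = 0;
	struct my_obj *obj;

	for (i = 0; i < my_count(); i++) {
		obj = my_get(i);
		if (!obj)
			continue;

		if (!my_enabled(obj)) {
			my_put(obj);	/* skipping: drop the reference */
			continue;
		}

		if (n >= MY_MAX_PORTS) {
			my_put(obj);	/* bailing out: drop it here too */
			return -EINVAL;
		}

		slots[n++] = obj;	/* kept: ownership moves to the table */
	}

	return 0;
}
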
12052 +diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
12053 +index 6cdc77dda0e49..1c9cb9e05fdf6 100644
12054 +--- a/drivers/media/platform/am437x/am437x-vpfe.c
12055 ++++ b/drivers/media/platform/am437x/am437x-vpfe.c
12056 +@@ -1021,7 +1021,9 @@ static int vpfe_initialize_device(struct vpfe_device *vpfe)
12057 + if (ret)
12058 + return ret;
12059 +
12060 +- pm_runtime_get_sync(vpfe->pdev);
12061 ++ ret = pm_runtime_resume_and_get(vpfe->pdev);
12062 ++ if (ret < 0)
12063 ++ return ret;
12064 +
12065 + vpfe_config_enable(&vpfe->ccdc, 1);
12066 +
12067 +@@ -2443,7 +2445,11 @@ static int vpfe_probe(struct platform_device *pdev)
12068 + pm_runtime_enable(&pdev->dev);
12069 +
12070 + /* for now just enable it here instead of waiting for the open */
12071 +- pm_runtime_get_sync(&pdev->dev);
12072 ++ ret = pm_runtime_resume_and_get(&pdev->dev);
12073 ++ if (ret < 0) {
12074 ++ vpfe_err(vpfe, "Unable to resume device.\n");
12075 ++ goto probe_out_v4l2_unregister;
12076 ++ }
12077 +
12078 + vpfe_ccdc_config_defaults(ccdc);
12079 +
12080 +@@ -2530,6 +2536,11 @@ static int vpfe_suspend(struct device *dev)
12081 +
12082 + /* only do full suspend if streaming has started */
12083 + if (vb2_start_streaming_called(&vpfe->buffer_queue)) {
12084 ++ /*
12085 ++ * Ignore RPM resume errors here; it is already too late.
12086 ++ * Such a check should happen earlier, either at open()
12087 ++ * or just before streaming starts.
12088 ++ */
12089 + pm_runtime_get_sync(dev);
12090 + vpfe_config_enable(ccdc, 1);
12091 +
12092 +diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
12093 +index 27a3c92c73bce..f1cf847d1cc2d 100644
12094 +--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
12095 ++++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
12096 +@@ -56,10 +56,8 @@ static void __gsc_m2m_job_abort(struct gsc_ctx *ctx)
12097 + static int gsc_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
12098 + {
12099 + struct gsc_ctx *ctx = q->drv_priv;
12100 +- int ret;
12101 +
12102 +- ret = pm_runtime_get_sync(&ctx->gsc_dev->pdev->dev);
12103 +- return ret > 0 ? 0 : ret;
12104 ++ return pm_runtime_resume_and_get(&ctx->gsc_dev->pdev->dev);
12105 + }
12106 +
12107 + static void __gsc_m2m_cleanup_queue(struct gsc_ctx *ctx)
12108 +diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c
12109 +index 13c838d3f9473..0da36443173c1 100644
12110 +--- a/drivers/media/platform/exynos4-is/fimc-capture.c
12111 ++++ b/drivers/media/platform/exynos4-is/fimc-capture.c
12112 +@@ -478,11 +478,9 @@ static int fimc_capture_open(struct file *file)
12113 + goto unlock;
12114 +
12115 + set_bit(ST_CAPT_BUSY, &fimc->state);
12116 +- ret = pm_runtime_get_sync(&fimc->pdev->dev);
12117 +- if (ret < 0) {
12118 +- pm_runtime_put_sync(&fimc->pdev->dev);
12119 ++ ret = pm_runtime_resume_and_get(&fimc->pdev->dev);
12120 ++ if (ret < 0)
12121 + goto unlock;
12122 +- }
12123 +
12124 + ret = v4l2_fh_open(file);
12125 + if (ret) {
12126 +diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
12127 +index 972d9601d2360..1b24f5bfc4af4 100644
12128 +--- a/drivers/media/platform/exynos4-is/fimc-is.c
12129 ++++ b/drivers/media/platform/exynos4-is/fimc-is.c
12130 +@@ -828,9 +828,9 @@ static int fimc_is_probe(struct platform_device *pdev)
12131 + goto err_irq;
12132 + }
12133 +
12134 +- ret = pm_runtime_get_sync(dev);
12135 ++ ret = pm_runtime_resume_and_get(dev);
12136 + if (ret < 0)
12137 +- goto err_pm;
12138 ++ goto err_irq;
12139 +
12140 + vb2_dma_contig_set_max_seg_size(dev, DMA_BIT_MASK(32));
12141 +
12142 +diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c
12143 +index 612b9872afc87..83688a7982f70 100644
12144 +--- a/drivers/media/platform/exynos4-is/fimc-isp-video.c
12145 ++++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c
12146 +@@ -275,7 +275,7 @@ static int isp_video_open(struct file *file)
12147 + if (ret < 0)
12148 + goto unlock;
12149 +
12150 +- ret = pm_runtime_get_sync(&isp->pdev->dev);
12151 ++ ret = pm_runtime_resume_and_get(&isp->pdev->dev);
12152 + if (ret < 0)
12153 + goto rel_fh;
12154 +
12155 +@@ -293,7 +293,6 @@ static int isp_video_open(struct file *file)
12156 + if (!ret)
12157 + goto unlock;
12158 + rel_fh:
12159 +- pm_runtime_put_noidle(&isp->pdev->dev);
12160 + v4l2_fh_release(file);
12161 + unlock:
12162 + mutex_unlock(&isp->video_lock);
12163 +@@ -306,17 +305,20 @@ static int isp_video_release(struct file *file)
12164 + struct fimc_is_video *ivc = &isp->video_capture;
12165 + struct media_entity *entity = &ivc->ve.vdev.entity;
12166 + struct media_device *mdev = entity->graph_obj.mdev;
12167 ++ bool is_singular_file;
12168 +
12169 + mutex_lock(&isp->video_lock);
12170 +
12171 +- if (v4l2_fh_is_singular_file(file) && ivc->streaming) {
12172 ++ is_singular_file = v4l2_fh_is_singular_file(file);
12173 ++
12174 ++ if (is_singular_file && ivc->streaming) {
12175 + media_pipeline_stop(entity);
12176 + ivc->streaming = 0;
12177 + }
12178 +
12179 + _vb2_fop_release(file, NULL);
12180 +
12181 +- if (v4l2_fh_is_singular_file(file)) {
12182 ++ if (is_singular_file) {
12183 + fimc_pipeline_call(&ivc->ve, close);
12184 +
12185 + mutex_lock(&mdev->graph_mutex);
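
The isp_video_release() change caches v4l2_fh_is_singular_file() before calling _vb2_fop_release(), because this patch also makes v4l2_fh_release() clear file->private_data (see the v4l2-fh.c hunk later in this patch), so the predicate cannot be trusted after release. A condensed sketch of the hazard (names are illustrative):

#include <linux/fs.h>
#include <linux/types.h>

bool my_is_last_user(struct file *file);	/* placeholder predicates */
void my_fop_release(struct file *file);
void my_teardown_pipeline(void);

static int my_release(struct file *file)
{
	bool was_last;

	/*
	 * Sample the state *before* releasing the file handle: release
	 * clears file->private_data, so calling the predicate afterwards
	 * would read cleared (or freed) state.
	 */
	was_last = my_is_last_user(file);

	my_fop_release(file);

	if (was_last)
		my_teardown_pipeline();

	return 0;
}
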
12186 +diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c
12187 +index a77c49b185115..74b49d30901ed 100644
12188 +--- a/drivers/media/platform/exynos4-is/fimc-isp.c
12189 ++++ b/drivers/media/platform/exynos4-is/fimc-isp.c
12190 +@@ -304,11 +304,10 @@ static int fimc_isp_subdev_s_power(struct v4l2_subdev *sd, int on)
12191 + pr_debug("on: %d\n", on);
12192 +
12193 + if (on) {
12194 +- ret = pm_runtime_get_sync(&is->pdev->dev);
12195 +- if (ret < 0) {
12196 +- pm_runtime_put(&is->pdev->dev);
12197 ++ ret = pm_runtime_resume_and_get(&is->pdev->dev);
12198 ++ if (ret < 0)
12199 + return ret;
12200 +- }
12201 ++
12202 + set_bit(IS_ST_PWR_ON, &is->state);
12203 +
12204 + ret = fimc_is_start_firmware(is);
12205 +diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
12206 +index fe20af3a7178a..4d8b18078ff37 100644
12207 +--- a/drivers/media/platform/exynos4-is/fimc-lite.c
12208 ++++ b/drivers/media/platform/exynos4-is/fimc-lite.c
12209 +@@ -469,9 +469,9 @@ static int fimc_lite_open(struct file *file)
12210 + }
12211 +
12212 + set_bit(ST_FLITE_IN_USE, &fimc->state);
12213 +- ret = pm_runtime_get_sync(&fimc->pdev->dev);
12214 ++ ret = pm_runtime_resume_and_get(&fimc->pdev->dev);
12215 + if (ret < 0)
12216 +- goto err_pm;
12217 ++ goto err_in_use;
12218 +
12219 + ret = v4l2_fh_open(file);
12220 + if (ret < 0)
12221 +@@ -499,6 +499,7 @@ static int fimc_lite_open(struct file *file)
12222 + v4l2_fh_release(file);
12223 + err_pm:
12224 + pm_runtime_put_sync(&fimc->pdev->dev);
12225 ++err_in_use:
12226 + clear_bit(ST_FLITE_IN_USE, &fimc->state);
12227 + unlock:
12228 + mutex_unlock(&fimc->lock);
12229 +diff --git a/drivers/media/platform/exynos4-is/fimc-m2m.c b/drivers/media/platform/exynos4-is/fimc-m2m.c
12230 +index c9704a147e5cf..df8e2aa454d8f 100644
12231 +--- a/drivers/media/platform/exynos4-is/fimc-m2m.c
12232 ++++ b/drivers/media/platform/exynos4-is/fimc-m2m.c
12233 +@@ -73,17 +73,14 @@ static void fimc_m2m_shutdown(struct fimc_ctx *ctx)
12234 + static int start_streaming(struct vb2_queue *q, unsigned int count)
12235 + {
12236 + struct fimc_ctx *ctx = q->drv_priv;
12237 +- int ret;
12238 +
12239 +- ret = pm_runtime_get_sync(&ctx->fimc_dev->pdev->dev);
12240 +- return ret > 0 ? 0 : ret;
12241 ++ return pm_runtime_resume_and_get(&ctx->fimc_dev->pdev->dev);
12242 + }
12243 +
12244 + static void stop_streaming(struct vb2_queue *q)
12245 + {
12246 + struct fimc_ctx *ctx = q->drv_priv;
12247 +
12248 +-
12249 + fimc_m2m_shutdown(ctx);
12250 + fimc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
12251 + pm_runtime_put(&ctx->fimc_dev->pdev->dev);
12252 +diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
12253 +index 13d192ba4aa6e..3b8a24bb724c8 100644
12254 +--- a/drivers/media/platform/exynos4-is/media-dev.c
12255 ++++ b/drivers/media/platform/exynos4-is/media-dev.c
12256 +@@ -512,11 +512,9 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
12257 + if (!fmd->pmf)
12258 + return -ENXIO;
12259 +
12260 +- ret = pm_runtime_get_sync(fmd->pmf);
12261 +- if (ret < 0) {
12262 +- pm_runtime_put(fmd->pmf);
12263 ++ ret = pm_runtime_resume_and_get(fmd->pmf);
12264 ++ if (ret < 0)
12265 + return ret;
12266 +- }
12267 +
12268 + fmd->num_sensors = 0;
12269 +
12270 +@@ -1286,13 +1284,11 @@ static DEVICE_ATTR(subdev_conf_mode, S_IWUSR | S_IRUGO,
12271 + static int cam_clk_prepare(struct clk_hw *hw)
12272 + {
12273 + struct cam_clk *camclk = to_cam_clk(hw);
12274 +- int ret;
12275 +
12276 + if (camclk->fmd->pmf == NULL)
12277 + return -ENODEV;
12278 +
12279 +- ret = pm_runtime_get_sync(camclk->fmd->pmf);
12280 +- return ret < 0 ? ret : 0;
12281 ++ return pm_runtime_resume_and_get(camclk->fmd->pmf);
12282 + }
12283 +
12284 + static void cam_clk_unprepare(struct clk_hw *hw)
12285 +diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
12286 +index 1aac167abb175..ebf39c8568943 100644
12287 +--- a/drivers/media/platform/exynos4-is/mipi-csis.c
12288 ++++ b/drivers/media/platform/exynos4-is/mipi-csis.c
12289 +@@ -494,7 +494,7 @@ static int s5pcsis_s_power(struct v4l2_subdev *sd, int on)
12290 + struct device *dev = &state->pdev->dev;
12291 +
12292 + if (on)
12293 +- return pm_runtime_get_sync(dev);
12294 ++ return pm_runtime_resume_and_get(dev);
12295 +
12296 + return pm_runtime_put_sync(dev);
12297 + }
12298 +@@ -509,11 +509,9 @@ static int s5pcsis_s_stream(struct v4l2_subdev *sd, int enable)
12299 +
12300 + if (enable) {
12301 + s5pcsis_clear_counters(state);
12302 +- ret = pm_runtime_get_sync(&state->pdev->dev);
12303 +- if (ret && ret != 1) {
12304 +- pm_runtime_put_noidle(&state->pdev->dev);
12305 ++ ret = pm_runtime_resume_and_get(&state->pdev->dev);
12306 ++ if (ret < 0)
12307 + return ret;
12308 +- }
12309 + }
12310 +
12311 + mutex_lock(&state->lock);
12312 +@@ -535,7 +533,7 @@ unlock:
12313 + if (!enable)
12314 + pm_runtime_put(&state->pdev->dev);
12315 +
12316 +- return ret == 1 ? 0 : ret;
12317 ++ return ret;
12318 + }
12319 +
12320 + static int s5pcsis_enum_mbus_code(struct v4l2_subdev *sd,
12321 +diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
12322 +index 141bf5d97a044..ea87110d90738 100644
12323 +--- a/drivers/media/platform/marvell-ccic/mcam-core.c
12324 ++++ b/drivers/media/platform/marvell-ccic/mcam-core.c
12325 +@@ -918,6 +918,7 @@ static int mclk_enable(struct clk_hw *hw)
12326 + struct mcam_camera *cam = container_of(hw, struct mcam_camera, mclk_hw);
12327 + int mclk_src;
12328 + int mclk_div;
12329 ++ int ret;
12330 +
12331 + /*
12332 + * Clock the sensor appropriately. Controller clock should
12333 +@@ -931,7 +932,9 @@ static int mclk_enable(struct clk_hw *hw)
12334 + mclk_div = 2;
12335 + }
12336 +
12337 +- pm_runtime_get_sync(cam->dev);
12338 ++ ret = pm_runtime_resume_and_get(cam->dev);
12339 ++ if (ret < 0)
12340 ++ return ret;
12341 + clk_enable(cam->clk[0]);
12342 + mcam_reg_write(cam, REG_CLKCTRL, (mclk_src << 29) | mclk_div);
12343 + mcam_ctlr_power_up(cam);
12344 +@@ -1611,7 +1614,9 @@ static int mcam_v4l_open(struct file *filp)
12345 + ret = sensor_call(cam, core, s_power, 1);
12346 + if (ret)
12347 + goto out;
12348 +- pm_runtime_get_sync(cam->dev);
12349 ++ ret = pm_runtime_resume_and_get(cam->dev);
12350 ++ if (ret < 0)
12351 ++ goto out;
12352 + __mcam_cam_reset(cam);
12353 + mcam_set_config_needed(cam, 1);
12354 + }
12355 +diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
12356 +index ace4528cdc5ef..f14779e7596e5 100644
12357 +--- a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
12358 ++++ b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
12359 +@@ -391,12 +391,12 @@ static int mtk_mdp_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
12360 + struct mtk_mdp_ctx *ctx = q->drv_priv;
12361 + int ret;
12362 +
12363 +- ret = pm_runtime_get_sync(&ctx->mdp_dev->pdev->dev);
12364 ++ ret = pm_runtime_resume_and_get(&ctx->mdp_dev->pdev->dev);
12365 + if (ret < 0)
12366 +- mtk_mdp_dbg(1, "[%d] pm_runtime_get_sync failed:%d",
12367 ++ mtk_mdp_dbg(1, "[%d] pm_runtime_resume_and_get failed:%d",
12368 + ctx->id, ret);
12369 +
12370 +- return 0;
12371 ++ return ret;
12372 + }
12373 +
12374 + static void *mtk_mdp_m2m_buf_remove(struct mtk_mdp_ctx *ctx,
12375 +diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
12376 +index 147dfef1638d2..f87dc47d9e638 100644
12377 +--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
12378 ++++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
12379 +@@ -126,7 +126,9 @@ static int fops_vcodec_open(struct file *file)
12380 + mtk_vcodec_dec_set_default_params(ctx);
12381 +
12382 + if (v4l2_fh_is_singular(&ctx->fh)) {
12383 +- mtk_vcodec_dec_pw_on(&dev->pm);
12384 ++ ret = mtk_vcodec_dec_pw_on(&dev->pm);
12385 ++ if (ret < 0)
12386 ++ goto err_load_fw;
12387 + /*
12388 + * Does nothing if firmware was already loaded.
12389 + */
12390 +diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
12391 +index ddee7046ce422..6038db96f71c3 100644
12392 +--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
12393 ++++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
12394 +@@ -88,13 +88,15 @@ void mtk_vcodec_release_dec_pm(struct mtk_vcodec_dev *dev)
12395 + put_device(dev->pm.larbvdec);
12396 + }
12397 +
12398 +-void mtk_vcodec_dec_pw_on(struct mtk_vcodec_pm *pm)
12399 ++int mtk_vcodec_dec_pw_on(struct mtk_vcodec_pm *pm)
12400 + {
12401 + int ret;
12402 +
12403 +- ret = pm_runtime_get_sync(pm->dev);
12404 ++ ret = pm_runtime_resume_and_get(pm->dev);
12405 + if (ret)
12406 +- mtk_v4l2_err("pm_runtime_get_sync fail %d", ret);
12407 ++ mtk_v4l2_err("pm_runtime_resume_and_get fail %d", ret);
12408 ++
12409 ++ return ret;
12410 + }
12411 +
12412 + void mtk_vcodec_dec_pw_off(struct mtk_vcodec_pm *pm)
12413 +diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h
12414 +index 872d8bf8cfaf3..280aeaefdb651 100644
12415 +--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h
12416 ++++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h
12417 +@@ -12,7 +12,7 @@
12418 + int mtk_vcodec_init_dec_pm(struct mtk_vcodec_dev *dev);
12419 + void mtk_vcodec_release_dec_pm(struct mtk_vcodec_dev *dev);
12420 +
12421 +-void mtk_vcodec_dec_pw_on(struct mtk_vcodec_pm *pm);
12422 ++int mtk_vcodec_dec_pw_on(struct mtk_vcodec_pm *pm);
12423 + void mtk_vcodec_dec_pw_off(struct mtk_vcodec_pm *pm);
12424 + void mtk_vcodec_dec_clock_on(struct mtk_vcodec_pm *pm);
12425 + void mtk_vcodec_dec_clock_off(struct mtk_vcodec_pm *pm);
12426 +diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c
12427 +index c8a56271b259e..7c4428cf14e6d 100644
12428 +--- a/drivers/media/platform/mtk-vpu/mtk_vpu.c
12429 ++++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c
12430 +@@ -987,6 +987,12 @@ static int mtk_vpu_suspend(struct device *dev)
12431 + return ret;
12432 + }
12433 +
12434 ++ if (!vpu_running(vpu)) {
12435 ++ vpu_clock_disable(vpu);
12436 ++ clk_unprepare(vpu->clk);
12437 ++ return 0;
12438 ++ }
12439 ++
12440 + mutex_lock(&vpu->vpu_mutex);
12441 + /* disable vpu timer interrupt */
12442 + vpu_cfg_writel(vpu, vpu_cfg_readl(vpu, VPU_INT_STATUS) | VPU_IDLE_STATE,
12443 +diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
12444 +index 54bac7ec14c50..91b15842c5558 100644
12445 +--- a/drivers/media/platform/qcom/venus/core.c
12446 ++++ b/drivers/media/platform/qcom/venus/core.c
12447 +@@ -78,22 +78,32 @@ static const struct hfi_core_ops venus_core_ops = {
12448 + .event_notify = venus_event_notify,
12449 + };
12450 +
12451 ++#define RPM_WAIT_FOR_IDLE_MAX_ATTEMPTS 10
12452 ++
12453 + static void venus_sys_error_handler(struct work_struct *work)
12454 + {
12455 + struct venus_core *core =
12456 + container_of(work, struct venus_core, work.work);
12457 +- int ret = 0;
12458 +-
12459 +- pm_runtime_get_sync(core->dev);
12460 ++ int ret, i, max_attempts = RPM_WAIT_FOR_IDLE_MAX_ATTEMPTS;
12461 ++ const char *err_msg = "";
12462 ++ bool failed = false;
12463 ++
12464 ++ ret = pm_runtime_get_sync(core->dev);
12465 ++ if (ret < 0) {
12466 ++ err_msg = "resume runtime PM";
12467 ++ max_attempts = 0;
12468 ++ failed = true;
12469 ++ }
12470 +
12471 + hfi_core_deinit(core, true);
12472 +
12473 +- dev_warn(core->dev, "system error has occurred, starting recovery!\n");
12474 +-
12475 + mutex_lock(&core->lock);
12476 +
12477 +- while (pm_runtime_active(core->dev_dec) || pm_runtime_active(core->dev_enc))
12478 ++ for (i = 0; i < max_attempts; i++) {
12479 ++ if (!pm_runtime_active(core->dev_dec) && !pm_runtime_active(core->dev_enc))
12480 ++ break;
12481 + msleep(10);
12482 ++ }
12483 +
12484 + venus_shutdown(core);
12485 +
12486 +@@ -101,31 +111,55 @@ static void venus_sys_error_handler(struct work_struct *work)
12487 +
12488 + pm_runtime_put_sync(core->dev);
12489 +
12490 +- while (core->pmdomains[0] && pm_runtime_active(core->pmdomains[0]))
12491 ++ for (i = 0; i < max_attempts; i++) {
12492 ++ if (!core->pmdomains[0] || !pm_runtime_active(core->pmdomains[0]))
12493 ++ break;
12494 + usleep_range(1000, 1500);
12495 ++ }
12496 +
12497 + hfi_reinit(core);
12498 +
12499 +- pm_runtime_get_sync(core->dev);
12500 ++ ret = pm_runtime_get_sync(core->dev);
12501 ++ if (ret < 0) {
12502 ++ err_msg = "resume runtime PM";
12503 ++ failed = true;
12504 ++ }
12505 +
12506 +- ret |= venus_boot(core);
12507 +- ret |= hfi_core_resume(core, true);
12508 ++ ret = venus_boot(core);
12509 ++ if (ret && !failed) {
12510 ++ err_msg = "boot Venus";
12511 ++ failed = true;
12512 ++ }
12513 ++
12514 ++ ret = hfi_core_resume(core, true);
12515 ++ if (ret && !failed) {
12516 ++ err_msg = "resume HFI";
12517 ++ failed = true;
12518 ++ }
12519 +
12520 + enable_irq(core->irq);
12521 +
12522 + mutex_unlock(&core->lock);
12523 +
12524 +- ret |= hfi_core_init(core);
12525 ++ ret = hfi_core_init(core);
12526 ++ if (ret && !failed) {
12527 ++ err_msg = "init HFI";
12528 ++ failed = true;
12529 ++ }
12530 +
12531 + pm_runtime_put_sync(core->dev);
12532 +
12533 +- if (ret) {
12534 ++ if (failed) {
12535 + disable_irq_nosync(core->irq);
12536 +- dev_warn(core->dev, "recovery failed (%d)\n", ret);
12537 ++ dev_warn_ratelimited(core->dev,
12538 ++ "System error has occurred, recovery failed to %s\n",
12539 ++ err_msg);
12540 + schedule_delayed_work(&core->work, msecs_to_jiffies(10));
12541 + return;
12542 + }
12543 +
12544 ++ dev_warn(core->dev, "system error has occurred (recovered)\n");
12545 ++
12546 + mutex_lock(&core->lock);
12547 + core->sys_error = false;
12548 + mutex_unlock(&core->lock);
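
The venus recovery path above replaces two unbounded while-loops polling pm_runtime_active() with bounded attempts, so a stuck device can no longer hang the recovery worker forever. A small sketch of the bounded-poll idiom (the condition helper is a placeholder; returning -ETIMEDOUT is this sketch's choice, whereas the driver simply proceeds after the last attempt):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

#define MY_MAX_ATTEMPTS	10	/* mirrors RPM_WAIT_FOR_IDLE_MAX_ATTEMPTS */

struct my_core;
bool my_is_idle(struct my_core *core);	/* placeholder condition */

static int my_wait_idle(struct my_core *core)
{
	int i;

	/* Poll with a hard upper bound instead of "while (busy) msleep();". */
	for (i = 0; i < MY_MAX_ATTEMPTS; i++) {
		if (my_is_idle(core))
			return 0;
		msleep(10);
	}

	return -ETIMEDOUT;
}
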
12549 +diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.c b/drivers/media/platform/qcom/venus/hfi_cmds.c
12550 +index 11a8347e5f5c8..4b9dea7f6940e 100644
12551 +--- a/drivers/media/platform/qcom/venus/hfi_cmds.c
12552 ++++ b/drivers/media/platform/qcom/venus/hfi_cmds.c
12553 +@@ -1226,6 +1226,17 @@ pkt_session_set_property_4xx(struct hfi_session_set_property_pkt *pkt,
12554 + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*hdr10);
12555 + break;
12556 + }
12557 ++ case HFI_PROPERTY_PARAM_VDEC_CONCEAL_COLOR: {
12558 ++ struct hfi_conceal_color_v4 *color = prop_data;
12559 ++ u32 *in = pdata;
12560 ++
12561 ++ color->conceal_color_8bit = *in & 0xff;
12562 ++ color->conceal_color_8bit |= ((*in >> 10) & 0xff) << 8;
12563 ++ color->conceal_color_8bit |= ((*in >> 20) & 0xff) << 16;
12564 ++ color->conceal_color_10bit = *in;
12565 ++ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*color);
12566 ++ break;
12567 ++ }
12568 +
12569 + case HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE:
12570 + case HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER:
12571 +@@ -1279,17 +1290,6 @@ pkt_session_set_property_6xx(struct hfi_session_set_property_pkt *pkt,
12572 + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*cq);
12573 + break;
12574 + }
12575 +- case HFI_PROPERTY_PARAM_VDEC_CONCEAL_COLOR: {
12576 +- struct hfi_conceal_color_v4 *color = prop_data;
12577 +- u32 *in = pdata;
12578 +-
12579 +- color->conceal_color_8bit = *in & 0xff;
12580 +- color->conceal_color_8bit |= ((*in >> 10) & 0xff) << 8;
12581 +- color->conceal_color_8bit |= ((*in >> 20) & 0xff) << 16;
12582 +- color->conceal_color_10bit = *in;
12583 +- pkt->shdr.hdr.size += sizeof(u32) + sizeof(*color);
12584 +- break;
12585 +- }
12586 + default:
12587 + return pkt_session_set_property_4xx(pkt, cookie, ptype, pdata);
12588 + }
12589 +diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
12590 +index 15bcb7f6e113c..1cb5eaabf340b 100644
12591 +--- a/drivers/media/platform/s5p-g2d/g2d.c
12592 ++++ b/drivers/media/platform/s5p-g2d/g2d.c
12593 +@@ -276,6 +276,9 @@ static int g2d_release(struct file *file)
12594 + struct g2d_dev *dev = video_drvdata(file);
12595 + struct g2d_ctx *ctx = fh2ctx(file->private_data);
12596 +
12597 ++ mutex_lock(&dev->mutex);
12598 ++ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
12599 ++ mutex_unlock(&dev->mutex);
12600 + v4l2_ctrl_handler_free(&ctx->ctrl_handler);
12601 + v4l2_fh_del(&ctx->fh);
12602 + v4l2_fh_exit(&ctx->fh);
12603 +diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
12604 +index 026111505f5a5..d402e456f27df 100644
12605 +--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
12606 ++++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
12607 +@@ -2566,11 +2566,8 @@ static void s5p_jpeg_buf_queue(struct vb2_buffer *vb)
12608 + static int s5p_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
12609 + {
12610 + struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(q);
12611 +- int ret;
12612 +-
12613 +- ret = pm_runtime_get_sync(ctx->jpeg->dev);
12614 +
12615 +- return ret > 0 ? 0 : ret;
12616 ++ return pm_runtime_resume_and_get(ctx->jpeg->dev);
12617 + }
12618 +
12619 + static void s5p_jpeg_stop_streaming(struct vb2_queue *q)
12620 +diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
12621 +index a92a9ca6e87eb..c1d3bda8385b1 100644
12622 +--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
12623 ++++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
12624 +@@ -172,6 +172,7 @@ static struct mfc_control controls[] = {
12625 + .type = V4L2_CTRL_TYPE_INTEGER,
12626 + .minimum = 0,
12627 + .maximum = 16383,
12628 ++ .step = 1,
12629 + .default_value = 0,
12630 + },
12631 + {
12632 +diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
12633 +index 4ac48441f22c4..ca4310e26c49e 100644
12634 +--- a/drivers/media/platform/sh_vou.c
12635 ++++ b/drivers/media/platform/sh_vou.c
12636 +@@ -1133,7 +1133,11 @@ static int sh_vou_open(struct file *file)
12637 + if (v4l2_fh_is_singular_file(file) &&
12638 + vou_dev->status == SH_VOU_INITIALISING) {
12639 + /* First open */
12640 +- pm_runtime_get_sync(vou_dev->v4l2_dev.dev);
12641 ++ err = pm_runtime_resume_and_get(vou_dev->v4l2_dev.dev);
12642 ++ if (err < 0) {
12643 ++ v4l2_fh_release(file);
12644 ++ goto done_open;
12645 ++ }
12646 + err = sh_vou_hw_init(vou_dev);
12647 + if (err < 0) {
12648 + pm_runtime_put(vou_dev->v4l2_dev.dev);
12649 +diff --git a/drivers/media/platform/sti/bdisp/Makefile b/drivers/media/platform/sti/bdisp/Makefile
12650 +index caf7ccd193eaa..39ade0a347236 100644
12651 +--- a/drivers/media/platform/sti/bdisp/Makefile
12652 ++++ b/drivers/media/platform/sti/bdisp/Makefile
12653 +@@ -1,4 +1,4 @@
12654 + # SPDX-License-Identifier: GPL-2.0-only
12655 +-obj-$(CONFIG_VIDEO_STI_BDISP) := bdisp.o
12656 ++obj-$(CONFIG_VIDEO_STI_BDISP) += bdisp.o
12657 +
12658 + bdisp-objs := bdisp-v4l2.o bdisp-hw.o bdisp-debug.o
12659 +diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
12660 +index 060ca85f64d5d..85288da9d2ae6 100644
12661 +--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
12662 ++++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
12663 +@@ -499,7 +499,7 @@ static int bdisp_start_streaming(struct vb2_queue *q, unsigned int count)
12664 + {
12665 + struct bdisp_ctx *ctx = q->drv_priv;
12666 + struct vb2_v4l2_buffer *buf;
12667 +- int ret = pm_runtime_get_sync(ctx->bdisp_dev->dev);
12668 ++ int ret = pm_runtime_resume_and_get(ctx->bdisp_dev->dev);
12669 +
12670 + if (ret < 0) {
12671 + dev_err(ctx->bdisp_dev->dev, "failed to set runtime PM\n");
12672 +@@ -1364,10 +1364,10 @@ static int bdisp_probe(struct platform_device *pdev)
12673 +
12674 + /* Power management */
12675 + pm_runtime_enable(dev);
12676 +- ret = pm_runtime_get_sync(dev);
12677 ++ ret = pm_runtime_resume_and_get(dev);
12678 + if (ret < 0) {
12679 + dev_err(dev, "failed to set PM\n");
12680 +- goto err_pm;
12681 ++ goto err_remove;
12682 + }
12683 +
12684 + /* Filters */
12685 +@@ -1395,6 +1395,7 @@ err_filter:
12686 + bdisp_hw_free_filters(bdisp->dev);
12687 + err_pm:
12688 + pm_runtime_put(dev);
12689 ++err_remove:
12690 + bdisp_debugfs_remove(bdisp);
12691 + v4l2_device_unregister(&bdisp->v4l2_dev);
12692 + err_clk:
12693 +diff --git a/drivers/media/platform/sti/delta/Makefile b/drivers/media/platform/sti/delta/Makefile
12694 +index 92b37e216f004..32412fa4c6328 100644
12695 +--- a/drivers/media/platform/sti/delta/Makefile
12696 ++++ b/drivers/media/platform/sti/delta/Makefile
12697 +@@ -1,5 +1,5 @@
12698 + # SPDX-License-Identifier: GPL-2.0-only
12699 +-obj-$(CONFIG_VIDEO_STI_DELTA_DRIVER) := st-delta.o
12700 ++obj-$(CONFIG_VIDEO_STI_DELTA_DRIVER) += st-delta.o
12701 + st-delta-y := delta-v4l2.o delta-mem.o delta-ipc.o delta-debug.o
12702 +
12703 + # MJPEG support
12704 +diff --git a/drivers/media/platform/sti/hva/Makefile b/drivers/media/platform/sti/hva/Makefile
12705 +index 74b41ec52f976..b5a5478bdd016 100644
12706 +--- a/drivers/media/platform/sti/hva/Makefile
12707 ++++ b/drivers/media/platform/sti/hva/Makefile
12708 +@@ -1,4 +1,4 @@
12709 + # SPDX-License-Identifier: GPL-2.0-only
12710 +-obj-$(CONFIG_VIDEO_STI_HVA) := st-hva.o
12711 ++obj-$(CONFIG_VIDEO_STI_HVA) += st-hva.o
12712 + st-hva-y := hva-v4l2.o hva-hw.o hva-mem.o hva-h264.o
12713 + st-hva-$(CONFIG_VIDEO_STI_HVA_DEBUGFS) += hva-debugfs.o
12714 +diff --git a/drivers/media/platform/sti/hva/hva-hw.c b/drivers/media/platform/sti/hva/hva-hw.c
12715 +index f59811e27f51f..6eeee5017fac4 100644
12716 +--- a/drivers/media/platform/sti/hva/hva-hw.c
12717 ++++ b/drivers/media/platform/sti/hva/hva-hw.c
12718 +@@ -130,8 +130,7 @@ static irqreturn_t hva_hw_its_irq_thread(int irq, void *arg)
12719 + ctx_id = (hva->sts_reg & 0xFF00) >> 8;
12720 + if (ctx_id >= HVA_MAX_INSTANCES) {
12721 + dev_err(dev, "%s %s: bad context identifier: %d\n",
12722 +- ctx->name, __func__, ctx_id);
12723 +- ctx->hw_err = true;
12724 ++ HVA_PREFIX, __func__, ctx_id);
12725 + goto out;
12726 + }
12727 +
12728 +diff --git a/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c b/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c
12729 +index 3f81dd17755cb..fbcca59a0517c 100644
12730 +--- a/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c
12731 ++++ b/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c
12732 +@@ -494,7 +494,7 @@ static int rotate_start_streaming(struct vb2_queue *vq, unsigned int count)
12733 + struct device *dev = ctx->dev->dev;
12734 + int ret;
12735 +
12736 +- ret = pm_runtime_get_sync(dev);
12737 ++ ret = pm_runtime_resume_and_get(dev);
12738 + if (ret < 0) {
12739 + dev_err(dev, "Failed to enable module\n");
12740 +
12741 +diff --git a/drivers/media/platform/video-mux.c b/drivers/media/platform/video-mux.c
12742 +index 133122e385150..9bc0b4d8de095 100644
12743 +--- a/drivers/media/platform/video-mux.c
12744 ++++ b/drivers/media/platform/video-mux.c
12745 +@@ -362,7 +362,7 @@ static int video_mux_async_register(struct video_mux *vmux,
12746 +
12747 + for (i = 0; i < num_input_pads; i++) {
12748 + struct v4l2_async_subdev *asd;
12749 +- struct fwnode_handle *ep;
12750 ++ struct fwnode_handle *ep, *remote_ep;
12751 +
12752 + ep = fwnode_graph_get_endpoint_by_id(
12753 + dev_fwnode(vmux->subdev.dev), i, 0,
12754 +@@ -370,6 +370,14 @@ static int video_mux_async_register(struct video_mux *vmux,
12755 + if (!ep)
12756 + continue;
12757 +
12758 ++ /* Skip dangling endpoints for backwards compatibility */
12759 ++ remote_ep = fwnode_graph_get_remote_endpoint(ep);
12760 ++ if (!remote_ep) {
12761 ++ fwnode_handle_put(ep);
12762 ++ continue;
12763 ++ }
12764 ++ fwnode_handle_put(remote_ep);
12765 ++
12766 + asd = v4l2_async_notifier_add_fwnode_remote_subdev(
12767 + &vmux->notifier, ep, struct v4l2_async_subdev);
12768 +
12769 +diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
12770 +index a8a72d5fbd129..caefac07af927 100644
12771 +--- a/drivers/media/usb/au0828/au0828-core.c
12772 ++++ b/drivers/media/usb/au0828/au0828-core.c
12773 +@@ -199,8 +199,8 @@ static int au0828_media_device_init(struct au0828_dev *dev,
12774 + struct media_device *mdev;
12775 +
12776 + mdev = media_device_usb_allocate(udev, KBUILD_MODNAME, THIS_MODULE);
12777 +- if (!mdev)
12778 +- return -ENOMEM;
12779 ++ if (IS_ERR(mdev))
12780 ++ return PTR_ERR(mdev);
12781 +
12782 + dev->media_dev = mdev;
12783 + #endif
12784 +diff --git a/drivers/media/usb/cpia2/cpia2.h b/drivers/media/usb/cpia2/cpia2.h
12785 +index 50835f5f7512c..57b7f1ea68da5 100644
12786 +--- a/drivers/media/usb/cpia2/cpia2.h
12787 ++++ b/drivers/media/usb/cpia2/cpia2.h
12788 +@@ -429,6 +429,7 @@ int cpia2_send_command(struct camera_data *cam, struct cpia2_command *cmd);
12789 + int cpia2_do_command(struct camera_data *cam,
12790 + unsigned int command,
12791 + unsigned char direction, unsigned char param);
12792 ++void cpia2_deinit_camera_struct(struct camera_data *cam, struct usb_interface *intf);
12793 + struct camera_data *cpia2_init_camera_struct(struct usb_interface *intf);
12794 + int cpia2_init_camera(struct camera_data *cam);
12795 + int cpia2_allocate_buffers(struct camera_data *cam);
12796 +diff --git a/drivers/media/usb/cpia2/cpia2_core.c b/drivers/media/usb/cpia2/cpia2_core.c
12797 +index e747548ab2869..b5a2d06fb356b 100644
12798 +--- a/drivers/media/usb/cpia2/cpia2_core.c
12799 ++++ b/drivers/media/usb/cpia2/cpia2_core.c
12800 +@@ -2163,6 +2163,18 @@ static void reset_camera_struct(struct camera_data *cam)
12801 + cam->height = cam->params.roi.height;
12802 + }
12803 +
12804 ++/******************************************************************************
12805 ++ *
12806 ++ * cpia2_deinit_camera_struct
12807 ++ *
12808 ++ * Deinitialize camera struct
12809 ++ *****************************************************************************/
12810 ++void cpia2_deinit_camera_struct(struct camera_data *cam, struct usb_interface *intf)
12811 ++{
12812 ++ v4l2_device_unregister(&cam->v4l2_dev);
12813 ++ kfree(cam);
12814 ++}
12815 ++
12816 + /******************************************************************************
12817 + *
12818 + * cpia2_init_camera_struct
12819 +diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c
12820 +index 3ab80a7b44985..76aac06f9fb8e 100644
12821 +--- a/drivers/media/usb/cpia2/cpia2_usb.c
12822 ++++ b/drivers/media/usb/cpia2/cpia2_usb.c
12823 +@@ -844,15 +844,13 @@ static int cpia2_usb_probe(struct usb_interface *intf,
12824 + ret = set_alternate(cam, USBIF_CMDONLY);
12825 + if (ret < 0) {
12826 + ERR("%s: usb_set_interface error (ret = %d)\n", __func__, ret);
12827 +- kfree(cam);
12828 +- return ret;
12829 ++ goto alt_err;
12830 + }
12831 +
12832 +
12833 + if((ret = cpia2_init_camera(cam)) < 0) {
12834 + ERR("%s: failed to initialize cpia2 camera (ret = %d)\n", __func__, ret);
12835 +- kfree(cam);
12836 +- return ret;
12837 ++ goto alt_err;
12838 + }
12839 + LOG(" CPiA Version: %d.%02d (%d.%d)\n",
12840 + cam->params.version.firmware_revision_hi,
12841 +@@ -872,11 +870,14 @@ static int cpia2_usb_probe(struct usb_interface *intf,
12842 + ret = cpia2_register_camera(cam);
12843 + if (ret < 0) {
12844 + ERR("%s: Failed to register cpia2 camera (ret = %d)\n", __func__, ret);
12845 +- kfree(cam);
12846 +- return ret;
12847 ++ goto alt_err;
12848 + }
12849 +
12850 + return 0;
12851 ++
12852 ++alt_err:
12853 ++ cpia2_deinit_camera_struct(cam, intf);
12854 ++ return ret;
12855 + }
12856 +
12857 + /******************************************************************************
12858 +diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
12859 +index 969a7ec71dff7..4116ba5c45fcb 100644
12860 +--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
12861 ++++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
12862 +@@ -78,6 +78,8 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
12863 +
12864 + ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 3, 0);
12865 + if (ret < 0) {
12866 ++ if (adap->fe_adap[0].fe)
12867 ++ adap->fe_adap[0].fe->ops.release(adap->fe_adap[0].fe);
12868 + deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep state info\n");
12869 + }
12870 + mutex_unlock(&d->data_mutex);
12871 +diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
12872 +index 761992ad05e2a..7707de7bae7ca 100644
12873 +--- a/drivers/media/usb/dvb-usb/cxusb.c
12874 ++++ b/drivers/media/usb/dvb-usb/cxusb.c
12875 +@@ -1947,7 +1947,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_lgz201_properties = {
12876 +
12877 + .size_of_priv = sizeof(struct cxusb_state),
12878 +
12879 +- .num_adapters = 2,
12880 ++ .num_adapters = 1,
12881 + .adapter = {
12882 + {
12883 + .num_frontends = 1,
12884 +diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
12885 +index 5aa15a7a49def..59529cbf9cd0b 100644
12886 +--- a/drivers/media/usb/em28xx/em28xx-input.c
12887 ++++ b/drivers/media/usb/em28xx/em28xx-input.c
12888 +@@ -720,7 +720,8 @@ static int em28xx_ir_init(struct em28xx *dev)
12889 + dev->board.has_ir_i2c = 0;
12890 + dev_warn(&dev->intf->dev,
12891 + "No i2c IR remote control device found.\n");
12892 +- return -ENODEV;
12893 ++ err = -ENODEV;
12894 ++ goto ref_put;
12895 + }
12896 + }
12897 +
12898 +@@ -735,7 +736,7 @@ static int em28xx_ir_init(struct em28xx *dev)
12899 +
12900 + ir = kzalloc(sizeof(*ir), GFP_KERNEL);
12901 + if (!ir)
12902 +- return -ENOMEM;
12903 ++ goto ref_put;
12904 + rc = rc_allocate_device(RC_DRIVER_SCANCODE);
12905 + if (!rc)
12906 + goto error;
12907 +@@ -839,6 +840,9 @@ error:
12908 + dev->ir = NULL;
12909 + rc_free_device(rc);
12910 + kfree(ir);
12911 ++ref_put:
12912 ++ em28xx_shutdown_buttons(dev);
12913 ++ kref_put(&dev->ref, em28xx_free_device);
12914 + return err;
12915 + }
12916 +
12917 +diff --git a/drivers/media/usb/gspca/gl860/gl860.c b/drivers/media/usb/gspca/gl860/gl860.c
12918 +index 2c05ea2598e76..ce4ee8bc75c85 100644
12919 +--- a/drivers/media/usb/gspca/gl860/gl860.c
12920 ++++ b/drivers/media/usb/gspca/gl860/gl860.c
12921 +@@ -561,8 +561,8 @@ int gl860_RTx(struct gspca_dev *gspca_dev,
12922 + len, 400 + 200 * (len > 1));
12923 + memcpy(pdata, gspca_dev->usb_buf, len);
12924 + } else {
12925 +- r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
12926 +- req, pref, val, index, NULL, len, 400);
12927 ++ gspca_err(gspca_dev, "zero-length read request\n");
12928 ++ r = -EINVAL;
12929 + }
12930 + }
12931 +
12932 +diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
12933 +index f4a727918e352..d38dee1792e41 100644
12934 +--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
12935 ++++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
12936 +@@ -2676,9 +2676,8 @@ void pvr2_hdw_destroy(struct pvr2_hdw *hdw)
12937 + pvr2_stream_destroy(hdw->vid_stream);
12938 + hdw->vid_stream = NULL;
12939 + }
12940 +- pvr2_i2c_core_done(hdw);
12941 + v4l2_device_unregister(&hdw->v4l2_dev);
12942 +- pvr2_hdw_remove_usb_stuff(hdw);
12943 ++ pvr2_hdw_disconnect(hdw);
12944 + mutex_lock(&pvr2_unit_mtx);
12945 + do {
12946 + if ((hdw->unit_number >= 0) &&
12947 +@@ -2705,6 +2704,7 @@ void pvr2_hdw_disconnect(struct pvr2_hdw *hdw)
12948 + {
12949 + pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_disconnect(hdw=%p)",hdw);
12950 + LOCK_TAKE(hdw->big_lock);
12951 ++ pvr2_i2c_core_done(hdw);
12952 + LOCK_TAKE(hdw->ctl_lock);
12953 + pvr2_hdw_remove_usb_stuff(hdw);
12954 + LOCK_GIVE(hdw->ctl_lock);
12955 +diff --git a/drivers/media/v4l2-core/v4l2-fh.c b/drivers/media/v4l2-core/v4l2-fh.c
12956 +index 684574f58e82d..90eec79ee995a 100644
12957 +--- a/drivers/media/v4l2-core/v4l2-fh.c
12958 ++++ b/drivers/media/v4l2-core/v4l2-fh.c
12959 +@@ -96,6 +96,7 @@ int v4l2_fh_release(struct file *filp)
12960 + v4l2_fh_del(fh);
12961 + v4l2_fh_exit(fh);
12962 + kfree(fh);
12963 ++ filp->private_data = NULL;
12964 + }
12965 + return 0;
12966 + }
12967 +diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
12968 +index 2673f51aafa4d..07d823656ee65 100644
12969 +--- a/drivers/media/v4l2-core/v4l2-ioctl.c
12970 ++++ b/drivers/media/v4l2-core/v4l2-ioctl.c
12971 +@@ -3072,8 +3072,8 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
12972 +
12973 + static unsigned int video_translate_cmd(unsigned int cmd)
12974 + {
12975 ++#if !defined(CONFIG_64BIT) && defined(CONFIG_COMPAT_32BIT_TIME)
12976 + switch (cmd) {
12977 +-#ifdef CONFIG_COMPAT_32BIT_TIME
12978 + case VIDIOC_DQEVENT_TIME32:
12979 + return VIDIOC_DQEVENT;
12980 + case VIDIOC_QUERYBUF_TIME32:
12981 +@@ -3084,8 +3084,8 @@ static unsigned int video_translate_cmd(unsigned int cmd)
12982 + return VIDIOC_DQBUF;
12983 + case VIDIOC_PREPARE_BUF_TIME32:
12984 + return VIDIOC_PREPARE_BUF;
12985 +-#endif
12986 + }
12987 ++#endif
12988 + if (in_compat_syscall())
12989 + return v4l2_compat_translate_cmd(cmd);
12990 +
12991 +@@ -3126,8 +3126,8 @@ static int video_get_user(void __user *arg, void *parg,
12992 + } else if (in_compat_syscall()) {
12993 + err = v4l2_compat_get_user(arg, parg, cmd);
12994 + } else {
12995 ++#if !defined(CONFIG_64BIT) && defined(CONFIG_COMPAT_32BIT_TIME)
12996 + switch (cmd) {
12997 +-#ifdef CONFIG_COMPAT_32BIT_TIME
12998 + case VIDIOC_QUERYBUF_TIME32:
12999 + case VIDIOC_QBUF_TIME32:
13000 + case VIDIOC_DQBUF_TIME32:
13001 +@@ -3155,8 +3155,8 @@ static int video_get_user(void __user *arg, void *parg,
13002 + };
13003 + break;
13004 + }
13005 +-#endif
13006 + }
13007 ++#endif
13008 + }
13009 +
13010 + /* zero out anything we don't copy from userspace */
13011 +@@ -3181,8 +3181,8 @@ static int video_put_user(void __user *arg, void *parg,
13012 + if (in_compat_syscall())
13013 + return v4l2_compat_put_user(arg, parg, cmd);
13014 +
13015 ++#if !defined(CONFIG_64BIT) && defined(CONFIG_COMPAT_32BIT_TIME)
13016 + switch (cmd) {
13017 +-#ifdef CONFIG_COMPAT_32BIT_TIME
13018 + case VIDIOC_DQEVENT_TIME32: {
13019 + struct v4l2_event *ev = parg;
13020 + struct v4l2_event_time32 ev32;
13021 +@@ -3230,8 +3230,8 @@ static int video_put_user(void __user *arg, void *parg,
13022 + return -EFAULT;
13023 + break;
13024 + }
13025 +-#endif
13026 + }
13027 ++#endif
13028 +
13029 + return 0;
13030 + }
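
The v4l2-ioctl hunks hoist the preprocessor guard outside each switch and extend it with !defined(CONFIG_64BIT), so the time32 cases vanish entirely on 64-bit kernels instead of leaving a switch over dead case labels. A sketch of the shape (command IDs are hypothetical):

/* Hypothetical ioctl numbers standing in for VIDIOC_*_TIME32. */
#define MY_CMD		0x1000
#define MY_CMD_TIME32	0x1001

static unsigned int my_translate_cmd(unsigned int cmd)
{
#if !defined(CONFIG_64BIT) && defined(CONFIG_COMPAT_32BIT_TIME)
	/* The whole switch compiles away on 64-bit kernels. */
	switch (cmd) {
	case MY_CMD_TIME32:
		return MY_CMD;
	}
#endif
	return cmd;
}
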
13031 +diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
13032 +index 956dafab43d49..bf3aa92524584 100644
13033 +--- a/drivers/media/v4l2-core/v4l2-subdev.c
13034 ++++ b/drivers/media/v4l2-core/v4l2-subdev.c
13035 +@@ -428,30 +428,6 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
13036 +
13037 + return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);
13038 +
13039 +- case VIDIOC_DQEVENT_TIME32: {
13040 +- struct v4l2_event_time32 *ev32 = arg;
13041 +- struct v4l2_event ev = { };
13042 +-
13043 +- if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
13044 +- return -ENOIOCTLCMD;
13045 +-
13046 +- rval = v4l2_event_dequeue(vfh, &ev, file->f_flags & O_NONBLOCK);
13047 +-
13048 +- *ev32 = (struct v4l2_event_time32) {
13049 +- .type = ev.type,
13050 +- .pending = ev.pending,
13051 +- .sequence = ev.sequence,
13052 +- .timestamp.tv_sec = ev.timestamp.tv_sec,
13053 +- .timestamp.tv_nsec = ev.timestamp.tv_nsec,
13054 +- .id = ev.id,
13055 +- };
13056 +-
13057 +- memcpy(&ev32->u, &ev.u, sizeof(ev.u));
13058 +- memcpy(&ev32->reserved, &ev.reserved, sizeof(ev.reserved));
13059 +-
13060 +- return rval;
13061 +- }
13062 +-
13063 + case VIDIOC_SUBSCRIBE_EVENT:
13064 + return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);
13065 +
13066 +diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c
13067 +index 102dbb8080da5..29271ad4728a2 100644
13068 +--- a/drivers/memstick/host/rtsx_usb_ms.c
13069 ++++ b/drivers/memstick/host/rtsx_usb_ms.c
13070 +@@ -799,9 +799,9 @@ static int rtsx_usb_ms_drv_probe(struct platform_device *pdev)
13071 +
13072 + return 0;
13073 + err_out:
13074 +- memstick_free_host(msh);
13075 + pm_runtime_disable(ms_dev(host));
13076 + pm_runtime_put_noidle(ms_dev(host));
13077 ++ memstick_free_host(msh);
13078 + return err;
13079 + }
13080 +
13081 +@@ -828,9 +828,6 @@ static int rtsx_usb_ms_drv_remove(struct platform_device *pdev)
13082 + }
13083 + mutex_unlock(&host->host_mutex);
13084 +
13085 +- memstick_remove_host(msh);
13086 +- memstick_free_host(msh);
13087 +-
13088 + /* Balance possible unbalanced usage count
13089 + * e.g. unconditional module removal
13090 + */
13091 +@@ -838,10 +835,11 @@ static int rtsx_usb_ms_drv_remove(struct platform_device *pdev)
13092 + pm_runtime_put(ms_dev(host));
13093 +
13094 + pm_runtime_disable(ms_dev(host));
13095 +- platform_set_drvdata(pdev, NULL);
13096 +-
13097 ++ memstick_remove_host(msh);
13098 + dev_dbg(ms_dev(host),
13099 + ": Realtek USB Memstick controller has been removed\n");
13100 ++ memstick_free_host(msh);
13101 ++ platform_set_drvdata(pdev, NULL);
13102 +
13103 + return 0;
13104 + }
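Both rtsx_usb_ms.c hunks above are ordering fixes: memstick_free_host() releases the memory that backs 'host', while ms_dev(host) is still dereferenced by the pm_runtime calls and the final dev_dbg(), so freeing has to be the last step that touches it. The corrected error path reads:

    err_out:
    	pm_runtime_disable(ms_dev(host));	/* still dereferences host */
    	pm_runtime_put_noidle(ms_dev(host));
    	memstick_free_host(msh);		/* frees host's backing memory last */
    	return err;

The remove path gets the same treatment: balance the runtime-PM usage count, remove the host, log, and only then free it and clear the drvdata pointer.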
13105 +diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
13106 +index 5c7f2b1001911..5c408c1dc58ce 100644
13107 +--- a/drivers/mfd/Kconfig
13108 ++++ b/drivers/mfd/Kconfig
13109 +@@ -465,6 +465,7 @@ config MFD_MP2629
13110 + tristate "Monolithic Power Systems MP2629 ADC and Battery charger"
13111 + depends on I2C
13112 + select REGMAP_I2C
13113 ++ select MFD_CORE
13114 + help
13115 + Select this option to enable support for Monolithic Power Systems
13116 + battery charger. This provides ADC, thermal and battery charger power
13117 +diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
13118 +index 6f02b8022c6d5..79f5c6a18815a 100644
13119 +--- a/drivers/mfd/mfd-core.c
13120 ++++ b/drivers/mfd/mfd-core.c
13121 +@@ -266,18 +266,18 @@ static int mfd_add_device(struct device *parent, int id,
13122 + if (has_acpi_companion(&pdev->dev)) {
13123 + ret = acpi_check_resource_conflict(&res[r]);
13124 + if (ret)
13125 +- goto fail_of_entry;
13126 ++ goto fail_res_conflict;
13127 + }
13128 + }
13129 + }
13130 +
13131 + ret = platform_device_add_resources(pdev, res, cell->num_resources);
13132 + if (ret)
13133 +- goto fail_of_entry;
13134 ++ goto fail_res_conflict;
13135 +
13136 + ret = platform_device_add(pdev);
13137 + if (ret)
13138 +- goto fail_of_entry;
13139 ++ goto fail_res_conflict;
13140 +
13141 + if (cell->pm_runtime_no_callbacks)
13142 + pm_runtime_no_callbacks(&pdev->dev);
13143 +@@ -286,13 +286,15 @@ static int mfd_add_device(struct device *parent, int id,
13144 +
13145 + return 0;
13146 +
13147 ++fail_res_conflict:
13148 ++ if (cell->swnode)
13149 ++ device_remove_software_node(&pdev->dev);
13150 + fail_of_entry:
13151 + list_for_each_entry_safe(of_entry, tmp, &mfd_of_node_list, list)
13152 + if (of_entry->dev == &pdev->dev) {
13153 + list_del(&of_entry->list);
13154 + kfree(of_entry);
13155 + }
13156 +- device_remove_software_node(&pdev->dev);
13157 + fail_alias:
13158 + regulator_bulk_unregister_supply_alias(&pdev->dev,
13159 + cell->parent_supplies,
13160 +@@ -358,11 +360,12 @@ static int mfd_remove_devices_fn(struct device *dev, void *data)
13161 + if (level && cell->level > *level)
13162 + return 0;
13163 +
13164 ++ if (cell->swnode)
13165 ++ device_remove_software_node(&pdev->dev);
13166 ++
13167 + regulator_bulk_unregister_supply_alias(dev, cell->parent_supplies,
13168 + cell->num_parent_supplies);
13169 +
13170 +- device_remove_software_node(&pdev->dev);
13171 +-
13172 + platform_device_unregister(pdev);
13173 + return 0;
13174 + }
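The mfd-core change introduces a dedicated unwind label so device_remove_software_node() runs only when a software node was actually added for the cell (cell->swnode), both on the probe error path and in mfd_remove_devices_fn(); previously the call was unconditional. This follows the usual staged-goto unwind idiom, sketched here in condensed form:

    	ret = platform_device_add(pdev);
    	if (ret)
    		goto fail_res_conflict;
    	return 0;

    fail_res_conflict:
    	if (cell->swnode)
    		device_remove_software_node(&pdev->dev);	/* undo only what was done */
    fail_of_entry:
    	/* release the OF-node bookkeeping, then fall through to earlier undo steps */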
13175 +diff --git a/drivers/mfd/rn5t618.c b/drivers/mfd/rn5t618.c
13176 +index 6ed04e6dbc783..384acb4594272 100644
13177 +--- a/drivers/mfd/rn5t618.c
13178 ++++ b/drivers/mfd/rn5t618.c
13179 +@@ -107,7 +107,7 @@ static int rn5t618_irq_init(struct rn5t618 *rn5t618)
13180 +
13181 + ret = devm_regmap_add_irq_chip(rn5t618->dev, rn5t618->regmap,
13182 + rn5t618->irq,
13183 +- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
13184 ++ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
13185 + 0, irq_chip, &rn5t618->irq_data);
13186 + if (ret)
13187 + dev_err(rn5t618->dev, "Failed to register IRQ chip\n");
13188 +diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c
13189 +index 81c70e5bc168f..3e4a594c110b3 100644
13190 +--- a/drivers/misc/eeprom/idt_89hpesx.c
13191 ++++ b/drivers/misc/eeprom/idt_89hpesx.c
13192 +@@ -1126,11 +1126,10 @@ static void idt_get_fw_data(struct idt_89hpesx_dev *pdev)
13193 +
13194 + device_for_each_child_node(dev, fwnode) {
13195 + ee_id = idt_ee_match_id(fwnode);
13196 +- if (!ee_id) {
13197 +- dev_warn(dev, "Skip unsupported EEPROM device");
13198 +- continue;
13199 +- } else
13200 ++ if (ee_id)
13201 + break;
13202 ++
13203 ++ dev_warn(dev, "Skip unsupported EEPROM device %pfw\n", fwnode);
13204 + }
13205 +
13206 + /* If there is no fwnode EEPROM device, then set zero size */
13207 +@@ -1161,6 +1160,7 @@ static void idt_get_fw_data(struct idt_89hpesx_dev *pdev)
13208 + else /* if (!fwnode_property_read_bool(node, "read-only")) */
13209 + pdev->eero = false;
13210 +
13211 ++ fwnode_handle_put(fwnode);
13212 + dev_info(dev, "EEPROM of %d bytes found by 0x%x",
13213 + pdev->eesize, pdev->eeaddr);
13214 + }
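device_for_each_child_node() takes a reference on each node it hands to the loop body; breaking out of the loop, as the rewritten match logic above now does, leaves that reference held, which is why a fwnode_handle_put() is added once the node's properties have been read. The general pattern, with a hypothetical match predicate:

    	struct fwnode_handle *child;

    	device_for_each_child_node(dev, child) {
    		if (node_matches(child))	/* hypothetical check */
    			break;			/* exits with a reference held */
    	}

    	/* ... read properties of 'child' ... */
    	fwnode_handle_put(child);		/* balances the iterator's get */

If the loop runs to completion, 'child' is NULL and fwnode_handle_put() is a harmless no-op.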
13215 +diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
13216 +index 64d1530db9854..d15b912a347bd 100644
13217 +--- a/drivers/misc/habanalabs/common/habanalabs_drv.c
13218 ++++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
13219 +@@ -464,6 +464,7 @@ static int hl_pci_probe(struct pci_dev *pdev,
13220 + return 0;
13221 +
13222 + disable_device:
13223 ++ pci_disable_pcie_error_reporting(pdev);
13224 + pci_set_drvdata(pdev, NULL);
13225 + destroy_hdev(hdev);
13226 +
13227 +diff --git a/drivers/misc/pvpanic/pvpanic-mmio.c b/drivers/misc/pvpanic/pvpanic-mmio.c
13228 +index 4c08417760874..69b31f7adf4f1 100644
13229 +--- a/drivers/misc/pvpanic/pvpanic-mmio.c
13230 ++++ b/drivers/misc/pvpanic/pvpanic-mmio.c
13231 +@@ -93,7 +93,7 @@ static int pvpanic_mmio_probe(struct platform_device *pdev)
13232 + return -EINVAL;
13233 + }
13234 +
13235 +- pi = kmalloc(sizeof(*pi), GFP_ATOMIC);
13236 ++ pi = devm_kmalloc(dev, sizeof(*pi), GFP_ATOMIC);
13237 + if (!pi)
13238 + return -ENOMEM;
13239 +
13240 +@@ -114,7 +114,6 @@ static int pvpanic_mmio_remove(struct platform_device *pdev)
13241 + struct pvpanic_instance *pi = dev_get_drvdata(&pdev->dev);
13242 +
13243 + pvpanic_remove(pi);
13244 +- kfree(pi);
13245 +
13246 + return 0;
13247 + }
13248 +diff --git a/drivers/misc/pvpanic/pvpanic-pci.c b/drivers/misc/pvpanic/pvpanic-pci.c
13249 +index 9ecc4e8559d5d..046ce4ecc1959 100644
13250 +--- a/drivers/misc/pvpanic/pvpanic-pci.c
13251 ++++ b/drivers/misc/pvpanic/pvpanic-pci.c
13252 +@@ -78,15 +78,15 @@ static int pvpanic_pci_probe(struct pci_dev *pdev,
13253 + void __iomem *base;
13254 + int ret;
13255 +
13256 +- ret = pci_enable_device(pdev);
13257 ++ ret = pcim_enable_device(pdev);
13258 + if (ret < 0)
13259 + return ret;
13260 +
13261 +- base = pci_iomap(pdev, 0, 0);
13262 ++ base = pcim_iomap(pdev, 0, 0);
13263 + if (!base)
13264 + return -ENOMEM;
13265 +
13266 +- pi = kmalloc(sizeof(*pi), GFP_ATOMIC);
13267 ++ pi = devm_kmalloc(&pdev->dev, sizeof(*pi), GFP_ATOMIC);
13268 + if (!pi)
13269 + return -ENOMEM;
13270 +
13271 +@@ -107,9 +107,6 @@ static void pvpanic_pci_remove(struct pci_dev *pdev)
13272 + struct pvpanic_instance *pi = dev_get_drvdata(&pdev->dev);
13273 +
13274 + pvpanic_remove(pi);
13275 +- iounmap(pi->base);
13276 +- kfree(pi);
13277 +- pci_disable_device(pdev);
13278 + }
13279 +
13280 + static struct pci_driver pvpanic_pci_driver = {
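Both pvpanic drivers are converted to managed resources here: pcim_enable_device()/pcim_iomap() and devm_kmalloc() tie the PCI enable, the BAR mapping and the private allocation to the device's lifetime, so the iounmap()/kfree()/pci_disable_device() calls in the remove paths become redundant and are dropped. A minimal probe under this pattern (hypothetical function name; GFP_KERNEL is sufficient in probe context):

    static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
    	struct pvpanic_instance *pi;
    	void __iomem *base;
    	int ret;

    	ret = pcim_enable_device(pdev);		/* auto-disabled on detach */
    	if (ret < 0)
    		return ret;

    	base = pcim_iomap(pdev, 0, 0);		/* auto-unmapped on detach */
    	if (!base)
    		return -ENOMEM;

    	pi = devm_kmalloc(&pdev->dev, sizeof(*pi), GFP_KERNEL);
    	if (!pi)
    		return -ENOMEM;

    	return 0;
    }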
13281 +diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
13282 +index 689eb9afeeed1..2518bc0856596 100644
13283 +--- a/drivers/mmc/core/block.c
13284 ++++ b/drivers/mmc/core/block.c
13285 +@@ -1004,6 +1004,12 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
13286 +
13287 + switch (mq_rq->drv_op) {
13288 + case MMC_DRV_OP_IOCTL:
13289 ++ if (card->ext_csd.cmdq_en) {
13290 ++ ret = mmc_cmdq_disable(card);
13291 ++ if (ret)
13292 ++ break;
13293 ++ }
13294 ++ fallthrough;
13295 + case MMC_DRV_OP_IOCTL_RPMB:
13296 + idata = mq_rq->drv_op_data;
13297 + for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
13298 +@@ -1014,6 +1020,8 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
13299 + /* Always switch back to main area after RPMB access */
13300 + if (rpmb_ioctl)
13301 + mmc_blk_part_switch(card, 0);
13302 ++ else if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
13303 ++ mmc_cmdq_enable(card);
13304 + break;
13305 + case MMC_DRV_OP_BOOT_WP:
13306 + ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
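The block.c hunks bracket raw ioctl commands with command-queue state changes: eMMC command queueing has to be off while pass-through commands from user space are issued, so MMC_DRV_OP_IOCTL now disables CMDQ and falls through to the shared handler, then re-enables it afterwards when the card supports re-enabling and the RPMB path has not already switched partitions. The essential bracket:

    	if (card->ext_csd.cmdq_en) {
    		ret = mmc_cmdq_disable(card);
    		if (ret)
    			break;		/* cannot issue raw commands safely */
    	}

    	/* ... issue the user's command(s) ... */

    	if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
    		mmc_cmdq_enable(card);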
13307 +diff --git a/drivers/mmc/host/sdhci-of-aspeed.c b/drivers/mmc/host/sdhci-of-aspeed.c
13308 +index d001c51074a06..e4665a438ec56 100644
13309 +--- a/drivers/mmc/host/sdhci-of-aspeed.c
13310 ++++ b/drivers/mmc/host/sdhci-of-aspeed.c
13311 +@@ -150,7 +150,7 @@ static int aspeed_sdhci_phase_to_tap(struct device *dev, unsigned long rate_hz,
13312 +
13313 + tap = div_u64(phase_period_ps, prop_delay_ps);
13314 + if (tap > ASPEED_SDHCI_NR_TAPS) {
13315 +- dev_warn(dev,
13316 ++ dev_dbg(dev,
13317 + "Requested out of range phase tap %d for %d degrees of phase compensation at %luHz, clamping to tap %d\n",
13318 + tap, phase_deg, rate_hz, ASPEED_SDHCI_NR_TAPS);
13319 + tap = ASPEED_SDHCI_NR_TAPS;
13320 +diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
13321 +index 5dc36efff47ff..11e375579cfb9 100644
13322 +--- a/drivers/mmc/host/sdhci-sprd.c
13323 ++++ b/drivers/mmc/host/sdhci-sprd.c
13324 +@@ -393,6 +393,7 @@ static void sdhci_sprd_request_done(struct sdhci_host *host,
13325 + static struct sdhci_ops sdhci_sprd_ops = {
13326 + .read_l = sdhci_sprd_readl,
13327 + .write_l = sdhci_sprd_writel,
13328 ++ .write_w = sdhci_sprd_writew,
13329 + .write_b = sdhci_sprd_writeb,
13330 + .set_clock = sdhci_sprd_set_clock,
13331 + .get_max_clock = sdhci_sprd_get_max_clock,
13332 +diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
13333 +index 615f3d008af1e..b9b79b1089a00 100644
13334 +--- a/drivers/mmc/host/usdhi6rol0.c
13335 ++++ b/drivers/mmc/host/usdhi6rol0.c
13336 +@@ -1801,6 +1801,7 @@ static int usdhi6_probe(struct platform_device *pdev)
13337 +
13338 + version = usdhi6_read(host, USDHI6_VERSION);
13339 + if ((version & 0xfff) != 0xa0d) {
13340 ++ ret = -EPERM;
13341 + dev_err(dev, "Version not recognized %x\n", version);
13342 + goto e_clk_off;
13343 + }
13344 +diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
13345 +index a1d0985600990..c32df5530b943 100644
13346 +--- a/drivers/mmc/host/via-sdmmc.c
13347 ++++ b/drivers/mmc/host/via-sdmmc.c
13348 +@@ -857,6 +857,9 @@ static void via_sdc_data_isr(struct via_crdr_mmc_host *host, u16 intmask)
13349 + {
13350 + BUG_ON(intmask == 0);
13351 +
13352 ++ if (!host->data)
13353 ++ return;
13354 ++
13355 + if (intmask & VIA_CRDR_SDSTS_DT)
13356 + host->data->error = -ETIMEDOUT;
13357 + else if (intmask & (VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC))
13358 +diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
13359 +index 739cf63ef6e2f..4950d10d3a191 100644
13360 +--- a/drivers/mmc/host/vub300.c
13361 ++++ b/drivers/mmc/host/vub300.c
13362 +@@ -2279,7 +2279,7 @@ static int vub300_probe(struct usb_interface *interface,
13363 + if (retval < 0)
13364 + goto error5;
13365 + retval =
13366 +- usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0),
13367 ++ usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
13368 + SET_ROM_WAIT_STATES,
13369 + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
13370 + firmware_rom_wait_states, 0x0000, NULL, 0, HZ);
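The vub300 fix pairs the pipe direction with the request type: SET_ROM_WAIT_STATES is a host-to-device request with no data stage, so it must use usb_sndctrlpipe() rather than usb_rcvctrlpipe(); recent kernels' URB sanity checks reject a control transfer whose pipe direction contradicts USB_DIR_OUT. The matched form, with a hypothetical request constant:

    	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
    			      EXAMPLE_VENDOR_REQUEST,	/* hypothetical */
    			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
    			      value, 0x0000, NULL, 0, 1000 /* ms timeout */);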
13371 +diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c
13372 +index 549aac00228eb..390f8d719c258 100644
13373 +--- a/drivers/mtd/nand/raw/arasan-nand-controller.c
13374 ++++ b/drivers/mtd/nand/raw/arasan-nand-controller.c
13375 +@@ -273,6 +273,37 @@ static int anfc_pkt_len_config(unsigned int len, unsigned int *steps,
13376 + return 0;
13377 + }
13378 +
13379 ++static int anfc_select_target(struct nand_chip *chip, int target)
13380 ++{
13381 ++ struct anand *anand = to_anand(chip);
13382 ++ struct arasan_nfc *nfc = to_anfc(chip->controller);
13383 ++ int ret;
13384 ++
13385 ++ /* Update the controller timings and the potential ECC configuration */
13386 ++ writel_relaxed(anand->timings, nfc->base + DATA_INTERFACE_REG);
13387 ++
13388 ++ /* Update clock frequency */
13389 ++ if (nfc->cur_clk != anand->clk) {
13390 ++ clk_disable_unprepare(nfc->controller_clk);
13391 ++ ret = clk_set_rate(nfc->controller_clk, anand->clk);
13392 ++ if (ret) {
13393 ++ dev_err(nfc->dev, "Failed to change clock rate\n");
13394 ++ return ret;
13395 ++ }
13396 ++
13397 ++ ret = clk_prepare_enable(nfc->controller_clk);
13398 ++ if (ret) {
13399 ++ dev_err(nfc->dev,
13400 ++ "Failed to re-enable the controller clock\n");
13401 ++ return ret;
13402 ++ }
13403 ++
13404 ++ nfc->cur_clk = anand->clk;
13405 ++ }
13406 ++
13407 ++ return 0;
13408 ++}
13409 ++
13410 + /*
13411 + * When using the embedded hardware ECC engine, the controller is in charge of
13412 + * feeding the engine with, first, the ECC residue present in the data array.
13413 +@@ -401,6 +432,18 @@ static int anfc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
13414 + return 0;
13415 + }
13416 +
13417 ++static int anfc_sel_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
13418 ++ int oob_required, int page)
13419 ++{
13420 ++ int ret;
13421 ++
13422 ++ ret = anfc_select_target(chip, chip->cur_cs);
13423 ++ if (ret)
13424 ++ return ret;
13425 ++
13426 ++ return anfc_read_page_hw_ecc(chip, buf, oob_required, page);
13427 ++};
13428 ++
13429 + static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
13430 + int oob_required, int page)
13431 + {
13432 +@@ -461,6 +504,18 @@ static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
13433 + return ret;
13434 + }
13435 +
13436 ++static int anfc_sel_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
13437 ++ int oob_required, int page)
13438 ++{
13439 ++ int ret;
13440 ++
13441 ++ ret = anfc_select_target(chip, chip->cur_cs);
13442 ++ if (ret)
13443 ++ return ret;
13444 ++
13445 ++ return anfc_write_page_hw_ecc(chip, buf, oob_required, page);
13446 ++};
13447 ++
13448 + /* NAND framework ->exec_op() hooks and related helpers */
13449 + static int anfc_parse_instructions(struct nand_chip *chip,
13450 + const struct nand_subop *subop,
13451 +@@ -753,37 +808,6 @@ static const struct nand_op_parser anfc_op_parser = NAND_OP_PARSER(
13452 + NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
13453 + );
13454 +
13455 +-static int anfc_select_target(struct nand_chip *chip, int target)
13456 +-{
13457 +- struct anand *anand = to_anand(chip);
13458 +- struct arasan_nfc *nfc = to_anfc(chip->controller);
13459 +- int ret;
13460 +-
13461 +- /* Update the controller timings and the potential ECC configuration */
13462 +- writel_relaxed(anand->timings, nfc->base + DATA_INTERFACE_REG);
13463 +-
13464 +- /* Update clock frequency */
13465 +- if (nfc->cur_clk != anand->clk) {
13466 +- clk_disable_unprepare(nfc->controller_clk);
13467 +- ret = clk_set_rate(nfc->controller_clk, anand->clk);
13468 +- if (ret) {
13469 +- dev_err(nfc->dev, "Failed to change clock rate\n");
13470 +- return ret;
13471 +- }
13472 +-
13473 +- ret = clk_prepare_enable(nfc->controller_clk);
13474 +- if (ret) {
13475 +- dev_err(nfc->dev,
13476 +- "Failed to re-enable the controller clock\n");
13477 +- return ret;
13478 +- }
13479 +-
13480 +- nfc->cur_clk = anand->clk;
13481 +- }
13482 +-
13483 +- return 0;
13484 +-}
13485 +-
13486 + static int anfc_check_op(struct nand_chip *chip,
13487 + const struct nand_operation *op)
13488 + {
13489 +@@ -1007,8 +1031,8 @@ static int anfc_init_hw_ecc_controller(struct arasan_nfc *nfc,
13490 + if (!anand->bch)
13491 + return -EINVAL;
13492 +
13493 +- ecc->read_page = anfc_read_page_hw_ecc;
13494 +- ecc->write_page = anfc_write_page_hw_ecc;
13495 ++ ecc->read_page = anfc_sel_read_page_hw_ecc;
13496 ++ ecc->write_page = anfc_sel_write_page_hw_ecc;
13497 +
13498 + return 0;
13499 + }
13500 +diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
13501 +index 79da6b02e2095..f83525a1ab0e6 100644
13502 +--- a/drivers/mtd/nand/raw/marvell_nand.c
13503 ++++ b/drivers/mtd/nand/raw/marvell_nand.c
13504 +@@ -3030,8 +3030,10 @@ static int __maybe_unused marvell_nfc_resume(struct device *dev)
13505 + return ret;
13506 +
13507 + ret = clk_prepare_enable(nfc->reg_clk);
13508 +- if (ret < 0)
13509 ++ if (ret < 0) {
13510 ++ clk_disable_unprepare(nfc->core_clk);
13511 + return ret;
13512 ++ }
13513 +
13514 + /*
13515 + * Reset nfc->selected_chip so the next command will cause the timing
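marvell_nfc_resume() enables two clocks in sequence; if the second (reg_clk) fails to come up, the first (core_clk) must be disabled again or its enable count leaks across the failed resume. The standard two-clock unwind:

    	ret = clk_prepare_enable(nfc->core_clk);
    	if (ret < 0)
    		return ret;

    	ret = clk_prepare_enable(nfc->reg_clk);
    	if (ret < 0) {
    		clk_disable_unprepare(nfc->core_clk);	/* undo the first enable */
    		return ret;
    	}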
13516 +diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
13517 +index 17f63f95f4a28..54ae540bc66b4 100644
13518 +--- a/drivers/mtd/nand/spi/core.c
13519 ++++ b/drivers/mtd/nand/spi/core.c
13520 +@@ -290,6 +290,8 @@ static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
13521 + {
13522 + struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
13523 + struct spinand_device *spinand = nand_to_spinand(nand);
13524 ++ struct mtd_info *mtd = spinand_to_mtd(spinand);
13525 ++ int ret;
13526 +
13527 + if (req->mode == MTD_OPS_RAW)
13528 + return 0;
13529 +@@ -299,7 +301,13 @@ static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
13530 + return 0;
13531 +
13532 + /* Finish a page write: check the status, report errors/bitflips */
13533 +- return spinand_check_ecc_status(spinand, engine_conf->status);
13534 ++ ret = spinand_check_ecc_status(spinand, engine_conf->status);
13535 ++ if (ret == -EBADMSG)
13536 ++ mtd->ecc_stats.failed++;
13537 ++ else if (ret > 0)
13538 ++ mtd->ecc_stats.corrected += ret;
13539 ++
13540 ++ return ret;
13541 + }
13542 +
13543 + static struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = {
13544 +@@ -620,13 +628,10 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
13545 + if (ret < 0 && ret != -EBADMSG)
13546 + break;
13547 +
13548 +- if (ret == -EBADMSG) {
13549 ++ if (ret == -EBADMSG)
13550 + ecc_failed = true;
13551 +- mtd->ecc_stats.failed++;
13552 +- } else {
13553 +- mtd->ecc_stats.corrected += ret;
13554 ++ else
13555 + max_bitflips = max_t(unsigned int, max_bitflips, ret);
13556 +- }
13557 +
13558 + ret = 0;
13559 + ops->retlen += iter.req.datalen;
13560 +diff --git a/drivers/mtd/parsers/qcomsmempart.c b/drivers/mtd/parsers/qcomsmempart.c
13561 +index d9083308f6ba6..06a818cd2433f 100644
13562 +--- a/drivers/mtd/parsers/qcomsmempart.c
13563 ++++ b/drivers/mtd/parsers/qcomsmempart.c
13564 +@@ -159,6 +159,15 @@ out_free_parts:
13565 + return ret;
13566 + }
13567 +
13568 ++static void parse_qcomsmem_cleanup(const struct mtd_partition *pparts,
13569 ++ int nr_parts)
13570 ++{
13571 ++ int i;
13572 ++
13573 ++ for (i = 0; i < nr_parts; i++)
13574 ++ kfree(pparts[i].name);
13575 ++}
13576 ++
13577 + static const struct of_device_id qcomsmem_of_match_table[] = {
13578 + { .compatible = "qcom,smem-part" },
13579 + {},
13580 +@@ -167,6 +176,7 @@ MODULE_DEVICE_TABLE(of, qcomsmem_of_match_table);
13581 +
13582 + static struct mtd_part_parser mtd_parser_qcomsmem = {
13583 + .parse_fn = parse_qcomsmem_part,
13584 ++ .cleanup = parse_qcomsmem_cleanup,
13585 + .name = "qcomsmem",
13586 + .of_match_table = qcomsmem_of_match_table,
13587 + };
13588 +diff --git a/drivers/mtd/parsers/redboot.c b/drivers/mtd/parsers/redboot.c
13589 +index 91146bdc47132..3ccd6363ee8cb 100644
13590 +--- a/drivers/mtd/parsers/redboot.c
13591 ++++ b/drivers/mtd/parsers/redboot.c
13592 +@@ -45,6 +45,7 @@ static inline int redboot_checksum(struct fis_image_desc *img)
13593 + static void parse_redboot_of(struct mtd_info *master)
13594 + {
13595 + struct device_node *np;
13596 ++ struct device_node *npart;
13597 + u32 dirblock;
13598 + int ret;
13599 +
13600 +@@ -52,7 +53,11 @@ static void parse_redboot_of(struct mtd_info *master)
13601 + if (!np)
13602 + return;
13603 +
13604 +- ret = of_property_read_u32(np, "fis-index-block", &dirblock);
13605 ++ npart = of_get_child_by_name(np, "partitions");
13606 ++ if (!npart)
13607 ++ return;
13608 ++
13609 ++ ret = of_property_read_u32(npart, "fis-index-block", &dirblock);
13610 + if (ret)
13611 + return;
13612 +
13613 +diff --git a/drivers/mtd/spi-nor/otp.c b/drivers/mtd/spi-nor/otp.c
13614 +index fcf38d2603450..d8e68120a4b11 100644
13615 +--- a/drivers/mtd/spi-nor/otp.c
13616 ++++ b/drivers/mtd/spi-nor/otp.c
13617 +@@ -40,7 +40,6 @@ int spi_nor_otp_read_secr(struct spi_nor *nor, loff_t addr, size_t len, u8 *buf)
13618 + rdesc = nor->dirmap.rdesc;
13619 +
13620 + nor->read_opcode = SPINOR_OP_RSECR;
13621 +- nor->addr_width = 3;
13622 + nor->read_dummy = 8;
13623 + nor->read_proto = SNOR_PROTO_1_1_1;
13624 + nor->dirmap.rdesc = NULL;
13625 +@@ -84,7 +83,6 @@ int spi_nor_otp_write_secr(struct spi_nor *nor, loff_t addr, size_t len,
13626 + wdesc = nor->dirmap.wdesc;
13627 +
13628 + nor->program_opcode = SPINOR_OP_PSECR;
13629 +- nor->addr_width = 3;
13630 + nor->write_proto = SNOR_PROTO_1_1_1;
13631 + nor->dirmap.wdesc = NULL;
13632 +
13633 +@@ -240,6 +238,29 @@ out:
13634 + return ret;
13635 + }
13636 +
13637 ++static int spi_nor_mtd_otp_range_is_locked(struct spi_nor *nor, loff_t ofs,
13638 ++ size_t len)
13639 ++{
13640 ++ const struct spi_nor_otp_ops *ops = nor->params->otp.ops;
13641 ++ unsigned int region;
13642 ++ int locked;
13643 ++
13644 ++ /*
13645 ++ * If any of the affected OTP regions are locked the entire range is
13646 ++ * considered locked.
13647 ++ */
13648 ++ for (region = spi_nor_otp_offset_to_region(nor, ofs);
13649 ++ region <= spi_nor_otp_offset_to_region(nor, ofs + len - 1);
13650 ++ region++) {
13651 ++ locked = ops->is_locked(nor, region);
13652 ++ /* take the branch if it is locked or in case of an error */
13653 ++ if (locked)
13654 ++ return locked;
13655 ++ }
13656 ++
13657 ++ return 0;
13658 ++}
13659 ++
13660 + static int spi_nor_mtd_otp_read_write(struct mtd_info *mtd, loff_t ofs,
13661 + size_t total_len, size_t *retlen,
13662 + const u8 *buf, bool is_write)
13663 +@@ -255,14 +276,26 @@ static int spi_nor_mtd_otp_read_write(struct mtd_info *mtd, loff_t ofs,
13664 + if (ofs < 0 || ofs >= spi_nor_otp_size(nor))
13665 + return 0;
13666 +
13667 ++ /* don't access beyond the end */
13668 ++ total_len = min_t(size_t, total_len, spi_nor_otp_size(nor) - ofs);
13669 ++
13670 ++ if (!total_len)
13671 ++ return 0;
13672 ++
13673 + ret = spi_nor_lock_and_prep(nor);
13674 + if (ret)
13675 + return ret;
13676 +
13677 +- /* don't access beyond the end */
13678 +- total_len = min_t(size_t, total_len, spi_nor_otp_size(nor) - ofs);
13679 ++ if (is_write) {
13680 ++ ret = spi_nor_mtd_otp_range_is_locked(nor, ofs, total_len);
13681 ++ if (ret < 0) {
13682 ++ goto out;
13683 ++ } else if (ret) {
13684 ++ ret = -EROFS;
13685 ++ goto out;
13686 ++ }
13687 ++ }
13688 +
13689 +- *retlen = 0;
13690 + while (total_len) {
13691 + /*
13692 + * The OTP regions are mapped into a contiguous area starting
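Two separate fixes in the OTP path: the length clamp is hoisted above spi_nor_lock_and_prep() so that a request past the end of the OTP area (or one of zero length after clamping) returns before the lock is ever taken, and writes are now refused with -EROFS when spi_nor_mtd_otp_range_is_locked() reports that any region overlapping the range is locked or errors out. The hoisted clamp:

    	/* don't access beyond the end; bail before taking the lock */
    	total_len = min_t(size_t, total_len, spi_nor_otp_size(nor) - ofs);
    	if (!total_len)
    		return 0;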
13693 +diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
13694 +index 74dc8e249faa3..9b12a8e110f43 100644
13695 +--- a/drivers/net/Kconfig
13696 ++++ b/drivers/net/Kconfig
13697 +@@ -431,6 +431,7 @@ config VSOCKMON
13698 + config MHI_NET
13699 + tristate "MHI network driver"
13700 + depends on MHI_BUS
13701 ++ select WWAN
13702 + help
13703 + This is the network driver for MHI bus. It can be used with
13704 + QCOM based WWAN modems (like SDX55). Say Y or M.
13705 +diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
13706 +index 00847cbaf7b62..d08718e98e110 100644
13707 +--- a/drivers/net/can/peak_canfd/peak_canfd.c
13708 ++++ b/drivers/net/can/peak_canfd/peak_canfd.c
13709 +@@ -351,8 +351,8 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,
13710 + return err;
13711 + }
13712 +
13713 +- /* start network queue (echo_skb array is empty) */
13714 +- netif_start_queue(ndev);
13715 ++ /* wake network queue up (echo_skb array is empty) */
13716 ++ netif_wake_queue(ndev);
13717 +
13718 + return 0;
13719 + }
13720 +diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
13721 +index 5af69787d9d5d..0a37af4a3fa40 100644
13722 +--- a/drivers/net/can/usb/ems_usb.c
13723 ++++ b/drivers/net/can/usb/ems_usb.c
13724 +@@ -1053,7 +1053,6 @@ static void ems_usb_disconnect(struct usb_interface *intf)
13725 +
13726 + if (dev) {
13727 + unregister_netdev(dev->netdev);
13728 +- free_candev(dev->netdev);
13729 +
13730 + unlink_all_urbs(dev);
13731 +
13732 +@@ -1061,6 +1060,8 @@ static void ems_usb_disconnect(struct usb_interface *intf)
13733 +
13734 + kfree(dev->intr_in_buffer);
13735 + kfree(dev->tx_msg_buffer);
13736 ++
13737 ++ free_candev(dev->netdev);
13738 + }
13739 + }
13740 +
13741 +diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
13742 +index eca285aaf72f8..961fa6b75cad8 100644
13743 +--- a/drivers/net/dsa/mv88e6xxx/chip.c
13744 ++++ b/drivers/net/dsa/mv88e6xxx/chip.c
13745 +@@ -1618,9 +1618,6 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
13746 + struct mv88e6xxx_vtu_entry vlan;
13747 + int i, err;
13748 +
13749 +- if (!vid)
13750 +- return -EOPNOTSUPP;
13751 +-
13752 + /* DSA and CPU ports have to be members of multiple vlans */
13753 + if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
13754 + return 0;
13755 +@@ -2109,6 +2106,9 @@ static int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
13756 + u8 member;
13757 + int err;
13758 +
13759 ++ if (!vlan->vid)
13760 ++ return 0;
13761 ++
13762 + err = mv88e6xxx_port_vlan_prepare(ds, port, vlan);
13763 + if (err)
13764 + return err;
13765 +diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
13766 +index b88d9ef45a1f1..ebe4d33cda276 100644
13767 +--- a/drivers/net/dsa/sja1105/sja1105_main.c
13768 ++++ b/drivers/net/dsa/sja1105/sja1105_main.c
13769 +@@ -1798,6 +1798,12 @@ static int sja1105_reload_cbs(struct sja1105_private *priv)
13770 + {
13771 + int rc = 0, i;
13772 +
13773 ++ /* The credit based shapers are only allocated if
13774 ++ * CONFIG_NET_SCH_CBS is enabled.
13775 ++ */
13776 ++ if (!priv->cbs)
13777 ++ return 0;
13778 ++
13779 + for (i = 0; i < priv->info->num_cbs_shapers; i++) {
13780 + struct sja1105_cbs_entry *cbs = &priv->cbs[i];
13781 +
13782 +diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
13783 +index d77fafbc15301..c560ad06f0be3 100644
13784 +--- a/drivers/net/ethernet/aeroflex/greth.c
13785 ++++ b/drivers/net/ethernet/aeroflex/greth.c
13786 +@@ -1539,10 +1539,11 @@ static int greth_of_remove(struct platform_device *of_dev)
13787 + mdiobus_unregister(greth->mdio);
13788 +
13789 + unregister_netdev(ndev);
13790 +- free_netdev(ndev);
13791 +
13792 + of_iounmap(&of_dev->resource[0], greth->regs, resource_size(&of_dev->resource[0]));
13793 +
13794 ++ free_netdev(ndev);
13795 ++
13796 + return 0;
13797 + }
13798 +
13799 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.h b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.h
13800 +index f5fba8b8cdea9..a47e2710487ec 100644
13801 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.h
13802 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.h
13803 +@@ -91,7 +91,7 @@ struct aq_macsec_txsc {
13804 + u32 hw_sc_idx;
13805 + unsigned long tx_sa_idx_busy;
13806 + const struct macsec_secy *sw_secy;
13807 +- u8 tx_sa_key[MACSEC_NUM_AN][MACSEC_KEYID_LEN];
13808 ++ u8 tx_sa_key[MACSEC_NUM_AN][MACSEC_MAX_KEY_LEN];
13809 + struct aq_macsec_tx_sc_stats stats;
13810 + struct aq_macsec_tx_sa_stats tx_sa_stats[MACSEC_NUM_AN];
13811 + };
13812 +@@ -101,7 +101,7 @@ struct aq_macsec_rxsc {
13813 + unsigned long rx_sa_idx_busy;
13814 + const struct macsec_secy *sw_secy;
13815 + const struct macsec_rx_sc *sw_rxsc;
13816 +- u8 rx_sa_key[MACSEC_NUM_AN][MACSEC_KEYID_LEN];
13817 ++ u8 rx_sa_key[MACSEC_NUM_AN][MACSEC_MAX_KEY_LEN];
13818 + struct aq_macsec_rx_sa_stats rx_sa_stats[MACSEC_NUM_AN];
13819 + };
13820 +
13821 +diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
13822 +index 60d908507f51d..02a569500234c 100644
13823 +--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
13824 ++++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
13825 +@@ -174,9 +174,6 @@ static int bcm4908_dma_alloc_buf_descs(struct bcm4908_enet *enet,
13826 + if (!ring->slots)
13827 + goto err_free_buf_descs;
13828 +
13829 +- ring->read_idx = 0;
13830 +- ring->write_idx = 0;
13831 +-
13832 + return 0;
13833 +
13834 + err_free_buf_descs:
13835 +@@ -304,6 +301,9 @@ static void bcm4908_enet_dma_ring_init(struct bcm4908_enet *enet,
13836 +
13837 + enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_BASE_DESC_PTR,
13838 + (uint32_t)ring->dma_addr);
13839 ++
13840 ++ ring->read_idx = 0;
13841 ++ ring->write_idx = 0;
13842 + }
13843 +
13844 + static void bcm4908_enet_dma_uninit(struct bcm4908_enet *enet)
13845 +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
13846 +index fcca023f22e54..41f7f078cd27c 100644
13847 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
13848 ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
13849 +@@ -4296,3 +4296,4 @@ MODULE_AUTHOR("Broadcom Corporation");
13850 + MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
13851 + MODULE_ALIAS("platform:bcmgenet");
13852 + MODULE_LICENSE("GPL");
13853 ++MODULE_SOFTDEP("pre: mdio-bcm-unimac");
13854 +diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
13855 +index 701c12c9e0337..649c5c429bd7c 100644
13856 +--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
13857 ++++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
13858 +@@ -550,7 +550,7 @@ int be_process_mcc(struct be_adapter *adapter)
13859 + int num = 0, status = 0;
13860 + struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
13861 +
13862 +- spin_lock_bh(&adapter->mcc_cq_lock);
13863 ++ spin_lock(&adapter->mcc_cq_lock);
13864 +
13865 + while ((compl = be_mcc_compl_get(adapter))) {
13866 + if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
13867 +@@ -566,7 +566,7 @@ int be_process_mcc(struct be_adapter *adapter)
13868 + if (num)
13869 + be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
13870 +
13871 +- spin_unlock_bh(&adapter->mcc_cq_lock);
13872 ++ spin_unlock(&adapter->mcc_cq_lock);
13873 + return status;
13874 + }
13875 +
13876 +@@ -581,7 +581,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
13877 + if (be_check_error(adapter, BE_ERROR_ANY))
13878 + return -EIO;
13879 +
13880 ++ local_bh_disable();
13881 + status = be_process_mcc(adapter);
13882 ++ local_bh_enable();
13883 +
13884 + if (atomic_read(&mcc_obj->q.used) == 0)
13885 + break;
13886 +diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
13887 +index 7968568bbe214..361c1c87c1830 100644
13888 +--- a/drivers/net/ethernet/emulex/benet/be_main.c
13889 ++++ b/drivers/net/ethernet/emulex/benet/be_main.c
13890 +@@ -5501,7 +5501,9 @@ static void be_worker(struct work_struct *work)
13891 + * mcc completions
13892 + */
13893 + if (!netif_running(adapter->netdev)) {
13894 ++ local_bh_disable();
13895 + be_process_mcc(adapter);
13896 ++ local_bh_enable();
13897 + goto reschedule;
13898 + }
13899 +
13900 +diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
13901 +index e3954d8835e71..49957598301b5 100644
13902 +--- a/drivers/net/ethernet/ezchip/nps_enet.c
13903 ++++ b/drivers/net/ethernet/ezchip/nps_enet.c
13904 +@@ -607,7 +607,7 @@ static s32 nps_enet_probe(struct platform_device *pdev)
13905 +
13906 + /* Get IRQ number */
13907 + priv->irq = platform_get_irq(pdev, 0);
13908 +- if (!priv->irq) {
13909 ++ if (priv->irq < 0) {
13910 + dev_err(dev, "failed to retrieve <irq Rx-Tx> value from device tree\n");
13911 + err = -ENODEV;
13912 + goto out_netdev;
13913 +@@ -642,8 +642,8 @@ static s32 nps_enet_remove(struct platform_device *pdev)
13914 + struct nps_enet_priv *priv = netdev_priv(ndev);
13915 +
13916 + unregister_netdev(ndev);
13917 +- free_netdev(ndev);
13918 + netif_napi_del(&priv->napi);
13919 ++ free_netdev(ndev);
13920 +
13921 + return 0;
13922 + }
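platform_get_irq() returns a negative errno on failure, so testing '!priv->irq' missed real errors; the probe now checks 'priv->irq < 0'. The remove path is also reordered so free_netdev() comes after netif_napi_del(), which still dereferences the private area embedded in the net_device. The corrected probe check:

    	priv->irq = platform_get_irq(pdev, 0);
    	if (priv->irq < 0) {
    		err = -ENODEV;	/* the errno held in priv->irq could be propagated instead */
    		goto out_netdev;
    	}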
13923 +diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
13924 +index 04421aec2dfd6..11dbbfd38770c 100644
13925 +--- a/drivers/net/ethernet/faraday/ftgmac100.c
13926 ++++ b/drivers/net/ethernet/faraday/ftgmac100.c
13927 +@@ -1830,14 +1830,17 @@ static int ftgmac100_probe(struct platform_device *pdev)
13928 + if (np && of_get_property(np, "use-ncsi", NULL)) {
13929 + if (!IS_ENABLED(CONFIG_NET_NCSI)) {
13930 + dev_err(&pdev->dev, "NCSI stack not enabled\n");
13931 ++ err = -EINVAL;
13932 + goto err_phy_connect;
13933 + }
13934 +
13935 + dev_info(&pdev->dev, "Using NCSI interface\n");
13936 + priv->use_ncsi = true;
13937 + priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler);
13938 +- if (!priv->ndev)
13939 ++ if (!priv->ndev) {
13940 ++ err = -EINVAL;
13941 + goto err_phy_connect;
13942 ++ }
13943 + } else if (np && of_get_property(np, "phy-handle", NULL)) {
13944 + struct phy_device *phy;
13945 +
13946 +@@ -1856,6 +1859,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
13947 + &ftgmac100_adjust_link);
13948 + if (!phy) {
13949 + dev_err(&pdev->dev, "Failed to connect to phy\n");
13950 ++ err = -EINVAL;
13951 + goto err_phy_connect;
13952 + }
13953 +
13954 +diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
13955 +index bbc423e931223..79cefe85a799f 100644
13956 +--- a/drivers/net/ethernet/google/gve/gve_main.c
13957 ++++ b/drivers/net/ethernet/google/gve/gve_main.c
13958 +@@ -1295,8 +1295,8 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
13959 +
13960 + gve_write_version(&reg_bar->driver_version);
13961 + /* Get max queues to alloc etherdev */
13962 +- max_rx_queues = ioread32be(&reg_bar->max_tx_queues);
13963 +- max_tx_queues = ioread32be(&reg_bar->max_rx_queues);
13964 ++ max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
13965 ++ max_rx_queues = ioread32be(&reg_bar->max_rx_queues);
13966 + /* Alloc and setup the netdev and priv */
13967 + dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
13968 + if (!dev) {
13969 +diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
13970 +index ea55314b209db..d105bfbc7c1c0 100644
13971 +--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
13972 ++++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
13973 +@@ -2618,10 +2618,8 @@ static int ehea_restart_qps(struct net_device *dev)
13974 + u16 dummy16 = 0;
13975 +
13976 + cb0 = (void *)get_zeroed_page(GFP_KERNEL);
13977 +- if (!cb0) {
13978 +- ret = -ENOMEM;
13979 +- goto out;
13980 +- }
13981 ++ if (!cb0)
13982 ++ return -ENOMEM;
13983 +
13984 + for (i = 0; i < (port->num_def_qps); i++) {
13985 + struct ehea_port_res *pr = &port->port_res[i];
13986 +@@ -2641,6 +2639,7 @@ static int ehea_restart_qps(struct net_device *dev)
13987 + cb0);
13988 + if (hret != H_SUCCESS) {
13989 + netdev_err(dev, "query_ehea_qp failed (1)\n");
13990 ++ ret = -EFAULT;
13991 + goto out;
13992 + }
13993 +
13994 +@@ -2653,6 +2652,7 @@ static int ehea_restart_qps(struct net_device *dev)
13995 + &dummy64, &dummy16, &dummy16);
13996 + if (hret != H_SUCCESS) {
13997 + netdev_err(dev, "modify_ehea_qp failed (1)\n");
13998 ++ ret = -EFAULT;
13999 + goto out;
14000 + }
14001 +
14002 +@@ -2661,6 +2661,7 @@ static int ehea_restart_qps(struct net_device *dev)
14003 + cb0);
14004 + if (hret != H_SUCCESS) {
14005 + netdev_err(dev, "query_ehea_qp failed (2)\n");
14006 ++ ret = -EFAULT;
14007 + goto out;
14008 + }
14009 +
14010 +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
14011 +index 5788bb956d733..ede65b32f8212 100644
14012 +--- a/drivers/net/ethernet/ibm/ibmvnic.c
14013 ++++ b/drivers/net/ethernet/ibm/ibmvnic.c
14014 +@@ -106,6 +106,8 @@ static void release_crq_queue(struct ibmvnic_adapter *);
14015 + static int __ibmvnic_set_mac(struct net_device *, u8 *);
14016 + static int init_crq_queue(struct ibmvnic_adapter *adapter);
14017 + static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
14018 ++static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
14019 ++ struct ibmvnic_sub_crq_queue *tx_scrq);
14020 +
14021 + struct ibmvnic_stat {
14022 + char name[ETH_GSTRING_LEN];
14023 +@@ -209,12 +211,11 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
14024 + mutex_lock(&adapter->fw_lock);
14025 + adapter->fw_done_rc = 0;
14026 + reinit_completion(&adapter->fw_done);
14027 +- rc = send_request_map(adapter, ltb->addr,
14028 +- ltb->size, ltb->map_id);
14029 ++
14030 ++ rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
14031 + if (rc) {
14032 +- dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
14033 +- mutex_unlock(&adapter->fw_lock);
14034 +- return rc;
14035 ++ dev_err(dev, "send_request_map failed, rc = %d\n", rc);
14036 ++ goto out;
14037 + }
14038 +
14039 + rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
14040 +@@ -222,20 +223,23 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
14041 + dev_err(dev,
14042 + "Long term map request aborted or timed out,rc = %d\n",
14043 + rc);
14044 +- dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
14045 +- mutex_unlock(&adapter->fw_lock);
14046 +- return rc;
14047 ++ goto out;
14048 + }
14049 +
14050 + if (adapter->fw_done_rc) {
14051 + dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
14052 + adapter->fw_done_rc);
14053 ++ rc = -1;
14054 ++ goto out;
14055 ++ }
14056 ++ rc = 0;
14057 ++out:
14058 ++ if (rc) {
14059 + dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
14060 +- mutex_unlock(&adapter->fw_lock);
14061 +- return -1;
14062 ++ ltb->buff = NULL;
14063 + }
14064 + mutex_unlock(&adapter->fw_lock);
14065 +- return 0;
14066 ++ return rc;
14067 + }
14068 +
14069 + static void free_long_term_buff(struct ibmvnic_adapter *adapter,
14070 +@@ -255,14 +259,44 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
14071 + adapter->reset_reason != VNIC_RESET_TIMEOUT)
14072 + send_request_unmap(adapter, ltb->map_id);
14073 + dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
14074 ++ ltb->buff = NULL;
14075 ++ ltb->map_id = 0;
14076 + }
14077 +
14078 +-static int reset_long_term_buff(struct ibmvnic_long_term_buff *ltb)
14079 ++static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
14080 ++ struct ibmvnic_long_term_buff *ltb)
14081 + {
14082 +- if (!ltb->buff)
14083 +- return -EINVAL;
14084 ++ struct device *dev = &adapter->vdev->dev;
14085 ++ int rc;
14086 +
14087 + memset(ltb->buff, 0, ltb->size);
14088 ++
14089 ++ mutex_lock(&adapter->fw_lock);
14090 ++ adapter->fw_done_rc = 0;
14091 ++
14092 ++ reinit_completion(&adapter->fw_done);
14093 ++ rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
14094 ++ if (rc) {
14095 ++ mutex_unlock(&adapter->fw_lock);
14096 ++ return rc;
14097 ++ }
14098 ++
14099 ++ rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
14100 ++ if (rc) {
14101 ++ dev_info(dev,
14102 ++ "Reset failed, long term map request timed out or aborted\n");
14103 ++ mutex_unlock(&adapter->fw_lock);
14104 ++ return rc;
14105 ++ }
14106 ++
14107 ++ if (adapter->fw_done_rc) {
14108 ++ dev_info(dev,
14109 ++ "Reset failed, attempting to free and reallocate buffer\n");
14110 ++ free_long_term_buff(adapter, ltb);
14111 ++ mutex_unlock(&adapter->fw_lock);
14112 ++ return alloc_long_term_buff(adapter, ltb, ltb->size);
14113 ++ }
14114 ++ mutex_unlock(&adapter->fw_lock);
14115 + return 0;
14116 + }
14117 +
14118 +@@ -298,7 +332,14 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
14119 +
14120 + rx_scrq = adapter->rx_scrq[pool->index];
14121 + ind_bufp = &rx_scrq->ind_buf;
14122 +- for (i = 0; i < count; ++i) {
14123 ++
14124 ++ /* netdev_alloc_skb() could have failed after we saved a few skbs
14125 ++ * in the indir_buf and we would not have sent them to VIOS yet.
14126 ++ * To account for them, start the loop at ind_bufp->index rather
14127 ++ * than 0. If we pushed all the skbs to VIOS, ind_bufp->index will
14128 ++ * be 0.
14129 ++ */
14130 ++ for (i = ind_bufp->index; i < count; ++i) {
14131 + skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
14132 + if (!skb) {
14133 + dev_err(dev, "Couldn't replenish rx buff\n");
14134 +@@ -484,7 +525,8 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
14135 + rx_pool->size *
14136 + rx_pool->buff_size);
14137 + } else {
14138 +- rc = reset_long_term_buff(&rx_pool->long_term_buff);
14139 ++ rc = reset_long_term_buff(adapter,
14140 ++ &rx_pool->long_term_buff);
14141 + }
14142 +
14143 + if (rc)
14144 +@@ -607,11 +649,12 @@ static int init_rx_pools(struct net_device *netdev)
14145 + return 0;
14146 + }
14147 +
14148 +-static int reset_one_tx_pool(struct ibmvnic_tx_pool *tx_pool)
14149 ++static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
14150 ++ struct ibmvnic_tx_pool *tx_pool)
14151 + {
14152 + int rc, i;
14153 +
14154 +- rc = reset_long_term_buff(&tx_pool->long_term_buff);
14155 ++ rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
14156 + if (rc)
14157 + return rc;
14158 +
14159 +@@ -638,10 +681,11 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter)
14160 +
14161 + tx_scrqs = adapter->num_active_tx_pools;
14162 + for (i = 0; i < tx_scrqs; i++) {
14163 +- rc = reset_one_tx_pool(&adapter->tso_pool[i]);
14164 ++ ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
14165 ++ rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
14166 + if (rc)
14167 + return rc;
14168 +- rc = reset_one_tx_pool(&adapter->tx_pool[i]);
14169 ++ rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
14170 + if (rc)
14171 + return rc;
14172 + }
14173 +@@ -734,8 +778,11 @@ static int init_tx_pools(struct net_device *netdev)
14174 +
14175 + adapter->tso_pool = kcalloc(tx_subcrqs,
14176 + sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
14177 +- if (!adapter->tso_pool)
14178 ++ if (!adapter->tso_pool) {
14179 ++ kfree(adapter->tx_pool);
14180 ++ adapter->tx_pool = NULL;
14181 + return -1;
14182 ++ }
14183 +
14184 + adapter->num_active_tx_pools = tx_subcrqs;
14185 +
14186 +@@ -1180,6 +1227,11 @@ static int __ibmvnic_open(struct net_device *netdev)
14187 +
14188 + netif_tx_start_all_queues(netdev);
14189 +
14190 ++ if (prev_state == VNIC_CLOSED) {
14191 ++ for (i = 0; i < adapter->req_rx_queues; i++)
14192 ++ napi_schedule(&adapter->napi[i]);
14193 ++ }
14194 ++
14195 + adapter->state = VNIC_OPEN;
14196 + return rc;
14197 + }
14198 +@@ -1583,7 +1635,8 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
14199 + ind_bufp->index = 0;
14200 + if (atomic_sub_return(entries, &tx_scrq->used) <=
14201 + (adapter->req_tx_entries_per_subcrq / 2) &&
14202 +- __netif_subqueue_stopped(adapter->netdev, queue_num)) {
14203 ++ __netif_subqueue_stopped(adapter->netdev, queue_num) &&
14204 ++ !test_bit(0, &adapter->resetting)) {
14205 + netif_wake_subqueue(adapter->netdev, queue_num);
14206 + netdev_dbg(adapter->netdev, "Started queue %d\n",
14207 + queue_num);
14208 +@@ -1676,7 +1729,6 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
14209 + tx_send_failed++;
14210 + tx_dropped++;
14211 + ret = NETDEV_TX_OK;
14212 +- ibmvnic_tx_scrq_flush(adapter, tx_scrq);
14213 + goto out;
14214 + }
14215 +
14216 +@@ -3140,6 +3192,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
14217 +
14218 + netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
14219 + i);
14220 ++ ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
14221 + if (adapter->tx_scrq[i]->irq) {
14222 + free_irq(adapter->tx_scrq[i]->irq,
14223 + adapter->tx_scrq[i]);
14224 +diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
14225 +index 88e9035b75cf7..dc0ded7e5e614 100644
14226 +--- a/drivers/net/ethernet/intel/e1000e/netdev.c
14227 ++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
14228 +@@ -5223,18 +5223,20 @@ static void e1000_watchdog_task(struct work_struct *work)
14229 + pm_runtime_resume(netdev->dev.parent);
14230 +
14231 + /* Checking if MAC is in DMoff state*/
14232 +- pcim_state = er32(STATUS);
14233 +- while (pcim_state & E1000_STATUS_PCIM_STATE) {
14234 +- if (tries++ == dmoff_exit_timeout) {
14235 +- e_dbg("Error in exiting dmoff\n");
14236 +- break;
14237 +- }
14238 +- usleep_range(10000, 20000);
14239 ++ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
14240 + pcim_state = er32(STATUS);
14241 +-
14242 +- /* Checking if MAC exited DMoff state */
14243 +- if (!(pcim_state & E1000_STATUS_PCIM_STATE))
14244 +- e1000_phy_hw_reset(&adapter->hw);
14245 ++ while (pcim_state & E1000_STATUS_PCIM_STATE) {
14246 ++ if (tries++ == dmoff_exit_timeout) {
14247 ++ e_dbg("Error in exiting dmoff\n");
14248 ++ break;
14249 ++ }
14250 ++ usleep_range(10000, 20000);
14251 ++ pcim_state = er32(STATUS);
14252 ++
14253 ++ /* Checking if MAC exited DMoff state */
14254 ++ if (!(pcim_state & E1000_STATUS_PCIM_STATE))
14255 ++ e1000_phy_hw_reset(&adapter->hw);
14256 ++ }
14257 + }
14258 +
14259 + /* update snapshot of PHY registers on LSC */
14260 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
14261 +index ccd5b9486ea98..3e822bad48513 100644
14262 +--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
14263 ++++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
14264 +@@ -1262,8 +1262,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
14265 + if (ethtool_link_ksettings_test_link_mode(&safe_ks,
14266 + supported,
14267 + Autoneg) &&
14268 +- hw->phy.link_info.phy_type !=
14269 +- I40E_PHY_TYPE_10GBASE_T) {
14270 ++ hw->phy.media_type != I40E_MEDIA_TYPE_BASET) {
14271 + netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
14272 + err = -EINVAL;
14273 + goto done;
14274 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
14275 +index 704e474879c5b..f9fe500d4ec44 100644
14276 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
14277 ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
14278 +@@ -32,7 +32,7 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
14279 + static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
14280 + static int i40e_add_vsi(struct i40e_vsi *vsi);
14281 + static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
14282 +-static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
14283 ++static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired);
14284 + static int i40e_setup_misc_vector(struct i40e_pf *pf);
14285 + static void i40e_determine_queue_usage(struct i40e_pf *pf);
14286 + static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
14287 +@@ -8703,6 +8703,8 @@ int i40e_vsi_open(struct i40e_vsi *vsi)
14288 + dev_driver_string(&pf->pdev->dev),
14289 + dev_name(&pf->pdev->dev));
14290 + err = i40e_vsi_request_irq(vsi, int_name);
14291 ++ if (err)
14292 ++ goto err_setup_rx;
14293 +
14294 + } else {
14295 + err = -EINVAL;
14296 +@@ -10569,7 +10571,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
14297 + #endif /* CONFIG_I40E_DCB */
14298 + if (!lock_acquired)
14299 + rtnl_lock();
14300 +- ret = i40e_setup_pf_switch(pf, reinit);
14301 ++ ret = i40e_setup_pf_switch(pf, reinit, true);
14302 + if (ret)
14303 + goto end_unlock;
14304 +
14305 +@@ -14627,10 +14629,11 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
14306 + * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
14307 + * @pf: board private structure
14308 + * @reinit: if the Main VSI needs to re-initialized.
14309 ++ * @lock_acquired: indicates whether or not the lock has been acquired
14310 + *
14311 + * Returns 0 on success, negative value on failure
14312 + **/
14313 +-static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
14314 ++static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired)
14315 + {
14316 + u16 flags = 0;
14317 + int ret;
14318 +@@ -14732,9 +14735,15 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
14319 +
14320 + i40e_ptp_init(pf);
14321 +
14322 ++ if (!lock_acquired)
14323 ++ rtnl_lock();
14324 ++
14325 + /* repopulate tunnel port filters */
14326 + udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev);
14327 +
14328 ++ if (!lock_acquired)
14329 ++ rtnl_unlock();
14330 ++
14331 + return ret;
14332 + }
14333 +
14334 +@@ -15528,7 +15537,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
14335 + pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
14336 + }
14337 + #endif
14338 +- err = i40e_setup_pf_switch(pf, false);
14339 ++ err = i40e_setup_pf_switch(pf, false, false);
14340 + if (err) {
14341 + dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
14342 + goto err_vsis;
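udp_tunnel_nic_reset_ntf() must run under the RTNL lock, but i40e_setup_pf_switch() is reached both from probe (no lock held) and from the rebuild path (which may already hold it), so the function grows a lock_acquired flag and takes rtnl only when the caller has not. The conditional-locking idiom, with a hypothetical function name:

    static int do_switch_setup(struct i40e_pf *pf, bool lock_acquired)
    {
    	if (!lock_acquired)
    		rtnl_lock();

    	/* ... work that requires rtnl ... */

    	if (!lock_acquired)
    		rtnl_unlock();
    	return 0;
    }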
14343 +diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
14344 +index d39c7639cdbab..b3041fe6c0aed 100644
14345 +--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
14346 ++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
14347 +@@ -7588,6 +7588,8 @@ static int mvpp2_probe(struct platform_device *pdev)
14348 + return 0;
14349 +
14350 + err_port_probe:
14351 ++ fwnode_handle_put(port_fwnode);
14352 ++
14353 + i = 0;
14354 + fwnode_for_each_available_child_node(fwnode, port_fwnode) {
14355 + if (priv->port_list[i])
14356 +diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
14357 +index e967867828d89..9b48ae4bac39f 100644
14358 +--- a/drivers/net/ethernet/marvell/pxa168_eth.c
14359 ++++ b/drivers/net/ethernet/marvell/pxa168_eth.c
14360 +@@ -1528,6 +1528,7 @@ static int pxa168_eth_remove(struct platform_device *pdev)
14361 + struct net_device *dev = platform_get_drvdata(pdev);
14362 + struct pxa168_eth_private *pep = netdev_priv(dev);
14363 +
14364 ++ cancel_work_sync(&pep->tx_timeout_task);
14365 + if (pep->htpr) {
14366 + dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE,
14367 + pep->htpr, pep->htpr_dma);
14368 +@@ -1539,7 +1540,6 @@ static int pxa168_eth_remove(struct platform_device *pdev)
14369 + clk_disable_unprepare(pep->clk);
14370 + mdiobus_unregister(pep->smi_bus);
14371 + mdiobus_free(pep->smi_bus);
14372 +- cancel_work_sync(&pep->tx_timeout_task);
14373 + unregister_netdev(dev);
14374 + free_netdev(dev);
14375 + return 0;
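The tx_timeout worker can touch the DMA descriptor memory, the clock and the MDIO bus, so pxa168_eth_remove() now cancels it first, before any of those are torn down; previously the cancel happened after dma_free_coherent() and clk_disable_unprepare(), leaving a window for a use-after-free. The general rule for removal paths, with hypothetical names:

    static int example_remove(struct platform_device *pdev)
    {
    	struct example_priv *p = platform_get_drvdata(pdev);

    	cancel_work_sync(&p->work);	/* worker may still use p's resources */
    	/* ... now it is safe to free DMA memory, clocks, buses ... */
    	return 0;
    }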
14376 +diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
14377 +index 04d067243457b..1ed25e48f6165 100644
14378 +--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
14379 ++++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
14380 +@@ -1230,8 +1230,10 @@ static int mana_create_txq(struct mana_port_context *apc,
14381 +
14382 + cq->gdma_id = cq->gdma_cq->id;
14383 +
14384 +- if (WARN_ON(cq->gdma_id >= gc->max_num_cqs))
14385 +- return -EINVAL;
14386 ++ if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
14387 ++ err = -EINVAL;
14388 ++ goto out;
14389 ++ }
14390 +
14391 + gc->cq_table[cq->gdma_id] = cq->gdma_cq;
14392 +
14393 +diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
14394 +index 334af49e5add1..3dc29b282a884 100644
14395 +--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
14396 ++++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
14397 +@@ -2532,9 +2532,13 @@ static int pch_gbe_probe(struct pci_dev *pdev,
14398 + adapter->pdev = pdev;
14399 + adapter->hw.back = adapter;
14400 + adapter->hw.reg = pcim_iomap_table(pdev)[PCH_GBE_PCI_BAR];
14401 ++
14402 + adapter->pdata = (struct pch_gbe_privdata *)pci_id->driver_data;
14403 +- if (adapter->pdata && adapter->pdata->platform_init)
14404 +- adapter->pdata->platform_init(pdev);
14405 ++ if (adapter->pdata && adapter->pdata->platform_init) {
14406 ++ ret = adapter->pdata->platform_init(pdev);
14407 ++ if (ret)
14408 ++ goto err_free_netdev;
14409 ++ }
14410 +
14411 + adapter->ptp_pdev =
14412 + pci_get_domain_bus_and_slot(pci_domain_nr(adapter->pdev->bus),
14413 +@@ -2629,7 +2633,7 @@ err_free_netdev:
14414 + */
14415 + static int pch_gbe_minnow_platform_init(struct pci_dev *pdev)
14416 + {
14417 +- unsigned long flags = GPIOF_DIR_OUT | GPIOF_INIT_HIGH | GPIOF_EXPORT;
14418 ++ unsigned long flags = GPIOF_OUT_INIT_HIGH;
14419 + unsigned gpio = MINNOW_PHY_RESET_GPIO;
14420 + int ret;
14421 +
14422 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
14423 +index b6cd43eda7acc..8aa55612d0949 100644
14424 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
14425 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
14426 +@@ -75,7 +75,7 @@ struct stmmac_tx_queue {
14427 + unsigned int cur_tx;
14428 + unsigned int dirty_tx;
14429 + dma_addr_t dma_tx_phy;
14430 +- u32 tx_tail_addr;
14431 ++ dma_addr_t tx_tail_addr;
14432 + u32 mss;
14433 + };
14434 +
14435 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
14436 +index c87202cbd3d6d..91cd5073ddb26 100644
14437 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
14438 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
14439 +@@ -5138,7 +5138,7 @@ read_again:
14440 +
14441 + /* Buffer is good. Go on. */
14442 +
14443 +- prefetch(page_address(buf->page));
14444 ++ prefetch(page_address(buf->page) + buf->page_offset);
14445 + if (buf->sec_page)
14446 + prefetch(page_address(buf->sec_page));
14447 +
14448 +diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
14449 +index 6a67b026df0b6..718539cdd2f2e 100644
14450 +--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
14451 ++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
14452 +@@ -1506,12 +1506,12 @@ static void am65_cpsw_nuss_free_tx_chns(void *data)
14453 + for (i = 0; i < common->tx_ch_num; i++) {
14454 + struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
14455 +
14456 +- if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
14457 +- k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
14458 +-
14459 + if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
14460 + k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
14461 +
14462 ++ if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
14463 ++ k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
14464 ++
14465 + memset(tx_chn, 0, sizeof(*tx_chn));
14466 + }
14467 + }
14468 +@@ -1531,12 +1531,12 @@ void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
14469 +
14470 + netif_napi_del(&tx_chn->napi_tx);
14471 +
14472 +- if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
14473 +- k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
14474 +-
14475 + if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
14476 + k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
14477 +
14478 ++ if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
14479 ++ k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
14480 ++
14481 + memset(tx_chn, 0, sizeof(*tx_chn));
14482 + }
14483 + }
14484 +@@ -1624,11 +1624,11 @@ static void am65_cpsw_nuss_free_rx_chns(void *data)
14485 +
14486 + rx_chn = &common->rx_chns;
14487 +
14488 +- if (!IS_ERR_OR_NULL(rx_chn->rx_chn))
14489 +- k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
14490 +-
14491 + if (!IS_ERR_OR_NULL(rx_chn->desc_pool))
14492 + k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
14493 ++
14494 ++ if (!IS_ERR_OR_NULL(rx_chn->rx_chn))
14495 ++ k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
14496 + }
14497 +
14498 + static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
14499 +diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
14500 +index da9135231c079..ebc976b7fcc2a 100644
14501 +--- a/drivers/net/ieee802154/mac802154_hwsim.c
14502 ++++ b/drivers/net/ieee802154/mac802154_hwsim.c
14503 +@@ -480,7 +480,7 @@ static int hwsim_del_edge_nl(struct sk_buff *msg, struct genl_info *info)
14504 + struct hwsim_edge *e;
14505 + u32 v0, v1;
14506 +
14507 +- if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] &&
14508 ++ if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] ||
14509 + !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
14510 + return -EINVAL;
14511 +
14512 +@@ -715,6 +715,8 @@ static int hwsim_subscribe_all_others(struct hwsim_phy *phy)
14513 +
14514 + return 0;
14515 +
14516 ++sub_fail:
14517 ++ hwsim_edge_unsubscribe_me(phy);
14518 + me_fail:
14519 + rcu_read_lock();
14520 + list_for_each_entry_rcu(e, &phy->edges, list) {
14521 +@@ -722,8 +724,6 @@ me_fail:
14522 + hwsim_free_edge(e);
14523 + }
14524 + rcu_read_unlock();
14525 +-sub_fail:
14526 +- hwsim_edge_unsubscribe_me(phy);
14527 + return -ENOMEM;
14528 + }
14529 +
14530 +@@ -824,12 +824,17 @@ err_pib:
14531 + static void hwsim_del(struct hwsim_phy *phy)
14532 + {
14533 + struct hwsim_pib *pib;
14534 ++ struct hwsim_edge *e;
14535 +
14536 + hwsim_edge_unsubscribe_me(phy);
14537 +
14538 + list_del(&phy->list);
14539 +
14540 + rcu_read_lock();
14541 ++ list_for_each_entry_rcu(e, &phy->edges, list) {
14542 ++ list_del_rcu(&e->list);
14543 ++ hwsim_free_edge(e);
14544 ++ }
14545 + pib = rcu_dereference(phy->pib);
14546 + rcu_read_unlock();
14547 +
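Three fixes in mac802154_hwsim: the attribute check in hwsim_del_edge_nl() needs '||', since both the radio id and the edge attribute are mandatory and missing either one must fail; the error labels in hwsim_subscribe_all_others() are reordered so the sub_fail path also unsubscribes this phy from the others; and hwsim_del() now walks and frees the phy's remaining edges instead of leaking them. The corrected required-attribute check:

    	if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] ||
    	    !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
    		return -EINVAL;	/* both attributes are required */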
14548 +diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
14549 +index 92425e1fd70c0..93dc48b9b4f24 100644
14550 +--- a/drivers/net/macsec.c
14551 ++++ b/drivers/net/macsec.c
14552 +@@ -1819,7 +1819,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
14553 + ctx.sa.rx_sa = rx_sa;
14554 + ctx.secy = secy;
14555 + memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
14556 +- MACSEC_KEYID_LEN);
14557 ++ secy->key_len);
14558 +
14559 + err = macsec_offload(ops->mdo_add_rxsa, &ctx);
14560 + if (err)
14561 +@@ -2061,7 +2061,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
14562 + ctx.sa.tx_sa = tx_sa;
14563 + ctx.secy = secy;
14564 + memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
14565 +- MACSEC_KEYID_LEN);
14566 ++ secy->key_len);
14567 +
14568 + err = macsec_offload(ops->mdo_add_txsa, &ctx);
14569 + if (err)
14570 +diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c
14571 +index 10be266e48e8b..b7b2521c73fb6 100644
14572 +--- a/drivers/net/phy/mscc/mscc_macsec.c
14573 ++++ b/drivers/net/phy/mscc/mscc_macsec.c
14574 +@@ -501,7 +501,7 @@ static u32 vsc8584_macsec_flow_context_id(struct macsec_flow *flow)
14575 + }
14576 +
14577 + /* Derive the AES key to get a key for the hash autentication */
14578 +-static int vsc8584_macsec_derive_key(const u8 key[MACSEC_KEYID_LEN],
14579 ++static int vsc8584_macsec_derive_key(const u8 key[MACSEC_MAX_KEY_LEN],
14580 + u16 key_len, u8 hkey[16])
14581 + {
14582 + const u8 input[AES_BLOCK_SIZE] = {0};
14583 +diff --git a/drivers/net/phy/mscc/mscc_macsec.h b/drivers/net/phy/mscc/mscc_macsec.h
14584 +index 9c6d25e36de2a..453304bae7784 100644
14585 +--- a/drivers/net/phy/mscc/mscc_macsec.h
14586 ++++ b/drivers/net/phy/mscc/mscc_macsec.h
14587 +@@ -81,7 +81,7 @@ struct macsec_flow {
14588 + /* Highest takes precedence [0..15] */
14589 + u8 priority;
14590 +
14591 +- u8 key[MACSEC_KEYID_LEN];
14592 ++ u8 key[MACSEC_MAX_KEY_LEN];
14593 +
14594 + union {
14595 + struct macsec_rx_sa *rx_sa;
14596 +diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
14597 +index 28a6c4cfe9b8c..414afcb0a23f8 100644
14598 +--- a/drivers/net/vrf.c
14599 ++++ b/drivers/net/vrf.c
14600 +@@ -1366,22 +1366,22 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
14601 + int orig_iif = skb->skb_iif;
14602 + bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
14603 + bool is_ndisc = ipv6_ndisc_frame(skb);
14604 +- bool is_ll_src;
14605 +
14606 + /* loopback, multicast & non-ND link-local traffic; do not push through
14607 + * packet taps again. Reset pkt_type for upper layers to process skb.
14608 +- * for packets with lladdr src, however, skip so that the dst can be
14609 +- * determine at input using original ifindex in the case that daddr
14610 +- * needs strict
14611 ++ * For strict packets with a source LLA, determine the dst using the
14612 ++ * original ifindex.
14613 + */
14614 +- is_ll_src = ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL;
14615 +- if (skb->pkt_type == PACKET_LOOPBACK ||
14616 +- (need_strict && !is_ndisc && !is_ll_src)) {
14617 ++ if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) {
14618 + skb->dev = vrf_dev;
14619 + skb->skb_iif = vrf_dev->ifindex;
14620 + IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
14621 ++
14622 + if (skb->pkt_type == PACKET_LOOPBACK)
14623 + skb->pkt_type = PACKET_HOST;
14624 ++ else if (ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)
14625 ++ vrf_ip6_input_dst(skb, vrf_dev, orig_iif);
14626 ++
14627 + goto out;
14628 + }
14629 +
14630 +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
14631 +index 02a14f1b938ad..5a8df5a195cb5 100644
14632 +--- a/drivers/net/vxlan.c
14633 ++++ b/drivers/net/vxlan.c
14634 +@@ -2164,6 +2164,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
14635 + struct neighbour *n;
14636 + struct nd_msg *msg;
14637 +
14638 ++ rcu_read_lock();
14639 + in6_dev = __in6_dev_get(dev);
14640 + if (!in6_dev)
14641 + goto out;
14642 +@@ -2215,6 +2216,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
14643 + }
14644 +
14645 + out:
14646 ++ rcu_read_unlock();
14647 + consume_skb(skb);
14648 + return NETDEV_TX_OK;
14649 + }
14650 +diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
14651 +index 5ce4f8d038b9b..c272b290fa73d 100644
14652 +--- a/drivers/net/wireless/ath/ath10k/mac.c
14653 ++++ b/drivers/net/wireless/ath/ath10k/mac.c
14654 +@@ -5592,6 +5592,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
14655 +
14656 + if (arvif->nohwcrypt &&
14657 + !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
14658 ++ ret = -EINVAL;
14659 + ath10k_warn(ar, "cryptmode module param needed for sw crypto\n");
14660 + goto err;
14661 + }
14662 +diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
14663 +index e7fde635e0eef..71878ab35b93c 100644
14664 +--- a/drivers/net/wireless/ath/ath10k/pci.c
14665 ++++ b/drivers/net/wireless/ath/ath10k/pci.c
14666 +@@ -3685,8 +3685,10 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
14667 + ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
14668 + if (bus_params.chip_id != 0xffffffff) {
14669 + if (!ath10k_pci_chip_is_supported(pdev->device,
14670 +- bus_params.chip_id))
14671 ++ bus_params.chip_id)) {
14672 ++ ret = -ENODEV;
14673 + goto err_unsupported;
14674 ++ }
14675 + }
14676 + }
14677 +
14678 +@@ -3697,11 +3699,15 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
14679 + }
14680 +
14681 + bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
14682 +- if (bus_params.chip_id == 0xffffffff)
14683 ++ if (bus_params.chip_id == 0xffffffff) {
14684 ++ ret = -ENODEV;
14685 + goto err_unsupported;
14686 ++ }
14687 +
14688 +- if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id))
14689 +- goto err_free_irq;
14690 ++ if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) {
14691 ++ ret = -ENODEV;
14692 ++ goto err_unsupported;
14693 ++ }
14694 +
14695 + ret = ath10k_core_register(ar, &bus_params);
14696 + if (ret) {
14697 +diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
14698 +index 77ce3347ab86d..595e83fe09904 100644
14699 +--- a/drivers/net/wireless/ath/ath11k/core.c
14700 ++++ b/drivers/net/wireless/ath/ath11k/core.c
14701 +@@ -488,7 +488,8 @@ static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab,
14702 + if (len < ALIGN(ie_len, 4)) {
14703 + ath11k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu\n",
14704 + ie_id, ie_len, len);
14705 +- return -EINVAL;
14706 ++ ret = -EINVAL;
14707 ++ goto err;
14708 + }
14709 +
14710 + switch (ie_id) {
14711 +diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
14712 +index 9d0ff150ec30f..eb52332dbe3f1 100644
14713 +--- a/drivers/net/wireless/ath/ath11k/mac.c
14714 ++++ b/drivers/net/wireless/ath/ath11k/mac.c
14715 +@@ -5379,11 +5379,6 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
14716 + if (WARN_ON(!arvif->is_up))
14717 + continue;
14718 +
14719 +- ret = ath11k_mac_setup_bcn_tmpl(arvif);
14720 +- if (ret)
14721 +- ath11k_warn(ab, "failed to update bcn tmpl during csa: %d\n",
14722 +- ret);
14723 +-
14724 + ret = ath11k_mac_vdev_restart(arvif, &vifs[i].new_ctx->def);
14725 + if (ret) {
14726 + ath11k_warn(ab, "failed to restart vdev %d: %d\n",
14727 +@@ -5391,6 +5386,11 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
14728 + continue;
14729 + }
14730 +
14731 ++ ret = ath11k_mac_setup_bcn_tmpl(arvif);
14732 ++ if (ret)
14733 ++ ath11k_warn(ab, "failed to update bcn tmpl during csa: %d\n",
14734 ++ ret);
14735 ++
14736 + ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
14737 + arvif->bssid);
14738 + if (ret) {
14739 +diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
14740 +index 45f6402478b50..97c3a53f9cef2 100644
14741 +--- a/drivers/net/wireless/ath/ath9k/main.c
14742 ++++ b/drivers/net/wireless/ath/ath9k/main.c
14743 +@@ -307,6 +307,11 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
14744 + hchan = ah->curchan;
14745 + }
14746 +
14747 ++ if (!hchan) {
14748 ++ fastcc = false;
14749 ++ hchan = ath9k_cmn_get_channel(sc->hw, ah, &sc->cur_chan->chandef);
14750 ++ }
14751 ++
14752 + if (!ath_prepare_reset(sc))
14753 + fastcc = false;
14754 +
14755 +diff --git a/drivers/net/wireless/ath/carl9170/Kconfig b/drivers/net/wireless/ath/carl9170/Kconfig
14756 +index b2d760873992f..ba9bea79381c5 100644
14757 +--- a/drivers/net/wireless/ath/carl9170/Kconfig
14758 ++++ b/drivers/net/wireless/ath/carl9170/Kconfig
14759 +@@ -16,13 +16,11 @@ config CARL9170
14760 +
14761 + config CARL9170_LEDS
14762 + bool "SoftLED Support"
14763 +- depends on CARL9170
14764 +- select MAC80211_LEDS
14765 +- select LEDS_CLASS
14766 +- select NEW_LEDS
14767 + default y
14768 ++ depends on CARL9170
14769 ++ depends on MAC80211_LEDS
14770 + help
14771 +- This option is necessary, if you want your device' LEDs to blink
14772 ++ This option is necessary, if you want your device's LEDs to blink.
14773 +
14774 + Say Y, unless you need the LEDs for firmware debugging.
14775 +
14776 +diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
14777 +index afb4877eaad8f..dabed4e3ca457 100644
14778 +--- a/drivers/net/wireless/ath/wcn36xx/main.c
14779 ++++ b/drivers/net/wireless/ath/wcn36xx/main.c
14780 +@@ -293,23 +293,16 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
14781 + goto out_free_dxe_pool;
14782 + }
14783 +
14784 +- wcn->hal_buf = kmalloc(WCN36XX_HAL_BUF_SIZE, GFP_KERNEL);
14785 +- if (!wcn->hal_buf) {
14786 +- wcn36xx_err("Failed to allocate smd buf\n");
14787 +- ret = -ENOMEM;
14788 +- goto out_free_dxe_ctl;
14789 +- }
14790 +-
14791 + ret = wcn36xx_smd_load_nv(wcn);
14792 + if (ret) {
14793 + wcn36xx_err("Failed to push NV to chip\n");
14794 +- goto out_free_smd_buf;
14795 ++ goto out_free_dxe_ctl;
14796 + }
14797 +
14798 + ret = wcn36xx_smd_start(wcn);
14799 + if (ret) {
14800 + wcn36xx_err("Failed to start chip\n");
14801 +- goto out_free_smd_buf;
14802 ++ goto out_free_dxe_ctl;
14803 + }
14804 +
14805 + if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
14806 +@@ -336,8 +329,6 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
14807 +
14808 + out_smd_stop:
14809 + wcn36xx_smd_stop(wcn);
14810 +-out_free_smd_buf:
14811 +- kfree(wcn->hal_buf);
14812 + out_free_dxe_ctl:
14813 + wcn36xx_dxe_free_ctl_blks(wcn);
14814 + out_free_dxe_pool:
14815 +@@ -372,8 +363,6 @@ static void wcn36xx_stop(struct ieee80211_hw *hw)
14816 +
14817 + wcn36xx_dxe_free_mem_pools(wcn);
14818 + wcn36xx_dxe_free_ctl_blks(wcn);
14819 +-
14820 +- kfree(wcn->hal_buf);
14821 + }
14822 +
14823 + static void wcn36xx_change_ps(struct wcn36xx *wcn, bool enable)
14824 +@@ -1401,6 +1390,12 @@ static int wcn36xx_probe(struct platform_device *pdev)
14825 + mutex_init(&wcn->hal_mutex);
14826 + mutex_init(&wcn->scan_lock);
14827 +
14828 ++ wcn->hal_buf = devm_kmalloc(wcn->dev, WCN36XX_HAL_BUF_SIZE, GFP_KERNEL);
14829 ++ if (!wcn->hal_buf) {
14830 ++ ret = -ENOMEM;
14831 ++ goto out_wq;
14832 ++ }
14833 ++
14834 + ret = dma_set_mask_and_coherent(wcn->dev, DMA_BIT_MASK(32));
14835 + if (ret < 0) {
14836 + wcn36xx_err("failed to set DMA mask: %d\n", ret);
14837 +diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
14838 +index 6746fd206d2a9..1ff2679963f06 100644
14839 +--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
14840 ++++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
14841 +@@ -2842,9 +2842,7 @@ void wil_p2p_wdev_free(struct wil6210_priv *wil)
14842 + wil->radio_wdev = wil->main_ndev->ieee80211_ptr;
14843 + mutex_unlock(&wil->vif_mutex);
14844 + if (p2p_wdev) {
14845 +- wiphy_lock(wil->wiphy);
14846 + cfg80211_unregister_wdev(p2p_wdev);
14847 +- wiphy_unlock(wil->wiphy);
14848 + kfree(p2p_wdev);
14849 + }
14850 + }
14851 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
14852 +index f4405d7861b69..d8822a01d277e 100644
14853 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
14854 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
14855 +@@ -2767,8 +2767,9 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
14856 + struct brcmf_sta_info_le sta_info_le;
14857 + u32 sta_flags;
14858 + u32 is_tdls_peer;
14859 +- s32 total_rssi;
14860 +- s32 count_rssi;
14861 ++ s32 total_rssi_avg = 0;
14862 ++ s32 total_rssi = 0;
14863 ++ s32 count_rssi = 0;
14864 + int rssi;
14865 + u32 i;
14866 +
14867 +@@ -2834,25 +2835,27 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
14868 + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES);
14869 + sinfo->rx_bytes = le64_to_cpu(sta_info_le.rx_tot_bytes);
14870 + }
14871 +- total_rssi = 0;
14872 +- count_rssi = 0;
14873 + for (i = 0; i < BRCMF_ANT_MAX; i++) {
14874 +- if (sta_info_le.rssi[i]) {
14875 +- sinfo->chain_signal_avg[count_rssi] =
14876 +- sta_info_le.rssi[i];
14877 +- sinfo->chain_signal[count_rssi] =
14878 +- sta_info_le.rssi[i];
14879 +- total_rssi += sta_info_le.rssi[i];
14880 +- count_rssi++;
14881 +- }
14882 ++ if (sta_info_le.rssi[i] == 0 ||
14883 ++ sta_info_le.rx_lastpkt_rssi[i] == 0)
14884 ++ continue;
14885 ++ sinfo->chains |= BIT(count_rssi);
14886 ++ sinfo->chain_signal[count_rssi] =
14887 ++ sta_info_le.rx_lastpkt_rssi[i];
14888 ++ sinfo->chain_signal_avg[count_rssi] =
14889 ++ sta_info_le.rssi[i];
14890 ++ total_rssi += sta_info_le.rx_lastpkt_rssi[i];
14891 ++ total_rssi_avg += sta_info_le.rssi[i];
14892 ++ count_rssi++;
14893 + }
14894 + if (count_rssi) {
14895 +- sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL);
14896 +- sinfo->chains = count_rssi;
14897 +-
14898 + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
14899 +- total_rssi /= count_rssi;
14900 +- sinfo->signal = total_rssi;
14901 ++ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
14902 ++ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL);
14903 ++ sinfo->filled |=
14904 ++ BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG);
14905 ++ sinfo->signal = total_rssi / count_rssi;
14906 ++ sinfo->signal_avg = total_rssi_avg / count_rssi;
14907 + } else if (test_bit(BRCMF_VIF_STATUS_CONNECTED,
14908 + &ifp->vif->sme_state)) {
14909 + memset(&scb_val, 0, sizeof(scb_val));
14910 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
14911 +index 16ed325795a8b..faf5f8e5eee33 100644
14912 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
14913 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
14914 +@@ -626,8 +626,8 @@ BRCMF_FW_DEF(4373, "brcmfmac4373-sdio");
14915 + BRCMF_FW_DEF(43012, "brcmfmac43012-sdio");
14916 +
14917 + /* firmware config files */
14918 +-MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcm/brcmfmac*-sdio.*.txt");
14919 +-MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcm/brcmfmac*-pcie.*.txt");
14920 ++MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-sdio.*.txt");
14921 ++MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.txt");
14922 +
14923 + static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
14924 + BRCMF_FW_ENTRY(BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, 43143),
14925 +@@ -4162,7 +4162,6 @@ static int brcmf_sdio_bus_reset(struct device *dev)
14926 + if (ret) {
14927 + brcmf_err("Failed to probe after sdio device reset: ret %d\n",
14928 + ret);
14929 +- brcmf_sdiod_remove(sdiodev);
14930 + }
14931 +
14932 + return ret;
14933 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
14934 +index 39f3af2d0439b..eadac0f5590fc 100644
14935 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
14936 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
14937 +@@ -1220,6 +1220,7 @@ static int brcms_bcma_probe(struct bcma_device *pdev)
14938 + {
14939 + struct brcms_info *wl;
14940 + struct ieee80211_hw *hw;
14941 ++ int ret;
14942 +
14943 + dev_info(&pdev->dev, "mfg %x core %x rev %d class %d irq %d\n",
14944 + pdev->id.manuf, pdev->id.id, pdev->id.rev, pdev->id.class,
14945 +@@ -1244,11 +1245,16 @@ static int brcms_bcma_probe(struct bcma_device *pdev)
14946 + wl = brcms_attach(pdev);
14947 + if (!wl) {
14948 + pr_err("%s: brcms_attach failed!\n", __func__);
14949 +- return -ENODEV;
14950 ++ ret = -ENODEV;
14951 ++ goto err_free_ieee80211;
14952 + }
14953 + brcms_led_register(wl);
14954 +
14955 + return 0;
14956 ++
14957 ++err_free_ieee80211:
14958 ++ ieee80211_free_hw(hw);
14959 ++ return ret;
14960 + }
14961 +
14962 + static int brcms_suspend(struct bcma_device *pdev)
14963 +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h
14964 +index e4f91bce222d8..61d3d4e0b7d94 100644
14965 +--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h
14966 ++++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h
14967 +@@ -1,7 +1,7 @@
14968 + /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
14969 + /******************************************************************************
14970 + *
14971 +- * Copyright(c) 2020 Intel Corporation
14972 ++ * Copyright(c) 2020-2021 Intel Corporation
14973 + *
14974 + *****************************************************************************/
14975 +
14976 +@@ -10,7 +10,7 @@
14977 +
14978 + #include "fw/notif-wait.h"
14979 +
14980 +-#define MVM_UCODE_PNVM_TIMEOUT (HZ / 10)
14981 ++#define MVM_UCODE_PNVM_TIMEOUT (HZ / 4)
14982 +
14983 + int iwl_pnvm_load(struct iwl_trans *trans,
14984 + struct iwl_notif_wait_data *notif_wait);
14985 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
14986 +index 1ad621d13ad3a..0a13c2bda2eed 100644
14987 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
14988 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
14989 +@@ -1032,6 +1032,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
14990 + if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
14991 + return -1;
14992 +
14993 ++ if (unlikely(ieee80211_is_any_nullfunc(fc)) && sta->he_cap.has_he)
14994 ++ return -1;
14995 ++
14996 + if (unlikely(ieee80211_is_probe_resp(fc)))
14997 + iwl_mvm_probe_resp_set_noa(mvm, skb);
14998 +
14999 +diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
15000 +index 94228b316df1b..46517515ba728 100644
15001 +--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
15002 ++++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
15003 +@@ -1231,7 +1231,7 @@ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter)
15004 + static int mwifiex_pcie_alloc_sleep_cookie_buf(struct mwifiex_adapter *adapter)
15005 + {
15006 + struct pcie_service_card *card = adapter->card;
15007 +- u32 tmp;
15008 ++ u32 *cookie;
15009 +
15010 + card->sleep_cookie_vbase = dma_alloc_coherent(&card->dev->dev,
15011 + sizeof(u32),
15012 +@@ -1242,13 +1242,11 @@ static int mwifiex_pcie_alloc_sleep_cookie_buf(struct mwifiex_adapter *adapter)
15013 + "dma_alloc_coherent failed!\n");
15014 + return -ENOMEM;
15015 + }
15016 ++ cookie = (u32 *)card->sleep_cookie_vbase;
15017 + /* Init val of Sleep Cookie */
15018 +- tmp = FW_AWAKE_COOKIE;
15019 +- put_unaligned(tmp, card->sleep_cookie_vbase);
15020 ++ *cookie = FW_AWAKE_COOKIE;
15021 +
15022 +- mwifiex_dbg(adapter, INFO,
15023 +- "alloc_scook: sleep cookie=0x%x\n",
15024 +- get_unaligned(card->sleep_cookie_vbase));
15025 ++ mwifiex_dbg(adapter, INFO, "alloc_scook: sleep cookie=0x%x\n", *cookie);
15026 +
15027 + return 0;
15028 + }
15029 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
15030 +index aa42af9ebfd6a..ae2191371f511 100644
15031 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
15032 ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
15033 +@@ -411,6 +411,9 @@ mt7615_mcu_rx_csa_notify(struct mt7615_dev *dev, struct sk_buff *skb)
15034 +
15035 + c = (struct mt7615_mcu_csa_notify *)skb->data;
15036 +
15037 ++ if (c->omac_idx > EXT_BSSID_MAX)
15038 ++ return;
15039 ++
15040 + if (ext_phy && ext_phy->omac_mask & BIT_ULL(c->omac_idx))
15041 + mphy = dev->mt76.phy2;
15042 +
15043 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
15044 +index d7cbef752f9fd..cc278d8cb8886 100644
15045 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
15046 ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
15047 +@@ -131,20 +131,21 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
15048 + struct mt76_tx_info *tx_info)
15049 + {
15050 + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
15051 +- struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
15052 + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
15053 + struct ieee80211_key_conf *key = info->control.hw_key;
15054 + int pid, id;
15055 + u8 *txwi = (u8 *)txwi_ptr;
15056 + struct mt76_txwi_cache *t;
15057 ++ struct mt7615_sta *msta;
15058 + void *txp;
15059 +
15060 ++ msta = wcid ? container_of(wcid, struct mt7615_sta, wcid) : NULL;
15061 + if (!wcid)
15062 + wcid = &dev->mt76.global_wcid;
15063 +
15064 + pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
15065 +
15066 +- if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
15067 ++ if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && msta) {
15068 + struct mt7615_phy *phy = &dev->phy;
15069 +
15070 + if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && mdev->phy2)
15071 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
15072 +index f8d3673c2cae8..7010101f6b147 100644
15073 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
15074 ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
15075 +@@ -191,14 +191,15 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
15076 + struct ieee80211_sta *sta,
15077 + struct mt76_tx_info *tx_info)
15078 + {
15079 +- struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
15080 + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
15081 + struct sk_buff *skb = tx_info->skb;
15082 + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
15083 ++ struct mt7615_sta *msta;
15084 + int pad;
15085 +
15086 ++ msta = wcid ? container_of(wcid, struct mt7615_sta, wcid) : NULL;
15087 + if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) &&
15088 +- !msta->rate_probe) {
15089 ++ msta && !msta->rate_probe) {
15090 + /* request to configure sampling rate */
15091 + spin_lock_bh(&dev->mt76.lock);
15092 + mt7615_mac_set_rates(&dev->phy, msta, &info->control.rates[0],
15093 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
15094 +index 6c889b90fd12a..c26cfef425ed8 100644
15095 +--- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h
15096 ++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
15097 +@@ -12,7 +12,7 @@
15098 + #define MT76_CONNAC_MAX_SCAN_MATCH 16
15099 +
15100 + #define MT76_CONNAC_COREDUMP_TIMEOUT (HZ / 20)
15101 +-#define MT76_CONNAC_COREDUMP_SZ (128 * 1024)
15102 ++#define MT76_CONNAC_COREDUMP_SZ (1300 * 1024)
15103 +
15104 + enum {
15105 + CMD_CBW_20MHZ = IEEE80211_STA_RX_BW_20,
15106 +@@ -45,6 +45,7 @@ enum {
15107 +
15108 + struct mt76_connac_pm {
15109 + bool enable;
15110 ++ bool suspended;
15111 +
15112 + spinlock_t txq_lock;
15113 + struct {
15114 +@@ -127,8 +128,12 @@ mt76_connac_pm_unref(struct mt76_connac_pm *pm)
15115 + static inline bool
15116 + mt76_connac_skip_fw_pmctrl(struct mt76_phy *phy, struct mt76_connac_pm *pm)
15117 + {
15118 ++ struct mt76_dev *dev = phy->dev;
15119 + bool ret;
15120 +
15121 ++ if (dev->token_count)
15122 ++ return true;
15123 ++
15124 + spin_lock_bh(&pm->wake.lock);
15125 + ret = pm->wake.count || test_and_set_bit(MT76_STATE_PM, &phy->state);
15126 + spin_unlock_bh(&pm->wake.lock);
15127 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
15128 +index 6f180c92d4132..5f2705fbd6803 100644
15129 +--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
15130 ++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
15131 +@@ -17,6 +17,9 @@ int mt76_connac_pm_wake(struct mt76_phy *phy, struct mt76_connac_pm *pm)
15132 + if (!test_bit(MT76_STATE_PM, &phy->state))
15133 + return 0;
15134 +
15135 ++ if (pm->suspended)
15136 ++ return 0;
15137 ++
15138 + queue_work(dev->wq, &pm->wake_work);
15139 + if (!wait_event_timeout(pm->wait,
15140 + !test_bit(MT76_STATE_PM, &phy->state),
15141 +@@ -40,6 +43,9 @@ void mt76_connac_power_save_sched(struct mt76_phy *phy,
15142 + if (!pm->enable)
15143 + return;
15144 +
15145 ++ if (pm->suspended)
15146 ++ return;
15147 ++
15148 + pm->last_activity = jiffies;
15149 +
15150 + if (!test_bit(MT76_STATE_PM, &phy->state)) {
15151 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
15152 +index 619561606f96d..eb19721f9d79a 100644
15153 +--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
15154 ++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
15155 +@@ -1939,7 +1939,7 @@ mt76_connac_mcu_set_wow_pattern(struct mt76_dev *dev,
15156 + ptlv->index = index;
15157 +
15158 + memcpy(ptlv->pattern, pattern->pattern, pattern->pattern_len);
15159 +- memcpy(ptlv->mask, pattern->mask, pattern->pattern_len / 8);
15160 ++ memcpy(ptlv->mask, pattern->mask, DIV_ROUND_UP(pattern->pattern_len, 8));
15161 +
15162 + return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD_SUSPEND, true);
15163 + }
15164 +@@ -1974,14 +1974,17 @@ mt76_connac_mcu_set_wow_ctrl(struct mt76_phy *phy, struct ieee80211_vif *vif,
15165 + };
15166 +
15167 + if (wowlan->magic_pkt)
15168 +- req.wow_ctrl_tlv.trigger |= BIT(0);
15169 ++ req.wow_ctrl_tlv.trigger |= UNI_WOW_DETECT_TYPE_MAGIC;
15170 + if (wowlan->disconnect)
15171 +- req.wow_ctrl_tlv.trigger |= BIT(2);
15172 ++ req.wow_ctrl_tlv.trigger |= (UNI_WOW_DETECT_TYPE_DISCONNECT |
15173 ++ UNI_WOW_DETECT_TYPE_BCN_LOST);
15174 + if (wowlan->nd_config) {
15175 + mt76_connac_mcu_sched_scan_req(phy, vif, wowlan->nd_config);
15176 +- req.wow_ctrl_tlv.trigger |= BIT(5);
15177 ++ req.wow_ctrl_tlv.trigger |= UNI_WOW_DETECT_TYPE_SCH_SCAN_HIT;
15178 + mt76_connac_mcu_sched_scan_enable(phy, vif, suspend);
15179 + }
15180 ++ if (wowlan->n_patterns)
15181 ++ req.wow_ctrl_tlv.trigger |= UNI_WOW_DETECT_TYPE_BITMAP;
15182 +
15183 + if (mt76_is_mmio(dev))
15184 + req.wow_ctrl_tlv.wakeup_hif = WOW_PCIE;
15185 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
15186 +index a1096861d04a3..3bcae732872ed 100644
15187 +--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
15188 ++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
15189 +@@ -590,6 +590,14 @@ enum {
15190 + UNI_OFFLOAD_OFFLOAD_BMC_RPY_DETECT,
15191 + };
15192 +
15193 ++#define UNI_WOW_DETECT_TYPE_MAGIC BIT(0)
15194 ++#define UNI_WOW_DETECT_TYPE_ANY BIT(1)
15195 ++#define UNI_WOW_DETECT_TYPE_DISCONNECT BIT(2)
15196 ++#define UNI_WOW_DETECT_TYPE_GTK_REKEY_FAIL BIT(3)
15197 ++#define UNI_WOW_DETECT_TYPE_BCN_LOST BIT(4)
15198 ++#define UNI_WOW_DETECT_TYPE_SCH_SCAN_HIT BIT(5)
15199 ++#define UNI_WOW_DETECT_TYPE_BITMAP BIT(6)
15200 ++
15201 + enum {
15202 + UNI_SUSPEND_MODE_SETTING,
15203 + UNI_SUSPEND_WOW_CTRL,
15204 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
15205 +index 033fb592bdf02..30bf41b8ed152 100644
15206 +--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
15207 ++++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
15208 +@@ -33,7 +33,7 @@ enum mt7915_eeprom_field {
15209 + #define MT_EE_WIFI_CAL_GROUP BIT(0)
15210 + #define MT_EE_WIFI_CAL_DPD GENMASK(2, 1)
15211 + #define MT_EE_CAL_UNIT 1024
15212 +-#define MT_EE_CAL_GROUP_SIZE (44 * MT_EE_CAL_UNIT)
15213 ++#define MT_EE_CAL_GROUP_SIZE (49 * MT_EE_CAL_UNIT + 16)
15214 + #define MT_EE_CAL_DPD_SIZE (54 * MT_EE_CAL_UNIT)
15215 +
15216 + #define MT_EE_WIFI_CONF0_TX_PATH GENMASK(2, 0)
15217 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
15218 +index b3f14ff67c5ae..764f25a828fa2 100644
15219 +--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
15220 ++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
15221 +@@ -3440,8 +3440,9 @@ int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy)
15222 + {
15223 + struct mt7915_dev *dev = phy->dev;
15224 + struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
15225 +- u16 total = 2, idx, center_freq = chandef->center_freq1;
15226 ++ u16 total = 2, center_freq = chandef->center_freq1;
15227 + u8 *cal = dev->cal, *eep = dev->mt76.eeprom.data;
15228 ++ int idx;
15229 +
15230 + if (!(eep[MT_EE_DO_PRE_CAL] & MT_EE_WIFI_CAL_DPD))
15231 + return 0;
15232 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
15233 +index f9d81e36ef09a..b220b334906bc 100644
15234 +--- a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
15235 ++++ b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
15236 +@@ -464,10 +464,17 @@ mt7915_tm_set_tx_frames(struct mt7915_phy *phy, bool en)
15237 + static void
15238 + mt7915_tm_set_rx_frames(struct mt7915_phy *phy, bool en)
15239 + {
15240 +- if (en)
15241 ++ mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, false);
15242 ++
15243 ++ if (en) {
15244 ++ struct mt7915_dev *dev = phy->dev;
15245 ++
15246 + mt7915_tm_update_channel(phy);
15247 +
15248 +- mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, en);
15249 ++ /* read-clear */
15250 ++ mt76_rr(dev, MT_MIB_SDR3(phy != &dev->phy));
15251 ++ mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, en);
15252 ++ }
15253 + }
15254 +
15255 + static int
15256 +@@ -690,7 +697,11 @@ static int
15257 + mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
15258 + {
15259 + struct mt7915_phy *phy = mphy->priv;
15260 ++ struct mt7915_dev *dev = phy->dev;
15261 ++ bool ext_phy = phy != &dev->phy;
15262 ++ enum mt76_rxq_id q;
15263 + void *rx, *rssi;
15264 ++ u16 fcs_err;
15265 + int i;
15266 +
15267 + rx = nla_nest_start(msg, MT76_TM_STATS_ATTR_LAST_RX);
15268 +@@ -735,6 +746,12 @@ mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
15269 +
15270 + nla_nest_end(msg, rx);
15271 +
15272 ++ fcs_err = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
15273 ++ MT_MIB_SDR3_FCS_ERR_MASK);
15274 ++ q = ext_phy ? MT_RXQ_EXT : MT_RXQ_MAIN;
15275 ++ mphy->test.rx_stats.packets[q] += fcs_err;
15276 ++ mphy->test.rx_stats.fcs_error[q] += fcs_err;
15277 ++
15278 + return 0;
15279 + }
15280 +
15281 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
15282 +index 6ee423dd4027c..6602903c0d026 100644
15283 +--- a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
15284 ++++ b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
15285 +@@ -184,7 +184,10 @@ mt7921_txpwr(struct seq_file *s, void *data)
15286 + struct mt7921_txpwr txpwr;
15287 + int ret;
15288 +
15289 ++ mt7921_mutex_acquire(dev);
15290 + ret = mt7921_get_txpwr_info(dev, &txpwr);
15291 ++ mt7921_mutex_release(dev);
15292 ++
15293 + if (ret)
15294 + return ret;
15295 +
15296 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
15297 +index 71e664ee76520..bd9143dc865f9 100644
15298 +--- a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
15299 ++++ b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
15300 +@@ -313,9 +313,9 @@ static int mt7921_dma_reset(struct mt7921_dev *dev, bool force)
15301 +
15302 + int mt7921_wfsys_reset(struct mt7921_dev *dev)
15303 + {
15304 +- mt76_set(dev, 0x70002600, BIT(0));
15305 +- msleep(200);
15306 +- mt76_clear(dev, 0x70002600, BIT(0));
15307 ++ mt76_clear(dev, MT_WFSYS_SW_RST_B, WFSYS_SW_RST_B);
15308 ++ msleep(50);
15309 ++ mt76_set(dev, MT_WFSYS_SW_RST_B, WFSYS_SW_RST_B);
15310 +
15311 + if (!__mt76_poll_msec(&dev->mt76, MT_WFSYS_SW_RST_B,
15312 + WFSYS_SW_INIT_DONE, WFSYS_SW_INIT_DONE, 500))
15313 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
15314 +index 1763ea0614ce2..2cb0252e63b21 100644
15315 +--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
15316 ++++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
15317 +@@ -73,6 +73,7 @@ static void
15318 + mt7921_init_wiphy(struct ieee80211_hw *hw)
15319 + {
15320 + struct mt7921_phy *phy = mt7921_hw_phy(hw);
15321 ++ struct mt7921_dev *dev = phy->dev;
15322 + struct wiphy *wiphy = hw->wiphy;
15323 +
15324 + hw->queues = 4;
15325 +@@ -110,36 +111,21 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
15326 + ieee80211_hw_set(hw, SUPPORTS_PS);
15327 + ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
15328 +
15329 ++ if (dev->pm.enable)
15330 ++ ieee80211_hw_set(hw, CONNECTION_MONITOR);
15331 ++
15332 + hw->max_tx_fragments = 4;
15333 + }
15334 +
15335 + static void
15336 + mt7921_mac_init_band(struct mt7921_dev *dev, u8 band)
15337 + {
15338 +- u32 mask, set;
15339 +-
15340 + mt76_rmw_field(dev, MT_TMAC_CTCR0(band),
15341 + MT_TMAC_CTCR0_INS_DDLMT_REFTIME, 0x3f);
15342 + mt76_set(dev, MT_TMAC_CTCR0(band),
15343 + MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN |
15344 + MT_TMAC_CTCR0_INS_DDLMT_EN);
15345 +
15346 +- mask = MT_MDP_RCFR0_MCU_RX_MGMT |
15347 +- MT_MDP_RCFR0_MCU_RX_CTL_NON_BAR |
15348 +- MT_MDP_RCFR0_MCU_RX_CTL_BAR;
15349 +- set = FIELD_PREP(MT_MDP_RCFR0_MCU_RX_MGMT, MT_MDP_TO_HIF) |
15350 +- FIELD_PREP(MT_MDP_RCFR0_MCU_RX_CTL_NON_BAR, MT_MDP_TO_HIF) |
15351 +- FIELD_PREP(MT_MDP_RCFR0_MCU_RX_CTL_BAR, MT_MDP_TO_HIF);
15352 +- mt76_rmw(dev, MT_MDP_BNRCFR0(band), mask, set);
15353 +-
15354 +- mask = MT_MDP_RCFR1_MCU_RX_BYPASS |
15355 +- MT_MDP_RCFR1_RX_DROPPED_UCAST |
15356 +- MT_MDP_RCFR1_RX_DROPPED_MCAST;
15357 +- set = FIELD_PREP(MT_MDP_RCFR1_MCU_RX_BYPASS, MT_MDP_TO_HIF) |
15358 +- FIELD_PREP(MT_MDP_RCFR1_RX_DROPPED_UCAST, MT_MDP_TO_HIF) |
15359 +- FIELD_PREP(MT_MDP_RCFR1_RX_DROPPED_MCAST, MT_MDP_TO_HIF);
15360 +- mt76_rmw(dev, MT_MDP_BNRCFR1(band), mask, set);
15361 +-
15362 + mt76_set(dev, MT_WF_RMAC_MIB_TIME0(band), MT_WF_RMAC_MIB_RXTIME_EN);
15363 + mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band), MT_WF_RMAC_MIB_RXTIME_EN);
15364 +
15365 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
15366 +index decf2d5f0ce3a..493c2aba2f791 100644
15367 +--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
15368 ++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
15369 +@@ -444,16 +444,19 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
15370 + status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
15371 + status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1);
15372 + status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1);
15373 +- status->signal = status->chain_signal[0];
15374 +-
15375 +- for (i = 1; i < hweight8(mphy->antenna_mask); i++) {
15376 +- if (!(status->chains & BIT(i)))
15377 ++ status->signal = -128;
15378 ++ for (i = 0; i < hweight8(mphy->antenna_mask); i++) {
15379 ++ if (!(status->chains & BIT(i)) ||
15380 ++ status->chain_signal[i] >= 0)
15381 + continue;
15382 +
15383 + status->signal = max(status->signal,
15384 + status->chain_signal[i]);
15385 + }
15386 +
15387 ++ if (status->signal == -128)
15388 ++ status->flag |= RX_FLAG_NO_SIGNAL_VAL;
15389 ++
15390 + stbc = FIELD_GET(MT_PRXV_STBC, v0);
15391 + gi = FIELD_GET(MT_PRXV_SGI, v0);
15392 + cck = false;
15393 +@@ -1196,7 +1199,8 @@ mt7921_vif_connect_iter(void *priv, u8 *mac,
15394 + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
15395 + struct mt7921_dev *dev = mvif->phy->dev;
15396 +
15397 +- ieee80211_disconnect(vif, true);
15398 ++ if (vif->type == NL80211_IFTYPE_STATION)
15399 ++ ieee80211_disconnect(vif, true);
15400 +
15401 + mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, true);
15402 + mt7921_mcu_set_tx(dev, vif);
15403 +@@ -1269,6 +1273,7 @@ void mt7921_mac_reset_work(struct work_struct *work)
15404 + hw = mt76_hw(dev);
15405 +
15406 + dev_err(dev->mt76.dev, "chip reset\n");
15407 ++ dev->hw_full_reset = true;
15408 + ieee80211_stop_queues(hw);
15409 +
15410 + cancel_delayed_work_sync(&dev->mphy.mac_work);
15411 +@@ -1293,6 +1298,7 @@ void mt7921_mac_reset_work(struct work_struct *work)
15412 + ieee80211_scan_completed(dev->mphy.hw, &info);
15413 + }
15414 +
15415 ++ dev->hw_full_reset = false;
15416 + ieee80211_wake_queues(hw);
15417 + ieee80211_iterate_active_interfaces(hw,
15418 + IEEE80211_IFACE_ITER_RESUME_ALL,
15419 +@@ -1303,7 +1309,11 @@ void mt7921_reset(struct mt76_dev *mdev)
15420 + {
15421 + struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
15422 +
15423 +- queue_work(dev->mt76.wq, &dev->reset_work);
15424 ++ if (!test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
15425 ++ return;
15426 ++
15427 ++ if (!dev->hw_full_reset)
15428 ++ queue_work(dev->mt76.wq, &dev->reset_work);
15429 + }
15430 +
15431 + static void
15432 +@@ -1494,7 +1504,7 @@ void mt7921_coredump_work(struct work_struct *work)
15433 + break;
15434 +
15435 + skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
15436 +- if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
15437 ++ if (!dump || data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
15438 + dev_kfree_skb(skb);
15439 + continue;
15440 + }
15441 +@@ -1504,7 +1514,10 @@ void mt7921_coredump_work(struct work_struct *work)
15442 +
15443 + dev_kfree_skb(skb);
15444 + }
15445 +- dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
15446 +- GFP_KERNEL);
15447 ++
15448 ++ if (dump)
15449 ++ dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
15450 ++ GFP_KERNEL);
15451 ++
15452 + mt7921_reset(&dev->mt76);
15453 + }
15454 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
15455 +index 97a0ef331ac32..bd77a04a15fb2 100644
15456 +--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
15457 ++++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
15458 +@@ -223,54 +223,6 @@ static void mt7921_stop(struct ieee80211_hw *hw)
15459 + mt7921_mutex_release(dev);
15460 + }
15461 +
15462 +-static inline int get_free_idx(u32 mask, u8 start, u8 end)
15463 +-{
15464 +- return ffs(~mask & GENMASK(end, start));
15465 +-}
15466 +-
15467 +-static int get_omac_idx(enum nl80211_iftype type, u64 mask)
15468 +-{
15469 +- int i;
15470 +-
15471 +- switch (type) {
15472 +- case NL80211_IFTYPE_STATION:
15473 +- /* prefer hw bssid slot 1-3 */
15474 +- i = get_free_idx(mask, HW_BSSID_1, HW_BSSID_3);
15475 +- if (i)
15476 +- return i - 1;
15477 +-
15478 +- /* next, try to find a free repeater entry for the sta */
15479 +- i = get_free_idx(mask >> REPEATER_BSSID_START, 0,
15480 +- REPEATER_BSSID_MAX - REPEATER_BSSID_START);
15481 +- if (i)
15482 +- return i + 32 - 1;
15483 +-
15484 +- i = get_free_idx(mask, EXT_BSSID_1, EXT_BSSID_MAX);
15485 +- if (i)
15486 +- return i - 1;
15487 +-
15488 +- if (~mask & BIT(HW_BSSID_0))
15489 +- return HW_BSSID_0;
15490 +-
15491 +- break;
15492 +- case NL80211_IFTYPE_MONITOR:
15493 +- /* ap uses hw bssid 0 and ext bssid */
15494 +- if (~mask & BIT(HW_BSSID_0))
15495 +- return HW_BSSID_0;
15496 +-
15497 +- i = get_free_idx(mask, EXT_BSSID_1, EXT_BSSID_MAX);
15498 +- if (i)
15499 +- return i - 1;
15500 +-
15501 +- break;
15502 +- default:
15503 +- WARN_ON(1);
15504 +- break;
15505 +- }
15506 +-
15507 +- return -1;
15508 +-}
15509 +-
15510 + static int mt7921_add_interface(struct ieee80211_hw *hw,
15511 + struct ieee80211_vif *vif)
15512 + {
15513 +@@ -292,12 +244,7 @@ static int mt7921_add_interface(struct ieee80211_hw *hw,
15514 + goto out;
15515 + }
15516 +
15517 +- idx = get_omac_idx(vif->type, phy->omac_mask);
15518 +- if (idx < 0) {
15519 +- ret = -ENOSPC;
15520 +- goto out;
15521 +- }
15522 +- mvif->mt76.omac_idx = idx;
15523 ++ mvif->mt76.omac_idx = mvif->mt76.idx;
15524 + mvif->phy = phy;
15525 + mvif->mt76.band_idx = 0;
15526 + mvif->mt76.wmm_idx = mvif->mt76.idx % MT7921_MAX_WMM_SETS;
15527 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
15528 +index 67dc4b4cc0945..7c68182cad552 100644
15529 +--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
15530 ++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
15531 +@@ -450,22 +450,33 @@ mt7921_mcu_scan_event(struct mt7921_dev *dev, struct sk_buff *skb)
15532 + }
15533 +
15534 + static void
15535 +-mt7921_mcu_beacon_loss_event(struct mt7921_dev *dev, struct sk_buff *skb)
15536 ++mt7921_mcu_connection_loss_iter(void *priv, u8 *mac,
15537 ++ struct ieee80211_vif *vif)
15538 ++{
15539 ++ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
15540 ++ struct mt76_connac_beacon_loss_event *event = priv;
15541 ++
15542 ++ if (mvif->idx != event->bss_idx)
15543 ++ return;
15544 ++
15545 ++ if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
15546 ++ return;
15547 ++
15548 ++ ieee80211_connection_loss(vif);
15549 ++}
15550 ++
15551 ++static void
15552 ++mt7921_mcu_connection_loss_event(struct mt7921_dev *dev, struct sk_buff *skb)
15553 + {
15554 + struct mt76_connac_beacon_loss_event *event;
15555 +- struct mt76_phy *mphy;
15556 +- u8 band_idx = 0; /* DBDC support */
15557 ++ struct mt76_phy *mphy = &dev->mt76.phy;
15558 +
15559 + skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
15560 + event = (struct mt76_connac_beacon_loss_event *)skb->data;
15561 +- if (band_idx && dev->mt76.phy2)
15562 +- mphy = dev->mt76.phy2;
15563 +- else
15564 +- mphy = &dev->mt76.phy;
15565 +
15566 + ieee80211_iterate_active_interfaces_atomic(mphy->hw,
15567 + IEEE80211_IFACE_ITER_RESUME_ALL,
15568 +- mt76_connac_mcu_beacon_loss_iter, event);
15569 ++ mt7921_mcu_connection_loss_iter, event);
15570 + }
15571 +
15572 + static void
15573 +@@ -530,7 +541,7 @@ mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb)
15574 +
15575 + switch (rxd->eid) {
15576 + case MCU_EVENT_BSS_BEACON_LOSS:
15577 +- mt7921_mcu_beacon_loss_event(dev, skb);
15578 ++ mt7921_mcu_connection_loss_event(dev, skb);
15579 + break;
15580 + case MCU_EVENT_SCHED_SCAN_DONE:
15581 + case MCU_EVENT_SCAN_DONE:
15582 +@@ -1368,6 +1379,7 @@ mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
15583 + {
15584 + struct mt7921_phy *phy = priv;
15585 + struct mt7921_dev *dev = phy->dev;
15586 ++ struct ieee80211_hw *hw = mt76_hw(dev);
15587 + int ret;
15588 +
15589 + if (dev->pm.enable)
15590 +@@ -1380,9 +1392,11 @@ mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
15591 +
15592 + if (dev->pm.enable) {
15593 + vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
15594 ++ ieee80211_hw_set(hw, CONNECTION_MONITOR);
15595 + mt76_set(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
15596 + } else {
15597 + vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
15598 ++ __clear_bit(IEEE80211_HW_CONNECTION_MONITOR, hw->flags);
15599 + mt76_clear(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
15600 + }
15601 + }
15602 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
15603 +index 59862ea4951ce..4cc8a372b2772 100644
15604 +--- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
15605 ++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
15606 +@@ -156,6 +156,7 @@ struct mt7921_dev {
15607 + u16 chainmask;
15608 +
15609 + struct work_struct reset_work;
15610 ++ bool hw_full_reset;
15611 +
15612 + struct list_head sta_poll_list;
15613 + spinlock_t sta_poll_lock;
15614 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
15615 +index fa02d934f0bff..13263f50dc00a 100644
15616 +--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
15617 ++++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
15618 +@@ -188,21 +188,26 @@ static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state)
15619 + {
15620 + struct mt76_dev *mdev = pci_get_drvdata(pdev);
15621 + struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
15622 ++ struct mt76_connac_pm *pm = &dev->pm;
15623 + bool hif_suspend;
15624 + int i, err;
15625 +
15626 +- err = mt76_connac_pm_wake(&dev->mphy, &dev->pm);
15627 ++ pm->suspended = true;
15628 ++ cancel_delayed_work_sync(&pm->ps_work);
15629 ++ cancel_work_sync(&pm->wake_work);
15630 ++
15631 ++ err = mt7921_mcu_drv_pmctrl(dev);
15632 + if (err < 0)
15633 +- return err;
15634 ++ goto restore_suspend;
15635 +
15636 + hif_suspend = !test_bit(MT76_STATE_SUSPEND, &dev->mphy.state);
15637 + if (hif_suspend) {
15638 + err = mt76_connac_mcu_set_hif_suspend(mdev, true);
15639 + if (err)
15640 +- return err;
15641 ++ goto restore_suspend;
15642 + }
15643 +
15644 +- if (!dev->pm.enable)
15645 ++ if (!pm->enable)
15646 + mt76_connac_mcu_set_deep_sleep(&dev->mt76, true);
15647 +
15648 + napi_disable(&mdev->tx_napi);
15649 +@@ -231,27 +236,30 @@ static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state)
15650 +
15651 + err = mt7921_mcu_fw_pmctrl(dev);
15652 + if (err)
15653 +- goto restore;
15654 ++ goto restore_napi;
15655 +
15656 + pci_save_state(pdev);
15657 + err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
15658 + if (err)
15659 +- goto restore;
15660 ++ goto restore_napi;
15661 +
15662 + return 0;
15663 +
15664 +-restore:
15665 ++restore_napi:
15666 + mt76_for_each_q_rx(mdev, i) {
15667 + napi_enable(&mdev->napi[i]);
15668 + }
15669 + napi_enable(&mdev->tx_napi);
15670 +
15671 +- if (!dev->pm.enable)
15672 ++ if (!pm->enable)
15673 + mt76_connac_mcu_set_deep_sleep(&dev->mt76, false);
15674 +
15675 + if (hif_suspend)
15676 + mt76_connac_mcu_set_hif_suspend(mdev, false);
15677 +
15678 ++restore_suspend:
15679 ++ pm->suspended = false;
15680 ++
15681 + return err;
15682 + }
15683 +
15684 +@@ -261,6 +269,7 @@ static int mt7921_pci_resume(struct pci_dev *pdev)
15685 + struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
15686 + int i, err;
15687 +
15688 ++ dev->pm.suspended = false;
15689 + err = pci_set_power_state(pdev, PCI_D0);
15690 + if (err)
15691 + return err;
15692 +diff --git a/drivers/net/wireless/mediatek/mt76/testmode.c b/drivers/net/wireless/mediatek/mt76/testmode.c
15693 +index 001d0ba5f73e6..f614c887f3233 100644
15694 +--- a/drivers/net/wireless/mediatek/mt76/testmode.c
15695 ++++ b/drivers/net/wireless/mediatek/mt76/testmode.c
15696 +@@ -158,19 +158,18 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
15697 + frag_len = MT_TXP_MAX_LEN;
15698 +
15699 + frag = alloc_skb(frag_len, GFP_KERNEL);
15700 +- if (!frag)
15701 ++ if (!frag) {
15702 ++ mt76_testmode_free_skb(phy);
15703 ++ dev_kfree_skb(head);
15704 + return -ENOMEM;
15705 ++ }
15706 +
15707 + __skb_put_zero(frag, frag_len);
15708 + head->len += frag->len;
15709 + head->data_len += frag->len;
15710 +
15711 +- if (*frag_tail) {
15712 +- (*frag_tail)->next = frag;
15713 +- frag_tail = &frag;
15714 +- } else {
15715 +- *frag_tail = frag;
15716 +- }
15717 ++ *frag_tail = frag;
15718 ++ frag_tail = &(*frag_tail)->next;
15719 + }
15720 +
15721 + mt76_testmode_free_skb(phy);
15722 +diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
15723 +index 53ea8de82df06..441d06e30b1a5 100644
15724 +--- a/drivers/net/wireless/mediatek/mt76/tx.c
15725 ++++ b/drivers/net/wireless/mediatek/mt76/tx.c
15726 +@@ -285,7 +285,7 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
15727 + skb_set_queue_mapping(skb, qid);
15728 + }
15729 +
15730 +- if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
15731 ++ if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET))
15732 + ieee80211_get_tx_rates(info->control.vif, sta, skb,
15733 + info->control.rates, 1);
15734 +
15735 +diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
15736 +index 6cb593cc33c2d..6d06f26a48942 100644
15737 +--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
15738 ++++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
15739 +@@ -4371,26 +4371,28 @@ static void rtw8822c_pwrtrack_set(struct rtw_dev *rtwdev, u8 rf_path)
15740 + }
15741 + }
15742 +
15743 +-static void rtw8822c_pwr_track_path(struct rtw_dev *rtwdev,
15744 +- struct rtw_swing_table *swing_table,
15745 +- u8 path)
15746 ++static void rtw8822c_pwr_track_stats(struct rtw_dev *rtwdev, u8 path)
15747 + {
15748 +- struct rtw_dm_info *dm_info = &rtwdev->dm_info;
15749 +- u8 thermal_value, delta;
15750 ++ u8 thermal_value;
15751 +
15752 + if (rtwdev->efuse.thermal_meter[path] == 0xff)
15753 + return;
15754 +
15755 + thermal_value = rtw_read_rf(rtwdev, path, RF_T_METER, 0x7e);
15756 +-
15757 + rtw_phy_pwrtrack_avg(rtwdev, thermal_value, path);
15758 ++}
15759 +
15760 +- delta = rtw_phy_pwrtrack_get_delta(rtwdev, path);
15761 ++static void rtw8822c_pwr_track_path(struct rtw_dev *rtwdev,
15762 ++ struct rtw_swing_table *swing_table,
15763 ++ u8 path)
15764 ++{
15765 ++ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
15766 ++ u8 delta;
15767 +
15768 ++ delta = rtw_phy_pwrtrack_get_delta(rtwdev, path);
15769 + dm_info->delta_power_index[path] =
15770 + rtw_phy_pwrtrack_get_pwridx(rtwdev, swing_table, path, path,
15771 + delta);
15772 +-
15773 + rtw8822c_pwrtrack_set(rtwdev, path);
15774 + }
15775 +
15776 +@@ -4401,12 +4403,12 @@ static void __rtw8822c_pwr_track(struct rtw_dev *rtwdev)
15777 +
15778 + rtw_phy_config_swing_table(rtwdev, &swing_table);
15779 +
15780 ++ for (i = 0; i < rtwdev->hal.rf_path_num; i++)
15781 ++ rtw8822c_pwr_track_stats(rtwdev, i);
15782 + if (rtw_phy_pwrtrack_need_lck(rtwdev))
15783 + rtw8822c_do_lck(rtwdev);
15784 +-
15785 + for (i = 0; i < rtwdev->hal.rf_path_num; i++)
15786 + rtw8822c_pwr_track_path(rtwdev, &swing_table, i);
15787 +-
15788 + }
15789 +
15790 + static void rtw8822c_pwr_track(struct rtw_dev *rtwdev)
15791 +diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
15792 +index ce9892152f4d4..99b21a2c83861 100644
15793 +--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
15794 ++++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
15795 +@@ -203,7 +203,7 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
15796 + wh->frame_control |= cpu_to_le16(RSI_SET_PS_ENABLE);
15797 +
15798 + if ((!(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) &&
15799 +- (common->secinfo.security_enable)) {
15800 ++ info->control.hw_key) {
15801 + if (rsi_is_cipher_wep(common))
15802 + ieee80211_size += 4;
15803 + else
15804 +@@ -470,9 +470,9 @@ int rsi_prepare_beacon(struct rsi_common *common, struct sk_buff *skb)
15805 + }
15806 +
15807 + if (common->band == NL80211_BAND_2GHZ)
15808 +- bcn_frm->bbp_info |= cpu_to_le16(RSI_RATE_1);
15809 ++ bcn_frm->rate_info |= cpu_to_le16(RSI_RATE_1);
15810 + else
15811 +- bcn_frm->bbp_info |= cpu_to_le16(RSI_RATE_6);
15812 ++ bcn_frm->rate_info |= cpu_to_le16(RSI_RATE_6);
15813 +
15814 + if (mac_bcn->data[tim_offset + 2] == 0)
15815 + bcn_frm->frame_info |= cpu_to_le16(RSI_DATA_DESC_DTIM_BEACON);
15816 +diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
15817 +index 16025300cddb3..57c9e3559dfd1 100644
15818 +--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
15819 ++++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
15820 +@@ -1028,7 +1028,6 @@ static int rsi_mac80211_set_key(struct ieee80211_hw *hw,
15821 + mutex_lock(&common->mutex);
15822 + switch (cmd) {
15823 + case SET_KEY:
15824 +- secinfo->security_enable = true;
15825 + status = rsi_hal_key_config(hw, vif, key, sta);
15826 + if (status) {
15827 + mutex_unlock(&common->mutex);
15828 +@@ -1047,8 +1046,6 @@ static int rsi_mac80211_set_key(struct ieee80211_hw *hw,
15829 + break;
15830 +
15831 + case DISABLE_KEY:
15832 +- if (vif->type == NL80211_IFTYPE_STATION)
15833 +- secinfo->security_enable = false;
15834 + rsi_dbg(ERR_ZONE, "%s: RSI del key\n", __func__);
15835 + memset(key, 0, sizeof(struct ieee80211_key_conf));
15836 + status = rsi_hal_key_config(hw, vif, key, sta);
15837 +diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
15838 +index 33c76d39a8e96..b6d050a2fbe7e 100644
15839 +--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
15840 ++++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
15841 +@@ -1803,8 +1803,7 @@ int rsi_send_wowlan_request(struct rsi_common *common, u16 flags,
15842 + RSI_WIFI_MGMT_Q);
15843 + cmd_frame->desc.desc_dword0.frame_type = WOWLAN_CONFIG_PARAMS;
15844 + cmd_frame->host_sleep_status = sleep_status;
15845 +- if (common->secinfo.security_enable &&
15846 +- common->secinfo.gtk_cipher)
15847 ++ if (common->secinfo.gtk_cipher)
15848 + flags |= RSI_WOW_GTK_REKEY;
15849 + if (sleep_status)
15850 + cmd_frame->wow_flags = flags;
15851 +diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h
15852 +index a1065e5a92b43..0f535850a3836 100644
15853 +--- a/drivers/net/wireless/rsi/rsi_main.h
15854 ++++ b/drivers/net/wireless/rsi/rsi_main.h
15855 +@@ -151,7 +151,6 @@ enum edca_queue {
15856 + };
15857 +
15858 + struct security_info {
15859 +- bool security_enable;
15860 + u32 ptk_cipher;
15861 + u32 gtk_cipher;
15862 + };
15863 +diff --git a/drivers/net/wireless/st/cw1200/scan.c b/drivers/net/wireless/st/cw1200/scan.c
15864 +index 988581cc134b7..1f856fbbc0ea4 100644
15865 +--- a/drivers/net/wireless/st/cw1200/scan.c
15866 ++++ b/drivers/net/wireless/st/cw1200/scan.c
15867 +@@ -75,30 +75,27 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
15868 + if (req->n_ssids > WSM_SCAN_MAX_NUM_OF_SSIDS)
15869 + return -EINVAL;
15870 +
15871 +- /* will be unlocked in cw1200_scan_work() */
15872 +- down(&priv->scan.lock);
15873 +- mutex_lock(&priv->conf_mutex);
15874 +-
15875 + frame.skb = ieee80211_probereq_get(hw, priv->vif->addr, NULL, 0,
15876 + req->ie_len);
15877 +- if (!frame.skb) {
15878 +- mutex_unlock(&priv->conf_mutex);
15879 +- up(&priv->scan.lock);
15880 ++ if (!frame.skb)
15881 + return -ENOMEM;
15882 +- }
15883 +
15884 + if (req->ie_len)
15885 + skb_put_data(frame.skb, req->ie, req->ie_len);
15886 +
15887 ++ /* will be unlocked in cw1200_scan_work() */
15888 ++ down(&priv->scan.lock);
15889 ++ mutex_lock(&priv->conf_mutex);
15890 ++
15891 + ret = wsm_set_template_frame(priv, &frame);
15892 + if (!ret) {
15893 + /* Host want to be the probe responder. */
15894 + ret = wsm_set_probe_responder(priv, true);
15895 + }
15896 + if (ret) {
15897 +- dev_kfree_skb(frame.skb);
15898 + mutex_unlock(&priv->conf_mutex);
15899 + up(&priv->scan.lock);
15900 ++ dev_kfree_skb(frame.skb);
15901 + return ret;
15902 + }
15903 +
15904 +@@ -120,8 +117,8 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
15905 + ++priv->scan.n_ssids;
15906 + }
15907 +
15908 +- dev_kfree_skb(frame.skb);
15909 + mutex_unlock(&priv->conf_mutex);
15910 ++ dev_kfree_skb(frame.skb);
15911 + queue_work(priv->workqueue, &priv->scan.work);
15912 + return 0;
15913 + }
15914 +diff --git a/drivers/net/wwan/Kconfig b/drivers/net/wwan/Kconfig
15915 +index 7ad1920120bcb..e9d8a1c25e433 100644
15916 +--- a/drivers/net/wwan/Kconfig
15917 ++++ b/drivers/net/wwan/Kconfig
15918 +@@ -3,15 +3,9 @@
15919 + # Wireless WAN device configuration
15920 + #
15921 +
15922 +-menuconfig WWAN
15923 +- bool "Wireless WAN"
15924 +- help
15925 +- This section contains Wireless WAN configuration for WWAN framework
15926 +- and drivers.
15927 +-
15928 +-if WWAN
15929 ++menu "Wireless WAN"
15930 +
15931 +-config WWAN_CORE
15932 ++config WWAN
15933 + tristate "WWAN Driver Core"
15934 + help
15935 + Say Y here if you want to use the WWAN driver core. This driver
15936 +@@ -20,9 +14,10 @@ config WWAN_CORE
15937 + To compile this driver as a module, choose M here: the module will be
15938 + called wwan.
15939 +
15940 ++if WWAN
15941 ++
15942 + config MHI_WWAN_CTRL
15943 + tristate "MHI WWAN control driver for QCOM-based PCIe modems"
15944 +- select WWAN_CORE
15945 + depends on MHI_BUS
15946 + help
15947 + MHI WWAN CTRL allows QCOM-based PCIe modems to expose different modem
15948 +@@ -35,3 +30,5 @@ config MHI_WWAN_CTRL
15949 + called mhi_wwan_ctrl.
15950 +
15951 + endif # WWAN
15952 ++
15953 ++endmenu
15954 +diff --git a/drivers/net/wwan/Makefile b/drivers/net/wwan/Makefile
15955 +index 556cd90958cae..289771a4f952e 100644
15956 +--- a/drivers/net/wwan/Makefile
15957 ++++ b/drivers/net/wwan/Makefile
15958 +@@ -3,7 +3,7 @@
15959 + # Makefile for the Linux WWAN device drivers.
15960 + #
15961 +
15962 +-obj-$(CONFIG_WWAN_CORE) += wwan.o
15963 ++obj-$(CONFIG_WWAN) += wwan.o
15964 + wwan-objs += wwan_core.o
15965 +
15966 + obj-$(CONFIG_MHI_WWAN_CTRL) += mhi_wwan_ctrl.o
15967 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
15968 +index a29b170701fc6..42ad75ff13481 100644
15969 +--- a/drivers/nvme/host/pci.c
15970 ++++ b/drivers/nvme/host/pci.c
15971 +@@ -1032,7 +1032,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
15972 +
15973 + static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
15974 + {
15975 +- u16 tmp = nvmeq->cq_head + 1;
15976 ++ u32 tmp = nvmeq->cq_head + 1;
15977 +
15978 + if (tmp == nvmeq->q_depth) {
15979 + nvmeq->cq_head = 0;
15980 +@@ -2831,10 +2831,7 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
15981 + #ifdef CONFIG_ACPI
15982 + static bool nvme_acpi_storage_d3(struct pci_dev *dev)
15983 + {
15984 +- struct acpi_device *adev;
15985 +- struct pci_dev *root;
15986 +- acpi_handle handle;
15987 +- acpi_status status;
15988 ++ struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
15989 + u8 val;
15990 +
15991 + /*
15992 +@@ -2842,28 +2839,9 @@ static bool nvme_acpi_storage_d3(struct pci_dev *dev)
15993 + * must use D3 to support deep platform power savings during
15994 + * suspend-to-idle.
15995 + */
15996 +- root = pcie_find_root_port(dev);
15997 +- if (!root)
15998 +- return false;
15999 +
16000 +- adev = ACPI_COMPANION(&root->dev);
16001 + if (!adev)
16002 + return false;
16003 +-
16004 +- /*
16005 +- * The property is defined in the PXSX device for South complex ports
16006 +- * and in the PEGP device for North complex ports.
16007 +- */
16008 +- status = acpi_get_handle(adev->handle, "PXSX", &handle);
16009 +- if (ACPI_FAILURE(status)) {
16010 +- status = acpi_get_handle(adev->handle, "PEGP", &handle);
16011 +- if (ACPI_FAILURE(status))
16012 +- return false;
16013 +- }
16014 +-
16015 +- if (acpi_bus_get_device(handle, &adev))
16016 +- return false;
16017 +-
16018 + if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
16019 + &val))
16020 + return false;
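
Two fixes above in nvme/host/pci.c: nvme_acpi_storage_d3() now reads
the StorageD3Enable property from the device's own ACPI companion
instead of walking to the root port, and nvme_update_cq_head() widens
its temporary to u32. For the latter, a u16 increment can wrap to 0
before the depth comparison on a maximum-depth queue, so the phase
flip would be missed. A small standalone demonstration of the wrap,
assuming a 65536-entry queue:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t q_depth = 65536;   /* deepest queue assumed here */
            uint16_t head = 65535;      /* last valid cq_head */

            uint16_t t16 = head + 1;    /* wraps to 0 before the compare */
            uint32_t t32 = (uint32_t)head + 1;

            printf("u16 tmp=%u, equals depth? %d\n",
                   (unsigned)t16, t16 == q_depth);
            printf("u32 tmp=%u, equals depth? %d\n",
                   (unsigned)t32, t32 == q_depth);
            return 0;
    }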
16021 +diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
16022 +index 34f4b3402f7c1..79a463090dd30 100644
16023 +--- a/drivers/nvme/host/tcp.c
16024 ++++ b/drivers/nvme/host/tcp.c
16025 +@@ -1973,11 +1973,13 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
16026 + return ret;
16027 +
16028 + if (ctrl->icdoff) {
16029 ++ ret = -EOPNOTSUPP;
16030 + dev_err(ctrl->device, "icdoff is not supported!\n");
16031 + goto destroy_admin;
16032 + }
16033 +
16034 + if (!(ctrl->sgls & ((1 << 0) | (1 << 1)))) {
16035 ++ ret = -EOPNOTSUPP;
16036 + dev_err(ctrl->device, "Mandatory sgls are not supported!\n");
16037 + goto destroy_admin;
16038 + }
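
The tcp.c hunk fixes error paths that jumped to the teardown label
with ret still 0, so an unsupported controller (non-zero icdoff, or
no SGL support in bits 0/1 of ctrl->sgls) could be reported as a
successful setup. A compact sketch of the set-the-error-before-goto
pattern, with illustrative stand-in values:

    #include <stdio.h>

    /* Returns 0 on success; error paths must set rc before jumping. */
    static int setup_ctrl(int icdoff, int sgls)
    {
            int rc = 0;

            if (icdoff) {
                    rc = -95;           /* EOPNOTSUPP on Linux */
                    goto destroy_admin;
            }
            if (!(sgls & ((1 << 0) | (1 << 1)))) {
                    rc = -95;
                    goto destroy_admin;
            }
            return 0;

    destroy_admin:
            /* teardown would run here; without the assignments above,
             * rc would still be 0 and the caller would see success */
            return rc;
    }

    int main(void)
    {
            printf("icdoff set -> %d\n", setup_ctrl(1, 3));
            printf("no sgls    -> %d\n", setup_ctrl(0, 0));
            printf("supported  -> %d\n", setup_ctrl(0, 1));
            return 0;
    }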
16039 +diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
16040 +index 19e113240fff9..22b5108168a6a 100644
16041 +--- a/drivers/nvme/target/fc.c
16042 ++++ b/drivers/nvme/target/fc.c
16043 +@@ -2510,13 +2510,6 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
16044 + u32 xfrlen = be32_to_cpu(cmdiu->data_len);
16045 + int ret;
16046 +
16047 +- /*
16048 +- * if there is no nvmet mapping to the targetport there
16049 +- * shouldn't be requests. just terminate them.
16050 +- */
16051 +- if (!tgtport->pe)
16052 +- goto transport_error;
16053 +-
16054 + /*
16055 + * Fused commands are currently not supported in the linux
16056 + * implementation.
16057 +@@ -2544,7 +2537,8 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
16058 +
16059 + fod->req.cmd = &fod->cmdiubuf.sqe;
16060 + fod->req.cqe = &fod->rspiubuf.cqe;
16061 +- fod->req.port = tgtport->pe->port;
16062 ++ if (tgtport->pe)
16063 ++ fod->req.port = tgtport->pe->port;
16064 +
16065 + /* clear any response payload */
16066 + memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
16067 +diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
16068 +index ba17a80b8c79c..cc71e0b3eed9f 100644
16069 +--- a/drivers/of/fdt.c
16070 ++++ b/drivers/of/fdt.c
16071 +@@ -510,11 +510,11 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
16072 +
16073 + if (size &&
16074 + early_init_dt_reserve_memory_arch(base, size, nomap) == 0)
16075 +- pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %ld MiB\n",
16076 +- uname, &base, (unsigned long)size / SZ_1M);
16077 ++ pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
16078 ++ uname, &base, (unsigned long)(size / SZ_1M));
16079 + else
16080 +- pr_info("Reserved memory: failed to reserve memory for node '%s': base %pa, size %ld MiB\n",
16081 +- uname, &base, (unsigned long)size / SZ_1M);
16082 ++ pr_info("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
16083 ++ uname, &base, (unsigned long)(size / SZ_1M));
16084 +
16085 + len -= t_len;
16086 + if (first) {
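
The printf fix above is about cast precedence: (unsigned long)size /
SZ_1M casts the 64-bit size down before dividing, which truncates on
targets where unsigned long is 32 bits, while (unsigned long)(size /
SZ_1M) divides first. A worked example, with uint32_t standing in for
a 32-bit unsigned long:

    #include <stdint.h>
    #include <stdio.h>

    #define SZ_1M 0x00100000UL

    int main(void)
    {
            uint64_t size = 0x100000000ULL;  /* a 4 GiB region */

            /* cast first: the high 32 bits vanish, printing 0 MiB */
            uint32_t wrong = (uint32_t)size / SZ_1M;

            /* divide first: the quotient fits easily afterwards */
            uint32_t right = (uint32_t)(size / SZ_1M);

            printf("cast-then-divide: %u MiB\n", (unsigned)wrong);
            printf("divide-then-cast: %u MiB\n", (unsigned)right);
            return 0;
    }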
16087 +diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
16088 +index 15e2417974d67..3502ba522c397 100644
16089 +--- a/drivers/of/of_reserved_mem.c
16090 ++++ b/drivers/of/of_reserved_mem.c
16091 +@@ -134,9 +134,9 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
16092 + ret = early_init_dt_alloc_reserved_memory_arch(size,
16093 + align, start, end, nomap, &base);
16094 + if (ret == 0) {
16095 +- pr_debug("allocated memory for '%s' node: base %pa, size %ld MiB\n",
16096 ++ pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
16097 + uname, &base,
16098 +- (unsigned long)size / SZ_1M);
16099 ++ (unsigned long)(size / SZ_1M));
16100 + break;
16101 + }
16102 + len -= t_len;
16103 +@@ -146,8 +146,8 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
16104 + ret = early_init_dt_alloc_reserved_memory_arch(size, align,
16105 + 0, 0, nomap, &base);
16106 + if (ret == 0)
16107 +- pr_debug("allocated memory for '%s' node: base %pa, size %ld MiB\n",
16108 +- uname, &base, (unsigned long)size / SZ_1M);
16109 ++ pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
16110 ++ uname, &base, (unsigned long)(size / SZ_1M));
16111 + }
16112 +
16113 + if (base == 0) {
16114 +diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
16115 +index 6511648271b23..bebe3eeebc4e1 100644
16116 +--- a/drivers/pci/controller/pci-hyperv.c
16117 ++++ b/drivers/pci/controller/pci-hyperv.c
16118 +@@ -3476,6 +3476,9 @@ static void __exit exit_hv_pci_drv(void)
16119 +
16120 + static int __init init_hv_pci_drv(void)
16121 + {
16122 ++ if (!hv_is_hyperv_initialized())
16123 ++ return -ENODEV;
16124 ++
16125 + /* Set the invalid domain number's bit, so it will not be used */
16126 + set_bit(HVPCI_DOM_INVALID, hvpci_dom_map);
16127 +
16128 +diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
16129 +index 56a5c355701d0..49016f2f505e0 100644
16130 +--- a/drivers/perf/arm-cmn.c
16131 ++++ b/drivers/perf/arm-cmn.c
16132 +@@ -1212,7 +1212,7 @@ static int arm_cmn_init_irqs(struct arm_cmn *cmn)
16133 + irq = cmn->dtc[i].irq;
16134 + for (j = i; j--; ) {
16135 + if (cmn->dtc[j].irq == irq) {
16136 +- cmn->dtc[j].irq_friend = j - i;
16137 ++ cmn->dtc[j].irq_friend = i - j;
16138 + goto next;
16139 + }
16140 + }
16141 +diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
16142 +index ff6fab4bae30d..863d9f702aa17 100644
16143 +--- a/drivers/perf/arm_smmuv3_pmu.c
16144 ++++ b/drivers/perf/arm_smmuv3_pmu.c
16145 +@@ -277,7 +277,7 @@ static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
16146 + struct perf_event *event, int idx)
16147 + {
16148 + u32 span, sid;
16149 +- unsigned int num_ctrs = smmu_pmu->num_counters;
16150 ++ unsigned int cur_idx, num_ctrs = smmu_pmu->num_counters;
16151 + bool filter_en = !!get_filter_enable(event);
16152 +
16153 + span = filter_en ? get_filter_span(event) :
16154 +@@ -285,17 +285,19 @@ static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
16155 + sid = filter_en ? get_filter_stream_id(event) :
16156 + SMMU_PMCG_DEFAULT_FILTER_SID;
16157 +
16158 +- /* Support individual filter settings */
16159 +- if (!smmu_pmu->global_filter) {
16160 ++ cur_idx = find_first_bit(smmu_pmu->used_counters, num_ctrs);
16161 ++ /*
16162 ++ * Per-counter filtering, or scheduling the first globally-filtered
16163 ++ * event into an empty PMU so idx == 0 and it works out equivalent.
16164 ++ */
16165 ++ if (!smmu_pmu->global_filter || cur_idx == num_ctrs) {
16166 + smmu_pmu_set_event_filter(event, idx, span, sid);
16167 + return 0;
16168 + }
16169 +
16170 +- /* Requested settings same as current global settings*/
16171 +- idx = find_first_bit(smmu_pmu->used_counters, num_ctrs);
16172 +- if (idx == num_ctrs ||
16173 +- smmu_pmu_check_global_filter(smmu_pmu->events[idx], event)) {
16174 +- smmu_pmu_set_event_filter(event, 0, span, sid);
16175 ++ /* Otherwise, must match whatever's currently scheduled */
16176 ++ if (smmu_pmu_check_global_filter(smmu_pmu->events[cur_idx], event)) {
16177 ++ smmu_pmu_set_evtyper(smmu_pmu, idx, get_event(event));
16178 + return 0;
16179 + }
16180 +
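
The arm_smmuv3_pmu hunk hinges on a find_first_bit() convention: when
no counter is in use it returns the bitmap size (num_ctrs), which the
new code uses as the "PMU is empty, program the global filter now"
case; otherwise incoming events must match the already-programmed
filter. A tiny single-word stand-in showing that sentinel behaviour
(names and sizes here are illustrative):

    #include <stdio.h>

    /* Single-word stand-in for the kernel helper: returns the index
     * of the lowest set bit, or size when no bit is set at all. */
    static unsigned int find_first_bit(unsigned long word,
                                       unsigned int size)
    {
            for (unsigned int i = 0; i < size; i++)
                    if (word & (1UL << i))
                            return i;
            return size;
    }

    int main(void)
    {
            unsigned int num_ctrs = 8;
            unsigned long used_counters = 0;    /* empty PMU */

            /* sentinel num_ctrs: program the shared filter freely */
            printf("empty:  cur_idx=%u\n",
                   find_first_bit(used_counters, num_ctrs));

            used_counters |= 1UL << 3;          /* counter 3 in use */
            /* now new events must match counter 3's filter settings */
            printf("in use: cur_idx=%u\n",
                   find_first_bit(used_counters, num_ctrs));
            return 0;
    }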
16181 +diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
16182 +index 2bbb931880649..7b87aaf267d57 100644
16183 +--- a/drivers/perf/fsl_imx8_ddr_perf.c
16184 ++++ b/drivers/perf/fsl_imx8_ddr_perf.c
16185 +@@ -705,8 +705,10 @@ static int ddr_perf_probe(struct platform_device *pdev)
16186 +
16187 + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d",
16188 + num);
16189 +- if (!name)
16190 +- return -ENOMEM;
16191 ++ if (!name) {
16192 ++ ret = -ENOMEM;
16193 ++ goto cpuhp_state_err;
16194 ++ }
16195 +
16196 + pmu->devtype_data = of_device_get_match_data(&pdev->dev);
16197 +
16198 +diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
16199 +index 0316fabe32f1a..acc864bded2be 100644
16200 +--- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
16201 ++++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
16202 +@@ -90,7 +90,7 @@ static void hisi_hha_pmu_config_ds(struct perf_event *event)
16203 +
16204 + val = readl(hha_pmu->base + HHA_DATSRC_CTRL);
16205 + val |= HHA_DATSRC_SKT_EN;
16206 +- writel(ds_skt, hha_pmu->base + HHA_DATSRC_CTRL);
16207 ++ writel(val, hha_pmu->base + HHA_DATSRC_CTRL);
16208 + }
16209 + }
16210 +
16211 +@@ -104,7 +104,7 @@ static void hisi_hha_pmu_clear_ds(struct perf_event *event)
16212 +
16213 + val = readl(hha_pmu->base + HHA_DATSRC_CTRL);
16214 + val &= ~HHA_DATSRC_SKT_EN;
16215 +- writel(ds_skt, hha_pmu->base + HHA_DATSRC_CTRL);
16216 ++ writel(val, hha_pmu->base + HHA_DATSRC_CTRL);
16217 + }
16218 + }
16219 +
16220 +diff --git a/drivers/phy/ralink/phy-mt7621-pci.c b/drivers/phy/ralink/phy-mt7621-pci.c
16221 +index 2a9465f4bb3a9..3b1245fc5a02e 100644
16222 +--- a/drivers/phy/ralink/phy-mt7621-pci.c
16223 ++++ b/drivers/phy/ralink/phy-mt7621-pci.c
16224 +@@ -272,8 +272,8 @@ static struct phy *mt7621_pcie_phy_of_xlate(struct device *dev,
16225 +
16226 + mt7621_phy->has_dual_port = args->args[0];
16227 +
16228 +- dev_info(dev, "PHY for 0x%08x (dual port = %d)\n",
16229 +- (unsigned int)mt7621_phy->port_base, mt7621_phy->has_dual_port);
16230 ++ dev_dbg(dev, "PHY for 0x%px (dual port = %d)\n",
16231 ++ mt7621_phy->port_base, mt7621_phy->has_dual_port);
16232 +
16233 + return mt7621_phy->phy;
16234 + }
16235 +diff --git a/drivers/phy/socionext/phy-uniphier-pcie.c b/drivers/phy/socionext/phy-uniphier-pcie.c
16236 +index e4adab375c737..6bdbd1f214dd4 100644
16237 +--- a/drivers/phy/socionext/phy-uniphier-pcie.c
16238 ++++ b/drivers/phy/socionext/phy-uniphier-pcie.c
16239 +@@ -24,11 +24,13 @@
16240 + #define PORT_SEL_1 FIELD_PREP(PORT_SEL_MASK, 1)
16241 +
16242 + #define PCL_PHY_TEST_I 0x2000
16243 +-#define PCL_PHY_TEST_O 0x2004
16244 + #define TESTI_DAT_MASK GENMASK(13, 6)
16245 + #define TESTI_ADR_MASK GENMASK(5, 1)
16246 + #define TESTI_WR_EN BIT(0)
16247 +
16248 ++#define PCL_PHY_TEST_O 0x2004
16249 ++#define TESTO_DAT_MASK GENMASK(7, 0)
16250 ++
16251 + #define PCL_PHY_RESET 0x200c
16252 + #define PCL_PHY_RESET_N_MNMODE BIT(8) /* =1:manual */
16253 + #define PCL_PHY_RESET_N BIT(0) /* =1:deassert */
16254 +@@ -77,11 +79,12 @@ static void uniphier_pciephy_set_param(struct uniphier_pciephy_priv *priv,
16255 + val = FIELD_PREP(TESTI_DAT_MASK, 1);
16256 + val |= FIELD_PREP(TESTI_ADR_MASK, reg);
16257 + uniphier_pciephy_testio_write(priv, val);
16258 +- val = readl(priv->base + PCL_PHY_TEST_O);
16259 ++ val = readl(priv->base + PCL_PHY_TEST_O) & TESTO_DAT_MASK;
16260 +
16261 + /* update value */
16262 +- val &= ~FIELD_PREP(TESTI_DAT_MASK, mask);
16263 +- val = FIELD_PREP(TESTI_DAT_MASK, mask & param);
16264 ++ val &= ~mask;
16265 ++ val |= mask & param;
16266 ++ val = FIELD_PREP(TESTI_DAT_MASK, val);
16267 + val |= FIELD_PREP(TESTI_ADR_MASK, reg);
16268 + uniphier_pciephy_testio_write(priv, val);
16269 + uniphier_pciephy_testio_write(priv, val | TESTI_WR_EN);
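
The uniphier PHY fix replaces a broken read-modify-write: the old
code applied FIELD_PREP to the mask itself and then overwrote, rather
than merged, the read-back value. The new sequence trims the readout
to TESTO_DAT_MASK, modifies the plain 8-bit value, and only then
shifts it into the TESTI data field. A self-contained sketch of that
order of operations (the register values below are made up):

    #include <stdint.h>
    #include <stdio.h>

    /* The data field sits at bits 13..6 of the TESTI register. */
    #define DAT_SHIFT 6
    #define DAT_MASK  (0xffu << DAT_SHIFT)

    /* FIELD_PREP-style helper: place an 8-bit value into the field. */
    static uint32_t dat_prep(uint32_t v)
    {
            return (v << DAT_SHIFT) & DAT_MASK;
    }

    int main(void)
    {
            uint32_t testo = 0xffffffab; /* raw readout, junk above bit 7 */
            uint32_t mask = 0x0f, param = 0x05;

            /* fixed order: trim to valid bits, modify, then shift once */
            uint32_t val = testo & 0xff;    /* TESTO_DAT_MASK */
            val &= ~mask;
            val |= mask & param;
            val = dat_prep(val);

            /* 0xa5 placed at bits 13..6 -> 0x2940 */
            printf("testi value = 0x%04x\n", (unsigned)val);
            return 0;
    }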
16270 +diff --git a/drivers/phy/ti/phy-dm816x-usb.c b/drivers/phy/ti/phy-dm816x-usb.c
16271 +index 57adc08a89b2d..9fe6ea6fdae55 100644
16272 +--- a/drivers/phy/ti/phy-dm816x-usb.c
16273 ++++ b/drivers/phy/ti/phy-dm816x-usb.c
16274 +@@ -242,19 +242,28 @@ static int dm816x_usb_phy_probe(struct platform_device *pdev)
16275 +
16276 + pm_runtime_enable(phy->dev);
16277 + generic_phy = devm_phy_create(phy->dev, NULL, &ops);
16278 +- if (IS_ERR(generic_phy))
16279 +- return PTR_ERR(generic_phy);
16280 ++ if (IS_ERR(generic_phy)) {
16281 ++ error = PTR_ERR(generic_phy);
16282 ++ goto clk_unprepare;
16283 ++ }
16284 +
16285 + phy_set_drvdata(generic_phy, phy);
16286 +
16287 + phy_provider = devm_of_phy_provider_register(phy->dev,
16288 + of_phy_simple_xlate);
16289 +- if (IS_ERR(phy_provider))
16290 +- return PTR_ERR(phy_provider);
16291 ++ if (IS_ERR(phy_provider)) {
16292 ++ error = PTR_ERR(phy_provider);
16293 ++ goto clk_unprepare;
16294 ++ }
16295 +
16296 + usb_add_phy_dev(&phy->phy);
16297 +
16298 + return 0;
16299 ++
16300 ++clk_unprepare:
16301 ++ pm_runtime_disable(phy->dev);
16302 ++ clk_unprepare(phy->refclk);
16303 ++ return error;
16304 + }
16305 +
16306 + static int dm816x_usb_phy_remove(struct platform_device *pdev)
16307 +diff --git a/drivers/pinctrl/renesas/pfc-r8a7796.c b/drivers/pinctrl/renesas/pfc-r8a7796.c
16308 +index 44e9d2eea484a..bbb1b436ded31 100644
16309 +--- a/drivers/pinctrl/renesas/pfc-r8a7796.c
16310 ++++ b/drivers/pinctrl/renesas/pfc-r8a7796.c
16311 +@@ -67,6 +67,7 @@
16312 + PIN_NOGP_CFG(QSPI1_MOSI_IO0, "QSPI1_MOSI_IO0", fn, CFG_FLAGS), \
16313 + PIN_NOGP_CFG(QSPI1_SPCLK, "QSPI1_SPCLK", fn, CFG_FLAGS), \
16314 + PIN_NOGP_CFG(QSPI1_SSL, "QSPI1_SSL", fn, CFG_FLAGS), \
16315 ++ PIN_NOGP_CFG(PRESET_N, "PRESET#", fn, SH_PFC_PIN_CFG_PULL_DOWN),\
16316 + PIN_NOGP_CFG(RPC_INT_N, "RPC_INT#", fn, CFG_FLAGS), \
16317 + PIN_NOGP_CFG(RPC_RESET_N, "RPC_RESET#", fn, CFG_FLAGS), \
16318 + PIN_NOGP_CFG(RPC_WP_N, "RPC_WP#", fn, CFG_FLAGS), \
16319 +@@ -6218,7 +6219,7 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
16320 + [ 4] = RCAR_GP_PIN(6, 29), /* USB30_OVC */
16321 + [ 5] = RCAR_GP_PIN(6, 30), /* GP6_30 */
16322 + [ 6] = RCAR_GP_PIN(6, 31), /* GP6_31 */
16323 +- [ 7] = SH_PFC_PIN_NONE,
16324 ++ [ 7] = PIN_PRESET_N, /* PRESET# */
16325 + [ 8] = SH_PFC_PIN_NONE,
16326 + [ 9] = SH_PFC_PIN_NONE,
16327 + [10] = SH_PFC_PIN_NONE,
16328 +diff --git a/drivers/pinctrl/renesas/pfc-r8a77990.c b/drivers/pinctrl/renesas/pfc-r8a77990.c
16329 +index d040eb3e305da..eeebbab4dd811 100644
16330 +--- a/drivers/pinctrl/renesas/pfc-r8a77990.c
16331 ++++ b/drivers/pinctrl/renesas/pfc-r8a77990.c
16332 +@@ -53,10 +53,10 @@
16333 + PIN_NOGP_CFG(FSCLKST_N, "FSCLKST_N", fn, CFG_FLAGS), \
16334 + PIN_NOGP_CFG(MLB_REF, "MLB_REF", fn, CFG_FLAGS), \
16335 + PIN_NOGP_CFG(PRESETOUT_N, "PRESETOUT_N", fn, CFG_FLAGS), \
16336 +- PIN_NOGP_CFG(TCK, "TCK", fn, CFG_FLAGS), \
16337 +- PIN_NOGP_CFG(TDI, "TDI", fn, CFG_FLAGS), \
16338 +- PIN_NOGP_CFG(TMS, "TMS", fn, CFG_FLAGS), \
16339 +- PIN_NOGP_CFG(TRST_N, "TRST_N", fn, CFG_FLAGS)
16340 ++ PIN_NOGP_CFG(TCK, "TCK", fn, SH_PFC_PIN_CFG_PULL_UP), \
16341 ++ PIN_NOGP_CFG(TDI, "TDI", fn, SH_PFC_PIN_CFG_PULL_UP), \
16342 ++ PIN_NOGP_CFG(TMS, "TMS", fn, SH_PFC_PIN_CFG_PULL_UP), \
16343 ++ PIN_NOGP_CFG(TRST_N, "TRST_N", fn, SH_PFC_PIN_CFG_PULL_UP)
16344 +
16345 + /*
16346 + * F_() : just information
16347 +diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
16348 +index d41d7ad14be0d..0cb927f0f301a 100644
16349 +--- a/drivers/platform/x86/asus-nb-wmi.c
16350 ++++ b/drivers/platform/x86/asus-nb-wmi.c
16351 +@@ -110,11 +110,6 @@ static struct quirk_entry quirk_asus_forceals = {
16352 + .wmi_force_als_set = true,
16353 + };
16354 +
16355 +-static struct quirk_entry quirk_asus_vendor_backlight = {
16356 +- .wmi_backlight_power = true,
16357 +- .wmi_backlight_set_devstate = true,
16358 +-};
16359 +-
16360 + static struct quirk_entry quirk_asus_use_kbd_dock_devid = {
16361 + .use_kbd_dock_devid = true,
16362 + };
16363 +@@ -425,78 +420,6 @@ static const struct dmi_system_id asus_quirks[] = {
16364 + },
16365 + .driver_data = &quirk_asus_forceals,
16366 + },
16367 +- {
16368 +- .callback = dmi_matched,
16369 +- .ident = "ASUSTeK COMPUTER INC. GA401IH",
16370 +- .matches = {
16371 +- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
16372 +- DMI_MATCH(DMI_PRODUCT_NAME, "GA401IH"),
16373 +- },
16374 +- .driver_data = &quirk_asus_vendor_backlight,
16375 +- },
16376 +- {
16377 +- .callback = dmi_matched,
16378 +- .ident = "ASUSTeK COMPUTER INC. GA401II",
16379 +- .matches = {
16380 +- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
16381 +- DMI_MATCH(DMI_PRODUCT_NAME, "GA401II"),
16382 +- },
16383 +- .driver_data = &quirk_asus_vendor_backlight,
16384 +- },
16385 +- {
16386 +- .callback = dmi_matched,
16387 +- .ident = "ASUSTeK COMPUTER INC. GA401IU",
16388 +- .matches = {
16389 +- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
16390 +- DMI_MATCH(DMI_PRODUCT_NAME, "GA401IU"),
16391 +- },
16392 +- .driver_data = &quirk_asus_vendor_backlight,
16393 +- },
16394 +- {
16395 +- .callback = dmi_matched,
16396 +- .ident = "ASUSTeK COMPUTER INC. GA401IV",
16397 +- .matches = {
16398 +- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
16399 +- DMI_MATCH(DMI_PRODUCT_NAME, "GA401IV"),
16400 +- },
16401 +- .driver_data = &quirk_asus_vendor_backlight,
16402 +- },
16403 +- {
16404 +- .callback = dmi_matched,
16405 +- .ident = "ASUSTeK COMPUTER INC. GA401IVC",
16406 +- .matches = {
16407 +- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
16408 +- DMI_MATCH(DMI_PRODUCT_NAME, "GA401IVC"),
16409 +- },
16410 +- .driver_data = &quirk_asus_vendor_backlight,
16411 +- },
16412 +- {
16413 +- .callback = dmi_matched,
16414 +- .ident = "ASUSTeK COMPUTER INC. GA502II",
16415 +- .matches = {
16416 +- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
16417 +- DMI_MATCH(DMI_PRODUCT_NAME, "GA502II"),
16418 +- },
16419 +- .driver_data = &quirk_asus_vendor_backlight,
16420 +- },
16421 +- {
16422 +- .callback = dmi_matched,
16423 +- .ident = "ASUSTeK COMPUTER INC. GA502IU",
16424 +- .matches = {
16425 +- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
16426 +- DMI_MATCH(DMI_PRODUCT_NAME, "GA502IU"),
16427 +- },
16428 +- .driver_data = &quirk_asus_vendor_backlight,
16429 +- },
16430 +- {
16431 +- .callback = dmi_matched,
16432 +- .ident = "ASUSTeK COMPUTER INC. GA502IV",
16433 +- .matches = {
16434 +- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
16435 +- DMI_MATCH(DMI_PRODUCT_NAME, "GA502IV"),
16436 +- },
16437 +- .driver_data = &quirk_asus_vendor_backlight,
16438 +- },
16439 + {
16440 + .callback = dmi_matched,
16441 + .ident = "Asus Transformer T100TA / T100HA / T100CHI",
16442 +diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
16443 +index fa7232ad8c395..352508d304675 100644
16444 +--- a/drivers/platform/x86/toshiba_acpi.c
16445 ++++ b/drivers/platform/x86/toshiba_acpi.c
16446 +@@ -2831,6 +2831,7 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
16447 +
16448 + if (!dev->info_supported && !dev->system_event_supported) {
16449 + pr_warn("No hotkey query interface found\n");
16450 ++ error = -EINVAL;
16451 + goto err_remove_filter;
16452 + }
16453 +
16454 +diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
16455 +index bde740d6120e1..424cf2a847448 100644
16456 +--- a/drivers/platform/x86/touchscreen_dmi.c
16457 ++++ b/drivers/platform/x86/touchscreen_dmi.c
16458 +@@ -299,6 +299,35 @@ static const struct ts_dmi_data estar_beauty_hd_data = {
16459 + .properties = estar_beauty_hd_props,
16460 + };
16461 +
16462 ++/* Generic props + data for upside-down mounted GDIX1001 touchscreens */
16463 ++static const struct property_entry gdix1001_upside_down_props[] = {
16464 ++ PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
16465 ++ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
16466 ++ { }
16467 ++};
16468 ++
16469 ++static const struct ts_dmi_data gdix1001_00_upside_down_data = {
16470 ++ .acpi_name = "GDIX1001:00",
16471 ++ .properties = gdix1001_upside_down_props,
16472 ++};
16473 ++
16474 ++static const struct ts_dmi_data gdix1001_01_upside_down_data = {
16475 ++ .acpi_name = "GDIX1001:01",
16476 ++ .properties = gdix1001_upside_down_props,
16477 ++};
16478 ++
16479 ++static const struct property_entry glavey_tm800a550l_props[] = {
16480 ++ PROPERTY_ENTRY_STRING("firmware-name", "gt912-glavey-tm800a550l.fw"),
16481 ++ PROPERTY_ENTRY_STRING("goodix,config-name", "gt912-glavey-tm800a550l.cfg"),
16482 ++ PROPERTY_ENTRY_U32("goodix,main-clk", 54),
16483 ++ { }
16484 ++};
16485 ++
16486 ++static const struct ts_dmi_data glavey_tm800a550l_data = {
16487 ++ .acpi_name = "GDIX1001:00",
16488 ++ .properties = glavey_tm800a550l_props,
16489 ++};
16490 ++
16491 + static const struct property_entry gp_electronic_t701_props[] = {
16492 + PROPERTY_ENTRY_U32("touchscreen-size-x", 960),
16493 + PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
16494 +@@ -1038,6 +1067,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
16495 + DMI_MATCH(DMI_PRODUCT_NAME, "eSTAR BEAUTY HD Intel Quad core"),
16496 + },
16497 + },
16498 ++ { /* Glavey TM800A550L */
16499 ++ .driver_data = (void *)&glavey_tm800a550l_data,
16500 ++ .matches = {
16501 ++ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
16502 ++ DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
16503 ++ /* Above strings are too generic, also match on BIOS version */
16504 ++ DMI_MATCH(DMI_BIOS_VERSION, "ZY-8-BI-PX4S70VTR400-X423B-005-D"),
16505 ++ },
16506 ++ },
16507 + {
16508 + /* GP-electronic T701 */
16509 + .driver_data = (void *)&gp_electronic_t701_data,
16510 +@@ -1330,6 +1368,24 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
16511 + DMI_MATCH(DMI_BOARD_NAME, "X3 Plus"),
16512 + },
16513 + },
16514 ++ {
16515 ++ /* Teclast X89 (Android version / BIOS) */
16516 ++ .driver_data = (void *)&gdix1001_00_upside_down_data,
16517 ++ .matches = {
16518 ++ DMI_MATCH(DMI_BOARD_VENDOR, "WISKY"),
16519 ++ DMI_MATCH(DMI_BOARD_NAME, "3G062i"),
16520 ++ },
16521 ++ },
16522 ++ {
16523 ++ /* Teclast X89 (Windows version / BIOS) */
16524 ++ .driver_data = (void *)&gdix1001_01_upside_down_data,
16525 ++ .matches = {
16526 ++ /* tPAD is too generic, also match on bios date */
16527 ++ DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
16528 ++ DMI_MATCH(DMI_BOARD_NAME, "tPAD"),
16529 ++ DMI_MATCH(DMI_BIOS_DATE, "12/19/2014"),
16530 ++ },
16531 ++ },
16532 + {
16533 + /* Teclast X98 Plus II */
16534 + .driver_data = (void *)&teclast_x98plus2_data,
16535 +@@ -1338,6 +1394,19 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
16536 + DMI_MATCH(DMI_PRODUCT_NAME, "X98 Plus II"),
16537 + },
16538 + },
16539 ++ {
16540 ++ /* Teclast X98 Pro */
16541 ++ .driver_data = (void *)&gdix1001_00_upside_down_data,
16542 ++ .matches = {
16543 ++ /*
16544 ++ * Only match BIOS date, because the manufacturers
16545 ++ * BIOS does not report the board name at all
16546 ++ * (sometimes)...
16547 ++ */
16548 ++ DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
16549 ++ DMI_MATCH(DMI_BIOS_DATE, "10/28/2015"),
16550 ++ },
16551 ++ },
16552 + {
16553 + /* Trekstor Primebook C11 */
16554 + .driver_data = (void *)&trekstor_primebook_c11_data,
16555 +@@ -1413,6 +1482,22 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
16556 + DMI_MATCH(DMI_PRODUCT_NAME, "VINGA Twizzle J116"),
16557 + },
16558 + },
16559 ++ {
16560 ++ /* "WinBook TW100" */
16561 ++ .driver_data = (void *)&gdix1001_00_upside_down_data,
16562 ++ .matches = {
16563 ++ DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
16564 ++ DMI_MATCH(DMI_PRODUCT_NAME, "TW100")
16565 ++ }
16566 ++ },
16567 ++ {
16568 ++ /* WinBook TW700 */
16569 ++ .driver_data = (void *)&gdix1001_00_upside_down_data,
16570 ++ .matches = {
16571 ++ DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
16572 ++ DMI_MATCH(DMI_PRODUCT_NAME, "TW700")
16573 ++ },
16574 ++ },
16575 + {
16576 + /* Yours Y8W81, same case and touchscreen as Chuwi Vi8 */
16577 + .driver_data = (void *)&chuwi_vi8_data,
16578 +diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
16579 +index 3e7a38525cb3f..fc9e8f589d16e 100644
16580 +--- a/drivers/regulator/Kconfig
16581 ++++ b/drivers/regulator/Kconfig
16582 +@@ -207,6 +207,7 @@ config REGULATOR_BD70528
16583 + config REGULATOR_BD71815
16584 + tristate "ROHM BD71815 Power Regulator"
16585 + depends on MFD_ROHM_BD71828
16586 ++ select REGULATOR_ROHM
16587 + help
16588 + This driver supports voltage regulators on ROHM BD71815 PMIC.
16589 + This will enable support for the software controllable buck
16590 +diff --git a/drivers/regulator/bd9576-regulator.c b/drivers/regulator/bd9576-regulator.c
16591 +index 204a2da054f53..cdf30481a5820 100644
16592 +--- a/drivers/regulator/bd9576-regulator.c
16593 ++++ b/drivers/regulator/bd9576-regulator.c
16594 +@@ -312,8 +312,8 @@ static int bd957x_probe(struct platform_device *pdev)
16595 + }
16596 +
16597 + static const struct platform_device_id bd957x_pmic_id[] = {
16598 +- { "bd9573-pmic", ROHM_CHIP_TYPE_BD9573 },
16599 +- { "bd9576-pmic", ROHM_CHIP_TYPE_BD9576 },
16600 ++ { "bd9573-regulator", ROHM_CHIP_TYPE_BD9573 },
16601 ++ { "bd9576-regulator", ROHM_CHIP_TYPE_BD9576 },
16602 + { },
16603 + };
16604 + MODULE_DEVICE_TABLE(platform, bd957x_pmic_id);
16605 +diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
16606 +index e18d291c7f21c..23fa429ebe760 100644
16607 +--- a/drivers/regulator/da9052-regulator.c
16608 ++++ b/drivers/regulator/da9052-regulator.c
16609 +@@ -250,7 +250,8 @@ static int da9052_regulator_set_voltage_time_sel(struct regulator_dev *rdev,
16610 + case DA9052_ID_BUCK3:
16611 + case DA9052_ID_LDO2:
16612 + case DA9052_ID_LDO3:
16613 +- ret = (new_sel - old_sel) * info->step_uV / 6250;
16614 ++ ret = DIV_ROUND_UP(abs(new_sel - old_sel) * info->step_uV,
16615 ++ 6250);
16616 + break;
16617 + }
16618 +
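
The da9052 change fixes the ramp-time computation twice over:
new_sel - old_sel goes negative when stepping a regulator down, and
plain integer division truncates partial 6250 uV steps to zero.
abs() plus DIV_ROUND_UP addresses both. A worked example with a
hypothetical 25000 uV step size (the real per-regulator step comes
from info->step_uV):

    #include <stdio.h>
    #include <stdlib.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            int step_uV = 25000;            /* hypothetical selector step */
            int old_sel = 10, new_sel = 8;  /* ramping *down* two steps */

            int before = (new_sel - old_sel) * step_uV / 6250;
            int after = DIV_ROUND_UP(abs(new_sel - old_sel) * step_uV,
                                     6250);

            printf("old formula: %d (negative, meaningless)\n", before);
            printf("fixed:       %d (50000 uV at 6250 uV per unit)\n",
                   after);
            return 0;
    }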
16619 +diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
16620 +index 26f06f685b1b6..b2ee38c5b573a 100644
16621 +--- a/drivers/regulator/fan53555.c
16622 ++++ b/drivers/regulator/fan53555.c
16623 +@@ -293,6 +293,9 @@ static int fan53526_voltages_setup_fairchild(struct fan53555_device_info *di)
16624 + return -EINVAL;
16625 + }
16626 +
16627 ++ di->slew_reg = FAN53555_CONTROL;
16628 ++ di->slew_mask = CTL_SLEW_MASK;
16629 ++ di->slew_shift = CTL_SLEW_SHIFT;
16630 + di->vsel_count = FAN53526_NVOLTAGES;
16631 +
16632 + return 0;
16633 +diff --git a/drivers/regulator/fan53880.c b/drivers/regulator/fan53880.c
16634 +index 1684faf82ed25..94f02f3099dd4 100644
16635 +--- a/drivers/regulator/fan53880.c
16636 ++++ b/drivers/regulator/fan53880.c
16637 +@@ -79,7 +79,7 @@ static const struct regulator_desc fan53880_regulators[] = {
16638 + .n_linear_ranges = 2,
16639 + .n_voltages = 0xf8,
16640 + .vsel_reg = FAN53880_BUCKVOUT,
16641 +- .vsel_mask = 0x7f,
16642 ++ .vsel_mask = 0xff,
16643 + .enable_reg = FAN53880_ENABLE,
16644 + .enable_mask = 0x10,
16645 + .enable_time = 480,
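
The fan53880 one-liner widens vsel_mask from 0x7f to 0xff: with
n_voltages = 0xf8 the selector needs all eight bits, and a 7-bit mask
would silently alias selectors above 0x7f onto lower voltages. A
quick arithmetic check (values taken from the descriptor above):

    #include <stdio.h>

    int main(void)
    {
            unsigned int n_voltages = 0xf8;  /* 248 selector values */
            unsigned int sel = 0x90;         /* a selector above 0x7f */

            printf("mask 0x7f: sel 0x%02x stored as 0x%02x\n",
                   sel, sel & 0x7f);         /* aliases down to 0x10 */
            printf("mask 0xff: sel 0x%02x stored as 0x%02x\n",
                   sel, sel & 0xff);
            printf("top selector 0x%02x needs %s bits\n",
                   n_voltages - 1,
                   (n_voltages - 1) > 0x7f ? "eight" : "seven");
            return 0;
    }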
16646 +diff --git a/drivers/regulator/hi6421v600-regulator.c b/drivers/regulator/hi6421v600-regulator.c
16647 +index d6340bb492967..d1e9406b2e3e2 100644
16648 +--- a/drivers/regulator/hi6421v600-regulator.c
16649 ++++ b/drivers/regulator/hi6421v600-regulator.c
16650 +@@ -129,7 +129,7 @@ static unsigned int hi6421_spmi_regulator_get_mode(struct regulator_dev *rdev)
16651 + {
16652 + struct hi6421_spmi_reg_info *sreg = rdev_get_drvdata(rdev);
16653 + struct hi6421_spmi_pmic *pmic = sreg->pmic;
16654 +- u32 reg_val;
16655 ++ unsigned int reg_val;
16656 +
16657 + regmap_read(pmic->regmap, rdev->desc->enable_reg, &reg_val);
16658 +
16659 +@@ -144,14 +144,17 @@ static int hi6421_spmi_regulator_set_mode(struct regulator_dev *rdev,
16660 + {
16661 + struct hi6421_spmi_reg_info *sreg = rdev_get_drvdata(rdev);
16662 + struct hi6421_spmi_pmic *pmic = sreg->pmic;
16663 +- u32 val;
16664 ++ unsigned int val;
16665 +
16666 + switch (mode) {
16667 + case REGULATOR_MODE_NORMAL:
16668 + val = 0;
16669 + break;
16670 + case REGULATOR_MODE_IDLE:
16671 +- val = sreg->eco_mode_mask << (ffs(sreg->eco_mode_mask) - 1);
16672 ++ if (!sreg->eco_mode_mask)
16673 ++ return -EINVAL;
16674 ++
16675 ++ val = sreg->eco_mode_mask;
16676 + break;
16677 + default:
16678 + return -EINVAL;
16679 +diff --git a/drivers/regulator/hi655x-regulator.c b/drivers/regulator/hi655x-regulator.c
16680 +index 68cdb173196d6..556bb73f33292 100644
16681 +--- a/drivers/regulator/hi655x-regulator.c
16682 ++++ b/drivers/regulator/hi655x-regulator.c
16683 +@@ -72,7 +72,7 @@ enum hi655x_regulator_id {
16684 + static int hi655x_is_enabled(struct regulator_dev *rdev)
16685 + {
16686 + unsigned int value = 0;
16687 +- struct hi655x_regulator *regulator = rdev_get_drvdata(rdev);
16688 ++ const struct hi655x_regulator *regulator = rdev_get_drvdata(rdev);
16689 +
16690 + regmap_read(rdev->regmap, regulator->status_reg, &value);
16691 + return (value & rdev->desc->enable_mask);
16692 +@@ -80,7 +80,7 @@ static int hi655x_is_enabled(struct regulator_dev *rdev)
16693 +
16694 + static int hi655x_disable(struct regulator_dev *rdev)
16695 + {
16696 +- struct hi655x_regulator *regulator = rdev_get_drvdata(rdev);
16697 ++ const struct hi655x_regulator *regulator = rdev_get_drvdata(rdev);
16698 +
16699 + return regmap_write(rdev->regmap, regulator->disable_reg,
16700 + rdev->desc->enable_mask);
16701 +@@ -169,7 +169,6 @@ static const struct hi655x_regulator regulators[] = {
16702 + static int hi655x_regulator_probe(struct platform_device *pdev)
16703 + {
16704 + unsigned int i;
16705 +- struct hi655x_regulator *regulator;
16706 + struct hi655x_pmic *pmic;
16707 + struct regulator_config config = { };
16708 + struct regulator_dev *rdev;
16709 +@@ -180,22 +179,17 @@ static int hi655x_regulator_probe(struct platform_device *pdev)
16710 + return -ENODEV;
16711 + }
16712 +
16713 +- regulator = devm_kzalloc(&pdev->dev, sizeof(*regulator), GFP_KERNEL);
16714 +- if (!regulator)
16715 +- return -ENOMEM;
16716 +-
16717 +- platform_set_drvdata(pdev, regulator);
16718 +-
16719 + config.dev = pdev->dev.parent;
16720 + config.regmap = pmic->regmap;
16721 +- config.driver_data = regulator;
16722 + for (i = 0; i < ARRAY_SIZE(regulators); i++) {
16723 ++ config.driver_data = (void *) &regulators[i];
16724 ++
16725 + rdev = devm_regulator_register(&pdev->dev,
16726 + &regulators[i].rdesc,
16727 + &config);
16728 + if (IS_ERR(rdev)) {
16729 + dev_err(&pdev->dev, "failed to register regulator %s\n",
16730 +- regulator->rdesc.name);
16731 ++ regulators[i].rdesc.name);
16732 + return PTR_ERR(rdev);
16733 + }
16734 + }
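
The hi655x rework drops the single zeroed heap allocation, whose
never-filled fields the old error print even tried to read, and
instead points config.driver_data at each entry of the const
regulators[] table, so every registered regulator carries its own
immutable description. A minimal userspace sketch of that const-table
driver_data pattern; register_one() stands in for
devm_regulator_register() and the register offsets are made up:

    #include <stdio.h>

    struct reg_info {
            const char *name;
            unsigned int status_reg;
    };

    static const struct reg_info regulators[] = {
            { "ldo7",  0x28 },
            { "ldo19", 0x2c },
    };

    static const struct reg_info *registered[2];

    /* Stand-in for devm_regulator_register(): keeps driver_data. */
    static int register_one(int slot, const void *driver_data)
    {
            registered[slot] = driver_data;
            return 0;
    }

    int main(void)
    {
            /* each instance points at its own const table row */
            for (unsigned int i = 0; i < 2; i++)
                    register_one(i, &regulators[i]);

            for (unsigned int i = 0; i < 2; i++)
                    printf("%s -> status reg 0x%x\n",
                           registered[i]->name,
                           registered[i]->status_reg);
            return 0;
    }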
16735 +diff --git a/drivers/regulator/mt6315-regulator.c b/drivers/regulator/mt6315-regulator.c
16736 +index 6b8be52c3772a..7514702f78cf7 100644
16737 +--- a/drivers/regulator/mt6315-regulator.c
16738 ++++ b/drivers/regulator/mt6315-regulator.c
16739 +@@ -223,8 +223,8 @@ static int mt6315_regulator_probe(struct spmi_device *pdev)
16740 + int i;
16741 +
16742 + regmap = devm_regmap_init_spmi_ext(pdev, &mt6315_regmap_config);
16743 +- if (!regmap)
16744 +- return -ENODEV;
16745 ++ if (IS_ERR(regmap))
16746 ++ return PTR_ERR(regmap);
16747 +
16748 + chip = devm_kzalloc(dev, sizeof(struct mt6315_chip), GFP_KERNEL);
16749 + if (!chip)
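
The mt6315 fix corrects the error check: devm_regmap_init_spmi_ext()
reports failure through the kernel's ERR_PTR encoding, not NULL, so
"if (!regmap)" never fires. Below, simplified userspace copies of the
ERR_PTR/IS_ERR/PTR_ERR convention (the real macros live in
linux/err.h) show why the NULL test misses the error:

    #include <stdio.h>

    /* Errors come back as tiny negative values cast to a pointer,
     * which a NULL check can never catch. */
    #define MAX_ERRNO 4095
    #define ERR_PTR(err) ((void *)(long)(err))
    #define IS_ERR(ptr)  ((unsigned long)(ptr) >= \
                          (unsigned long)-MAX_ERRNO)
    #define PTR_ERR(ptr) ((long)(ptr))

    /* Stand-in for devm_regmap_init_spmi_ext(). */
    static void *init_regmap(int fail)
    {
            return fail ? ERR_PTR(-12) : (void *)0x1000; /* -ENOMEM */
    }

    int main(void)
    {
            void *regmap = init_regmap(1);

            printf("NULL check sees the failure? %s\n",
                   !regmap ? "yes" : "no");          /* prints "no" */
            if (IS_ERR(regmap))
                    printf("IS_ERR sees it: %ld\n", PTR_ERR(regmap));
            return 0;
    }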
16750 +diff --git a/drivers/regulator/mt6358-regulator.c b/drivers/regulator/mt6358-regulator.c
16751 +index 13cb6ac9a8929..1d4eb5dc4fac8 100644
16752 +--- a/drivers/regulator/mt6358-regulator.c
16753 ++++ b/drivers/regulator/mt6358-regulator.c
16754 +@@ -457,7 +457,7 @@ static struct mt6358_regulator_info mt6358_regulators[] = {
16755 + MT6358_REG_FIXED("ldo_vaud28", VAUD28,
16756 + MT6358_LDO_VAUD28_CON0, 0, 2800000),
16757 + MT6358_LDO("ldo_vdram2", VDRAM2, vdram2_voltages, vdram2_idx,
16758 +- MT6358_LDO_VDRAM2_CON0, 0, MT6358_LDO_VDRAM2_ELR0, 0x10, 0),
16759 ++ MT6358_LDO_VDRAM2_CON0, 0, MT6358_LDO_VDRAM2_ELR0, 0xf, 0),
16760 + MT6358_LDO("ldo_vsim1", VSIM1, vsim_voltages, vsim_idx,
16761 + MT6358_LDO_VSIM1_CON0, 0, MT6358_VSIM1_ANA_CON0, 0xf00, 8),
16762 + MT6358_LDO("ldo_vibr", VIBR, vibr_voltages, vibr_idx,
16763 +diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
16764 +index 22fec370fa610..ac79dc34f9e8b 100644
16765 +--- a/drivers/regulator/qcom-rpmh-regulator.c
16766 ++++ b/drivers/regulator/qcom-rpmh-regulator.c
16767 +@@ -1070,6 +1070,7 @@ static const struct rpmh_vreg_init_data pm7325_vreg_data[] = {
16768 + RPMH_VREG("ldo17", "ldo%s17", &pmic5_pldo_lv, "vdd-l11-l17-l18-l19"),
16769 + RPMH_VREG("ldo18", "ldo%s18", &pmic5_pldo_lv, "vdd-l11-l17-l18-l19"),
16770 + RPMH_VREG("ldo19", "ldo%s19", &pmic5_pldo_lv, "vdd-l11-l17-l18-l19"),
16771 ++ {}
16772 + };
16773 +
16774 + static const struct rpmh_vreg_init_data pmr735a_vreg_data[] = {
16775 +@@ -1083,6 +1084,7 @@ static const struct rpmh_vreg_init_data pmr735a_vreg_data[] = {
16776 + RPMH_VREG("ldo5", "ldo%s5", &pmic5_nldo, "vdd-l5-l6"),
16777 + RPMH_VREG("ldo6", "ldo%s6", &pmic5_nldo, "vdd-l5-l6"),
16778 + RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo, "vdd-l7-bob"),
16779 ++ {}
16780 + };
16781 +
16782 + static int rpmh_regulator_probe(struct platform_device *pdev)
16783 +diff --git a/drivers/regulator/uniphier-regulator.c b/drivers/regulator/uniphier-regulator.c
16784 +index 2e02e26b516c4..e75b0973e3256 100644
16785 +--- a/drivers/regulator/uniphier-regulator.c
16786 ++++ b/drivers/regulator/uniphier-regulator.c
16787 +@@ -201,6 +201,7 @@ static const struct of_device_id uniphier_regulator_match[] = {
16788 + },
16789 + { /* Sentinel */ },
16790 + };
16791 ++MODULE_DEVICE_TABLE(of, uniphier_regulator_match);
16792 +
16793 + static struct platform_driver uniphier_regulator_driver = {
16794 + .probe = uniphier_regulator_probe,
16795 +diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c
16796 +index 75a8924ba12b3..ac9e228b56d0b 100644
16797 +--- a/drivers/rtc/rtc-stm32.c
16798 ++++ b/drivers/rtc/rtc-stm32.c
16799 +@@ -754,7 +754,7 @@ static int stm32_rtc_probe(struct platform_device *pdev)
16800 +
16801 + ret = clk_prepare_enable(rtc->rtc_ck);
16802 + if (ret)
16803 +- goto err;
16804 ++ goto err_no_rtc_ck;
16805 +
16806 + if (rtc->data->need_dbp)
16807 + regmap_update_bits(rtc->dbp, rtc->dbp_reg,
16808 +@@ -830,10 +830,12 @@ static int stm32_rtc_probe(struct platform_device *pdev)
16809 + }
16810 +
16811 + return 0;
16812 ++
16813 + err:
16814 ++ clk_disable_unprepare(rtc->rtc_ck);
16815 ++err_no_rtc_ck:
16816 + if (rtc->data->has_pclk)
16817 + clk_disable_unprepare(rtc->pclk);
16818 +- clk_disable_unprepare(rtc->rtc_ck);
16819 +
16820 + if (rtc->data->need_dbp)
16821 + regmap_update_bits(rtc->dbp, rtc->dbp_reg, rtc->dbp_mask, 0);
16822 +diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
16823 +index e421138254152..1097e76982a5d 100644
16824 +--- a/drivers/s390/cio/chp.c
16825 ++++ b/drivers/s390/cio/chp.c
16826 +@@ -255,6 +255,9 @@ static ssize_t chp_status_write(struct device *dev,
16827 + if (!num_args)
16828 + return count;
16829 +
16830 ++ /* Wait until previous actions have settled. */
16831 ++ css_wait_for_slow_path();
16832 ++
16833 + if (!strncasecmp(cmd, "on", 2) || !strcmp(cmd, "1")) {
16834 + mutex_lock(&cp->lock);
16835 + error = s390_vary_chpid(cp->chpid, 1);
16836 +diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
16837 +index c22d9ee27ba19..297fb399363cc 100644
16838 +--- a/drivers/s390/cio/chsc.c
16839 ++++ b/drivers/s390/cio/chsc.c
16840 +@@ -801,8 +801,6 @@ int chsc_chp_vary(struct chp_id chpid, int on)
16841 + {
16842 + struct channel_path *chp = chpid_to_chp(chpid);
16843 +
16844 +- /* Wait until previous actions have settled. */
16845 +- css_wait_for_slow_path();
16846 + /*
16847 + * Redo PathVerification on the devices the chpid connects to
16848 + */
16849 +diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c
16850 +index 0464e37c806a4..2e25ef67825ac 100644
16851 +--- a/drivers/scsi/FlashPoint.c
16852 ++++ b/drivers/scsi/FlashPoint.c
16853 +@@ -40,7 +40,7 @@ struct sccb_mgr_info {
16854 + u16 si_per_targ_ultra_nego;
16855 + u16 si_per_targ_no_disc;
16856 + u16 si_per_targ_wide_nego;
16857 +- u16 si_flags;
16858 ++ u16 si_mflags;
16859 + unsigned char si_card_family;
16860 + unsigned char si_bustype;
16861 + unsigned char si_card_model[3];
16862 +@@ -1073,22 +1073,22 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo)
16863 + ScamFlg =
16864 + (unsigned char)FPT_utilEERead(ioport, SCAM_CONFIG / 2);
16865 +
16866 +- pCardInfo->si_flags = 0x0000;
16867 ++ pCardInfo->si_mflags = 0x0000;
16868 +
16869 + if (i & 0x01)
16870 +- pCardInfo->si_flags |= SCSI_PARITY_ENA;
16871 ++ pCardInfo->si_mflags |= SCSI_PARITY_ENA;
16872 +
16873 + if (!(i & 0x02))
16874 +- pCardInfo->si_flags |= SOFT_RESET;
16875 ++ pCardInfo->si_mflags |= SOFT_RESET;
16876 +
16877 + if (i & 0x10)
16878 +- pCardInfo->si_flags |= EXTENDED_TRANSLATION;
16879 ++ pCardInfo->si_mflags |= EXTENDED_TRANSLATION;
16880 +
16881 + if (ScamFlg & SCAM_ENABLED)
16882 +- pCardInfo->si_flags |= FLAG_SCAM_ENABLED;
16883 ++ pCardInfo->si_mflags |= FLAG_SCAM_ENABLED;
16884 +
16885 + if (ScamFlg & SCAM_LEVEL2)
16886 +- pCardInfo->si_flags |= FLAG_SCAM_LEVEL2;
16887 ++ pCardInfo->si_mflags |= FLAG_SCAM_LEVEL2;
16888 +
16889 + j = (RD_HARPOON(ioport + hp_bm_ctrl) & ~SCSI_TERM_ENA_L);
16890 + if (i & 0x04) {
16891 +@@ -1104,7 +1104,7 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo)
16892 +
16893 + if (!(RD_HARPOON(ioport + hp_page_ctrl) & NARROW_SCSI_CARD))
16894 +
16895 +- pCardInfo->si_flags |= SUPPORT_16TAR_32LUN;
16896 ++ pCardInfo->si_mflags |= SUPPORT_16TAR_32LUN;
16897 +
16898 + pCardInfo->si_card_family = HARPOON_FAMILY;
16899 + pCardInfo->si_bustype = BUSTYPE_PCI;
16900 +@@ -1140,15 +1140,15 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo)
16901 +
16902 + if (pCardInfo->si_card_model[1] == '3') {
16903 + if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7))
16904 +- pCardInfo->si_flags |= LOW_BYTE_TERM;
16905 ++ pCardInfo->si_mflags |= LOW_BYTE_TERM;
16906 + } else if (pCardInfo->si_card_model[2] == '0') {
16907 + temp = RD_HARPOON(ioport + hp_xfer_pad);
16908 + WR_HARPOON(ioport + hp_xfer_pad, (temp & ~BIT(4)));
16909 + if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7))
16910 +- pCardInfo->si_flags |= LOW_BYTE_TERM;
16911 ++ pCardInfo->si_mflags |= LOW_BYTE_TERM;
16912 + WR_HARPOON(ioport + hp_xfer_pad, (temp | BIT(4)));
16913 + if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7))
16914 +- pCardInfo->si_flags |= HIGH_BYTE_TERM;
16915 ++ pCardInfo->si_mflags |= HIGH_BYTE_TERM;
16916 + WR_HARPOON(ioport + hp_xfer_pad, temp);
16917 + } else {
16918 + temp = RD_HARPOON(ioport + hp_ee_ctrl);
16919 +@@ -1166,9 +1166,9 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo)
16920 + WR_HARPOON(ioport + hp_ee_ctrl, temp);
16921 + WR_HARPOON(ioport + hp_xfer_pad, temp2);
16922 + if (!(temp3 & BIT(7)))
16923 +- pCardInfo->si_flags |= LOW_BYTE_TERM;
16924 ++ pCardInfo->si_mflags |= LOW_BYTE_TERM;
16925 + if (!(temp3 & BIT(6)))
16926 +- pCardInfo->si_flags |= HIGH_BYTE_TERM;
16927 ++ pCardInfo->si_mflags |= HIGH_BYTE_TERM;
16928 + }
16929 +
16930 + ARAM_ACCESS(ioport);
16931 +@@ -1275,7 +1275,7 @@ static void *FlashPoint_HardwareResetHostAdapter(struct sccb_mgr_info
16932 + WR_HARPOON(ioport + hp_arb_id, pCardInfo->si_id);
16933 + CurrCard->ourId = pCardInfo->si_id;
16934 +
16935 +- i = (unsigned char)pCardInfo->si_flags;
16936 ++ i = (unsigned char)pCardInfo->si_mflags;
16937 + if (i & SCSI_PARITY_ENA)
16938 + WR_HARPOON(ioport + hp_portctrl_1, (HOST_MODE8 | CHK_SCSI_P));
16939 +
16940 +@@ -1289,14 +1289,14 @@ static void *FlashPoint_HardwareResetHostAdapter(struct sccb_mgr_info
16941 + j |= SCSI_TERM_ENA_H;
16942 + WR_HARPOON(ioport + hp_ee_ctrl, j);
16943 +
16944 +- if (!(pCardInfo->si_flags & SOFT_RESET)) {
16945 ++ if (!(pCardInfo->si_mflags & SOFT_RESET)) {
16946 +
16947 + FPT_sresb(ioport, thisCard);
16948 +
16949 + FPT_scini(thisCard, pCardInfo->si_id, 0);
16950 + }
16951 +
16952 +- if (pCardInfo->si_flags & POST_ALL_UNDERRRUNS)
16953 ++ if (pCardInfo->si_mflags & POST_ALL_UNDERRRUNS)
16954 + CurrCard->globalFlags |= F_NO_FILTER;
16955 +
16956 + if (pCurrNvRam) {
16957 +diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
16958 +index 0e935c49b57bd..dd419e295184d 100644
16959 +--- a/drivers/scsi/be2iscsi/be_iscsi.c
16960 ++++ b/drivers/scsi/be2iscsi/be_iscsi.c
16961 +@@ -182,6 +182,7 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
16962 + struct beiscsi_endpoint *beiscsi_ep;
16963 + struct iscsi_endpoint *ep;
16964 + uint16_t cri_index;
16965 ++ int rc = 0;
16966 +
16967 + ep = iscsi_lookup_endpoint(transport_fd);
16968 + if (!ep)
16969 +@@ -189,15 +190,17 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
16970 +
16971 + beiscsi_ep = ep->dd_data;
16972 +
16973 +- if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
16974 +- return -EINVAL;
16975 ++ if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) {
16976 ++ rc = -EINVAL;
16977 ++ goto put_ep;
16978 ++ }
16979 +
16980 + if (beiscsi_ep->phba != phba) {
16981 + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
16982 + "BS_%d : beiscsi_ep->hba=%p not equal to phba=%p\n",
16983 + beiscsi_ep->phba, phba);
16984 +-
16985 +- return -EEXIST;
16986 ++ rc = -EEXIST;
16987 ++ goto put_ep;
16988 + }
16989 + cri_index = BE_GET_CRI_FROM_CID(beiscsi_ep->ep_cid);
16990 + if (phba->conn_table[cri_index]) {
16991 +@@ -209,7 +212,8 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
16992 + beiscsi_ep->ep_cid,
16993 + beiscsi_conn,
16994 + phba->conn_table[cri_index]);
16995 +- return -EINVAL;
16996 ++ rc = -EINVAL;
16997 ++ goto put_ep;
16998 + }
16999 + }
17000 +
17001 +@@ -226,7 +230,10 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
17002 + "BS_%d : cid %d phba->conn_table[%u]=%p\n",
17003 + beiscsi_ep->ep_cid, cri_index, beiscsi_conn);
17004 + phba->conn_table[cri_index] = beiscsi_conn;
17005 +- return 0;
17006 ++
17007 ++put_ep:
17008 ++ iscsi_put_endpoint(ep);
17009 ++ return rc;
17010 + }
17011 +
17012 + static int beiscsi_iface_create_ipv4(struct beiscsi_hba *phba)
17013 +diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
17014 +index 22cf7f4b8d8c8..27c4f1598f765 100644
17015 +--- a/drivers/scsi/be2iscsi/be_main.c
17016 ++++ b/drivers/scsi/be2iscsi/be_main.c
17017 +@@ -5809,6 +5809,7 @@ struct iscsi_transport beiscsi_iscsi_transport = {
17018 + .destroy_session = beiscsi_session_destroy,
17019 + .create_conn = beiscsi_conn_create,
17020 + .bind_conn = beiscsi_conn_bind,
17021 ++ .unbind_conn = iscsi_conn_unbind,
17022 + .destroy_conn = iscsi_conn_teardown,
17023 + .attr_is_visible = beiscsi_attr_is_visible,
17024 + .set_iface_param = beiscsi_iface_set_param,
17025 +diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
17026 +index 1e6d8f62ea3c2..2ad85c6b99fd2 100644
17027 +--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
17028 ++++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
17029 +@@ -1420,17 +1420,23 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
17030 + * Forcefully terminate all in progress connection recovery at the
17031 + * earliest, either in bind(), send_pdu(LOGIN), or conn_start()
17032 + */
17033 +- if (bnx2i_adapter_ready(hba))
17034 +- return -EIO;
17035 ++ if (bnx2i_adapter_ready(hba)) {
17036 ++ ret_code = -EIO;
17037 ++ goto put_ep;
17038 ++ }
17039 +
17040 + bnx2i_ep = ep->dd_data;
17041 + if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) ||
17042 +- (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD))
17043 ++ (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD)) {
17044 + /* Peer disconnect via' FIN or RST */
17045 +- return -EINVAL;
17046 ++ ret_code = -EINVAL;
17047 ++ goto put_ep;
17048 ++ }
17049 +
17050 +- if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
17051 +- return -EINVAL;
17052 ++ if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) {
17053 ++ ret_code = -EINVAL;
17054 ++ goto put_ep;
17055 ++ }
17056 +
17057 + if (bnx2i_ep->hba != hba) {
17058 + /* Error - TCP connection does not belong to this device
17059 +@@ -1441,7 +1447,8 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
17060 + iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
17061 + "belong to hba (%s)\n",
17062 + hba->netdev->name);
17063 +- return -EEXIST;
17064 ++ ret_code = -EEXIST;
17065 ++ goto put_ep;
17066 + }
17067 + bnx2i_ep->conn = bnx2i_conn;
17068 + bnx2i_conn->ep = bnx2i_ep;
17069 +@@ -1458,6 +1465,8 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
17070 + bnx2i_put_rq_buf(bnx2i_conn, 0);
17071 +
17072 + bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
17073 ++put_ep:
17074 ++ iscsi_put_endpoint(ep);
17075 + return ret_code;
17076 + }
17077 +
17078 +@@ -2276,6 +2285,7 @@ struct iscsi_transport bnx2i_iscsi_transport = {
17079 + .destroy_session = bnx2i_session_destroy,
17080 + .create_conn = bnx2i_conn_create,
17081 + .bind_conn = bnx2i_conn_bind,
17082 ++ .unbind_conn = iscsi_conn_unbind,
17083 + .destroy_conn = bnx2i_conn_destroy,
17084 + .attr_is_visible = bnx2i_attr_is_visible,
17085 + .set_param = iscsi_set_param,
17086 +diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
17087 +index 203f938fca7e5..f949a4e007834 100644
17088 +--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
17089 ++++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
17090 +@@ -117,6 +117,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = {
17091 + /* connection management */
17092 + .create_conn = cxgbi_create_conn,
17093 + .bind_conn = cxgbi_bind_conn,
17094 ++ .unbind_conn = iscsi_conn_unbind,
17095 + .destroy_conn = iscsi_tcp_conn_teardown,
17096 + .start_conn = iscsi_conn_start,
17097 + .stop_conn = iscsi_conn_stop,
17098 +diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
17099 +index 2c3491528d424..efb3e2b3398e2 100644
17100 +--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
17101 ++++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
17102 +@@ -134,6 +134,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
17103 + /* connection management */
17104 + .create_conn = cxgbi_create_conn,
17105 + .bind_conn = cxgbi_bind_conn,
17106 ++ .unbind_conn = iscsi_conn_unbind,
17107 + .destroy_conn = iscsi_tcp_conn_teardown,
17108 + .start_conn = iscsi_conn_start,
17109 + .stop_conn = iscsi_conn_stop,
17110 +diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
17111 +index f078b3c4e083f..f6bcae829c29b 100644
17112 +--- a/drivers/scsi/cxgbi/libcxgbi.c
17113 ++++ b/drivers/scsi/cxgbi/libcxgbi.c
17114 +@@ -2690,11 +2690,13 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
17115 + err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
17116 + ppm->tformat.pgsz_idx_dflt);
17117 + if (err < 0)
17118 +- return err;
17119 ++ goto put_ep;
17120 +
17121 + err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
17122 +- if (err)
17123 +- return -EINVAL;
17124 ++ if (err) {
17125 ++ err = -EINVAL;
17126 ++ goto put_ep;
17127 ++ }
17128 +
17129 + /* calculate the tag idx bits needed for this conn based on cmds_max */
17130 + cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1;
17131 +@@ -2715,7 +2717,9 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
17132 + /* init recv engine */
17133 + iscsi_tcp_hdr_recv_prep(tcp_conn);
17134 +
17135 +- return 0;
17136 ++put_ep:
17137 ++ iscsi_put_endpoint(ep);
17138 ++ return err;
17139 + }
17140 + EXPORT_SYMBOL_GPL(cxgbi_bind_conn);
17141 +
17142 +diff --git a/drivers/scsi/libfc/fc_encode.h b/drivers/scsi/libfc/fc_encode.h
17143 +index 602c97a651bc0..9ea4ceadb5594 100644
17144 +--- a/drivers/scsi/libfc/fc_encode.h
17145 ++++ b/drivers/scsi/libfc/fc_encode.h
17146 +@@ -166,9 +166,11 @@ static inline int fc_ct_ns_fill(struct fc_lport *lport,
17147 + static inline void fc_ct_ms_fill_attr(struct fc_fdmi_attr_entry *entry,
17148 + const char *in, size_t len)
17149 + {
17150 +- int copied = strscpy(entry->value, in, len);
17151 +- if (copied > 0)
17152 +- memset(entry->value, copied, len - copied);
17153 ++ int copied;
17154 ++
17155 ++ copied = strscpy((char *)&entry->value, in, len);
17156 ++ if (copied > 0 && (copied + 1) < len)
17157 ++ memset((entry->value + copied + 1), 0, len - copied - 1);
17158 + }
17159 +
17160 + /**
17161 +diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
17162 +index 4834219497eeb..2aaf836786548 100644
17163 +--- a/drivers/scsi/libiscsi.c
17164 ++++ b/drivers/scsi/libiscsi.c
17165 +@@ -1387,23 +1387,32 @@ void iscsi_session_failure(struct iscsi_session *session,
17166 + }
17167 + EXPORT_SYMBOL_GPL(iscsi_session_failure);
17168 +
17169 +-void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
17170 ++static bool iscsi_set_conn_failed(struct iscsi_conn *conn)
17171 + {
17172 + struct iscsi_session *session = conn->session;
17173 +
17174 +- spin_lock_bh(&session->frwd_lock);
17175 +- if (session->state == ISCSI_STATE_FAILED) {
17176 +- spin_unlock_bh(&session->frwd_lock);
17177 +- return;
17178 +- }
17179 ++ if (session->state == ISCSI_STATE_FAILED)
17180 ++ return false;
17181 +
17182 + if (conn->stop_stage == 0)
17183 + session->state = ISCSI_STATE_FAILED;
17184 +- spin_unlock_bh(&session->frwd_lock);
17185 +
17186 + set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
17187 + set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
17188 +- iscsi_conn_error_event(conn->cls_conn, err);
17189 ++ return true;
17190 ++}
17191 ++
17192 ++void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
17193 ++{
17194 ++ struct iscsi_session *session = conn->session;
17195 ++ bool needs_evt;
17196 ++
17197 ++ spin_lock_bh(&session->frwd_lock);
17198 ++ needs_evt = iscsi_set_conn_failed(conn);
17199 ++ spin_unlock_bh(&session->frwd_lock);
17200 ++
17201 ++ if (needs_evt)
17202 ++ iscsi_conn_error_event(conn->cls_conn, err);
17203 + }
17204 + EXPORT_SYMBOL_GPL(iscsi_conn_failure);
17205 +
17206 +@@ -2180,6 +2189,51 @@ done:
17207 + spin_unlock(&session->frwd_lock);
17208 + }
17209 +
17210 ++/**
17211 ++ * iscsi_conn_unbind - prevent queueing to conn.
17212 ++ * @cls_conn: iscsi conn ep is bound to.
17213 ++ * @is_active: is the conn in use for boot or is this for EH/termination
17214 ++ *
17215 ++ * This must be called by drivers implementing the ep_disconnect callout.
17216 ++ * It disables queueing to the connection from libiscsi in preparation for
17217 ++ * an ep_disconnect call.
17218 ++ */
17219 ++void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active)
17220 ++{
17221 ++ struct iscsi_session *session;
17222 ++ struct iscsi_conn *conn;
17223 ++
17224 ++ if (!cls_conn)
17225 ++ return;
17226 ++
17227 ++ conn = cls_conn->dd_data;
17228 ++ session = conn->session;
17229 ++ /*
17230 ++ * Wait for iscsi_eh calls to exit. We don't wait for the tmf to
17231 ++ * complete or timeout. The caller just wants to know what's running
17232 ++ * is everything that needs to be cleaned up, and no cmds will be
17233 ++ * queued.
17234 ++ */
17235 ++ mutex_lock(&session->eh_mutex);
17236 ++
17237 ++ iscsi_suspend_queue(conn);
17238 ++ iscsi_suspend_tx(conn);
17239 ++
17240 ++ spin_lock_bh(&session->frwd_lock);
17241 ++ if (!is_active) {
17242 ++ /*
17243 ++ * if logout timed out before userspace could even send a PDU
17244 ++ * the state might still be in ISCSI_STATE_LOGGED_IN and
17245 ++ * allowing new cmds and TMFs.
17246 ++ */
17247 ++ if (session->state == ISCSI_STATE_LOGGED_IN)
17248 ++ iscsi_set_conn_failed(conn);
17249 ++ }
17250 ++ spin_unlock_bh(&session->frwd_lock);
17251 ++ mutex_unlock(&session->eh_mutex);
17252 ++}
17253 ++EXPORT_SYMBOL_GPL(iscsi_conn_unbind);
17254 ++
17255 + static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
17256 + struct iscsi_tm *hdr)
17257 + {
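
Two things happen in the libiscsi hunk: iscsi_conn_failure() is split
so the state change runs under frwd_lock via iscsi_set_conn_failed()
while the error event is raised only after the lock is dropped, and
the new iscsi_conn_unbind() export gives ep_disconnect
implementations a way to quiesce queueing first (the transports
earlier in this patch now call it and take/put the endpoint reference
around bind). A small sketch of the decide-under-lock,
notify-outside-lock split, with illustrative names throughout:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum conn_state { STATE_LOGGED_IN, STATE_FAILED };

    static pthread_mutex_t frwd_lock = PTHREAD_MUTEX_INITIALIZER;
    static enum conn_state state = STATE_LOGGED_IN;

    /* Caller holds the lock; says whether an event is still owed. */
    static bool set_conn_failed(void)
    {
            if (state == STATE_FAILED)
                    return false;       /* already failed, stay quiet */
            state = STATE_FAILED;
            return true;
    }

    static void conn_failure(void)
    {
            bool needs_evt;

            pthread_mutex_lock(&frwd_lock);
            needs_evt = set_conn_failed();
            pthread_mutex_unlock(&frwd_lock);

            /* the notification runs with the lock already dropped */
            if (needs_evt)
                    printf("raising connection error event\n");
    }

    int main(void)
    {
            conn_failure();   /* first failure raises the event */
            conn_failure();   /* repeat failure is silent */
            return 0;
    }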
17258 +diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
17259 +index 658a962832b35..7bddd74658b9e 100644
17260 +--- a/drivers/scsi/lpfc/lpfc_debugfs.c
17261 ++++ b/drivers/scsi/lpfc/lpfc_debugfs.c
17262 +@@ -868,11 +868,8 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
17263 + len += scnprintf(buf+len, size-len,
17264 + "WWNN x%llx ",
17265 + wwn_to_u64(ndlp->nlp_nodename.u.wwn));
17266 +- if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
17267 +- len += scnprintf(buf+len, size-len, "RPI:%04d ",
17268 +- ndlp->nlp_rpi);
17269 +- else
17270 +- len += scnprintf(buf+len, size-len, "RPI:none ");
17271 ++ len += scnprintf(buf+len, size-len, "RPI:x%04x ",
17272 ++ ndlp->nlp_rpi);
17273 + len += scnprintf(buf+len, size-len, "flag:x%08x ",
17274 + ndlp->nlp_flag);
17275 + if (!ndlp->nlp_type)
17276 +diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
17277 +index 21108f322c995..c3ca2ccf9f828 100644
17278 +--- a/drivers/scsi/lpfc/lpfc_els.c
17279 ++++ b/drivers/scsi/lpfc/lpfc_els.c
17280 +@@ -1998,9 +1998,20 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
17281 + lpfc_disc_state_machine(vport, ndlp, cmdiocb,
17282 + NLP_EVT_CMPL_PLOGI);
17283 +
17284 +- /* As long as this node is not registered with the scsi or nvme
17285 +- * transport, it is no longer an active node. Otherwise
17286 +- * devloss handles the final cleanup.
17287 ++ /* If a PLOGI collision occurred, the node needs to continue
17288 ++ * with the reglogin process.
17289 ++ */
17290 ++ spin_lock_irq(&ndlp->lock);
17291 ++ if ((ndlp->nlp_flag & (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI)) &&
17292 ++ ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) {
17293 ++ spin_unlock_irq(&ndlp->lock);
17294 ++ goto out;
17295 ++ }
17296 ++ spin_unlock_irq(&ndlp->lock);
17297 ++
17298 ++ /* No PLOGI collision and the node is not registered with the
17299 ++ * scsi or nvme transport. It is no longer an active node. Just
17300 ++ * start the device remove process.
17301 + */
17302 + if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
17303 + spin_lock_irq(&ndlp->lock);
17304 +@@ -2869,6 +2880,11 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
17305 + * log into the remote port.
17306 + */
17307 + if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
17308 ++ spin_lock_irq(&ndlp->lock);
17309 ++ if (phba->sli_rev == LPFC_SLI_REV4)
17310 ++ ndlp->nlp_flag |= NLP_RELEASE_RPI;
17311 ++ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
17312 ++ spin_unlock_irq(&ndlp->lock);
17313 + lpfc_disc_state_machine(vport, ndlp, cmdiocb,
17314 + NLP_EVT_DEVICE_RM);
17315 + lpfc_els_free_iocb(phba, cmdiocb);
17316 +@@ -4371,6 +4387,7 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
17317 + struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
17318 + struct lpfc_vport *vport = cmdiocb->vport;
17319 + IOCB_t *irsp;
17320 ++ u32 xpt_flags = 0, did_mask = 0;
17321 +
17322 + irsp = &rspiocb->iocb;
17323 + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
17324 +@@ -4386,9 +4403,20 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
17325 + if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
17326 + /* NPort Recovery mode or node is just allocated */
17327 + if (!lpfc_nlp_not_used(ndlp)) {
17328 +- /* If the ndlp is being used by another discovery
17329 +- * thread, just unregister the RPI.
17330 ++ /* A LOGO is completing and the node is in NPR state.
17331 ++ * If this a fabric node that cleared its transport
17332 ++ * registration, release the rpi.
17333 + */
17334 ++ xpt_flags = SCSI_XPT_REGD | NVME_XPT_REGD;
17335 ++ did_mask = ndlp->nlp_DID & Fabric_DID_MASK;
17336 ++ if (did_mask == Fabric_DID_MASK &&
17337 ++ !(ndlp->fc4_xpt_flags & xpt_flags)) {
17338 ++ spin_lock_irq(&ndlp->lock);
17339 ++ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
17340 ++ if (phba->sli_rev == LPFC_SLI_REV4)
17341 ++ ndlp->nlp_flag |= NLP_RELEASE_RPI;
17342 ++ spin_unlock_irq(&ndlp->lock);
17343 ++ }
17344 + lpfc_unreg_rpi(vport, ndlp);
17345 + } else {
17346 + /* Indicate the node has already released, should
17347 +@@ -4424,28 +4452,37 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
17348 + {
17349 + struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
17350 + struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
17351 ++ u32 mbx_flag = pmb->mbox_flag;
17352 ++ u32 mbx_cmd = pmb->u.mb.mbxCommand;
17353 +
17354 + pmb->ctx_buf = NULL;
17355 + pmb->ctx_ndlp = NULL;
17356 +
17357 +- lpfc_mbuf_free(phba, mp->virt, mp->phys);
17358 +- kfree(mp);
17359 +- mempool_free(pmb, phba->mbox_mem_pool);
17360 + if (ndlp) {
17361 + lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
17362 +- "0006 rpi x%x DID:%x flg:%x %d x%px\n",
17363 ++ "0006 rpi x%x DID:%x flg:%x %d x%px "
17364 ++ "mbx_cmd x%x mbx_flag x%x x%px\n",
17365 + ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
17366 +- kref_read(&ndlp->kref),
17367 +- ndlp);
17368 +- /* This is the end of the default RPI cleanup logic for
17369 +- * this ndlp and it could get released. Clear the nlp_flags to
17370 +- * prevent any further processing.
17371 ++ kref_read(&ndlp->kref), ndlp, mbx_cmd,
17372 ++ mbx_flag, pmb);
17373 ++
17374 ++ /* This ends the default/temporary RPI cleanup logic for this
17375 ++		 * ndlp and the node and rpi need to be released. Free the rpi
17376 ++ * first on an UNREG_LOGIN and then release the final
17377 ++ * references.
17378 + */
17379 ++ spin_lock_irq(&ndlp->lock);
17380 + ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
17381 ++ if (mbx_cmd == MBX_UNREG_LOGIN)
17382 ++ ndlp->nlp_flag &= ~NLP_UNREG_INP;
17383 ++ spin_unlock_irq(&ndlp->lock);
17384 + lpfc_nlp_put(ndlp);
17385 +- lpfc_nlp_not_used(ndlp);
17386 ++ lpfc_drop_node(ndlp->vport, ndlp);
17387 + }
17388 +
17389 ++ lpfc_mbuf_free(phba, mp->virt, mp->phys);
17390 ++ kfree(mp);
17391 ++ mempool_free(pmb, phba->mbox_mem_pool);
17392 + return;
17393 + }
17394 +
17395 +@@ -4503,11 +4540,11 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
17396 + /* ELS response tag <ulpIoTag> completes */
17397 + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
17398 + "0110 ELS response tag x%x completes "
17399 +- "Data: x%x x%x x%x x%x x%x x%x x%x\n",
17400 ++ "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%px\n",
17401 + cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
17402 + rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
17403 + ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
17404 +- ndlp->nlp_rpi);
17405 ++ ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox);
17406 + if (mbox) {
17407 + if ((rspiocb->iocb.ulpStatus == 0) &&
17408 + (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
17409 +@@ -4587,6 +4624,20 @@ out:
17410 + spin_unlock_irq(&ndlp->lock);
17411 + }
17412 +
17413 ++ /* An SLI4 NPIV instance wants to drop the node at this point under
17414 ++ * these conditions and release the RPI.
17415 ++ */
17416 ++ if (phba->sli_rev == LPFC_SLI_REV4 &&
17417 ++ (vport && vport->port_type == LPFC_NPIV_PORT) &&
17418 ++ ndlp->nlp_flag & NLP_RELEASE_RPI) {
17419 ++ lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
17420 ++ spin_lock_irq(&ndlp->lock);
17421 ++ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
17422 ++ ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
17423 ++ spin_unlock_irq(&ndlp->lock);
17424 ++ lpfc_drop_node(vport, ndlp);
17425 ++ }
17426 ++
17427 + /* Release the originating I/O reference. */
17428 + lpfc_els_free_iocb(phba, cmdiocb);
17429 + lpfc_nlp_put(ndlp);
17430 +@@ -4775,10 +4826,10 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
17431 + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
17432 + "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, "
17433 + "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
17434 +- "RPI: x%x, fc_flag x%x\n",
17435 ++ "RPI: x%x, fc_flag x%x refcnt %d\n",
17436 + rc, elsiocb->iotag, elsiocb->sli4_xritag,
17437 + ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
17438 +- ndlp->nlp_rpi, vport->fc_flag);
17439 ++ ndlp->nlp_rpi, vport->fc_flag, kref_read(&ndlp->kref));
17440 + return 0;
17441 + }
17442 +
17443 +@@ -4856,6 +4907,17 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
17444 + return 1;
17445 + }
17446 +
17447 ++ /* The NPIV instance is rejecting this unsolicited ELS. Make sure the
17448 ++	 * node's assigned RPI is released as this node will get
17449 ++ * freed.
17450 ++ */
17451 ++ if (phba->sli_rev == LPFC_SLI_REV4 &&
17452 ++ vport->port_type == LPFC_NPIV_PORT) {
17453 ++ spin_lock_irq(&ndlp->lock);
17454 ++ ndlp->nlp_flag |= NLP_RELEASE_RPI;
17455 ++ spin_unlock_irq(&ndlp->lock);
17456 ++ }
17457 ++
17458 + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
17459 + if (rc == IOCB_ERROR) {
17460 + lpfc_els_free_iocb(phba, elsiocb);
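
The lpfc_els.c hunks above consistently wrap nlp_flag bit updates in spin_lock_irq()/spin_unlock_irq() pairs so that concurrent completion handlers cannot interleave their read-modify-write cycles on the flag word. Below is a minimal userspace analogue of that discipline, with a pthread mutex standing in for the driver's spinlock; the struct and flag names are invented for illustration and are not lpfc's.

    #include <pthread.h>
    #include <stdio.h>

    #define FLAG_RELEASE_RPI 0x1
    #define FLAG_UNREG_INP   0x2

    struct node {
        pthread_mutex_t lock;
        unsigned int flags;            /* read-modify-write: needs the lock */
    };

    /* Mirrors the spin_lock_irq(&ndlp->lock) pattern around nlp_flag. */
    static void node_set_flags(struct node *n, unsigned int bits)
    {
        pthread_mutex_lock(&n->lock);
        n->flags |= bits;
        pthread_mutex_unlock(&n->lock);
    }

    static void node_clear_flags(struct node *n, unsigned int bits)
    {
        pthread_mutex_lock(&n->lock);
        n->flags &= ~bits;
        pthread_mutex_unlock(&n->lock);
    }

    int main(void)
    {
        struct node n = { .lock = PTHREAD_MUTEX_INITIALIZER, .flags = 0 };

        node_set_flags(&n, FLAG_RELEASE_RPI | FLAG_UNREG_INP);
        node_clear_flags(&n, FLAG_UNREG_INP);
        printf("flags: 0x%x\n", n.flags);   /* prints 0x1 */
        return 0;
    }

The point is that |= and &= ~ are not atomic; every writer must take the same lock, which is exactly what each new hunk adds around ndlp->nlp_flag.
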
17461 +diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
17462 +index f5a898c2c9043..3ea07034ab97c 100644
17463 +--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
17464 ++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
17465 +@@ -4789,12 +4789,17 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
17466 + ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
17467 + lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
17468 + } else {
17469 ++ /* NLP_RELEASE_RPI is only set for SLI4 ports. */
17470 + if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
17471 + lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
17472 ++ spin_lock_irq(&ndlp->lock);
17473 + ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
17474 + ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
17475 ++ spin_unlock_irq(&ndlp->lock);
17476 + }
17477 ++ spin_lock_irq(&ndlp->lock);
17478 + ndlp->nlp_flag &= ~NLP_UNREG_INP;
17479 ++ spin_unlock_irq(&ndlp->lock);
17480 + }
17481 + }
17482 +
17483 +@@ -5129,8 +5134,10 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
17484 + list_del_init(&ndlp->dev_loss_evt.evt_listp);
17485 + list_del_init(&ndlp->recovery_evt.evt_listp);
17486 + lpfc_cleanup_vports_rrqs(vport, ndlp);
17487 ++
17488 + if (phba->sli_rev == LPFC_SLI_REV4)
17489 + ndlp->nlp_flag |= NLP_RELEASE_RPI;
17490 ++
17491 + return 0;
17492 + }
17493 +
17494 +@@ -6176,8 +6183,23 @@ lpfc_nlp_release(struct kref *kref)
17495 + lpfc_cancel_retry_delay_tmo(vport, ndlp);
17496 + lpfc_cleanup_node(vport, ndlp);
17497 +
17498 +- /* Clear Node key fields to give other threads notice
17499 +- * that this node memory is not valid anymore.
17500 ++ /* Not all ELS transactions have registered the RPI with the port.
17501 ++ * In these cases the rpi usage is temporary and the node is
17502 ++ * released when the WQE is completed. Catch this case to free the
17503 ++ * RPI to the pool. Because this node is in the release path, a lock
17504 ++ * is unnecessary. All references are gone and the node has been
17505 ++ * dequeued.
17506 ++ */
17507 ++ if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
17508 ++ if (ndlp->nlp_rpi != LPFC_RPI_ALLOC_ERROR &&
17509 ++ !(ndlp->nlp_flag & (NLP_RPI_REGISTERED | NLP_UNREG_INP))) {
17510 ++ lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
17511 ++ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
17512 ++ }
17513 ++ }
17514 ++
17515 ++ /* The node is not freed back to memory, it is released to a pool so
17516 ++ * the node fields need to be cleaned up.
17517 + */
17518 + ndlp->vport = NULL;
17519 + ndlp->nlp_state = NLP_STE_FREED_NODE;
17520 +@@ -6257,6 +6279,7 @@ lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
17521 + "node not used: did:x%x flg:x%x refcnt:x%x",
17522 + ndlp->nlp_DID, ndlp->nlp_flag,
17523 + kref_read(&ndlp->kref));
17524 ++
17525 + if (kref_read(&ndlp->kref) == 1)
17526 + if (lpfc_nlp_put(ndlp))
17527 + return 1;
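
lpfc_nlp_release() above frees a temporarily held RPI once the final kref drops, and its comment notes that no lock is needed because all references are gone. A hedged sketch of that last-put-runs-cleanup idea using C11 atomics follows; obj, rpi, and the "pool" are stand-ins, not lpfc structures.

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
        atomic_int refs;
        int rpi;                 /* stand-in for a pooled resource */
    };

    static void obj_release(struct obj *o)
    {
        /* The last reference is gone, so no locking is needed here. */
        if (o->rpi >= 0) {
            printf("returning rpi %d to the pool\n", o->rpi);
            o->rpi = -1;
        }
        free(o);
    }

    static void obj_get(struct obj *o) { atomic_fetch_add(&o->refs, 1); }

    static void obj_put(struct obj *o)
    {
        if (atomic_fetch_sub(&o->refs, 1) == 1)
            obj_release(o);
    }

    int main(void)
    {
        struct obj *o = malloc(sizeof(*o));
        atomic_init(&o->refs, 1);
        o->rpi = 42;

        obj_get(o);    /* second user */
        obj_put(o);    /* first put: object survives */
        obj_put(o);    /* final put: obj_release() runs */
        return 0;
    }

atomic_fetch_sub() returning 1 identifies the thread that dropped the final reference, the only one allowed to touch the object unlocked.
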
17528 +diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
17529 +index 5f018d02bf562..f81dfa3cb0a1e 100644
17530 +--- a/drivers/scsi/lpfc/lpfc_init.c
17531 ++++ b/drivers/scsi/lpfc/lpfc_init.c
17532 +@@ -3532,13 +3532,6 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
17533 + list_for_each_entry_safe(ndlp, next_ndlp,
17534 + &vports[i]->fc_nodes,
17535 + nlp_listp) {
17536 +- if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
17537 +- /* Driver must assume RPI is invalid for
17538 +- * any unused or inactive node.
17539 +- */
17540 +- ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
17541 +- continue;
17542 +- }
17543 +
17544 + spin_lock_irq(&ndlp->lock);
17545 + ndlp->nlp_flag &= ~NLP_NPR_ADISC;
17546 +diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
17547 +index bb4e65a32ecc6..3dac116c405bf 100644
17548 +--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
17549 ++++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
17550 +@@ -567,15 +567,24 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
17551 + /* no deferred ACC */
17552 + kfree(save_iocb);
17553 +
17554 +- /* In order to preserve RPIs, we want to cleanup
17555 +- * the default RPI the firmware created to rcv
17556 +- * this ELS request. The only way to do this is
17557 +- * to register, then unregister the RPI.
17558 ++ /* This is an NPIV SLI4 instance that does not need to register
17559 ++ * a default RPI.
17560 + */
17561 +- spin_lock_irq(&ndlp->lock);
17562 +- ndlp->nlp_flag |= (NLP_RM_DFLT_RPI | NLP_ACC_REGLOGIN |
17563 +- NLP_RCV_PLOGI);
17564 +- spin_unlock_irq(&ndlp->lock);
17565 ++ if (phba->sli_rev == LPFC_SLI_REV4) {
17566 ++ mempool_free(login_mbox, phba->mbox_mem_pool);
17567 ++ login_mbox = NULL;
17568 ++ } else {
17569 ++ /* In order to preserve RPIs, we want to cleanup
17570 ++ * the default RPI the firmware created to rcv
17571 ++ * this ELS request. The only way to do this is
17572 ++ * to register, then unregister the RPI.
17573 ++ */
17574 ++ spin_lock_irq(&ndlp->lock);
17575 ++ ndlp->nlp_flag |= (NLP_RM_DFLT_RPI | NLP_ACC_REGLOGIN |
17576 ++ NLP_RCV_PLOGI);
17577 ++ spin_unlock_irq(&ndlp->lock);
17578 ++ }
17579 ++
17580 + stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
17581 + stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
17582 + rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
17583 +diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
17584 +index fc3682f15f509..bc0bcb0dccc9a 100644
17585 +--- a/drivers/scsi/lpfc/lpfc_sli.c
17586 ++++ b/drivers/scsi/lpfc/lpfc_sli.c
17587 +@@ -13625,9 +13625,15 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
17588 + if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
17589 + mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
17590 + ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
17591 +- /* Reg_LOGIN of dflt RPI was successful. Now lets get
17592 +- * RID of the PPI using the same mbox buffer.
17593 ++
17594 ++ /* Reg_LOGIN of dflt RPI was successful. Mark the
17595 ++ * node as having an UNREG_LOGIN in progress to stop
17596 ++ * an unsolicited PLOGI from the same NPortId from
17597 ++ * starting another mailbox transaction.
17598 + */
17599 ++ spin_lock_irqsave(&ndlp->lock, iflags);
17600 ++ ndlp->nlp_flag |= NLP_UNREG_INP;
17601 ++ spin_unlock_irqrestore(&ndlp->lock, iflags);
17602 + lpfc_unreg_login(phba, vport->vpi,
17603 + pmbox->un.varWords[0], pmb);
17604 + pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
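
The lpfc_sli.c hunk sets NLP_UNREG_INP under the node lock before issuing the UNREG_LOGIN mailbox, so an unsolicited PLOGI from the same NPortId cannot start a second, overlapping mailbox transaction. A small sketch of such a single-owner guard with a C11 atomic_flag (names are illustrative, not the driver's):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_flag unreg_in_progress = ATOMIC_FLAG_INIT;

    /* Returns true if this caller won the right to run the transaction. */
    static bool try_start_unreg(void)
    {
        return !atomic_flag_test_and_set(&unreg_in_progress);
    }

    static void finish_unreg(void)
    {
        atomic_flag_clear(&unreg_in_progress);
    }

    int main(void)
    {
        if (try_start_unreg()) {
            puts("first caller: transaction started");
            /* a second caller arriving now is rejected */
            if (!try_start_unreg())
                puts("second caller: already in progress, backing off");
            finish_unreg();
        }
        return 0;
    }
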
17605 +diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
17606 +index 2221175ae051f..cd94a0c81f835 100644
17607 +--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
17608 ++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
17609 +@@ -3203,6 +3203,8 @@ megasas_build_io_fusion(struct megasas_instance *instance,
17610 + {
17611 + int sge_count;
17612 + u8 cmd_type;
17613 ++ u16 pd_index = 0;
17614 ++ u8 drive_type = 0;
17615 + struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
17616 + struct MR_PRIV_DEVICE *mr_device_priv_data;
17617 + mr_device_priv_data = scp->device->hostdata;
17618 +@@ -3237,8 +3239,12 @@ megasas_build_io_fusion(struct megasas_instance *instance,
17619 + megasas_build_syspd_fusion(instance, scp, cmd, true);
17620 + break;
17621 + case NON_READ_WRITE_SYSPDIO:
17622 +- if (instance->secure_jbod_support ||
17623 +- mr_device_priv_data->is_tm_capable)
17624 ++ pd_index = MEGASAS_PD_INDEX(scp);
17625 ++ drive_type = instance->pd_list[pd_index].driveType;
17626 ++ if ((instance->secure_jbod_support ||
17627 ++ mr_device_priv_data->is_tm_capable) ||
17628 ++ (instance->adapter_type >= VENTURA_SERIES &&
17629 ++ drive_type == TYPE_ENCLOSURE))
17630 + megasas_build_syspd_fusion(instance, scp, cmd, false);
17631 + else
17632 + megasas_build_syspd_fusion(instance, scp, cmd, true);
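
The megaraid_sas change widens the routing test: enclosure-type devices on Ventura-or-newer adapters now also take the firmware (non-fast-path) route. Restated as a standalone predicate; the VENTURA_SERIES and TYPE_ENCLOSURE values below are placeholders for illustration, not the driver's real enum values.

    #include <stdbool.h>
    #include <stdio.h>

    #define VENTURA_SERIES   5   /* placeholder adapter-generation ordering */
    #define TYPE_ENCLOSURE  13   /* placeholder for the enclosure device type */

    static bool use_firmware_path(bool secure_jbod, bool tm_capable,
                                  int adapter_type, int drive_type)
    {
        /* Original rule: secure JBOD or task-management-capable devices. */
        if (secure_jbod || tm_capable)
            return true;
        /* New rule: enclosure devices on Ventura-or-newer adapters too. */
        return adapter_type >= VENTURA_SERIES && drive_type == TYPE_ENCLOSURE;
    }

    int main(void)
    {
        printf("%d\n", use_firmware_path(false, false,
                                         VENTURA_SERIES, TYPE_ENCLOSURE)); /* 1 */
        printf("%d\n", use_firmware_path(false, false, 4, 0));             /* 0 */
        return 0;
    }
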
17633 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
17634 +index d00aca3c77cec..a5f70f0e02871 100644
17635 +--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
17636 ++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
17637 +@@ -6884,8 +6884,10 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
17638 + handle, parent_handle,
17639 + (u64)sas_expander->sas_address, sas_expander->num_phys);
17640 +
17641 +- if (!sas_expander->num_phys)
17642 ++ if (!sas_expander->num_phys) {
17643 ++ rc = -1;
17644 + goto out_fail;
17645 ++ }
17646 + sas_expander->phy = kcalloc(sas_expander->num_phys,
17647 + sizeof(struct _sas_phy), GFP_KERNEL);
17648 + if (!sas_expander->phy) {
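
The mpt3sas fix applies the classic error-path rule: a goto into a cleanup label must first set the return value. Previously a zero-phy expander fell into out_fail while still reporting success. A generic sketch of the idiom (names are not from mpt3sas):

    #include <stdio.h>
    #include <stdlib.h>

    static int expander_add(unsigned int num_phys)
    {
        int rc = 0;
        char *phys = NULL;

        if (!num_phys) {
            rc = -1;           /* the fix: never reach out_fail with rc == 0 */
            goto out_fail;
        }

        phys = calloc(num_phys, 64);
        if (!phys) {
            rc = -1;
            goto out_fail;
        }

        puts("expander added");
        free(phys);
        return 0;

    out_fail:
        free(phys);            /* free(NULL) is a safe no-op */
        return rc;
    }

    int main(void)
    {
        printf("rc = %d\n", expander_add(0));   /* now reports failure */
        return 0;
    }
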
17649 +diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
17650 +index 08c05403cd720..087c7ff28cd52 100644
17651 +--- a/drivers/scsi/qedi/qedi_iscsi.c
17652 ++++ b/drivers/scsi/qedi/qedi_iscsi.c
17653 +@@ -377,6 +377,7 @@ static int qedi_conn_bind(struct iscsi_cls_session *cls_session,
17654 + struct qedi_ctx *qedi = iscsi_host_priv(shost);
17655 + struct qedi_endpoint *qedi_ep;
17656 + struct iscsi_endpoint *ep;
17657 ++ int rc = 0;
17658 +
17659 + ep = iscsi_lookup_endpoint(transport_fd);
17660 + if (!ep)
17661 +@@ -384,11 +385,16 @@ static int qedi_conn_bind(struct iscsi_cls_session *cls_session,
17662 +
17663 + qedi_ep = ep->dd_data;
17664 + if ((qedi_ep->state == EP_STATE_TCP_FIN_RCVD) ||
17665 +- (qedi_ep->state == EP_STATE_TCP_RST_RCVD))
17666 +- return -EINVAL;
17667 ++ (qedi_ep->state == EP_STATE_TCP_RST_RCVD)) {
17668 ++ rc = -EINVAL;
17669 ++ goto put_ep;
17670 ++ }
17671 ++
17672 ++ if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) {
17673 ++ rc = -EINVAL;
17674 ++ goto put_ep;
17675 ++ }
17676 +
17677 +- if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
17678 +- return -EINVAL;
17679 +
17680 + qedi_ep->conn = qedi_conn;
17681 + qedi_conn->ep = qedi_ep;
17682 +@@ -398,13 +404,18 @@ static int qedi_conn_bind(struct iscsi_cls_session *cls_session,
17683 + qedi_conn->cmd_cleanup_req = 0;
17684 + qedi_conn->cmd_cleanup_cmpl = 0;
17685 +
17686 +- if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn))
17687 +- return -EINVAL;
17688 ++ if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn)) {
17689 ++ rc = -EINVAL;
17690 ++ goto put_ep;
17691 ++ }
17692 ++
17693 +
17694 + spin_lock_init(&qedi_conn->tmf_work_lock);
17695 + INIT_LIST_HEAD(&qedi_conn->tmf_work_list);
17696 + init_waitqueue_head(&qedi_conn->wait_queue);
17697 +- return 0;
17698 ++put_ep:
17699 ++ iscsi_put_endpoint(ep);
17700 ++ return rc;
17701 + }
17702 +
17703 + static int qedi_iscsi_update_conn(struct qedi_ctx *qedi,
17704 +@@ -1401,6 +1412,7 @@ struct iscsi_transport qedi_iscsi_transport = {
17705 + .destroy_session = qedi_session_destroy,
17706 + .create_conn = qedi_conn_create,
17707 + .bind_conn = qedi_conn_bind,
17708 ++ .unbind_conn = iscsi_conn_unbind,
17709 + .start_conn = qedi_conn_start,
17710 + .stop_conn = iscsi_conn_stop,
17711 + .destroy_conn = qedi_conn_destroy,
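
Because iscsi_lookup_endpoint() now takes a reference (see the scsi_transport_iscsi.c hunks further down), qedi_conn_bind() converts its early returns into goto put_ep so the reference is dropped on every exit, success and failure alike. A compact model of that single-exit pairing; the lookup/put helpers here are stand-ins, not the transport-class API.

    #include <errno.h>
    #include <stdio.h>

    struct ep { int refs; };

    static struct ep *ep_lookup(struct ep *e) { e->refs++; return e; }
    static void ep_put(struct ep *e) { e->refs--; }

    static int conn_bind(struct ep *e, int bad_state)
    {
        int rc = 0;
        struct ep *ep = ep_lookup(e);     /* lookup takes a reference */

        if (bad_state) {
            rc = -EINVAL;
            goto put_ep;                  /* not a bare return: put still runs */
        }

        /* ... the actual bind work would go here ... */

    put_ep:
        ep_put(ep);                       /* every path drops the reference */
        return rc;
    }

    int main(void)
    {
        struct ep e = { 0 };
        conn_bind(&e, 1);
        printf("refs after failed bind: %d\n", e.refs);   /* 0: no leak */
        return 0;
    }
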
17712 +diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
17713 +index ad3afe30f617d..0e7a7e82e0284 100644
17714 +--- a/drivers/scsi/qla4xxx/ql4_os.c
17715 ++++ b/drivers/scsi/qla4xxx/ql4_os.c
17716 +@@ -259,6 +259,7 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
17717 + .start_conn = qla4xxx_conn_start,
17718 + .create_conn = qla4xxx_conn_create,
17719 + .bind_conn = qla4xxx_conn_bind,
17720 ++ .unbind_conn = iscsi_conn_unbind,
17721 + .stop_conn = iscsi_conn_stop,
17722 + .destroy_conn = qla4xxx_conn_destroy,
17723 + .set_param = iscsi_set_param,
17724 +@@ -3234,6 +3235,7 @@ static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
17725 + conn = cls_conn->dd_data;
17726 + qla_conn = conn->dd_data;
17727 + qla_conn->qla_ep = ep->dd_data;
17728 ++ iscsi_put_endpoint(ep);
17729 + return 0;
17730 + }
17731 +
17732 +diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
17733 +index 532304d42f00e..269bfb8f91655 100644
17734 +--- a/drivers/scsi/scsi_lib.c
17735 ++++ b/drivers/scsi/scsi_lib.c
17736 +@@ -728,6 +728,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
17737 + case 0x07: /* operation in progress */
17738 + case 0x08: /* Long write in progress */
17739 + case 0x09: /* self test in progress */
17740 ++ case 0x11: /* notify (enable spinup) required */
17741 + case 0x14: /* space allocation in progress */
17742 + case 0x1a: /* start stop unit in progress */
17743 + case 0x1b: /* sanitize in progress */
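
The scsi_lib.c hunk adds ASC 0x04/ASCQ 0x11, "notify (enable spinup) required", to the set of "LUN is becoming ready" qualifiers that are retried rather than failed. The decision reduces to a table like this sketch; only the qualifiers visible in the hunk are listed, the driver handles more.

    #include <stdbool.h>
    #include <stdio.h>

    /* ASC 0x04 (logical unit not ready) qualifiers that mean "retry later";
     * 0x11 is the case the hunk adds. */
    static bool lun_becoming_ready(unsigned char ascq)
    {
        switch (ascq) {
        case 0x07:   /* operation in progress */
        case 0x08:   /* long write in progress */
        case 0x09:   /* self test in progress */
        case 0x11:   /* notify (enable spinup) required */
        case 0x14:   /* space allocation in progress */
        case 0x1a:   /* start stop unit in progress */
        case 0x1b:   /* sanitize in progress */
            return true;
        default:
            return false;
        }
    }

    int main(void)
    {
        printf("0x11 retried: %d\n", lun_becoming_ready(0x11));   /* 1 */
        printf("0x02 retried: %d\n", lun_becoming_ready(0x02));   /* 0 */
        return 0;
    }
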
17744 +diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
17745 +index 441f0152193f7..6ce1cc992d1d0 100644
17746 +--- a/drivers/scsi/scsi_transport_iscsi.c
17747 ++++ b/drivers/scsi/scsi_transport_iscsi.c
17748 +@@ -86,16 +86,10 @@ struct iscsi_internal {
17749 + struct transport_container session_cont;
17750 + };
17751 +
17752 +-/* Worker to perform connection failure on unresponsive connections
17753 +- * completely in kernel space.
17754 +- */
17755 +-static void stop_conn_work_fn(struct work_struct *work);
17756 +-static DECLARE_WORK(stop_conn_work, stop_conn_work_fn);
17757 +-
17758 + static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
17759 + static struct workqueue_struct *iscsi_eh_timer_workq;
17760 +
17761 +-static struct workqueue_struct *iscsi_destroy_workq;
17762 ++static struct workqueue_struct *iscsi_conn_cleanup_workq;
17763 +
17764 + static DEFINE_IDA(iscsi_sess_ida);
17765 + /*
17766 +@@ -268,9 +262,20 @@ void iscsi_destroy_endpoint(struct iscsi_endpoint *ep)
17767 + }
17768 + EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint);
17769 +
17770 ++void iscsi_put_endpoint(struct iscsi_endpoint *ep)
17771 ++{
17772 ++ put_device(&ep->dev);
17773 ++}
17774 ++EXPORT_SYMBOL_GPL(iscsi_put_endpoint);
17775 ++
17776 ++/**
17777 ++ * iscsi_lookup_endpoint - get ep from handle
17778 ++ * @handle: endpoint handle
17779 ++ *
17780 ++ * Caller must do an iscsi_put_endpoint.
17781 ++ */
17782 + struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
17783 + {
17784 +- struct iscsi_endpoint *ep;
17785 + struct device *dev;
17786 +
17787 + dev = class_find_device(&iscsi_endpoint_class, NULL, &handle,
17788 +@@ -278,13 +283,7 @@ struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
17789 + if (!dev)
17790 + return NULL;
17791 +
17792 +- ep = iscsi_dev_to_endpoint(dev);
17793 +- /*
17794 +- * we can drop this now because the interface will prevent
17795 +- * removals and lookups from racing.
17796 +- */
17797 +- put_device(dev);
17798 +- return ep;
17799 ++ return iscsi_dev_to_endpoint(dev);
17800 + }
17801 + EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
17802 +
17803 +@@ -1620,12 +1619,6 @@ static DECLARE_TRANSPORT_CLASS(iscsi_connection_class,
17804 + static struct sock *nls;
17805 + static DEFINE_MUTEX(rx_queue_mutex);
17806 +
17807 +-/*
17808 +- * conn_mutex protects the {start,bind,stop,destroy}_conn from racing
17809 +- * against the kernel stop_connection recovery mechanism
17810 +- */
17811 +-static DEFINE_MUTEX(conn_mutex);
17812 +-
17813 + static LIST_HEAD(sesslist);
17814 + static DEFINE_SPINLOCK(sesslock);
17815 + static LIST_HEAD(connlist);
17816 +@@ -1976,6 +1969,8 @@ static void __iscsi_unblock_session(struct work_struct *work)
17817 + */
17818 + void iscsi_unblock_session(struct iscsi_cls_session *session)
17819 + {
17820 ++ flush_work(&session->block_work);
17821 ++
17822 + queue_work(iscsi_eh_timer_workq, &session->unblock_work);
17823 + /*
17824 + * Blocking the session can be done from any context so we only
17825 +@@ -2242,6 +2237,123 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
17826 + }
17827 + EXPORT_SYMBOL_GPL(iscsi_remove_session);
17828 +
17829 ++static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag)
17830 ++{
17831 ++ ISCSI_DBG_TRANS_CONN(conn, "Stopping conn.\n");
17832 ++
17833 ++ switch (flag) {
17834 ++ case STOP_CONN_RECOVER:
17835 ++ conn->state = ISCSI_CONN_FAILED;
17836 ++ break;
17837 ++ case STOP_CONN_TERM:
17838 ++ conn->state = ISCSI_CONN_DOWN;
17839 ++ break;
17840 ++ default:
17841 ++ iscsi_cls_conn_printk(KERN_ERR, conn, "invalid stop flag %d\n",
17842 ++ flag);
17843 ++ return;
17844 ++ }
17845 ++
17846 ++ conn->transport->stop_conn(conn, flag);
17847 ++ ISCSI_DBG_TRANS_CONN(conn, "Stopping conn done.\n");
17848 ++}
17849 ++
17850 ++static int iscsi_if_stop_conn(struct iscsi_transport *transport,
17851 ++ struct iscsi_uevent *ev)
17852 ++{
17853 ++ int flag = ev->u.stop_conn.flag;
17854 ++ struct iscsi_cls_conn *conn;
17855 ++
17856 ++ conn = iscsi_conn_lookup(ev->u.stop_conn.sid, ev->u.stop_conn.cid);
17857 ++ if (!conn)
17858 ++ return -EINVAL;
17859 ++
17860 ++ ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop.\n");
17861 ++ /*
17862 ++ * If this is a termination we have to call stop_conn with that flag
17863 ++ * so the correct states get set. If we haven't run the work yet try to
17864 ++ * avoid the extra run.
17865 ++ */
17866 ++ if (flag == STOP_CONN_TERM) {
17867 ++ cancel_work_sync(&conn->cleanup_work);
17868 ++ iscsi_stop_conn(conn, flag);
17869 ++ } else {
17870 ++ /*
17871 ++ * Figure out if it was the kernel or userspace initiating this.
17872 ++ */
17873 ++ if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
17874 ++ iscsi_stop_conn(conn, flag);
17875 ++ } else {
17876 ++ ISCSI_DBG_TRANS_CONN(conn,
17877 ++ "flush kernel conn cleanup.\n");
17878 ++ flush_work(&conn->cleanup_work);
17879 ++ }
17880 ++ /*
17881 ++ * Only clear for recovery to avoid extra cleanup runs during
17882 ++ * termination.
17883 ++ */
17884 ++ clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags);
17885 ++ }
17886 ++ ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop done.\n");
17887 ++ return 0;
17888 ++}
17889 ++
17890 ++static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active)
17891 ++{
17892 ++ struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
17893 ++ struct iscsi_endpoint *ep;
17894 ++
17895 ++ ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n");
17896 ++ conn->state = ISCSI_CONN_FAILED;
17897 ++
17898 ++ if (!conn->ep || !session->transport->ep_disconnect)
17899 ++ return;
17900 ++
17901 ++ ep = conn->ep;
17902 ++ conn->ep = NULL;
17903 ++
17904 ++ session->transport->unbind_conn(conn, is_active);
17905 ++ session->transport->ep_disconnect(ep);
17906 ++ ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n");
17907 ++}
17908 ++
17909 ++static void iscsi_cleanup_conn_work_fn(struct work_struct *work)
17910 ++{
17911 ++ struct iscsi_cls_conn *conn = container_of(work, struct iscsi_cls_conn,
17912 ++ cleanup_work);
17913 ++ struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
17914 ++
17915 ++ mutex_lock(&conn->ep_mutex);
17916 ++ /*
17917 ++ * If we are not at least bound there is nothing for us to do. Userspace
17918 ++ * will do a ep_disconnect call if offload is used, but will not be
17919 ++	 * will do an ep_disconnect call if offload is used, but will not be
17920 ++ * the cleanup bit here.
17921 ++ */
17922 ++ if (conn->state != ISCSI_CONN_BOUND && conn->state != ISCSI_CONN_UP) {
17923 ++ ISCSI_DBG_TRANS_CONN(conn, "Got error while conn is already failed. Ignoring.\n");
17924 ++ clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags);
17925 ++ mutex_unlock(&conn->ep_mutex);
17926 ++ return;
17927 ++ }
17928 ++
17929 ++ iscsi_ep_disconnect(conn, false);
17930 ++
17931 ++ if (system_state != SYSTEM_RUNNING) {
17932 ++ /*
17933 ++ * If the user has set up for the session to never timeout
17934 ++ * then hang like they wanted. For all other cases fail right
17935 ++ * away since userspace is not going to relogin.
17936 ++ */
17937 ++ if (session->recovery_tmo > 0)
17938 ++ session->recovery_tmo = 0;
17939 ++ }
17940 ++
17941 ++ iscsi_stop_conn(conn, STOP_CONN_RECOVER);
17942 ++ mutex_unlock(&conn->ep_mutex);
17943 ++ ISCSI_DBG_TRANS_CONN(conn, "cleanup done.\n");
17944 ++}
17945 ++
17946 + void iscsi_free_session(struct iscsi_cls_session *session)
17947 + {
17948 + ISCSI_DBG_TRANS_SESSION(session, "Freeing session\n");
17949 +@@ -2281,7 +2393,7 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
17950 +
17951 + mutex_init(&conn->ep_mutex);
17952 + INIT_LIST_HEAD(&conn->conn_list);
17953 +- INIT_LIST_HEAD(&conn->conn_list_err);
17954 ++ INIT_WORK(&conn->cleanup_work, iscsi_cleanup_conn_work_fn);
17955 + conn->transport = transport;
17956 + conn->cid = cid;
17957 + conn->state = ISCSI_CONN_DOWN;
17958 +@@ -2338,7 +2450,6 @@ int iscsi_destroy_conn(struct iscsi_cls_conn *conn)
17959 +
17960 + spin_lock_irqsave(&connlock, flags);
17961 + list_del(&conn->conn_list);
17962 +- list_del(&conn->conn_list_err);
17963 + spin_unlock_irqrestore(&connlock, flags);
17964 +
17965 + transport_unregister_device(&conn->dev);
17966 +@@ -2453,77 +2564,6 @@ int iscsi_offload_mesg(struct Scsi_Host *shost,
17967 + }
17968 + EXPORT_SYMBOL_GPL(iscsi_offload_mesg);
17969 +
17970 +-/*
17971 +- * This can be called without the rx_queue_mutex, if invoked by the kernel
17972 +- * stop work. But, in that case, it is guaranteed not to race with
17973 +- * iscsi_destroy by conn_mutex.
17974 +- */
17975 +-static void iscsi_if_stop_conn(struct iscsi_cls_conn *conn, int flag)
17976 +-{
17977 +- /*
17978 +- * It is important that this path doesn't rely on
17979 +- * rx_queue_mutex, otherwise, a thread doing allocation on a
17980 +- * start_session/start_connection could sleep waiting on a
17981 +- * writeback to a failed iscsi device, that cannot be recovered
17982 +- * because the lock is held. If we don't hold it here, the
17983 +- * kernel stop_conn_work_fn has a chance to stop the broken
17984 +- * session and resolve the allocation.
17985 +- *
17986 +- * Still, the user invoked .stop_conn() needs to be serialized
17987 +- * with stop_conn_work_fn by a private mutex. Not pretty, but
17988 +- * it works.
17989 +- */
17990 +- mutex_lock(&conn_mutex);
17991 +- switch (flag) {
17992 +- case STOP_CONN_RECOVER:
17993 +- conn->state = ISCSI_CONN_FAILED;
17994 +- break;
17995 +- case STOP_CONN_TERM:
17996 +- conn->state = ISCSI_CONN_DOWN;
17997 +- break;
17998 +- default:
17999 +- iscsi_cls_conn_printk(KERN_ERR, conn,
18000 +- "invalid stop flag %d\n", flag);
18001 +- goto unlock;
18002 +- }
18003 +-
18004 +- conn->transport->stop_conn(conn, flag);
18005 +-unlock:
18006 +- mutex_unlock(&conn_mutex);
18007 +-}
18008 +-
18009 +-static void stop_conn_work_fn(struct work_struct *work)
18010 +-{
18011 +- struct iscsi_cls_conn *conn, *tmp;
18012 +- unsigned long flags;
18013 +- LIST_HEAD(recovery_list);
18014 +-
18015 +- spin_lock_irqsave(&connlock, flags);
18016 +- if (list_empty(&connlist_err)) {
18017 +- spin_unlock_irqrestore(&connlock, flags);
18018 +- return;
18019 +- }
18020 +- list_splice_init(&connlist_err, &recovery_list);
18021 +- spin_unlock_irqrestore(&connlock, flags);
18022 +-
18023 +- list_for_each_entry_safe(conn, tmp, &recovery_list, conn_list_err) {
18024 +- uint32_t sid = iscsi_conn_get_sid(conn);
18025 +- struct iscsi_cls_session *session;
18026 +-
18027 +- session = iscsi_session_lookup(sid);
18028 +- if (session) {
18029 +- if (system_state != SYSTEM_RUNNING) {
18030 +- session->recovery_tmo = 0;
18031 +- iscsi_if_stop_conn(conn, STOP_CONN_TERM);
18032 +- } else {
18033 +- iscsi_if_stop_conn(conn, STOP_CONN_RECOVER);
18034 +- }
18035 +- }
18036 +-
18037 +- list_del_init(&conn->conn_list_err);
18038 +- }
18039 +-}
18040 +-
18041 + void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
18042 + {
18043 + struct nlmsghdr *nlh;
18044 +@@ -2531,12 +2571,9 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
18045 + struct iscsi_uevent *ev;
18046 + struct iscsi_internal *priv;
18047 + int len = nlmsg_total_size(sizeof(*ev));
18048 +- unsigned long flags;
18049 +
18050 +- spin_lock_irqsave(&connlock, flags);
18051 +- list_add(&conn->conn_list_err, &connlist_err);
18052 +- spin_unlock_irqrestore(&connlock, flags);
18053 +- queue_work(system_unbound_wq, &stop_conn_work);
18054 ++ if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags))
18055 ++ queue_work(iscsi_conn_cleanup_workq, &conn->cleanup_work);
18056 +
18057 + priv = iscsi_if_transport_lookup(conn->transport);
18058 + if (!priv)
18059 +@@ -2866,26 +2903,17 @@ static int
18060 + iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
18061 + {
18062 + struct iscsi_cls_conn *conn;
18063 +- unsigned long flags;
18064 +
18065 + conn = iscsi_conn_lookup(ev->u.d_conn.sid, ev->u.d_conn.cid);
18066 + if (!conn)
18067 + return -EINVAL;
18068 +
18069 +- spin_lock_irqsave(&connlock, flags);
18070 +- if (!list_empty(&conn->conn_list_err)) {
18071 +- spin_unlock_irqrestore(&connlock, flags);
18072 +- return -EAGAIN;
18073 +- }
18074 +- spin_unlock_irqrestore(&connlock, flags);
18075 +-
18076 ++ ISCSI_DBG_TRANS_CONN(conn, "Flushing cleanup during destruction\n");
18077 ++ flush_work(&conn->cleanup_work);
18078 + ISCSI_DBG_TRANS_CONN(conn, "Destroying transport conn\n");
18079 +
18080 +- mutex_lock(&conn_mutex);
18081 + if (transport->destroy_conn)
18082 + transport->destroy_conn(conn);
18083 +- mutex_unlock(&conn_mutex);
18084 +-
18085 + return 0;
18086 + }
18087 +
18088 +@@ -2975,15 +3003,31 @@ static int iscsi_if_ep_disconnect(struct iscsi_transport *transport,
18089 + ep = iscsi_lookup_endpoint(ep_handle);
18090 + if (!ep)
18091 + return -EINVAL;
18092 ++
18093 + conn = ep->conn;
18094 +- if (conn) {
18095 +- mutex_lock(&conn->ep_mutex);
18096 +- conn->ep = NULL;
18097 ++ if (!conn) {
18098 ++ /*
18099 ++ * conn was not even bound yet, so we can't get iscsi conn
18100 ++ * failures yet.
18101 ++ */
18102 ++ transport->ep_disconnect(ep);
18103 ++ goto put_ep;
18104 ++ }
18105 ++
18106 ++ mutex_lock(&conn->ep_mutex);
18107 ++ /* Check if this was a conn error and the kernel took ownership */
18108 ++ if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
18109 ++ ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n");
18110 + mutex_unlock(&conn->ep_mutex);
18111 +- conn->state = ISCSI_CONN_FAILED;
18112 ++
18113 ++ flush_work(&conn->cleanup_work);
18114 ++ goto put_ep;
18115 + }
18116 +
18117 +- transport->ep_disconnect(ep);
18118 ++ iscsi_ep_disconnect(conn, false);
18119 ++ mutex_unlock(&conn->ep_mutex);
18120 ++put_ep:
18121 ++ iscsi_put_endpoint(ep);
18122 + return 0;
18123 + }
18124 +
18125 +@@ -3009,6 +3053,7 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
18126 +
18127 + ev->r.retcode = transport->ep_poll(ep,
18128 + ev->u.ep_poll.timeout_ms);
18129 ++ iscsi_put_endpoint(ep);
18130 + break;
18131 + case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
18132 + rc = iscsi_if_ep_disconnect(transport,
18133 +@@ -3639,18 +3684,129 @@ exit_host_stats:
18134 + return err;
18135 + }
18136 +
18137 ++static int iscsi_if_transport_conn(struct iscsi_transport *transport,
18138 ++ struct nlmsghdr *nlh)
18139 ++{
18140 ++ struct iscsi_uevent *ev = nlmsg_data(nlh);
18141 ++ struct iscsi_cls_session *session;
18142 ++ struct iscsi_cls_conn *conn = NULL;
18143 ++ struct iscsi_endpoint *ep;
18144 ++ uint32_t pdu_len;
18145 ++ int err = 0;
18146 ++
18147 ++ switch (nlh->nlmsg_type) {
18148 ++ case ISCSI_UEVENT_CREATE_CONN:
18149 ++ return iscsi_if_create_conn(transport, ev);
18150 ++ case ISCSI_UEVENT_DESTROY_CONN:
18151 ++ return iscsi_if_destroy_conn(transport, ev);
18152 ++ case ISCSI_UEVENT_STOP_CONN:
18153 ++ return iscsi_if_stop_conn(transport, ev);
18154 ++ }
18155 ++
18156 ++ /*
18157 ++ * The following cmds need to be run under the ep_mutex so in kernel
18158 ++ * conn cleanup (ep_disconnect + unbind and conn) is not done while
18159 ++ * these are running. They also must not run if we have just run a conn
18160 ++ * cleanup because they would set the state in a way that might allow
18161 ++ * IO or send IO themselves.
18162 ++ */
18163 ++ switch (nlh->nlmsg_type) {
18164 ++ case ISCSI_UEVENT_START_CONN:
18165 ++ conn = iscsi_conn_lookup(ev->u.start_conn.sid,
18166 ++ ev->u.start_conn.cid);
18167 ++ break;
18168 ++ case ISCSI_UEVENT_BIND_CONN:
18169 ++ conn = iscsi_conn_lookup(ev->u.b_conn.sid, ev->u.b_conn.cid);
18170 ++ break;
18171 ++ case ISCSI_UEVENT_SEND_PDU:
18172 ++ conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid);
18173 ++ break;
18174 ++ }
18175 ++
18176 ++ if (!conn)
18177 ++ return -EINVAL;
18178 ++
18179 ++ mutex_lock(&conn->ep_mutex);
18180 ++ if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
18181 ++ mutex_unlock(&conn->ep_mutex);
18182 ++ ev->r.retcode = -ENOTCONN;
18183 ++ return 0;
18184 ++ }
18185 ++
18186 ++ switch (nlh->nlmsg_type) {
18187 ++ case ISCSI_UEVENT_BIND_CONN:
18188 ++ if (conn->ep) {
18189 ++ /*
18190 ++ * For offload boot support where iscsid is restarted
18191 ++ * during the pivot root stage, the ep will be intact
18192 ++ * here when the new iscsid instance starts up and
18193 ++ * reconnects.
18194 ++ */
18195 ++ iscsi_ep_disconnect(conn, true);
18196 ++ }
18197 ++
18198 ++ session = iscsi_session_lookup(ev->u.b_conn.sid);
18199 ++ if (!session) {
18200 ++ err = -EINVAL;
18201 ++ break;
18202 ++ }
18203 ++
18204 ++ ev->r.retcode = transport->bind_conn(session, conn,
18205 ++ ev->u.b_conn.transport_eph,
18206 ++ ev->u.b_conn.is_leading);
18207 ++ if (!ev->r.retcode)
18208 ++ conn->state = ISCSI_CONN_BOUND;
18209 ++
18210 ++ if (ev->r.retcode || !transport->ep_connect)
18211 ++ break;
18212 ++
18213 ++ ep = iscsi_lookup_endpoint(ev->u.b_conn.transport_eph);
18214 ++ if (ep) {
18215 ++ ep->conn = conn;
18216 ++ conn->ep = ep;
18217 ++ iscsi_put_endpoint(ep);
18218 ++ } else {
18219 ++ err = -ENOTCONN;
18220 ++ iscsi_cls_conn_printk(KERN_ERR, conn,
18221 ++ "Could not set ep conn binding\n");
18222 ++ }
18223 ++ break;
18224 ++ case ISCSI_UEVENT_START_CONN:
18225 ++ ev->r.retcode = transport->start_conn(conn);
18226 ++ if (!ev->r.retcode)
18227 ++ conn->state = ISCSI_CONN_UP;
18228 ++ break;
18229 ++ case ISCSI_UEVENT_SEND_PDU:
18230 ++ pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev);
18231 ++
18232 ++ if ((ev->u.send_pdu.hdr_size > pdu_len) ||
18233 ++ (ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) {
18234 ++ err = -EINVAL;
18235 ++ break;
18236 ++ }
18237 ++
18238 ++ ev->r.retcode = transport->send_pdu(conn,
18239 ++ (struct iscsi_hdr *)((char *)ev + sizeof(*ev)),
18240 ++ (char *)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size,
18241 ++ ev->u.send_pdu.data_size);
18242 ++ break;
18243 ++ default:
18244 ++ err = -ENOSYS;
18245 ++ }
18246 ++
18247 ++ mutex_unlock(&conn->ep_mutex);
18248 ++ return err;
18249 ++}
18250 +
18251 + static int
18252 + iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
18253 + {
18254 + int err = 0;
18255 + u32 portid;
18256 +- u32 pdu_len;
18257 + struct iscsi_uevent *ev = nlmsg_data(nlh);
18258 + struct iscsi_transport *transport = NULL;
18259 + struct iscsi_internal *priv;
18260 + struct iscsi_cls_session *session;
18261 +- struct iscsi_cls_conn *conn;
18262 + struct iscsi_endpoint *ep = NULL;
18263 +
18264 + if (!netlink_capable(skb, CAP_SYS_ADMIN))
18265 +@@ -3691,6 +3847,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
18266 + ev->u.c_bound_session.initial_cmdsn,
18267 + ev->u.c_bound_session.cmds_max,
18268 + ev->u.c_bound_session.queue_depth);
18269 ++ iscsi_put_endpoint(ep);
18270 + break;
18271 + case ISCSI_UEVENT_DESTROY_SESSION:
18272 + session = iscsi_session_lookup(ev->u.d_session.sid);
18273 +@@ -3715,7 +3872,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
18274 + list_del_init(&session->sess_list);
18275 + spin_unlock_irqrestore(&sesslock, flags);
18276 +
18277 +- queue_work(iscsi_destroy_workq, &session->destroy_work);
18278 ++ queue_work(system_unbound_wq, &session->destroy_work);
18279 + }
18280 + break;
18281 + case ISCSI_UEVENT_UNBIND_SESSION:
18282 +@@ -3726,89 +3883,16 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
18283 + else
18284 + err = -EINVAL;
18285 + break;
18286 +- case ISCSI_UEVENT_CREATE_CONN:
18287 +- err = iscsi_if_create_conn(transport, ev);
18288 +- break;
18289 +- case ISCSI_UEVENT_DESTROY_CONN:
18290 +- err = iscsi_if_destroy_conn(transport, ev);
18291 +- break;
18292 +- case ISCSI_UEVENT_BIND_CONN:
18293 +- session = iscsi_session_lookup(ev->u.b_conn.sid);
18294 +- conn = iscsi_conn_lookup(ev->u.b_conn.sid, ev->u.b_conn.cid);
18295 +-
18296 +- if (conn && conn->ep)
18297 +- iscsi_if_ep_disconnect(transport, conn->ep->id);
18298 +-
18299 +- if (!session || !conn) {
18300 +- err = -EINVAL;
18301 +- break;
18302 +- }
18303 +-
18304 +- mutex_lock(&conn_mutex);
18305 +- ev->r.retcode = transport->bind_conn(session, conn,
18306 +- ev->u.b_conn.transport_eph,
18307 +- ev->u.b_conn.is_leading);
18308 +- if (!ev->r.retcode)
18309 +- conn->state = ISCSI_CONN_BOUND;
18310 +- mutex_unlock(&conn_mutex);
18311 +-
18312 +- if (ev->r.retcode || !transport->ep_connect)
18313 +- break;
18314 +-
18315 +- ep = iscsi_lookup_endpoint(ev->u.b_conn.transport_eph);
18316 +- if (ep) {
18317 +- ep->conn = conn;
18318 +-
18319 +- mutex_lock(&conn->ep_mutex);
18320 +- conn->ep = ep;
18321 +- mutex_unlock(&conn->ep_mutex);
18322 +- } else
18323 +- iscsi_cls_conn_printk(KERN_ERR, conn,
18324 +- "Could not set ep conn "
18325 +- "binding\n");
18326 +- break;
18327 + case ISCSI_UEVENT_SET_PARAM:
18328 + err = iscsi_set_param(transport, ev);
18329 + break;
18330 +- case ISCSI_UEVENT_START_CONN:
18331 +- conn = iscsi_conn_lookup(ev->u.start_conn.sid, ev->u.start_conn.cid);
18332 +- if (conn) {
18333 +- mutex_lock(&conn_mutex);
18334 +- ev->r.retcode = transport->start_conn(conn);
18335 +- if (!ev->r.retcode)
18336 +- conn->state = ISCSI_CONN_UP;
18337 +- mutex_unlock(&conn_mutex);
18338 +- }
18339 +- else
18340 +- err = -EINVAL;
18341 +- break;
18342 ++ case ISCSI_UEVENT_CREATE_CONN:
18343 ++ case ISCSI_UEVENT_DESTROY_CONN:
18344 + case ISCSI_UEVENT_STOP_CONN:
18345 +- conn = iscsi_conn_lookup(ev->u.stop_conn.sid, ev->u.stop_conn.cid);
18346 +- if (conn)
18347 +- iscsi_if_stop_conn(conn, ev->u.stop_conn.flag);
18348 +- else
18349 +- err = -EINVAL;
18350 +- break;
18351 ++ case ISCSI_UEVENT_START_CONN:
18352 ++ case ISCSI_UEVENT_BIND_CONN:
18353 + case ISCSI_UEVENT_SEND_PDU:
18354 +- pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev);
18355 +-
18356 +- if ((ev->u.send_pdu.hdr_size > pdu_len) ||
18357 +- (ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) {
18358 +- err = -EINVAL;
18359 +- break;
18360 +- }
18361 +-
18362 +- conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid);
18363 +- if (conn) {
18364 +- mutex_lock(&conn_mutex);
18365 +- ev->r.retcode = transport->send_pdu(conn,
18366 +- (struct iscsi_hdr*)((char*)ev + sizeof(*ev)),
18367 +- (char*)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size,
18368 +- ev->u.send_pdu.data_size);
18369 +- mutex_unlock(&conn_mutex);
18370 +- }
18371 +- else
18372 +- err = -EINVAL;
18373 ++ err = iscsi_if_transport_conn(transport, nlh);
18374 + break;
18375 + case ISCSI_UEVENT_GET_STATS:
18376 + err = iscsi_if_get_stats(transport, nlh);
18377 +@@ -4656,6 +4740,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
18378 + int err;
18379 +
18380 + BUG_ON(!tt);
18381 ++ WARN_ON(tt->ep_disconnect && !tt->unbind_conn);
18382 +
18383 + priv = iscsi_if_transport_lookup(tt);
18384 + if (priv)
18385 +@@ -4810,10 +4895,10 @@ static __init int iscsi_transport_init(void)
18386 + goto release_nls;
18387 + }
18388 +
18389 +- iscsi_destroy_workq = alloc_workqueue("%s",
18390 +- WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
18391 +- 1, "iscsi_destroy");
18392 +- if (!iscsi_destroy_workq) {
18393 ++ iscsi_conn_cleanup_workq = alloc_workqueue("%s",
18394 ++ WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND, 0,
18395 ++ "iscsi_conn_cleanup");
18396 ++ if (!iscsi_conn_cleanup_workq) {
18397 + err = -ENOMEM;
18398 + goto destroy_wq;
18399 + }
18400 +@@ -4843,7 +4928,7 @@ unregister_transport_class:
18401 +
18402 + static void __exit iscsi_transport_exit(void)
18403 + {
18404 +- destroy_workqueue(iscsi_destroy_workq);
18405 ++ destroy_workqueue(iscsi_conn_cleanup_workq);
18406 + destroy_workqueue(iscsi_eh_timer_workq);
18407 + netlink_kernel_release(nls);
18408 + bus_unregister(&iscsi_flashnode_bus);
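
The scsi_transport_iscsi.c rework replaces the single global stop_conn work with a per-connection cleanup_work and uses test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, ...) so that exactly one of the kernel error path and userspace owns a given cleanup run; the loser flushes the winner's work instead of racing it. A heavily simplified pthread sketch of that claim-or-wait handoff, far below the real driver's state machine:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_flag cleanup_claimed = ATOMIC_FLAG_INIT;
    static pthread_t cleanup_thread;
    static bool worker_started;

    static void *cleanup_fn(void *arg)
    {
        (void)arg;
        puts("kernel-side cleanup running");
        return NULL;
    }

    /* Error path: claim the bit and kick the worker, exactly once. */
    static void conn_error(void)
    {
        if (!atomic_flag_test_and_set(&cleanup_claimed)) {
            pthread_create(&cleanup_thread, NULL, cleanup_fn, NULL);
            worker_started = true;
        }
    }

    /* Userspace stop: either claim cleanup yourself or wait out the worker. */
    static void user_stop(void)
    {
        if (!atomic_flag_test_and_set(&cleanup_claimed)) {
            puts("userspace owns cleanup");
        } else if (worker_started) {
            pthread_join(cleanup_thread, NULL);   /* "flush" the kernel's run */
            puts("userspace waited for kernel cleanup");
        }
        atomic_flag_clear(&cleanup_claimed);
    }

    int main(void)
    {
        conn_error();   /* the kernel notices the failure first */
        user_stop();    /* iscsid arrives later and must not race it */
        return 0;
    }
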
18409 +diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
18410 +index 1eaedaaba0944..1a18308f4ef4f 100644
18411 +--- a/drivers/soundwire/stream.c
18412 ++++ b/drivers/soundwire/stream.c
18413 +@@ -422,7 +422,6 @@ static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
18414 + struct completion *port_ready;
18415 + struct sdw_dpn_prop *dpn_prop;
18416 + struct sdw_prepare_ch prep_ch;
18417 +- unsigned int time_left;
18418 + bool intr = false;
18419 + int ret = 0, val;
18420 + u32 addr;
18421 +@@ -479,15 +478,15 @@ static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
18422 +
18423 + /* Wait for completion on port ready */
18424 + port_ready = &s_rt->slave->port_ready[prep_ch.num];
18425 +- time_left = wait_for_completion_timeout(port_ready,
18426 +- msecs_to_jiffies(dpn_prop->ch_prep_timeout));
18427 ++ wait_for_completion_timeout(port_ready,
18428 ++ msecs_to_jiffies(dpn_prop->ch_prep_timeout));
18429 +
18430 + val = sdw_read(s_rt->slave, SDW_DPN_PREPARESTATUS(p_rt->num));
18431 +- val &= p_rt->ch_mask;
18432 +- if (!time_left || val) {
18433 ++ if ((val < 0) || (val & p_rt->ch_mask)) {
18434 ++ ret = (val < 0) ? val : -ETIMEDOUT;
18435 + dev_err(&s_rt->slave->dev,
18436 +- "Chn prep failed for port:%d\n", prep_ch.num);
18437 +- return -ETIMEDOUT;
18438 ++ "Chn prep failed for port %d: %d\n", prep_ch.num, ret);
18439 ++ return ret;
18440 + }
18441 + }
18442 +
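
The soundwire fix stops keying the error off wait_for_completion_timeout() alone: after the wait it reads the prepare-status register back, propagates a negative read as the real error, and reports -ETIMEDOUT only when channel bits are still pending. The shape of that decision, with a simulated register read standing in for sdw_read():

    #include <errno.h>
    #include <stdio.h>

    /* Simulated register read: negative means a bus error, otherwise the
     * still-pending channel bits. */
    static int read_prepare_status(int simulated)
    {
        return simulated;
    }

    static int wait_port_ready(int simulated_status, unsigned int ch_mask)
    {
        /* (the timed wait would happen here; its result is ignored) */
        int val = read_prepare_status(simulated_status);

        if (val < 0)
            return val;              /* propagate the read error itself */
        if (val & ch_mask)
            return -ETIMEDOUT;       /* channels still pending: timed out */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", wait_port_ready(0x0, 0x3));   /* 0: prepared */
        printf("%d\n", wait_port_ready(0x1, 0x3));   /* -ETIMEDOUT */
        printf("%d\n", wait_port_ready(-5, 0x3));    /* -5: bus error */
        return 0;
    }
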
18443 +diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
18444 +index f1cf2232f0b5e..4d4f77a186a98 100644
18445 +--- a/drivers/spi/spi-loopback-test.c
18446 ++++ b/drivers/spi/spi-loopback-test.c
18447 +@@ -875,7 +875,7 @@ static int spi_test_run_iter(struct spi_device *spi,
18448 + test.transfers[i].len = len;
18449 + if (test.transfers[i].tx_buf)
18450 + test.transfers[i].tx_buf += tx_off;
18451 +- if (test.transfers[i].tx_buf)
18452 ++ if (test.transfers[i].rx_buf)
18453 + test.transfers[i].rx_buf += rx_off;
18454 + }
18455 +
18456 +diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
18457 +index ecba6b4a5d85d..b2c4621db34d7 100644
18458 +--- a/drivers/spi/spi-meson-spicc.c
18459 ++++ b/drivers/spi/spi-meson-spicc.c
18460 +@@ -725,7 +725,7 @@ static int meson_spicc_probe(struct platform_device *pdev)
18461 + ret = clk_prepare_enable(spicc->pclk);
18462 + if (ret) {
18463 + dev_err(&pdev->dev, "pclk clock enable failed\n");
18464 +- goto out_master;
18465 ++ goto out_core_clk;
18466 + }
18467 +
18468 + device_reset_optional(&pdev->dev);
18469 +@@ -752,7 +752,7 @@ static int meson_spicc_probe(struct platform_device *pdev)
18470 + ret = meson_spicc_clk_init(spicc);
18471 + if (ret) {
18472 + dev_err(&pdev->dev, "clock registration failed\n");
18473 +- goto out_master;
18474 ++ goto out_clk;
18475 + }
18476 +
18477 + ret = devm_spi_register_master(&pdev->dev, master);
18478 +@@ -764,9 +764,11 @@ static int meson_spicc_probe(struct platform_device *pdev)
18479 + return 0;
18480 +
18481 + out_clk:
18482 +- clk_disable_unprepare(spicc->core);
18483 + clk_disable_unprepare(spicc->pclk);
18484 +
18485 ++out_core_clk:
18486 ++ clk_disable_unprepare(spicc->core);
18487 ++
18488 + out_master:
18489 + spi_master_put(master);
18490 +
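
The spi-meson-spicc fix adds an out_core_clk label so each failure point jumps to a label that undoes exactly what has been acquired so far, with the labels falling through in reverse acquisition order. A generic sketch of the idiom:

    #include <stdio.h>

    static int enable(const char *what, int fail)
    {
        if (fail) { printf("enable %s failed\n", what); return -1; }
        printf("enable %s\n", what);
        return 0;
    }

    static void disable(const char *what) { printf("disable %s\n", what); }

    static int probe(int fail_pclk, int fail_late)
    {
        int ret;

        ret = enable("core", 0);
        if (ret)
            goto out;

        ret = enable("pclk", fail_pclk);
        if (ret)
            goto out_core;           /* only core is held at this point */

        if (fail_late) {
            ret = -1;
            goto out_pclk;           /* both clocks held: undo both */
        }
        return 0;

    out_pclk:
        disable("pclk");
    out_core:
        disable("core");             /* labels fall through in reverse order */
    out:
        return ret;
    }

    int main(void)
    {
        probe(0, 1);   /* late failure: pclk then core released, in order */
        return 0;
    }
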
18491 +diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
18492 +index 7062f29022539..f104470605b38 100644
18493 +--- a/drivers/spi/spi-omap-100k.c
18494 ++++ b/drivers/spi/spi-omap-100k.c
18495 +@@ -241,7 +241,7 @@ static int omap1_spi100k_setup_transfer(struct spi_device *spi,
18496 + else
18497 + word_len = spi->bits_per_word;
18498 +
18499 +- if (spi->bits_per_word > 32)
18500 ++ if (word_len > 32)
18501 + return -EINVAL;
18502 + cs->word_len = word_len;
18503 +
18504 +diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
18505 +index cc8401980125d..23ad052528dbe 100644
18506 +--- a/drivers/spi/spi-sun6i.c
18507 ++++ b/drivers/spi/spi-sun6i.c
18508 +@@ -379,6 +379,10 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
18509 + }
18510 +
18511 + sun6i_spi_write(sspi, SUN6I_CLK_CTL_REG, reg);
18512 ++ /* Finally enable the bus - doing so before might raise SCK to HIGH */
18513 ++ reg = sun6i_spi_read(sspi, SUN6I_GBL_CTL_REG);
18514 ++ reg |= SUN6I_GBL_CTL_BUS_ENABLE;
18515 ++ sun6i_spi_write(sspi, SUN6I_GBL_CTL_REG, reg);
18516 +
18517 + /* Setup the transfer now... */
18518 + if (sspi->tx_buf)
18519 +@@ -504,7 +508,7 @@ static int sun6i_spi_runtime_resume(struct device *dev)
18520 + }
18521 +
18522 + sun6i_spi_write(sspi, SUN6I_GBL_CTL_REG,
18523 +- SUN6I_GBL_CTL_BUS_ENABLE | SUN6I_GBL_CTL_MASTER | SUN6I_GBL_CTL_TP);
18524 ++ SUN6I_GBL_CTL_MASTER | SUN6I_GBL_CTL_TP);
18525 +
18526 + return 0;
18527 +
18528 +diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
18529 +index b8870784fc6ef..8c4615b763398 100644
18530 +--- a/drivers/spi/spi-topcliff-pch.c
18531 ++++ b/drivers/spi/spi-topcliff-pch.c
18532 +@@ -580,8 +580,10 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
18533 + data->pkt_tx_buff = kzalloc(size, GFP_KERNEL);
18534 + if (data->pkt_tx_buff != NULL) {
18535 + data->pkt_rx_buff = kzalloc(size, GFP_KERNEL);
18536 +- if (!data->pkt_rx_buff)
18537 ++ if (!data->pkt_rx_buff) {
18538 + kfree(data->pkt_tx_buff);
18539 ++ data->pkt_tx_buff = NULL;
18540 ++ }
18541 + }
18542 +
18543 + if (!data->pkt_rx_buff) {
18544 +diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
18545 +index e353b7a9e54eb..56c173869d975 100644
18546 +--- a/drivers/spi/spi.c
18547 ++++ b/drivers/spi/spi.c
18548 +@@ -2057,6 +2057,7 @@ of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
18549 + /* Store a pointer to the node in the device structure */
18550 + of_node_get(nc);
18551 + spi->dev.of_node = nc;
18552 ++ spi->dev.fwnode = of_fwnode_handle(nc);
18553 +
18554 + /* Register the new device */
18555 + rc = spi_add_device(spi);
18556 +@@ -2621,9 +2622,10 @@ static int spi_get_gpio_descs(struct spi_controller *ctlr)
18557 + native_cs_mask |= BIT(i);
18558 + }
18559 +
18560 +- ctlr->unused_native_cs = ffz(native_cs_mask);
18561 +- if (num_cs_gpios && ctlr->max_native_cs &&
18562 +- ctlr->unused_native_cs >= ctlr->max_native_cs) {
18563 ++ ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
18564 ++
18565 ++ if ((ctlr->flags & SPI_MASTER_GPIO_SS) && num_cs_gpios &&
18566 ++ ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
18567 + dev_err(dev, "No unused native chip select available\n");
18568 + return -EINVAL;
18569 + }
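
The spi.c hunk replaces ffz(mask) with ffs(~mask) - 1; both yield the index of the lowest clear bit, and the ffs() spelling has a portable userspace equivalent (POSIX ffs() is 1-based and returns 0 when no bit is set, hence the -1). A quick check:

    #include <stdio.h>
    #include <strings.h>    /* POSIX ffs() */

    int main(void)
    {
        unsigned int masks[] = { 0x0, 0x1, 0x7, 0xb };

        for (size_t i = 0; i < sizeof(masks) / sizeof(masks[0]); i++) {
            /* lowest clear bit: invert, then lowest set bit (1-based) */
            int unused = ffs((int)~masks[i]) - 1;
            printf("mask 0x%x -> first free bit %d\n", masks[i], unused);
        }
        return 0;
    }

This prints bits 0, 1, 3, and 2 respectively; an all-ones mask would yield -1, signalling that no chip select is free.
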
18570 +diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c
18571 +index f49ab1aa2149a..4161e5d1f276e 100644
18572 +--- a/drivers/ssb/scan.c
18573 ++++ b/drivers/ssb/scan.c
18574 +@@ -325,6 +325,7 @@ int ssb_bus_scan(struct ssb_bus *bus,
18575 + if (bus->nr_devices > ARRAY_SIZE(bus->devices)) {
18576 + pr_err("More than %d ssb cores found (%d)\n",
18577 + SSB_MAX_NR_CORES, bus->nr_devices);
18578 ++ err = -EINVAL;
18579 + goto err_unmap;
18580 + }
18581 + if (bus->bustype == SSB_BUSTYPE_SSB) {
18582 +diff --git a/drivers/ssb/sdio.c b/drivers/ssb/sdio.c
18583 +index 7fe0afb42234f..66c5c2169704b 100644
18584 +--- a/drivers/ssb/sdio.c
18585 ++++ b/drivers/ssb/sdio.c
18586 +@@ -411,7 +411,6 @@ static void ssb_sdio_block_write(struct ssb_device *dev, const void *buffer,
18587 + sdio_claim_host(bus->host_sdio);
18588 + if (unlikely(ssb_sdio_switch_core(bus, dev))) {
18589 + error = -EIO;
18590 +- memset((void *)buffer, 0xff, count);
18591 + goto err_out;
18592 + }
18593 + offset |= bus->sdio_sbaddr & 0xffff;
18594 +diff --git a/drivers/staging/fbtft/fb_agm1264k-fl.c b/drivers/staging/fbtft/fb_agm1264k-fl.c
18595 +index eeeeec97ad278..b545c2ca80a41 100644
18596 +--- a/drivers/staging/fbtft/fb_agm1264k-fl.c
18597 ++++ b/drivers/staging/fbtft/fb_agm1264k-fl.c
18598 +@@ -84,9 +84,9 @@ static void reset(struct fbtft_par *par)
18599 +
18600 + dev_dbg(par->info->device, "%s()\n", __func__);
18601 +
18602 +- gpiod_set_value(par->gpio.reset, 0);
18603 +- udelay(20);
18604 + gpiod_set_value(par->gpio.reset, 1);
18605 ++ udelay(20);
18606 ++ gpiod_set_value(par->gpio.reset, 0);
18607 + mdelay(120);
18608 + }
18609 +
18610 +@@ -194,12 +194,12 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
18611 + /* select chip */
18612 + if (*buf) {
18613 + /* cs1 */
18614 +- gpiod_set_value(par->CS0, 1);
18615 +- gpiod_set_value(par->CS1, 0);
18616 +- } else {
18617 +- /* cs0 */
18618 + gpiod_set_value(par->CS0, 0);
18619 + gpiod_set_value(par->CS1, 1);
18620 ++ } else {
18621 ++ /* cs0 */
18622 ++ gpiod_set_value(par->CS0, 1);
18623 ++ gpiod_set_value(par->CS1, 0);
18624 + }
18625 +
18626 + gpiod_set_value(par->RS, 0); /* RS->0 (command mode) */
18627 +@@ -397,8 +397,8 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
18628 + }
18629 + kfree(convert_buf);
18630 +
18631 +- gpiod_set_value(par->CS0, 1);
18632 +- gpiod_set_value(par->CS1, 1);
18633 ++ gpiod_set_value(par->CS0, 0);
18634 ++ gpiod_set_value(par->CS1, 0);
18635 +
18636 + return ret;
18637 + }
18638 +@@ -419,10 +419,10 @@ static int write(struct fbtft_par *par, void *buf, size_t len)
18639 + for (i = 0; i < 8; ++i)
18640 + gpiod_set_value(par->gpio.db[i], data & (1 << i));
18641 + /* set E */
18642 +- gpiod_set_value(par->EPIN, 1);
18643 ++ gpiod_set_value(par->EPIN, 0);
18644 + udelay(5);
18645 + /* unset E - write */
18646 +- gpiod_set_value(par->EPIN, 0);
18647 ++ gpiod_set_value(par->EPIN, 1);
18648 + udelay(1);
18649 + }
18650 +
18651 +diff --git a/drivers/staging/fbtft/fb_bd663474.c b/drivers/staging/fbtft/fb_bd663474.c
18652 +index e2c7646588f8c..1629c2c440a97 100644
18653 +--- a/drivers/staging/fbtft/fb_bd663474.c
18654 ++++ b/drivers/staging/fbtft/fb_bd663474.c
18655 +@@ -12,7 +12,6 @@
18656 + #include <linux/module.h>
18657 + #include <linux/kernel.h>
18658 + #include <linux/init.h>
18659 +-#include <linux/gpio/consumer.h>
18660 + #include <linux/delay.h>
18661 +
18662 + #include "fbtft.h"
18663 +@@ -24,9 +23,6 @@
18664 +
18665 + static int init_display(struct fbtft_par *par)
18666 + {
18667 +- if (par->gpio.cs)
18668 +- gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
18669 +-
18670 + par->fbtftops.reset(par);
18671 +
18672 + /* Initialization sequence from Lib_UTFT */
18673 +diff --git a/drivers/staging/fbtft/fb_ili9163.c b/drivers/staging/fbtft/fb_ili9163.c
18674 +index 05648c3ffe474..6582a2c90aafc 100644
18675 +--- a/drivers/staging/fbtft/fb_ili9163.c
18676 ++++ b/drivers/staging/fbtft/fb_ili9163.c
18677 +@@ -11,7 +11,6 @@
18678 + #include <linux/module.h>
18679 + #include <linux/kernel.h>
18680 + #include <linux/init.h>
18681 +-#include <linux/gpio/consumer.h>
18682 + #include <linux/delay.h>
18683 + #include <video/mipi_display.h>
18684 +
18685 +@@ -77,9 +76,6 @@ static int init_display(struct fbtft_par *par)
18686 + {
18687 + par->fbtftops.reset(par);
18688 +
18689 +- if (par->gpio.cs)
18690 +- gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
18691 +-
18692 + write_reg(par, MIPI_DCS_SOFT_RESET); /* software reset */
18693 + mdelay(500);
18694 + write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE); /* exit sleep */
18695 +diff --git a/drivers/staging/fbtft/fb_ili9320.c b/drivers/staging/fbtft/fb_ili9320.c
18696 +index f2e72d14431db..a8f4c618b754c 100644
18697 +--- a/drivers/staging/fbtft/fb_ili9320.c
18698 ++++ b/drivers/staging/fbtft/fb_ili9320.c
18699 +@@ -8,7 +8,6 @@
18700 + #include <linux/module.h>
18701 + #include <linux/kernel.h>
18702 + #include <linux/init.h>
18703 +-#include <linux/gpio/consumer.h>
18704 + #include <linux/spi/spi.h>
18705 + #include <linux/delay.h>
18706 +
18707 +diff --git a/drivers/staging/fbtft/fb_ili9325.c b/drivers/staging/fbtft/fb_ili9325.c
18708 +index c9aa4cb431236..16d3b17ca2798 100644
18709 +--- a/drivers/staging/fbtft/fb_ili9325.c
18710 ++++ b/drivers/staging/fbtft/fb_ili9325.c
18711 +@@ -10,7 +10,6 @@
18712 + #include <linux/module.h>
18713 + #include <linux/kernel.h>
18714 + #include <linux/init.h>
18715 +-#include <linux/gpio/consumer.h>
18716 + #include <linux/delay.h>
18717 +
18718 + #include "fbtft.h"
18719 +@@ -85,9 +84,6 @@ static int init_display(struct fbtft_par *par)
18720 + {
18721 + par->fbtftops.reset(par);
18722 +
18723 +- if (par->gpio.cs)
18724 +- gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
18725 +-
18726 + bt &= 0x07;
18727 + vc &= 0x07;
18728 + vrh &= 0x0f;
18729 +diff --git a/drivers/staging/fbtft/fb_ili9340.c b/drivers/staging/fbtft/fb_ili9340.c
18730 +index 415183c7054a8..704236bcaf3ff 100644
18731 +--- a/drivers/staging/fbtft/fb_ili9340.c
18732 ++++ b/drivers/staging/fbtft/fb_ili9340.c
18733 +@@ -8,7 +8,6 @@
18734 + #include <linux/module.h>
18735 + #include <linux/kernel.h>
18736 + #include <linux/init.h>
18737 +-#include <linux/gpio/consumer.h>
18738 + #include <linux/delay.h>
18739 + #include <video/mipi_display.h>
18740 +
18741 +diff --git a/drivers/staging/fbtft/fb_s6d1121.c b/drivers/staging/fbtft/fb_s6d1121.c
18742 +index 8c7de32903434..62f27172f8449 100644
18743 +--- a/drivers/staging/fbtft/fb_s6d1121.c
18744 ++++ b/drivers/staging/fbtft/fb_s6d1121.c
18745 +@@ -12,7 +12,6 @@
18746 + #include <linux/module.h>
18747 + #include <linux/kernel.h>
18748 + #include <linux/init.h>
18749 +-#include <linux/gpio/consumer.h>
18750 + #include <linux/delay.h>
18751 +
18752 + #include "fbtft.h"
18753 +@@ -29,9 +28,6 @@ static int init_display(struct fbtft_par *par)
18754 + {
18755 + par->fbtftops.reset(par);
18756 +
18757 +- if (par->gpio.cs)
18758 +- gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
18759 +-
18760 + /* Initialization sequence from Lib_UTFT */
18761 +
18762 + write_reg(par, 0x0011, 0x2004);
18763 +diff --git a/drivers/staging/fbtft/fb_sh1106.c b/drivers/staging/fbtft/fb_sh1106.c
18764 +index 6f7249493ea3b..7b9ab39e1c1a8 100644
18765 +--- a/drivers/staging/fbtft/fb_sh1106.c
18766 ++++ b/drivers/staging/fbtft/fb_sh1106.c
18767 +@@ -9,7 +9,6 @@
18768 + #include <linux/module.h>
18769 + #include <linux/kernel.h>
18770 + #include <linux/init.h>
18771 +-#include <linux/gpio/consumer.h>
18772 + #include <linux/delay.h>
18773 +
18774 + #include "fbtft.h"
18775 +diff --git a/drivers/staging/fbtft/fb_ssd1289.c b/drivers/staging/fbtft/fb_ssd1289.c
18776 +index 7a3fe022cc69d..f27bab38b3ec4 100644
18777 +--- a/drivers/staging/fbtft/fb_ssd1289.c
18778 ++++ b/drivers/staging/fbtft/fb_ssd1289.c
18779 +@@ -10,7 +10,6 @@
18780 + #include <linux/module.h>
18781 + #include <linux/kernel.h>
18782 + #include <linux/init.h>
18783 +-#include <linux/gpio/consumer.h>
18784 +
18785 + #include "fbtft.h"
18786 +
18787 +@@ -28,9 +27,6 @@ static int init_display(struct fbtft_par *par)
18788 + {
18789 + par->fbtftops.reset(par);
18790 +
18791 +- if (par->gpio.cs)
18792 +- gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
18793 +-
18794 + write_reg(par, 0x00, 0x0001);
18795 + write_reg(par, 0x03, 0xA8A4);
18796 + write_reg(par, 0x0C, 0x0000);
18797 +diff --git a/drivers/staging/fbtft/fb_ssd1325.c b/drivers/staging/fbtft/fb_ssd1325.c
18798 +index 8a3140d41d8bb..796a2ac3e1948 100644
18799 +--- a/drivers/staging/fbtft/fb_ssd1325.c
18800 ++++ b/drivers/staging/fbtft/fb_ssd1325.c
18801 +@@ -35,8 +35,6 @@ static int init_display(struct fbtft_par *par)
18802 + {
18803 + par->fbtftops.reset(par);
18804 +
18805 +- gpiod_set_value(par->gpio.cs, 0);
18806 +-
18807 + write_reg(par, 0xb3);
18808 + write_reg(par, 0xf0);
18809 + write_reg(par, 0xae);
18810 +diff --git a/drivers/staging/fbtft/fb_ssd1331.c b/drivers/staging/fbtft/fb_ssd1331.c
18811 +index 37622c9462aa7..ec5eced7f8cbd 100644
18812 +--- a/drivers/staging/fbtft/fb_ssd1331.c
18813 ++++ b/drivers/staging/fbtft/fb_ssd1331.c
18814 +@@ -81,8 +81,7 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
18815 + va_start(args, len);
18816 +
18817 + *buf = (u8)va_arg(args, unsigned int);
18818 +- if (par->gpio.dc)
18819 +- gpiod_set_value(par->gpio.dc, 0);
18820 ++ gpiod_set_value(par->gpio.dc, 0);
18821 + ret = par->fbtftops.write(par, par->buf, sizeof(u8));
18822 + if (ret < 0) {
18823 + va_end(args);
18824 +@@ -104,8 +103,7 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
18825 + return;
18826 + }
18827 + }
18828 +- if (par->gpio.dc)
18829 +- gpiod_set_value(par->gpio.dc, 1);
18830 ++ gpiod_set_value(par->gpio.dc, 1);
18831 + va_end(args);
18832 + }
18833 +
18834 +diff --git a/drivers/staging/fbtft/fb_ssd1351.c b/drivers/staging/fbtft/fb_ssd1351.c
18835 +index 900b28d826b28..cf263a58a1489 100644
18836 +--- a/drivers/staging/fbtft/fb_ssd1351.c
18837 ++++ b/drivers/staging/fbtft/fb_ssd1351.c
18838 +@@ -2,7 +2,6 @@
18839 + #include <linux/module.h>
18840 + #include <linux/kernel.h>
18841 + #include <linux/init.h>
18842 +-#include <linux/gpio/consumer.h>
18843 + #include <linux/spi/spi.h>
18844 + #include <linux/delay.h>
18845 +
18846 +diff --git a/drivers/staging/fbtft/fb_upd161704.c b/drivers/staging/fbtft/fb_upd161704.c
18847 +index c77832ae5e5ba..c680160d63807 100644
18848 +--- a/drivers/staging/fbtft/fb_upd161704.c
18849 ++++ b/drivers/staging/fbtft/fb_upd161704.c
18850 +@@ -12,7 +12,6 @@
18851 + #include <linux/module.h>
18852 + #include <linux/kernel.h>
18853 + #include <linux/init.h>
18854 +-#include <linux/gpio/consumer.h>
18855 + #include <linux/delay.h>
18856 +
18857 + #include "fbtft.h"
18858 +@@ -26,9 +25,6 @@ static int init_display(struct fbtft_par *par)
18859 + {
18860 + par->fbtftops.reset(par);
18861 +
18862 +- if (par->gpio.cs)
18863 +- gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
18864 +-
18865 + /* Initialization sequence from Lib_UTFT */
18866 +
18867 + /* register reset */
18868 +diff --git a/drivers/staging/fbtft/fb_watterott.c b/drivers/staging/fbtft/fb_watterott.c
18869 +index 76b25df376b8f..a57e1f4feef35 100644
18870 +--- a/drivers/staging/fbtft/fb_watterott.c
18871 ++++ b/drivers/staging/fbtft/fb_watterott.c
18872 +@@ -8,7 +8,6 @@
18873 + #include <linux/module.h>
18874 + #include <linux/kernel.h>
18875 + #include <linux/init.h>
18876 +-#include <linux/gpio/consumer.h>
18877 + #include <linux/delay.h>
18878 +
18879 + #include "fbtft.h"
18880 +diff --git a/drivers/staging/fbtft/fbtft-bus.c b/drivers/staging/fbtft/fbtft-bus.c
18881 +index 63c65dd67b175..3d422bc116411 100644
18882 +--- a/drivers/staging/fbtft/fbtft-bus.c
18883 ++++ b/drivers/staging/fbtft/fbtft-bus.c
18884 +@@ -135,8 +135,7 @@ int fbtft_write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len)
18885 + remain = len / 2;
18886 + vmem16 = (u16 *)(par->info->screen_buffer + offset);
18887 +
18888 +- if (par->gpio.dc)
18889 +- gpiod_set_value(par->gpio.dc, 1);
18890 ++ gpiod_set_value(par->gpio.dc, 1);
18891 +
18892 + /* non buffered write */
18893 + if (!par->txbuf.buf)
18894 +diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
18895 +index 4f362dad4436a..3723269890d5f 100644
18896 +--- a/drivers/staging/fbtft/fbtft-core.c
18897 ++++ b/drivers/staging/fbtft/fbtft-core.c
18898 +@@ -38,8 +38,7 @@ int fbtft_write_buf_dc(struct fbtft_par *par, void *buf, size_t len, int dc)
18899 + {
18900 + int ret;
18901 +
18902 +- if (par->gpio.dc)
18903 +- gpiod_set_value(par->gpio.dc, dc);
18904 ++ gpiod_set_value(par->gpio.dc, dc);
18905 +
18906 + ret = par->fbtftops.write(par, buf, len);
18907 + if (ret < 0)
18908 +@@ -76,20 +75,16 @@ static int fbtft_request_one_gpio(struct fbtft_par *par,
18909 + struct gpio_desc **gpiop)
18910 + {
18911 + struct device *dev = par->info->device;
18912 +- int ret = 0;
18913 +
18914 + *gpiop = devm_gpiod_get_index_optional(dev, name, index,
18915 +- GPIOD_OUT_HIGH);
18916 +- if (IS_ERR(*gpiop)) {
18917 +- ret = PTR_ERR(*gpiop);
18918 +- dev_err(dev,
18919 +- "Failed to request %s GPIO: %d\n", name, ret);
18920 +- return ret;
18921 +- }
18922 ++ GPIOD_OUT_LOW);
18923 ++ if (IS_ERR(*gpiop))
18924 ++ return dev_err_probe(dev, PTR_ERR(*gpiop), "Failed to request %s GPIO\n", name);
18925 ++
18926 + fbtft_par_dbg(DEBUG_REQUEST_GPIOS, par, "%s: '%s' GPIO\n",
18927 + __func__, name);
18928 +
18929 +- return ret;
18930 ++ return 0;
18931 + }
18932 +
18933 + static int fbtft_request_gpios(struct fbtft_par *par)
18934 +@@ -226,11 +221,15 @@ static void fbtft_reset(struct fbtft_par *par)
18935 + {
18936 + if (!par->gpio.reset)
18937 + return;
18938 ++
18939 + fbtft_par_dbg(DEBUG_RESET, par, "%s()\n", __func__);
18940 ++
18941 + gpiod_set_value_cansleep(par->gpio.reset, 1);
18942 + usleep_range(20, 40);
18943 + gpiod_set_value_cansleep(par->gpio.reset, 0);
18944 + msleep(120);
18945 ++
18946 ++ gpiod_set_value_cansleep(par->gpio.cs, 1); /* Activate chip */
18947 + }
18948 +
18949 + static void fbtft_update_display(struct fbtft_par *par, unsigned int start_line,
18950 +@@ -922,8 +921,6 @@ static int fbtft_init_display_from_property(struct fbtft_par *par)
18951 + goto out_free;
18952 +
18953 + par->fbtftops.reset(par);
18954 +- if (par->gpio.cs)
18955 +- gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
18956 +
18957 + index = -1;
18958 + val = values[++index];
18959 +@@ -1018,8 +1015,6 @@ int fbtft_init_display(struct fbtft_par *par)
18960 + }
18961 +
18962 + par->fbtftops.reset(par);
18963 +- if (par->gpio.cs)
18964 +- gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
18965 +
18966 + i = 0;
18967 + while (i < FBTFT_MAX_INIT_SEQUENCE) {
18968 +diff --git a/drivers/staging/fbtft/fbtft-io.c b/drivers/staging/fbtft/fbtft-io.c
18969 +index 0863d257d7620..de1904a443c27 100644
18970 +--- a/drivers/staging/fbtft/fbtft-io.c
18971 ++++ b/drivers/staging/fbtft/fbtft-io.c
18972 +@@ -142,12 +142,12 @@ int fbtft_write_gpio8_wr(struct fbtft_par *par, void *buf, size_t len)
18973 + data = *(u8 *)buf;
18974 +
18975 + /* Start writing by pulling down /WR */
18976 +- gpiod_set_value(par->gpio.wr, 0);
18977 ++ gpiod_set_value(par->gpio.wr, 1);
18978 +
18979 + /* Set data */
18980 + #ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
18981 + if (data == prev_data) {
18982 +- gpiod_set_value(par->gpio.wr, 0); /* used as delay */
18983 ++ gpiod_set_value(par->gpio.wr, 1); /* used as delay */
18984 + } else {
18985 + for (i = 0; i < 8; i++) {
18986 + if ((data & 1) != (prev_data & 1))
18987 +@@ -165,7 +165,7 @@ int fbtft_write_gpio8_wr(struct fbtft_par *par, void *buf, size_t len)
18988 + #endif
18989 +
18990 + /* Pullup /WR */
18991 +- gpiod_set_value(par->gpio.wr, 1);
18992 ++ gpiod_set_value(par->gpio.wr, 0);
18993 +
18994 + #ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
18995 + prev_data = *(u8 *)buf;
18996 +@@ -192,12 +192,12 @@ int fbtft_write_gpio16_wr(struct fbtft_par *par, void *buf, size_t len)
18997 + data = *(u16 *)buf;
18998 +
18999 + /* Start writing by pulling down /WR */
19000 +- gpiod_set_value(par->gpio.wr, 0);
19001 ++ gpiod_set_value(par->gpio.wr, 1);
19002 +
19003 + /* Set data */
19004 + #ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
19005 + if (data == prev_data) {
19006 +- gpiod_set_value(par->gpio.wr, 0); /* used as delay */
19007 ++ gpiod_set_value(par->gpio.wr, 1); /* used as delay */
19008 + } else {
19009 + for (i = 0; i < 16; i++) {
19010 + if ((data & 1) != (prev_data & 1))
19011 +@@ -215,7 +215,7 @@ int fbtft_write_gpio16_wr(struct fbtft_par *par, void *buf, size_t len)
19012 + #endif
19013 +
19014 + /* Pullup /WR */
19015 +- gpiod_set_value(par->gpio.wr, 1);
19016 ++ gpiod_set_value(par->gpio.wr, 0);
19017 +
19018 + #ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
19019 + prev_data = *(u16 *)buf;
19020 +diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c
19021 +index 571f47d394843..bd5f874334043 100644
19022 +--- a/drivers/staging/gdm724x/gdm_lte.c
19023 ++++ b/drivers/staging/gdm724x/gdm_lte.c
19024 +@@ -611,10 +611,12 @@ static void gdm_lte_netif_rx(struct net_device *dev, char *buf,
19025 + * bytes (99,130,83,99 dec)
19026 + */
19027 + } __packed;
19028 +- void *addr = buf + sizeof(struct iphdr) +
19029 +- sizeof(struct udphdr) +
19030 +- offsetof(struct dhcp_packet, chaddr);
19031 +- ether_addr_copy(nic->dest_mac_addr, addr);
19032 ++ int offset = sizeof(struct iphdr) +
19033 ++ sizeof(struct udphdr) +
19034 ++ offsetof(struct dhcp_packet, chaddr);
19035 ++ if (offset + ETH_ALEN > len)
19036 ++ return;
19037 ++ ether_addr_copy(nic->dest_mac_addr, buf + offset);
19038 + }
19039 + }
19040 +
19041 +@@ -677,6 +679,7 @@ static void gdm_lte_multi_sdu_pkt(struct phy_dev *phy_dev, char *buf, int len)
19042 + struct sdu *sdu = NULL;
19043 + u8 endian = phy_dev->get_endian(phy_dev->priv_dev);
19044 + u8 *data = (u8 *)multi_sdu->data;
19045 ++ int copied;
19046 + u16 i = 0;
19047 + u16 num_packet;
19048 + u16 hci_len;
19049 +@@ -688,6 +691,12 @@ static void gdm_lte_multi_sdu_pkt(struct phy_dev *phy_dev, char *buf, int len)
19050 + num_packet = gdm_dev16_to_cpu(endian, multi_sdu->num_packet);
19051 +
19052 + for (i = 0; i < num_packet; i++) {
19053 ++ copied = data - multi_sdu->data;
19054 ++ if (len < copied + sizeof(*sdu)) {
19055 ++ pr_err("rx prevent buffer overflow");
19056 ++ return;
19057 ++ }
19058 ++
19059 + sdu = (struct sdu *)data;
19060 +
19061 + cmd_evt = gdm_dev16_to_cpu(endian, sdu->cmd_evt);
19062 +@@ -698,7 +707,8 @@ static void gdm_lte_multi_sdu_pkt(struct phy_dev *phy_dev, char *buf, int len)
19063 + pr_err("rx sdu wrong hci %04x\n", cmd_evt);
19064 + return;
19065 + }
19066 +- if (hci_len < 12) {
19067 ++ if (hci_len < 12 ||
19068 ++ len < copied + sizeof(*sdu) + (hci_len - 12)) {
19069 + pr_err("rx sdu invalid len %d\n", hci_len);
19070 + return;
19071 + }
19072 +diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c
19073 +index 595e82a827287..eea2009fa17bd 100644
19074 +--- a/drivers/staging/media/hantro/hantro_drv.c
19075 ++++ b/drivers/staging/media/hantro/hantro_drv.c
19076 +@@ -56,16 +56,12 @@ dma_addr_t hantro_get_ref(struct hantro_ctx *ctx, u64 ts)
19077 + return hantro_get_dec_buf_addr(ctx, buf);
19078 + }
19079 +
19080 +-static void hantro_job_finish(struct hantro_dev *vpu,
19081 +- struct hantro_ctx *ctx,
19082 +- enum vb2_buffer_state result)
19083 ++static void hantro_job_finish_no_pm(struct hantro_dev *vpu,
19084 ++ struct hantro_ctx *ctx,
19085 ++ enum vb2_buffer_state result)
19086 + {
19087 + struct vb2_v4l2_buffer *src, *dst;
19088 +
19089 +- pm_runtime_mark_last_busy(vpu->dev);
19090 +- pm_runtime_put_autosuspend(vpu->dev);
19091 +- clk_bulk_disable(vpu->variant->num_clocks, vpu->clocks);
19092 +-
19093 + src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
19094 + dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
19095 +
19096 +@@ -81,6 +77,18 @@ static void hantro_job_finish(struct hantro_dev *vpu,
19097 + result);
19098 + }
19099 +
19100 ++static void hantro_job_finish(struct hantro_dev *vpu,
19101 ++ struct hantro_ctx *ctx,
19102 ++ enum vb2_buffer_state result)
19103 ++{
19104 ++ pm_runtime_mark_last_busy(vpu->dev);
19105 ++ pm_runtime_put_autosuspend(vpu->dev);
19106 ++
19107 ++ clk_bulk_disable(vpu->variant->num_clocks, vpu->clocks);
19108 ++
19109 ++ hantro_job_finish_no_pm(vpu, ctx, result);
19110 ++}
19111 ++
19112 + void hantro_irq_done(struct hantro_dev *vpu,
19113 + enum vb2_buffer_state result)
19114 + {
19115 +@@ -152,12 +160,15 @@ static void device_run(void *priv)
19116 + src = hantro_get_src_buf(ctx);
19117 + dst = hantro_get_dst_buf(ctx);
19118 +
19119 ++ ret = pm_runtime_get_sync(ctx->dev->dev);
19120 ++ if (ret < 0) {
19121 ++ pm_runtime_put_noidle(ctx->dev->dev);
19122 ++ goto err_cancel_job;
19123 ++ }
19124 ++
19125 + ret = clk_bulk_enable(ctx->dev->variant->num_clocks, ctx->dev->clocks);
19126 + if (ret)
19127 + goto err_cancel_job;
19128 +- ret = pm_runtime_get_sync(ctx->dev->dev);
19129 +- if (ret < 0)
19130 +- goto err_cancel_job;
19131 +
19132 + v4l2_m2m_buf_copy_metadata(src, dst, true);
19133 +
19134 +@@ -165,7 +176,7 @@ static void device_run(void *priv)
19135 + return;
19136 +
19137 + err_cancel_job:
19138 +- hantro_job_finish(ctx->dev, ctx, VB2_BUF_STATE_ERROR);
19139 ++ hantro_job_finish_no_pm(ctx->dev, ctx, VB2_BUF_STATE_ERROR);
19140 + }
19141 +
19142 + static struct v4l2_m2m_ops vpu_m2m_ops = {
19143 +diff --git a/drivers/staging/media/hantro/hantro_v4l2.c b/drivers/staging/media/hantro/hantro_v4l2.c
19144 +index 1bc118e375a12..7ccc6405036ae 100644
19145 +--- a/drivers/staging/media/hantro/hantro_v4l2.c
19146 ++++ b/drivers/staging/media/hantro/hantro_v4l2.c
19147 +@@ -639,7 +639,14 @@ static int hantro_buf_prepare(struct vb2_buffer *vb)
19148 + ret = hantro_buf_plane_check(vb, pix_fmt);
19149 + if (ret)
19150 + return ret;
19151 +- vb2_set_plane_payload(vb, 0, pix_fmt->plane_fmt[0].sizeimage);
19152 ++ /*
19153 ++ * Buffer's bytesused must be written by driver for CAPTURE buffers.
19154 ++ * (for OUTPUT buffers, if userspace passes 0 bytesused, v4l2-core sets
19155 ++ * it to buffer length).
19156 ++ */
19157 ++ if (V4L2_TYPE_IS_CAPTURE(vq->type))
19158 ++ vb2_set_plane_payload(vb, 0, pix_fmt->plane_fmt[0].sizeimage);
19159 ++
19160 + return 0;
19161 + }
19162 +
19163 +diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
19164 +index e3bfd635a89ae..6a94fff49bf6b 100644
19165 +--- a/drivers/staging/media/imx/imx-media-csi.c
19166 ++++ b/drivers/staging/media/imx/imx-media-csi.c
19167 +@@ -750,9 +750,10 @@ static int csi_setup(struct csi_priv *priv)
19168 +
19169 + static int csi_start(struct csi_priv *priv)
19170 + {
19171 +- struct v4l2_fract *output_fi;
19172 ++ struct v4l2_fract *input_fi, *output_fi;
19173 + int ret;
19174 +
19175 ++ input_fi = &priv->frame_interval[CSI_SINK_PAD];
19176 + output_fi = &priv->frame_interval[priv->active_output_pad];
19177 +
19178 + /* start upstream */
19179 +@@ -761,6 +762,17 @@ static int csi_start(struct csi_priv *priv)
19180 + if (ret)
19181 + return ret;
19182 +
19183 ++ /* Skip first few frames from a BT.656 source */
19184 ++ if (priv->upstream_ep.bus_type == V4L2_MBUS_BT656) {
19185 ++ u32 delay_usec, bad_frames = 20;
19186 ++
19187 ++ delay_usec = DIV_ROUND_UP_ULL((u64)USEC_PER_SEC *
19188 ++ input_fi->numerator * bad_frames,
19189 ++ input_fi->denominator);
19190 ++
19191 ++ usleep_range(delay_usec, delay_usec + 1000);
19192 ++ }
19193 ++
19194 + if (priv->dest == IPU_CSI_DEST_IDMAC) {
19195 + ret = csi_idmac_start(priv);
19196 + if (ret)
19197 +diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/staging/media/imx/imx7-mipi-csis.c
19198 +index 025fdc488bd66..25d0f89b2e53e 100644
19199 +--- a/drivers/staging/media/imx/imx7-mipi-csis.c
19200 ++++ b/drivers/staging/media/imx/imx7-mipi-csis.c
19201 +@@ -666,13 +666,15 @@ static void mipi_csis_clear_counters(struct csi_state *state)
19202 +
19203 + static void mipi_csis_log_counters(struct csi_state *state, bool non_errors)
19204 + {
19205 +- int i = non_errors ? MIPI_CSIS_NUM_EVENTS : MIPI_CSIS_NUM_EVENTS - 4;
19206 ++ unsigned int num_events = non_errors ? MIPI_CSIS_NUM_EVENTS
19207 ++ : MIPI_CSIS_NUM_EVENTS - 6;
19208 + struct device *dev = &state->pdev->dev;
19209 + unsigned long flags;
19210 ++ unsigned int i;
19211 +
19212 + spin_lock_irqsave(&state->slock, flags);
19213 +
19214 +- for (i--; i >= 0; i--) {
19215 ++ for (i = 0; i < num_events; ++i) {
19216 + if (state->events[i].counter > 0 || state->debug)
19217 + dev_info(dev, "%s events: %d\n", state->events[i].name,
19218 + state->events[i].counter);
19219 +diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c
19220 +index d821661d30f38..7131156c1f2cf 100644
19221 +--- a/drivers/staging/media/rkvdec/rkvdec.c
19222 ++++ b/drivers/staging/media/rkvdec/rkvdec.c
19223 +@@ -481,7 +481,15 @@ static int rkvdec_buf_prepare(struct vb2_buffer *vb)
19224 + if (vb2_plane_size(vb, i) < sizeimage)
19225 + return -EINVAL;
19226 + }
19227 +- vb2_set_plane_payload(vb, 0, f->fmt.pix_mp.plane_fmt[0].sizeimage);
19228 ++
19229 ++ /*
19230 ++ * Buffer's bytesused must be written by driver for CAPTURE buffers.
19231 ++ * (for OUTPUT buffers, if userspace passes 0 bytesused, v4l2-core sets
19232 ++ * it to buffer length).
19233 ++ */
19234 ++ if (V4L2_TYPE_IS_CAPTURE(vq->type))
19235 ++ vb2_set_plane_payload(vb, 0, f->fmt.pix_mp.plane_fmt[0].sizeimage);
19236 ++
19237 + return 0;
19238 + }
19239 +
19240 +@@ -658,7 +666,7 @@ static void rkvdec_device_run(void *priv)
19241 + if (WARN_ON(!desc))
19242 + return;
19243 +
19244 +- ret = pm_runtime_get_sync(rkvdec->dev);
19245 ++ ret = pm_runtime_resume_and_get(rkvdec->dev);
19246 + if (ret < 0) {
19247 + rkvdec_job_finish_no_pm(ctx, VB2_BUF_STATE_ERROR);
19248 + return;
19249 +diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
19250 +index ce497d0197dfc..10744fab7ceaa 100644
19251 +--- a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
19252 ++++ b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
19253 +@@ -477,8 +477,8 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx,
19254 + slice_params->flags);
19255 +
19256 + reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_DEPENDENT_SLICE_SEGMENT,
19257 +- V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT,
19258 +- pps->flags);
19259 ++ V4L2_HEVC_SLICE_PARAMS_FLAG_DEPENDENT_SLICE_SEGMENT,
19260 ++ slice_params->flags);
19261 +
19262 + /* FIXME: For multi-slice support. */
19263 + reg |= VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_FIRST_SLICE_SEGMENT_IN_PIC;
19264 +diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.c b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
19265 +index b62eb8e840573..bf731caf2ed51 100644
19266 +--- a/drivers/staging/media/sunxi/cedrus/cedrus_video.c
19267 ++++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
19268 +@@ -457,7 +457,13 @@ static int cedrus_buf_prepare(struct vb2_buffer *vb)
19269 + if (vb2_plane_size(vb, 0) < pix_fmt->sizeimage)
19270 + return -EINVAL;
19271 +
19272 +- vb2_set_plane_payload(vb, 0, pix_fmt->sizeimage);
19273 ++ /*
19274 ++ * Buffer's bytesused must be written by driver for CAPTURE buffers.
19275 ++ * (for OUTPUT buffers, if userspace passes 0 bytesused, v4l2-core sets
19276 ++ * it to buffer length).
19277 ++ */
19278 ++ if (V4L2_TYPE_IS_CAPTURE(vq->type))
19279 ++ vb2_set_plane_payload(vb, 0, pix_fmt->sizeimage);
19280 +
19281 + return 0;
19282 + }
19283 +diff --git a/drivers/staging/mt7621-dts/mt7621.dtsi b/drivers/staging/mt7621-dts/mt7621.dtsi
19284 +index f0c9ae757bcd9..d6628e5f4f66c 100644
19285 +--- a/drivers/staging/mt7621-dts/mt7621.dtsi
19286 ++++ b/drivers/staging/mt7621-dts/mt7621.dtsi
19287 +@@ -498,7 +498,7 @@
19288 +
19289 + bus-range = <0 255>;
19290 + ranges = <
19291 +- 0x02000000 0 0x00000000 0x60000000 0 0x10000000 /* pci memory */
19292 ++ 0x02000000 0 0x60000000 0x60000000 0 0x10000000 /* pci memory */
19293 + 0x01000000 0 0x00000000 0x1e160000 0 0x00010000 /* io space */
19294 + >;
19295 +
19296 +diff --git a/drivers/staging/rtl8712/hal_init.c b/drivers/staging/rtl8712/hal_init.c
19297 +index 715f1fe8b4726..22974277afa08 100644
19298 +--- a/drivers/staging/rtl8712/hal_init.c
19299 ++++ b/drivers/staging/rtl8712/hal_init.c
19300 +@@ -40,7 +40,10 @@ static void rtl871x_load_fw_cb(const struct firmware *firmware, void *context)
19301 + dev_err(&udev->dev, "r8712u: Firmware request failed\n");
19302 + usb_put_dev(udev);
19303 + usb_set_intfdata(usb_intf, NULL);
19304 ++ r8712_free_drv_sw(adapter);
19305 ++ adapter->dvobj_deinit(adapter);
19306 + complete(&adapter->rtl8712_fw_ready);
19307 ++ free_netdev(adapter->pnetdev);
19308 + return;
19309 + }
19310 + adapter->fw = firmware;
19311 +diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
19312 +index 0c3ae8495afb7..2214aca097308 100644
19313 +--- a/drivers/staging/rtl8712/os_intfs.c
19314 ++++ b/drivers/staging/rtl8712/os_intfs.c
19315 +@@ -328,8 +328,6 @@ int r8712_init_drv_sw(struct _adapter *padapter)
19316 +
19317 + void r8712_free_drv_sw(struct _adapter *padapter)
19318 + {
19319 +- struct net_device *pnetdev = padapter->pnetdev;
19320 +-
19321 + r8712_free_cmd_priv(&padapter->cmdpriv);
19322 + r8712_free_evt_priv(&padapter->evtpriv);
19323 + r8712_DeInitSwLeds(padapter);
19324 +@@ -339,8 +337,6 @@ void r8712_free_drv_sw(struct _adapter *padapter)
19325 + _r8712_free_sta_priv(&padapter->stapriv);
19326 + _r8712_free_recv_priv(&padapter->recvpriv);
19327 + mp871xdeinit(padapter);
19328 +- if (pnetdev)
19329 +- free_netdev(pnetdev);
19330 + }
19331 +
19332 + static void enable_video_mode(struct _adapter *padapter, int cbw40_value)
19333 +diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c
19334 +index db2add5764189..c23f6b376111e 100644
19335 +--- a/drivers/staging/rtl8712/rtl871x_recv.c
19336 ++++ b/drivers/staging/rtl8712/rtl871x_recv.c
19337 +@@ -374,7 +374,7 @@ static sint ap2sta_data_frame(struct _adapter *adapter,
19338 + if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) &&
19339 + check_fwstate(pmlmepriv, _FW_LINKED)) {
19340 + /* if NULL-frame, drop packet */
19341 +- if ((GetFrameSubType(ptr)) == IEEE80211_STYPE_NULLFUNC)
19342 ++ if ((GetFrameSubType(ptr)) == (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC))
19343 + return _FAIL;
19344 + /* drop QoS-SubType Data, including QoS NULL,
19345 + * excluding QoS-Data
19346 +diff --git a/drivers/staging/rtl8712/rtl871x_security.c b/drivers/staging/rtl8712/rtl871x_security.c
19347 +index 63d63f7be481a..e0a1c30a8fe66 100644
19348 +--- a/drivers/staging/rtl8712/rtl871x_security.c
19349 ++++ b/drivers/staging/rtl8712/rtl871x_security.c
19350 +@@ -1045,9 +1045,9 @@ static void aes_cipher(u8 *key, uint hdrlen,
19351 + else
19352 + a4_exists = 1;
19353 +
19354 +- if ((frtype == IEEE80211_STYPE_DATA_CFACK) ||
19355 +- (frtype == IEEE80211_STYPE_DATA_CFPOLL) ||
19356 +- (frtype == IEEE80211_STYPE_DATA_CFACKPOLL)) {
19357 ++ if ((frtype == (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA_CFACK)) ||
19358 ++ (frtype == (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA_CFPOLL)) ||
19359 ++ (frtype == (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA_CFACKPOLL))) {
19360 + qc_exists = 1;
19361 + if (hdrlen != WLAN_HDR_A3_QOS_LEN)
19362 + hdrlen += 2;
19363 +@@ -1225,9 +1225,9 @@ static void aes_decipher(u8 *key, uint hdrlen,
19364 + a4_exists = 0;
19365 + else
19366 + a4_exists = 1;
19367 +- if ((frtype == IEEE80211_STYPE_DATA_CFACK) ||
19368 +- (frtype == IEEE80211_STYPE_DATA_CFPOLL) ||
19369 +- (frtype == IEEE80211_STYPE_DATA_CFACKPOLL)) {
19370 ++ if ((frtype == (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA_CFACK)) ||
19371 ++ (frtype == (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA_CFPOLL)) ||
19372 ++ (frtype == (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA_CFACKPOLL))) {
19373 + qc_exists = 1;
19374 + if (hdrlen != WLAN_HDR_A3_QOS_LEN)
19375 + hdrlen += 2;
19376 +diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
19377 +index dc21e7743349c..b760bc3559373 100644
19378 +--- a/drivers/staging/rtl8712/usb_intf.c
19379 ++++ b/drivers/staging/rtl8712/usb_intf.c
19380 +@@ -361,7 +361,7 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
19381 + /* step 1. */
19382 + pnetdev = r8712_init_netdev();
19383 + if (!pnetdev)
19384 +- goto error;
19385 ++ goto put_dev;
19386 + padapter = netdev_priv(pnetdev);
19387 + disable_ht_for_spec_devid(pdid, padapter);
19388 + pdvobjpriv = &padapter->dvobjpriv;
19389 +@@ -381,16 +381,16 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
19390 + * initialize the dvobj_priv
19391 + */
19392 + if (!padapter->dvobj_init) {
19393 +- goto error;
19394 ++ goto put_dev;
19395 + } else {
19396 + status = padapter->dvobj_init(padapter);
19397 + if (status != _SUCCESS)
19398 +- goto error;
19399 ++ goto free_netdev;
19400 + }
19401 + /* step 4. */
19402 + status = r8712_init_drv_sw(padapter);
19403 + if (status)
19404 +- goto error;
19405 ++ goto dvobj_deinit;
19406 + /* step 5. read efuse/eeprom data and get mac_addr */
19407 + {
19408 + int i, offset;
19409 +@@ -570,17 +570,20 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
19410 + }
19411 + /* step 6. Load the firmware asynchronously */
19412 + if (rtl871x_load_fw(padapter))
19413 +- goto error;
19414 ++ goto deinit_drv_sw;
19415 + spin_lock_init(&padapter->lock_rx_ff0_filter);
19416 + mutex_init(&padapter->mutex_start);
19417 + return 0;
19418 +-error:
19419 ++
19420 ++deinit_drv_sw:
19421 ++ r8712_free_drv_sw(padapter);
19422 ++dvobj_deinit:
19423 ++ padapter->dvobj_deinit(padapter);
19424 ++free_netdev:
19425 ++ free_netdev(pnetdev);
19426 ++put_dev:
19427 + usb_put_dev(udev);
19428 + usb_set_intfdata(pusb_intf, NULL);
19429 +- if (padapter && padapter->dvobj_deinit)
19430 +- padapter->dvobj_deinit(padapter);
19431 +- if (pnetdev)
19432 +- free_netdev(pnetdev);
19433 + return -ENODEV;
19434 + }
19435 +
19436 +@@ -612,6 +615,7 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf)
19437 + r8712_stop_drv_timers(padapter);
19438 + r871x_dev_unload(padapter);
19439 + r8712_free_drv_sw(padapter);
19440 ++ free_netdev(pnetdev);
19441 +
19442 + /* decrease the reference count of the usb device structure
19443 + * when disconnect
19444 +diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
19445 +index 5088c3731b6df..6d0d0beed402f 100644
19446 +--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
19447 ++++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
19448 +@@ -420,8 +420,10 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
19449 + wep_key_len = wep_key_len <= 5 ? 5 : 13;
19450 + wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial);
19451 + pwep = kzalloc(wep_total_len, GFP_KERNEL);
19452 +- if (!pwep)
19453 ++ if (!pwep) {
19454 ++ ret = -ENOMEM;
19455 + goto exit;
19456 ++ }
19457 +
19458 + pwep->KeyLength = wep_key_len;
19459 + pwep->Length = wep_total_len;
19460 +diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
19461 +index 06bca7be5203f..76d3f03999647 100644
19462 +--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
19463 ++++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
19464 +@@ -1862,7 +1862,7 @@ int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
19465 + int status;
19466 + int err = -ENODEV;
19467 + struct vchiq_mmal_instance *instance;
19468 +- static struct vchiq_instance *vchiq_instance;
19469 ++ struct vchiq_instance *vchiq_instance;
19470 + struct vchiq_service_params_kernel params = {
19471 + .version = VC_MMAL_VER,
19472 + .version_min = VC_MMAL_MIN_VER,
19473 +diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
19474 +index af35251232eb3..b044999ad002b 100644
19475 +--- a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
19476 ++++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
19477 +@@ -265,12 +265,13 @@ void cxgbit_unmap_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
19478 + struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
19479 +
19480 + if (ccmd->release) {
19481 +- struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
19482 +-
19483 +- if (ttinfo->sgl) {
19484 ++ if (cmd->se_cmd.se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
19485 ++ put_page(sg_page(&ccmd->sg));
19486 ++ } else {
19487 + struct cxgbit_sock *csk = conn->context;
19488 + struct cxgbit_device *cdev = csk->com.cdev;
19489 + struct cxgbi_ppm *ppm = cdev2ppm(cdev);
19490 ++ struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
19491 +
19492 + /* Abort the TCP conn if DDP is not complete to
19493 + * avoid any possibility of DDP after freeing
19494 +@@ -280,14 +281,14 @@ void cxgbit_unmap_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
19495 + cmd->se_cmd.data_length))
19496 + cxgbit_abort_conn(csk);
19497 +
19498 ++ if (unlikely(ttinfo->sgl)) {
19499 ++ dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
19500 ++ ttinfo->nents, DMA_FROM_DEVICE);
19501 ++ ttinfo->nents = 0;
19502 ++ ttinfo->sgl = NULL;
19503 ++ }
19504 + cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
19505 +-
19506 +- dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
19507 +- ttinfo->nents, DMA_FROM_DEVICE);
19508 +- } else {
19509 +- put_page(sg_page(&ccmd->sg));
19510 + }
19511 +-
19512 + ccmd->release = false;
19513 + }
19514 + }
19515 +diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
19516 +index b926e1d6c7b8e..282297ffc4044 100644
19517 +--- a/drivers/target/iscsi/cxgbit/cxgbit_target.c
19518 ++++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
19519 +@@ -997,17 +997,18 @@ static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
19520 + struct scatterlist *sg_start;
19521 + struct iscsi_conn *conn = csk->conn;
19522 + struct iscsi_cmd *cmd = NULL;
19523 ++ struct cxgbit_cmd *ccmd;
19524 ++ struct cxgbi_task_tag_info *ttinfo;
19525 + struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
19526 + struct iscsi_data *hdr = (struct iscsi_data *)pdu_cb->hdr;
19527 + u32 data_offset = be32_to_cpu(hdr->offset);
19528 +- u32 data_len = pdu_cb->dlen;
19529 ++ u32 data_len = ntoh24(hdr->dlength);
19530 + int rc, sg_nents, sg_off;
19531 + bool dcrc_err = false;
19532 +
19533 + if (pdu_cb->flags & PDUCBF_RX_DDP_CMP) {
19534 + u32 offset = be32_to_cpu(hdr->offset);
19535 + u32 ddp_data_len;
19536 +- u32 payload_length = ntoh24(hdr->dlength);
19537 + bool success = false;
19538 +
19539 + cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, 0);
19540 +@@ -1022,7 +1023,7 @@ static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
19541 + cmd->data_sn = be32_to_cpu(hdr->datasn);
19542 +
19543 + rc = __iscsit_check_dataout_hdr(conn, (unsigned char *)hdr,
19544 +- cmd, payload_length, &success);
19545 ++ cmd, data_len, &success);
19546 + if (rc < 0)
19547 + return rc;
19548 + else if (!success)
19549 +@@ -1060,6 +1061,20 @@ static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
19550 + cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents, skip);
19551 + }
19552 +
19553 ++ ccmd = iscsit_priv_cmd(cmd);
19554 ++ ttinfo = &ccmd->ttinfo;
19555 ++
19556 ++ if (ccmd->release && ttinfo->sgl &&
19557 ++ (cmd->se_cmd.data_length == (cmd->write_data_done + data_len))) {
19558 ++ struct cxgbit_device *cdev = csk->com.cdev;
19559 ++ struct cxgbi_ppm *ppm = cdev2ppm(cdev);
19560 ++
19561 ++ dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl, ttinfo->nents,
19562 ++ DMA_FROM_DEVICE);
19563 ++ ttinfo->nents = 0;
19564 ++ ttinfo->sgl = NULL;
19565 ++ }
19566 ++
19567 + check_payload:
19568 +
19569 + rc = iscsit_check_dataout_payload(cmd, hdr, dcrc_err);
19570 +diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c
19571 +index eeb4e4b76c0be..43b1ae8a77893 100644
19572 +--- a/drivers/thermal/cpufreq_cooling.c
19573 ++++ b/drivers/thermal/cpufreq_cooling.c
19574 +@@ -478,7 +478,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
19575 + ret = freq_qos_update_request(&cpufreq_cdev->qos_req, frequency);
19576 + if (ret >= 0) {
19577 + cpufreq_cdev->cpufreq_state = state;
19578 +- cpus = cpufreq_cdev->policy->cpus;
19579 ++ cpus = cpufreq_cdev->policy->related_cpus;
19580 + max_capacity = arch_scale_cpu_capacity(cpumask_first(cpus));
19581 + capacity = frequency * max_capacity;
19582 + capacity /= cpufreq_cdev->policy->cpuinfo.max_freq;
19583 +diff --git a/drivers/thunderbolt/test.c b/drivers/thunderbolt/test.c
19584 +index 5ff5a03bc9cef..6e0a5391fcd7c 100644
19585 +--- a/drivers/thunderbolt/test.c
19586 ++++ b/drivers/thunderbolt/test.c
19587 +@@ -260,14 +260,14 @@ static struct tb_switch *alloc_dev_default(struct kunit *test,
19588 + if (port->dual_link_port && upstream_port->dual_link_port) {
19589 + port->dual_link_port->remote = upstream_port->dual_link_port;
19590 + upstream_port->dual_link_port->remote = port->dual_link_port;
19591 +- }
19592 +
19593 +- if (bonded) {
19594 +- /* Bonding is used */
19595 +- port->bonded = true;
19596 +- port->dual_link_port->bonded = true;
19597 +- upstream_port->bonded = true;
19598 +- upstream_port->dual_link_port->bonded = true;
19599 ++ if (bonded) {
19600 ++ /* Bonding is used */
19601 ++ port->bonded = true;
19602 ++ port->dual_link_port->bonded = true;
19603 ++ upstream_port->bonded = true;
19604 ++ upstream_port->dual_link_port->bonded = true;
19605 ++ }
19606 + }
19607 +
19608 + return sw;
19609 +diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c
19610 +index 9a2d78ace49be..ce3a79e95fb55 100644
19611 +--- a/drivers/tty/nozomi.c
19612 ++++ b/drivers/tty/nozomi.c
19613 +@@ -1378,7 +1378,7 @@ static int nozomi_card_init(struct pci_dev *pdev,
19614 + NOZOMI_NAME, dc);
19615 + if (unlikely(ret)) {
19616 + dev_err(&pdev->dev, "can't request irq %d\n", pdev->irq);
19617 +- goto err_free_kfifo;
19618 ++ goto err_free_all_kfifo;
19619 + }
19620 +
19621 + DBG1("base_addr: %p", dc->base_addr);
19622 +@@ -1416,12 +1416,15 @@ static int nozomi_card_init(struct pci_dev *pdev,
19623 + return 0;
19624 +
19625 + err_free_tty:
19626 +- for (i = 0; i < MAX_PORT; ++i) {
19627 ++ for (i--; i >= 0; i--) {
19628 + tty_unregister_device(ntty_driver, dc->index_start + i);
19629 + tty_port_destroy(&dc->port[i].port);
19630 + }
19631 ++ free_irq(pdev->irq, dc);
19632 ++err_free_all_kfifo:
19633 ++ i = MAX_PORT;
19634 + err_free_kfifo:
19635 +- for (i = 0; i < MAX_PORT; i++)
19636 ++ for (i--; i >= PORT_MDM; i--)
19637 + kfifo_free(&dc->port[i].fifo_ul);
19638 + err_free_sbuf:
19639 + kfree(dc->send_buf);
19640 +diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
19641 +index 8ac11eaeca51b..79418d4beb48f 100644
19642 +--- a/drivers/tty/serial/8250/8250_omap.c
19643 ++++ b/drivers/tty/serial/8250/8250_omap.c
19644 +@@ -43,6 +43,7 @@
19645 + #define UART_ERRATA_CLOCK_DISABLE (1 << 3)
19646 + #define UART_HAS_EFR2 BIT(4)
19647 + #define UART_HAS_RHR_IT_DIS BIT(5)
19648 ++#define UART_RX_TIMEOUT_QUIRK BIT(6)
19649 +
19650 + #define OMAP_UART_FCR_RX_TRIG 6
19651 + #define OMAP_UART_FCR_TX_TRIG 4
19652 +@@ -104,6 +105,9 @@
19653 + #define UART_OMAP_EFR2 0x23
19654 + #define UART_OMAP_EFR2_TIMEOUT_BEHAVE BIT(6)
19655 +
19656 ++/* RX FIFO occupancy indicator */
19657 ++#define UART_OMAP_RX_LVL 0x64
19658 ++
19659 + struct omap8250_priv {
19660 + int line;
19661 + u8 habit;
19662 +@@ -611,6 +615,7 @@ static int omap_8250_dma_handle_irq(struct uart_port *port);
19663 + static irqreturn_t omap8250_irq(int irq, void *dev_id)
19664 + {
19665 + struct uart_port *port = dev_id;
19666 ++ struct omap8250_priv *priv = port->private_data;
19667 + struct uart_8250_port *up = up_to_u8250p(port);
19668 + unsigned int iir;
19669 + int ret;
19670 +@@ -625,6 +630,18 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id)
19671 + serial8250_rpm_get(up);
19672 + iir = serial_port_in(port, UART_IIR);
19673 + ret = serial8250_handle_irq(port, iir);
19674 ++
19675 ++ /*
19676 ++ * On K3 SoCs, it is observed that RX TIMEOUT is signalled after
19677 ++ * FIFO has been drained, in which case a dummy read of RX FIFO
19678 ++ * is required to clear RX TIMEOUT condition.
19679 ++ */
19680 ++ if (priv->habit & UART_RX_TIMEOUT_QUIRK &&
19681 ++ (iir & UART_IIR_RX_TIMEOUT) == UART_IIR_RX_TIMEOUT &&
19682 ++ serial_port_in(port, UART_OMAP_RX_LVL) == 0) {
19683 ++ serial_port_in(port, UART_RX);
19684 ++ }
19685 ++
19686 + serial8250_rpm_put(up);
19687 +
19688 + return IRQ_RETVAL(ret);
19689 +@@ -813,7 +830,7 @@ static void __dma_rx_do_complete(struct uart_8250_port *p)
19690 + poll_count--)
19691 + cpu_relax();
19692 +
19693 +- if (!poll_count)
19694 ++ if (poll_count == -1)
19695 + dev_err(p->port.dev, "teardown incomplete\n");
19696 + }
19697 + }
19698 +@@ -1218,7 +1235,8 @@ static struct omap8250_dma_params am33xx_dma = {
19699 +
19700 + static struct omap8250_platdata am654_platdata = {
19701 + .dma_params = &am654_dma,
19702 +- .habit = UART_HAS_EFR2 | UART_HAS_RHR_IT_DIS,
19703 ++ .habit = UART_HAS_EFR2 | UART_HAS_RHR_IT_DIS |
19704 ++ UART_RX_TIMEOUT_QUIRK,
19705 + };
19706 +
19707 + static struct omap8250_platdata am33xx_platdata = {
19708 +diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
19709 +index fc5ab20322821..ff3f13693def7 100644
19710 +--- a/drivers/tty/serial/8250/8250_port.c
19711 ++++ b/drivers/tty/serial/8250/8250_port.c
19712 +@@ -2629,6 +2629,21 @@ static unsigned int serial8250_get_baud_rate(struct uart_port *port,
19713 + struct ktermios *old)
19714 + {
19715 + unsigned int tolerance = port->uartclk / 100;
19716 ++ unsigned int min;
19717 ++ unsigned int max;
19718 ++
19719 ++ /*
19720 ++ * Handle magic divisors for baud rates above baud_base on SMSC
19721 ++ * Super I/O chips. Enable custom rates of clk/4 and clk/8, but
19722 ++ * disable divisor values beyond 32767, which are unavailable.
19723 ++ */
19724 ++ if (port->flags & UPF_MAGIC_MULTIPLIER) {
19725 ++ min = port->uartclk / 16 / UART_DIV_MAX >> 1;
19726 ++ max = (port->uartclk + tolerance) / 4;
19727 ++ } else {
19728 ++ min = port->uartclk / 16 / UART_DIV_MAX;
19729 ++ max = (port->uartclk + tolerance) / 16;
19730 ++ }
19731 +
19732 + /*
19733 + * Ask the core to calculate the divisor for us.
19734 +@@ -2636,9 +2651,7 @@ static unsigned int serial8250_get_baud_rate(struct uart_port *port,
19735 + * slower than nominal still match standard baud rates without
19736 + * causing transmission errors.
19737 + */
19738 +- return uart_get_baud_rate(port, termios, old,
19739 +- port->uartclk / 16 / UART_DIV_MAX,
19740 +- (port->uartclk + tolerance) / 16);
19741 ++ return uart_get_baud_rate(port, termios, old, min, max);
19742 + }
19743 +
19744 + /*
19745 +diff --git a/drivers/tty/serial/8250/serial_cs.c b/drivers/tty/serial/8250/serial_cs.c
19746 +index 63ea9c4da3d5a..53f2697014a02 100644
19747 +--- a/drivers/tty/serial/8250/serial_cs.c
19748 ++++ b/drivers/tty/serial/8250/serial_cs.c
19749 +@@ -777,6 +777,7 @@ static const struct pcmcia_device_id serial_ids[] = {
19750 + PCMCIA_DEVICE_PROD_ID12("Multi-Tech", "MT2834LT", 0x5f73be51, 0x4cd7c09e),
19751 + PCMCIA_DEVICE_PROD_ID12("OEM ", "C288MX ", 0xb572d360, 0xd2385b7a),
19752 + PCMCIA_DEVICE_PROD_ID12("Option International", "V34bis GSM/PSTN Data/Fax Modem", 0x9d7cd6f5, 0x5cb8bf41),
19753 ++ PCMCIA_DEVICE_PROD_ID12("Option International", "GSM-Ready 56K/ISDN", 0x9d7cd6f5, 0xb23844aa),
19754 + PCMCIA_DEVICE_PROD_ID12("PCMCIA ", "C336MX ", 0x99bcafe9, 0xaa25bcab),
19755 + PCMCIA_DEVICE_PROD_ID12("Quatech Inc", "PCMCIA Dual RS-232 Serial Port Card", 0xc4420b35, 0x92abc92f),
19756 + PCMCIA_DEVICE_PROD_ID12("Quatech Inc", "Dual RS-232 Serial Port PC Card", 0xc4420b35, 0x031a380d),
19757 +@@ -804,7 +805,6 @@ static const struct pcmcia_device_id serial_ids[] = {
19758 + PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "cis/COMpad4.cis"),
19759 + PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "cis/COMpad2.cis"),
19760 + PCMCIA_DEVICE_CIS_PROD_ID2("RS-COM 2P", 0xad20b156, "cis/RS-COM-2P.cis"),
19761 +- PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "cis/GLOBETROTTER.cis"),
19762 + PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.", "SERIAL CARD: SL100 1.00.", 0x19ca78af, 0xf964f42b),
19763 + PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.", "SERIAL CARD: SL100", 0x19ca78af, 0x71d98e83),
19764 + PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.", "SERIAL CARD: SL232 1.00.", 0x19ca78af, 0x69fb7490),
19765 +diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
19766 +index 794035041744f..9c78e43e669d7 100644
19767 +--- a/drivers/tty/serial/fsl_lpuart.c
19768 ++++ b/drivers/tty/serial/fsl_lpuart.c
19769 +@@ -1408,17 +1408,7 @@ static unsigned int lpuart_get_mctrl(struct uart_port *port)
19770 +
19771 + static unsigned int lpuart32_get_mctrl(struct uart_port *port)
19772 + {
19773 +- unsigned int temp = 0;
19774 +- unsigned long reg;
19775 +-
19776 +- reg = lpuart32_read(port, UARTMODIR);
19777 +- if (reg & UARTMODIR_TXCTSE)
19778 +- temp |= TIOCM_CTS;
19779 +-
19780 +- if (reg & UARTMODIR_RXRTSE)
19781 +- temp |= TIOCM_RTS;
19782 +-
19783 +- return temp;
19784 ++ return 0;
19785 + }
19786 +
19787 + static void lpuart_set_mctrl(struct uart_port *port, unsigned int mctrl)
19788 +@@ -1625,7 +1615,7 @@ static void lpuart_rx_dma_startup(struct lpuart_port *sport)
19789 + sport->lpuart_dma_rx_use = true;
19790 + rx_dma_timer_init(sport);
19791 +
19792 +- if (sport->port.has_sysrq) {
19793 ++ if (sport->port.has_sysrq && !lpuart_is_32(sport)) {
19794 + cr3 = readb(sport->port.membase + UARTCR3);
19795 + cr3 |= UARTCR3_FEIE;
19796 + writeb(cr3, sport->port.membase + UARTCR3);
19797 +diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
19798 +index 51b0ecabf2ec9..1e26220c78527 100644
19799 +--- a/drivers/tty/serial/mvebu-uart.c
19800 ++++ b/drivers/tty/serial/mvebu-uart.c
19801 +@@ -445,12 +445,11 @@ static void mvebu_uart_shutdown(struct uart_port *port)
19802 +
19803 + static int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud)
19804 + {
19805 +- struct mvebu_uart *mvuart = to_mvuart(port);
19806 + unsigned int d_divisor, m_divisor;
19807 + u32 brdv, osamp;
19808 +
19809 +- if (IS_ERR(mvuart->clk))
19810 +- return -PTR_ERR(mvuart->clk);
19811 ++ if (!port->uartclk)
19812 ++ return -EOPNOTSUPP;
19813 +
19814 + /*
19815 + * The baudrate is derived from the UART clock thanks to two divisors:
19816 +@@ -463,7 +462,7 @@ static int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud)
19817 + * makes use of D to configure the desired baudrate.
19818 + */
19819 + m_divisor = OSAMP_DEFAULT_DIVISOR;
19820 +- d_divisor = DIV_ROUND_UP(port->uartclk, baud * m_divisor);
19821 ++ d_divisor = DIV_ROUND_CLOSEST(port->uartclk, baud * m_divisor);
19822 +
19823 + brdv = readl(port->membase + UART_BRDV);
19824 + brdv &= ~BRDV_BAUD_MASK;
19825 +@@ -482,7 +481,7 @@ static void mvebu_uart_set_termios(struct uart_port *port,
19826 + struct ktermios *old)
19827 + {
19828 + unsigned long flags;
19829 +- unsigned int baud;
19830 ++ unsigned int baud, min_baud, max_baud;
19831 +
19832 + spin_lock_irqsave(&port->lock, flags);
19833 +
19834 +@@ -501,16 +500,21 @@ static void mvebu_uart_set_termios(struct uart_port *port,
19835 + port->ignore_status_mask |= STAT_RX_RDY(port) | STAT_BRK_ERR;
19836 +
19837 + /*
19838 ++ * Maximal divisor is 1023 * 16 when using default (x16) scheme.
19839 + * Maximum achievable frequency with simple baudrate divisor is 230400.
19840 + * Since the error per bit frame would be of more than 15%, achieving
19841 + * higher frequencies would require to implement the fractional divisor
19842 + * feature.
19843 + */
19844 +- baud = uart_get_baud_rate(port, termios, old, 0, 230400);
19845 ++ min_baud = DIV_ROUND_UP(port->uartclk, 1023 * 16);
19846 ++ max_baud = 230400;
19847 ++
19848 ++ baud = uart_get_baud_rate(port, termios, old, min_baud, max_baud);
19849 + if (mvebu_uart_baud_rate_set(port, baud)) {
19850 + /* No clock available, baudrate cannot be changed */
19851 + if (old)
19852 +- baud = uart_get_baud_rate(port, old, NULL, 0, 230400);
19853 ++ baud = uart_get_baud_rate(port, old, NULL,
19854 ++ min_baud, max_baud);
19855 + } else {
19856 + tty_termios_encode_baud_rate(termios, baud, baud);
19857 + uart_update_timeout(port, termios->c_cflag, baud);
19858 +diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
19859 +index 4baf1316ea729..2d5487bf68559 100644
19860 +--- a/drivers/tty/serial/sh-sci.c
19861 ++++ b/drivers/tty/serial/sh-sci.c
19862 +@@ -610,6 +610,14 @@ static void sci_stop_tx(struct uart_port *port)
19863 + ctrl &= ~SCSCR_TIE;
19864 +
19865 + serial_port_out(port, SCSCR, ctrl);
19866 ++
19867 ++#ifdef CONFIG_SERIAL_SH_SCI_DMA
19868 ++ if (to_sci_port(port)->chan_tx &&
19869 ++ !dma_submit_error(to_sci_port(port)->cookie_tx)) {
19870 ++ dmaengine_terminate_async(to_sci_port(port)->chan_tx);
19871 ++ to_sci_port(port)->cookie_tx = -EINVAL;
19872 ++ }
19873 ++#endif
19874 + }
19875 +
19876 + static void sci_start_rx(struct uart_port *port)
19877 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
19878 +index ca7a61190dd93..d50b606d09aae 100644
19879 +--- a/drivers/usb/class/cdc-acm.c
19880 ++++ b/drivers/usb/class/cdc-acm.c
19881 +@@ -1959,6 +1959,11 @@ static const struct usb_device_id acm_ids[] = {
19882 + .driver_info = IGNORE_DEVICE,
19883 + },
19884 +
19885 ++ /* Exclude Heimann Sensor GmbH USB appset demo */
19886 ++ { USB_DEVICE(0x32a7, 0x0000),
19887 ++ .driver_info = IGNORE_DEVICE,
19888 ++ },
19889 ++
19890 + /* control interfaces without any protocol set */
19891 + { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
19892 + USB_CDC_PROTO_NONE) },
19893 +diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
19894 +index 6f70ab9577b4e..272ae5722c861 100644
19895 +--- a/drivers/usb/dwc2/core.c
19896 ++++ b/drivers/usb/dwc2/core.c
19897 +@@ -1111,15 +1111,6 @@ static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
19898 + usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
19899 + if (hsotg->params.phy_utmi_width == 16)
19900 + usbcfg |= GUSBCFG_PHYIF16;
19901 +-
19902 +- /* Set turnaround time */
19903 +- if (dwc2_is_device_mode(hsotg)) {
19904 +- usbcfg &= ~GUSBCFG_USBTRDTIM_MASK;
19905 +- if (hsotg->params.phy_utmi_width == 16)
19906 +- usbcfg |= 5 << GUSBCFG_USBTRDTIM_SHIFT;
19907 +- else
19908 +- usbcfg |= 9 << GUSBCFG_USBTRDTIM_SHIFT;
19909 +- }
19910 + break;
19911 + default:
19912 + dev_err(hsotg->dev, "FS PHY selected at HS!\n");
19913 +@@ -1141,6 +1132,24 @@ static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
19914 + return retval;
19915 + }
19916 +
19917 ++static void dwc2_set_turnaround_time(struct dwc2_hsotg *hsotg)
19918 ++{
19919 ++ u32 usbcfg;
19920 ++
19921 ++ if (hsotg->params.phy_type != DWC2_PHY_TYPE_PARAM_UTMI)
19922 ++ return;
19923 ++
19924 ++ usbcfg = dwc2_readl(hsotg, GUSBCFG);
19925 ++
19926 ++ usbcfg &= ~GUSBCFG_USBTRDTIM_MASK;
19927 ++ if (hsotg->params.phy_utmi_width == 16)
19928 ++ usbcfg |= 5 << GUSBCFG_USBTRDTIM_SHIFT;
19929 ++ else
19930 ++ usbcfg |= 9 << GUSBCFG_USBTRDTIM_SHIFT;
19931 ++
19932 ++ dwc2_writel(hsotg, usbcfg, GUSBCFG);
19933 ++}
19934 ++
19935 + int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
19936 + {
19937 + u32 usbcfg;
19938 +@@ -1158,6 +1167,9 @@ int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
19939 + retval = dwc2_hs_phy_init(hsotg, select_phy);
19940 + if (retval)
19941 + return retval;
19942 ++
19943 ++ if (dwc2_is_device_mode(hsotg))
19944 ++ dwc2_set_turnaround_time(hsotg);
19945 + }
19946 +
19947 + if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
19948 +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
19949 +index 4ac397e43e19b..bca720c81799c 100644
19950 +--- a/drivers/usb/dwc3/core.c
19951 ++++ b/drivers/usb/dwc3/core.c
19952 +@@ -1616,17 +1616,18 @@ static int dwc3_probe(struct platform_device *pdev)
19953 + }
19954 +
19955 + dwc3_check_params(dwc);
19956 ++ dwc3_debugfs_init(dwc);
19957 +
19958 + ret = dwc3_core_init_mode(dwc);
19959 + if (ret)
19960 + goto err5;
19961 +
19962 +- dwc3_debugfs_init(dwc);
19963 + pm_runtime_put(dev);
19964 +
19965 + return 0;
19966 +
19967 + err5:
19968 ++ dwc3_debugfs_exit(dwc);
19969 + dwc3_event_buffers_cleanup(dwc);
19970 +
19971 + usb_phy_shutdown(dwc->usb2_phy);
19972 +diff --git a/drivers/usb/gadget/function/f_eem.c b/drivers/usb/gadget/function/f_eem.c
19973 +index 2cd9942707b46..5d38f29bda720 100644
19974 +--- a/drivers/usb/gadget/function/f_eem.c
19975 ++++ b/drivers/usb/gadget/function/f_eem.c
19976 +@@ -30,6 +30,11 @@ struct f_eem {
19977 + u8 ctrl_id;
19978 + };
19979 +
19980 ++struct in_context {
19981 ++ struct sk_buff *skb;
19982 ++ struct usb_ep *ep;
19983 ++};
19984 ++
19985 + static inline struct f_eem *func_to_eem(struct usb_function *f)
19986 + {
19987 + return container_of(f, struct f_eem, port.func);
19988 +@@ -320,9 +325,12 @@ fail:
19989 +
19990 + static void eem_cmd_complete(struct usb_ep *ep, struct usb_request *req)
19991 + {
19992 +- struct sk_buff *skb = (struct sk_buff *)req->context;
19993 ++ struct in_context *ctx = req->context;
19994 +
19995 +- dev_kfree_skb_any(skb);
19996 ++ dev_kfree_skb_any(ctx->skb);
19997 ++ kfree(req->buf);
19998 ++ usb_ep_free_request(ctx->ep, req);
19999 ++ kfree(ctx);
20000 + }
20001 +
20002 + /*
20003 +@@ -410,7 +418,9 @@ static int eem_unwrap(struct gether *port,
20004 + * b15: bmType (0 == data, 1 == command)
20005 + */
20006 + if (header & BIT(15)) {
20007 +- struct usb_request *req = cdev->req;
20008 ++ struct usb_request *req;
20009 ++ struct in_context *ctx;
20010 ++ struct usb_ep *ep;
20011 + u16 bmEEMCmd;
20012 +
20013 + /* EEM command packet format:
20014 +@@ -439,11 +449,36 @@ static int eem_unwrap(struct gether *port,
20015 + skb_trim(skb2, len);
20016 + put_unaligned_le16(BIT(15) | BIT(11) | len,
20017 + skb_push(skb2, 2));
20018 ++
20019 ++ ep = port->in_ep;
20020 ++ req = usb_ep_alloc_request(ep, GFP_ATOMIC);
20021 ++ if (!req) {
20022 ++ dev_kfree_skb_any(skb2);
20023 ++ goto next;
20024 ++ }
20025 ++
20026 ++ req->buf = kmalloc(skb2->len, GFP_KERNEL);
20027 ++ if (!req->buf) {
20028 ++ usb_ep_free_request(ep, req);
20029 ++ dev_kfree_skb_any(skb2);
20030 ++ goto next;
20031 ++ }
20032 ++
20033 ++ ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
20034 ++ if (!ctx) {
20035 ++ kfree(req->buf);
20036 ++ usb_ep_free_request(ep, req);
20037 ++ dev_kfree_skb_any(skb2);
20038 ++ goto next;
20039 ++ }
20040 ++ ctx->skb = skb2;
20041 ++ ctx->ep = ep;
20042 ++
20043 + skb_copy_bits(skb2, 0, req->buf, skb2->len);
20044 + req->length = skb2->len;
20045 + req->complete = eem_cmd_complete;
20046 + req->zero = 1;
20047 +- req->context = skb2;
20048 ++ req->context = ctx;
20049 + if (usb_ep_queue(port->in_ep, req, GFP_ATOMIC))
20050 + DBG(cdev, "echo response queue fail\n");
20051 + break;
20052 +diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
20053 +index d4844afeaffc2..9c0c393abb39b 100644
20054 +--- a/drivers/usb/gadget/function/f_fs.c
20055 ++++ b/drivers/usb/gadget/function/f_fs.c
20056 +@@ -250,8 +250,8 @@ EXPORT_SYMBOL_GPL(ffs_lock);
20057 + static struct ffs_dev *_ffs_find_dev(const char *name);
20058 + static struct ffs_dev *_ffs_alloc_dev(void);
20059 + static void _ffs_free_dev(struct ffs_dev *dev);
20060 +-static void *ffs_acquire_dev(const char *dev_name);
20061 +-static void ffs_release_dev(struct ffs_data *ffs_data);
20062 ++static int ffs_acquire_dev(const char *dev_name, struct ffs_data *ffs_data);
20063 ++static void ffs_release_dev(struct ffs_dev *ffs_dev);
20064 + static int ffs_ready(struct ffs_data *ffs);
20065 + static void ffs_closed(struct ffs_data *ffs);
20066 +
20067 +@@ -1554,8 +1554,8 @@ unmapped_value:
20068 + static int ffs_fs_get_tree(struct fs_context *fc)
20069 + {
20070 + struct ffs_sb_fill_data *ctx = fc->fs_private;
20071 +- void *ffs_dev;
20072 + struct ffs_data *ffs;
20073 ++ int ret;
20074 +
20075 + ENTER();
20076 +
20077 +@@ -1574,13 +1574,12 @@ static int ffs_fs_get_tree(struct fs_context *fc)
20078 + return -ENOMEM;
20079 + }
20080 +
20081 +- ffs_dev = ffs_acquire_dev(ffs->dev_name);
20082 +- if (IS_ERR(ffs_dev)) {
20083 ++ ret = ffs_acquire_dev(ffs->dev_name, ffs);
20084 ++ if (ret) {
20085 + ffs_data_put(ffs);
20086 +- return PTR_ERR(ffs_dev);
20087 ++ return ret;
20088 + }
20089 +
20090 +- ffs->private_data = ffs_dev;
20091 + ctx->ffs_data = ffs;
20092 + return get_tree_nodev(fc, ffs_sb_fill);
20093 + }
20094 +@@ -1591,7 +1590,6 @@ static void ffs_fs_free_fc(struct fs_context *fc)
20095 +
20096 + if (ctx) {
20097 + if (ctx->ffs_data) {
20098 +- ffs_release_dev(ctx->ffs_data);
20099 + ffs_data_put(ctx->ffs_data);
20100 + }
20101 +
20102 +@@ -1630,10 +1628,8 @@ ffs_fs_kill_sb(struct super_block *sb)
20103 + ENTER();
20104 +
20105 + kill_litter_super(sb);
20106 +- if (sb->s_fs_info) {
20107 +- ffs_release_dev(sb->s_fs_info);
20108 ++ if (sb->s_fs_info)
20109 + ffs_data_closed(sb->s_fs_info);
20110 +- }
20111 + }
20112 +
20113 + static struct file_system_type ffs_fs_type = {
20114 +@@ -1703,6 +1699,7 @@ static void ffs_data_put(struct ffs_data *ffs)
20115 + if (refcount_dec_and_test(&ffs->ref)) {
20116 + pr_info("%s(): freeing\n", __func__);
20117 + ffs_data_clear(ffs);
20118 ++ ffs_release_dev(ffs->private_data);
20119 + BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
20120 + swait_active(&ffs->ep0req_completion.wait) ||
20121 + waitqueue_active(&ffs->wait));
20122 +@@ -3032,6 +3029,7 @@ static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
20123 + struct ffs_function *func = ffs_func_from_usb(f);
20124 + struct f_fs_opts *ffs_opts =
20125 + container_of(f->fi, struct f_fs_opts, func_inst);
20126 ++ struct ffs_data *ffs_data;
20127 + int ret;
20128 +
20129 + ENTER();
20130 +@@ -3046,12 +3044,13 @@ static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
20131 + if (!ffs_opts->no_configfs)
20132 + ffs_dev_lock();
20133 + ret = ffs_opts->dev->desc_ready ? 0 : -ENODEV;
20134 +- func->ffs = ffs_opts->dev->ffs_data;
20135 ++ ffs_data = ffs_opts->dev->ffs_data;
20136 + if (!ffs_opts->no_configfs)
20137 + ffs_dev_unlock();
20138 + if (ret)
20139 + return ERR_PTR(ret);
20140 +
20141 ++ func->ffs = ffs_data;
20142 + func->conf = c;
20143 + func->gadget = c->cdev->gadget;
20144 +
20145 +@@ -3506,6 +3505,7 @@ static void ffs_free_inst(struct usb_function_instance *f)
20146 + struct f_fs_opts *opts;
20147 +
20148 + opts = to_f_fs_opts(f);
20149 ++ ffs_release_dev(opts->dev);
20150 + ffs_dev_lock();
20151 + _ffs_free_dev(opts->dev);
20152 + ffs_dev_unlock();
20153 +@@ -3693,47 +3693,48 @@ static void _ffs_free_dev(struct ffs_dev *dev)
20154 + {
20155 + list_del(&dev->entry);
20156 +
20157 +- /* Clear the private_data pointer to stop incorrect dev access */
20158 +- if (dev->ffs_data)
20159 +- dev->ffs_data->private_data = NULL;
20160 +-
20161 + kfree(dev);
20162 + if (list_empty(&ffs_devices))
20163 + functionfs_cleanup();
20164 + }
20165 +
20166 +-static void *ffs_acquire_dev(const char *dev_name)
20167 ++static int ffs_acquire_dev(const char *dev_name, struct ffs_data *ffs_data)
20168 + {
20169 ++ int ret = 0;
20170 + struct ffs_dev *ffs_dev;
20171 +
20172 + ENTER();
20173 + ffs_dev_lock();
20174 +
20175 + ffs_dev = _ffs_find_dev(dev_name);
20176 +- if (!ffs_dev)
20177 +- ffs_dev = ERR_PTR(-ENOENT);
20178 +- else if (ffs_dev->mounted)
20179 +- ffs_dev = ERR_PTR(-EBUSY);
20180 +- else if (ffs_dev->ffs_acquire_dev_callback &&
20181 +- ffs_dev->ffs_acquire_dev_callback(ffs_dev))
20182 +- ffs_dev = ERR_PTR(-ENOENT);
20183 +- else
20184 ++ if (!ffs_dev) {
20185 ++ ret = -ENOENT;
20186 ++ } else if (ffs_dev->mounted) {
20187 ++ ret = -EBUSY;
20188 ++ } else if (ffs_dev->ffs_acquire_dev_callback &&
20189 ++ ffs_dev->ffs_acquire_dev_callback(ffs_dev)) {
20190 ++ ret = -ENOENT;
20191 ++ } else {
20192 + ffs_dev->mounted = true;
20193 ++ ffs_dev->ffs_data = ffs_data;
20194 ++ ffs_data->private_data = ffs_dev;
20195 ++ }
20196 +
20197 + ffs_dev_unlock();
20198 +- return ffs_dev;
20199 ++ return ret;
20200 + }
20201 +
20202 +-static void ffs_release_dev(struct ffs_data *ffs_data)
20203 ++static void ffs_release_dev(struct ffs_dev *ffs_dev)
20204 + {
20205 +- struct ffs_dev *ffs_dev;
20206 +-
20207 + ENTER();
20208 + ffs_dev_lock();
20209 +
20210 +- ffs_dev = ffs_data->private_data;
20211 +- if (ffs_dev) {
20212 ++ if (ffs_dev && ffs_dev->mounted) {
20213 + ffs_dev->mounted = false;
20214 ++ if (ffs_dev->ffs_data) {
20215 ++ ffs_dev->ffs_data->private_data = NULL;
20216 ++ ffs_dev->ffs_data = NULL;
20217 ++ }
20218 +
20219 + if (ffs_dev->ffs_release_dev_callback)
20220 + ffs_dev->ffs_release_dev_callback(ffs_dev);
20221 +@@ -3761,7 +3762,6 @@ static int ffs_ready(struct ffs_data *ffs)
20222 + }
20223 +
20224 + ffs_obj->desc_ready = true;
20225 +- ffs_obj->ffs_data = ffs;
20226 +
20227 + if (ffs_obj->ffs_ready_callback) {
20228 + ret = ffs_obj->ffs_ready_callback(ffs);
20229 +@@ -3789,7 +3789,6 @@ static void ffs_closed(struct ffs_data *ffs)
20230 + goto done;
20231 +
20232 + ffs_obj->desc_ready = false;
20233 +- ffs_obj->ffs_data = NULL;
20234 +
20235 + if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) &&
20236 + ffs_obj->ffs_closed_callback)
20237 +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
20238 +index f66815fe84822..e4b0c0420b376 100644
20239 +--- a/drivers/usb/host/xhci-mem.c
20240 ++++ b/drivers/usb/host/xhci-mem.c
20241 +@@ -1924,6 +1924,7 @@ no_bw:
20242 + xhci->hw_ports = NULL;
20243 + xhci->rh_bw = NULL;
20244 + xhci->ext_caps = NULL;
20245 ++ xhci->port_caps = NULL;
20246 +
20247 + xhci->page_size = 0;
20248 + xhci->page_shift = 0;
20249 +diff --git a/drivers/usb/host/xhci-pci-renesas.c b/drivers/usb/host/xhci-pci-renesas.c
20250 +index f97ac9f52bf4d..431213cdf9e0e 100644
20251 +--- a/drivers/usb/host/xhci-pci-renesas.c
20252 ++++ b/drivers/usb/host/xhci-pci-renesas.c
20253 +@@ -207,7 +207,8 @@ static int renesas_check_rom_state(struct pci_dev *pdev)
20254 + return 0;
20255 +
20256 + case RENESAS_ROM_STATUS_NO_RESULT: /* No result yet */
20257 +- return 0;
20258 ++ dev_dbg(&pdev->dev, "Unknown ROM status ...\n");
20259 ++ break;
20260 +
20261 + case RENESAS_ROM_STATUS_ERROR: /* Error State */
20262 + default: /* All other states are marked as "Reserved states" */
20263 +@@ -224,13 +225,12 @@ static int renesas_fw_check_running(struct pci_dev *pdev)
20264 + u8 fw_state;
20265 + int err;
20266 +
20267 +- /* Check if device has ROM and loaded, if so skip everything */
20268 +- err = renesas_check_rom(pdev);
20269 +- if (err) { /* we have rom */
20270 +- err = renesas_check_rom_state(pdev);
20271 +- if (!err)
20272 +- return err;
20273 +- }
20274 ++ /*
20275 ++ * Only if device has ROM and loaded FW we can skip loading and
20276 ++ * return success. Otherwise (even unknown state), attempt to load FW.
20277 ++ */
20278 ++ if (renesas_check_rom(pdev) && !renesas_check_rom_state(pdev))
20279 ++ return 0;
20280 +
20281 + /*
20282 + * Test if the device is actually needing the firmware. As most
20283 +diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
20284 +index a48452a6172b6..c0f432d509aab 100644
20285 +--- a/drivers/usb/phy/phy-tegra-usb.c
20286 ++++ b/drivers/usb/phy/phy-tegra-usb.c
20287 +@@ -58,12 +58,12 @@
20288 + #define USB_WAKEUP_DEBOUNCE_COUNT(x) (((x) & 0x7) << 16)
20289 +
20290 + #define USB_PHY_VBUS_SENSORS 0x404
20291 +-#define B_SESS_VLD_WAKEUP_EN BIT(6)
20292 +-#define B_VBUS_VLD_WAKEUP_EN BIT(14)
20293 ++#define B_SESS_VLD_WAKEUP_EN BIT(14)
20294 + #define A_SESS_VLD_WAKEUP_EN BIT(22)
20295 + #define A_VBUS_VLD_WAKEUP_EN BIT(30)
20296 +
20297 + #define USB_PHY_VBUS_WAKEUP_ID 0x408
20298 ++#define VBUS_WAKEUP_STS BIT(10)
20299 + #define VBUS_WAKEUP_WAKEUP_EN BIT(30)
20300 +
20301 + #define USB1_LEGACY_CTRL 0x410
20302 +@@ -544,7 +544,7 @@ static int utmi_phy_power_on(struct tegra_usb_phy *phy)
20303 +
20304 + val = readl_relaxed(base + USB_PHY_VBUS_SENSORS);
20305 + val &= ~(A_VBUS_VLD_WAKEUP_EN | A_SESS_VLD_WAKEUP_EN);
20306 +- val &= ~(B_VBUS_VLD_WAKEUP_EN | B_SESS_VLD_WAKEUP_EN);
20307 ++ val &= ~(B_SESS_VLD_WAKEUP_EN);
20308 + writel_relaxed(val, base + USB_PHY_VBUS_SENSORS);
20309 +
20310 + val = readl_relaxed(base + UTMIP_BAT_CHRG_CFG0);
20311 +@@ -642,6 +642,15 @@ static int utmi_phy_power_off(struct tegra_usb_phy *phy)
20312 + void __iomem *base = phy->regs;
20313 + u32 val;
20314 +
20315 ++ /*
20316 ++ * Give hardware time to settle down after VBUS disconnection,
20317 ++ * otherwise PHY will immediately wake up from suspend.
20318 ++ */
20319 ++ if (phy->wakeup_enabled && phy->mode != USB_DR_MODE_HOST)
20320 ++ readl_relaxed_poll_timeout(base + USB_PHY_VBUS_WAKEUP_ID,
20321 ++ val, !(val & VBUS_WAKEUP_STS),
20322 ++ 5000, 100000);
20323 ++
20324 + utmi_phy_clk_disable(phy);
20325 +
20326 + /* PHY won't resume if reset is asserted */
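In the utmi_phy_power_off() hunk above, readl_relaxed_poll_timeout() sleeps between register reads until VBUS_WAKEUP_STS clears or roughly 100 ms elapse. A rough userspace model of that wait, with a hypothetical stand-in for the register read (not the kernel macro itself):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define VBUS_WAKEUP_STS	(1u << 10)

/* Hypothetical stand-in for readl_relaxed(base + USB_PHY_VBUS_WAKEUP_ID). */
static uint32_t read_vbus_wakeup_reg(void)
{
	static int reads;

	/* pretend the status bit clears after a few polls */
	return reads++ < 3 ? VBUS_WAKEUP_STS : 0;
}

/* Models readl_relaxed_poll_timeout(addr, val, !(val & STS), 5000, 100000). */
static int wait_vbus_settled(void)
{
	long waited_us;

	for (waited_us = 0; waited_us <= 100000; waited_us += 5000) {
		if (!(read_vbus_wakeup_reg() & VBUS_WAKEUP_STS))
			return 0;	/* status bit cleared: safe to suspend */
		usleep(5000);		/* the 5000 us sleep-poll interval */
	}
	return -ETIMEDOUT;		/* bit never cleared within ~100 ms */
}

int main(void)
{
	printf("wait_vbus_settled() = %d\n", wait_vbus_settled());
	return 0;
}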
20327 +diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
20328 +index b9429c9f65f6c..aeef453aa6585 100644
20329 +--- a/drivers/usb/typec/class.c
20330 ++++ b/drivers/usb/typec/class.c
20331 +@@ -517,8 +517,10 @@ typec_register_altmode(struct device *parent,
20332 + int ret;
20333 +
20334 + alt = kzalloc(sizeof(*alt), GFP_KERNEL);
20335 +- if (!alt)
20336 ++ if (!alt) {
20337 ++ altmode_id_remove(parent, id);
20338 + return ERR_PTR(-ENOMEM);
20339 ++ }
20340 +
20341 + alt->adev.svid = desc->svid;
20342 + alt->adev.mode = desc->mode;
20343 +diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
20344 +index 25b480752266e..98d84243c630c 100644
20345 +--- a/drivers/usb/typec/tcpm/tcpci.c
20346 ++++ b/drivers/usb/typec/tcpm/tcpci.c
20347 +@@ -21,8 +21,12 @@
20348 + #define PD_RETRY_COUNT_DEFAULT 3
20349 + #define PD_RETRY_COUNT_3_0_OR_HIGHER 2
20350 + #define AUTO_DISCHARGE_DEFAULT_THRESHOLD_MV 3500
20351 +-#define AUTO_DISCHARGE_PD_HEADROOM_MV 850
20352 +-#define AUTO_DISCHARGE_PPS_HEADROOM_MV 1250
20353 ++#define VSINKPD_MIN_IR_DROP_MV 750
20354 ++#define VSRC_NEW_MIN_PERCENT 95
20355 ++#define VSRC_VALID_MIN_MV 500
20356 ++#define VPPS_NEW_MIN_PERCENT 95
20357 ++#define VPPS_VALID_MIN_MV 100
20358 ++#define VSINKDISCONNECT_PD_MIN_PERCENT 90
20359 +
20360 + #define tcpc_presenting_rd(reg, cc) \
20361 + (!(TCPC_ROLE_CTRL_DRP & (reg)) && \
20362 +@@ -324,11 +328,13 @@ static int tcpci_set_auto_vbus_discharge_threshold(struct tcpc_dev *dev, enum ty
20363 + threshold = AUTO_DISCHARGE_DEFAULT_THRESHOLD_MV;
20364 + } else if (mode == TYPEC_PWR_MODE_PD) {
20365 + if (pps_active)
20366 +- threshold = (95 * requested_vbus_voltage_mv / 100) -
20367 +- AUTO_DISCHARGE_PD_HEADROOM_MV;
20368 ++ threshold = ((VPPS_NEW_MIN_PERCENT * requested_vbus_voltage_mv / 100) -
20369 ++ VSINKPD_MIN_IR_DROP_MV - VPPS_VALID_MIN_MV) *
20370 ++ VSINKDISCONNECT_PD_MIN_PERCENT / 100;
20371 + else
20372 +- threshold = (95 * requested_vbus_voltage_mv / 100) -
20373 +- AUTO_DISCHARGE_PPS_HEADROOM_MV;
20374 ++ threshold = ((VSRC_NEW_MIN_PERCENT * requested_vbus_voltage_mv / 100) -
20375 ++ VSINKPD_MIN_IR_DROP_MV - VSRC_VALID_MIN_MV) *
20376 ++ VSINKDISCONNECT_PD_MIN_PERCENT / 100;
20377 + } else {
20378 + /* 3.5V for non-pd sink */
20379 + threshold = AUTO_DISCHARGE_DEFAULT_THRESHOLD_MV;
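The tcpci.c hunk above replaces the two fixed headroom constants with terms taken from the USB PD spec (vSrcNew, vPpsNew, vSinkPD IR drop, vSinkDisconnectPD). As a worked example of the new arithmetic, a standalone sketch (the 9 V request value is hypothetical):

#include <stdbool.h>
#include <stdio.h>

#define VSINKPD_MIN_IR_DROP_MV		750
#define VSRC_NEW_MIN_PERCENT		95
#define VSRC_VALID_MIN_MV		500
#define VPPS_NEW_MIN_PERCENT		95
#define VPPS_VALID_MIN_MV		100
#define VSINKDISCONNECT_PD_MIN_PERCENT	90

/* Mirrors the PD branch of tcpci_set_auto_vbus_discharge_threshold(). */
static unsigned int pd_threshold_mv(bool pps_active, unsigned int req_mv)
{
	unsigned int new_min = pps_active ? VPPS_NEW_MIN_PERCENT : VSRC_NEW_MIN_PERCENT;
	unsigned int valid_min = pps_active ? VPPS_VALID_MIN_MV : VSRC_VALID_MIN_MV;

	return ((new_min * req_mv / 100) - VSINKPD_MIN_IR_DROP_MV - valid_min) *
		VSINKDISCONNECT_PD_MIN_PERCENT / 100;
}

int main(void)
{
	/* fixed 9 V contract: (95% of 9000 - 750 - 500) * 90% = 6570 mV */
	printf("fixed: %u mV\n", pd_threshold_mv(false, 9000));
	/* PPS at 9 V:        (95% of 9000 - 750 - 100) * 90% = 6930 mV */
	printf("pps:   %u mV\n", pd_threshold_mv(true, 9000));
	return 0;
}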
20380 +diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
20381 +index 63470cf7f4cd9..1b7f18d35df45 100644
20382 +--- a/drivers/usb/typec/tcpm/tcpm.c
20383 ++++ b/drivers/usb/typec/tcpm/tcpm.c
20384 +@@ -2576,6 +2576,11 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
20385 + } else {
20386 + next_state = SNK_WAIT_CAPABILITIES;
20387 + }
20388 ++
20389 ++		/* Threshold was relaxed before sending Request. Restore it. */
20390 ++ tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
20391 ++ port->pps_data.active,
20392 ++ port->supply_voltage);
20393 + tcpm_set_state(port, next_state, 0);
20394 + break;
20395 + case SNK_NEGOTIATE_PPS_CAPABILITIES:
20396 +@@ -2589,6 +2594,11 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
20397 + port->send_discover)
20398 + port->vdm_sm_running = true;
20399 +
20400 ++		/* Threshold was relaxed before sending Request. Restore it. */
20401 ++ tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
20402 ++ port->pps_data.active,
20403 ++ port->supply_voltage);
20404 ++
20405 + tcpm_set_state(port, SNK_READY, 0);
20406 + break;
20407 + case DR_SWAP_SEND:
20408 +@@ -3308,6 +3318,12 @@ static int tcpm_pd_send_request(struct tcpm_port *port)
20409 + if (ret < 0)
20410 + return ret;
20411 +
20412 ++ /*
20413 ++ * Relax the threshold as voltage will be adjusted after Accept Message plus tSrcTransition.
20414 ++ * It is safer to modify the threshold here.
20415 ++ */
20416 ++ tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
20417 ++
20418 + memset(&msg, 0, sizeof(msg));
20419 + msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
20420 + port->pwr_role,
20421 +@@ -3405,6 +3421,9 @@ static int tcpm_pd_send_pps_request(struct tcpm_port *port)
20422 + if (ret < 0)
20423 + return ret;
20424 +
20425 ++ /* Relax the threshold as voltage will be adjusted right after Accept Message. */
20426 ++ tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
20427 ++
20428 + memset(&msg, 0, sizeof(msg));
20429 + msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
20430 + port->pwr_role,
20431 +@@ -4186,6 +4205,10 @@ static void run_state_machine(struct tcpm_port *port)
20432 + port->hard_reset_count = 0;
20433 + ret = tcpm_pd_send_request(port);
20434 + if (ret < 0) {
20435 ++			/* Restore the original state */
20436 ++ tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
20437 ++ port->pps_data.active,
20438 ++ port->supply_voltage);
20439 + /* Let the Source send capabilities again. */
20440 + tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
20441 + } else {
20442 +@@ -4196,6 +4219,10 @@ static void run_state_machine(struct tcpm_port *port)
20443 + case SNK_NEGOTIATE_PPS_CAPABILITIES:
20444 + ret = tcpm_pd_send_pps_request(port);
20445 + if (ret < 0) {
20446 ++			/* Restore the original state */
20447 ++ tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
20448 ++ port->pps_data.active,
20449 ++ port->supply_voltage);
20450 + port->pps_status = ret;
20451 + /*
20452 + * If this was called due to updates to sink
20453 +@@ -5198,6 +5225,9 @@ static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port)
20454 + tcpm_set_state(port, SNK_UNATTACHED, 0);
20455 + }
20456 + break;
20457 ++ case PR_SWAP_SNK_SRC_SINK_OFF:
20458 ++ /* Do nothing, vsafe0v is expected during transition */
20459 ++ break;
20460 + default:
20461 + if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled)
20462 + tcpm_set_state(port, SNK_UNATTACHED, 0);
20463 +diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
20464 +index bd7c482c948aa..b94958552eb87 100644
20465 +--- a/drivers/vfio/pci/vfio_pci.c
20466 ++++ b/drivers/vfio/pci/vfio_pci.c
20467 +@@ -1594,6 +1594,7 @@ static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
20468 + {
20469 + struct vm_area_struct *vma = vmf->vma;
20470 + struct vfio_pci_device *vdev = vma->vm_private_data;
20471 ++ struct vfio_pci_mmap_vma *mmap_vma;
20472 + vm_fault_t ret = VM_FAULT_NOPAGE;
20473 +
20474 + mutex_lock(&vdev->vma_lock);
20475 +@@ -1601,24 +1602,36 @@ static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
20476 +
20477 + if (!__vfio_pci_memory_enabled(vdev)) {
20478 + ret = VM_FAULT_SIGBUS;
20479 +- mutex_unlock(&vdev->vma_lock);
20480 + goto up_out;
20481 + }
20482 +
20483 +- if (__vfio_pci_add_vma(vdev, vma)) {
20484 +- ret = VM_FAULT_OOM;
20485 +- mutex_unlock(&vdev->vma_lock);
20486 +- goto up_out;
20487 ++ /*
20488 ++ * We populate the whole vma on fault, so we need to test whether
20489 ++ * the vma has already been mapped, such as for concurrent faults
20490 ++ * to the same vma. io_remap_pfn_range() will trigger a BUG_ON if
20491 ++ * we ask it to fill the same range again.
20492 ++ */
20493 ++ list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
20494 ++ if (mmap_vma->vma == vma)
20495 ++ goto up_out;
20496 + }
20497 +
20498 +- mutex_unlock(&vdev->vma_lock);
20499 +-
20500 + if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
20501 +- vma->vm_end - vma->vm_start, vma->vm_page_prot))
20502 ++ vma->vm_end - vma->vm_start,
20503 ++ vma->vm_page_prot)) {
20504 + ret = VM_FAULT_SIGBUS;
20505 ++ zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
20506 ++ goto up_out;
20507 ++ }
20508 ++
20509 ++ if (__vfio_pci_add_vma(vdev, vma)) {
20510 ++ ret = VM_FAULT_OOM;
20511 ++ zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
20512 ++ }
20513 +
20514 + up_out:
20515 + up_read(&vdev->memory_lock);
20516 ++ mutex_unlock(&vdev->vma_lock);
20517 + return ret;
20518 + }
20519 +
20520 +diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c
20521 +index e88a2b0e59046..662029d6a3dc9 100644
20522 +--- a/drivers/video/backlight/lm3630a_bl.c
20523 ++++ b/drivers/video/backlight/lm3630a_bl.c
20524 +@@ -482,8 +482,10 @@ static int lm3630a_parse_node(struct lm3630a_chip *pchip,
20525 +
20526 + device_for_each_child_node(pchip->dev, node) {
20527 + ret = lm3630a_parse_bank(pdata, node, &seen_led_sources);
20528 +- if (ret)
20529 ++ if (ret) {
20530 ++ fwnode_handle_put(node);
20531 + return ret;
20532 ++ }
20533 + }
20534 +
20535 + return ret;
20536 +diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
20537 +index 7f8debd2da065..ad598257ab386 100644
20538 +--- a/drivers/video/fbdev/imxfb.c
20539 ++++ b/drivers/video/fbdev/imxfb.c
20540 +@@ -992,7 +992,7 @@ static int imxfb_probe(struct platform_device *pdev)
20541 + info->screen_buffer = dma_alloc_wc(&pdev->dev, fbi->map_size,
20542 + &fbi->map_dma, GFP_KERNEL);
20543 + if (!info->screen_buffer) {
20544 +- dev_err(&pdev->dev, "Failed to allocate video RAM: %d\n", ret);
20545 ++ dev_err(&pdev->dev, "Failed to allocate video RAM\n");
20546 + ret = -ENOMEM;
20547 + goto failed_map;
20548 + }
20549 +diff --git a/drivers/visorbus/visorchipset.c b/drivers/visorbus/visorchipset.c
20550 +index cb1eb7e05f871..5668cad86e374 100644
20551 +--- a/drivers/visorbus/visorchipset.c
20552 ++++ b/drivers/visorbus/visorchipset.c
20553 +@@ -1561,7 +1561,7 @@ schedule_out:
20554 +
20555 + static int visorchipset_init(struct acpi_device *acpi_device)
20556 + {
20557 +- int err = -ENODEV;
20558 ++ int err = -ENOMEM;
20559 + struct visorchannel *controlvm_channel;
20560 +
20561 + chipset_dev = kzalloc(sizeof(*chipset_dev), GFP_KERNEL);
20562 +@@ -1584,8 +1584,10 @@ static int visorchipset_init(struct acpi_device *acpi_device)
20563 + "controlvm",
20564 + sizeof(struct visor_controlvm_channel),
20565 + VISOR_CONTROLVM_CHANNEL_VERSIONID,
20566 +- VISOR_CHANNEL_SIGNATURE))
20567 ++ VISOR_CHANNEL_SIGNATURE)) {
20568 ++ err = -ENODEV;
20569 + goto error_delete_groups;
20570 ++ }
20571 + /* if booting in a crash kernel */
20572 + if (is_kdump_kernel())
20573 + INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
20574 +diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
20575 +index 68b95ad82126e..520a0f6a7d9e9 100644
20576 +--- a/fs/btrfs/Kconfig
20577 ++++ b/fs/btrfs/Kconfig
20578 +@@ -18,6 +18,8 @@ config BTRFS_FS
20579 + select RAID6_PQ
20580 + select XOR_BLOCKS
20581 + select SRCU
20582 ++ depends on !PPC_256K_PAGES # powerpc
20583 ++ depends on !PAGE_SIZE_256KB # hexagon
20584 +
20585 + help
20586 + Btrfs is a general purpose copy-on-write filesystem with extents,
20587 +diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
20588 +index a484fb72a01f0..4bc3ca2cbd7d4 100644
20589 +--- a/fs/btrfs/ctree.c
20590 ++++ b/fs/btrfs/ctree.c
20591 +@@ -596,7 +596,6 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
20592 + trans->transid, fs_info->generation);
20593 +
20594 + if (!should_cow_block(trans, root, buf)) {
20595 +- trans->dirty = true;
20596 + *cow_ret = buf;
20597 + return 0;
20598 + }
20599 +@@ -1788,10 +1787,8 @@ again:
20600 + * then we don't want to set the path blocking,
20601 + * so we test it here
20602 + */
20603 +- if (!should_cow_block(trans, root, b)) {
20604 +- trans->dirty = true;
20605 ++ if (!should_cow_block(trans, root, b))
20606 + goto cow_done;
20607 +- }
20608 +
20609 + /*
20610 + * must have write locks on this node and the
20611 +diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
20612 +index 1a88f6214ebc0..3bb8b919d2c19 100644
20613 +--- a/fs/btrfs/delayed-inode.c
20614 ++++ b/fs/btrfs/delayed-inode.c
20615 +@@ -1009,12 +1009,10 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
20616 + nofs_flag = memalloc_nofs_save();
20617 + ret = btrfs_lookup_inode(trans, root, path, &key, mod);
20618 + memalloc_nofs_restore(nofs_flag);
20619 +- if (ret > 0) {
20620 +- btrfs_release_path(path);
20621 +- return -ENOENT;
20622 +- } else if (ret < 0) {
20623 +- return ret;
20624 +- }
20625 ++ if (ret > 0)
20626 ++ ret = -ENOENT;
20627 ++ if (ret < 0)
20628 ++ goto out;
20629 +
20630 + leaf = path->nodes[0];
20631 + inode_item = btrfs_item_ptr(leaf, path->slots[0],
20632 +@@ -1052,6 +1050,14 @@ err_out:
20633 + btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
20634 + btrfs_release_delayed_inode(node);
20635 +
20636 ++ /*
20637 ++ * If we fail to update the delayed inode we need to abort the
20638 ++ * transaction, because we could leave the inode with the improper
20639 ++ * counts behind.
20640 ++ */
20641 ++ if (ret && ret != -ENOENT)
20642 ++ btrfs_abort_transaction(trans, ret);
20643 ++
20644 + return ret;
20645 +
20646 + search:
20647 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
20648 +index 3d5c35e4cb76e..d2f39a122d89d 100644
20649 +--- a/fs/btrfs/extent-tree.c
20650 ++++ b/fs/btrfs/extent-tree.c
20651 +@@ -4784,7 +4784,6 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
20652 + set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
20653 + buf->start + buf->len - 1, GFP_NOFS);
20654 + }
20655 +- trans->dirty = true;
20656 + /* this returns a buffer locked for blocking */
20657 + return buf;
20658 + }
20659 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
20660 +index 46f392943f4d0..9229549697ce7 100644
20661 +--- a/fs/btrfs/inode.c
20662 ++++ b/fs/btrfs/inode.c
20663 +@@ -603,7 +603,7 @@ again:
20664 + * inode has not been flagged as nocompress. This flag can
20665 + * change at any time if we discover bad compression ratios.
20666 + */
20667 +- if (inode_need_compress(BTRFS_I(inode), start, end)) {
20668 ++ if (nr_pages > 1 && inode_need_compress(BTRFS_I(inode), start, end)) {
20669 + WARN_ON(pages);
20670 + pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
20671 + if (!pages) {
20672 +@@ -8390,7 +8390,19 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
20673 + */
20674 + wait_on_page_writeback(page);
20675 +
20676 +- if (offset) {
20677 ++ /*
20678 ++	/*
20679 ++	 * For the subpage case, we have call sites like
20680 ++	 * btrfs_punch_hole_lock_range() which pass a range not aligned to
20681 ++	 * the sectorsize.
20682 ++	 * If the range doesn't cover the full page, we don't need to and
20683 ++	 * shouldn't clear the page extent mapped state, as page->private can
20684 ++	 * still record subpage dirty bits for other parts of the range.
20685 ++	 *
20686 ++	 * For cases that can invalidate the full page even when the range
20687 ++	 * doesn't cover the full page, like invalidating the last page, we're
20688 ++	 * still safe to wait for the ordered extent to finish.
20688 ++ */
20689 ++ if (!(offset == 0 && length == PAGE_SIZE)) {
20690 + btrfs_releasepage(page, GFP_NOFS);
20691 + return;
20692 + }
20693 +diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
20694 +index bd69db72acc5e..a2b3c594379d6 100644
20695 +--- a/fs/btrfs/send.c
20696 ++++ b/fs/btrfs/send.c
20697 +@@ -4064,6 +4064,17 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
20698 + if (ret < 0)
20699 + goto out;
20700 + } else {
20701 ++ /*
20702 ++ * If we previously orphanized a directory that
20703 ++ * collided with a new reference that we already
20704 ++ * processed, recompute the current path because
20705 ++ * that directory may be part of the path.
20706 ++ */
20707 ++ if (orphanized_dir) {
20708 ++ ret = refresh_ref_path(sctx, cur);
20709 ++ if (ret < 0)
20710 ++ goto out;
20711 ++ }
20712 + ret = send_unlink(sctx, cur->full_path);
20713 + if (ret < 0)
20714 + goto out;
20715 +diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
20716 +index 4a396c1147f17..bc613218c8c5b 100644
20717 +--- a/fs/btrfs/super.c
20718 ++++ b/fs/btrfs/super.c
20719 +@@ -299,17 +299,6 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
20720 + struct btrfs_fs_info *fs_info = trans->fs_info;
20721 +
20722 + WRITE_ONCE(trans->aborted, errno);
20723 +- /* Nothing used. The other threads that have joined this
20724 +- * transaction may be able to continue. */
20725 +- if (!trans->dirty && list_empty(&trans->new_bgs)) {
20726 +- const char *errstr;
20727 +-
20728 +- errstr = btrfs_decode_error(errno);
20729 +- btrfs_warn(fs_info,
20730 +- "%s:%d: Aborting unused transaction(%s).",
20731 +- function, line, errstr);
20732 +- return;
20733 +- }
20734 + WRITE_ONCE(trans->transaction->aborted, errno);
20735 + /* Wake up anybody who may be waiting on this transaction */
20736 + wake_up(&fs_info->transaction_wait);
20737 +diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
20738 +index 436ac7b4b3346..4f5b14cd3a199 100644
20739 +--- a/fs/btrfs/sysfs.c
20740 ++++ b/fs/btrfs/sysfs.c
20741 +@@ -429,7 +429,7 @@ static ssize_t btrfs_discard_bitmap_bytes_show(struct kobject *kobj,
20742 + {
20743 + struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj);
20744 +
20745 +- return scnprintf(buf, PAGE_SIZE, "%lld\n",
20746 ++ return scnprintf(buf, PAGE_SIZE, "%llu\n",
20747 + fs_info->discard_ctl.discard_bitmap_bytes);
20748 + }
20749 + BTRFS_ATTR(discard, discard_bitmap_bytes, btrfs_discard_bitmap_bytes_show);
20750 +@@ -451,7 +451,7 @@ static ssize_t btrfs_discard_extent_bytes_show(struct kobject *kobj,
20751 + {
20752 + struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj);
20753 +
20754 +- return scnprintf(buf, PAGE_SIZE, "%lld\n",
20755 ++ return scnprintf(buf, PAGE_SIZE, "%llu\n",
20756 + fs_info->discard_ctl.discard_extent_bytes);
20757 + }
20758 + BTRFS_ATTR(discard, discard_extent_bytes, btrfs_discard_extent_bytes_show);
20759 +diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
20760 +index f75de9f6c0ada..37450c7644ca0 100644
20761 +--- a/fs/btrfs/transaction.c
20762 ++++ b/fs/btrfs/transaction.c
20763 +@@ -1406,8 +1406,10 @@ int btrfs_defrag_root(struct btrfs_root *root)
20764 +
20765 + while (1) {
20766 + trans = btrfs_start_transaction(root, 0);
20767 +- if (IS_ERR(trans))
20768 +- return PTR_ERR(trans);
20769 ++ if (IS_ERR(trans)) {
20770 ++ ret = PTR_ERR(trans);
20771 ++ break;
20772 ++ }
20773 +
20774 + ret = btrfs_defrag_leaves(trans, root);
20775 +
20776 +@@ -1476,7 +1478,7 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
20777 + ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
20778 + if (ret) {
20779 + btrfs_abort_transaction(trans, ret);
20780 +- goto out;
20781 ++ return ret;
20782 + }
20783 +
20784 + /*
20785 +@@ -2074,14 +2076,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
20786 +
20787 + ASSERT(refcount_read(&trans->use_count) == 1);
20788 +
20789 +- /*
20790 +- * Some places just start a transaction to commit it. We need to make
20791 +- * sure that if this commit fails that the abort code actually marks the
20792 +- * transaction as failed, so set trans->dirty to make the abort code do
20793 +- * the right thing.
20794 +- */
20795 +- trans->dirty = true;
20796 +-
20797 + /* Stop the commit early if ->aborted is set */
20798 + if (TRANS_ABORTED(cur_trans)) {
20799 + ret = cur_trans->aborted;
20800 +diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
20801 +index 364cfbb4c5c59..c49e2266b28ba 100644
20802 +--- a/fs/btrfs/transaction.h
20803 ++++ b/fs/btrfs/transaction.h
20804 +@@ -143,7 +143,6 @@ struct btrfs_trans_handle {
20805 + bool allocating_chunk;
20806 + bool can_flush_pending_bgs;
20807 + bool reloc_reserved;
20808 +- bool dirty;
20809 + bool in_fsync;
20810 + struct btrfs_root *root;
20811 + struct btrfs_fs_info *fs_info;
20812 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
20813 +index dbcf8bb2f3b9a..760d950752f51 100644
20814 +--- a/fs/btrfs/tree-log.c
20815 ++++ b/fs/btrfs/tree-log.c
20816 +@@ -6371,6 +6371,7 @@ next:
20817 + error:
20818 + if (wc.trans)
20819 + btrfs_end_transaction(wc.trans);
20820 ++ clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
20821 + btrfs_free_path(path);
20822 + return ret;
20823 + }
20824 +diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
20825 +index f1f3b10d1dbbe..c7243d392ca8e 100644
20826 +--- a/fs/btrfs/zoned.c
20827 ++++ b/fs/btrfs/zoned.c
20828 +@@ -1140,6 +1140,10 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
20829 + }
20830 +
20831 + if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
20832 ++ btrfs_err_in_rcu(fs_info,
20833 ++ "zoned: unexpected conventional zone %llu on device %s (devid %llu)",
20834 ++ zone.start << SECTOR_SHIFT,
20835 ++ rcu_str_deref(device->name), device->devid);
20836 + ret = -EIO;
20837 + goto out;
20838 + }
20839 +@@ -1200,6 +1204,13 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
20840 +
20841 + switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
20842 + case 0: /* single */
20843 ++ if (alloc_offsets[0] == WP_MISSING_DEV) {
20844 ++ btrfs_err(fs_info,
20845 ++ "zoned: cannot recover write pointer for zone %llu",
20846 ++ physical);
20847 ++ ret = -EIO;
20848 ++ goto out;
20849 ++ }
20850 + cache->alloc_offset = alloc_offsets[0];
20851 + break;
20852 + case BTRFS_BLOCK_GROUP_DUP:
20853 +@@ -1217,6 +1228,13 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
20854 + }
20855 +
20856 + out:
20857 ++ if (cache->alloc_offset > fs_info->zone_size) {
20858 ++ btrfs_err(fs_info,
20859 ++ "zoned: invalid write pointer %llu in block group %llu",
20860 ++ cache->alloc_offset, cache->start);
20861 ++ ret = -EIO;
20862 ++ }
20863 ++
20864 + /* An extent is allocated after the write pointer */
20865 + if (!ret && num_conventional && last_alloc > cache->alloc_offset) {
20866 + btrfs_err(fs_info,
20867 +diff --git a/fs/cifs/cifs_swn.c b/fs/cifs/cifs_swn.c
20868 +index d829b8bf833e3..93b47818c6c2d 100644
20869 +--- a/fs/cifs/cifs_swn.c
20870 ++++ b/fs/cifs/cifs_swn.c
20871 +@@ -447,15 +447,13 @@ static int cifs_swn_store_swn_addr(const struct sockaddr_storage *new,
20872 + const struct sockaddr_storage *old,
20873 + struct sockaddr_storage *dst)
20874 + {
20875 +- __be16 port;
20876 ++ __be16 port = cpu_to_be16(CIFS_PORT);
20877 +
20878 + if (old->ss_family == AF_INET) {
20879 + struct sockaddr_in *ipv4 = (struct sockaddr_in *)old;
20880 +
20881 + port = ipv4->sin_port;
20882 +- }
20883 +-
20884 +- if (old->ss_family == AF_INET6) {
20885 ++ } else if (old->ss_family == AF_INET6) {
20886 + struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)old;
20887 +
20888 + port = ipv6->sin6_port;
20889 +@@ -465,9 +463,7 @@ static int cifs_swn_store_swn_addr(const struct sockaddr_storage *new,
20890 + struct sockaddr_in *ipv4 = (struct sockaddr_in *)new;
20891 +
20892 + ipv4->sin_port = port;
20893 +- }
20894 +-
20895 +- if (new->ss_family == AF_INET6) {
20896 ++ } else if (new->ss_family == AF_INET6) {
20897 + struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)new;
20898 +
20899 + ipv6->sin6_port = port;
20900 +diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
20901 +index 784407f9280fd..a18dee071fcd2 100644
20902 +--- a/fs/cifs/cifsacl.c
20903 ++++ b/fs/cifs/cifsacl.c
20904 +@@ -1308,7 +1308,7 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
20905 + ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset);
20906 + ndacl_ptr->revision =
20907 + dacloffset ? dacl_ptr->revision : cpu_to_le16(ACL_REVISION);
20908 +- ndacl_ptr->num_aces = dacl_ptr->num_aces;
20909 ++ ndacl_ptr->num_aces = dacl_ptr ? dacl_ptr->num_aces : 0;
20910 +
20911 + if (uid_valid(uid)) { /* chown */
20912 + uid_t id;
20913 +diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
20914 +index 8488d70244620..706a2aeba1dec 100644
20915 +--- a/fs/cifs/cifsglob.h
20916 ++++ b/fs/cifs/cifsglob.h
20917 +@@ -896,7 +896,7 @@ struct cifs_ses {
20918 + struct mutex session_mutex;
20919 + struct TCP_Server_Info *server; /* pointer to server info */
20920 + int ses_count; /* reference counter */
20921 +- enum statusEnum status;
20922 ++ enum statusEnum status; /* updates protected by GlobalMid_Lock */
20923 + unsigned overrideSecFlg; /* if non-zero override global sec flags */
20924 + char *serverOS; /* name of operating system underlying server */
20925 + char *serverNOS; /* name of network operating system of server */
20926 +@@ -1795,6 +1795,7 @@ require use of the stronger protocol */
20927 + * list operations on pending_mid_q and oplockQ
20928 + * updates to XID counters, multiplex id and SMB sequence numbers
20929 + * list operations on global DnotifyReqList
20930 ++ * updates to ses->status
20931 + * tcp_ses_lock protects:
20932 + * list operations on tcp and SMB session lists
20933 + * tcon->open_file_lock protects the list of open files hanging off the tcon
20934 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
20935 +index 495c395f9defd..eb6c10fa67410 100644
20936 +--- a/fs/cifs/connect.c
20937 ++++ b/fs/cifs/connect.c
20938 +@@ -1617,9 +1617,12 @@ void cifs_put_smb_ses(struct cifs_ses *ses)
20939 + spin_unlock(&cifs_tcp_ses_lock);
20940 + return;
20941 + }
20942 ++ spin_unlock(&cifs_tcp_ses_lock);
20943 ++
20944 ++ spin_lock(&GlobalMid_Lock);
20945 + if (ses->status == CifsGood)
20946 + ses->status = CifsExiting;
20947 +- spin_unlock(&cifs_tcp_ses_lock);
20948 ++ spin_unlock(&GlobalMid_Lock);
20949 +
20950 + cifs_free_ipc(ses);
20951 +
20952 +diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
20953 +index b1fa30fefe1f6..8e16ee1e5fd10 100644
20954 +--- a/fs/cifs/dfs_cache.c
20955 ++++ b/fs/cifs/dfs_cache.c
20956 +@@ -25,8 +25,7 @@
20957 + #define CACHE_HTABLE_SIZE 32
20958 + #define CACHE_MAX_ENTRIES 64
20959 +
20960 +-#define IS_INTERLINK_SET(v) ((v) & (DFSREF_REFERRAL_SERVER | \
20961 +- DFSREF_STORAGE_SERVER))
20962 ++#define IS_DFS_INTERLINK(v) (((v) & DFSREF_REFERRAL_SERVER) && !((v) & DFSREF_STORAGE_SERVER))
20963 +
20964 + struct cache_dfs_tgt {
20965 + char *name;
20966 +@@ -171,7 +170,7 @@ static int dfscache_proc_show(struct seq_file *m, void *v)
20967 + "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
20968 + ce->path, ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
20969 + ce->ttl, ce->etime.tv_nsec, ce->ref_flags, ce->hdr_flags,
20970 +- IS_INTERLINK_SET(ce->hdr_flags) ? "yes" : "no",
20971 ++ IS_DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
20972 + ce->path_consumed, cache_entry_expired(ce) ? "yes" : "no");
20973 +
20974 + list_for_each_entry(t, &ce->tlist, list) {
20975 +@@ -240,7 +239,7 @@ static inline void dump_ce(const struct cache_entry *ce)
20976 + ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
20977 + ce->etime.tv_nsec,
20978 + ce->hdr_flags, ce->ref_flags,
20979 +- IS_INTERLINK_SET(ce->hdr_flags) ? "yes" : "no",
20980 ++ IS_DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
20981 + ce->path_consumed,
20982 + cache_entry_expired(ce) ? "yes" : "no");
20983 + dump_tgts(ce);
20984 +diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
20985 +index 6bcd3e8f7cdae..7c641f9a3dac2 100644
20986 +--- a/fs/cifs/dir.c
20987 ++++ b/fs/cifs/dir.c
20988 +@@ -630,6 +630,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
20989 + struct inode *newInode = NULL;
20990 + const char *full_path;
20991 + void *page;
20992 ++ int retry_count = 0;
20993 +
20994 + xid = get_xid();
20995 +
20996 +@@ -673,6 +674,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
20997 + cifs_dbg(FYI, "Full path: %s inode = 0x%p\n",
20998 + full_path, d_inode(direntry));
20999 +
21000 ++again:
21001 + if (pTcon->posix_extensions)
21002 + rc = smb311_posix_get_inode_info(&newInode, full_path, parent_dir_inode->i_sb, xid);
21003 + else if (pTcon->unix_ext) {
21004 +@@ -687,6 +689,8 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
21005 + /* since paths are not looked up by component - the parent
21006 + directories are presumed to be good here */
21007 + renew_parental_timestamps(direntry);
21008 ++ } else if (rc == -EAGAIN && retry_count++ < 10) {
21009 ++ goto again;
21010 + } else if (rc == -ENOENT) {
21011 + cifs_set_time(direntry, jiffies);
21012 + newInode = NULL;
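The cifs_lookup() change above retries the inode query when the server returns -EAGAIN (up to ten extra attempts, e.g. across a reconnect) instead of failing the lookup outright. The control flow reduces to a bounded retry loop; a generic sketch, with a hypothetical operation standing in for the inode query:

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for the inode query; fails twice, then succeeds. */
static int query_inode(void)
{
	static int calls;

	return calls++ < 2 ? -EAGAIN : 0;
}

static int lookup_with_retry(void)
{
	int rc, retry_count = 0;

	do {
		rc = query_inode();
	} while (rc == -EAGAIN && retry_count++ < 10);

	return rc;
}

int main(void)
{
	printf("lookup rc = %d\n", lookup_with_retry());	/* 0 after retries */
	return 0;
}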
21013 +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
21014 +index 1dfa57982522b..f60f068d33e86 100644
21015 +--- a/fs/cifs/inode.c
21016 ++++ b/fs/cifs/inode.c
21017 +@@ -367,9 +367,12 @@ cifs_get_file_info_unix(struct file *filp)
21018 + } else if (rc == -EREMOTE) {
21019 + cifs_create_dfs_fattr(&fattr, inode->i_sb);
21020 + rc = 0;
21021 +- }
21022 ++ } else
21023 ++ goto cifs_gfiunix_out;
21024 +
21025 + rc = cifs_fattr_to_inode(inode, &fattr);
21026 ++
21027 ++cifs_gfiunix_out:
21028 + free_xid(xid);
21029 + return rc;
21030 + }
21031 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
21032 +index 21ef51d338e0c..903de7449aa33 100644
21033 +--- a/fs/cifs/smb2ops.c
21034 ++++ b/fs/cifs/smb2ops.c
21035 +@@ -2325,6 +2325,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
21036 + struct smb2_query_directory_rsp *qd_rsp = NULL;
21037 + struct smb2_create_rsp *op_rsp = NULL;
21038 + struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
21039 ++ int retry_count = 0;
21040 +
21041 + utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
21042 + if (!utf16_path)
21043 +@@ -2372,10 +2373,14 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
21044 +
21045 + smb2_set_related(&rqst[1]);
21046 +
21047 ++again:
21048 + rc = compound_send_recv(xid, tcon->ses, server,
21049 + flags, 2, rqst,
21050 + resp_buftype, rsp_iov);
21051 +
21052 ++ if (rc == -EAGAIN && retry_count++ < 10)
21053 ++ goto again;
21054 ++
21055 + /* If the open failed there is nothing to do */
21056 + op_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
21057 + if (op_rsp == NULL || op_rsp->sync_hdr.Status != STATUS_SUCCESS) {
21058 +@@ -3601,6 +3606,119 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
21059 + return rc;
21060 + }
21061 +
21062 ++static int smb3_simple_fallocate_write_range(unsigned int xid,
21063 ++ struct cifs_tcon *tcon,
21064 ++ struct cifsFileInfo *cfile,
21065 ++ loff_t off, loff_t len,
21066 ++ char *buf)
21067 ++{
21068 ++ struct cifs_io_parms io_parms = {0};
21069 ++ int nbytes;
21070 ++ struct kvec iov[2];
21071 ++
21072 ++ io_parms.netfid = cfile->fid.netfid;
21073 ++ io_parms.pid = current->tgid;
21074 ++ io_parms.tcon = tcon;
21075 ++ io_parms.persistent_fid = cfile->fid.persistent_fid;
21076 ++ io_parms.volatile_fid = cfile->fid.volatile_fid;
21077 ++ io_parms.offset = off;
21078 ++ io_parms.length = len;
21079 ++
21080 ++ /* iov[0] is reserved for smb header */
21081 ++ iov[1].iov_base = buf;
21082 ++ iov[1].iov_len = io_parms.length;
21083 ++ return SMB2_write(xid, &io_parms, &nbytes, iov, 1);
21084 ++}
21085 ++
21086 ++static int smb3_simple_fallocate_range(unsigned int xid,
21087 ++ struct cifs_tcon *tcon,
21088 ++ struct cifsFileInfo *cfile,
21089 ++ loff_t off, loff_t len)
21090 ++{
21091 ++ struct file_allocated_range_buffer in_data, *out_data = NULL, *tmp_data;
21092 ++ u32 out_data_len;
21093 ++ char *buf = NULL;
21094 ++ loff_t l;
21095 ++ int rc;
21096 ++
21097 ++ in_data.file_offset = cpu_to_le64(off);
21098 ++ in_data.length = cpu_to_le64(len);
21099 ++ rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
21100 ++ cfile->fid.volatile_fid,
21101 ++ FSCTL_QUERY_ALLOCATED_RANGES, true,
21102 ++ (char *)&in_data, sizeof(in_data),
21103 ++ 1024 * sizeof(struct file_allocated_range_buffer),
21104 ++ (char **)&out_data, &out_data_len);
21105 ++ if (rc)
21106 ++ goto out;
21107 ++ /*
21108 ++ * It is already all allocated
21109 ++ */
21110 ++ if (out_data_len == 0)
21111 ++ goto out;
21112 ++
21113 ++ buf = kzalloc(1024 * 1024, GFP_KERNEL);
21114 ++ if (buf == NULL) {
21115 ++ rc = -ENOMEM;
21116 ++ goto out;
21117 ++ }
21118 ++
21119 ++ tmp_data = out_data;
21120 ++ while (len) {
21121 ++ /*
21122 ++ * The rest of the region is unmapped so write it all.
21123 ++ */
21124 ++ if (out_data_len == 0) {
21125 ++ rc = smb3_simple_fallocate_write_range(xid, tcon,
21126 ++ cfile, off, len, buf);
21127 ++ goto out;
21128 ++ }
21129 ++
21130 ++ if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
21131 ++ rc = -EINVAL;
21132 ++ goto out;
21133 ++ }
21134 ++
21135 ++ if (off < le64_to_cpu(tmp_data->file_offset)) {
21136 ++ /*
21137 ++ * We are at a hole. Write until the end of the region
21138 ++ * or until the next allocated data,
21139 ++			 * whichever comes first.
21140 ++ */
21141 ++ l = le64_to_cpu(tmp_data->file_offset) - off;
21142 ++ if (len < l)
21143 ++ l = len;
21144 ++ rc = smb3_simple_fallocate_write_range(xid, tcon,
21145 ++ cfile, off, l, buf);
21146 ++ if (rc)
21147 ++ goto out;
21148 ++ off = off + l;
21149 ++ len = len - l;
21150 ++ if (len == 0)
21151 ++ goto out;
21152 ++ }
21153 ++ /*
21154 ++ * We are at a section of allocated data, just skip forward
21155 ++ * until the end of the data or the end of the region
21156 ++ * we are supposed to fallocate, whichever comes first.
21157 ++ */
21158 ++ l = le64_to_cpu(tmp_data->length);
21159 ++ if (len < l)
21160 ++ l = len;
21161 ++ off += l;
21162 ++ len -= l;
21163 ++
21164 ++ tmp_data = &tmp_data[1];
21165 ++ out_data_len -= sizeof(struct file_allocated_range_buffer);
21166 ++ }
21167 ++
21168 ++ out:
21169 ++ kfree(out_data);
21170 ++ kfree(buf);
21171 ++ return rc;
21172 ++}
21173 ++
21174 ++
21175 + static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
21176 + loff_t off, loff_t len, bool keep_size)
21177 + {
21178 +@@ -3661,6 +3779,26 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
21179 + }
21180 +
21181 + if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
21182 ++ /*
21183 ++		/*
21184 ++		 * At this point, we are trying to fallocate an internal
21185 ++		 * region of a sparse file. Since smb2 does not have a
21186 ++		 * fallocate command we have two options on how to emulate this.
21187 ++		 * We can either turn the entire file non-sparse,
21188 ++		 * which we only do if the fallocate covers virtually
21189 ++		 * the whole file, or we can overwrite the region with zeroes
21190 ++		 * using SMB2_write, which could be prohibitively expensive
21191 ++		 * if len is large.
21192 ++		 */
21193 ++		/*
21194 ++		 * We are only trying to fallocate a small region, so
21195 ++		 * just overwrite it with zeroes.
21195 ++ */
21196 ++ if (len <= 1024 * 1024) {
21197 ++ rc = smb3_simple_fallocate_range(xid, tcon, cfile,
21198 ++ off, len);
21199 ++ goto out;
21200 ++ }
21201 ++
21202 + /*
21203 + * Check if falloc starts within first few pages of file
21204 + * and ends within a few pages of the end of file to
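The new smb3_simple_fallocate_range() above asks the server for the allocated ranges inside [off, off+len) via FSCTL_QUERY_ALLOCATED_RANGES, then writes zeroes over every hole and skips every allocated extent. A condensed standalone model of that walk over an in-memory extent list (hypothetical types; the SMB2 writes become printfs):

#include <stdio.h>

/* stands in for file_allocated_range_buffer */
struct extent { long long off, len; };

/* Walk [off, off+len): zero-fill holes, skip allocated extents. */
static void fallocate_walk(long long off, long long len,
			   const struct extent *ext, int nr_ext)
{
	while (len) {
		if (nr_ext == 0) {		/* rest of the region is a hole */
			printf("zero-fill %lld..%lld\n", off, off + len);
			return;
		}
		if (off < ext->off) {		/* hole before the next extent */
			long long l = ext->off - off;

			if (len < l)
				l = len;
			printf("zero-fill %lld..%lld\n", off, off + l);
			off += l;
			len -= l;
			if (!len)
				return;
		}
		/* skip forward over the allocated data */
		long long l = ext->len;

		if (len < l)
			l = len;
		off += l;
		len -= l;
		ext++;
		nr_ext--;
	}
}

int main(void)
{
	const struct extent ext[] = { { 100, 50 }, { 300, 20 } };

	/* prints holes at 0..100, 150..300 and 320..400 */
	fallocate_walk(0, 400, ext, 2);
	return 0;
}

Note the same two-phase step as the kernel loop: fill the hole up to the next extent, then advance past the extent, until the region is consumed.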
21205 +diff --git a/fs/configfs/file.c b/fs/configfs/file.c
21206 +index e26060dae70a3..b4b0fbabd62e2 100644
21207 +--- a/fs/configfs/file.c
21208 ++++ b/fs/configfs/file.c
21209 +@@ -480,13 +480,13 @@ static int configfs_release_bin_file(struct inode *inode, struct file *file)
21210 + buffer->bin_buffer_size);
21211 + }
21212 + up_read(&frag->frag_sem);
21213 +- /* vfree on NULL is safe */
21214 +- vfree(buffer->bin_buffer);
21215 +- buffer->bin_buffer = NULL;
21216 +- buffer->bin_buffer_size = 0;
21217 +- buffer->needs_read_fill = 1;
21218 + }
21219 +
21220 ++ vfree(buffer->bin_buffer);
21221 ++ buffer->bin_buffer = NULL;
21222 ++ buffer->bin_buffer_size = 0;
21223 ++ buffer->needs_read_fill = 1;
21224 ++
21225 + configfs_release(inode, file);
21226 + return 0;
21227 + }
21228 +diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
21229 +index 6ca7d16593ff6..d00455440d087 100644
21230 +--- a/fs/crypto/fname.c
21231 ++++ b/fs/crypto/fname.c
21232 +@@ -344,13 +344,9 @@ int fscrypt_fname_disk_to_usr(const struct inode *inode,
21233 + offsetof(struct fscrypt_nokey_name, sha256));
21234 + BUILD_BUG_ON(BASE64_CHARS(FSCRYPT_NOKEY_NAME_MAX) > NAME_MAX);
21235 +
21236 +- if (hash) {
21237 +- nokey_name.dirhash[0] = hash;
21238 +- nokey_name.dirhash[1] = minor_hash;
21239 +- } else {
21240 +- nokey_name.dirhash[0] = 0;
21241 +- nokey_name.dirhash[1] = 0;
21242 +- }
21243 ++ nokey_name.dirhash[0] = hash;
21244 ++ nokey_name.dirhash[1] = minor_hash;
21245 ++
21246 + if (iname->len <= sizeof(nokey_name.bytes)) {
21247 + memcpy(nokey_name.bytes, iname->name, iname->len);
21248 + size = offsetof(struct fscrypt_nokey_name, bytes[iname->len]);
21249 +diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c
21250 +index 261293fb70974..bca9c6658a7c5 100644
21251 +--- a/fs/crypto/keysetup.c
21252 ++++ b/fs/crypto/keysetup.c
21253 +@@ -210,15 +210,40 @@ out_unlock:
21254 + return err;
21255 + }
21256 +
21257 ++/*
21258 ++ * Derive a SipHash key from the given fscrypt master key and the given
21259 ++ * application-specific information string.
21260 ++ *
21261 ++ * Note that the KDF produces a byte array, but the SipHash APIs expect the key
21262 ++ * as a pair of 64-bit words. Therefore, on big endian CPUs we have to do an
21263 ++ * endianness swap in order to get the same results as on little endian CPUs.
21264 ++ */
21265 ++static int fscrypt_derive_siphash_key(const struct fscrypt_master_key *mk,
21266 ++ u8 context, const u8 *info,
21267 ++ unsigned int infolen, siphash_key_t *key)
21268 ++{
21269 ++ int err;
21270 ++
21271 ++ err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, context, info, infolen,
21272 ++ (u8 *)key, sizeof(*key));
21273 ++ if (err)
21274 ++ return err;
21275 ++
21276 ++ BUILD_BUG_ON(sizeof(*key) != 16);
21277 ++ BUILD_BUG_ON(ARRAY_SIZE(key->key) != 2);
21278 ++ le64_to_cpus(&key->key[0]);
21279 ++ le64_to_cpus(&key->key[1]);
21280 ++ return 0;
21281 ++}
21282 ++
21283 + int fscrypt_derive_dirhash_key(struct fscrypt_info *ci,
21284 + const struct fscrypt_master_key *mk)
21285 + {
21286 + int err;
21287 +
21288 +- err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, HKDF_CONTEXT_DIRHASH_KEY,
21289 +- ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE,
21290 +- (u8 *)&ci->ci_dirhash_key,
21291 +- sizeof(ci->ci_dirhash_key));
21292 ++ err = fscrypt_derive_siphash_key(mk, HKDF_CONTEXT_DIRHASH_KEY,
21293 ++ ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE,
21294 ++ &ci->ci_dirhash_key);
21295 + if (err)
21296 + return err;
21297 + ci->ci_dirhash_key_initialized = true;
21298 +@@ -253,10 +278,9 @@ static int fscrypt_setup_iv_ino_lblk_32_key(struct fscrypt_info *ci,
21299 + if (mk->mk_ino_hash_key_initialized)
21300 + goto unlock;
21301 +
21302 +- err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf,
21303 +- HKDF_CONTEXT_INODE_HASH_KEY, NULL, 0,
21304 +- (u8 *)&mk->mk_ino_hash_key,
21305 +- sizeof(mk->mk_ino_hash_key));
21306 ++ err = fscrypt_derive_siphash_key(mk,
21307 ++ HKDF_CONTEXT_INODE_HASH_KEY,
21308 ++ NULL, 0, &mk->mk_ino_hash_key);
21309 + if (err)
21310 + goto unlock;
21311 + /* pairs with smp_load_acquire() above */
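The fscrypt_derive_siphash_key() helper above exists because HKDF emits a byte string while siphash_key_t holds two u64 words; fixing the interpretation as little endian keeps dirhash values identical across CPU endiannesses. A small standalone illustration of that byte-to-word convention (plain C, no kernel APIs):

#include <stdint.h>
#include <stdio.h>

/* Interpret a 16-byte KDF output as two little-endian 64-bit words,
 * matching what le64_to_cpus() achieves on a big endian kernel. */
static void kdf_bytes_to_siphash_key(const uint8_t raw[16], uint64_t key[2])
{
	for (int w = 0; w < 2; w++) {
		uint64_t v = 0;

		for (int b = 7; b >= 0; b--)
			v = (v << 8) | raw[w * 8 + b];
		key[w] = v;
	}
}

int main(void)
{
	const uint8_t raw[16] = { 1, 2, 3, 4, 5, 6, 7, 8,
				  9, 10, 11, 12, 13, 14, 15, 16 };
	uint64_t key[2];

	kdf_bytes_to_siphash_key(raw, key);
	/* same two words regardless of the host CPU's endianness */
	printf("%016llx %016llx\n",
	       (unsigned long long)key[0], (unsigned long long)key[1]);
	return 0;
}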
21312 +diff --git a/fs/dax.c b/fs/dax.c
21313 +index 62352cbcf0f40..da41f9363568e 100644
21314 +--- a/fs/dax.c
21315 ++++ b/fs/dax.c
21316 +@@ -488,10 +488,11 @@ static void *grab_mapping_entry(struct xa_state *xas,
21317 + struct address_space *mapping, unsigned int order)
21318 + {
21319 + unsigned long index = xas->xa_index;
21320 +- bool pmd_downgrade = false; /* splitting PMD entry into PTE entries? */
21321 ++ bool pmd_downgrade; /* splitting PMD entry into PTE entries? */
21322 + void *entry;
21323 +
21324 + retry:
21325 ++ pmd_downgrade = false;
21326 + xas_lock_irq(xas);
21327 + entry = get_unlocked_entry(xas, order);
21328 +
21329 +diff --git a/fs/dlm/config.c b/fs/dlm/config.c
21330 +index 88d95d96e36c5..52bcda64172aa 100644
21331 +--- a/fs/dlm/config.c
21332 ++++ b/fs/dlm/config.c
21333 +@@ -79,6 +79,9 @@ struct dlm_cluster {
21334 + unsigned int cl_new_rsb_count;
21335 + unsigned int cl_recover_callbacks;
21336 + char cl_cluster_name[DLM_LOCKSPACE_LEN];
21337 ++
21338 ++ struct dlm_spaces *sps;
21339 ++ struct dlm_comms *cms;
21340 + };
21341 +
21342 + static struct dlm_cluster *config_item_to_cluster(struct config_item *i)
21343 +@@ -409,6 +412,9 @@ static struct config_group *make_cluster(struct config_group *g,
21344 + if (!cl || !sps || !cms)
21345 + goto fail;
21346 +
21347 ++ cl->sps = sps;
21348 ++ cl->cms = cms;
21349 ++
21350 + config_group_init_type_name(&cl->group, name, &cluster_type);
21351 + config_group_init_type_name(&sps->ss_group, "spaces", &spaces_type);
21352 + config_group_init_type_name(&cms->cs_group, "comms", &comms_type);
21353 +@@ -458,6 +464,9 @@ static void drop_cluster(struct config_group *g, struct config_item *i)
21354 + static void release_cluster(struct config_item *i)
21355 + {
21356 + struct dlm_cluster *cl = config_item_to_cluster(i);
21357 ++
21358 ++ kfree(cl->sps);
21359 ++ kfree(cl->cms);
21360 + kfree(cl);
21361 + }
21362 +
21363 +diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
21364 +index 166e36fcf3e4c..9bf920bee292e 100644
21365 +--- a/fs/dlm/lowcomms.c
21366 ++++ b/fs/dlm/lowcomms.c
21367 +@@ -79,14 +79,20 @@ struct connection {
21368 + #define CF_CLOSING 8
21369 + #define CF_SHUTDOWN 9
21370 + #define CF_CONNECTED 10
21371 ++#define CF_RECONNECT 11
21372 ++#define CF_DELAY_CONNECT 12
21373 ++#define CF_EOF 13
21374 + struct list_head writequeue; /* List of outgoing writequeue_entries */
21375 + spinlock_t writequeue_lock;
21376 ++ atomic_t writequeue_cnt;
21377 + void (*connect_action) (struct connection *); /* What to do to connect */
21378 + void (*shutdown_action)(struct connection *con); /* What to do to shutdown */
21379 ++ bool (*eof_condition)(struct connection *con); /* What to do to eof check */
21380 + int retries;
21381 + #define MAX_CONNECT_RETRIES 3
21382 + struct hlist_node list;
21383 + struct connection *othercon;
21384 ++ struct connection *sendcon;
21385 + struct work_struct rwork; /* Receive workqueue */
21386 + struct work_struct swork; /* Send workqueue */
21387 + wait_queue_head_t shutdown_wait; /* wait for graceful shutdown */
21388 +@@ -113,6 +119,7 @@ struct writequeue_entry {
21389 + int len;
21390 + int end;
21391 + int users;
21392 ++ int idx; /* get()/commit() idx exchange */
21393 + struct connection *con;
21394 + };
21395 +
21396 +@@ -163,25 +170,23 @@ static inline int nodeid_hash(int nodeid)
21397 + return nodeid & (CONN_HASH_SIZE-1);
21398 + }
21399 +
21400 +-static struct connection *__find_con(int nodeid)
21401 ++static struct connection *__find_con(int nodeid, int r)
21402 + {
21403 +- int r, idx;
21404 + struct connection *con;
21405 +
21406 +- r = nodeid_hash(nodeid);
21407 +-
21408 +- idx = srcu_read_lock(&connections_srcu);
21409 + hlist_for_each_entry_rcu(con, &connection_hash[r], list) {
21410 +- if (con->nodeid == nodeid) {
21411 +- srcu_read_unlock(&connections_srcu, idx);
21412 ++ if (con->nodeid == nodeid)
21413 + return con;
21414 +- }
21415 + }
21416 +- srcu_read_unlock(&connections_srcu, idx);
21417 +
21418 + return NULL;
21419 + }
21420 +
21421 ++static bool tcp_eof_condition(struct connection *con)
21422 ++{
21423 ++ return atomic_read(&con->writequeue_cnt);
21424 ++}
21425 ++
21426 + static int dlm_con_init(struct connection *con, int nodeid)
21427 + {
21428 + con->rx_buflen = dlm_config.ci_buffer_size;
21429 +@@ -193,6 +198,7 @@ static int dlm_con_init(struct connection *con, int nodeid)
21430 + mutex_init(&con->sock_mutex);
21431 + INIT_LIST_HEAD(&con->writequeue);
21432 + spin_lock_init(&con->writequeue_lock);
21433 ++ atomic_set(&con->writequeue_cnt, 0);
21434 + INIT_WORK(&con->swork, process_send_sockets);
21435 + INIT_WORK(&con->rwork, process_recv_sockets);
21436 + init_waitqueue_head(&con->shutdown_wait);
21437 +@@ -200,6 +206,7 @@ static int dlm_con_init(struct connection *con, int nodeid)
21438 + if (dlm_config.ci_protocol == 0) {
21439 + con->connect_action = tcp_connect_to_sock;
21440 + con->shutdown_action = dlm_tcp_shutdown;
21441 ++ con->eof_condition = tcp_eof_condition;
21442 + } else {
21443 + con->connect_action = sctp_connect_to_sock;
21444 + }
21445 +@@ -216,7 +223,8 @@ static struct connection *nodeid2con(int nodeid, gfp_t alloc)
21446 + struct connection *con, *tmp;
21447 + int r, ret;
21448 +
21449 +- con = __find_con(nodeid);
21450 ++ r = nodeid_hash(nodeid);
21451 ++ con = __find_con(nodeid, r);
21452 + if (con || !alloc)
21453 + return con;
21454 +
21455 +@@ -230,8 +238,6 @@ static struct connection *nodeid2con(int nodeid, gfp_t alloc)
21456 + return NULL;
21457 + }
21458 +
21459 +- r = nodeid_hash(nodeid);
21460 +-
21461 + spin_lock(&connections_lock);
21462 + /* Because multiple workqueues/threads calls this function it can
21463 + * race on multiple cpu's. Instead of locking hot path __find_con()
21464 +@@ -239,7 +245,7 @@ static struct connection *nodeid2con(int nodeid, gfp_t alloc)
21465 + * under protection of connections_lock. If this is the case we
21466 + * abort our connection creation and return the existing connection.
21467 + */
21468 +- tmp = __find_con(nodeid);
21469 ++ tmp = __find_con(nodeid, r);
21470 + if (tmp) {
21471 + spin_unlock(&connections_lock);
21472 + kfree(con->rx_buf);
21473 +@@ -256,15 +262,13 @@ static struct connection *nodeid2con(int nodeid, gfp_t alloc)
21474 + /* Loop round all connections */
21475 + static void foreach_conn(void (*conn_func)(struct connection *c))
21476 + {
21477 +- int i, idx;
21478 ++ int i;
21479 + struct connection *con;
21480 +
21481 +- idx = srcu_read_lock(&connections_srcu);
21482 + for (i = 0; i < CONN_HASH_SIZE; i++) {
21483 + hlist_for_each_entry_rcu(con, &connection_hash[i], list)
21484 + conn_func(con);
21485 + }
21486 +- srcu_read_unlock(&connections_srcu, idx);
21487 + }
21488 +
21489 + static struct dlm_node_addr *find_node_addr(int nodeid)
21490 +@@ -518,14 +522,21 @@ static void lowcomms_state_change(struct sock *sk)
21491 + int dlm_lowcomms_connect_node(int nodeid)
21492 + {
21493 + struct connection *con;
21494 ++ int idx;
21495 +
21496 + if (nodeid == dlm_our_nodeid())
21497 + return 0;
21498 +
21499 ++ idx = srcu_read_lock(&connections_srcu);
21500 + con = nodeid2con(nodeid, GFP_NOFS);
21501 +- if (!con)
21502 ++ if (!con) {
21503 ++ srcu_read_unlock(&connections_srcu, idx);
21504 + return -ENOMEM;
21505 ++ }
21506 ++
21507 + lowcomms_connect_sock(con);
21508 ++ srcu_read_unlock(&connections_srcu, idx);
21509 ++
21510 + return 0;
21511 + }
21512 +
21513 +@@ -587,6 +598,22 @@ static void lowcomms_error_report(struct sock *sk)
21514 + dlm_config.ci_tcp_port, sk->sk_err,
21515 + sk->sk_err_soft);
21516 + }
21517 ++
21518 ++	/* the handling below applies to the sendcon only */
21519 ++ if (test_bit(CF_IS_OTHERCON, &con->flags))
21520 ++ con = con->sendcon;
21521 ++
21522 ++ switch (sk->sk_err) {
21523 ++ case ECONNREFUSED:
21524 ++ set_bit(CF_DELAY_CONNECT, &con->flags);
21525 ++ break;
21526 ++ default:
21527 ++ break;
21528 ++ }
21529 ++
21530 ++ if (!test_and_set_bit(CF_RECONNECT, &con->flags))
21531 ++ queue_work(send_workqueue, &con->swork);
21532 ++
21533 + out:
21534 + read_unlock_bh(&sk->sk_callback_lock);
21535 + if (orig_report)
21536 +@@ -698,12 +725,15 @@ static void close_connection(struct connection *con, bool and_other,
21537 +
21538 + if (con->othercon && and_other) {
21539 + /* Will only re-enter once. */
21540 +- close_connection(con->othercon, false, true, true);
21541 ++ close_connection(con->othercon, false, tx, rx);
21542 + }
21543 +
21544 + con->rx_leftover = 0;
21545 + con->retries = 0;
21546 + clear_bit(CF_CONNECTED, &con->flags);
21547 ++ clear_bit(CF_DELAY_CONNECT, &con->flags);
21548 ++ clear_bit(CF_RECONNECT, &con->flags);
21549 ++ clear_bit(CF_EOF, &con->flags);
21550 + mutex_unlock(&con->sock_mutex);
21551 + clear_bit(CF_CLOSING, &con->flags);
21552 + }
21553 +@@ -841,19 +871,26 @@ out_resched:
21554 + return -EAGAIN;
21555 +
21556 + out_close:
21557 +- mutex_unlock(&con->sock_mutex);
21558 +- if (ret != -EAGAIN) {
21559 +- /* Reconnect when there is something to send */
21560 +- close_connection(con, false, true, false);
21561 +- if (ret == 0) {
21562 +- log_print("connection %p got EOF from %d",
21563 +- con, con->nodeid);
21564 ++ if (ret == 0) {
21565 ++ log_print("connection %p got EOF from %d",
21566 ++ con, con->nodeid);
21567 ++
21568 ++ if (con->eof_condition && con->eof_condition(con)) {
21569 ++ set_bit(CF_EOF, &con->flags);
21570 ++ mutex_unlock(&con->sock_mutex);
21571 ++ } else {
21572 ++ mutex_unlock(&con->sock_mutex);
21573 ++ close_connection(con, false, true, false);
21574 ++
21575 + /* handling for tcp shutdown */
21576 + clear_bit(CF_SHUTDOWN, &con->flags);
21577 + wake_up(&con->shutdown_wait);
21578 +- /* signal to breaking receive worker */
21579 +- ret = -1;
21580 + }
21581 ++
21582 ++ /* signal to breaking receive worker */
21583 ++ ret = -1;
21584 ++ } else {
21585 ++ mutex_unlock(&con->sock_mutex);
21586 + }
21587 + return ret;
21588 + }
21589 +@@ -864,7 +901,7 @@ static int accept_from_sock(struct listen_connection *con)
21590 + int result;
21591 + struct sockaddr_storage peeraddr;
21592 + struct socket *newsock;
21593 +- int len;
21594 ++ int len, idx;
21595 + int nodeid;
21596 + struct connection *newcon;
21597 + struct connection *addcon;
21598 +@@ -907,8 +944,10 @@ static int accept_from_sock(struct listen_connection *con)
21599 + * the same time and the connections cross on the wire.
21600 + * In this case we store the incoming one in "othercon"
21601 + */
21602 ++ idx = srcu_read_lock(&connections_srcu);
21603 + newcon = nodeid2con(nodeid, GFP_NOFS);
21604 + if (!newcon) {
21605 ++ srcu_read_unlock(&connections_srcu, idx);
21606 + result = -ENOMEM;
21607 + goto accept_err;
21608 + }
21609 +@@ -924,6 +963,7 @@ static int accept_from_sock(struct listen_connection *con)
21610 + if (!othercon) {
21611 + log_print("failed to allocate incoming socket");
21612 + mutex_unlock(&newcon->sock_mutex);
21613 ++ srcu_read_unlock(&connections_srcu, idx);
21614 + result = -ENOMEM;
21615 + goto accept_err;
21616 + }
21617 +@@ -932,11 +972,13 @@ static int accept_from_sock(struct listen_connection *con)
21618 + if (result < 0) {
21619 + kfree(othercon);
21620 + mutex_unlock(&newcon->sock_mutex);
21621 ++ srcu_read_unlock(&connections_srcu, idx);
21622 + goto accept_err;
21623 + }
21624 +
21625 + lockdep_set_subclass(&othercon->sock_mutex, 1);
21626 + newcon->othercon = othercon;
21627 ++ othercon->sendcon = newcon;
21628 + } else {
21629 + /* close other sock con if we have something new */
21630 + close_connection(othercon, false, true, false);
21631 +@@ -966,6 +1008,8 @@ static int accept_from_sock(struct listen_connection *con)
21632 + if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
21633 + queue_work(recv_workqueue, &addcon->rwork);
21634 +
21635 ++ srcu_read_unlock(&connections_srcu, idx);
21636 ++
21637 + return 0;
21638 +
21639 + accept_err:
21640 +@@ -997,6 +1041,7 @@ static void writequeue_entry_complete(struct writequeue_entry *e, int completed)
21641 +
21642 + if (e->len == 0 && e->users == 0) {
21643 + list_del(&e->list);
21644 ++ atomic_dec(&e->con->writequeue_cnt);
21645 + free_entry(e);
21646 + }
21647 + }
21648 +@@ -1393,6 +1438,7 @@ static struct writequeue_entry *new_wq_entry(struct connection *con, int len,
21649 +
21650 + *ppc = page_address(e->page);
21651 + e->end += len;
21652 ++ atomic_inc(&con->writequeue_cnt);
21653 +
21654 + spin_lock(&con->writequeue_lock);
21655 + list_add_tail(&e->list, &con->writequeue);
21656 +@@ -1403,7 +1449,9 @@ static struct writequeue_entry *new_wq_entry(struct connection *con, int len,
21657 +
21658 + void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
21659 + {
21660 ++ struct writequeue_entry *e;
21661 + struct connection *con;
21662 ++ int idx;
21663 +
21664 + if (len > DEFAULT_BUFFER_SIZE ||
21665 + len < sizeof(struct dlm_header)) {
21666 +@@ -1413,11 +1461,23 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
21667 + return NULL;
21668 + }
21669 +
21670 ++ idx = srcu_read_lock(&connections_srcu);
21671 + con = nodeid2con(nodeid, allocation);
21672 +- if (!con)
21673 ++ if (!con) {
21674 ++ srcu_read_unlock(&connections_srcu, idx);
21675 + return NULL;
21676 ++ }
21677 +
21678 +- return new_wq_entry(con, len, allocation, ppc);
21679 ++ e = new_wq_entry(con, len, allocation, ppc);
21680 ++ if (!e) {
21681 ++ srcu_read_unlock(&connections_srcu, idx);
21682 ++ return NULL;
21683 ++ }
21684 ++
21685 ++	/* we assume that on success, commit will be called */
21686 ++ e->idx = idx;
21687 ++
21688 ++ return e;
21689 + }
21690 +
21691 + void dlm_lowcomms_commit_buffer(void *mh)
21692 +@@ -1435,10 +1495,12 @@ void dlm_lowcomms_commit_buffer(void *mh)
21693 + spin_unlock(&con->writequeue_lock);
21694 +
21695 + queue_work(send_workqueue, &con->swork);
21696 ++ srcu_read_unlock(&connections_srcu, e->idx);
21697 + return;
21698 +
21699 + out:
21700 + spin_unlock(&con->writequeue_lock);
21701 ++ srcu_read_unlock(&connections_srcu, e->idx);
21702 + return;
21703 + }
21704 +
21705 +@@ -1483,7 +1545,7 @@ static void send_to_sock(struct connection *con)
21706 + cond_resched();
21707 + goto out;
21708 + } else if (ret < 0)
21709 +- goto send_error;
21710 ++ goto out;
21711 + }
21712 +
21713 + /* Don't starve people filling buffers */
21714 +@@ -1496,16 +1558,23 @@ static void send_to_sock(struct connection *con)
21715 + writequeue_entry_complete(e, ret);
21716 + }
21717 + spin_unlock(&con->writequeue_lock);
21718 +-out:
21719 +- mutex_unlock(&con->sock_mutex);
21720 ++
21721 ++ /* close if we got EOF */
21722 ++ if (test_and_clear_bit(CF_EOF, &con->flags)) {
21723 ++ mutex_unlock(&con->sock_mutex);
21724 ++ close_connection(con, false, false, true);
21725 ++
21726 ++ /* handling for tcp shutdown */
21727 ++ clear_bit(CF_SHUTDOWN, &con->flags);
21728 ++ wake_up(&con->shutdown_wait);
21729 ++ } else {
21730 ++ mutex_unlock(&con->sock_mutex);
21731 ++ }
21732 ++
21733 + return;
21734 +
21735 +-send_error:
21736 ++out:
21737 + mutex_unlock(&con->sock_mutex);
21738 +- close_connection(con, false, false, true);
21739 +- /* Requeue the send work. When the work daemon runs again, it will try
21740 +- a new connection, then call this function again. */
21741 +- queue_work(send_workqueue, &con->swork);
21742 + return;
21743 +
21744 + out_connect:
21745 +@@ -1532,8 +1601,10 @@ int dlm_lowcomms_close(int nodeid)
21746 + {
21747 + struct connection *con;
21748 + struct dlm_node_addr *na;
21749 ++ int idx;
21750 +
21751 + log_print("closing connection to node %d", nodeid);
21752 ++ idx = srcu_read_lock(&connections_srcu);
21753 + con = nodeid2con(nodeid, 0);
21754 + if (con) {
21755 + set_bit(CF_CLOSE, &con->flags);
21756 +@@ -1542,6 +1613,7 @@ int dlm_lowcomms_close(int nodeid)
21757 + if (con->othercon)
21758 + clean_one_writequeue(con->othercon);
21759 + }
21760 ++ srcu_read_unlock(&connections_srcu, idx);
21761 +
21762 + spin_lock(&dlm_node_addrs_spin);
21763 + na = find_node_addr(nodeid);
21764 +@@ -1579,18 +1651,30 @@ static void process_send_sockets(struct work_struct *work)
21765 + struct connection *con = container_of(work, struct connection, swork);
21766 +
21767 + clear_bit(CF_WRITE_PENDING, &con->flags);
21768 +- if (con->sock == NULL) /* not mutex protected so check it inside too */
21769 ++
21770 ++ if (test_and_clear_bit(CF_RECONNECT, &con->flags))
21771 ++ close_connection(con, false, false, true);
21772 ++
21773 ++ if (con->sock == NULL) { /* not mutex protected so check it inside too */
21774 ++ if (test_and_clear_bit(CF_DELAY_CONNECT, &con->flags))
21775 ++ msleep(1000);
21776 + con->connect_action(con);
21777 ++ }
21778 + if (!list_empty(&con->writequeue))
21779 + send_to_sock(con);
21780 + }
21781 +
21782 + static void work_stop(void)
21783 + {
21784 +- if (recv_workqueue)
21785 ++ if (recv_workqueue) {
21786 + destroy_workqueue(recv_workqueue);
21787 +- if (send_workqueue)
21788 ++ recv_workqueue = NULL;
21789 ++ }
21790 ++
21791 ++ if (send_workqueue) {
21792 + destroy_workqueue(send_workqueue);
21793 ++ send_workqueue = NULL;
21794 ++ }
21795 + }
21796 +
21797 + static int work_start(void)
21798 +@@ -1607,6 +1691,7 @@ static int work_start(void)
21799 + if (!send_workqueue) {
21800 + log_print("can't start dlm_send");
21801 + destroy_workqueue(recv_workqueue);
21802 ++ recv_workqueue = NULL;
21803 + return -ENOMEM;
21804 + }
21805 +
21806 +@@ -1621,6 +1706,8 @@ static void shutdown_conn(struct connection *con)
21807 +
21808 + void dlm_lowcomms_shutdown(void)
21809 + {
21810 ++ int idx;
21811 ++
21812 + /* Set all the flags to prevent any
21813 + * socket activity.
21814 + */
21815 +@@ -1633,7 +1720,9 @@ void dlm_lowcomms_shutdown(void)
21816 +
21817 + dlm_close_sock(&listen_con.sock);
21818 +
21819 ++ idx = srcu_read_lock(&connections_srcu);
21820 + foreach_conn(shutdown_conn);
21821 ++ srcu_read_unlock(&connections_srcu, idx);
21822 + }
21823 +
21824 + static void _stop_conn(struct connection *con, bool and_other)
21825 +@@ -1682,7 +1771,7 @@ static void free_conn(struct connection *con)
21826 +
21827 + static void work_flush(void)
21828 + {
21829 +- int ok, idx;
21830 ++ int ok;
21831 + int i;
21832 + struct connection *con;
21833 +
21834 +@@ -1693,7 +1782,6 @@ static void work_flush(void)
21835 + flush_workqueue(recv_workqueue);
21836 + if (send_workqueue)
21837 + flush_workqueue(send_workqueue);
21838 +- idx = srcu_read_lock(&connections_srcu);
21839 + for (i = 0; i < CONN_HASH_SIZE && ok; i++) {
21840 + hlist_for_each_entry_rcu(con, &connection_hash[i],
21841 + list) {
21842 +@@ -1707,14 +1795,17 @@ static void work_flush(void)
21843 + }
21844 + }
21845 + }
21846 +- srcu_read_unlock(&connections_srcu, idx);
21847 + } while (!ok);
21848 + }
21849 +
21850 + void dlm_lowcomms_stop(void)
21851 + {
21852 ++ int idx;
21853 ++
21854 ++ idx = srcu_read_lock(&connections_srcu);
21855 + work_flush();
21856 + foreach_conn(free_conn);
21857 ++ srcu_read_unlock(&connections_srcu, idx);
21858 + work_stop();
21859 + deinit_local();
21860 + }
21861 +@@ -1738,7 +1829,7 @@ int dlm_lowcomms_start(void)
21862 +
21863 + error = work_start();
21864 + if (error)
21865 +- goto fail;
21866 ++ goto fail_local;
21867 +
21868 + dlm_allow_conn = 1;
21869 +
21870 +@@ -1755,6 +1846,9 @@ int dlm_lowcomms_start(void)
21871 + fail_unlisten:
21872 + dlm_allow_conn = 0;
21873 + dlm_close_sock(&listen_con.sock);
21874 ++ work_stop();
21875 ++fail_local:
21876 ++ deinit_local();
21877 + fail:
21878 + return error;
21879 + }
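/*
 * A minimal sketch of the SRCU handoff pattern the dlm hunks above
 * introduce: the reader index returned by srcu_read_lock() travels
 * inside the object handed back to the caller, and the matching
 * srcu_read_unlock() happens later in the commit path (or immediately
 * on failure). Kernel-style pseudocode with a hypothetical struct
 * entry and alloc callback; not a buildable module.
 */
struct entry {
    int idx;                        /* SRCU reader index, released at commit */
};

void *get_buffer(struct srcu_struct *ss, struct entry *(*alloc)(void))
{
    struct entry *e;
    int idx = srcu_read_lock(ss);   /* pin connection lifetime */

    e = alloc();
    if (!e) {
        srcu_read_unlock(ss, idx);  /* failure: unlock right here */
        return NULL;
    }

    e->idx = idx;   /* success: the caller must call commit_buffer() */
    return e;
}

void commit_buffer(struct srcu_struct *ss, struct entry *e)
{
    srcu_read_unlock(ss, e->idx);   /* matching unlock */
}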
21880 +diff --git a/fs/erofs/super.c b/fs/erofs/super.c
21881 +index bbf3bbd908e08..22991d22af5a2 100644
21882 +--- a/fs/erofs/super.c
21883 ++++ b/fs/erofs/super.c
21884 +@@ -285,6 +285,7 @@ static int erofs_read_superblock(struct super_block *sb)
21885 + goto out;
21886 + }
21887 +
21888 ++ ret = -EINVAL;
21889 + blkszbits = dsb->blkszbits;
21890 + /* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */
21891 + if (blkszbits != LOG_BLOCK_SIZE) {
21892 +diff --git a/fs/exec.c b/fs/exec.c
21893 +index 18594f11c31fe..d7c4187ca023e 100644
21894 +--- a/fs/exec.c
21895 ++++ b/fs/exec.c
21896 +@@ -1360,6 +1360,10 @@ int begin_new_exec(struct linux_binprm * bprm)
21897 + WRITE_ONCE(me->self_exec_id, me->self_exec_id + 1);
21898 + flush_signal_handlers(me, 0);
21899 +
21900 ++ retval = set_cred_ucounts(bprm->cred);
21901 ++ if (retval < 0)
21902 ++ goto out_unlock;
21903 ++
21904 + /*
21905 + * install the new credentials for this executable
21906 + */
21907 +diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c
21908 +index c4523648472a0..cb1c0d8c17141 100644
21909 +--- a/fs/exfat/dir.c
21910 ++++ b/fs/exfat/dir.c
21911 +@@ -63,7 +63,7 @@ static void exfat_get_uniname_from_ext_entry(struct super_block *sb,
21912 + static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_entry *dir_entry)
21913 + {
21914 + int i, dentries_per_clu, dentries_per_clu_bits = 0, num_ext;
21915 +- unsigned int type, clu_offset;
21916 ++ unsigned int type, clu_offset, max_dentries;
21917 + sector_t sector;
21918 + struct exfat_chain dir, clu;
21919 + struct exfat_uni_name uni_name;
21920 +@@ -86,6 +86,8 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent
21921 +
21922 + dentries_per_clu = sbi->dentries_per_clu;
21923 + dentries_per_clu_bits = ilog2(dentries_per_clu);
21924 ++ max_dentries = (unsigned int)min_t(u64, MAX_EXFAT_DENTRIES,
21925 ++ (u64)sbi->num_clusters << dentries_per_clu_bits);
21926 +
21927 + clu_offset = dentry >> dentries_per_clu_bits;
21928 + exfat_chain_dup(&clu, &dir);
21929 +@@ -109,7 +111,7 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent
21930 + }
21931 + }
21932 +
21933 +- while (clu.dir != EXFAT_EOF_CLUSTER) {
21934 ++ while (clu.dir != EXFAT_EOF_CLUSTER && dentry < max_dentries) {
21935 + i = dentry & (dentries_per_clu - 1);
21936 +
21937 + for ( ; i < dentries_per_clu; i++, dentry++) {
21938 +@@ -245,7 +247,7 @@ static int exfat_iterate(struct file *filp, struct dir_context *ctx)
21939 + if (err)
21940 + goto unlock;
21941 + get_new:
21942 +- if (cpos >= i_size_read(inode))
21943 ++ if (ei->flags == ALLOC_NO_FAT_CHAIN && cpos >= i_size_read(inode))
21944 + goto end_of_dir;
21945 +
21946 + err = exfat_readdir(inode, &cpos, &de);
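/*
 * A standalone sketch of the clamping idiom in the exfat hunk above:
 * widen to 64 bits before shifting so the cluster count times dentries
 * per cluster cannot wrap, then cap the directory walk at a fixed
 * limit. MAX_DENTRIES and the example numbers are invented stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_DENTRIES 16777216u  /* stand-in for MAX_EXFAT_DENTRIES */

static uint32_t max_dentries(uint32_t num_clusters, unsigned int clu_bits)
{
    uint64_t total = (uint64_t)num_clusters << clu_bits; /* no 32-bit wrap */

    return total < MAX_DENTRIES ? (uint32_t)total : MAX_DENTRIES;
}

int main(void)
{
    /* 0x400000 clusters * 1024 dentries/cluster wraps to 0 in 32 bits */
    printf("%u\n", max_dentries(0x400000, 10));
    return 0;
}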
21947 +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
21948 +index cbf37b2cf871e..1293de50c8d48 100644
21949 +--- a/fs/ext4/extents.c
21950 ++++ b/fs/ext4/extents.c
21951 +@@ -825,6 +825,7 @@ void ext4_ext_tree_init(handle_t *handle, struct inode *inode)
21952 + eh->eh_entries = 0;
21953 + eh->eh_magic = EXT4_EXT_MAGIC;
21954 + eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
21955 ++ eh->eh_generation = 0;
21956 + ext4_mark_inode_dirty(handle, inode);
21957 + }
21958 +
21959 +@@ -1090,6 +1091,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
21960 + neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
21961 + neh->eh_magic = EXT4_EXT_MAGIC;
21962 + neh->eh_depth = 0;
21963 ++ neh->eh_generation = 0;
21964 +
21965 + /* move remainder of path[depth] to the new leaf */
21966 + if (unlikely(path[depth].p_hdr->eh_entries !=
21967 +@@ -1167,6 +1169,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
21968 + neh->eh_magic = EXT4_EXT_MAGIC;
21969 + neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
21970 + neh->eh_depth = cpu_to_le16(depth - i);
21971 ++ neh->eh_generation = 0;
21972 + fidx = EXT_FIRST_INDEX(neh);
21973 + fidx->ei_block = border;
21974 + ext4_idx_store_pblock(fidx, oldblock);
21975 +diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
21976 +index 0a729027322dd..9a3a8996aacf7 100644
21977 +--- a/fs/ext4/extents_status.c
21978 ++++ b/fs/ext4/extents_status.c
21979 +@@ -1574,11 +1574,9 @@ static unsigned long ext4_es_scan(struct shrinker *shrink,
21980 + ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
21981 + trace_ext4_es_shrink_scan_enter(sbi->s_sb, nr_to_scan, ret);
21982 +
21983 +- if (!nr_to_scan)
21984 +- return ret;
21985 +-
21986 + nr_shrunk = __es_shrink(sbi, nr_to_scan, NULL);
21987 +
21988 ++ ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
21989 + trace_ext4_es_shrink_scan_exit(sbi->s_sb, nr_shrunk, ret);
21990 + return nr_shrunk;
21991 + }
21992 +diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
21993 +index 9bab7fd4ccd57..e89fc0f770b03 100644
21994 +--- a/fs/ext4/ialloc.c
21995 ++++ b/fs/ext4/ialloc.c
21996 +@@ -402,7 +402,7 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
21997 + *
21998 + * We always try to spread first-level directories.
21999 + *
22000 +- * If there are blockgroups with both free inodes and free blocks counts
22001 ++ * If there are blockgroups with both free inodes and free clusters counts
22002 + * not worse than average we return one with smallest directory count.
22003 + * Otherwise we simply return a random group.
22004 + *
22005 +@@ -411,7 +411,7 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
22006 + * It's OK to put directory into a group unless
22007 + * it has too many directories already (max_dirs) or
22008 + * it has too few free inodes left (min_inodes) or
22009 +- * it has too few free blocks left (min_blocks) or
22010 ++ * it has too few free clusters left (min_clusters) or
22011 + * Parent's group is preferred, if it doesn't satisfy these
22012 + * conditions we search cyclically through the rest. If none
22013 + * of the groups look good we just look for a group with more
22014 +@@ -427,7 +427,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
22015 + ext4_group_t real_ngroups = ext4_get_groups_count(sb);
22016 + int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
22017 + unsigned int freei, avefreei, grp_free;
22018 +- ext4_fsblk_t freeb, avefreec;
22019 ++ ext4_fsblk_t freec, avefreec;
22020 + unsigned int ndirs;
22021 + int max_dirs, min_inodes;
22022 + ext4_grpblk_t min_clusters;
22023 +@@ -446,9 +446,8 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
22024 +
22025 + freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
22026 + avefreei = freei / ngroups;
22027 +- freeb = EXT4_C2B(sbi,
22028 +- percpu_counter_read_positive(&sbi->s_freeclusters_counter));
22029 +- avefreec = freeb;
22030 ++ freec = percpu_counter_read_positive(&sbi->s_freeclusters_counter);
22031 ++ avefreec = freec;
22032 + do_div(avefreec, ngroups);
22033 + ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);
22034 +
22035 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
22036 +index fe6045a465993..211acfba3af7c 100644
22037 +--- a/fs/ext4/inode.c
22038 ++++ b/fs/ext4/inode.c
22039 +@@ -3418,7 +3418,7 @@ retry:
22040 + * i_disksize out to i_size. This could be beyond where direct I/O is
22041 + * happening and thus expose allocated blocks to direct I/O reads.
22042 + */
22043 +- else if ((map->m_lblk * (1 << blkbits)) >= i_size_read(inode))
22044 ++ else if (((loff_t)map->m_lblk << blkbits) >= i_size_read(inode))
22045 + m_flags = EXT4_GET_BLOCKS_CREATE;
22046 + else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
22047 + m_flags = EXT4_GET_BLOCKS_IO_CREATE_EXT;
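/*
 * A standalone demonstration of the overflow the ext4 iomap hunk above
 * fixes: the logical block number is 32 bits, so multiplying by the
 * block size in 32-bit arithmetic wraps for offsets past 4 GiB, while
 * casting to a 64-bit type before the shift keeps the full byte
 * offset. Types are simplified from the kernel's ext4_lblk_t/loff_t.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t m_lblk = 0x00400000;   /* block 4Mi with 4 KiB blocks = 16 GiB */
    unsigned int blkbits = 12;

    uint32_t wrong = m_lblk * (1u << blkbits);  /* wraps to 0 */
    int64_t right = (int64_t)m_lblk << blkbits; /* 17179869184 */

    printf("wrong=%u right=%lld\n", wrong, (long long)right);
    return 0;
}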
22048 +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
22049 +index c2c22c2baac0b..089c958aa2c34 100644
22050 +--- a/fs/ext4/mballoc.c
22051 ++++ b/fs/ext4/mballoc.c
22052 +@@ -1909,10 +1909,11 @@ static int mb_find_extent(struct ext4_buddy *e4b, int block,
22053 + if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) {
22054 + /* Should never happen! (but apparently sometimes does?!?) */
22055 + WARN_ON(1);
22056 +- ext4_error(e4b->bd_sb, "corruption or bug in mb_find_extent "
22057 +- "block=%d, order=%d needed=%d ex=%u/%d/%d@%u",
22058 +- block, order, needed, ex->fe_group, ex->fe_start,
22059 +- ex->fe_len, ex->fe_logical);
22060 ++ ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0,
22061 ++ "corruption or bug in mb_find_extent "
22062 ++ "block=%d, order=%d needed=%d ex=%u/%d/%d@%u",
22063 ++ block, order, needed, ex->fe_group, ex->fe_start,
22064 ++ ex->fe_len, ex->fe_logical);
22065 + ex->fe_len = 0;
22066 + ex->fe_start = 0;
22067 + ex->fe_group = 0;
22068 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
22069 +index d29f6aa7d96ee..736724ce86d73 100644
22070 +--- a/fs/ext4/super.c
22071 ++++ b/fs/ext4/super.c
22072 +@@ -3101,8 +3101,15 @@ static void ext4_orphan_cleanup(struct super_block *sb,
22073 + inode_lock(inode);
22074 + truncate_inode_pages(inode->i_mapping, inode->i_size);
22075 + ret = ext4_truncate(inode);
22076 +- if (ret)
22077 ++ if (ret) {
22078 ++ /*
22079 ++ * We need to clean up the in-core orphan list
22080 ++ * manually if ext4_truncate() failed to get a
22081 ++ * transaction handle.
22082 ++ */
22083 ++ ext4_orphan_del(NULL, inode);
22084 + ext4_std_error(inode->i_sb, ret);
22085 ++ }
22086 + inode_unlock(inode);
22087 + nr_truncates++;
22088 + } else {
22089 +@@ -5058,6 +5065,7 @@ no_journal:
22090 + ext4_msg(sb, KERN_ERR,
22091 + "unable to initialize "
22092 + "flex_bg meta info!");
22093 ++ ret = -ENOMEM;
22094 + goto failed_mount6;
22095 + }
22096 +
22097 +diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
22098 +index 009a09fb9d88c..e2d0c7d9673e0 100644
22099 +--- a/fs/f2fs/data.c
22100 ++++ b/fs/f2fs/data.c
22101 +@@ -4067,6 +4067,12 @@ static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
22102 + if (f2fs_readonly(F2FS_I_SB(inode)->sb))
22103 + return -EROFS;
22104 +
22105 ++ if (f2fs_lfs_mode(F2FS_I_SB(inode))) {
22106 ++ f2fs_err(F2FS_I_SB(inode),
22107 ++ "Swapfile not supported in LFS mode");
22108 ++ return -EINVAL;
22109 ++ }
22110 ++
22111 + ret = f2fs_convert_inline_inode(inode);
22112 + if (ret)
22113 + return ret;
22114 +diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
22115 +index 39b522ec73e7e..e5dbe87e65b45 100644
22116 +--- a/fs/f2fs/sysfs.c
22117 ++++ b/fs/f2fs/sysfs.c
22118 +@@ -562,6 +562,7 @@ enum feat_id {
22119 + FEAT_CASEFOLD,
22120 + FEAT_COMPRESSION,
22121 + FEAT_TEST_DUMMY_ENCRYPTION_V2,
22122 ++ FEAT_ENCRYPTED_CASEFOLD,
22123 + };
22124 +
22125 + static ssize_t f2fs_feature_show(struct f2fs_attr *a,
22126 +@@ -583,6 +584,7 @@ static ssize_t f2fs_feature_show(struct f2fs_attr *a,
22127 + case FEAT_CASEFOLD:
22128 + case FEAT_COMPRESSION:
22129 + case FEAT_TEST_DUMMY_ENCRYPTION_V2:
22130 ++ case FEAT_ENCRYPTED_CASEFOLD:
22131 + return sprintf(buf, "supported\n");
22132 + }
22133 + return 0;
22134 +@@ -687,7 +689,10 @@ F2FS_GENERAL_RO_ATTR(avg_vblocks);
22135 + #ifdef CONFIG_FS_ENCRYPTION
22136 + F2FS_FEATURE_RO_ATTR(encryption, FEAT_CRYPTO);
22137 + F2FS_FEATURE_RO_ATTR(test_dummy_encryption_v2, FEAT_TEST_DUMMY_ENCRYPTION_V2);
22138 ++#ifdef CONFIG_UNICODE
22139 ++F2FS_FEATURE_RO_ATTR(encrypted_casefold, FEAT_ENCRYPTED_CASEFOLD);
22140 + #endif
22141 ++#endif /* CONFIG_FS_ENCRYPTION */
22142 + #ifdef CONFIG_BLK_DEV_ZONED
22143 + F2FS_FEATURE_RO_ATTR(block_zoned, FEAT_BLKZONED);
22144 + #endif
22145 +@@ -786,7 +791,10 @@ static struct attribute *f2fs_feat_attrs[] = {
22146 + #ifdef CONFIG_FS_ENCRYPTION
22147 + ATTR_LIST(encryption),
22148 + ATTR_LIST(test_dummy_encryption_v2),
22149 ++#ifdef CONFIG_UNICODE
22150 ++ ATTR_LIST(encrypted_casefold),
22151 + #endif
22152 ++#endif /* CONFIG_FS_ENCRYPTION */
22153 + #ifdef CONFIG_BLK_DEV_ZONED
22154 + ATTR_LIST(block_zoned),
22155 + #endif
22156 +diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
22157 +index e91980f493884..8d4130b01423b 100644
22158 +--- a/fs/fs-writeback.c
22159 ++++ b/fs/fs-writeback.c
22160 +@@ -505,12 +505,19 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
22161 + if (!isw)
22162 + return;
22163 +
22164 ++ atomic_inc(&isw_nr_in_flight);
22165 ++
22166 + /* find and pin the new wb */
22167 + rcu_read_lock();
22168 + memcg_css = css_from_id(new_wb_id, &memory_cgrp_subsys);
22169 +- if (memcg_css)
22170 +- isw->new_wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
22171 ++ if (memcg_css && !css_tryget(memcg_css))
22172 ++ memcg_css = NULL;
22173 + rcu_read_unlock();
22174 ++ if (!memcg_css)
22175 ++ goto out_free;
22176 ++
22177 ++ isw->new_wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
22178 ++ css_put(memcg_css);
22179 + if (!isw->new_wb)
22180 + goto out_free;
22181 +
22182 +@@ -535,11 +542,10 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
22183 + * Let's continue after I_WB_SWITCH is guaranteed to be visible.
22184 + */
22185 + call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
22186 +-
22187 +- atomic_inc(&isw_nr_in_flight);
22188 + return;
22189 +
22190 + out_free:
22191 ++ atomic_dec(&isw_nr_in_flight);
22192 + if (isw->new_wb)
22193 + wb_put(isw->new_wb);
22194 + kfree(isw);
22195 +@@ -2205,28 +2211,6 @@ int dirtytime_interval_handler(struct ctl_table *table, int write,
22196 + return ret;
22197 + }
22198 +
22199 +-static noinline void block_dump___mark_inode_dirty(struct inode *inode)
22200 +-{
22201 +- if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
22202 +- struct dentry *dentry;
22203 +- const char *name = "?";
22204 +-
22205 +- dentry = d_find_alias(inode);
22206 +- if (dentry) {
22207 +- spin_lock(&dentry->d_lock);
22208 +- name = (const char *) dentry->d_name.name;
22209 +- }
22210 +- printk(KERN_DEBUG
22211 +- "%s(%d): dirtied inode %lu (%s) on %s\n",
22212 +- current->comm, task_pid_nr(current), inode->i_ino,
22213 +- name, inode->i_sb->s_id);
22214 +- if (dentry) {
22215 +- spin_unlock(&dentry->d_lock);
22216 +- dput(dentry);
22217 +- }
22218 +- }
22219 +-}
22220 +-
22221 + /**
22222 + * __mark_inode_dirty - internal function to mark an inode dirty
22223 + *
22224 +@@ -2296,9 +2280,6 @@ void __mark_inode_dirty(struct inode *inode, int flags)
22225 + (dirtytime && (inode->i_state & I_DIRTY_INODE)))
22226 + return;
22227 +
22228 +- if (unlikely(block_dump))
22229 +- block_dump___mark_inode_dirty(inode);
22230 +-
22231 + spin_lock(&inode->i_lock);
22232 + if (dirtytime && (inode->i_state & I_DIRTY_INODE))
22233 + goto out_unlock_inode;
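/*
 * A minimal sketch of the lookup discipline the writeback hunk above
 * adopts: resolve the object under rcu_read_lock(), pin it with a
 * try-get that can fail if the object is already dying, leave the RCU
 * section, and only then do work that may sleep or allocate. The
 * struct obj, find_obj_by_id(), obj_tryget() and obj_put() names are
 * hypothetical; css_from_id()/css_tryget()/css_put() play these roles
 * in the patch.
 */
struct obj *lookup_and_pin(int id)
{
    struct obj *o;

    rcu_read_lock();
    o = find_obj_by_id(id);     /* RCU-protected lookup, no reference yet */
    if (o && !obj_tryget(o))    /* refcount may already have hit zero */
        o = NULL;
    rcu_read_unlock();

    if (!o)
        return NULL;

    setup_that_may_sleep(o);    /* safe now: we hold a real reference */
    return o;                   /* caller drops it with obj_put() */
}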
22234 +diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
22235 +index a5ceccc5ef00f..b8d58aa082062 100644
22236 +--- a/fs/fuse/dev.c
22237 ++++ b/fs/fuse/dev.c
22238 +@@ -783,6 +783,7 @@ static int fuse_check_page(struct page *page)
22239 + 1 << PG_uptodate |
22240 + 1 << PG_lru |
22241 + 1 << PG_active |
22242 ++ 1 << PG_workingset |
22243 + 1 << PG_reclaim |
22244 + 1 << PG_waiters))) {
22245 + dump_page(page, "fuse: trying to steal weird page");
22246 +@@ -1271,6 +1272,15 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
22247 + goto restart;
22248 + }
22249 + spin_lock(&fpq->lock);
22250 ++ /*
22251 ++ * Must not put request on fpq->io queue after having been shut down by
22252 ++ * fuse_abort_conn()
22253 ++ */
22254 ++ if (!fpq->connected) {
22255 ++ req->out.h.error = err = -ECONNABORTED;
22256 ++ goto out_end;
22258 ++ }
22259 + list_add(&req->list, &fpq->io);
22260 + spin_unlock(&fpq->lock);
22261 + cs->req = req;
22262 +@@ -1857,7 +1867,7 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
22263 + }
22264 +
22265 + err = -EINVAL;
22266 +- if (oh.error <= -1000 || oh.error > 0)
22267 ++ if (oh.error <= -512 || oh.error > 0)
22268 + goto copy_finish;
22269 +
22270 + spin_lock(&fpq->lock);
22271 +diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
22272 +index 1b6c001a7dd12..3fa8604c21d52 100644
22273 +--- a/fs/fuse/dir.c
22274 ++++ b/fs/fuse/dir.c
22275 +@@ -339,18 +339,33 @@ static struct vfsmount *fuse_dentry_automount(struct path *path)
22276 +
22277 + /* Initialize superblock, making @mp_fi its root */
22278 + err = fuse_fill_super_submount(sb, mp_fi);
22279 +- if (err)
22280 ++ if (err) {
22281 ++ fuse_conn_put(fc);
22282 ++ kfree(fm);
22283 ++ sb->s_fs_info = NULL;
22284 + goto out_put_sb;
22285 ++ }
22286 ++
22287 ++ down_write(&fc->killsb);
22288 ++ list_add_tail(&fm->fc_entry, &fc->mounts);
22289 ++ up_write(&fc->killsb);
22290 +
22291 + sb->s_flags |= SB_ACTIVE;
22292 + fsc->root = dget(sb->s_root);
22293 ++
22294 ++ /*
22295 ++ * FIXME: setting SB_BORN requires a write barrier for
22296 ++ * super_cache_count(). We should actually come
22297 ++ * up with a proper ->get_tree() implementation
22298 ++ * for submounts and call vfs_get_tree() to take
22299 ++ * care of the write barrier.
22300 ++ */
22301 ++ smp_wmb();
22302 ++ sb->s_flags |= SB_BORN;
22303 ++
22304 + /* We are done configuring the superblock, so unlock it */
22305 + up_write(&sb->s_umount);
22306 +
22307 +- down_write(&fc->killsb);
22308 +- list_add_tail(&fm->fc_entry, &fc->mounts);
22309 +- up_write(&fc->killsb);
22310 +-
22311 + /* Create the submount */
22312 + mnt = vfs_create_mount(fsc);
22313 + if (IS_ERR(mnt)) {
22314 +diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
22315 +index 493a83e3f5906..13ca4fe47a6e7 100644
22316 +--- a/fs/gfs2/file.c
22317 ++++ b/fs/gfs2/file.c
22318 +@@ -450,8 +450,8 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
22319 + file_update_time(vmf->vma->vm_file);
22320 +
22321 + /* page is wholly or partially inside EOF */
22322 +- if (offset > size - PAGE_SIZE)
22323 +- length = offset_in_page(size);
22324 ++ if (size - offset < PAGE_SIZE)
22325 ++ length = size - offset;
22326 + else
22327 + length = PAGE_SIZE;
22328 +
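/*
 * A standalone demonstration of the unsigned-underflow hazard the gfs2
 * page_mkwrite hunk above removes: with size and offset both unsigned,
 * "offset > size - PAGE_SIZE" wraps whenever size < PAGE_SIZE and the
 * partial-page case is missed, while "size - offset < PAGE_SIZE" is
 * safe because offset cannot exceed size at this point.
 */
#include <stdio.h>

#define PAGE_SIZE 4096ull

int main(void)
{
    unsigned long long size = 512, offset = 0; /* file shorter than a page */

    /* old test: 512 - 4096 wraps to a huge value, 0 > huge is false */
    printf("old detects partial page: %d\n", offset > size - PAGE_SIZE);
    /* new test: 512 - 0 < 4096 is true, partial page handled */
    printf("new detects partial page: %d\n", size - offset < PAGE_SIZE);
    return 0;
}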
22329 +diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
22330 +index 826f77d9cff5d..5f4504dd0875a 100644
22331 +--- a/fs/gfs2/ops_fstype.c
22332 ++++ b/fs/gfs2/ops_fstype.c
22333 +@@ -687,6 +687,7 @@ static int init_statfs(struct gfs2_sbd *sdp)
22334 + }
22335 +
22336 + iput(pn);
22337 ++ pn = NULL;
22338 + ip = GFS2_I(sdp->sd_sc_inode);
22339 + error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0,
22340 + &sdp->sd_sc_gh);
22341 +diff --git a/fs/io_uring.c b/fs/io_uring.c
22342 +index fa8794c61af7b..ad1f31fafe445 100644
22343 +--- a/fs/io_uring.c
22344 ++++ b/fs/io_uring.c
22345 +@@ -2621,7 +2621,7 @@ static bool __io_file_supports_async(struct file *file, int rw)
22346 + return true;
22347 + return false;
22348 + }
22349 +- if (S_ISCHR(mode) || S_ISSOCK(mode))
22350 ++ if (S_ISSOCK(mode))
22351 + return true;
22352 + if (S_ISREG(mode)) {
22353 + if (IS_ENABLED(CONFIG_BLOCK) &&
22354 +@@ -3453,6 +3453,10 @@ static int io_renameat_prep(struct io_kiocb *req,
22355 + struct io_rename *ren = &req->rename;
22356 + const char __user *oldf, *newf;
22357 +
22358 ++ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
22359 ++ return -EINVAL;
22360 ++ if (sqe->ioprio || sqe->buf_index)
22361 ++ return -EINVAL;
22362 + if (unlikely(req->flags & REQ_F_FIXED_FILE))
22363 + return -EBADF;
22364 +
22365 +@@ -3500,6 +3504,10 @@ static int io_unlinkat_prep(struct io_kiocb *req,
22366 + struct io_unlink *un = &req->unlink;
22367 + const char __user *fname;
22368 +
22369 ++ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
22370 ++ return -EINVAL;
22371 ++ if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
22372 ++ return -EINVAL;
22373 + if (unlikely(req->flags & REQ_F_FIXED_FILE))
22374 + return -EBADF;
22375 +
22376 +diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
22377 +index f5c058b3192ce..4474adb393ca8 100644
22378 +--- a/fs/ntfs/inode.c
22379 ++++ b/fs/ntfs/inode.c
22380 +@@ -477,7 +477,7 @@ err_corrupt_attr:
22381 + }
22382 + file_name_attr = (FILE_NAME_ATTR*)((u8*)attr +
22383 + le16_to_cpu(attr->data.resident.value_offset));
22384 +- p2 = (u8*)attr + le32_to_cpu(attr->data.resident.value_length);
22385 ++ p2 = (u8 *)file_name_attr + le32_to_cpu(attr->data.resident.value_length);
22386 + if (p2 < (u8*)attr || p2 > p)
22387 + goto err_corrupt_attr;
22388 + /* This attribute is ok, but is it in the $Extend directory? */
22389 +diff --git a/fs/ocfs2/filecheck.c b/fs/ocfs2/filecheck.c
22390 +index 90b8d300c1eea..de56e6231af87 100644
22391 +--- a/fs/ocfs2/filecheck.c
22392 ++++ b/fs/ocfs2/filecheck.c
22393 +@@ -326,11 +326,7 @@ static ssize_t ocfs2_filecheck_attr_show(struct kobject *kobj,
22394 + ret = snprintf(buf + total, remain, "%lu\t\t%u\t%s\n",
22395 + p->fe_ino, p->fe_done,
22396 + ocfs2_filecheck_error(p->fe_status));
22397 +- if (ret < 0) {
22398 +- total = ret;
22399 +- break;
22400 +- }
22401 +- if (ret == remain) {
22402 ++ if (ret >= remain) {
22403 + /* snprintf() didn't fit */
22404 + total = -E2BIG;
22405 + break;
22406 +diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
22407 +index d50e8b8dfea47..16f1bfc407f2a 100644
22408 +--- a/fs/ocfs2/stackglue.c
22409 ++++ b/fs/ocfs2/stackglue.c
22410 +@@ -500,11 +500,7 @@ static ssize_t ocfs2_loaded_cluster_plugins_show(struct kobject *kobj,
22411 + list_for_each_entry(p, &ocfs2_stack_list, sp_list) {
22412 + ret = snprintf(buf, remain, "%s\n",
22413 + p->sp_name);
22414 +- if (ret < 0) {
22415 +- total = ret;
22416 +- break;
22417 +- }
22418 +- if (ret == remain) {
22419 ++ if (ret >= remain) {
22420 + /* snprintf() didn't fit */
22421 + total = -E2BIG;
22422 + break;
22423 +@@ -531,7 +527,7 @@ static ssize_t ocfs2_active_cluster_plugin_show(struct kobject *kobj,
22424 + if (active_stack) {
22425 + ret = snprintf(buf, PAGE_SIZE, "%s\n",
22426 + active_stack->sp_name);
22427 +- if (ret == PAGE_SIZE)
22428 ++ if (ret >= PAGE_SIZE)
22429 + ret = -E2BIG;
22430 + }
22431 + spin_unlock(&ocfs2_stack_lock);
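/*
 * A standalone demonstration of the snprintf() property behind the
 * ocfs2 hunks above: the return value is the length the output would
 * have had without the size limit, so truncation is detected with
 * "ret >= size" rather than "ret == size", and a negative return does
 * not occur for simple format strings like these.
 */
#include <stdio.h>

int main(void)
{
    char buf[8];
    int ret = snprintf(buf, sizeof(buf), "%s\n", "cluster-o2cb");

    if (ret >= (int)sizeof(buf))
        printf("truncated: needed %d chars, buffer holds %zu\n",
               ret, sizeof(buf));
    return 0;
}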
22432 +diff --git a/fs/open.c b/fs/open.c
22433 +index e53af13b5835f..53bc0573c0eca 100644
22434 +--- a/fs/open.c
22435 ++++ b/fs/open.c
22436 +@@ -1002,12 +1002,20 @@ inline struct open_how build_open_how(int flags, umode_t mode)
22437 +
22438 + inline int build_open_flags(const struct open_how *how, struct open_flags *op)
22439 + {
22440 +- int flags = how->flags;
22441 ++ u64 flags = how->flags;
22442 ++ u64 strip = FMODE_NONOTIFY | O_CLOEXEC;
22443 + int lookup_flags = 0;
22444 + int acc_mode = ACC_MODE(flags);
22445 +
22446 +- /* Must never be set by userspace */
22447 +- flags &= ~(FMODE_NONOTIFY | O_CLOEXEC);
22448 ++ BUILD_BUG_ON_MSG(upper_32_bits(VALID_OPEN_FLAGS),
22449 ++ "struct open_flags doesn't yet handle flags > 32 bits");
22450 ++
22451 ++ /*
22452 ++ * Strip flags that either shouldn't be set by userspace like
22453 ++ * FMODE_NONOTIFY or that aren't relevant in determining struct
22454 ++ * open_flags like O_CLOEXEC.
22455 ++ */
22456 ++ flags &= ~strip;
22457 +
22458 + /*
22459 + * Older syscalls implicitly clear all of the invalid flags or argument
22460 +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
22461 +index fc9784544b241..7389df326edde 100644
22462 +--- a/fs/proc/task_mmu.c
22463 ++++ b/fs/proc/task_mmu.c
22464 +@@ -832,7 +832,7 @@ static int show_smap(struct seq_file *m, void *v)
22465 + __show_smap(m, &mss, false);
22466 +
22467 + seq_printf(m, "THPeligible: %d\n",
22468 +- transparent_hugepage_enabled(vma));
22469 ++ transparent_hugepage_active(vma));
22470 +
22471 + if (arch_pkeys_enabled())
22472 + seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
22473 +diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig
22474 +index 8adabde685f13..328da35da3908 100644
22475 +--- a/fs/pstore/Kconfig
22476 ++++ b/fs/pstore/Kconfig
22477 +@@ -173,6 +173,7 @@ config PSTORE_BLK
22478 + tristate "Log panic/oops to a block device"
22479 + depends on PSTORE
22480 + depends on BLOCK
22481 ++ depends on BROKEN
22482 + select PSTORE_ZONE
22483 + default n
22484 + help
22485 +diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h
22486 +index ce2cbb3c380ff..2f6b1befb1292 100644
22487 +--- a/include/asm-generic/pgtable-nop4d.h
22488 ++++ b/include/asm-generic/pgtable-nop4d.h
22489 +@@ -9,7 +9,6 @@
22490 + typedef struct { pgd_t pgd; } p4d_t;
22491 +
22492 + #define P4D_SHIFT PGDIR_SHIFT
22493 +-#define MAX_PTRS_PER_P4D 1
22494 + #define PTRS_PER_P4D 1
22495 + #define P4D_SIZE (1UL << P4D_SHIFT)
22496 + #define P4D_MASK (~(P4D_SIZE-1))
22497 +diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
22498 +index d683f5e6d7913..b4d43a4af5f79 100644
22499 +--- a/include/asm-generic/preempt.h
22500 ++++ b/include/asm-generic/preempt.h
22501 +@@ -29,7 +29,7 @@ static __always_inline void preempt_count_set(int pc)
22502 + } while (0)
22503 +
22504 + #define init_idle_preempt_count(p, cpu) do { \
22505 +- task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
22506 ++ task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
22507 + } while (0)
22508 +
22509 + static __always_inline void set_preempt_need_resched(void)
22510 +diff --git a/include/clocksource/timer-ti-dm.h b/include/clocksource/timer-ti-dm.h
22511 +index 4c61dade8835f..f6da8a1326398 100644
22512 +--- a/include/clocksource/timer-ti-dm.h
22513 ++++ b/include/clocksource/timer-ti-dm.h
22514 +@@ -74,6 +74,7 @@
22515 + #define OMAP_TIMER_ERRATA_I103_I767 0x80000000
22516 +
22517 + struct timer_regs {
22518 ++ u32 ocp_cfg;
22519 + u32 tidr;
22520 + u32 tier;
22521 + u32 twer;
22522 +diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
22523 +index 0a288dddcf5be..25806141db591 100644
22524 +--- a/include/crypto/internal/hash.h
22525 ++++ b/include/crypto/internal/hash.h
22526 +@@ -75,13 +75,7 @@ void crypto_unregister_ahashes(struct ahash_alg *algs, int count);
22527 + int ahash_register_instance(struct crypto_template *tmpl,
22528 + struct ahash_instance *inst);
22529 +
22530 +-int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
22531 +- unsigned int keylen);
22532 +-
22533 +-static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
22534 +-{
22535 +- return alg->setkey != shash_no_setkey;
22536 +-}
22537 ++bool crypto_shash_alg_has_setkey(struct shash_alg *alg);
22538 +
22539 + static inline bool crypto_shash_alg_needs_key(struct shash_alg *alg)
22540 + {
22541 +diff --git a/include/dt-bindings/clock/imx8mq-clock.h b/include/dt-bindings/clock/imx8mq-clock.h
22542 +index 82e907ce7bdd3..afa74d7ba1009 100644
22543 +--- a/include/dt-bindings/clock/imx8mq-clock.h
22544 ++++ b/include/dt-bindings/clock/imx8mq-clock.h
22545 +@@ -405,25 +405,6 @@
22546 +
22547 + #define IMX8MQ_VIDEO2_PLL1_REF_SEL 266
22548 +
22549 +-#define IMX8MQ_SYS1_PLL_40M_CG 267
22550 +-#define IMX8MQ_SYS1_PLL_80M_CG 268
22551 +-#define IMX8MQ_SYS1_PLL_100M_CG 269
22552 +-#define IMX8MQ_SYS1_PLL_133M_CG 270
22553 +-#define IMX8MQ_SYS1_PLL_160M_CG 271
22554 +-#define IMX8MQ_SYS1_PLL_200M_CG 272
22555 +-#define IMX8MQ_SYS1_PLL_266M_CG 273
22556 +-#define IMX8MQ_SYS1_PLL_400M_CG 274
22557 +-#define IMX8MQ_SYS1_PLL_800M_CG 275
22558 +-#define IMX8MQ_SYS2_PLL_50M_CG 276
22559 +-#define IMX8MQ_SYS2_PLL_100M_CG 277
22560 +-#define IMX8MQ_SYS2_PLL_125M_CG 278
22561 +-#define IMX8MQ_SYS2_PLL_166M_CG 279
22562 +-#define IMX8MQ_SYS2_PLL_200M_CG 280
22563 +-#define IMX8MQ_SYS2_PLL_250M_CG 281
22564 +-#define IMX8MQ_SYS2_PLL_333M_CG 282
22565 +-#define IMX8MQ_SYS2_PLL_500M_CG 283
22566 +-#define IMX8MQ_SYS2_PLL_1000M_CG 284
22567 +-
22568 + #define IMX8MQ_CLK_GPU_CORE 285
22569 + #define IMX8MQ_CLK_GPU_SHADER 286
22570 + #define IMX8MQ_CLK_M4_CORE 287
22571 +diff --git a/include/linux/bio.h b/include/linux/bio.h
22572 +index a0b4cfdf62a43..d2b98efb5cc50 100644
22573 +--- a/include/linux/bio.h
22574 ++++ b/include/linux/bio.h
22575 +@@ -44,9 +44,6 @@ static inline unsigned int bio_max_segs(unsigned int nr_segs)
22576 + #define bio_offset(bio) bio_iter_offset((bio), (bio)->bi_iter)
22577 + #define bio_iovec(bio) bio_iter_iovec((bio), (bio)->bi_iter)
22578 +
22579 +-#define bio_multiple_segments(bio) \
22580 +- ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
22581 +-
22582 + #define bvec_iter_sectors(iter) ((iter).bi_size >> 9)
22583 + #define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))
22584 +
22585 +@@ -271,7 +268,7 @@ static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
22586 +
22587 + static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
22588 + {
22589 +- *bv = bio_iovec(bio);
22590 ++ *bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
22591 + }
22592 +
22593 + static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
22594 +@@ -279,10 +276,9 @@ static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
22595 + struct bvec_iter iter = bio->bi_iter;
22596 + int idx;
22597 +
22598 +- if (unlikely(!bio_multiple_segments(bio))) {
22599 +- *bv = bio_iovec(bio);
22600 +- return;
22601 +- }
22602 ++ bio_get_first_bvec(bio, bv);
22603 ++ if (bv->bv_len == bio->bi_iter.bi_size)
22604 ++ return; /* this bio only has a single bvec */
22605 +
22606 + bio_advance_iter(bio, &iter, iter.bi_size);
22607 +
22608 +diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
22609 +index d6ab416ee2d2c..7f83d51c0fd7b 100644
22610 +--- a/include/linux/clocksource.h
22611 ++++ b/include/linux/clocksource.h
22612 +@@ -137,7 +137,7 @@ struct clocksource {
22613 + #define CLOCK_SOURCE_UNSTABLE 0x40
22614 + #define CLOCK_SOURCE_SUSPEND_NONSTOP 0x80
22615 + #define CLOCK_SOURCE_RESELECT 0x100
22616 +-
22617 ++#define CLOCK_SOURCE_VERIFY_PERCPU 0x200
22618 + /* simplify initialization of mask field */
22619 + #define CLOCKSOURCE_MASK(bits) GENMASK_ULL((bits) - 1, 0)
22620 +
22621 +diff --git a/include/linux/cred.h b/include/linux/cred.h
22622 +index 14971322e1a05..65014e50d5fab 100644
22623 +--- a/include/linux/cred.h
22624 ++++ b/include/linux/cred.h
22625 +@@ -143,6 +143,7 @@ struct cred {
22626 + #endif
22627 + struct user_struct *user; /* real user ID subscription */
22628 + struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
22629 ++ struct ucounts *ucounts;
22630 + struct group_info *group_info; /* supplementary groups for euid/fsgid */
22631 + /* RCU deletion */
22632 + union {
22633 +@@ -169,6 +170,7 @@ extern int set_security_override_from_ctx(struct cred *, const char *);
22634 + extern int set_create_files_as(struct cred *, struct inode *);
22635 + extern int cred_fscmp(const struct cred *, const struct cred *);
22636 + extern void __init cred_init(void);
22637 ++extern int set_cred_ucounts(struct cred *);
22638 +
22639 + /*
22640 + * check for validity of credentials
22641 +diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
22642 +index 2a8ebe6c222ef..b4e1ebaae825a 100644
22643 +--- a/include/linux/huge_mm.h
22644 ++++ b/include/linux/huge_mm.h
22645 +@@ -115,9 +115,34 @@ extern struct kobj_attribute shmem_enabled_attr;
22646 +
22647 + extern unsigned long transparent_hugepage_flags;
22648 +
22649 ++static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
22650 ++ unsigned long haddr)
22651 ++{
22652 ++ /* Don't have to check pgoff for anonymous vma */
22653 ++ if (!vma_is_anonymous(vma)) {
22654 ++ if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
22655 ++ HPAGE_PMD_NR))
22656 ++ return false;
22657 ++ }
22658 ++
22659 ++ if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
22660 ++ return false;
22661 ++ return true;
22662 ++}
22663 ++
22664 ++static inline bool transhuge_vma_enabled(struct vm_area_struct *vma,
22665 ++ unsigned long vm_flags)
22666 ++{
22667 ++ /* Explicitly disabled through madvise. */
22668 ++ if ((vm_flags & VM_NOHUGEPAGE) ||
22669 ++ test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
22670 ++ return false;
22671 ++ return true;
22672 ++}
22673 ++
22674 + /*
22675 + * to be used on vmas which are known to support THP.
22676 +- * Use transparent_hugepage_enabled otherwise
22677 ++ * Use transparent_hugepage_active otherwise
22678 + */
22679 + static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
22680 + {
22681 +@@ -128,15 +153,12 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
22682 + if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX))
22683 + return false;
22684 +
22685 +- if (vma->vm_flags & VM_NOHUGEPAGE)
22686 ++ if (!transhuge_vma_enabled(vma, vma->vm_flags))
22687 + return false;
22688 +
22689 + if (vma_is_temporary_stack(vma))
22690 + return false;
22691 +
22692 +- if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
22693 +- return false;
22694 +-
22695 + if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
22696 + return true;
22697 +
22698 +@@ -150,24 +172,7 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
22699 + return false;
22700 + }
22701 +
22702 +-bool transparent_hugepage_enabled(struct vm_area_struct *vma);
22703 +-
22704 +-#define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)
22705 +-
22706 +-static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
22707 +- unsigned long haddr)
22708 +-{
22709 +- /* Don't have to check pgoff for anonymous vma */
22710 +- if (!vma_is_anonymous(vma)) {
22711 +- if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
22712 +- (vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
22713 +- return false;
22714 +- }
22715 +-
22716 +- if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
22717 +- return false;
22718 +- return true;
22719 +-}
22720 ++bool transparent_hugepage_active(struct vm_area_struct *vma);
22721 +
22722 + #define transparent_hugepage_use_zero_page() \
22723 + (transparent_hugepage_flags & \
22724 +@@ -354,7 +359,7 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
22725 + return false;
22726 + }
22727 +
22728 +-static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
22729 ++static inline bool transparent_hugepage_active(struct vm_area_struct *vma)
22730 + {
22731 + return false;
22732 + }
22733 +@@ -365,6 +370,12 @@ static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
22734 + return false;
22735 + }
22736 +
22737 ++static inline bool transhuge_vma_enabled(struct vm_area_struct *vma,
22738 ++ unsigned long vm_flags)
22739 ++{
22740 ++ return false;
22741 ++}
22742 ++
22743 + static inline void prep_transhuge_page(struct page *page) {}
22744 +
22745 + static inline bool is_transparent_hugepage(struct page *page)
22746 +diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
22747 +index 3c0117656745a..28a110ec2a0d5 100644
22748 +--- a/include/linux/hugetlb.h
22749 ++++ b/include/linux/hugetlb.h
22750 +@@ -875,6 +875,11 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
22751 + #else /* CONFIG_HUGETLB_PAGE */
22752 + struct hstate {};
22753 +
22754 ++static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
22755 ++{
22756 ++ return NULL;
22757 ++}
22758 ++
22759 + static inline int isolate_or_dissolve_huge_page(struct page *page,
22760 + struct list_head *list)
22761 + {
22762 +diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h
22763 +index 7ce8a8adad587..c582e1a142320 100644
22764 +--- a/include/linux/iio/common/cros_ec_sensors_core.h
22765 ++++ b/include/linux/iio/common/cros_ec_sensors_core.h
22766 +@@ -77,7 +77,7 @@ struct cros_ec_sensors_core_state {
22767 + u16 scale;
22768 + } calib[CROS_EC_SENSOR_MAX_AXIS];
22769 + s8 sign[CROS_EC_SENSOR_MAX_AXIS];
22770 +- u8 samples[CROS_EC_SAMPLE_SIZE];
22771 ++ u8 samples[CROS_EC_SAMPLE_SIZE] __aligned(8);
22772 +
22773 + int (*read_ec_sensors_data)(struct iio_dev *indio_dev,
22774 + unsigned long scan_mask, s16 *data);
22775 +diff --git a/include/linux/kthread.h b/include/linux/kthread.h
22776 +index 2484ed97e72f5..d9133d6db3084 100644
22777 +--- a/include/linux/kthread.h
22778 ++++ b/include/linux/kthread.h
22779 +@@ -33,6 +33,8 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
22780 + unsigned int cpu,
22781 + const char *namefmt);
22782 +
22783 ++void set_kthread_struct(struct task_struct *p);
22784 ++
22785 + void kthread_set_per_cpu(struct task_struct *k, int cpu);
22786 + bool kthread_is_per_cpu(struct task_struct *k);
22787 +
22788 +diff --git a/include/linux/mm.h b/include/linux/mm.h
22789 +index 8ae31622deeff..9afb8998e7e5d 100644
22790 +--- a/include/linux/mm.h
22791 ++++ b/include/linux/mm.h
22792 +@@ -2474,7 +2474,6 @@ extern void set_dma_reserve(unsigned long new_dma_reserve);
22793 + extern void memmap_init_range(unsigned long, int, unsigned long,
22794 + unsigned long, unsigned long, enum meminit_context,
22795 + struct vmem_altmap *, int migratetype);
22796 +-extern void memmap_init_zone(struct zone *zone);
22797 + extern void setup_per_zone_wmarks(void);
22798 + extern int __meminit init_per_zone_wmark_min(void);
22799 + extern void mem_init(void);
22800 +diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
22801 +index a43047b1030dc..c32600c9e1ade 100644
22802 +--- a/include/linux/pgtable.h
22803 ++++ b/include/linux/pgtable.h
22804 +@@ -1592,4 +1592,26 @@ typedef unsigned int pgtbl_mod_mask;
22805 + #define pte_leaf_size(x) PAGE_SIZE
22806 + #endif
22807 +
22808 ++/*
22809 ++ * Some architectures have MMUs that are configurable or selectable at boot
22810 ++ * time. These lead to variable PTRS_PER_x. For statically allocated arrays it
22811 ++ * helps to have a static maximum value.
22812 ++ */
22813 ++
22814 ++#ifndef MAX_PTRS_PER_PTE
22815 ++#define MAX_PTRS_PER_PTE PTRS_PER_PTE
22816 ++#endif
22817 ++
22818 ++#ifndef MAX_PTRS_PER_PMD
22819 ++#define MAX_PTRS_PER_PMD PTRS_PER_PMD
22820 ++#endif
22821 ++
22822 ++#ifndef MAX_PTRS_PER_PUD
22823 ++#define MAX_PTRS_PER_PUD PTRS_PER_PUD
22824 ++#endif
22825 ++
22826 ++#ifndef MAX_PTRS_PER_P4D
22827 ++#define MAX_PTRS_PER_P4D PTRS_PER_P4D
22828 ++#endif
22829 ++
22830 + #endif /* _LINUX_PGTABLE_H */
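/*
 * A trimmed illustration of the fallback-define idiom added above: an
 * architecture with boot-time-variable page-table geometry defines its
 * own MAX_PTRS_PER_* before this point, and the #ifndef guards let
 * everyone else collapse the maximum to the ordinary PTRS_PER_* value
 * so static arrays still get a compile-time bound. Names are invented.
 */
#include <stdio.h>

#define PTRS_PER_DEMO 512

#ifndef MAX_PTRS_PER_DEMO               /* arch override would come first */
#define MAX_PTRS_PER_DEMO PTRS_PER_DEMO
#endif

static int shadow[MAX_PTRS_PER_DEMO];   /* static allocation needs a max */

int main(void)
{
    printf("%zu entries\n", sizeof(shadow) / sizeof(shadow[0]));
    return 0;
}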
22831 +diff --git a/include/linux/prandom.h b/include/linux/prandom.h
22832 +index bbf4b4ad61dfd..056d31317e499 100644
22833 +--- a/include/linux/prandom.h
22834 ++++ b/include/linux/prandom.h
22835 +@@ -111,7 +111,7 @@ static inline u32 __seed(u32 x, u32 m)
22836 + */
22837 + static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
22838 + {
22839 +- u32 i = (seed >> 32) ^ (seed << 10) ^ seed;
22840 ++ u32 i = ((seed >> 32) ^ (seed << 10) ^ seed) & 0xffffffffUL;
22841 +
22842 + state->s1 = __seed(i, 2U);
22843 + state->s2 = __seed(i, 8U);
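/*
 * A standalone look at the prandom change above: when a 64-bit seed is
 * folded into a 32-bit variable, the assignment already truncates, so
 * "& 0xffffffffUL" does not alter the value; it documents the
 * narrowing and silences implicit-truncation warnings.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t seed = 0x123456789abcdef0ULL;

    uint32_t implicit = (seed >> 32) ^ (seed << 10) ^ seed;
    uint32_t masked = ((seed >> 32) ^ (seed << 10) ^ seed) & 0xffffffffUL;

    /* both print the same value */
    printf("%08x %08x\n", (unsigned)implicit, (unsigned)masked);
    return 0;
}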
22844 +diff --git a/include/linux/swap.h b/include/linux/swap.h
22845 +index 144727041e78b..a84f76db50702 100644
22846 +--- a/include/linux/swap.h
22847 ++++ b/include/linux/swap.h
22848 +@@ -526,6 +526,15 @@ static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
22849 + return NULL;
22850 + }
22851 +
22852 ++static inline struct swap_info_struct *get_swap_device(swp_entry_t entry)
22853 ++{
22854 ++ return NULL;
22855 ++}
22856 ++
22857 ++static inline void put_swap_device(struct swap_info_struct *si)
22858 ++{
22859 ++}
22860 ++
22861 + #define swap_address_space(entry) (NULL)
22862 + #define get_nr_swap_pages() 0L
22863 + #define total_swap_pages 0L
22864 +diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
22865 +index 13f65420f188e..ab58696d0ddd1 100644
22866 +--- a/include/linux/tracepoint.h
22867 ++++ b/include/linux/tracepoint.h
22868 +@@ -41,7 +41,17 @@ extern int
22869 + tracepoint_probe_register_prio(struct tracepoint *tp, void *probe, void *data,
22870 + int prio);
22871 + extern int
22872 ++tracepoint_probe_register_prio_may_exist(struct tracepoint *tp, void *probe, void *data,
22873 ++ int prio);
22874 ++extern int
22875 + tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data);
22876 ++static inline int
22877 ++tracepoint_probe_register_may_exist(struct tracepoint *tp, void *probe,
22878 ++ void *data)
22879 ++{
22880 ++ return tracepoint_probe_register_prio_may_exist(tp, probe, data,
22881 ++ TRACEPOINT_DEFAULT_PRIO);
22882 ++}
22883 + extern void
22884 + for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
22885 + void *priv);
22886 +diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
22887 +index 1d08dbbcfe32a..bfa6463f8a95d 100644
22888 +--- a/include/linux/user_namespace.h
22889 ++++ b/include/linux/user_namespace.h
22890 +@@ -104,11 +104,15 @@ struct ucounts {
22891 + };
22892 +
22893 + extern struct user_namespace init_user_ns;
22894 ++extern struct ucounts init_ucounts;
22895 +
22896 + bool setup_userns_sysctls(struct user_namespace *ns);
22897 + void retire_userns_sysctls(struct user_namespace *ns);
22898 + struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid, enum ucount_type type);
22899 + void dec_ucount(struct ucounts *ucounts, enum ucount_type type);
22900 ++struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid);
22901 ++struct ucounts *get_ucounts(struct ucounts *ucounts);
22902 ++void put_ucounts(struct ucounts *ucounts);
22903 +
22904 + #ifdef CONFIG_USER_NS
22905 +
22906 +diff --git a/include/media/hevc-ctrls.h b/include/media/hevc-ctrls.h
22907 +index b4cb2ef02f171..226fcfa0e0261 100644
22908 +--- a/include/media/hevc-ctrls.h
22909 ++++ b/include/media/hevc-ctrls.h
22910 +@@ -81,7 +81,7 @@ struct v4l2_ctrl_hevc_sps {
22911 + __u64 flags;
22912 + };
22913 +
22914 +-#define V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT (1ULL << 0)
22915 ++#define V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT_ENABLED (1ULL << 0)
22916 + #define V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT (1ULL << 1)
22917 + #define V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED (1ULL << 2)
22918 + #define V4L2_HEVC_PPS_FLAG_CABAC_INIT_PRESENT (1ULL << 3)
22919 +@@ -160,6 +160,7 @@ struct v4l2_hevc_pred_weight_table {
22920 + #define V4L2_HEVC_SLICE_PARAMS_FLAG_USE_INTEGER_MV (1ULL << 6)
22921 + #define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED (1ULL << 7)
22922 + #define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED (1ULL << 8)
22923 ++#define V4L2_HEVC_SLICE_PARAMS_FLAG_DEPENDENT_SLICE_SEGMENT (1ULL << 9)
22924 +
22925 + struct v4l2_ctrl_hevc_slice_params {
22926 + __u32 bit_size;
22927 +diff --git a/include/media/media-dev-allocator.h b/include/media/media-dev-allocator.h
22928 +index b35ea6062596b..2ab54d426c644 100644
22929 +--- a/include/media/media-dev-allocator.h
22930 ++++ b/include/media/media-dev-allocator.h
22931 +@@ -19,7 +19,7 @@
22932 +
22933 + struct usb_device;
22934 +
22935 +-#if defined(CONFIG_MEDIA_CONTROLLER) && defined(CONFIG_USB)
22936 ++#if defined(CONFIG_MEDIA_CONTROLLER) && IS_ENABLED(CONFIG_USB)
22937 + /**
22938 + * media_device_usb_allocate() - Allocate and return struct &media device
22939 + *
22940 +diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
22941 +index ea4ae551c4268..18b135dc968b9 100644
22942 +--- a/include/net/bluetooth/hci.h
22943 ++++ b/include/net/bluetooth/hci.h
22944 +@@ -1774,13 +1774,15 @@ struct hci_cp_ext_adv_set {
22945 + __u8 max_events;
22946 + } __packed;
22947 +
22948 ++#define HCI_MAX_EXT_AD_LENGTH 251
22949 ++
22950 + #define HCI_OP_LE_SET_EXT_ADV_DATA 0x2037
22951 + struct hci_cp_le_set_ext_adv_data {
22952 + __u8 handle;
22953 + __u8 operation;
22954 + __u8 frag_pref;
22955 + __u8 length;
22956 +- __u8 data[HCI_MAX_AD_LENGTH];
22957 ++ __u8 data[];
22958 + } __packed;
22959 +
22960 + #define HCI_OP_LE_SET_EXT_SCAN_RSP_DATA 0x2038
22961 +@@ -1789,7 +1791,7 @@ struct hci_cp_le_set_ext_scan_rsp_data {
22962 + __u8 operation;
22963 + __u8 frag_pref;
22964 + __u8 length;
22965 +- __u8 data[HCI_MAX_AD_LENGTH];
22966 ++ __u8 data[];
22967 + } __packed;
22968 +
22969 + #define LE_SET_ADV_DATA_OP_COMPLETE 0x03
22970 +diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
22971 +index c73ac52af1869..89c8406dddb4a 100644
22972 +--- a/include/net/bluetooth/hci_core.h
22973 ++++ b/include/net/bluetooth/hci_core.h
22974 +@@ -228,9 +228,9 @@ struct adv_info {
22975 + __u16 remaining_time;
22976 + __u16 duration;
22977 + __u16 adv_data_len;
22978 +- __u8 adv_data[HCI_MAX_AD_LENGTH];
22979 ++ __u8 adv_data[HCI_MAX_EXT_AD_LENGTH];
22980 + __u16 scan_rsp_len;
22981 +- __u8 scan_rsp_data[HCI_MAX_AD_LENGTH];
22982 ++ __u8 scan_rsp_data[HCI_MAX_EXT_AD_LENGTH];
22983 + __s8 tx_power;
22984 + __u32 min_interval;
22985 + __u32 max_interval;
22986 +@@ -550,9 +550,9 @@ struct hci_dev {
22987 + DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS);
22988 +
22989 + __s8 adv_tx_power;
22990 +- __u8 adv_data[HCI_MAX_AD_LENGTH];
22991 ++ __u8 adv_data[HCI_MAX_EXT_AD_LENGTH];
22992 + __u8 adv_data_len;
22993 +- __u8 scan_rsp_data[HCI_MAX_AD_LENGTH];
22994 ++ __u8 scan_rsp_data[HCI_MAX_EXT_AD_LENGTH];
22995 + __u8 scan_rsp_data_len;
22996 +
22997 + struct list_head adv_instances;
22998 +diff --git a/include/net/ip.h b/include/net/ip.h
22999 +index e20874059f826..d9683bef86840 100644
23000 +--- a/include/net/ip.h
23001 ++++ b/include/net/ip.h
23002 +@@ -31,6 +31,7 @@
23003 + #include <net/flow.h>
23004 + #include <net/flow_dissector.h>
23005 + #include <net/netns/hash.h>
23006 ++#include <net/lwtunnel.h>
23007 +
23008 + #define IPV4_MAX_PMTU 65535U /* RFC 2675, Section 5.1 */
23009 + #define IPV4_MIN_MTU 68 /* RFC 791 */
23010 +@@ -445,22 +446,25 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
23011 +
23012 + /* 'forwarding = true' case should always honour route mtu */
23013 + mtu = dst_metric_raw(dst, RTAX_MTU);
23014 +- if (mtu)
23015 +- return mtu;
23016 ++ if (!mtu)
23017 ++ mtu = min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
23018 +
23019 +- return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
23020 ++ return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
23021 + }
23022 +
23023 + static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
23024 + const struct sk_buff *skb)
23025 + {
23026 ++ unsigned int mtu;
23027 ++
23028 + if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
23029 + bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
23030 +
23031 + return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
23032 + }
23033 +
23034 +- return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
23035 ++ mtu = min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
23036 ++ return mtu - lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
23037 + }
23038 +
23039 + struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
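/*
 * A small sketch of the MTU accounting the ip.h hunks above converge
 * on: whichever source the MTU comes from (route metric or device),
 * subtract the lightweight-tunnel encapsulation headroom before
 * returning it, so callers size packets for the usable payload.
 * Simplified standalone arithmetic, not the kernel helpers themselves.
 */
#include <stdio.h>

static unsigned int usable_mtu(unsigned int route_mtu,
                               unsigned int dev_mtu,
                               unsigned int lwt_headroom)
{
    unsigned int mtu = route_mtu ? route_mtu : dev_mtu;

    return mtu - lwt_headroom;  /* room consumed by tunnel encapsulation */
}

int main(void)
{
    /* no route MTU set, 1500-byte device, 8-byte tunnel header */
    printf("%u\n", usable_mtu(0, 1500, 8)); /* 1492 */
    return 0;
}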
23040 +diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
23041 +index f51a118bfce8b..f14149df5a654 100644
23042 +--- a/include/net/ip6_route.h
23043 ++++ b/include/net/ip6_route.h
23044 +@@ -265,11 +265,18 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
23045 +
23046 + static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
23047 + {
23048 ++ int mtu;
23049 ++
23050 + struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
23051 + inet6_sk(skb->sk) : NULL;
23052 +
23053 +- return (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) ?
23054 +- skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
23055 ++ if (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) {
23056 ++ mtu = READ_ONCE(skb_dst(skb)->dev->mtu);
23057 ++ mtu -= lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
23058 ++ } else
23059 ++ mtu = dst_mtu(skb_dst(skb));
23060 ++
23061 ++ return mtu;
23062 + }
23063 +
23064 + static inline bool ip6_sk_accept_pmtu(const struct sock *sk)
23065 +@@ -317,7 +324,7 @@ static inline unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
23066 + if (dst_metric_locked(dst, RTAX_MTU)) {
23067 + mtu = dst_metric_raw(dst, RTAX_MTU);
23068 + if (mtu)
23069 +- return mtu;
23070 ++ goto out;
23071 + }
23072 +
23073 + mtu = IPV6_MIN_MTU;
23074 +@@ -327,7 +334,8 @@ static inline unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
23075 + mtu = idev->cnf.mtu6;
23076 + rcu_read_unlock();
23077 +
23078 +- return mtu;
23079 ++out:
23080 ++ return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
23081 + }
23082 +
23083 + u32 ip6_mtu_from_fib6(const struct fib6_result *res,
23084 +diff --git a/include/net/macsec.h b/include/net/macsec.h
23085 +index 52874cdfe2260..d6fa6b97f6efa 100644
23086 +--- a/include/net/macsec.h
23087 ++++ b/include/net/macsec.h
23088 +@@ -241,7 +241,7 @@ struct macsec_context {
23089 + struct macsec_rx_sc *rx_sc;
23090 + struct {
23091 + unsigned char assoc_num;
23092 +- u8 key[MACSEC_KEYID_LEN];
23093 ++ u8 key[MACSEC_MAX_KEY_LEN];
23094 + union {
23095 + struct macsec_rx_sa *rx_sa;
23096 + struct macsec_tx_sa *tx_sa;
23097 +diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
23098 +index 1e625519ae968..57710303908c6 100644
23099 +--- a/include/net/sch_generic.h
23100 ++++ b/include/net/sch_generic.h
23101 +@@ -163,6 +163,12 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
23102 + if (spin_trylock(&qdisc->seqlock))
23103 + goto nolock_empty;
23104 +
23105 ++ /* Paired with smp_mb__after_atomic() to make sure
23106 ++ * STATE_MISSED checking is synchronized with clearing
23107 ++ * in pfifo_fast_dequeue().
23108 ++ */
23109 ++ smp_mb__before_atomic();
23110 ++
23111 + /* If the MISSED flag is set, it means other thread has
23112 + * set the MISSED flag before second spin_trylock(), so
23113 + * we can return false here to avoid multi cpus doing
23114 +@@ -180,6 +186,12 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
23115 + */
23116 + set_bit(__QDISC_STATE_MISSED, &qdisc->state);
23117 +
23118 ++ /* spin_trylock() only has load-acquire semantic, so use
23119 ++ * smp_mb__after_atomic() to ensure STATE_MISSED is set
23120 ++ * before doing the second spin_trylock().
23121 ++ */
23122 ++ smp_mb__after_atomic();
23123 ++
23124 + /* Retry again in case other CPU may not see the new flag
23125 + * after it releases the lock at the end of qdisc_run_end().
23126 + */
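/*
 * A condensed sketch of the barrier pairing the sch_generic hunks
 * above add. spin_trylock() only has acquire semantics, so the MISSED
 * flag manipulation around it needs explicit full barriers: one before
 * the flag test (paired with the clearing side in the dequeue path)
 * and one after set_bit() so the flag is visible before the second
 * trylock. Kernel-style pseudocode over a simplified struct qdisc.
 */
static bool run_begin(struct qdisc *q)
{
    if (spin_trylock(&q->seqlock))
        return true;

    smp_mb__before_atomic();            /* order vs. the clearing side */
    if (test_bit(MISSED, &q->state))
        return false;                   /* another CPU will rerun us */

    set_bit(MISSED, &q->state);
    smp_mb__after_atomic();             /* flag visible before retry */

    return spin_trylock(&q->seqlock);   /* one retry, then give up */
}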
23127 +diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h
23128 +index f051046ba0344..f94b8bc26f9ec 100644
23129 +--- a/include/net/tc_act/tc_vlan.h
23130 ++++ b/include/net/tc_act/tc_vlan.h
23131 +@@ -16,6 +16,7 @@ struct tcf_vlan_params {
23132 + u16 tcfv_push_vid;
23133 + __be16 tcfv_push_proto;
23134 + u8 tcfv_push_prio;
23135 ++ bool tcfv_push_prio_exists;
23136 + struct rcu_head rcu;
23137 + };
23138 +
23139 +diff --git a/include/net/xfrm.h b/include/net/xfrm.h
23140 +index c58a6d4eb6103..6232a5f048bde 100644
23141 +--- a/include/net/xfrm.h
23142 ++++ b/include/net/xfrm.h
23143 +@@ -1546,6 +1546,7 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
23144 + void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
23145 + u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
23146 + int xfrm_init_replay(struct xfrm_state *x);
23147 ++u32 __xfrm_state_mtu(struct xfrm_state *x, int mtu);
23148 + u32 xfrm_state_mtu(struct xfrm_state *x, int mtu);
23149 + int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload);
23150 + int xfrm_init_state(struct xfrm_state *x);
23151 +diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
23152 +index eaa8386dbc630..7a9a23e7a604a 100644
23153 +--- a/include/net/xsk_buff_pool.h
23154 ++++ b/include/net/xsk_buff_pool.h
23155 +@@ -147,11 +147,16 @@ static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
23156 + {
23157 + bool cross_pg = (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE;
23158 +
23159 +- if (pool->dma_pages_cnt && cross_pg) {
23160 ++ if (likely(!cross_pg))
23161 ++ return false;
23162 ++
23163 ++ if (pool->dma_pages_cnt) {
23164 + return !(pool->dma_pages[addr >> PAGE_SHIFT] &
23165 + XSK_NEXT_PG_CONTIG_MASK);
23166 + }
23167 +- return false;
23168 ++
23169 ++ /* skb path */
23170 ++ return addr + len > pool->addrs_cnt;
23171 + }
23172 +
23173 + static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
23174 +diff --git a/include/scsi/fc/fc_ms.h b/include/scsi/fc/fc_ms.h
23175 +index 9e273fed0a85f..800d53dc94705 100644
23176 +--- a/include/scsi/fc/fc_ms.h
23177 ++++ b/include/scsi/fc/fc_ms.h
23178 +@@ -63,8 +63,8 @@ enum fc_fdmi_hba_attr_type {
23179 + * HBA Attribute Length
23180 + */
23181 + #define FC_FDMI_HBA_ATTR_NODENAME_LEN 8
23182 +-#define FC_FDMI_HBA_ATTR_MANUFACTURER_LEN 80
23183 +-#define FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN 80
23184 ++#define FC_FDMI_HBA_ATTR_MANUFACTURER_LEN 64
23185 ++#define FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN 64
23186 + #define FC_FDMI_HBA_ATTR_MODEL_LEN 256
23187 + #define FC_FDMI_HBA_ATTR_MODELDESCR_LEN 256
23188 + #define FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN 256
23189 +diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
23190 +index 02f966e9358f6..091f284bd6e93 100644
23191 +--- a/include/scsi/libiscsi.h
23192 ++++ b/include/scsi/libiscsi.h
23193 +@@ -424,6 +424,7 @@ extern int iscsi_conn_start(struct iscsi_cls_conn *);
23194 + extern void iscsi_conn_stop(struct iscsi_cls_conn *, int);
23195 + extern int iscsi_conn_bind(struct iscsi_cls_session *, struct iscsi_cls_conn *,
23196 + int);
23197 ++extern void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active);
23198 + extern void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err);
23199 + extern void iscsi_session_failure(struct iscsi_session *session,
23200 + enum iscsi_err err);
23201 +diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
23202 +index fc5a39839b4b0..3974329d4d023 100644
23203 +--- a/include/scsi/scsi_transport_iscsi.h
23204 ++++ b/include/scsi/scsi_transport_iscsi.h
23205 +@@ -82,6 +82,7 @@ struct iscsi_transport {
23206 + void (*destroy_session) (struct iscsi_cls_session *session);
23207 + struct iscsi_cls_conn *(*create_conn) (struct iscsi_cls_session *sess,
23208 + uint32_t cid);
23209 ++ void (*unbind_conn) (struct iscsi_cls_conn *conn, bool is_active);
23210 + int (*bind_conn) (struct iscsi_cls_session *session,
23211 + struct iscsi_cls_conn *cls_conn,
23212 + uint64_t transport_eph, int is_leading);
23213 +@@ -196,15 +197,23 @@ enum iscsi_connection_state {
23214 + ISCSI_CONN_BOUND,
23215 + };
23216 +
23217 ++#define ISCSI_CLS_CONN_BIT_CLEANUP 1
23218 ++
23219 + struct iscsi_cls_conn {
23220 + struct list_head conn_list; /* item in connlist */
23221 +- struct list_head conn_list_err; /* item in connlist_err */
23222 + void *dd_data; /* LLD private data */
23223 + struct iscsi_transport *transport;
23224 + uint32_t cid; /* connection id */
23225 ++ /*
23226 ++ * This protects the conn startup and binding/unbinding of the ep to
23227 ++ * the conn. Unbinding includes ep_disconnect and stop_conn.
23228 ++ */
23229 + struct mutex ep_mutex;
23230 + struct iscsi_endpoint *ep;
23231 +
23232 ++ unsigned long flags;
23233 ++ struct work_struct cleanup_work;
23234 ++
23235 + struct device dev; /* sysfs transport/container device */
23236 + enum iscsi_connection_state state;
23237 + };
23238 +@@ -441,6 +450,7 @@ extern int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time);
23239 + extern struct iscsi_endpoint *iscsi_create_endpoint(int dd_size);
23240 + extern void iscsi_destroy_endpoint(struct iscsi_endpoint *ep);
23241 + extern struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle);
23242 ++extern void iscsi_put_endpoint(struct iscsi_endpoint *ep);
23243 + extern int iscsi_block_scsi_eh(struct scsi_cmnd *cmd);
23244 + extern struct iscsi_iface *iscsi_create_iface(struct Scsi_Host *shost,
23245 + struct iscsi_transport *t,
23246 +diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h
23247 +index 6ba18b82a02e4..78074254ab98a 100644
23248 +--- a/include/uapi/linux/seccomp.h
23249 ++++ b/include/uapi/linux/seccomp.h
23250 +@@ -115,6 +115,7 @@ struct seccomp_notif_resp {
23251 +
23252 + /* valid flags for seccomp_notif_addfd */
23253 + #define SECCOMP_ADDFD_FLAG_SETFD (1UL << 0) /* Specify remote fd */
23254 ++#define SECCOMP_ADDFD_FLAG_SEND (1UL << 1) /* Addfd and return it, atomically */
23255 +
23256 + /**
23257 + * struct seccomp_notif_addfd
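SECCOMP_ADDFD_FLAG_SEND lets a supervisor install a file descriptor in the target and answer the pending notification in a single SECCOMP_IOCTL_NOTIF_ADDFD call, avoiding the window where the target could die between the two steps. A hedged userspace sketch, assuming headers new enough to carry the flag (addfd_and_reply() is an illustrative helper, not a kernel API):

#include <linux/seccomp.h>
#include <string.h>
#include <sys/ioctl.h>

/* notifyfd: listener fd from SECCOMP_FILTER_FLAG_NEW_LISTENER;
 * req_id:   id of a request already read with SECCOMP_IOCTL_NOTIF_RECV. */
int addfd_and_reply(int notifyfd, __u64 req_id, int srcfd)
{
        struct seccomp_notif_addfd addfd;

        memset(&addfd, 0, sizeof(addfd));
        addfd.id = req_id;
        addfd.srcfd = srcfd;
        addfd.flags = SECCOMP_ADDFD_FLAG_SEND; /* addfd + reply atomically */

        /* On success the return value is the fd number installed in the
         * target; the notification is answered with that value. */
        return ioctl(notifyfd, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd);
}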
23258 +diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
23259 +index d43bec5f1afd0..5afc19c687049 100644
23260 +--- a/include/uapi/linux/v4l2-controls.h
23261 ++++ b/include/uapi/linux/v4l2-controls.h
23262 +@@ -50,6 +50,7 @@
23263 + #ifndef __LINUX_V4L2_CONTROLS_H
23264 + #define __LINUX_V4L2_CONTROLS_H
23265 +
23266 ++#include <linux/const.h>
23267 + #include <linux/types.h>
23268 +
23269 + /* Control classes */
23270 +@@ -1602,30 +1603,30 @@ struct v4l2_ctrl_h264_decode_params {
23271 + #define V4L2_FWHT_VERSION 3
23272 +
23273 + /* Set if this is an interlaced format */
23274 +-#define V4L2_FWHT_FL_IS_INTERLACED BIT(0)
23275 ++#define V4L2_FWHT_FL_IS_INTERLACED _BITUL(0)
23276 + /* Set if this is a bottom-first (NTSC) interlaced format */
23277 +-#define V4L2_FWHT_FL_IS_BOTTOM_FIRST BIT(1)
23278 ++#define V4L2_FWHT_FL_IS_BOTTOM_FIRST _BITUL(1)
23279 + /* Set if each 'frame' contains just one field */
23280 +-#define V4L2_FWHT_FL_IS_ALTERNATE BIT(2)
23281 ++#define V4L2_FWHT_FL_IS_ALTERNATE _BITUL(2)
23282 + /*
23283 + * If V4L2_FWHT_FL_IS_ALTERNATE was set, then this is set if this
23284 + * 'frame' is the bottom field, else it is the top field.
23285 + */
23286 +-#define V4L2_FWHT_FL_IS_BOTTOM_FIELD BIT(3)
23287 ++#define V4L2_FWHT_FL_IS_BOTTOM_FIELD _BITUL(3)
23288 + /* Set if the Y' plane is uncompressed */
23289 +-#define V4L2_FWHT_FL_LUMA_IS_UNCOMPRESSED BIT(4)
23290 ++#define V4L2_FWHT_FL_LUMA_IS_UNCOMPRESSED _BITUL(4)
23291 + /* Set if the Cb plane is uncompressed */
23292 +-#define V4L2_FWHT_FL_CB_IS_UNCOMPRESSED BIT(5)
23293 ++#define V4L2_FWHT_FL_CB_IS_UNCOMPRESSED _BITUL(5)
23294 + /* Set if the Cr plane is uncompressed */
23295 +-#define V4L2_FWHT_FL_CR_IS_UNCOMPRESSED BIT(6)
23296 ++#define V4L2_FWHT_FL_CR_IS_UNCOMPRESSED _BITUL(6)
23297 + /* Set if the chroma plane is full height, if cleared it is half height */
23298 +-#define V4L2_FWHT_FL_CHROMA_FULL_HEIGHT BIT(7)
23299 ++#define V4L2_FWHT_FL_CHROMA_FULL_HEIGHT _BITUL(7)
23300 + /* Set if the chroma plane is full width, if cleared it is half width */
23301 +-#define V4L2_FWHT_FL_CHROMA_FULL_WIDTH BIT(8)
23302 ++#define V4L2_FWHT_FL_CHROMA_FULL_WIDTH _BITUL(8)
23303 + /* Set if the alpha plane is uncompressed */
23304 +-#define V4L2_FWHT_FL_ALPHA_IS_UNCOMPRESSED BIT(9)
23305 ++#define V4L2_FWHT_FL_ALPHA_IS_UNCOMPRESSED _BITUL(9)
23306 + /* Set if this is an I Frame */
23307 +-#define V4L2_FWHT_FL_I_FRAME BIT(10)
23308 ++#define V4L2_FWHT_FL_I_FRAME _BITUL(10)
23309 +
23310 + /* A 4-values flag - the number of components - 1 */
23311 + #define V4L2_FWHT_FL_COMPONENTS_NUM_MSK GENMASK(18, 16)
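The BIT() macro lives in kernel-internal headers that userspace never sees, so a UAPI header using it fails to compile outside the kernel tree; hence the switch to _BITUL() from <linux/const.h>, which this hunk now includes. Roughly what _BITUL() expands to (paraphrased from the UAPI header), shown in a tiny compilable demo:

#include <stdio.h>

/* Paraphrase of <linux/const.h>: */
#define _UL(x)    (x##UL)
#define _BITUL(x) (_UL(1) << (x))

int main(void)
{
        /* Same values as before, via a macro UAPI consumers can resolve. */
        printf("%lu %lu\n", _BITUL(0), _BITUL(10)); /* 1 1024 */
        return 0;
}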
23312 +diff --git a/init/main.c b/init/main.c
23313 +index e9c42a183e339..e6836a9400d56 100644
23314 +--- a/init/main.c
23315 ++++ b/init/main.c
23316 +@@ -941,11 +941,7 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
23317 + * time - but meanwhile we still have a functioning scheduler.
23318 + */
23319 + sched_init();
23320 +- /*
23321 +- * Disable preemption - early bootup scheduling is extremely
23322 +- * fragile until we cpu_idle() for the first time.
23323 +- */
23324 +- preempt_disable();
23325 ++
23326 + if (WARN(!irqs_disabled(),
23327 + "Interrupts were enabled *very* early, fixing it\n"))
23328 + local_irq_disable();
23329 +diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
23330 +index aa516472ce46f..3b45c23286c03 100644
23331 +--- a/kernel/bpf/devmap.c
23332 ++++ b/kernel/bpf/devmap.c
23333 +@@ -92,7 +92,7 @@ static struct hlist_head *dev_map_create_hash(unsigned int entries,
23334 + int i;
23335 + struct hlist_head *hash;
23336 +
23337 +- hash = bpf_map_area_alloc(entries * sizeof(*hash), numa_node);
23338 ++ hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node);
23339 + if (hash != NULL)
23340 + for (i = 0; i < entries; i++)
23341 + INIT_HLIST_HEAD(&hash[i]);
23342 +@@ -143,7 +143,7 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
23343 +
23344 + spin_lock_init(&dtab->index_lock);
23345 + } else {
23346 +- dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
23347 ++ dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries *
23348 + sizeof(struct bpf_dtab_netdev *),
23349 + dtab->map.numa_node);
23350 + if (!dtab->netdev_map)
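bpf_map_area_alloc() takes a u64 size, but without the cast the product entries * sizeof(*hash) is first evaluated in a narrower type on 32-bit targets and can wrap before being widened. A standalone illustration of the wrap, simulating the 32-bit arithmetic with uint32_t:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t entries = 1U << 29;          /* 536,870,912 entries       */
        uint32_t wrapped = entries * 8;       /* 32-bit product wraps to 0 */
        uint64_t correct = (uint64_t)entries * 8;

        printf("wrapped=%u correct=%llu\n",
               wrapped, (unsigned long long)correct);
        return 0;
}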
23351 +diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
23352 +index b4ebd60a6c164..80da1db47c686 100644
23353 +--- a/kernel/bpf/inode.c
23354 ++++ b/kernel/bpf/inode.c
23355 +@@ -543,7 +543,7 @@ int bpf_obj_get_user(const char __user *pathname, int flags)
23356 + return PTR_ERR(raw);
23357 +
23358 + if (type == BPF_TYPE_PROG)
23359 +- ret = (f_flags != O_RDWR) ? -EINVAL : bpf_prog_new_fd(raw);
23360 ++ ret = bpf_prog_new_fd(raw);
23361 + else if (type == BPF_TYPE_MAP)
23362 + ret = bpf_map_new_fd(raw, f_flags);
23363 + else if (type == BPF_TYPE_LINK)
23364 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
23365 +index c6a27574242de..6e2ebcb0d66f0 100644
23366 +--- a/kernel/bpf/verifier.c
23367 ++++ b/kernel/bpf/verifier.c
23368 +@@ -11459,7 +11459,7 @@ static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len
23369 + }
23370 + }
23371 +
23372 +-static void adjust_poke_descs(struct bpf_prog *prog, u32 len)
23373 ++static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len)
23374 + {
23375 + struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
23376 + int i, sz = prog->aux->size_poke_tab;
23377 +@@ -11467,6 +11467,8 @@ static void adjust_poke_descs(struct bpf_prog *prog, u32 len)
23378 +
23379 + for (i = 0; i < sz; i++) {
23380 + desc = &tab[i];
23381 ++ if (desc->insn_idx <= off)
23382 ++ continue;
23383 + desc->insn_idx += len - 1;
23384 + }
23385 + }
23386 +@@ -11487,7 +11489,7 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
23387 + if (adjust_insn_aux_data(env, new_prog, off, len))
23388 + return NULL;
23389 + adjust_subprog_starts(env, off, len);
23390 +- adjust_poke_descs(new_prog, len);
23391 ++ adjust_poke_descs(new_prog, off, len);
23392 + return new_prog;
23393 + }
23394 +
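When the verifier rewrites one instruction at offset off into len instructions, only the poke descriptors pointing past the patch site must shift by len - 1; the old code shifted every descriptor. A toy model of the corrected adjustment (a plain array stands in for the descriptor table):

#include <stdio.h>

/* Toy model of adjust_poke_descs(): only indices strictly after the
 * patched offset shift by (len - 1). */
static void adjust(int *insn_idx, int n, int off, int len)
{
        for (int i = 0; i < n; i++) {
                if (insn_idx[i] <= off)
                        continue;       /* at/before patch site: unchanged */
                insn_idx[i] += len - 1; /* after it: shifted */
        }
}

int main(void)
{
        int idx[3] = { 2, 5, 9 };

        adjust(idx, 3, 5, 4);           /* patch at 5 grows by 3 insns */
        printf("%d %d %d\n", idx[0], idx[1], idx[2]); /* 2 5 12 */
        return 0;
}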
23395 +diff --git a/kernel/cred.c b/kernel/cred.c
23396 +index e1d274cd741be..9c2759166bd82 100644
23397 +--- a/kernel/cred.c
23398 ++++ b/kernel/cred.c
23399 +@@ -60,6 +60,7 @@ struct cred init_cred = {
23400 + .user = INIT_USER,
23401 + .user_ns = &init_user_ns,
23402 + .group_info = &init_groups,
23403 ++ .ucounts = &init_ucounts,
23404 + };
23405 +
23406 + static inline void set_cred_subscribers(struct cred *cred, int n)
23407 +@@ -119,6 +120,8 @@ static void put_cred_rcu(struct rcu_head *rcu)
23408 + if (cred->group_info)
23409 + put_group_info(cred->group_info);
23410 + free_uid(cred->user);
23411 ++ if (cred->ucounts)
23412 ++ put_ucounts(cred->ucounts);
23413 + put_user_ns(cred->user_ns);
23414 + kmem_cache_free(cred_jar, cred);
23415 + }
23416 +@@ -222,6 +225,7 @@ struct cred *cred_alloc_blank(void)
23417 + #ifdef CONFIG_DEBUG_CREDENTIALS
23418 + new->magic = CRED_MAGIC;
23419 + #endif
23420 ++ new->ucounts = get_ucounts(&init_ucounts);
23421 +
23422 + if (security_cred_alloc_blank(new, GFP_KERNEL_ACCOUNT) < 0)
23423 + goto error;
23424 +@@ -284,6 +288,11 @@ struct cred *prepare_creds(void)
23425 +
23426 + if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
23427 + goto error;
23428 ++
23429 ++ new->ucounts = get_ucounts(new->ucounts);
23430 ++ if (!new->ucounts)
23431 ++ goto error;
23432 ++
23433 + validate_creds(new);
23434 + return new;
23435 +
23436 +@@ -363,6 +372,9 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
23437 + ret = create_user_ns(new);
23438 + if (ret < 0)
23439 + goto error_put;
23440 ++ ret = set_cred_ucounts(new);
23441 ++ if (ret < 0)
23442 ++ goto error_put;
23443 + }
23444 +
23445 + #ifdef CONFIG_KEYS
23446 +@@ -653,6 +665,31 @@ int cred_fscmp(const struct cred *a, const struct cred *b)
23447 + }
23448 + EXPORT_SYMBOL(cred_fscmp);
23449 +
23450 ++int set_cred_ucounts(struct cred *new)
23451 ++{
23452 ++ struct task_struct *task = current;
23453 ++ const struct cred *old = task->real_cred;
23454 ++ struct ucounts *old_ucounts = new->ucounts;
23455 ++
23456 ++ if (new->user == old->user && new->user_ns == old->user_ns)
23457 ++ return 0;
23458 ++
23459 ++ /*
23460 ++ * This optimization is needed because alloc_ucounts() uses locks
23461 ++ * for table lookups.
23462 ++ */
23463 ++ if (old_ucounts && old_ucounts->ns == new->user_ns && uid_eq(old_ucounts->uid, new->euid))
23464 ++ return 0;
23465 ++
23466 ++ if (!(new->ucounts = alloc_ucounts(new->user_ns, new->euid)))
23467 ++ return -EAGAIN;
23468 ++
23469 ++ if (old_ucounts)
23470 ++ put_ucounts(old_ucounts);
23471 ++
23472 ++ return 0;
23473 ++}
23474 ++
23475 + /*
23476 + * initialise the credentials stuff
23477 + */
23478 +@@ -719,6 +756,10 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
23479 + if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
23480 + goto error;
23481 +
23482 ++ new->ucounts = get_ucounts(new->ucounts);
23483 ++ if (!new->ucounts)
23484 ++ goto error;
23485 ++
23486 + put_cred(old);
23487 + validate_creds(new);
23488 + return new;
23489 +diff --git a/kernel/events/core.c b/kernel/events/core.c
23490 +index fe88d6eea3c2c..9ebac2a794679 100644
23491 +--- a/kernel/events/core.c
23492 ++++ b/kernel/events/core.c
23493 +@@ -3821,9 +3821,16 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
23494 + struct task_struct *task)
23495 + {
23496 + struct perf_cpu_context *cpuctx;
23497 +- struct pmu *pmu = ctx->pmu;
23498 ++ struct pmu *pmu;
23499 +
23500 + cpuctx = __get_cpu_context(ctx);
23501 ++
23502 ++ /*
23503 ++ * HACK: for HETEROGENEOUS the task context might have switched to a
23504 ++ * different PMU; force a (re)set of the context.
23505 ++ */
23506 ++ pmu = ctx->pmu = cpuctx->ctx.pmu;
23507 ++
23508 + if (cpuctx->task_ctx == ctx) {
23509 + if (cpuctx->sched_cb_usage)
23510 + __perf_pmu_sched_task(cpuctx, true);
23511 +diff --git a/kernel/fork.c b/kernel/fork.c
23512 +index a070caed5c8ed..567fee3405003 100644
23513 +--- a/kernel/fork.c
23514 ++++ b/kernel/fork.c
23515 +@@ -1999,7 +1999,7 @@ static __latent_entropy struct task_struct *copy_process(
23516 + goto bad_fork_cleanup_count;
23517 +
23518 + delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
23519 +- p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE);
23520 ++ p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE | PF_NO_SETAFFINITY);
23521 + p->flags |= PF_FORKNOEXEC;
23522 + INIT_LIST_HEAD(&p->children);
23523 + INIT_LIST_HEAD(&p->sibling);
23524 +@@ -2407,7 +2407,7 @@ static inline void init_idle_pids(struct task_struct *idle)
23525 + }
23526 + }
23527 +
23528 +-struct task_struct *fork_idle(int cpu)
23529 ++struct task_struct * __init fork_idle(int cpu)
23530 + {
23531 + struct task_struct *task;
23532 + struct kernel_clone_args args = {
23533 +@@ -2997,6 +2997,12 @@ int ksys_unshare(unsigned long unshare_flags)
23534 + if (err)
23535 + goto bad_unshare_cleanup_cred;
23536 +
23537 ++ if (new_cred) {
23538 ++ err = set_cred_ucounts(new_cred);
23539 ++ if (err)
23540 ++ goto bad_unshare_cleanup_cred;
23541 ++ }
23542 ++
23543 + if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
23544 + if (do_sysvsem) {
23545 + /*
23546 +diff --git a/kernel/kthread.c b/kernel/kthread.c
23547 +index 0fccf7d0c6a16..08931e525dd92 100644
23548 +--- a/kernel/kthread.c
23549 ++++ b/kernel/kthread.c
23550 +@@ -68,16 +68,6 @@ enum KTHREAD_BITS {
23551 + KTHREAD_SHOULD_PARK,
23552 + };
23553 +
23554 +-static inline void set_kthread_struct(void *kthread)
23555 +-{
23556 +- /*
23557 +- * We abuse ->set_child_tid to avoid the new member and because it
23558 +- * can't be wrongly copied by copy_process(). We also rely on fact
23559 +- * that the caller can't exec, so PF_KTHREAD can't be cleared.
23560 +- */
23561 +- current->set_child_tid = (__force void __user *)kthread;
23562 +-}
23563 +-
23564 + static inline struct kthread *to_kthread(struct task_struct *k)
23565 + {
23566 + WARN_ON(!(k->flags & PF_KTHREAD));
23567 +@@ -103,6 +93,22 @@ static inline struct kthread *__to_kthread(struct task_struct *p)
23568 + return kthread;
23569 + }
23570 +
23571 ++void set_kthread_struct(struct task_struct *p)
23572 ++{
23573 ++ struct kthread *kthread;
23574 ++
23575 ++ if (__to_kthread(p))
23576 ++ return;
23577 ++
23578 ++ kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
23579 ++ /*
23580 ++ * We abuse ->set_child_tid to avoid the new member and because it
23581 ++ * can't be wrongly copied by copy_process(). We also rely on the
23582 ++ * fact that the caller can't exec, so PF_KTHREAD can't be cleared.
23583 ++ */
23584 ++ p->set_child_tid = (__force void __user *)kthread;
23585 ++}
23586 ++
23587 + void free_kthread_struct(struct task_struct *k)
23588 + {
23589 + struct kthread *kthread;
23590 +@@ -272,8 +278,8 @@ static int kthread(void *_create)
23591 + struct kthread *self;
23592 + int ret;
23593 +
23594 +- self = kzalloc(sizeof(*self), GFP_KERNEL);
23595 +- set_kthread_struct(self);
23596 ++ set_kthread_struct(current);
23597 ++ self = to_kthread(current);
23598 +
23599 + /* If user was SIGKILLed, I release the structure. */
23600 + done = xchg(&create->done, NULL);
23601 +@@ -1156,14 +1162,14 @@ static bool __kthread_cancel_work(struct kthread_work *work)
23602 + * modify @dwork's timer so that it expires after @delay. If @delay is zero,
23603 + * @work is guaranteed to be queued immediately.
23604 + *
23605 +- * Return: %true if @dwork was pending and its timer was modified,
23606 +- * %false otherwise.
23607 ++ * Return: %false if @dwork was idle and queued, %true otherwise.
23608 + *
23609 + * A special case is when the work is being canceled in parallel.
23610 + * It might be caused either by the real kthread_cancel_delayed_work_sync()
23611 + * or yet another kthread_mod_delayed_work() call. We let the other command
23612 +- * win and return %false here. The caller is supposed to synchronize these
23613 +- * operations a reasonable way.
23614 ++ * win and return %true here. The return value can be used for reference
23615 ++ * counting and the number of queued works stays the same. Anyway, the caller
23616 ++ * is supposed to synchronize these operations in a reasonable way.
23617 + *
23618 + * This function is safe to call from any context including IRQ handler.
23619 + * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
23620 +@@ -1175,13 +1181,15 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
23621 + {
23622 + struct kthread_work *work = &dwork->work;
23623 + unsigned long flags;
23624 +- int ret = false;
23625 ++ int ret;
23626 +
23627 + raw_spin_lock_irqsave(&worker->lock, flags);
23628 +
23629 + /* Do not bother with canceling when never queued. */
23630 +- if (!work->worker)
23631 ++ if (!work->worker) {
23632 ++ ret = false;
23633 + goto fast_queue;
23634 ++ }
23635 +
23636 + /* Work must not be used with >1 worker, see kthread_queue_work() */
23637 + WARN_ON_ONCE(work->worker != worker);
23638 +@@ -1199,8 +1207,11 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
23639 + * be used for reference counting.
23640 + */
23641 + kthread_cancel_delayed_work_timer(work, &flags);
23642 +- if (work->canceling)
23643 ++ if (work->canceling) {
23644 ++ /* The number of works in the queue does not change. */
23645 ++ ret = true;
23646 + goto out;
23647 ++ }
23648 + ret = __kthread_cancel_work(work);
23649 +
23650 + fast_queue:
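The reworked contract is: kthread_mod_delayed_work() returns %false only when @dwork was idle and has just been queued, and %true whenever a work was already in flight, so the number of queued works stays unchanged. A kernel-style sketch of the reference-counting pattern the comment alludes to (not standalone-buildable; my_obj, my_get and my_put are illustrative):

/* Sketch: hold one reference per queued work item. */
static void my_schedule(struct kthread_worker *worker,
                        struct kthread_delayed_work *dwork,
                        struct my_obj *obj, unsigned long delay)
{
        my_get(obj);                    /* ref for the work we queue */
        if (kthread_mod_delayed_work(worker, dwork, delay)) {
                /* %true: a work was already queued (or being canceled),
                 * so the queued count did not grow; drop the extra ref. */
                my_put(obj);
        }
}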
23651 +diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
23652 +index e32313072506d..9125bd419216b 100644
23653 +--- a/kernel/locking/lockdep.c
23654 ++++ b/kernel/locking/lockdep.c
23655 +@@ -2306,7 +2306,56 @@ static void print_lock_class_header(struct lock_class *class, int depth)
23656 + }
23657 +
23658 + /*
23659 +- * printk the shortest lock dependencies from @start to @end in reverse order:
23660 ++ * Dependency path printing:
23661 ++ *
23662 ++ * After BFS we get a lock dependency path (linked via ->parent of lock_list),
23663 ++ * printing out each lock in the dependency path will help on understanding how
23664 ++ * the deadlock could happen. Here are some details about dependency path
23665 ++ * printing:
23666 ++ *
23667 ++ * 1) A lock_list can be either forwards or backwards for a lock dependency,
23668 ++ * for a lock dependency A -> B, there are two lock_lists:
23669 ++ *
23670 ++ * a) lock_list in the ->locks_after list of A, whose ->class is B and
23671 ++ * ->links_to is A. In this case, we can say the lock_list is
23672 ++ * "A -> B" (forwards case).
23673 ++ *
23674 ++ * b) lock_list in the ->locks_before list of B, whose ->class is A
23675 ++ * and ->links_to is B. In this case, we can say the lock_list is
23676 ++ * "B <- A" (bacwards case).
23677 ++ *
23678 ++ * The ->trace of both a) and b) point to the call trace where B was
23679 ++ * acquired with A held.
23680 ++ *
23681 ++ * 2) A "helper" lock_list is introduced during BFS, this lock_list doesn't
23682 ++ * represent a certain lock dependency, it only provides an initial entry
23683 ++ * for BFS. For example, BFS may introduce a "helper" lock_list whose
23684 ++ * ->class is A, as a result BFS will search all dependencies starting with
23685 ++ * A, e.g. A -> B or A -> C.
23686 ++ *
23687 ++ * The notation of a forwards helper lock_list is like "-> A", which means
23688 ++ * we should search the forwards dependencies starting with "A", e.g A -> B
23689 ++ * or A -> C.
23690 ++ *
23691 ++ * The notation of a backwards helper lock_list is like "<- B", which means
23692 ++ * we should search the backwards dependencies ending with "B", e.g.
23693 ++ * B <- A or B <- C.
23694 ++ */
23695 ++
23696 ++/*
23697 ++ * printk the shortest lock dependencies from @root to @leaf in reverse order.
23698 ++ *
23699 ++ * We have a lock dependency path as follows:
23700 ++ *
23701 ++ * @root @leaf
23702 ++ * | |
23703 ++ * V V
23704 ++ * ->parent ->parent
23705 ++ * | lock_list | <--------- | lock_list | ... | lock_list | <--------- | lock_list |
23706 ++ * | -> L1 | | L1 -> L2 | ... |Ln-2 -> Ln-1| | Ln-1 -> Ln|
23707 ++ *
23708 ++ * , so it's natural that we start from @leaf and print every ->class and
23709 ++ * ->trace until we reach the @root.
23710 + */
23711 + static void __used
23712 + print_shortest_lock_dependencies(struct lock_list *leaf,
23713 +@@ -2334,6 +2383,61 @@ print_shortest_lock_dependencies(struct lock_list *leaf,
23714 + } while (entry && (depth >= 0));
23715 + }
23716 +
23717 ++/*
23718 ++ * printk the shortest lock dependencies from @leaf to @root.
23719 ++ *
23720 ++ * We have a lock dependency path (from a backwards search) as follows:
23721 ++ *
23722 ++ * @leaf @root
23723 ++ * | |
23724 ++ * V V
23725 ++ * ->parent ->parent
23726 ++ * | lock_list | ---------> | lock_list | ... | lock_list | ---------> | lock_list |
23727 ++ * | L2 <- L1 | | L3 <- L2 | ... | Ln <- Ln-1 | | <- Ln |
23728 ++ *
23729 ++ * , so when we iterate from @leaf to @root, we actually print the lock
23730 ++ * dependency path L1 -> L2 -> .. -> Ln in the non-reverse order.
23731 ++ *
23732 ++ * Another thing to notice here is that ->class of L2 <- L1 is L1, while the
23733 ++ * ->trace of L2 <- L1 is the call trace of L2, in fact we don't have the call
23734 ++ * trace of L1 in the dependency path, which is alright, because most of the
23735 ++ * time we can figure out where L1 is held from the call trace of L2.
23736 ++ */
23737 ++static void __used
23738 ++print_shortest_lock_dependencies_backwards(struct lock_list *leaf,
23739 ++ struct lock_list *root)
23740 ++{
23741 ++ struct lock_list *entry = leaf;
23742 ++ const struct lock_trace *trace = NULL;
23743 ++ int depth;
23744 ++
23745 ++ /* compute depth from generated tree by BFS */
23746 ++ depth = get_lock_depth(leaf);
23747 ++
23748 ++ do {
23749 ++ print_lock_class_header(entry->class, depth);
23750 ++ if (trace) {
23751 ++ printk("%*s ... acquired at:\n", depth, "");
23752 ++ print_lock_trace(trace, 2);
23753 ++ printk("\n");
23754 ++ }
23755 ++
23756 ++ /*
23757 ++ * Record the pointer to the trace for the next lock_list
23758 ++ * entry, see the comments for the function.
23759 ++ */
23760 ++ trace = entry->trace;
23761 ++
23762 ++ if (depth == 0 && (entry != root)) {
23763 ++ printk("lockdep:%s bad path found in chain graph\n", __func__);
23764 ++ break;
23765 ++ }
23766 ++
23767 ++ entry = get_lock_parent(entry);
23768 ++ depth--;
23769 ++ } while (entry && (depth >= 0));
23770 ++}
23771 ++
23772 + static void
23773 + print_irq_lock_scenario(struct lock_list *safe_entry,
23774 + struct lock_list *unsafe_entry,
23775 +@@ -2451,7 +2555,7 @@ print_bad_irq_dependency(struct task_struct *curr,
23776 + prev_root->trace = save_trace();
23777 + if (!prev_root->trace)
23778 + return;
23779 +- print_shortest_lock_dependencies(backwards_entry, prev_root);
23780 ++ print_shortest_lock_dependencies_backwards(backwards_entry, prev_root);
23781 +
23782 + pr_warn("\nthe dependencies between the lock to be acquired");
23783 + pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
23784 +@@ -2669,8 +2773,18 @@ static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
23785 + * Step 3: we found a bad match! Now retrieve a lock from the backward
23786 + * list whose usage mask matches the exclusive usage mask from the
23787 + * lock found on the forward list.
23788 ++ *
23789 ++ * Note that we should only keep the LOCKF_ENABLED_IRQ_ALL bits, considering
23790 ++ * the following case:
23791 ++ *
23792 ++ * When trying to add A -> B to the graph, we find that there is a
23793 ++ * hardirq-safe L, that L -> ... -> A, and another hardirq-unsafe M,
23794 ++ * that B -> ... -> M. However M is **softirq-safe**, so if we use the exact
23795 ++ * inverted bits of M's usage_mask, we will find another lock N that is
23796 ++ * **softirq-unsafe** and N -> ... -> A, however N -> .. -> M will not
23797 ++ * cause an inversion deadlock.
23798 + */
23799 +- backward_mask = original_mask(target_entry1->class->usage_mask);
23800 ++ backward_mask = original_mask(target_entry1->class->usage_mask & LOCKF_ENABLED_IRQ_ALL);
23801 +
23802 + ret = find_usage_backwards(&this, backward_mask, &target_entry);
23803 + if (bfs_error(ret)) {
23804 +@@ -4579,7 +4693,7 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next)
23805 + u8 curr_inner;
23806 + int depth;
23807 +
23808 +- if (!curr->lockdep_depth || !next_inner || next->trylock)
23809 ++ if (!next_inner || next->trylock)
23810 + return 0;
23811 +
23812 + if (!next_outer)
23813 +diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
23814 +index 8e78b2430c168..9a1396a70c520 100644
23815 +--- a/kernel/rcu/tree.c
23816 ++++ b/kernel/rcu/tree.c
23817 +@@ -2911,7 +2911,6 @@ static int __init rcu_spawn_core_kthreads(void)
23818 + "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
23819 + return 0;
23820 + }
23821 +-early_initcall(rcu_spawn_core_kthreads);
23822 +
23823 + /*
23824 + * Handle any core-RCU processing required by a call_rcu() invocation.
23825 +@@ -4472,6 +4471,7 @@ static int __init rcu_spawn_gp_kthread(void)
23826 + wake_up_process(t);
23827 + rcu_spawn_nocb_kthreads();
23828 + rcu_spawn_boost_kthreads();
23829 ++ rcu_spawn_core_kthreads();
23830 + return 0;
23831 + }
23832 + early_initcall(rcu_spawn_gp_kthread);
23833 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
23834 +index 4ca80df205ce6..e5858999b54de 100644
23835 +--- a/kernel/sched/core.c
23836 ++++ b/kernel/sched/core.c
23837 +@@ -1065,9 +1065,10 @@ static void uclamp_sync_util_min_rt_default(void)
23838 + static inline struct uclamp_se
23839 + uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
23840 + {
23841 ++ /* Copy by value as we could modify it */
23842 + struct uclamp_se uc_req = p->uclamp_req[clamp_id];
23843 + #ifdef CONFIG_UCLAMP_TASK_GROUP
23844 +- struct uclamp_se uc_max;
23845 ++ unsigned int tg_min, tg_max, value;
23846 +
23847 + /*
23848 + * Tasks in autogroups or root task group will be
23849 +@@ -1078,9 +1079,11 @@ uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
23850 + if (task_group(p) == &root_task_group)
23851 + return uc_req;
23852 +
23853 +- uc_max = task_group(p)->uclamp[clamp_id];
23854 +- if (uc_req.value > uc_max.value || !uc_req.user_defined)
23855 +- return uc_max;
23856 ++ tg_min = task_group(p)->uclamp[UCLAMP_MIN].value;
23857 ++ tg_max = task_group(p)->uclamp[UCLAMP_MAX].value;
23858 ++ value = uc_req.value;
23859 ++ value = clamp(value, tg_min, tg_max);
23860 ++ uclamp_se_set(&uc_req, value, false);
23861 + #endif
23862 +
23863 + return uc_req;
23864 +@@ -1279,8 +1282,9 @@ static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
23865 + }
23866 +
23867 + static inline void
23868 +-uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id)
23869 ++uclamp_update_active(struct task_struct *p)
23870 + {
23871 ++ enum uclamp_id clamp_id;
23872 + struct rq_flags rf;
23873 + struct rq *rq;
23874 +
23875 +@@ -1300,9 +1304,11 @@ uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id)
23876 + * affecting a valid clamp bucket, the next time it's enqueued,
23877 + * it will already see the updated clamp bucket value.
23878 + */
23879 +- if (p->uclamp[clamp_id].active) {
23880 +- uclamp_rq_dec_id(rq, p, clamp_id);
23881 +- uclamp_rq_inc_id(rq, p, clamp_id);
23882 ++ for_each_clamp_id(clamp_id) {
23883 ++ if (p->uclamp[clamp_id].active) {
23884 ++ uclamp_rq_dec_id(rq, p, clamp_id);
23885 ++ uclamp_rq_inc_id(rq, p, clamp_id);
23886 ++ }
23887 + }
23888 +
23889 + task_rq_unlock(rq, p, &rf);
23890 +@@ -1310,20 +1316,14 @@ uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id)
23891 +
23892 + #ifdef CONFIG_UCLAMP_TASK_GROUP
23893 + static inline void
23894 +-uclamp_update_active_tasks(struct cgroup_subsys_state *css,
23895 +- unsigned int clamps)
23896 ++uclamp_update_active_tasks(struct cgroup_subsys_state *css)
23897 + {
23898 +- enum uclamp_id clamp_id;
23899 + struct css_task_iter it;
23900 + struct task_struct *p;
23901 +
23902 + css_task_iter_start(css, 0, &it);
23903 +- while ((p = css_task_iter_next(&it))) {
23904 +- for_each_clamp_id(clamp_id) {
23905 +- if ((0x1 << clamp_id) & clamps)
23906 +- uclamp_update_active(p, clamp_id);
23907 +- }
23908 +- }
23909 ++ while ((p = css_task_iter_next(&it)))
23910 ++ uclamp_update_active(p);
23911 + css_task_iter_end(&it);
23912 + }
23913 +
23914 +@@ -1916,7 +1916,6 @@ static int migration_cpu_stop(void *data)
23915 + struct migration_arg *arg = data;
23916 + struct set_affinity_pending *pending = arg->pending;
23917 + struct task_struct *p = arg->task;
23918 +- int dest_cpu = arg->dest_cpu;
23919 + struct rq *rq = this_rq();
23920 + bool complete = false;
23921 + struct rq_flags rf;
23922 +@@ -1954,19 +1953,15 @@ static int migration_cpu_stop(void *data)
23923 + if (pending) {
23924 + p->migration_pending = NULL;
23925 + complete = true;
23926 +- }
23927 +
23928 +- if (dest_cpu < 0) {
23929 + if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
23930 + goto out;
23931 +-
23932 +- dest_cpu = cpumask_any_distribute(&p->cpus_mask);
23933 + }
23934 +
23935 + if (task_on_rq_queued(p))
23936 +- rq = __migrate_task(rq, &rf, p, dest_cpu);
23937 ++ rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
23938 + else
23939 +- p->wake_cpu = dest_cpu;
23940 ++ p->wake_cpu = arg->dest_cpu;
23941 +
23942 + /*
23943 + * XXX __migrate_task() can fail, at which point we might end
23944 +@@ -2249,7 +2244,7 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
23945 + init_completion(&my_pending.done);
23946 + my_pending.arg = (struct migration_arg) {
23947 + .task = p,
23948 +- .dest_cpu = -1, /* any */
23949 ++ .dest_cpu = dest_cpu,
23950 + .pending = &my_pending,
23951 + };
23952 +
23953 +@@ -2257,6 +2252,15 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
23954 + } else {
23955 + pending = p->migration_pending;
23956 + refcount_inc(&pending->refs);
23957 ++ /*
23958 ++ * Affinity has changed, but we've already installed a
23959 ++ * pending. migration_cpu_stop() *must* see this, else
23960 ++ * we risk a completion of the pending despite having a
23961 ++ * task on a disallowed CPU.
23962 ++ *
23963 ++ * Serialized by p->pi_lock, so this is safe.
23964 ++ */
23965 ++ pending->arg.dest_cpu = dest_cpu;
23966 + }
23967 + }
23968 + pending = p->migration_pending;
23969 +@@ -7433,19 +7437,32 @@ void show_state_filter(unsigned long state_filter)
23970 + * NOTE: this function does not set the idle thread's NEED_RESCHED
23971 + * flag, to make booting more robust.
23972 + */
23973 +-void init_idle(struct task_struct *idle, int cpu)
23974 ++void __init init_idle(struct task_struct *idle, int cpu)
23975 + {
23976 + struct rq *rq = cpu_rq(cpu);
23977 + unsigned long flags;
23978 +
23979 + __sched_fork(0, idle);
23980 +
23981 ++ /*
23982 ++ * The idle task doesn't need the kthread struct to function, but it
23983 ++ * is dressed up as a per-CPU kthread and thus needs to play the part
23984 ++ * if we want to avoid special-casing it in code that deals with per-CPU
23985 ++ * kthreads.
23986 ++ */
23987 ++ set_kthread_struct(idle);
23988 ++
23989 + raw_spin_lock_irqsave(&idle->pi_lock, flags);
23990 + raw_spin_lock(&rq->lock);
23991 +
23992 + idle->state = TASK_RUNNING;
23993 + idle->se.exec_start = sched_clock();
23994 +- idle->flags |= PF_IDLE;
23995 ++ /*
23996 ++ * PF_KTHREAD should already be set at this point; regardless, make it
23997 ++ * look like a proper per-CPU kthread.
23998 ++ */
23999 ++ idle->flags |= PF_IDLE | PF_KTHREAD | PF_NO_SETAFFINITY;
24000 ++ kthread_set_per_cpu(idle, cpu);
24001 +
24002 + scs_task_reset(idle);
24003 + kasan_unpoison_task_stack(idle);
24004 +@@ -7662,12 +7679,8 @@ static void balance_push(struct rq *rq)
24005 + /*
24006 + * Both the cpu-hotplug and stop task are in this case and are
24007 + * required to complete the hotplug process.
24008 +- *
24009 +- * XXX: the idle task does not match kthread_is_per_cpu() due to
24010 +- * histerical raisins.
24011 + */
24012 +- if (rq->idle == push_task ||
24013 +- kthread_is_per_cpu(push_task) ||
24014 ++ if (kthread_is_per_cpu(push_task) ||
24015 + is_migration_disabled(push_task)) {
24016 +
24017 + /*
24018 +@@ -8680,7 +8693,11 @@ static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
24019 +
24020 + #ifdef CONFIG_UCLAMP_TASK_GROUP
24021 + /* Propagate the effective uclamp value for the new group */
24022 ++ mutex_lock(&uclamp_mutex);
24023 ++ rcu_read_lock();
24024 + cpu_util_update_eff(css);
24025 ++ rcu_read_unlock();
24026 ++ mutex_unlock(&uclamp_mutex);
24027 + #endif
24028 +
24029 + return 0;
24030 +@@ -8770,6 +8787,9 @@ static void cpu_util_update_eff(struct cgroup_subsys_state *css)
24031 + enum uclamp_id clamp_id;
24032 + unsigned int clamps;
24033 +
24034 ++ lockdep_assert_held(&uclamp_mutex);
24035 ++ SCHED_WARN_ON(!rcu_read_lock_held());
24036 ++
24037 + css_for_each_descendant_pre(css, top_css) {
24038 + uc_parent = css_tg(css)->parent
24039 + ? css_tg(css)->parent->uclamp : NULL;
24040 +@@ -8802,7 +8822,7 @@ static void cpu_util_update_eff(struct cgroup_subsys_state *css)
24041 + }
24042 +
24043 + /* Immediately update descendants RUNNABLE tasks */
24044 +- uclamp_update_active_tasks(css, clamps);
24045 ++ uclamp_update_active_tasks(css);
24046 + }
24047 + }
24048 +
24049 +diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
24050 +index 9a2989749b8d1..2f9964b467e03 100644
24051 +--- a/kernel/sched/deadline.c
24052 ++++ b/kernel/sched/deadline.c
24053 +@@ -2486,6 +2486,8 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
24054 + check_preempt_curr_dl(rq, p, 0);
24055 + else
24056 + resched_curr(rq);
24057 ++ } else {
24058 ++ update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
24059 + }
24060 + }
24061 +
24062 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
24063 +index 23663318fb81a..e807b743353d1 100644
24064 +--- a/kernel/sched/fair.c
24065 ++++ b/kernel/sched/fair.c
24066 +@@ -3139,7 +3139,7 @@ void reweight_task(struct task_struct *p, int prio)
24067 + *
24068 + * tg->weight * grq->load.weight
24069 + * ge->load.weight = ----------------------------- (1)
24070 +- * \Sum grq->load.weight
24071 ++ * \Sum grq->load.weight
24072 + *
24073 + * Now, because computing that sum is prohibitively expensive to compute (been
24074 + * there, done that) we approximate it with this average stuff. The average
24075 +@@ -3153,7 +3153,7 @@ void reweight_task(struct task_struct *p, int prio)
24076 + *
24077 + * tg->weight * grq->avg.load_avg
24078 + * ge->load.weight = ------------------------------ (3)
24079 +- * tg->load_avg
24080 ++ * tg->load_avg
24081 + *
24082 + * Where: tg->load_avg ~= \Sum grq->avg.load_avg
24083 + *
24084 +@@ -3169,7 +3169,7 @@ void reweight_task(struct task_struct *p, int prio)
24085 + *
24086 + * tg->weight * grq->load.weight
24087 + * ge->load.weight = ----------------------------- = tg->weight (4)
24088 +- * grp->load.weight
24089 ++ * grp->load.weight
24090 + *
24091 + * That is, the sum collapses because all other CPUs are idle; the UP scenario.
24092 + *
24093 +@@ -3188,7 +3188,7 @@ void reweight_task(struct task_struct *p, int prio)
24094 + *
24095 + * tg->weight * grq->load.weight
24096 + * ge->load.weight = ----------------------------- (6)
24097 +- * tg_load_avg'
24098 ++ * tg_load_avg'
24099 + *
24100 + * Where:
24101 + *
24102 +@@ -6620,8 +6620,11 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
24103 + struct cpumask *pd_mask = perf_domain_span(pd);
24104 + unsigned long cpu_cap = arch_scale_cpu_capacity(cpumask_first(pd_mask));
24105 + unsigned long max_util = 0, sum_util = 0;
24106 ++ unsigned long _cpu_cap = cpu_cap;
24107 + int cpu;
24108 +
24109 ++ _cpu_cap -= arch_scale_thermal_pressure(cpumask_first(pd_mask));
24110 ++
24111 + /*
24112 + * The capacity state of CPUs of the current rd can be driven by CPUs
24113 + * of another rd if they belong to the same pd. So, account for the
24114 +@@ -6657,8 +6660,10 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
24115 + * is already enough to scale the EM reported power
24116 + * consumption at the (eventually clamped) cpu_capacity.
24117 + */
24118 +- sum_util += effective_cpu_util(cpu, util_running, cpu_cap,
24119 +- ENERGY_UTIL, NULL);
24120 ++ cpu_util = effective_cpu_util(cpu, util_running, cpu_cap,
24121 ++ ENERGY_UTIL, NULL);
24122 ++
24123 ++ sum_util += min(cpu_util, _cpu_cap);
24124 +
24125 + /*
24126 + * Performance domain frequency: utilization clamping
24127 +@@ -6669,7 +6674,7 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
24128 + */
24129 + cpu_util = effective_cpu_util(cpu, util_freq, cpu_cap,
24130 + FREQUENCY_UTIL, tsk);
24131 +- max_util = max(max_util, cpu_util);
24132 ++ max_util = max(max_util, min(cpu_util, _cpu_cap));
24133 + }
24134 +
24135 + return em_cpu_energy(pd->em_pd, max_util, sum_util);
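The utilization fed to the Energy Model is now capped at the thermally available capacity, _cpu_cap = capacity - thermal pressure, so a throttled CPU is not credited with more utilization than it can actually deliver. A worked example with illustrative numbers:

#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
        return a < b ? a : b;
}

int main(void)
{
        unsigned long cpu_cap = 1024;        /* arch_scale_cpu_capacity() */
        unsigned long therm_pressure = 224;  /* illustrative              */
        unsigned long _cpu_cap = cpu_cap - therm_pressure; /* 800         */
        unsigned long cpu_util = 900;        /* raw estimate              */

        /* Both sum_util and max_util now use the clamped value. */
        printf("credited util = %lu\n", min_ul(cpu_util, _cpu_cap)); /* 800 */
        return 0;
}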
24136 +diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
24137 +index cc25a3cff41fb..58b36d17a09a0 100644
24138 +--- a/kernel/sched/psi.c
24139 ++++ b/kernel/sched/psi.c
24140 +@@ -182,6 +182,8 @@ struct psi_group psi_system = {
24141 +
24142 + static void psi_avgs_work(struct work_struct *work);
24143 +
24144 ++static void poll_timer_fn(struct timer_list *t);
24145 ++
24146 + static void group_init(struct psi_group *group)
24147 + {
24148 + int cpu;
24149 +@@ -201,6 +203,8 @@ static void group_init(struct psi_group *group)
24150 + memset(group->polling_total, 0, sizeof(group->polling_total));
24151 + group->polling_next_update = ULLONG_MAX;
24152 + group->polling_until = 0;
24153 ++ init_waitqueue_head(&group->poll_wait);
24154 ++ timer_setup(&group->poll_timer, poll_timer_fn, 0);
24155 + rcu_assign_pointer(group->poll_task, NULL);
24156 + }
24157 +
24158 +@@ -1157,9 +1161,7 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group,
24159 + return ERR_CAST(task);
24160 + }
24161 + atomic_set(&group->poll_wakeup, 0);
24162 +- init_waitqueue_head(&group->poll_wait);
24163 + wake_up_process(task);
24164 +- timer_setup(&group->poll_timer, poll_timer_fn, 0);
24165 + rcu_assign_pointer(group->poll_task, task);
24166 + }
24167 +
24168 +@@ -1211,6 +1213,7 @@ static void psi_trigger_destroy(struct kref *ref)
24169 + group->poll_task,
24170 + lockdep_is_held(&group->trigger_lock));
24171 + rcu_assign_pointer(group->poll_task, NULL);
24172 ++ del_timer(&group->poll_timer);
24173 + }
24174 + }
24175 +
24176 +@@ -1223,17 +1226,14 @@ static void psi_trigger_destroy(struct kref *ref)
24177 + */
24178 + synchronize_rcu();
24179 + /*
24180 +- * Destroy the kworker after releasing trigger_lock to prevent a
24181 ++ * Stop kthread 'psimon' after releasing trigger_lock to prevent a
24182 + * deadlock while waiting for psi_poll_work to acquire trigger_lock
24183 + */
24184 + if (task_to_destroy) {
24185 + /*
24186 + * After the RCU grace period has expired, the worker
24187 + * can no longer be found through group->poll_task.
24188 +- * But it might have been already scheduled before
24189 +- * that - deschedule it cleanly before destroying it.
24190 + */
24191 +- del_timer_sync(&group->poll_timer);
24192 + kthread_stop(task_to_destroy);
24193 + }
24194 + kfree(t);
24195 +diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
24196 +index c286e5ba3c942..3b1b8b025b746 100644
24197 +--- a/kernel/sched/rt.c
24198 ++++ b/kernel/sched/rt.c
24199 +@@ -2331,13 +2331,20 @@ void __init init_sched_rt_class(void)
24200 + static void switched_to_rt(struct rq *rq, struct task_struct *p)
24201 + {
24202 + /*
24203 +- * If we are already running, then there's nothing
24204 +- * that needs to be done. But if we are not running
24205 +- * we may need to preempt the current running task.
24206 +- * If that current running task is also an RT task
24207 ++ * If we are running, update the avg_rt tracking, as the running time
24208 ++ * will from now on be accounted to it.
24209 ++ */
24210 ++ if (task_current(rq, p)) {
24211 ++ update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
24212 ++ return;
24213 ++ }
24214 ++
24215 ++ /*
24216 ++ * If we are not running we may need to preempt the current
24217 ++ * running task. If that current running task is also an RT task
24218 + * then see if we can move to another run queue.
24219 + */
24220 +- if (task_on_rq_queued(p) && rq->curr != p) {
24221 ++ if (task_on_rq_queued(p)) {
24222 + #ifdef CONFIG_SMP
24223 + if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
24224 + rt_queue_push_tasks(rq);
24225 +diff --git a/kernel/seccomp.c b/kernel/seccomp.c
24226 +index 9f58049ac16d9..057e17f3215d5 100644
24227 +--- a/kernel/seccomp.c
24228 ++++ b/kernel/seccomp.c
24229 +@@ -107,6 +107,7 @@ struct seccomp_knotif {
24230 + * installing process should allocate the fd as normal.
24231 + * @flags: The flags for the new file descriptor. At the moment, only O_CLOEXEC
24232 + * is allowed.
24233 ++ * @ioctl_flags: The flags used for the seccomp_addfd ioctl.
24234 + * @ret: The return value of the installing process. It is set to the fd num
24235 + * upon success (>= 0).
24236 + * @completion: Indicates that the installing process has completed fd
24237 +@@ -118,6 +119,7 @@ struct seccomp_kaddfd {
24238 + struct file *file;
24239 + int fd;
24240 + unsigned int flags;
24241 ++ __u32 ioctl_flags;
24242 +
24243 + union {
24244 + bool setfd;
24245 +@@ -1065,18 +1067,37 @@ static u64 seccomp_next_notify_id(struct seccomp_filter *filter)
24246 + return filter->notif->next_id++;
24247 + }
24248 +
24249 +-static void seccomp_handle_addfd(struct seccomp_kaddfd *addfd)
24250 ++static void seccomp_handle_addfd(struct seccomp_kaddfd *addfd, struct seccomp_knotif *n)
24251 + {
24252 ++ int fd;
24253 ++
24254 + /*
24255 + * Remove the notification, and reset the list pointers, indicating
24256 + * that it has been handled.
24257 + */
24258 + list_del_init(&addfd->list);
24259 + if (!addfd->setfd)
24260 +- addfd->ret = receive_fd(addfd->file, addfd->flags);
24261 ++ fd = receive_fd(addfd->file, addfd->flags);
24262 + else
24263 +- addfd->ret = receive_fd_replace(addfd->fd, addfd->file,
24264 +- addfd->flags);
24265 ++ fd = receive_fd_replace(addfd->fd, addfd->file, addfd->flags);
24266 ++ addfd->ret = fd;
24267 ++
24268 ++ if (addfd->ioctl_flags & SECCOMP_ADDFD_FLAG_SEND) {
24269 ++ /* If we fail, reset and return an error to the notifier */
24270 ++ if (fd < 0) {
24271 ++ n->state = SECCOMP_NOTIFY_SENT;
24272 ++ } else {
24273 ++ /* Return the FD we just added */
24274 ++ n->flags = 0;
24275 ++ n->error = 0;
24276 ++ n->val = fd;
24277 ++ }
24278 ++ }
24279 ++
24280 ++ /*
24281 ++ * Mark the notification as completed. From this point, the addfd
24282 ++ * memory might be invalidated and we can't safely read it anymore.
24283 ++ */
24284 + complete(&addfd->completion);
24285 + }
24286 +
24287 +@@ -1120,7 +1141,7 @@ static int seccomp_do_user_notification(int this_syscall,
24288 + struct seccomp_kaddfd, list);
24289 + /* Check if we were woken up by a addfd message */
24290 + if (addfd)
24291 +- seccomp_handle_addfd(addfd);
24292 ++ seccomp_handle_addfd(addfd, &n);
24293 +
24294 + } while (n.state != SECCOMP_NOTIFY_REPLIED);
24295 +
24296 +@@ -1581,7 +1602,7 @@ static long seccomp_notify_addfd(struct seccomp_filter *filter,
24297 + if (addfd.newfd_flags & ~O_CLOEXEC)
24298 + return -EINVAL;
24299 +
24300 +- if (addfd.flags & ~SECCOMP_ADDFD_FLAG_SETFD)
24301 ++ if (addfd.flags & ~(SECCOMP_ADDFD_FLAG_SETFD | SECCOMP_ADDFD_FLAG_SEND))
24302 + return -EINVAL;
24303 +
24304 + if (addfd.newfd && !(addfd.flags & SECCOMP_ADDFD_FLAG_SETFD))
24305 +@@ -1591,6 +1612,7 @@ static long seccomp_notify_addfd(struct seccomp_filter *filter,
24306 + if (!kaddfd.file)
24307 + return -EBADF;
24308 +
24309 ++ kaddfd.ioctl_flags = addfd.flags;
24310 + kaddfd.flags = addfd.newfd_flags;
24311 + kaddfd.setfd = addfd.flags & SECCOMP_ADDFD_FLAG_SETFD;
24312 + kaddfd.fd = addfd.newfd;
24313 +@@ -1616,6 +1638,23 @@ static long seccomp_notify_addfd(struct seccomp_filter *filter,
24314 + goto out_unlock;
24315 + }
24316 +
24317 ++ if (addfd.flags & SECCOMP_ADDFD_FLAG_SEND) {
24318 ++ /*
24319 ++ * Disallow queuing an atomic addfd + send reply while there are
24320 ++ * some addfd requests still to process.
24321 ++ *
24322 ++ * There is no clear reason to support it, and disallowing it keeps
24323 ++ * the loop on the other side straightforward.
24324 ++ */
24325 ++ if (!list_empty(&knotif->addfd)) {
24326 ++ ret = -EBUSY;
24327 ++ goto out_unlock;
24328 ++ }
24329 ++
24330 ++ /* Allow exactly only one reply */
24331 ++ knotif->state = SECCOMP_NOTIFY_REPLIED;
24332 ++ }
24333 ++
24334 + list_add(&kaddfd.list, &knotif->addfd);
24335 + complete(&knotif->ready);
24336 + mutex_unlock(&filter->notify_lock);
24337 +diff --git a/kernel/smpboot.c b/kernel/smpboot.c
24338 +index f25208e8df836..e4163042c4d66 100644
24339 +--- a/kernel/smpboot.c
24340 ++++ b/kernel/smpboot.c
24341 +@@ -33,7 +33,6 @@ struct task_struct *idle_thread_get(unsigned int cpu)
24342 +
24343 + if (!tsk)
24344 + return ERR_PTR(-ENOMEM);
24345 +- init_idle(tsk, cpu);
24346 + return tsk;
24347 + }
24348 +
24349 +diff --git a/kernel/sys.c b/kernel/sys.c
24350 +index 3a583a29815fa..142ee040f5733 100644
24351 +--- a/kernel/sys.c
24352 ++++ b/kernel/sys.c
24353 +@@ -558,6 +558,10 @@ long __sys_setreuid(uid_t ruid, uid_t euid)
24354 + if (retval < 0)
24355 + goto error;
24356 +
24357 ++ retval = set_cred_ucounts(new);
24358 ++ if (retval < 0)
24359 ++ goto error;
24360 ++
24361 + return commit_creds(new);
24362 +
24363 + error:
24364 +@@ -616,6 +620,10 @@ long __sys_setuid(uid_t uid)
24365 + if (retval < 0)
24366 + goto error;
24367 +
24368 ++ retval = set_cred_ucounts(new);
24369 ++ if (retval < 0)
24370 ++ goto error;
24371 ++
24372 + return commit_creds(new);
24373 +
24374 + error:
24375 +@@ -691,6 +699,10 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
24376 + if (retval < 0)
24377 + goto error;
24378 +
24379 ++ retval = set_cred_ucounts(new);
24380 ++ if (retval < 0)
24381 ++ goto error;
24382 ++
24383 + return commit_creds(new);
24384 +
24385 + error:
24386 +diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
24387 +index 2cd902592fc1f..cb12225bf0502 100644
24388 +--- a/kernel/time/clocksource.c
24389 ++++ b/kernel/time/clocksource.c
24390 +@@ -124,6 +124,13 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating);
24391 + #define WATCHDOG_INTERVAL (HZ >> 1)
24392 + #define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
24393 +
24394 ++/*
24395 ++ * Maximum permissible delay between two readouts of the watchdog
24396 ++ * clocksource surrounding a read of the clocksource being validated.
24397 ++ * This delay could be due to SMIs, NMIs, or VCPU preemptions.
24398 ++ */
24399 ++#define WATCHDOG_MAX_SKEW (100 * NSEC_PER_USEC)
24400 ++
24401 + static void clocksource_watchdog_work(struct work_struct *work)
24402 + {
24403 + /*
24404 +@@ -184,12 +191,99 @@ void clocksource_mark_unstable(struct clocksource *cs)
24405 + spin_unlock_irqrestore(&watchdog_lock, flags);
24406 + }
24407 +
24408 ++static ulong max_cswd_read_retries = 3;
24409 ++module_param(max_cswd_read_retries, ulong, 0644);
24410 ++
24411 ++static bool cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow)
24412 ++{
24413 ++ unsigned int nretries;
24414 ++ u64 wd_end, wd_delta;
24415 ++ int64_t wd_delay;
24416 ++
24417 ++ for (nretries = 0; nretries <= max_cswd_read_retries; nretries++) {
24418 ++ local_irq_disable();
24419 ++ *wdnow = watchdog->read(watchdog);
24420 ++ *csnow = cs->read(cs);
24421 ++ wd_end = watchdog->read(watchdog);
24422 ++ local_irq_enable();
24423 ++
24424 ++ wd_delta = clocksource_delta(wd_end, *wdnow, watchdog->mask);
24425 ++ wd_delay = clocksource_cyc2ns(wd_delta, watchdog->mult,
24426 ++ watchdog->shift);
24427 ++ if (wd_delay <= WATCHDOG_MAX_SKEW) {
24428 ++ if (nretries > 1 || nretries >= max_cswd_read_retries) {
24429 ++ pr_warn("timekeeping watchdog on CPU%d: %s retried %d times before success\n",
24430 ++ smp_processor_id(), watchdog->name, nretries);
24431 ++ }
24432 ++ return true;
24433 ++ }
24434 ++ }
24435 ++
24436 ++ pr_warn("timekeeping watchdog on CPU%d: %s read-back delay of %lldns, attempt %d, marking unstable\n",
24437 ++ smp_processor_id(), watchdog->name, wd_delay, nretries);
24438 ++ return false;
24439 ++}
24440 ++
24441 ++static u64 csnow_mid;
24442 ++static cpumask_t cpus_ahead;
24443 ++static cpumask_t cpus_behind;
24444 ++
24445 ++static void clocksource_verify_one_cpu(void *csin)
24446 ++{
24447 ++ struct clocksource *cs = (struct clocksource *)csin;
24448 ++
24449 ++ csnow_mid = cs->read(cs);
24450 ++}
24451 ++
24452 ++static void clocksource_verify_percpu(struct clocksource *cs)
24453 ++{
24454 ++ int64_t cs_nsec, cs_nsec_max = 0, cs_nsec_min = LLONG_MAX;
24455 ++ u64 csnow_begin, csnow_end;
24456 ++ int cpu, testcpu;
24457 ++ s64 delta;
24458 ++
24459 ++ cpumask_clear(&cpus_ahead);
24460 ++ cpumask_clear(&cpus_behind);
24461 ++ preempt_disable();
24462 ++ testcpu = smp_processor_id();
24463 ++ pr_warn("Checking clocksource %s synchronization from CPU %d.\n", cs->name, testcpu);
24464 ++ for_each_online_cpu(cpu) {
24465 ++ if (cpu == testcpu)
24466 ++ continue;
24467 ++ csnow_begin = cs->read(cs);
24468 ++ smp_call_function_single(cpu, clocksource_verify_one_cpu, cs, 1);
24469 ++ csnow_end = cs->read(cs);
24470 ++ delta = (s64)((csnow_mid - csnow_begin) & cs->mask);
24471 ++ if (delta < 0)
24472 ++ cpumask_set_cpu(cpu, &cpus_behind);
24473 ++ delta = (csnow_end - csnow_mid) & cs->mask;
24474 ++ if (delta < 0)
24475 ++ cpumask_set_cpu(cpu, &cpus_ahead);
24476 ++ delta = clocksource_delta(csnow_end, csnow_begin, cs->mask);
24477 ++ cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
24478 ++ if (cs_nsec > cs_nsec_max)
24479 ++ cs_nsec_max = cs_nsec;
24480 ++ if (cs_nsec < cs_nsec_min)
24481 ++ cs_nsec_min = cs_nsec;
24482 ++ }
24483 ++ preempt_enable();
24484 ++ if (!cpumask_empty(&cpus_ahead))
24485 ++ pr_warn(" CPUs %*pbl ahead of CPU %d for clocksource %s.\n",
24486 ++ cpumask_pr_args(&cpus_ahead), testcpu, cs->name);
24487 ++ if (!cpumask_empty(&cpus_behind))
24488 ++ pr_warn(" CPUs %*pbl behind CPU %d for clocksource %s.\n",
24489 ++ cpumask_pr_args(&cpus_behind), testcpu, cs->name);
24490 ++ if (!cpumask_empty(&cpus_ahead) || !cpumask_empty(&cpus_behind))
24491 ++ pr_warn(" CPU %d check durations %lldns - %lldns for clocksource %s.\n",
24492 ++ testcpu, cs_nsec_min, cs_nsec_max, cs->name);
24493 ++}
24494 ++
24495 + static void clocksource_watchdog(struct timer_list *unused)
24496 + {
24497 +- struct clocksource *cs;
24498 + u64 csnow, wdnow, cslast, wdlast, delta;
24499 +- int64_t wd_nsec, cs_nsec;
24500 + int next_cpu, reset_pending;
24501 ++ int64_t wd_nsec, cs_nsec;
24502 ++ struct clocksource *cs;
24503 +
24504 + spin_lock(&watchdog_lock);
24505 + if (!watchdog_running)
24506 +@@ -206,10 +300,11 @@ static void clocksource_watchdog(struct timer_list *unused)
24507 + continue;
24508 + }
24509 +
24510 +- local_irq_disable();
24511 +- csnow = cs->read(cs);
24512 +- wdnow = watchdog->read(watchdog);
24513 +- local_irq_enable();
24514 ++ if (!cs_watchdog_read(cs, &csnow, &wdnow)) {
24515 ++ /* Clock readout unreliable, so give it up. */
24516 ++ __clocksource_unstable(cs);
24517 ++ continue;
24518 ++ }
24519 +
24520 + /* Clocksource initialized ? */
24521 + if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
24522 +@@ -407,6 +502,12 @@ static int __clocksource_watchdog_kthread(void)
24523 + unsigned long flags;
24524 + int select = 0;
24525 +
24526 ++ /* Do any required per-CPU skew verification. */
24527 ++ if (curr_clocksource &&
24528 ++ curr_clocksource->flags & CLOCK_SOURCE_UNSTABLE &&
24529 ++ curr_clocksource->flags & CLOCK_SOURCE_VERIFY_PERCPU)
24530 ++ clocksource_verify_percpu(curr_clocksource);
24531 ++
24532 + spin_lock_irqsave(&watchdog_lock, flags);
24533 + list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
24534 + if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
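cs_watchdog_read() brackets each clocksource read with two watchdog reads and only trusts the sample if the pair completed within WATCHDOG_MAX_SKEW, i.e. 100 * NSEC_PER_USEC = 100 us; otherwise it retries, and after too many retries the clocksource is marked unstable. A standalone sketch of the fixed-point cycles-to-ns conversion and the threshold check (the mult/shift values are illustrative):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000ULL
#define WATCHDOG_MAX_SKEW (100 * NSEC_PER_USEC) /* 100,000 ns */

/* clocksource_cyc2ns()-style fixed-point conversion */
static uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
        return (cycles * mult) >> shift;
}

int main(void)
{
        /* Illustrative mult/shift for a ~1 GHz watchdog (1 cycle ~ 1 ns). */
        uint32_t mult = 1 << 20, shift = 20;
        uint64_t wd_delta = 250000;     /* cycles between the two reads */
        uint64_t wd_delay = cyc2ns(wd_delta, mult, shift);

        printf("wd_delay=%lluns -> %s\n", (unsigned long long)wd_delay,
               wd_delay <= WATCHDOG_MAX_SKEW ? "trusted" : "retry");
        return 0;
}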
24535 +diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
24536 +index 7a52bc1728414..f0568b3d6bd1e 100644
24537 +--- a/kernel/trace/bpf_trace.c
24538 ++++ b/kernel/trace/bpf_trace.c
24539 +@@ -1840,7 +1840,8 @@ static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *
24540 + if (prog->aux->max_tp_access > btp->writable_size)
24541 + return -EINVAL;
24542 +
24543 +- return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
24544 ++ return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func,
24545 ++ prog);
24546 + }
24547 +
24548 + int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
24549 +diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
24550 +index c1abd63f1d6c5..797e096bc1f3d 100644
24551 +--- a/kernel/trace/trace_events_hist.c
24552 ++++ b/kernel/trace/trace_events_hist.c
24553 +@@ -1555,6 +1555,13 @@ static int contains_operator(char *str)
24554 +
24555 + switch (*op) {
24556 + case '-':
24557 ++ /*
24558 ++ * Unfortunately, the modifier ".sym-offset"
24559 ++ * can confuse things.
24560 ++ */
24561 ++ if (op - str >= 4 && !strncmp(op - 4, ".sym-offset", 11))
24562 ++ return FIELD_OP_NONE;
24563 ++
24564 + if (*str == '-')
24565 + field_op = FIELD_OP_UNARY_MINUS;
24566 + else
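Hist expressions may contain real subtraction, but field modifiers such as "ip.sym-offset" also contain a '-'; the added check looks four bytes back from the '-' for the ".sym-offset" substring before treating it as an operator. A standalone demo of that disambiguation:

#include <stdio.h>
#include <string.h>

/* Is the '-' at *op part of the ".sym-offset" modifier rather than a
 * subtraction operator? Mirrors the check added in contains_operator(). */
static int is_sym_offset_dash(const char *str, const char *op)
{
        return op - str >= 4 && !strncmp(op - 4, ".sym-offset", 11);
}

int main(void)
{
        const char *field = "ip.sym-offset";
        const char *expr  = "common_pid-10";

        printf("%d\n", is_sym_offset_dash(field, strchr(field, '-'))); /* 1 */
        printf("%d\n", is_sym_offset_dash(expr,  strchr(expr,  '-'))); /* 0 */
        return 0;
}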
24567 +diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
24568 +index 9f478d29b9264..976bf8ce80396 100644
24569 +--- a/kernel/tracepoint.c
24570 ++++ b/kernel/tracepoint.c
24571 +@@ -273,7 +273,8 @@ static void tracepoint_update_call(struct tracepoint *tp, struct tracepoint_func
24572 + * Add the probe function to a tracepoint.
24573 + */
24574 + static int tracepoint_add_func(struct tracepoint *tp,
24575 +- struct tracepoint_func *func, int prio)
24576 ++ struct tracepoint_func *func, int prio,
24577 ++ bool warn)
24578 + {
24579 + struct tracepoint_func *old, *tp_funcs;
24580 + int ret;
24581 +@@ -288,7 +289,7 @@ static int tracepoint_add_func(struct tracepoint *tp,
24582 + lockdep_is_held(&tracepoints_mutex));
24583 + old = func_add(&tp_funcs, func, prio);
24584 + if (IS_ERR(old)) {
24585 +- WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
24586 ++ WARN_ON_ONCE(warn && PTR_ERR(old) != -ENOMEM);
24587 + return PTR_ERR(old);
24588 + }
24589 +
24590 +@@ -343,6 +344,32 @@ static int tracepoint_remove_func(struct tracepoint *tp,
24591 + return 0;
24592 + }
24593 +
24594 ++/**
24595 ++ * tracepoint_probe_register_prio_may_exist - Connect a probe to a tracepoint with priority
24596 ++ * @tp: tracepoint
24597 ++ * @probe: probe handler
24598 ++ * @data: tracepoint data
24599 ++ * @prio: priority of this function over other registered functions
24600 ++ *
24601 ++ * Same as tracepoint_probe_register_prio() except that it will not warn
24602 ++ * if the tracepoint is already registered.
24603 ++ */
24604 ++int tracepoint_probe_register_prio_may_exist(struct tracepoint *tp, void *probe,
24605 ++ void *data, int prio)
24606 ++{
24607 ++ struct tracepoint_func tp_func;
24608 ++ int ret;
24609 ++
24610 ++ mutex_lock(&tracepoints_mutex);
24611 ++ tp_func.func = probe;
24612 ++ tp_func.data = data;
24613 ++ tp_func.prio = prio;
24614 ++ ret = tracepoint_add_func(tp, &tp_func, prio, false);
24615 ++ mutex_unlock(&tracepoints_mutex);
24616 ++ return ret;
24617 ++}
24618 ++EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio_may_exist);
24619 ++
24620 + /**
24621 + * tracepoint_probe_register_prio - Connect a probe to a tracepoint with priority
24622 + * @tp: tracepoint
24623 +@@ -366,7 +393,7 @@ int tracepoint_probe_register_prio(struct tracepoint *tp, void *probe,
24624 + tp_func.func = probe;
24625 + tp_func.data = data;
24626 + tp_func.prio = prio;
24627 +- ret = tracepoint_add_func(tp, &tp_func, prio);
24628 ++ ret = tracepoint_add_func(tp, &tp_func, prio, true);
24629 + mutex_unlock(&tracepoints_mutex);
24630 + return ret;
24631 + }
24632 +diff --git a/kernel/ucount.c b/kernel/ucount.c
24633 +index 8d8874f1c35e2..1f4455874aa0d 100644
24634 +--- a/kernel/ucount.c
24635 ++++ b/kernel/ucount.c
24636 +@@ -8,6 +8,12 @@
24637 + #include <linux/kmemleak.h>
24638 + #include <linux/user_namespace.h>
24639 +
24640 ++struct ucounts init_ucounts = {
24641 ++ .ns = &init_user_ns,
24642 ++ .uid = GLOBAL_ROOT_UID,
24643 ++ .count = 1,
24644 ++};
24645 ++
24646 + #define UCOUNTS_HASHTABLE_BITS 10
24647 + static struct hlist_head ucounts_hashtable[(1 << UCOUNTS_HASHTABLE_BITS)];
24648 + static DEFINE_SPINLOCK(ucounts_lock);
24649 +@@ -129,7 +135,15 @@ static struct ucounts *find_ucounts(struct user_namespace *ns, kuid_t uid, struc
24650 + return NULL;
24651 + }
24652 +
24653 +-static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
24654 ++static void hlist_add_ucounts(struct ucounts *ucounts)
24655 ++{
24656 ++ struct hlist_head *hashent = ucounts_hashentry(ucounts->ns, ucounts->uid);
24657 ++ spin_lock_irq(&ucounts_lock);
24658 ++ hlist_add_head(&ucounts->node, hashent);
24659 ++ spin_unlock_irq(&ucounts_lock);
24660 ++}
24661 ++
24662 ++struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
24663 + {
24664 + struct hlist_head *hashent = ucounts_hashentry(ns, uid);
24665 + struct ucounts *ucounts, *new;
24666 +@@ -164,7 +178,26 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
24667 + return ucounts;
24668 + }
24669 +
24670 +-static void put_ucounts(struct ucounts *ucounts)
24671 ++struct ucounts *get_ucounts(struct ucounts *ucounts)
24672 ++{
24673 ++ unsigned long flags;
24674 ++
24675 ++ if (!ucounts)
24676 ++ return NULL;
24677 ++
24678 ++ spin_lock_irqsave(&ucounts_lock, flags);
24679 ++ if (ucounts->count == INT_MAX) {
24680 ++ WARN_ONCE(1, "ucounts: counter has reached its maximum value");
24681 ++ ucounts = NULL;
24682 ++ } else {
24683 ++ ucounts->count += 1;
24684 ++ }
24685 ++ spin_unlock_irqrestore(&ucounts_lock, flags);
24686 ++
24687 ++ return ucounts;
24688 ++}
24689 ++
24690 ++void put_ucounts(struct ucounts *ucounts)
24691 + {
24692 + unsigned long flags;
24693 +
24694 +@@ -198,7 +231,7 @@ struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid,
24695 + {
24696 + struct ucounts *ucounts, *iter, *bad;
24697 + struct user_namespace *tns;
24698 +- ucounts = get_ucounts(ns, uid);
24699 ++ ucounts = alloc_ucounts(ns, uid);
24700 + for (iter = ucounts; iter; iter = tns->ucounts) {
24701 + int max;
24702 + tns = iter->ns;
24703 +@@ -241,6 +274,7 @@ static __init int user_namespace_sysctl_init(void)
24704 + BUG_ON(!user_header);
24705 + BUG_ON(!setup_userns_sysctls(&init_user_ns));
24706 + #endif
24707 ++ hlist_add_ucounts(&init_ucounts);
24708 + return 0;
24709 + }
24710 + subsys_initcall(user_namespace_sysctl_init);
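
With this change get_ucounts() takes an extra reference on an existing
struct ucounts and refuses to wrap once the counter hits INT_MAX, while
alloc_ucounts() keeps the old lookup-or-create role. The
saturate-instead-of-overflow idea in isolation (standalone C sketch; the
kernel does this under ucounts_lock):

    #include <stdio.h>
    #include <limits.h>
    #include <stddef.h>

    struct ref_demo {
        int count;
    };

    /* Take a reference, or fail once the counter would overflow. */
    static struct ref_demo *get_ref(struct ref_demo *r)
    {
        if (!r)
            return NULL;
        if (r->count == INT_MAX)
            return NULL;    /* saturated: refuse rather than wrap */
        r->count += 1;
        return r;
    }

    static void put_ref(struct ref_demo *r)
    {
        if (r)
            r->count -= 1;
    }

    int main(void)
    {
        struct ref_demo r = { .count = INT_MAX - 1 };

        printf("%s\n", get_ref(&r) ? "got ref" : "saturated"); /* got ref */
        printf("%s\n", get_ref(&r) ? "got ref" : "saturated"); /* saturated */
        put_ref(&r);
        return 0;
    }
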
24711 +diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
24712 +index 8d62863721b05..27670ab7a4eda 100644
24713 +--- a/kernel/user_namespace.c
24714 ++++ b/kernel/user_namespace.c
24715 +@@ -1340,6 +1340,9 @@ static int userns_install(struct nsset *nsset, struct ns_common *ns)
24716 + put_user_ns(cred->user_ns);
24717 + set_cred_user_ns(cred, get_user_ns(user_ns));
24718 +
24719 ++ if (set_cred_ucounts(cred) < 0)
24720 ++ return -EINVAL;
24721 ++
24722 + return 0;
24723 + }
24724 +
24725 +diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
24726 +index 678c13967580e..1e1bd6f4a13de 100644
24727 +--- a/lib/Kconfig.debug
24728 ++++ b/lib/Kconfig.debug
24729 +@@ -1372,7 +1372,6 @@ config LOCKDEP
24730 + bool
24731 + depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
24732 + select STACKTRACE
24733 +- depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86
24734 + select KALLSYMS
24735 + select KALLSYMS_ALL
24736 +
24737 +diff --git a/lib/iov_iter.c b/lib/iov_iter.c
24738 +index c701b7a187f2b..9eb7c31688cc8 100644
24739 +--- a/lib/iov_iter.c
24740 ++++ b/lib/iov_iter.c
24741 +@@ -476,7 +476,7 @@ int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
24742 + int err;
24743 + struct iovec v;
24744 +
24745 +- if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
24746 ++ if (iter_is_iovec(i)) {
24747 + iterate_iovec(i, bytes, v, iov, skip, ({
24748 + err = fault_in_pages_readable(v.iov_base, v.iov_len);
24749 + if (unlikely(err))
24750 +@@ -957,23 +957,48 @@ static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
24751 + return false;
24752 + }
24753 +
24754 +-size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
24755 ++static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
24756 + struct iov_iter *i)
24757 + {
24758 +- if (unlikely(!page_copy_sane(page, offset, bytes)))
24759 +- return 0;
24760 + if (i->type & (ITER_BVEC | ITER_KVEC | ITER_XARRAY)) {
24761 + void *kaddr = kmap_atomic(page);
24762 + size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
24763 + kunmap_atomic(kaddr);
24764 + return wanted;
24765 +- } else if (unlikely(iov_iter_is_discard(i)))
24766 ++ } else if (unlikely(iov_iter_is_discard(i))) {
24767 ++ if (unlikely(i->count < bytes))
24768 ++ bytes = i->count;
24769 ++ i->count -= bytes;
24770 + return bytes;
24771 +- else if (likely(!iov_iter_is_pipe(i)))
24772 ++ } else if (likely(!iov_iter_is_pipe(i)))
24773 + return copy_page_to_iter_iovec(page, offset, bytes, i);
24774 + else
24775 + return copy_page_to_iter_pipe(page, offset, bytes, i);
24776 + }
24777 ++
24778 ++size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
24779 ++ struct iov_iter *i)
24780 ++{
24781 ++ size_t res = 0;
24782 ++ if (unlikely(!page_copy_sane(page, offset, bytes)))
24783 ++ return 0;
24784 ++ page += offset / PAGE_SIZE; // first subpage
24785 ++ offset %= PAGE_SIZE;
24786 ++ while (1) {
24787 ++ size_t n = __copy_page_to_iter(page, offset,
24788 ++ min(bytes, (size_t)PAGE_SIZE - offset), i);
24789 ++ res += n;
24790 ++ bytes -= n;
24791 ++ if (!bytes || !n)
24792 ++ break;
24793 ++ offset += n;
24794 ++ if (offset == PAGE_SIZE) {
24795 ++ page++;
24796 ++ offset = 0;
24797 ++ }
24798 ++ }
24799 ++ return res;
24800 ++}
24801 + EXPORT_SYMBOL(copy_page_to_iter);
24802 +
24803 + size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
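
copy_page_to_iter() can now be handed a compound page with offset + bytes
crossing page boundaries, so the wrapper walks one PAGE_SIZE subpage at a
time. The chunking arithmetic on its own (standalone C, PAGE_SIZE assumed
to be 4096):

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096UL

    static void walk_subpages(size_t offset, size_t bytes)
    {
        size_t page = offset / PAGE_SIZE;   /* first subpage */

        offset %= PAGE_SIZE;
        while (bytes) {
            size_t n = bytes < PAGE_SIZE - offset ? bytes
                                                  : PAGE_SIZE - offset;

            printf("subpage %zu: offset %zu, %zu bytes\n", page, offset, n);
            bytes -= n;
            offset += n;
            if (offset == PAGE_SIZE) {
                page++;
                offset = 0;
            }
        }
    }

    int main(void)
    {
        walk_subpages(6000, 5000); /* 2192 bytes in subpage 1, 2808 in 2 */
        return 0;
    }
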
24804 +diff --git a/lib/kstrtox.c b/lib/kstrtox.c
24805 +index a118b0b1e9b2c..0b5fe8b411732 100644
24806 +--- a/lib/kstrtox.c
24807 ++++ b/lib/kstrtox.c
24808 +@@ -39,20 +39,22 @@ const char *_parse_integer_fixup_radix(const char *s, unsigned int *base)
24809 +
24810 + /*
24811 + * Convert non-negative integer string representation in explicitly given radix
24812 +- * to an integer.
24813 ++ * to an integer. A maximum of max_chars characters will be converted.
24814 ++ *
24815 + * Return number of characters consumed maybe or-ed with overflow bit.
24816 + * If overflow occurs, result integer (incorrect) is still returned.
24817 + *
24818 + * Don't you dare use this function.
24819 + */
24820 +-unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *p)
24821 ++unsigned int _parse_integer_limit(const char *s, unsigned int base, unsigned long long *p,
24822 ++ size_t max_chars)
24823 + {
24824 + unsigned long long res;
24825 + unsigned int rv;
24826 +
24827 + res = 0;
24828 + rv = 0;
24829 +- while (1) {
24830 ++ while (max_chars--) {
24831 + unsigned int c = *s;
24832 + unsigned int lc = c | 0x20; /* don't tolower() this line */
24833 + unsigned int val;
24834 +@@ -82,6 +84,11 @@ unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long
24835 + return rv;
24836 + }
24837 +
24838 ++unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *p)
24839 ++{
24840 ++ return _parse_integer_limit(s, base, p, INT_MAX);
24841 ++}
24842 ++
24843 + static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res)
24844 + {
24845 + unsigned long long _res;
24846 +diff --git a/lib/kstrtox.h b/lib/kstrtox.h
24847 +index 3b4637bcd2540..158c400ca8658 100644
24848 +--- a/lib/kstrtox.h
24849 ++++ b/lib/kstrtox.h
24850 +@@ -4,6 +4,8 @@
24851 +
24852 + #define KSTRTOX_OVERFLOW (1U << 31)
24853 + const char *_parse_integer_fixup_radix(const char *s, unsigned int *base);
24854 ++unsigned int _parse_integer_limit(const char *s, unsigned int base, unsigned long long *res,
24855 ++ size_t max_chars);
24856 + unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *res);
24857 +
24858 + #endif
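
_parse_integer_limit() simply bounds the previously endless digit loop so a
caller can cap how much of the string is converted. A simplified decimal-only
version of the bounded loop (standalone C; the kernel variant also handles
arbitrary bases and an overflow flag):

    #include <stdio.h>
    #include <stddef.h>

    /* Convert at most max_chars decimal digits; return digits consumed. */
    static unsigned int parse_decimal_limit(const char *s, size_t max_chars,
                                            unsigned long long *res)
    {
        unsigned int consumed = 0;

        *res = 0;
        while (max_chars--) {
            char c = s[consumed];

            if (c < '0' || c > '9')
                break;
            *res = *res * 10 + (unsigned int)(c - '0');
            consumed++;
        }
        return consumed;
    }

    int main(void)
    {
        unsigned long long v;
        unsigned int n = parse_decimal_limit("123456", 3, &v);

        printf("consumed %u, value %llu\n", n, v); /* consumed 3, value 123 */
        return 0;
    }
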
24859 +diff --git a/lib/kunit/test.c b/lib/kunit/test.c
24860 +index 2f6cc01232322..17973a4a44c29 100644
24861 +--- a/lib/kunit/test.c
24862 ++++ b/lib/kunit/test.c
24863 +@@ -376,7 +376,7 @@ static void kunit_run_case_catch_errors(struct kunit_suite *suite,
24864 + context.test_case = test_case;
24865 + kunit_try_catch_run(try_catch, &context);
24866 +
24867 +- test_case->success = test->success;
24868 ++ test_case->success &= test->success;
24869 + }
24870 +
24871 + int kunit_run_tests(struct kunit_suite *suite)
24872 +@@ -388,7 +388,7 @@ int kunit_run_tests(struct kunit_suite *suite)
24873 +
24874 + kunit_suite_for_each_test_case(suite, test_case) {
24875 + struct kunit test = { .param_value = NULL, .param_index = 0 };
24876 +- bool test_success = true;
24877 ++ test_case->success = true;
24878 +
24879 + if (test_case->generate_params) {
24880 + /* Get initial param. */
24881 +@@ -398,7 +398,6 @@ int kunit_run_tests(struct kunit_suite *suite)
24882 +
24883 + do {
24884 + kunit_run_case_catch_errors(suite, test_case, &test);
24885 +- test_success &= test_case->success;
24886 +
24887 + if (test_case->generate_params) {
24888 + if (param_desc[0] == '\0') {
24889 +@@ -420,7 +419,7 @@ int kunit_run_tests(struct kunit_suite *suite)
24890 + }
24891 + } while (test.param_value);
24892 +
24893 +- kunit_print_ok_not_ok(&test, true, test_success,
24894 ++ kunit_print_ok_not_ok(&test, true, test_case->success,
24895 + kunit_test_case_num(suite, test_case),
24896 + test_case->name);
24897 + }
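
The KUnit fix makes a parameterised test case fail when any single parameter
run fails: test_case->success is initialised once per case and then only
ANDed with each run's result. The accumulation in miniature (standalone C):

    #include <stdio.h>
    #include <stdbool.h>

    int main(void)
    {
        bool run_results[] = { true, false, true };
        bool success = true;            /* set once per test case */

        for (int i = 0; i < 3; i++)
            success &= run_results[i];  /* one parameterised run each */
        printf("case %s\n", success ? "passed" : "failed"); /* failed */
        return 0;
    }
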
24898 +diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
24899 +index 2d85abac17448..0f6b262e09648 100644
24900 +--- a/lib/locking-selftest.c
24901 ++++ b/lib/locking-selftest.c
24902 +@@ -194,6 +194,7 @@ static void init_shared_classes(void)
24903 + #define HARDIRQ_ENTER() \
24904 + local_irq_disable(); \
24905 + __irq_enter(); \
24906 ++ lockdep_hardirq_threaded(); \
24907 + WARN_ON(!in_irq());
24908 +
24909 + #define HARDIRQ_EXIT() \
24910 +diff --git a/lib/math/rational.c b/lib/math/rational.c
24911 +index 9781d521963d1..c0ab51d8fbb98 100644
24912 +--- a/lib/math/rational.c
24913 ++++ b/lib/math/rational.c
24914 +@@ -12,6 +12,7 @@
24915 + #include <linux/compiler.h>
24916 + #include <linux/export.h>
24917 + #include <linux/minmax.h>
24918 ++#include <linux/limits.h>
24919 +
24920 + /*
24921 + * calculate best rational approximation for a given fraction
24922 +@@ -78,13 +79,18 @@ void rational_best_approximation(
24923 + * found below as 't'.
24924 + */
24925 + if ((n2 > max_numerator) || (d2 > max_denominator)) {
24926 +- unsigned long t = min((max_numerator - n0) / n1,
24927 +- (max_denominator - d0) / d1);
24928 ++ unsigned long t = ULONG_MAX;
24929 +
24930 +- /* This tests if the semi-convergent is closer
24931 +- * than the previous convergent.
24932 ++ if (d1)
24933 ++ t = (max_denominator - d0) / d1;
24934 ++ if (n1)
24935 ++ t = min(t, (max_numerator - n0) / n1);
24936 ++
24937 ++ /* This tests if the semi-convergent is closer than the previous
24938 ++ * convergent. If d1 is zero there is no previous convergent as this
24939 ++ * is the 1st iteration, so always choose the semi-convergent.
24940 + */
24941 +- if (2u * t > a || (2u * t == a && d0 * dp > d1 * d)) {
24942 ++ if (!d1 || 2u * t > a || (2u * t == a && d0 * dp > d1 * d)) {
24943 + n1 = n0 + t * n1;
24944 + d1 = d0 + t * d1;
24945 + }
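
The rational.c fix guards the semi-convergent computation against n1 == 0 or
d1 == 0 on the first iteration, where "(max - x0) / x1" divides by zero. A
standalone port of the patched continued-fraction walk (plain C, same
algorithm, simplified naming):

    #include <stdio.h>
    #include <limits.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    static void best_approx(unsigned long n, unsigned long d,
                            unsigned long max_n, unsigned long max_d,
                            unsigned long *best_n, unsigned long *best_d)
    {
        unsigned long n0 = 0, n1 = 1, d0 = 1, d1 = 0, n2, d2;

        for (;;) {
            unsigned long dp, a;

            if (d == 0)
                break;
            dp = d;                 /* Euclidean step: next term 'a' */
            a = n / d;
            d = n % d;
            n = dp;
            n2 = n0 + a * n1;       /* current convergent */
            d2 = d0 + a * d1;
            if (n2 > max_n || d2 > max_d) {
                unsigned long t = ULONG_MAX;

                if (d1)             /* the guards added by the fix */
                    t = (max_d - d0) / d1;
                if (n1)
                    t = MIN(t, (max_n - n0) / n1);
                /* With d1 == 0 (1st iteration) there is no previous
                 * convergent, so always take the semi-convergent. */
                if (!d1 || 2 * t > a || (2 * t == a && d0 * dp > d1 * d)) {
                    n1 = n0 + t * n1;
                    d1 = d0 + t * d1;
                }
                break;
            }
            n0 = n1; n1 = n2;
            d0 = d1; d1 = d2;
        }
        *best_n = n1;
        *best_d = d1;
    }

    int main(void)
    {
        unsigned long bn, bd;

        best_approx(31416, 10000, 100, 100, &bn, &bd);
        printf("%lu/%lu\n", bn, bd); /* 22/7 */
        best_approx(31416, 10000, 2, 2, &bn, &bd);
        printf("%lu/%lu\n", bn, bd); /* 2/1; unpatched code divides by 0 */
        return 0;
    }
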
24946 +diff --git a/lib/seq_buf.c b/lib/seq_buf.c
24947 +index 707453f5d58ee..89c26c393bdba 100644
24948 +--- a/lib/seq_buf.c
24949 ++++ b/lib/seq_buf.c
24950 +@@ -243,12 +243,14 @@ int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,
24951 + break;
24952 +
24953 + /* j increments twice per loop */
24954 +- len -= j / 2;
24955 + hex[j++] = ' ';
24956 +
24957 + seq_buf_putmem(s, hex, j);
24958 + if (seq_buf_has_overflowed(s))
24959 + return -1;
24960 ++
24961 ++ len -= start_len;
24962 ++ data += start_len;
24963 + }
24964 + return 0;
24965 + }
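
Before this fix seq_buf_putmem_hex() shrank len by the wrong amount and never
advanced the source pointer, so buffers longer than one 8-byte pass dumped the
first bytes repeatedly. The corrected consume-and-advance loop on its own
(standalone C):

    #include <stdio.h>

    #define CHUNK 8 /* bytes per inner pass, as in seq_buf_putmem_hex() */

    static void putmem_hex(const void *mem, unsigned int len)
    {
        const unsigned char *data = mem;

        while (len) {
            unsigned int start_len = len < CHUNK ? len : CHUNK;

            for (unsigned int i = 0; i < start_len; i++)
                printf("%02x", data[i]);
            printf("\n");
            len -= start_len;  /* consume exactly what was printed... */
            data += start_len; /* ...and move past it */
        }
    }

    int main(void)
    {
        putmem_hex("ABCDEFGHIJ", 10); /* one 8-byte line, one 2-byte line */
        return 0;
    }
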
24966 +diff --git a/lib/vsprintf.c b/lib/vsprintf.c
24967 +index f0c35d9b65bff..077a4a7c6f00c 100644
24968 +--- a/lib/vsprintf.c
24969 ++++ b/lib/vsprintf.c
24970 +@@ -53,6 +53,31 @@
24971 + #include <linux/string_helpers.h>
24972 + #include "kstrtox.h"
24973 +
24974 ++static unsigned long long simple_strntoull(const char *startp, size_t max_chars,
24975 ++ char **endp, unsigned int base)
24976 ++{
24977 ++ const char *cp;
24978 ++ unsigned long long result = 0ULL;
24979 ++ size_t prefix_chars;
24980 ++ unsigned int rv;
24981 ++
24982 ++ cp = _parse_integer_fixup_radix(startp, &base);
24983 ++ prefix_chars = cp - startp;
24984 ++ if (prefix_chars < max_chars) {
24985 ++ rv = _parse_integer_limit(cp, base, &result, max_chars - prefix_chars);
24986 ++ /* FIXME */
24987 ++ cp += (rv & ~KSTRTOX_OVERFLOW);
24988 ++ } else {
24989 ++ /* Field too short for prefix + digit, skip over without converting */
24990 ++ cp = startp + max_chars;
24991 ++ }
24992 ++
24993 ++ if (endp)
24994 ++ *endp = (char *)cp;
24995 ++
24996 ++ return result;
24997 ++}
24998 ++
24999 + /**
25000 + * simple_strtoull - convert a string to an unsigned long long
25001 + * @cp: The start of the string
25002 +@@ -63,18 +88,7 @@
25003 + */
25004 + unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
25005 + {
25006 +- unsigned long long result;
25007 +- unsigned int rv;
25008 +-
25009 +- cp = _parse_integer_fixup_radix(cp, &base);
25010 +- rv = _parse_integer(cp, base, &result);
25011 +- /* FIXME */
25012 +- cp += (rv & ~KSTRTOX_OVERFLOW);
25013 +-
25014 +- if (endp)
25015 +- *endp = (char *)cp;
25016 +-
25017 +- return result;
25018 ++ return simple_strntoull(cp, INT_MAX, endp, base);
25019 + }
25020 + EXPORT_SYMBOL(simple_strtoull);
25021 +
25022 +@@ -109,6 +123,21 @@ long simple_strtol(const char *cp, char **endp, unsigned int base)
25023 + }
25024 + EXPORT_SYMBOL(simple_strtol);
25025 +
25026 ++static long long simple_strntoll(const char *cp, size_t max_chars, char **endp,
25027 ++ unsigned int base)
25028 ++{
25029 ++ /*
25030 ++ * simple_strntoull() safely handles receiving max_chars==0 in the
25031 ++ * case cp[0] == '-' && max_chars == 1.
25032 ++ * If max_chars == 0 we can drop through and pass it to simple_strntoull()
25033 ++ * and the content of *cp is irrelevant.
25034 ++ */
25035 ++ if (*cp == '-' && max_chars > 0)
25036 ++ return -simple_strntoull(cp + 1, max_chars - 1, endp, base);
25037 ++
25038 ++ return simple_strntoull(cp, max_chars, endp, base);
25039 ++}
25040 ++
25041 + /**
25042 + * simple_strtoll - convert a string to a signed long long
25043 + * @cp: The start of the string
25044 +@@ -119,10 +148,7 @@ EXPORT_SYMBOL(simple_strtol);
25045 + */
25046 + long long simple_strtoll(const char *cp, char **endp, unsigned int base)
25047 + {
25048 +- if (*cp == '-')
25049 +- return -simple_strtoull(cp + 1, endp, base);
25050 +-
25051 +- return simple_strtoull(cp, endp, base);
25052 ++ return simple_strntoll(cp, INT_MAX, endp, base);
25053 + }
25054 + EXPORT_SYMBOL(simple_strtoll);
25055 +
25056 +@@ -3576,25 +3602,13 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
25057 + break;
25058 +
25059 + if (is_sign)
25060 +- val.s = qualifier != 'L' ?
25061 +- simple_strtol(str, &next, base) :
25062 +- simple_strtoll(str, &next, base);
25063 ++ val.s = simple_strntoll(str,
25064 ++ field_width >= 0 ? field_width : INT_MAX,
25065 ++ &next, base);
25066 + else
25067 +- val.u = qualifier != 'L' ?
25068 +- simple_strtoul(str, &next, base) :
25069 +- simple_strtoull(str, &next, base);
25070 +-
25071 +- if (field_width > 0 && next - str > field_width) {
25072 +- if (base == 0)
25073 +- _parse_integer_fixup_radix(str, &base);
25074 +- while (next - str > field_width) {
25075 +- if (is_sign)
25076 +- val.s = div_s64(val.s, base);
25077 +- else
25078 +- val.u = div_u64(val.u, base);
25079 +- --next;
25080 +- }
25081 +- }
25082 ++ val.u = simple_strntoull(str,
25083 ++ field_width >= 0 ? field_width : INT_MAX,
25084 ++ &next, base);
25085 +
25086 + switch (qualifier) {
25087 + case 'H': /* that's 'hh' in format */
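
vsscanf() now honours a numeric field width by parsing at most that many
characters up front, instead of converting the whole run of digits and
dividing the excess back out, which went wrong whenever the base had been
detected from a prefix. ISO C scanf widths specify exactly this behaviour:

    #include <stdio.h>

    int main(void)
    {
        int a, b;

        /* A width of 3 must consume exactly "123", leaving "45" for %d. */
        sscanf("12345", "%3d%d", &a, &b);
        printf("%d %d\n", a, b); /* 123 45 */
        return 0;
    }
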
25088 +diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
25089 +index 297d1b349c197..92bfc37300dfc 100644
25090 +--- a/mm/debug_vm_pgtable.c
25091 ++++ b/mm/debug_vm_pgtable.c
25092 +@@ -146,13 +146,14 @@ static void __init pte_savedwrite_tests(unsigned long pfn, pgprot_t prot)
25093 + static void __init pmd_basic_tests(unsigned long pfn, int idx)
25094 + {
25095 + pgprot_t prot = protection_map[idx];
25096 +- pmd_t pmd = pfn_pmd(pfn, prot);
25097 + unsigned long val = idx, *ptr = &val;
25098 ++ pmd_t pmd;
25099 +
25100 + if (!has_transparent_hugepage())
25101 + return;
25102 +
25103 + pr_debug("Validating PMD basic (%pGv)\n", ptr);
25104 ++ pmd = pfn_pmd(pfn, prot);
25105 +
25106 + /*
25107 + * This test needs to be executed after the given page table entry
25108 +@@ -185,7 +186,7 @@ static void __init pmd_advanced_tests(struct mm_struct *mm,
25109 + unsigned long pfn, unsigned long vaddr,
25110 + pgprot_t prot, pgtable_t pgtable)
25111 + {
25112 +- pmd_t pmd = pfn_pmd(pfn, prot);
25113 ++ pmd_t pmd;
25114 +
25115 + if (!has_transparent_hugepage())
25116 + return;
25117 +@@ -232,9 +233,14 @@ static void __init pmd_advanced_tests(struct mm_struct *mm,
25118 +
25119 + static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot)
25120 + {
25121 +- pmd_t pmd = pfn_pmd(pfn, prot);
25122 ++ pmd_t pmd;
25123 ++
25124 ++ if (!has_transparent_hugepage())
25125 ++ return;
25126 +
25127 + pr_debug("Validating PMD leaf\n");
25128 ++ pmd = pfn_pmd(pfn, prot);
25129 ++
25130 + /*
25131 + * PMD based THP is a leaf entry.
25132 + */
25133 +@@ -267,12 +273,16 @@ static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
25134 +
25135 + static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot)
25136 + {
25137 +- pmd_t pmd = pfn_pmd(pfn, prot);
25138 ++ pmd_t pmd;
25139 +
25140 + if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
25141 + return;
25142 +
25143 ++ if (!has_transparent_hugepage())
25144 ++ return;
25145 ++
25146 + pr_debug("Validating PMD saved write\n");
25147 ++ pmd = pfn_pmd(pfn, prot);
25148 + WARN_ON(!pmd_savedwrite(pmd_mk_savedwrite(pmd_clear_savedwrite(pmd))));
25149 + WARN_ON(pmd_savedwrite(pmd_clear_savedwrite(pmd_mk_savedwrite(pmd))));
25150 + }
25151 +@@ -281,13 +291,14 @@ static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot)
25152 + static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn, int idx)
25153 + {
25154 + pgprot_t prot = protection_map[idx];
25155 +- pud_t pud = pfn_pud(pfn, prot);
25156 + unsigned long val = idx, *ptr = &val;
25157 ++ pud_t pud;
25158 +
25159 + if (!has_transparent_hugepage())
25160 + return;
25161 +
25162 + pr_debug("Validating PUD basic (%pGv)\n", ptr);
25163 ++ pud = pfn_pud(pfn, prot);
25164 +
25165 + /*
25166 + * This test needs to be executed after the given page table entry
25167 +@@ -323,7 +334,7 @@ static void __init pud_advanced_tests(struct mm_struct *mm,
25168 + unsigned long pfn, unsigned long vaddr,
25169 + pgprot_t prot)
25170 + {
25171 +- pud_t pud = pfn_pud(pfn, prot);
25172 ++ pud_t pud;
25173 +
25174 + if (!has_transparent_hugepage())
25175 + return;
25176 +@@ -332,6 +343,7 @@ static void __init pud_advanced_tests(struct mm_struct *mm,
25177 + /* Align the address wrt HPAGE_PUD_SIZE */
25178 + vaddr &= HPAGE_PUD_MASK;
25179 +
25180 ++ pud = pfn_pud(pfn, prot);
25181 + set_pud_at(mm, vaddr, pudp, pud);
25182 + pudp_set_wrprotect(mm, vaddr, pudp);
25183 + pud = READ_ONCE(*pudp);
25184 +@@ -370,9 +382,13 @@ static void __init pud_advanced_tests(struct mm_struct *mm,
25185 +
25186 + static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot)
25187 + {
25188 +- pud_t pud = pfn_pud(pfn, prot);
25189 ++ pud_t pud;
25190 ++
25191 ++ if (!has_transparent_hugepage())
25192 ++ return;
25193 +
25194 + pr_debug("Validating PUD leaf\n");
25195 ++ pud = pfn_pud(pfn, prot);
25196 + /*
25197 + * PUD based THP is a leaf entry.
25198 + */
25199 +@@ -654,12 +670,16 @@ static void __init pte_protnone_tests(unsigned long pfn, pgprot_t prot)
25200 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE
25201 + static void __init pmd_protnone_tests(unsigned long pfn, pgprot_t prot)
25202 + {
25203 +- pmd_t pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
25204 ++ pmd_t pmd;
25205 +
25206 + if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
25207 + return;
25208 +
25209 ++ if (!has_transparent_hugepage())
25210 ++ return;
25211 ++
25212 + pr_debug("Validating PMD protnone\n");
25213 ++ pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
25214 + WARN_ON(!pmd_protnone(pmd));
25215 + WARN_ON(!pmd_present(pmd));
25216 + }
25217 +@@ -679,18 +699,26 @@ static void __init pte_devmap_tests(unsigned long pfn, pgprot_t prot)
25218 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE
25219 + static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot)
25220 + {
25221 +- pmd_t pmd = pfn_pmd(pfn, prot);
25222 ++ pmd_t pmd;
25223 ++
25224 ++ if (!has_transparent_hugepage())
25225 ++ return;
25226 +
25227 + pr_debug("Validating PMD devmap\n");
25228 ++ pmd = pfn_pmd(pfn, prot);
25229 + WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
25230 + }
25231 +
25232 + #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
25233 + static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot)
25234 + {
25235 +- pud_t pud = pfn_pud(pfn, prot);
25236 ++ pud_t pud;
25237 ++
25238 ++ if (!has_transparent_hugepage())
25239 ++ return;
25240 +
25241 + pr_debug("Validating PUD devmap\n");
25242 ++ pud = pfn_pud(pfn, prot);
25243 + WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
25244 + }
25245 + #else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
25246 +@@ -733,25 +761,33 @@ static void __init pte_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
25247 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE
25248 + static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
25249 + {
25250 +- pmd_t pmd = pfn_pmd(pfn, prot);
25251 ++ pmd_t pmd;
25252 +
25253 + if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
25254 + return;
25255 +
25256 ++ if (!has_transparent_hugepage())
25257 ++ return;
25258 ++
25259 + pr_debug("Validating PMD soft dirty\n");
25260 ++ pmd = pfn_pmd(pfn, prot);
25261 + WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
25262 + WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
25263 + }
25264 +
25265 + static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
25266 + {
25267 +- pmd_t pmd = pfn_pmd(pfn, prot);
25268 ++ pmd_t pmd;
25269 +
25270 + if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
25271 + !IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
25272 + return;
25273 +
25274 ++ if (!has_transparent_hugepage())
25275 ++ return;
25276 ++
25277 + pr_debug("Validating PMD swap soft dirty\n");
25278 ++ pmd = pfn_pmd(pfn, prot);
25279 + WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
25280 + WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
25281 + }
25282 +@@ -780,6 +816,9 @@ static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot)
25283 + swp_entry_t swp;
25284 + pmd_t pmd;
25285 +
25286 ++ if (!has_transparent_hugepage())
25287 ++ return;
25288 ++
25289 + pr_debug("Validating PMD swap\n");
25290 + pmd = pfn_pmd(pfn, prot);
25291 + swp = __pmd_to_swp_entry(pmd);
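
Every hunk in this file follows one pattern: pfn_pmd()/pfn_pud() are only
safe to call when transparent hugepages are really available, so the entry is
no longer built in the initialiser but only after the runtime guard. The
shape of that change as a generic sketch (standalone C, hypothetical names):

    #include <stdio.h>
    #include <stdbool.h>

    static bool thp_available; /* runtime capability, e.g. THP support */

    struct entry { unsigned long val; };

    /* Imagine this constructor is only valid when the feature exists. */
    static struct entry make_entry(unsigned long pfn)
    {
        return (struct entry){ .val = pfn << 12 };
    }

    static void run_test(unsigned long pfn)
    {
        struct entry e; /* declared, but not built yet */

        if (!thp_available) {
            printf("skipped: feature unavailable\n");
            return;     /* bail out before touching make_entry() */
        }

        e = make_entry(pfn); /* moved after every runtime guard */
        printf("testing entry %#lx\n", e.val);
    }

    int main(void)
    {
        run_test(42);        /* skipped */
        thp_available = true;
        run_test(42);        /* testing entry 0x2a000 */
        return 0;
    }
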
25292 +diff --git a/mm/gup.c b/mm/gup.c
25293 +index 3ded6a5f26b25..90262e448552a 100644
25294 +--- a/mm/gup.c
25295 ++++ b/mm/gup.c
25296 +@@ -44,6 +44,23 @@ static void hpage_pincount_sub(struct page *page, int refs)
25297 + atomic_sub(refs, compound_pincount_ptr(page));
25298 + }
25299 +
25300 ++/* Equivalent to calling put_page() @refs times. */
25301 ++static void put_page_refs(struct page *page, int refs)
25302 ++{
25303 ++#ifdef CONFIG_DEBUG_VM
25304 ++ if (VM_WARN_ON_ONCE_PAGE(page_ref_count(page) < refs, page))
25305 ++ return;
25306 ++#endif
25307 ++
25308 ++ /*
25309 ++ * Calling put_page() for each ref is unnecessarily slow. Only the last
25310 ++ * ref needs a put_page().
25311 ++ */
25312 ++ if (refs > 1)
25313 ++ page_ref_sub(page, refs - 1);
25314 ++ put_page(page);
25315 ++}
25316 ++
25317 + /*
25318 + * Return the compound head page with ref appropriately incremented,
25319 + * or NULL if that failed.
25320 +@@ -56,6 +73,21 @@ static inline struct page *try_get_compound_head(struct page *page, int refs)
25321 + return NULL;
25322 + if (unlikely(!page_cache_add_speculative(head, refs)))
25323 + return NULL;
25324 ++
25325 ++ /*
25326 ++ * At this point we have a stable reference to the head page; but it
25327 ++ * could be that between the compound_head() lookup and the refcount
25328 ++ * increment, the compound page was split, in which case we'd end up
25329 ++ * holding a reference on a page that has nothing to do with the page
25330 ++ * we were given anymore.
25331 ++ * So now that the head page is stable, recheck that the pages still
25332 ++ * belong together.
25333 ++ */
25334 ++ if (unlikely(compound_head(page) != head)) {
25335 ++ put_page_refs(head, refs);
25336 ++ return NULL;
25337 ++ }
25338 ++
25339 + return head;
25340 + }
25341 +
25342 +@@ -95,6 +127,14 @@ __maybe_unused struct page *try_grab_compound_head(struct page *page,
25343 + !is_pinnable_page(page)))
25344 + return NULL;
25345 +
25346 ++ /*
25347 ++ * CAUTION: Don't use compound_head() on the page before this
25348 ++ * point, the result won't be stable.
25349 ++ */
25350 ++ page = try_get_compound_head(page, refs);
25351 ++ if (!page)
25352 ++ return NULL;
25353 ++
25354 + /*
25355 + * When pinning a compound page of order > 1 (which is what
25356 + * hpage_pincount_available() checks for), use an exact count to
25357 +@@ -103,15 +143,10 @@ __maybe_unused struct page *try_grab_compound_head(struct page *page,
25358 + * However, be sure to *also* increment the normal page refcount
25359 + * field at least once, so that the page really is pinned.
25360 + */
25361 +- if (!hpage_pincount_available(page))
25362 +- refs *= GUP_PIN_COUNTING_BIAS;
25363 +-
25364 +- page = try_get_compound_head(page, refs);
25365 +- if (!page)
25366 +- return NULL;
25367 +-
25368 + if (hpage_pincount_available(page))
25369 + hpage_pincount_add(page, refs);
25370 ++ else
25371 ++ page_ref_add(page, refs * (GUP_PIN_COUNTING_BIAS - 1));
25372 +
25373 + mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED,
25374 + orig_refs);
25375 +@@ -135,14 +170,7 @@ static void put_compound_head(struct page *page, int refs, unsigned int flags)
25376 + refs *= GUP_PIN_COUNTING_BIAS;
25377 + }
25378 +
25379 +- VM_BUG_ON_PAGE(page_ref_count(page) < refs, page);
25380 +- /*
25381 +- * Calling put_page() for each ref is unnecessarily slow. Only the last
25382 +- * ref needs a put_page().
25383 +- */
25384 +- if (refs > 1)
25385 +- page_ref_sub(page, refs - 1);
25386 +- put_page(page);
25387 ++ put_page_refs(page, refs);
25388 + }
25389 +
25390 + /**
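
try_get_compound_head() now re-checks, after the refcount bump, that the page
still belongs to the head it just pinned: a concurrent THP split can
disconnect the two between the compound_head() lookup and the reference
acquisition. The optimistic acquire-then-verify pattern in miniature (C11
atomics, hypothetical structures; the kernel additionally handles the
speculative get itself failing):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdbool.h>

    struct head { atomic_int refs; };
    struct page { _Atomic(struct head *) head; };

    static bool try_get_head(struct page *p, struct head **out)
    {
        struct head *h = atomic_load(&p->head); /* optimistic lookup */

        atomic_fetch_add(&h->refs, 1);          /* take the reference */

        /* Re-check that nothing changed between lookup and bump. */
        if (atomic_load(&p->head) != h) {
            atomic_fetch_sub(&h->refs, 1);      /* roll back and fail */
            return false;
        }
        *out = h;
        return true;
    }

    int main(void)
    {
        struct head h;
        struct page p;
        struct head *got;

        atomic_init(&h.refs, 1);
        atomic_init(&p.head, &h);
        printf("%s\n", try_get_head(&p, &got) ? "pinned" : "raced");
        return 0;
    }
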
25391 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
25392 +index 6d2a0119fc58e..8857ef1543eb6 100644
25393 +--- a/mm/huge_memory.c
25394 ++++ b/mm/huge_memory.c
25395 +@@ -64,7 +64,14 @@ static atomic_t huge_zero_refcount;
25396 + struct page *huge_zero_page __read_mostly;
25397 + unsigned long huge_zero_pfn __read_mostly = ~0UL;
25398 +
25399 +-bool transparent_hugepage_enabled(struct vm_area_struct *vma)
25400 ++static inline bool file_thp_enabled(struct vm_area_struct *vma)
25401 ++{
25402 ++ return transhuge_vma_enabled(vma, vma->vm_flags) && vma->vm_file &&
25403 ++ !inode_is_open_for_write(vma->vm_file->f_inode) &&
25404 ++ (vma->vm_flags & VM_EXEC);
25405 ++}
25406 ++
25407 ++bool transparent_hugepage_active(struct vm_area_struct *vma)
25408 + {
25409 + /* The addr is used to check if the vma size fits */
25410 + unsigned long addr = (vma->vm_end & HPAGE_PMD_MASK) - HPAGE_PMD_SIZE;
25411 +@@ -75,6 +82,8 @@ bool transparent_hugepage_enabled(struct vm_area_struct *vma)
25412 + return __transparent_hugepage_enabled(vma);
25413 + if (vma_is_shmem(vma))
25414 + return shmem_huge_enabled(vma);
25415 ++ if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS))
25416 ++ return file_thp_enabled(vma);
25417 +
25418 + return false;
25419 + }
25420 +@@ -1604,7 +1613,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
25421 + * If other processes are mapping this page, we couldn't discard
25422 + * the page unless they all do MADV_FREE so let's skip the page.
25423 + */
25424 +- if (page_mapcount(page) != 1)
25425 ++ if (total_mapcount(page) != 1)
25426 + goto out;
25427 +
25428 + if (!trylock_page(page))
25429 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
25430 +index 5ba5a0da6d572..65e0e8642ded8 100644
25431 +--- a/mm/hugetlb.c
25432 ++++ b/mm/hugetlb.c
25433 +@@ -1318,8 +1318,6 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
25434 + return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
25435 + }
25436 +
25437 +-static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
25438 +-static void prep_compound_gigantic_page(struct page *page, unsigned int order);
25439 + #else /* !CONFIG_CONTIG_ALLOC */
25440 + static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
25441 + int nid, nodemask_t *nodemask)
25442 +@@ -2625,16 +2623,10 @@ found:
25443 + return 1;
25444 + }
25445 +
25446 +-static void __init prep_compound_huge_page(struct page *page,
25447 +- unsigned int order)
25448 +-{
25449 +- if (unlikely(order > (MAX_ORDER - 1)))
25450 +- prep_compound_gigantic_page(page, order);
25451 +- else
25452 +- prep_compound_page(page, order);
25453 +-}
25454 +-
25455 +-/* Put bootmem huge pages into the standard lists after mem_map is up */
25456 ++/*
25457 ++ * Put bootmem huge pages into the standard lists after mem_map is up.
25458 ++ * Note: This only applies to gigantic (order >= MAX_ORDER) pages.
25459 ++ */
25460 + static void __init gather_bootmem_prealloc(void)
25461 + {
25462 + struct huge_bootmem_page *m;
25463 +@@ -2643,20 +2635,19 @@ static void __init gather_bootmem_prealloc(void)
25464 + struct page *page = virt_to_page(m);
25465 + struct hstate *h = m->hstate;
25466 +
25467 ++ VM_BUG_ON(!hstate_is_gigantic(h));
25468 + WARN_ON(page_count(page) != 1);
25469 +- prep_compound_huge_page(page, huge_page_order(h));
25470 ++ prep_compound_gigantic_page(page, huge_page_order(h));
25471 + WARN_ON(PageReserved(page));
25472 + prep_new_huge_page(h, page, page_to_nid(page));
25473 + put_page(page); /* free it into the hugepage allocator */
25474 +
25475 + /*
25476 +- * If we had gigantic hugepages allocated at boot time, we need
25477 +- * to restore the 'stolen' pages to totalram_pages in order to
25478 +- * fix confusing memory reports from free(1) and another
25479 +- * side-effects, like CommitLimit going negative.
25480 ++ * We need to restore the 'stolen' pages to totalram_pages
25481 ++ * in order to fix confusing memory reports from free(1) and
25482 ++ * other side-effects, like CommitLimit going negative.
25483 + */
25484 +- if (hstate_is_gigantic(h))
25485 +- adjust_managed_page_count(page, pages_per_huge_page(h));
25486 ++ adjust_managed_page_count(page, pages_per_huge_page(h));
25487 + cond_resched();
25488 + }
25489 + }
25490 +diff --git a/mm/kfence/core.c b/mm/kfence/core.c
25491 +index 4d21ac44d5d35..d7666ace9d2e4 100644
25492 +--- a/mm/kfence/core.c
25493 ++++ b/mm/kfence/core.c
25494 +@@ -636,7 +636,7 @@ static void toggle_allocation_gate(struct work_struct *work)
25495 + /* Disable static key and reset timer. */
25496 + static_branch_disable(&kfence_allocation_key);
25497 + #endif
25498 +- queue_delayed_work(system_power_efficient_wq, &kfence_timer,
25499 ++ queue_delayed_work(system_unbound_wq, &kfence_timer,
25500 + msecs_to_jiffies(kfence_sample_interval));
25501 + }
25502 + static DECLARE_DELAYED_WORK(kfence_timer, toggle_allocation_gate);
25503 +@@ -666,7 +666,7 @@ void __init kfence_init(void)
25504 + }
25505 +
25506 + WRITE_ONCE(kfence_enabled, true);
25507 +- queue_delayed_work(system_power_efficient_wq, &kfence_timer, 0);
25508 ++ queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
25509 + pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
25510 + CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
25511 + (void *)(__kfence_pool + KFENCE_POOL_SIZE));
25512 +diff --git a/mm/khugepaged.c b/mm/khugepaged.c
25513 +index 6c0185fdd8158..d97b20fad6e8e 100644
25514 +--- a/mm/khugepaged.c
25515 ++++ b/mm/khugepaged.c
25516 +@@ -442,9 +442,7 @@ static inline int khugepaged_test_exit(struct mm_struct *mm)
25517 + static bool hugepage_vma_check(struct vm_area_struct *vma,
25518 + unsigned long vm_flags)
25519 + {
25520 +- /* Explicitly disabled through madvise. */
25521 +- if ((vm_flags & VM_NOHUGEPAGE) ||
25522 +- test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
25523 ++ if (!transhuge_vma_enabled(vma, vm_flags))
25524 + return false;
25525 +
25526 + /* Enabled via shmem mount options or sysfs settings. */
25527 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
25528 +index 64ada9e650a51..f4f2d05c8c7ba 100644
25529 +--- a/mm/memcontrol.c
25530 ++++ b/mm/memcontrol.c
25531 +@@ -2739,6 +2739,13 @@ retry:
25532 + }
25533 +
25534 + #ifdef CONFIG_MEMCG_KMEM
25535 ++/*
25536 ++ * The allocated objcg pointers array is not accounted directly.
25537 ++ * Moreover, it should not come from DMA buffer and is not readily
25538 ++ * reclaimable. So those GFP bits should be masked off.
25539 ++ */
25540 ++#define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
25541 ++
25542 + int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
25543 + gfp_t gfp, bool new_page)
25544 + {
25545 +@@ -2746,6 +2753,7 @@ int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
25546 + unsigned long memcg_data;
25547 + void *vec;
25548 +
25549 ++ gfp &= ~OBJCGS_CLEAR_MASK;
25550 + vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
25551 + page_to_nid(page));
25552 + if (!vec)
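
The objcg pointer array is kernel-internal metadata, so the allocation must
not inherit the caller's DMA placement, reclaimability or accounting hints;
the patch masks those GFP bits off. Bit masking in isolation (standalone C,
flag values illustrative only, not the real GFP encoding):

    #include <stdio.h>

    #define DEMO_GFP_DMA         0x01u
    #define DEMO_GFP_RECLAIMABLE 0x10u
    #define DEMO_GFP_ACCOUNT     0x400000u

    #define DEMO_CLEAR_MASK \
        (DEMO_GFP_DMA | DEMO_GFP_RECLAIMABLE | DEMO_GFP_ACCOUNT)

    int main(void)
    {
        unsigned int gfp = 0x400011u; /* caller passed all three bits */

        gfp &= ~DEMO_CLEAR_MASK;      /* metadata allocation drops them */
        printf("%#x\n", gfp);         /* 0 */
        return 0;
    }
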
25553 +diff --git a/mm/memory.c b/mm/memory.c
25554 +index 486f4a2874e72..b15367c285bde 100644
25555 +--- a/mm/memory.c
25556 ++++ b/mm/memory.c
25557 +@@ -3353,6 +3353,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
25558 + {
25559 + struct vm_area_struct *vma = vmf->vma;
25560 + struct page *page = NULL, *swapcache;
25561 ++ struct swap_info_struct *si = NULL;
25562 + swp_entry_t entry;
25563 + pte_t pte;
25564 + int locked;
25565 +@@ -3380,14 +3381,16 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
25566 + goto out;
25567 + }
25568 +
25569 ++ /* Prevent swapoff from happening to us. */
25570 ++ si = get_swap_device(entry);
25571 ++ if (unlikely(!si))
25572 ++ goto out;
25573 +
25574 + delayacct_set_flag(current, DELAYACCT_PF_SWAPIN);
25575 + page = lookup_swap_cache(entry, vma, vmf->address);
25576 + swapcache = page;
25577 +
25578 + if (!page) {
25579 +- struct swap_info_struct *si = swp_swap_info(entry);
25580 +-
25581 + if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
25582 + __swap_count(entry) == 1) {
25583 + /* skip swapcache */
25584 +@@ -3556,6 +3559,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
25585 + unlock:
25586 + pte_unmap_unlock(vmf->pte, vmf->ptl);
25587 + out:
25588 ++ if (si)
25589 ++ put_swap_device(si);
25590 + return ret;
25591 + out_nomap:
25592 + pte_unmap_unlock(vmf->pte, vmf->ptl);
25593 +@@ -3567,6 +3572,8 @@ out_release:
25594 + unlock_page(swapcache);
25595 + put_page(swapcache);
25596 + }
25597 ++ if (si)
25598 ++ put_swap_device(si);
25599 + return ret;
25600 + }
25601 +
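
do_swap_page() now pins the swap_info_struct with get_swap_device() before
any dereference and releases it on every exit path, closing the race with
swapoff. The usual C shape for acquire-early/release-on-all-paths, with a
stdio FILE standing in for the pinned resource (standalone sketch):

    #include <stdio.h>

    static int process(const char *path)
    {
        int ret = -1;
        FILE *f = fopen(path, "r"); /* acquire, like get_swap_device() */

        if (!f)
            goto out;

        if (fgetc(f) == EOF)
            goto out;               /* error path still hits cleanup */

        ret = 0;
    out:
        if (f)                      /* release on every path */
            fclose(f);
        return ret;
    }

    int main(void)
    {
        printf("%d\n", process("/etc/hostname"));
        return 0;
    }
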
25602 +diff --git a/mm/migrate.c b/mm/migrate.c
25603 +index 41ff2c9896c4f..047209d6602eb 100644
25604 +--- a/mm/migrate.c
25605 ++++ b/mm/migrate.c
25606 +@@ -1288,7 +1288,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
25607 + * page_mapping() set, hugetlbfs specific move page routine will not
25608 + * be called and we could leak usage counts for subpools.
25609 + */
25610 +- if (page_private(hpage) && !page_mapping(hpage)) {
25611 ++ if (hugetlb_page_subpool(hpage) && !page_mapping(hpage)) {
25612 + rc = -EBUSY;
25613 + goto out_unlock;
25614 + }
25615 +diff --git a/mm/mmap_lock.c b/mm/mmap_lock.c
25616 +index dcdde4f722a40..2ae3f33b85b16 100644
25617 +--- a/mm/mmap_lock.c
25618 ++++ b/mm/mmap_lock.c
25619 +@@ -11,6 +11,7 @@
25620 + #include <linux/rcupdate.h>
25621 + #include <linux/smp.h>
25622 + #include <linux/trace_events.h>
25623 ++#include <linux/local_lock.h>
25624 +
25625 + EXPORT_TRACEPOINT_SYMBOL(mmap_lock_start_locking);
25626 + EXPORT_TRACEPOINT_SYMBOL(mmap_lock_acquire_returned);
25627 +@@ -39,21 +40,30 @@ static int reg_refcount; /* Protected by reg_lock. */
25628 + */
25629 + #define CONTEXT_COUNT 4
25630 +
25631 +-static DEFINE_PER_CPU(char __rcu *, memcg_path_buf);
25632 ++struct memcg_path {
25633 ++ local_lock_t lock;
25634 ++ char __rcu *buf;
25635 ++ local_t buf_idx;
25636 ++};
25637 ++static DEFINE_PER_CPU(struct memcg_path, memcg_paths) = {
25638 ++ .lock = INIT_LOCAL_LOCK(lock),
25639 ++ .buf_idx = LOCAL_INIT(0),
25640 ++};
25641 ++
25642 + static char **tmp_bufs;
25643 +-static DEFINE_PER_CPU(int, memcg_path_buf_idx);
25644 +
25645 + /* Called with reg_lock held. */
25646 + static void free_memcg_path_bufs(void)
25647 + {
25648 ++ struct memcg_path *memcg_path;
25649 + int cpu;
25650 + char **old = tmp_bufs;
25651 +
25652 + for_each_possible_cpu(cpu) {
25653 +- *(old++) = rcu_dereference_protected(
25654 +- per_cpu(memcg_path_buf, cpu),
25655 ++ memcg_path = per_cpu_ptr(&memcg_paths, cpu);
25656 ++ *(old++) = rcu_dereference_protected(memcg_path->buf,
25657 + lockdep_is_held(&reg_lock));
25658 +- rcu_assign_pointer(per_cpu(memcg_path_buf, cpu), NULL);
25659 ++ rcu_assign_pointer(memcg_path->buf, NULL);
25660 + }
25661 +
25662 + /* Wait for inflight memcg_path_buf users to finish. */
25663 +@@ -88,7 +98,7 @@ int trace_mmap_lock_reg(void)
25664 + new = kmalloc(MEMCG_PATH_BUF_SIZE * CONTEXT_COUNT, GFP_KERNEL);
25665 + if (new == NULL)
25666 + goto out_fail_free;
25667 +- rcu_assign_pointer(per_cpu(memcg_path_buf, cpu), new);
25668 ++ rcu_assign_pointer(per_cpu_ptr(&memcg_paths, cpu)->buf, new);
25669 + /* Don't need to wait for inflights, they'd have gotten NULL. */
25670 + }
25671 +
25672 +@@ -122,23 +132,24 @@ out:
25673 +
25674 + static inline char *get_memcg_path_buf(void)
25675 + {
25676 ++ struct memcg_path *memcg_path = this_cpu_ptr(&memcg_paths);
25677 + char *buf;
25678 + int idx;
25679 +
25680 + rcu_read_lock();
25681 +- buf = rcu_dereference(*this_cpu_ptr(&memcg_path_buf));
25682 ++ buf = rcu_dereference(memcg_path->buf);
25683 + if (buf == NULL) {
25684 + rcu_read_unlock();
25685 + return NULL;
25686 + }
25687 +- idx = this_cpu_add_return(memcg_path_buf_idx, MEMCG_PATH_BUF_SIZE) -
25688 ++ idx = local_add_return(MEMCG_PATH_BUF_SIZE, &memcg_path->buf_idx) -
25689 + MEMCG_PATH_BUF_SIZE;
25690 + return &buf[idx];
25691 + }
25692 +
25693 + static inline void put_memcg_path_buf(void)
25694 + {
25695 +- this_cpu_sub(memcg_path_buf_idx, MEMCG_PATH_BUF_SIZE);
25696 ++ local_sub(MEMCG_PATH_BUF_SIZE, &this_cpu_ptr(&memcg_paths)->buf_idx);
25697 + rcu_read_unlock();
25698 + }
25699 +
25700 +@@ -179,14 +190,14 @@ out:
25701 + #define TRACE_MMAP_LOCK_EVENT(type, mm, ...) \
25702 + do { \
25703 + const char *memcg_path; \
25704 +- preempt_disable(); \
25705 ++ local_lock(&memcg_paths.lock); \
25706 + memcg_path = get_mm_memcg_path(mm); \
25707 + trace_mmap_lock_##type(mm, \
25708 + memcg_path != NULL ? memcg_path : "", \
25709 + ##__VA_ARGS__); \
25710 + if (likely(memcg_path != NULL)) \
25711 + put_memcg_path_buf(); \
25712 +- preempt_enable(); \
25713 ++ local_unlock(&memcg_paths.lock); \
25714 + } while (0)
25715 +
25716 + #else /* !CONFIG_MEMCG */
25717 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
25718 +index 04220581579cd..fc5beebf69887 100644
25719 +--- a/mm/page_alloc.c
25720 ++++ b/mm/page_alloc.c
25721 +@@ -6400,7 +6400,7 @@ void __ref memmap_init_zone_device(struct zone *zone,
25722 + return;
25723 +
25724 + /*
25725 +- * The call to memmap_init_zone should have already taken care
25726 ++ * The call to memmap_init should have already taken care
25727 + * of the pages reserved for the memmap, so we can just jump to
25728 + * the end of that region and start processing the device pages.
25729 + */
25730 +@@ -6465,7 +6465,7 @@ static void __meminit zone_init_free_lists(struct zone *zone)
25731 + /*
25732 + * Only struct pages that correspond to ranges defined by memblock.memory
25733 + * are zeroed and initialized by going through __init_single_page() during
25734 +- * memmap_init_zone().
25735 ++ * memmap_init_zone_range().
25736 + *
25737 + * But, there could be struct pages that correspond to holes in
25738 + * memblock.memory. This can happen because of the following reasons:
25739 +@@ -6484,9 +6484,9 @@ static void __meminit zone_init_free_lists(struct zone *zone)
25740 + * zone/node above the hole except for the trailing pages in the last
25741 + * section that will be appended to the zone/node below.
25742 + */
25743 +-static u64 __meminit init_unavailable_range(unsigned long spfn,
25744 +- unsigned long epfn,
25745 +- int zone, int node)
25746 ++static void __init init_unavailable_range(unsigned long spfn,
25747 ++ unsigned long epfn,
25748 ++ int zone, int node)
25749 + {
25750 + unsigned long pfn;
25751 + u64 pgcnt = 0;
25752 +@@ -6502,56 +6502,77 @@ static u64 __meminit init_unavailable_range(unsigned long spfn,
25753 + pgcnt++;
25754 + }
25755 +
25756 +- return pgcnt;
25757 ++ if (pgcnt)
24758 ++ pr_info("On node %d, zone %s: %lld pages in unavailable ranges\n",
25759 ++ node, zone_names[zone], pgcnt);
25760 + }
25761 + #else
25762 +-static inline u64 init_unavailable_range(unsigned long spfn, unsigned long epfn,
25763 +- int zone, int node)
25764 ++static inline void init_unavailable_range(unsigned long spfn,
25765 ++ unsigned long epfn,
25766 ++ int zone, int node)
25767 + {
25768 +- return 0;
25769 + }
25770 + #endif
25771 +
25772 +-void __meminit __weak memmap_init_zone(struct zone *zone)
25773 ++static void __init memmap_init_zone_range(struct zone *zone,
25774 ++ unsigned long start_pfn,
25775 ++ unsigned long end_pfn,
25776 ++ unsigned long *hole_pfn)
25777 + {
25778 + unsigned long zone_start_pfn = zone->zone_start_pfn;
25779 + unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
25780 +- int i, nid = zone_to_nid(zone), zone_id = zone_idx(zone);
25781 +- static unsigned long hole_pfn;
25782 ++ int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
25783 ++
25784 ++ start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
25785 ++ end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);
25786 ++
25787 ++ if (start_pfn >= end_pfn)
25788 ++ return;
25789 ++
25790 ++ memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn,
25791 ++ zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
25792 ++
25793 ++ if (*hole_pfn < start_pfn)
25794 ++ init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);
25795 ++
25796 ++ *hole_pfn = end_pfn;
25797 ++}
25798 ++
25799 ++static void __init memmap_init(void)
25800 ++{
25801 + unsigned long start_pfn, end_pfn;
25802 +- u64 pgcnt = 0;
25803 ++ unsigned long hole_pfn = 0;
25804 ++ int i, j, zone_id, nid;
25805 +
25806 +- for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
25807 +- start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
25808 +- end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);
25809 ++ for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
25810 ++ struct pglist_data *node = NODE_DATA(nid);
25811 ++
25812 ++ for (j = 0; j < MAX_NR_ZONES; j++) {
25813 ++ struct zone *zone = node->node_zones + j;
25814 +
25815 +- if (end_pfn > start_pfn)
25816 +- memmap_init_range(end_pfn - start_pfn, nid,
25817 +- zone_id, start_pfn, zone_end_pfn,
25818 +- MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
25819 ++ if (!populated_zone(zone))
25820 ++ continue;
25821 +
25822 +- if (hole_pfn < start_pfn)
25823 +- pgcnt += init_unavailable_range(hole_pfn, start_pfn,
25824 +- zone_id, nid);
25825 +- hole_pfn = end_pfn;
25826 ++ memmap_init_zone_range(zone, start_pfn, end_pfn,
25827 ++ &hole_pfn);
25828 ++ zone_id = j;
25829 ++ }
25830 + }
25831 +
25832 + #ifdef CONFIG_SPARSEMEM
25833 + /*
25834 +- * Initialize the hole in the range [zone_end_pfn, section_end].
25835 +- * If zone boundary falls in the middle of a section, this hole
25836 +- * will be re-initialized during the call to this function for the
25837 +- * higher zone.
25838 ++ * Initialize the memory map for hole in the range [memory_end,
25839 ++ * section_end].
25840 ++ * Append the pages in this hole to the highest zone in the last
25841 ++ * node.
25842 ++ * The call to init_unavailable_range() is outside the ifdef to
24843 ++ * silence the compiler warning about zone_id set but not used;
24844 ++ * for FLATMEM it is a nop anyway.
25845 + */
25846 +- end_pfn = round_up(zone_end_pfn, PAGES_PER_SECTION);
25847 ++ end_pfn = round_up(end_pfn, PAGES_PER_SECTION);
25848 + if (hole_pfn < end_pfn)
25849 +- pgcnt += init_unavailable_range(hole_pfn, end_pfn,
25850 +- zone_id, nid);
25851 + #endif
25852 +-
25853 +- if (pgcnt)
25854 +- pr_info(" %s zone: %llu pages in unavailable ranges\n",
25855 +- zone->name, pgcnt);
25856 ++ init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
25857 + }
25858 +
25859 + static int zone_batchsize(struct zone *zone)
25860 +@@ -7254,7 +7275,6 @@ static void __init free_area_init_core(struct pglist_data *pgdat)
25861 + set_pageblock_order();
25862 + setup_usemap(zone);
25863 + init_currently_empty_zone(zone, zone->zone_start_pfn, size);
25864 +- memmap_init_zone(zone);
25865 + }
25866 + }
25867 +
25868 +@@ -7780,6 +7800,8 @@ void __init free_area_init(unsigned long *max_zone_pfn)
25869 + node_set_state(nid, N_MEMORY);
25870 + check_for_memory(pgdat, nid);
25871 + }
25872 ++
25873 ++ memmap_init();
25874 + }
25875 +
25876 + static int __init cmdline_parse_core(char *p, unsigned long *core,
25877 +@@ -8065,14 +8087,14 @@ static void setup_per_zone_lowmem_reserve(void)
25878 + unsigned long managed_pages = 0;
25879 +
25880 + for (j = i + 1; j < MAX_NR_ZONES; j++) {
25881 +- if (clear) {
25882 +- zone->lowmem_reserve[j] = 0;
25883 +- } else {
25884 +- struct zone *upper_zone = &pgdat->node_zones[j];
25885 ++ struct zone *upper_zone = &pgdat->node_zones[j];
25886 +
25887 +- managed_pages += zone_managed_pages(upper_zone);
25888 ++ managed_pages += zone_managed_pages(upper_zone);
25889 ++
25890 ++ if (clear)
25891 ++ zone->lowmem_reserve[j] = 0;
25892 ++ else
25893 + zone->lowmem_reserve[j] = managed_pages / ratio;
25894 +- }
25895 + }
25896 + }
25897 + }
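
memmap_init_zone_range() clips each memblock range to the zone span with
clamp() and skips empty intersections, which is what lets memmap_init() walk
all ranges across all populated zones in one pass. The clipping step on its
own (standalone C):

    #include <stdio.h>

    #define CLAMP(v, lo, hi) \
        ((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))

    /* Report the part of [start, end) that overlaps [zs, ze). */
    static void init_range(unsigned long start, unsigned long end,
                           unsigned long zs, unsigned long ze)
    {
        start = CLAMP(start, zs, ze);
        end = CLAMP(end, zs, ze);

        if (start >= end) {
            printf("no overlap, skipped\n");
            return;
        }
        printf("init pfns [%lu, %lu)\n", start, end);
    }

    int main(void)
    {
        init_range(100, 300, 200, 1000); /* init pfns [200, 300) */
        init_range(100, 150, 200, 1000); /* no overlap, skipped */
        return 0;
    }
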
25898 +diff --git a/mm/shmem.c b/mm/shmem.c
25899 +index 5d46611cba8dc..5fa21d66af203 100644
25900 +--- a/mm/shmem.c
25901 ++++ b/mm/shmem.c
25902 +@@ -1696,7 +1696,8 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
25903 + struct address_space *mapping = inode->i_mapping;
25904 + struct shmem_inode_info *info = SHMEM_I(inode);
25905 + struct mm_struct *charge_mm = vma ? vma->vm_mm : current->mm;
25906 +- struct page *page;
25907 ++ struct swap_info_struct *si;
25908 ++ struct page *page = NULL;
25909 + swp_entry_t swap;
25910 + int error;
25911 +
25912 +@@ -1704,6 +1705,12 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
25913 + swap = radix_to_swp_entry(*pagep);
25914 + *pagep = NULL;
25915 +
25916 ++ /* Prevent swapoff from happening to us. */
25917 ++ si = get_swap_device(swap);
25918 ++ if (!si) {
25919 ++ error = -EINVAL;
25920 ++ goto failed;
25921 ++ }
25922 + /* Look it up and read it in.. */
25923 + page = lookup_swap_cache(swap, NULL, 0);
25924 + if (!page) {
25925 +@@ -1765,6 +1772,8 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
25926 + swap_free(swap);
25927 +
25928 + *pagep = page;
25929 ++ if (si)
25930 ++ put_swap_device(si);
25931 + return 0;
25932 + failed:
25933 + if (!shmem_confirm_swap(mapping, index, swap))
25934 +@@ -1775,6 +1784,9 @@ unlock:
25935 + put_page(page);
25936 + }
25937 +
25938 ++ if (si)
25939 ++ put_swap_device(si);
25940 ++
25941 + return error;
25942 + }
25943 +
25944 +@@ -4028,8 +4040,7 @@ bool shmem_huge_enabled(struct vm_area_struct *vma)
25945 + loff_t i_size;
25946 + pgoff_t off;
25947 +
25948 +- if ((vma->vm_flags & VM_NOHUGEPAGE) ||
25949 +- test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
25950 ++ if (!transhuge_vma_enabled(vma, vma->vm_flags))
25951 + return false;
25952 + if (shmem_huge == SHMEM_HUGE_FORCE)
25953 + return true;
25954 +diff --git a/mm/slab.h b/mm/slab.h
25955 +index 18c1927cd196c..b3294712a6868 100644
25956 +--- a/mm/slab.h
25957 ++++ b/mm/slab.h
25958 +@@ -309,7 +309,6 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
25959 + if (!memcg_kmem_enabled() || !objcg)
25960 + return;
25961 +
25962 +- flags &= ~__GFP_ACCOUNT;
25963 + for (i = 0; i < size; i++) {
25964 + if (likely(p[i])) {
25965 + page = virt_to_head_page(p[i]);
25966 +diff --git a/mm/z3fold.c b/mm/z3fold.c
25967 +index 7fe7adaaad013..ed0023dc5a3d2 100644
25968 +--- a/mm/z3fold.c
25969 ++++ b/mm/z3fold.c
25970 +@@ -1059,6 +1059,7 @@ static void z3fold_destroy_pool(struct z3fold_pool *pool)
25971 + destroy_workqueue(pool->compact_wq);
25972 + destroy_workqueue(pool->release_wq);
25973 + z3fold_unregister_migration(pool);
25974 ++ free_percpu(pool->unbuddied);
25975 + kfree(pool);
25976 + }
25977 +
25978 +@@ -1382,7 +1383,7 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
25979 + if (zhdr->foreign_handles ||
25980 + test_and_set_bit(PAGE_CLAIMED, &page->private)) {
25981 + if (kref_put(&zhdr->refcount,
25982 +- release_z3fold_page))
25983 ++ release_z3fold_page_locked))
25984 + atomic64_dec(&pool->pages_nr);
25985 + else
25986 + z3fold_page_unlock(zhdr);
25987 +diff --git a/mm/zswap.c b/mm/zswap.c
25988 +index 20763267a219e..706e0f98125ad 100644
25989 +--- a/mm/zswap.c
25990 ++++ b/mm/zswap.c
25991 +@@ -967,6 +967,13 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
25992 + spin_unlock(&tree->lock);
25993 + BUG_ON(offset != entry->offset);
25994 +
25995 ++ src = (u8 *)zhdr + sizeof(struct zswap_header);
25996 ++ if (!zpool_can_sleep_mapped(pool)) {
25997 ++ memcpy(tmp, src, entry->length);
25998 ++ src = tmp;
25999 ++ zpool_unmap_handle(pool, handle);
26000 ++ }
26001 ++
26002 + /* try to allocate swap cache page */
26003 + switch (zswap_get_swap_cache_page(swpentry, &page)) {
26004 + case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
26005 +@@ -982,17 +989,7 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
26006 + case ZSWAP_SWAPCACHE_NEW: /* page is locked */
26007 + /* decompress */
26008 + acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
26009 +-
26010 + dlen = PAGE_SIZE;
26011 +- src = (u8 *)zhdr + sizeof(struct zswap_header);
26012 +-
26013 +- if (!zpool_can_sleep_mapped(pool)) {
26014 +-
26015 +- memcpy(tmp, src, entry->length);
26016 +- src = tmp;
26017 +-
26018 +- zpool_unmap_handle(pool, handle);
26019 +- }
26020 +
26021 + mutex_lock(acomp_ctx->mutex);
26022 + sg_init_one(&input, src, entry->length);
26023 +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
26024 +index 016b2999f2195..b077d150ac529 100644
26025 +--- a/net/bluetooth/hci_event.c
26026 ++++ b/net/bluetooth/hci_event.c
26027 +@@ -5296,8 +5296,19 @@ static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
26028 +
26029 + BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
26030 +
26031 +- if (ev->status)
26032 ++ if (ev->status) {
26033 ++ struct adv_info *adv;
26034 ++
26035 ++ adv = hci_find_adv_instance(hdev, ev->handle);
26036 ++ if (!adv)
26037 ++ return;
26038 ++
26039 ++ /* Remove advertising as it has been terminated */
26040 ++ hci_remove_adv_instance(hdev, ev->handle);
26041 ++ mgmt_advertising_removed(NULL, hdev, ev->handle);
26042 ++
26043 + return;
26044 ++ }
26045 +
26046 + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
26047 + if (conn) {
26048 +@@ -5441,7 +5452,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
26049 + struct hci_conn *conn;
26050 + bool match;
26051 + u32 flags;
26052 +- u8 *ptr, real_len;
26053 ++ u8 *ptr;
26054 +
26055 + switch (type) {
26056 + case LE_ADV_IND:
26057 +@@ -5472,14 +5483,10 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
26058 + break;
26059 + }
26060 +
26061 +- real_len = ptr - data;
26062 +-
26063 +- /* Adjust for actual length */
26064 +- if (len != real_len) {
26065 +- bt_dev_err_ratelimited(hdev, "advertising data len corrected %u -> %u",
26066 +- len, real_len);
26067 +- len = real_len;
26068 +- }
26069 ++ /* Adjust for actual length. This handles the case when remote
26070 ++ * device is advertising with incorrect data length.
26071 ++ */
26072 ++ len = ptr - data;
26073 +
26074 + /* If the direct address is present, then this report is from
26075 + * a LE Direct Advertising Report event. In that case it is
26076 +diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
26077 +index fa9125b782f85..b069f640394d0 100644
26078 +--- a/net/bluetooth/hci_request.c
26079 ++++ b/net/bluetooth/hci_request.c
26080 +@@ -1697,30 +1697,33 @@ void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
26081 + return;
26082 +
26083 + if (ext_adv_capable(hdev)) {
26084 +- struct hci_cp_le_set_ext_scan_rsp_data cp;
26085 ++ struct {
26086 ++ struct hci_cp_le_set_ext_scan_rsp_data cp;
26087 ++ u8 data[HCI_MAX_EXT_AD_LENGTH];
26088 ++ } pdu;
26089 +
26090 +- memset(&cp, 0, sizeof(cp));
26091 ++ memset(&pdu, 0, sizeof(pdu));
26092 +
26093 + if (instance)
26094 + len = create_instance_scan_rsp_data(hdev, instance,
26095 +- cp.data);
26096 ++ pdu.data);
26097 + else
26098 +- len = create_default_scan_rsp_data(hdev, cp.data);
26099 ++ len = create_default_scan_rsp_data(hdev, pdu.data);
26100 +
26101 + if (hdev->scan_rsp_data_len == len &&
26102 +- !memcmp(cp.data, hdev->scan_rsp_data, len))
26103 ++ !memcmp(pdu.data, hdev->scan_rsp_data, len))
26104 + return;
26105 +
26106 +- memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
26107 ++ memcpy(hdev->scan_rsp_data, pdu.data, len);
26108 + hdev->scan_rsp_data_len = len;
26109 +
26110 +- cp.handle = instance;
26111 +- cp.length = len;
26112 +- cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
26113 +- cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
26114 ++ pdu.cp.handle = instance;
26115 ++ pdu.cp.length = len;
26116 ++ pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
26117 ++ pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
26118 +
26119 +- hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
26120 +- &cp);
26121 ++ hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
26122 ++ sizeof(pdu.cp) + len, &pdu.cp);
26123 + } else {
26124 + struct hci_cp_le_set_scan_rsp_data cp;
26125 +
26126 +@@ -1843,26 +1846,30 @@ void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
26127 + return;
26128 +
26129 + if (ext_adv_capable(hdev)) {
26130 +- struct hci_cp_le_set_ext_adv_data cp;
26131 ++ struct {
26132 ++ struct hci_cp_le_set_ext_adv_data cp;
26133 ++ u8 data[HCI_MAX_EXT_AD_LENGTH];
26134 ++ } pdu;
26135 +
26136 +- memset(&cp, 0, sizeof(cp));
26137 ++ memset(&pdu, 0, sizeof(pdu));
26138 +
26139 +- len = create_instance_adv_data(hdev, instance, cp.data);
26140 ++ len = create_instance_adv_data(hdev, instance, pdu.data);
26141 +
26142 + /* There's nothing to do if the data hasn't changed */
26143 + if (hdev->adv_data_len == len &&
26144 +- memcmp(cp.data, hdev->adv_data, len) == 0)
26145 ++ memcmp(pdu.data, hdev->adv_data, len) == 0)
26146 + return;
26147 +
26148 +- memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
26149 ++ memcpy(hdev->adv_data, pdu.data, len);
26150 + hdev->adv_data_len = len;
26151 +
26152 +- cp.length = len;
26153 +- cp.handle = instance;
26154 +- cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
26155 +- cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
26156 ++ pdu.cp.length = len;
26157 ++ pdu.cp.handle = instance;
26158 ++ pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
26159 ++ pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
26160 +
26161 +- hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
26162 ++ hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
26163 ++ sizeof(pdu.cp) + len, &pdu.cp);
26164 + } else {
26165 + struct hci_cp_le_set_adv_data cp;
26166 +
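
Both hunks replace a fixed-size command struct with a { header, data[MAX] }
pair so only sizeof(header) + len bytes go on the wire instead of the full
maximum-sized buffer. The layout trick on its own (standalone C; send_cmd()
is a hypothetical stand-in for the HCI transport):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>
    #include <stddef.h>

    #define MAX_AD_LEN 251

    struct cmd_hdr {
        uint8_t handle;
        uint8_t length;
    };

    /* Report how much would actually be transmitted. */
    static void send_cmd(const void *buf, size_t len)
    {
        (void)buf;
        printf("sending %zu bytes\n", len);
    }

    int main(void)
    {
        struct {
            struct cmd_hdr cp;
            uint8_t data[MAX_AD_LEN];
        } pdu;
        const uint8_t ad[] = { 0x02, 0x01, 0x06 }; /* 3 bytes of adv data */

        memset(&pdu, 0, sizeof(pdu));
        memcpy(pdu.data, ad, sizeof(ad));
        pdu.cp.handle = 0;
        pdu.cp.length = sizeof(ad);

        /* header + payload only, not header + MAX_AD_LEN */
        send_cmd(&pdu.cp, sizeof(pdu.cp) + sizeof(ad));
        return 0;
    }
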
26167 +diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
26168 +index f9be7f9084d67..023a98f7c9922 100644
26169 +--- a/net/bluetooth/mgmt.c
26170 ++++ b/net/bluetooth/mgmt.c
26171 +@@ -7585,6 +7585,9 @@ static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
26172 + for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
26173 + cur_len = data[i];
26174 +
26175 ++ if (!cur_len)
26176 ++ continue;
26177 ++
26178 + if (data[i + 1] == EIR_FLAGS &&
26179 + (!is_adv_data || flags_managed(adv_flags)))
26180 + return false;
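
tlv_data_is_valid() steps through length-prefixed advertising elements with
i += cur_len + 1; a zero-length element previously made it read data[i + 1]
as if a type byte followed. The fixed walk, standalone:

    #include <stdio.h>
    #include <stdint.h>

    static void walk_ltv(const uint8_t *data, uint8_t len)
    {
        for (uint8_t i = 0, cur_len = 0; i < len; i += cur_len + 1) {
            cur_len = data[i];
            if (!cur_len)
                continue; /* a bare length byte carries no type/value */
            printf("element: type 0x%02x, %u value byte(s)\n",
                   (unsigned int)data[i + 1],
                   (unsigned int)(cur_len - 1));
        }
    }

    int main(void)
    {
        /* flags element, a zero-length filler, then a one-byte element */
        const uint8_t adv[] = { 0x02, 0x01, 0x06, 0x00, 0x01, 0x0a };

        walk_ltv(adv, sizeof(adv));
        return 0;
    }
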
26181 +diff --git a/net/bpfilter/main.c b/net/bpfilter/main.c
26182 +index 05e1cfc1e5cd1..291a925462463 100644
26183 +--- a/net/bpfilter/main.c
26184 ++++ b/net/bpfilter/main.c
26185 +@@ -57,7 +57,7 @@ int main(void)
26186 + {
26187 + debug_f = fopen("/dev/kmsg", "w");
26188 + setvbuf(debug_f, 0, _IOLBF, 0);
26189 +- fprintf(debug_f, "Started bpfilter\n");
26190 ++ fprintf(debug_f, "<5>Started bpfilter\n");
26191 + loop();
26192 + fclose(debug_f);
26193 + return 0;
26194 +diff --git a/net/can/bcm.c b/net/can/bcm.c
26195 +index f3e4d9528fa38..0928a39c4423b 100644
26196 +--- a/net/can/bcm.c
26197 ++++ b/net/can/bcm.c
26198 +@@ -785,6 +785,7 @@ static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh,
26199 + bcm_rx_handler, op);
26200 +
26201 + list_del(&op->list);
26202 ++ synchronize_rcu();
26203 + bcm_remove_op(op);
26204 + return 1; /* done */
26205 + }
26206 +@@ -1533,9 +1534,13 @@ static int bcm_release(struct socket *sock)
26207 + REGMASK(op->can_id),
26208 + bcm_rx_handler, op);
26209 +
26210 +- bcm_remove_op(op);
26211 + }
26212 +
26213 ++ synchronize_rcu();
26214 ++
26215 ++ list_for_each_entry_safe(op, next, &bo->rx_ops, list)
26216 ++ bcm_remove_op(op);
26217 ++
26218 + #if IS_ENABLED(CONFIG_PROC_FS)
26219 + /* remove procfs entry */
26220 + if (net->can.bcmproc_dir && bo->bcm_proc_read)
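The recurring pattern across these CAN fixes: can_rx_unregister() only stops new callbacks, so a handler already running under rcu_read_lock() may still reference the op; a synchronize_rcu() between unregistering and kfree() waits those readers out before the memory is released. In kernel-style pseudo-C, with unregister_handler() as an illustrative stand-in for can_rx_unregister() and the usual list/RCU primitives assumed:

  struct op {
          struct list_head list;
          /* ... */
  };

  static void remove_op(struct op *op)
  {
          list_del(&op->list);            /* unpublish from the list */
          unregister_handler(op);         /* assumed: no new callbacks after this */
          synchronize_rcu();              /* wait for callbacks already in flight */
          kfree(op);                      /* now provably unreachable */
  }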
26221 +diff --git a/net/can/gw.c b/net/can/gw.c
26222 +index ba41248056029..d8861e862f157 100644
26223 +--- a/net/can/gw.c
26224 ++++ b/net/can/gw.c
26225 +@@ -596,6 +596,7 @@ static int cgw_notifier(struct notifier_block *nb,
26226 + if (gwj->src.dev == dev || gwj->dst.dev == dev) {
26227 + hlist_del(&gwj->list);
26228 + cgw_unregister_filter(net, gwj);
26229 ++ synchronize_rcu();
26230 + kmem_cache_free(cgw_cache, gwj);
26231 + }
26232 + }
26233 +@@ -1154,6 +1155,7 @@ static void cgw_remove_all_jobs(struct net *net)
26234 + hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) {
26235 + hlist_del(&gwj->list);
26236 + cgw_unregister_filter(net, gwj);
26237 ++ synchronize_rcu();
26238 + kmem_cache_free(cgw_cache, gwj);
26239 + }
26240 + }
26241 +@@ -1222,6 +1224,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh,
26242 +
26243 + hlist_del(&gwj->list);
26244 + cgw_unregister_filter(net, gwj);
26245 ++ synchronize_rcu();
26246 + kmem_cache_free(cgw_cache, gwj);
26247 + err = 0;
26248 + break;
26249 +diff --git a/net/can/isotp.c b/net/can/isotp.c
26250 +index be6183f8ca110..234cc4ad179a2 100644
26251 +--- a/net/can/isotp.c
26252 ++++ b/net/can/isotp.c
26253 +@@ -1028,9 +1028,6 @@ static int isotp_release(struct socket *sock)
26254 +
26255 + lock_sock(sk);
26256 +
26257 +- hrtimer_cancel(&so->txtimer);
26258 +- hrtimer_cancel(&so->rxtimer);
26259 +-
26260 + /* remove current filters & unregister */
26261 + if (so->bound && (!(so->opt.flags & CAN_ISOTP_SF_BROADCAST))) {
26262 + if (so->ifindex) {
26263 +@@ -1042,10 +1039,14 @@ static int isotp_release(struct socket *sock)
26264 + SINGLE_MASK(so->rxid),
26265 + isotp_rcv, sk);
26266 + dev_put(dev);
26267 ++ synchronize_rcu();
26268 + }
26269 + }
26270 + }
26271 +
26272 ++ hrtimer_cancel(&so->txtimer);
26273 ++ hrtimer_cancel(&so->rxtimer);
26274 ++
26275 + so->ifindex = 0;
26276 + so->bound = 0;
26277 +
26278 +diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c
26279 +index da3a7a7bcff2b..08c8606cfd9c7 100644
26280 +--- a/net/can/j1939/main.c
26281 ++++ b/net/can/j1939/main.c
26282 +@@ -193,6 +193,10 @@ static void j1939_can_rx_unregister(struct j1939_priv *priv)
26283 + can_rx_unregister(dev_net(ndev), ndev, J1939_CAN_ID, J1939_CAN_MASK,
26284 + j1939_can_recv, priv);
26285 +
26286 ++ /* The last reference of priv is dropped by the RCU deferred
26287 ++ * j1939_sk_sock_destruct() of the last socket, so we can
26288 ++ * safely drop this reference here.
26289 ++ */
26290 + j1939_priv_put(priv);
26291 + }
26292 +
26293 +diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
26294 +index 56aa66147d5ac..e1a399821238f 100644
26295 +--- a/net/can/j1939/socket.c
26296 ++++ b/net/can/j1939/socket.c
26297 +@@ -398,6 +398,9 @@ static int j1939_sk_init(struct sock *sk)
26298 + atomic_set(&jsk->skb_pending, 0);
26299 + spin_lock_init(&jsk->sk_session_queue_lock);
26300 + INIT_LIST_HEAD(&jsk->sk_session_queue);
26301 ++
26302 ++ /* j1939_sk_sock_destruct() depends on SOCK_RCU_FREE flag */
26303 ++ sock_set_flag(sk, SOCK_RCU_FREE);
26304 + sk->sk_destruct = j1939_sk_sock_destruct;
26305 + sk->sk_protocol = CAN_J1939;
26306 +
26307 +@@ -673,7 +676,7 @@ static int j1939_sk_setsockopt(struct socket *sock, int level, int optname,
26308 +
26309 + switch (optname) {
26310 + case SO_J1939_FILTER:
26311 +- if (!sockptr_is_null(optval)) {
26312 ++ if (!sockptr_is_null(optval) && optlen != 0) {
26313 + struct j1939_filter *f;
26314 + int c;
26315 +
26316 +diff --git a/net/core/filter.c b/net/core/filter.c
26317 +index 65ab4e21c087f..6541358a770bf 100644
26318 +--- a/net/core/filter.c
26319 ++++ b/net/core/filter.c
26320 +@@ -3263,8 +3263,6 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
26321 + shinfo->gso_type |= SKB_GSO_TCPV6;
26322 + }
26323 +
26324 +- /* Due to IPv6 header, MSS needs to be downgraded. */
26325 +- skb_decrease_gso_size(shinfo, len_diff);
26326 + /* Header must be checked, and gso_segs recomputed. */
26327 + shinfo->gso_type |= SKB_GSO_DODGY;
26328 + shinfo->gso_segs = 0;
26329 +@@ -3304,8 +3302,6 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
26330 + shinfo->gso_type |= SKB_GSO_TCPV4;
26331 + }
26332 +
26333 +- /* Due to IPv4 header, MSS can be upgraded. */
26334 +- skb_increase_gso_size(shinfo, len_diff);
26335 + /* Header must be checked, and gso_segs recomputed. */
26336 + shinfo->gso_type |= SKB_GSO_DODGY;
26337 + shinfo->gso_segs = 0;
26338 +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
26339 +index ec931b080156d..c6e75bd0035dc 100644
26340 +--- a/net/core/rtnetlink.c
26341 ++++ b/net/core/rtnetlink.c
26342 +@@ -543,7 +543,9 @@ static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
26343 + {
26344 + const struct rtnl_af_ops *ops;
26345 +
26346 +- list_for_each_entry_rcu(ops, &rtnl_af_ops, list) {
26347 ++ ASSERT_RTNL();
26348 ++
26349 ++ list_for_each_entry(ops, &rtnl_af_ops, list) {
26350 + if (ops->family == family)
26351 + return ops;
26352 + }
26353 +@@ -2274,27 +2276,18 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
26354 + nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
26355 + const struct rtnl_af_ops *af_ops;
26356 +
26357 +- rcu_read_lock();
26358 + af_ops = rtnl_af_lookup(nla_type(af));
26359 +- if (!af_ops) {
26360 +- rcu_read_unlock();
26361 ++ if (!af_ops)
26362 + return -EAFNOSUPPORT;
26363 +- }
26364 +
26365 +- if (!af_ops->set_link_af) {
26366 +- rcu_read_unlock();
26367 ++ if (!af_ops->set_link_af)
26368 + return -EOPNOTSUPP;
26369 +- }
26370 +
26371 + if (af_ops->validate_link_af) {
26372 + err = af_ops->validate_link_af(dev, af);
26373 +- if (err < 0) {
26374 +- rcu_read_unlock();
26375 ++ if (err < 0)
26376 + return err;
26377 +- }
26378 + }
26379 +-
26380 +- rcu_read_unlock();
26381 + }
26382 + }
26383 +
26384 +@@ -2868,17 +2861,12 @@ static int do_setlink(const struct sk_buff *skb,
26385 + nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
26386 + const struct rtnl_af_ops *af_ops;
26387 +
26388 +- rcu_read_lock();
26389 +-
26390 + BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af))));
26391 +
26392 + err = af_ops->set_link_af(dev, af, extack);
26393 +- if (err < 0) {
26394 +- rcu_read_unlock();
26395 ++ if (err < 0)
26396 + goto errout;
26397 +- }
26398 +
26399 +- rcu_read_unlock();
26400 + status |= DO_SETLINK_NOTIFY;
26401 + }
26402 + }
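The rtnetlink change trades RCU list traversal for plain traversal plus ASSERT_RTNL(): every caller of these paths already holds the RTNL mutex, so the per-lookup rcu_read_lock()/unlock() pairs, and all their error-path unlocks, become unnecessary. A sketch of the resulting shape in kernel pseudo-C (ops_list is illustrative):

  static const struct rtnl_af_ops *af_lookup(int family)
  {
          const struct rtnl_af_ops *ops;

          ASSERT_RTNL();                  /* caller must hold the RTNL mutex */

          list_for_each_entry(ops, &ops_list, list)
                  if (ops->family == family)
                          return ops;
          return NULL;
  }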
26403 +diff --git a/net/core/skmsg.c b/net/core/skmsg.c
26404 +index 43ce17a6a5852..539c83a45665e 100644
26405 +--- a/net/core/skmsg.c
26406 ++++ b/net/core/skmsg.c
26407 +@@ -847,7 +847,7 @@ out:
26408 + }
26409 + EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);
26410 +
26411 +-static void sk_psock_skb_redirect(struct sk_buff *skb)
26412 ++static int sk_psock_skb_redirect(struct sk_buff *skb)
26413 + {
26414 + struct sk_psock *psock_other;
26415 + struct sock *sk_other;
26416 +@@ -858,7 +858,7 @@ static void sk_psock_skb_redirect(struct sk_buff *skb)
26417 + */
26418 + if (unlikely(!sk_other)) {
26419 + kfree_skb(skb);
26420 +- return;
26421 ++ return -EIO;
26422 + }
26423 + psock_other = sk_psock(sk_other);
26424 + /* This error indicates the socket is being torn down or had another
26425 +@@ -866,19 +866,22 @@ static void sk_psock_skb_redirect(struct sk_buff *skb)
26426 + * a socket that is in this state so we drop the skb.
26427 + */
26428 + if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
26429 ++ skb_bpf_redirect_clear(skb);
26430 + kfree_skb(skb);
26431 +- return;
26432 ++ return -EIO;
26433 + }
26434 + spin_lock_bh(&psock_other->ingress_lock);
26435 + if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
26436 + spin_unlock_bh(&psock_other->ingress_lock);
26437 ++ skb_bpf_redirect_clear(skb);
26438 + kfree_skb(skb);
26439 +- return;
26440 ++ return -EIO;
26441 + }
26442 +
26443 + skb_queue_tail(&psock_other->ingress_skb, skb);
26444 + schedule_work(&psock_other->work);
26445 + spin_unlock_bh(&psock_other->ingress_lock);
26446 ++ return 0;
26447 + }
26448 +
26449 + static void sk_psock_tls_verdict_apply(struct sk_buff *skb, struct sock *sk, int verdict)
26450 +@@ -915,14 +918,15 @@ int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
26451 + }
26452 + EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);
26453 +
26454 +-static void sk_psock_verdict_apply(struct sk_psock *psock,
26455 +- struct sk_buff *skb, int verdict)
26456 ++static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
26457 ++ int verdict)
26458 + {
26459 + struct sock *sk_other;
26460 +- int err = -EIO;
26461 ++ int err = 0;
26462 +
26463 + switch (verdict) {
26464 + case __SK_PASS:
26465 ++ err = -EIO;
26466 + sk_other = psock->sk;
26467 + if (sock_flag(sk_other, SOCK_DEAD) ||
26468 + !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
26469 +@@ -945,18 +949,25 @@ static void sk_psock_verdict_apply(struct sk_psock *psock,
26470 + if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
26471 + skb_queue_tail(&psock->ingress_skb, skb);
26472 + schedule_work(&psock->work);
26473 ++ err = 0;
26474 + }
26475 + spin_unlock_bh(&psock->ingress_lock);
26476 ++ if (err < 0) {
26477 ++ skb_bpf_redirect_clear(skb);
26478 ++ goto out_free;
26479 ++ }
26480 + }
26481 + break;
26482 + case __SK_REDIRECT:
26483 +- sk_psock_skb_redirect(skb);
26484 ++ err = sk_psock_skb_redirect(skb);
26485 + break;
26486 + case __SK_DROP:
26487 + default:
26488 + out_free:
26489 + kfree_skb(skb);
26490 + }
26491 ++
26492 ++ return err;
26493 + }
26494 +
26495 + static void sk_psock_write_space(struct sock *sk)
26496 +@@ -1123,7 +1134,8 @@ static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
26497 + ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
26498 + skb->sk = NULL;
26499 + }
26500 +- sk_psock_verdict_apply(psock, skb, ret);
26501 ++ if (sk_psock_verdict_apply(psock, skb, ret) < 0)
26502 ++ len = 0;
26503 + out:
26504 + rcu_read_unlock();
26505 + return len;
26506 +diff --git a/net/core/sock_map.c b/net/core/sock_map.c
26507 +index 6f1b82b8ad49a..60decd6420ca1 100644
26508 +--- a/net/core/sock_map.c
26509 ++++ b/net/core/sock_map.c
26510 +@@ -48,7 +48,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
26511 + bpf_map_init_from_attr(&stab->map, attr);
26512 + raw_spin_lock_init(&stab->lock);
26513 +
26514 +- stab->sks = bpf_map_area_alloc(stab->map.max_entries *
26515 ++ stab->sks = bpf_map_area_alloc((u64) stab->map.max_entries *
26516 + sizeof(struct sock *),
26517 + stab->map.numa_node);
26518 + if (!stab->sks) {
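The sock_map cast guards against integer promotion: when both operands of the multiplication are 32-bit, the product wraps before it is widened to the allocator's u64 parameter; casting one operand first forces 64-bit arithmetic. A self-contained demonstration of the rule:

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          uint32_t max_entries = 0x20000000;      /* 512M entries */
          uint32_t elem_size = 8;                 /* illustrative element size */

          /* both operands 32-bit: product wraps to 0 before widening */
          uint64_t bad = max_entries * elem_size;
          /* cast first: multiplication done in 64 bits */
          uint64_t good = (uint64_t)max_entries * elem_size;

          printf("bad=%llu good=%llu\n",
                 (unsigned long long)bad, (unsigned long long)good);
          return 0;
  }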
26519 +diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
26520 +index 1c6429c353a96..73721a4448bd4 100644
26521 +--- a/net/ipv4/devinet.c
26522 ++++ b/net/ipv4/devinet.c
26523 +@@ -1955,7 +1955,7 @@ static int inet_validate_link_af(const struct net_device *dev,
26524 + struct nlattr *a, *tb[IFLA_INET_MAX+1];
26525 + int err, rem;
26526 +
26527 +- if (dev && !__in_dev_get_rcu(dev))
26528 ++ if (dev && !__in_dev_get_rtnl(dev))
26529 + return -EAFNOSUPPORT;
26530 +
26531 + err = nla_parse_nested_deprecated(tb, IFLA_INET_MAX, nla,
26532 +@@ -1981,7 +1981,7 @@ static int inet_validate_link_af(const struct net_device *dev,
26533 + static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla,
26534 + struct netlink_ext_ack *extack)
26535 + {
26536 +- struct in_device *in_dev = __in_dev_get_rcu(dev);
26537 ++ struct in_device *in_dev = __in_dev_get_rtnl(dev);
26538 + struct nlattr *a, *tb[IFLA_INET_MAX+1];
26539 + int rem;
26540 +
26541 +diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
26542 +index 35803ab7ac804..26171dec08c4d 100644
26543 +--- a/net/ipv4/esp4.c
26544 ++++ b/net/ipv4/esp4.c
26545 +@@ -673,7 +673,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
26546 + struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
26547 + u32 padto;
26548 +
26549 +- padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
26550 ++ padto = min(x->tfcpad, __xfrm_state_mtu(x, dst->child_mtu_cached));
26551 + if (skb->len < padto)
26552 + esp.tfclen = padto - skb->len;
26553 + }
26554 +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
26555 +index 84bb707bd88d8..647bceab56c2d 100644
26556 +--- a/net/ipv4/fib_frontend.c
26557 ++++ b/net/ipv4/fib_frontend.c
26558 +@@ -371,6 +371,8 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
26559 + fl4.flowi4_proto = 0;
26560 + fl4.fl4_sport = 0;
26561 + fl4.fl4_dport = 0;
26562 ++ } else {
26563 ++ swap(fl4.fl4_sport, fl4.fl4_dport);
26564 + }
26565 +
26566 + if (fib_lookup(net, &fl4, &res, 0))
26567 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
26568 +index 6a36ac98476fa..78d1e5afc4520 100644
26569 +--- a/net/ipv4/route.c
26570 ++++ b/net/ipv4/route.c
26571 +@@ -1306,7 +1306,7 @@ INDIRECT_CALLABLE_SCOPE unsigned int ipv4_mtu(const struct dst_entry *dst)
26572 + mtu = dst_metric_raw(dst, RTAX_MTU);
26573 +
26574 + if (mtu)
26575 +- return mtu;
26576 ++ goto out;
26577 +
26578 + mtu = READ_ONCE(dst->dev->mtu);
26579 +
26580 +@@ -1315,6 +1315,7 @@ INDIRECT_CALLABLE_SCOPE unsigned int ipv4_mtu(const struct dst_entry *dst)
26581 + mtu = 576;
26582 + }
26583 +
26584 ++out:
26585 + mtu = min_t(unsigned int, mtu, IP_MAX_MTU);
26586 +
26587 + return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
26588 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
26589 +index 1307ad0d3b9ed..8091276cb85b8 100644
26590 +--- a/net/ipv4/udp.c
26591 ++++ b/net/ipv4/udp.c
26592 +@@ -1798,11 +1798,13 @@ int udp_read_sock(struct sock *sk, read_descriptor_t *desc,
26593 + if (used <= 0) {
26594 + if (!copied)
26595 + copied = used;
26596 ++ kfree_skb(skb);
26597 + break;
26598 + } else if (used <= skb->len) {
26599 + copied += used;
26600 + }
26601 +
26602 ++ kfree_skb(skb);
26603 + if (!desc->count)
26604 + break;
26605 + }
26606 +diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
26607 +index 393ae2b78e7d4..1654e4ce094f3 100644
26608 +--- a/net/ipv6/esp6.c
26609 ++++ b/net/ipv6/esp6.c
26610 +@@ -708,7 +708,7 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
26611 + struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
26612 + u32 padto;
26613 +
26614 +- padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
26615 ++ padto = min(x->tfcpad, __xfrm_state_mtu(x, dst->child_mtu_cached));
26616 + if (skb->len < padto)
26617 + esp.tfclen = padto - skb->len;
26618 + }
26619 +diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
26620 +index 56e479d158b7c..26882e165c9e3 100644
26621 +--- a/net/ipv6/exthdrs.c
26622 ++++ b/net/ipv6/exthdrs.c
26623 +@@ -135,18 +135,23 @@ static bool ip6_parse_tlv(const struct tlvtype_proc *procs,
26624 + len -= 2;
26625 +
26626 + while (len > 0) {
26627 +- int optlen = nh[off + 1] + 2;
26628 +- int i;
26629 ++ int optlen, i;
26630 +
26631 +- switch (nh[off]) {
26632 +- case IPV6_TLV_PAD1:
26633 +- optlen = 1;
26634 ++ if (nh[off] == IPV6_TLV_PAD1) {
26635 + padlen++;
26636 + if (padlen > 7)
26637 + goto bad;
26638 +- break;
26639 ++ off++;
26640 ++ len--;
26641 ++ continue;
26642 ++ }
26643 ++ if (len < 2)
26644 ++ goto bad;
26645 ++ optlen = nh[off + 1] + 2;
26646 ++ if (optlen > len)
26647 ++ goto bad;
26648 +
26649 +- case IPV6_TLV_PADN:
26650 ++ if (nh[off] == IPV6_TLV_PADN) {
26651 + /* RFC 2460 states that the purpose of PadN is
26652 + * to align the containing header to multiples
26653 + * of 8. 7 is therefore the highest valid value.
26654 +@@ -163,12 +168,7 @@ static bool ip6_parse_tlv(const struct tlvtype_proc *procs,
26655 + if (nh[off + i] != 0)
26656 + goto bad;
26657 + }
26658 +- break;
26659 +-
26660 +- default: /* Other TLV code so scan list */
26661 +- if (optlen > len)
26662 +- goto bad;
26663 +-
26664 ++ } else {
26665 + tlv_count++;
26666 + if (tlv_count > max_count)
26667 + goto bad;
26668 +@@ -188,7 +188,6 @@ static bool ip6_parse_tlv(const struct tlvtype_proc *procs,
26669 + return false;
26670 +
26671 + padlen = 0;
26672 +- break;
26673 + }
26674 + off += optlen;
26675 + len -= optlen;
26676 +@@ -306,7 +305,7 @@ fail_and_free:
26677 + #endif
26678 +
26679 + if (ip6_parse_tlv(tlvprocdestopt_lst, skb,
26680 +- init_net.ipv6.sysctl.max_dst_opts_cnt)) {
26681 ++ net->ipv6.sysctl.max_dst_opts_cnt)) {
26682 + skb->transport_header += extlen;
26683 + opt = IP6CB(skb);
26684 + #if IS_ENABLED(CONFIG_IPV6_MIP6)
26685 +@@ -1037,7 +1036,7 @@ fail_and_free:
26686 +
26687 + opt->flags |= IP6SKB_HOPBYHOP;
26688 + if (ip6_parse_tlv(tlvprochopopt_lst, skb,
26689 +- init_net.ipv6.sysctl.max_hbh_opts_cnt)) {
26690 ++ net->ipv6.sysctl.max_hbh_opts_cnt)) {
26691 + skb->transport_header += extlen;
26692 + opt = IP6CB(skb);
26693 + opt->nhoff = sizeof(struct ipv6hdr);
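The rewritten ip6_parse_tlv() loop handles Pad1 before ever reading a length byte and validates optlen against the bytes remaining for every option type, where the old code only bounds-checked the default case. The essential shape, reduced to a standalone checker:

  #include <stdbool.h>
  #include <stdint.h>

  #define TLV_PAD1 0

  /* Returns false on any malformed option; mirrors the ordering above:
   * Pad1 consumes one byte with no length field, everything else needs
   * at least two bytes and a length that fits in what remains. */
  static bool parse_tlv(const uint8_t *nh, int len)
  {
          int off = 0;

          while (len > 0) {
                  int optlen;

                  if (nh[off] == TLV_PAD1) {
                          off++;
                          len--;
                          continue;
                  }
                  if (len < 2)
                          return false;   /* no room for a length byte */
                  optlen = nh[off + 1] + 2;
                  if (optlen > len)
                          return false;   /* option overruns the header */

                  /* ... dispatch on nh[off] here ... */

                  off += optlen;
                  len -= optlen;
          }
          return true;
  }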
26694 +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
26695 +index 288bafded9989..28ca70af014ad 100644
26696 +--- a/net/ipv6/ip6_tunnel.c
26697 ++++ b/net/ipv6/ip6_tunnel.c
26698 +@@ -1239,8 +1239,6 @@ route_lookup:
26699 + if (max_headroom > dev->needed_headroom)
26700 + dev->needed_headroom = max_headroom;
26701 +
26702 +- skb_set_inner_ipproto(skb, proto);
26703 +-
26704 + err = ip6_tnl_encap(skb, t, &proto, fl6);
26705 + if (err)
26706 + return err;
26707 +@@ -1377,6 +1375,8 @@ ipxip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev,
26708 + if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
26709 + return -1;
26710 +
26711 ++ skb_set_inner_ipproto(skb, protocol);
26712 ++
26713 + err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
26714 + protocol);
26715 + if (err != 0) {
26716 +diff --git a/net/mac80211/he.c b/net/mac80211/he.c
26717 +index 0c0b970835ceb..a87421c8637d6 100644
26718 +--- a/net/mac80211/he.c
26719 ++++ b/net/mac80211/he.c
26720 +@@ -111,7 +111,7 @@ ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata,
26721 + struct sta_info *sta)
26722 + {
26723 + struct ieee80211_sta_he_cap *he_cap = &sta->sta.he_cap;
26724 +- struct ieee80211_sta_he_cap own_he_cap = sband->iftype_data->he_cap;
26725 ++ struct ieee80211_sta_he_cap own_he_cap;
26726 + struct ieee80211_he_cap_elem *he_cap_ie_elem = (void *)he_cap_ie;
26727 + u8 he_ppe_size;
26728 + u8 mcs_nss_size;
26729 +@@ -123,6 +123,8 @@ ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata,
26730 + if (!he_cap_ie || !ieee80211_get_he_sta_cap(sband))
26731 + return;
26732 +
26733 ++ own_he_cap = sband->iftype_data->he_cap;
26734 ++
26735 + /* Make sure size is OK */
26736 + mcs_nss_size = ieee80211_he_mcs_nss_size(he_cap_ie_elem);
26737 + he_ppe_size =
26738 +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
26739 +index 3f2aad2e74366..b1c44fa63a06f 100644
26740 +--- a/net/mac80211/mlme.c
26741 ++++ b/net/mac80211/mlme.c
26742 +@@ -1094,11 +1094,6 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
26743 + struct ieee80211_hdr_3addr *nullfunc;
26744 + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
26745 +
26746 +- /* Don't send NDPs when STA is connected HE */
26747 +- if (sdata->vif.type == NL80211_IFTYPE_STATION &&
26748 +- !(ifmgd->flags & IEEE80211_STA_DISABLE_HE))
26749 +- return;
26750 +-
26751 + skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif,
26752 + !ieee80211_hw_check(&local->hw, DOESNT_SUPPORT_QOS_NDP));
26753 + if (!skb)
26754 +@@ -1130,10 +1125,6 @@ static void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
26755 + if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
26756 + return;
26757 +
26758 +- /* Don't send NDPs when connected HE */
26759 +- if (!(sdata->u.mgd.flags & IEEE80211_STA_DISABLE_HE))
26760 +- return;
26761 +-
26762 + skb = dev_alloc_skb(local->hw.extra_tx_headroom + 30);
26763 + if (!skb)
26764 + return;
26765 +diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
26766 +index f2fb69da9b6e1..13250cadb4202 100644
26767 +--- a/net/mac80211/sta_info.c
26768 ++++ b/net/mac80211/sta_info.c
26769 +@@ -1398,11 +1398,6 @@ static void ieee80211_send_null_response(struct sta_info *sta, int tid,
26770 + struct ieee80211_tx_info *info;
26771 + struct ieee80211_chanctx_conf *chanctx_conf;
26772 +
26773 +- /* Don't send NDPs when STA is connected HE */
26774 +- if (sdata->vif.type == NL80211_IFTYPE_STATION &&
26775 +- !(sdata->u.mgd.flags & IEEE80211_STA_DISABLE_HE))
26776 +- return;
26777 +-
26778 + if (qos) {
26779 + fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
26780 + IEEE80211_STYPE_QOS_NULLFUNC |
26781 +diff --git a/net/mptcp/options.c b/net/mptcp/options.c
26782 +index 9b263f27ce9bd..b87e46f515fb8 100644
26783 +--- a/net/mptcp/options.c
26784 ++++ b/net/mptcp/options.c
26785 +@@ -896,19 +896,20 @@ reset:
26786 + return false;
26787 + }
26788 +
26789 +-static u64 expand_ack(u64 old_ack, u64 cur_ack, bool use_64bit)
26790 ++u64 __mptcp_expand_seq(u64 old_seq, u64 cur_seq)
26791 + {
26792 +- u32 old_ack32, cur_ack32;
26793 +-
26794 +- if (use_64bit)
26795 +- return cur_ack;
26796 +-
26797 +- old_ack32 = (u32)old_ack;
26798 +- cur_ack32 = (u32)cur_ack;
26799 +- cur_ack = (old_ack & GENMASK_ULL(63, 32)) + cur_ack32;
26800 +- if (unlikely(before(cur_ack32, old_ack32)))
26801 +- return cur_ack + (1LL << 32);
26802 +- return cur_ack;
26803 ++ u32 old_seq32, cur_seq32;
26804 ++
26805 ++ old_seq32 = (u32)old_seq;
26806 ++ cur_seq32 = (u32)cur_seq;
26807 ++ cur_seq = (old_seq & GENMASK_ULL(63, 32)) + cur_seq32;
26808 ++ if (unlikely(cur_seq32 < old_seq32 && before(old_seq32, cur_seq32)))
26809 ++ return cur_seq + (1LL << 32);
26810 ++
26811 ++ /* reverse wrap could happen, too */
26812 ++ if (unlikely(cur_seq32 > old_seq32 && after(old_seq32, cur_seq32)))
26813 ++ return cur_seq - (1LL << 32);
26814 ++ return cur_seq;
26815 + }
26816 +
26817 + static void ack_update_msk(struct mptcp_sock *msk,
26818 +@@ -926,7 +927,7 @@ static void ack_update_msk(struct mptcp_sock *msk,
26819 + * more dangerous than missing an ack
26820 + */
26821 + old_snd_una = msk->snd_una;
26822 +- new_snd_una = expand_ack(old_snd_una, mp_opt->data_ack, mp_opt->ack64);
26823 ++ new_snd_una = mptcp_expand_seq(old_snd_una, mp_opt->data_ack, mp_opt->ack64);
26824 +
26825 + /* ACK for data not even sent yet? Ignore. */
26826 + if (after64(new_snd_una, snd_nxt))
26827 +@@ -963,7 +964,7 @@ bool mptcp_update_rcv_data_fin(struct mptcp_sock *msk, u64 data_fin_seq, bool us
26828 + return false;
26829 +
26830 + WRITE_ONCE(msk->rcv_data_fin_seq,
26831 +- expand_ack(READ_ONCE(msk->ack_seq), data_fin_seq, use_64bit));
26832 ++ mptcp_expand_seq(READ_ONCE(msk->ack_seq), data_fin_seq, use_64bit));
26833 + WRITE_ONCE(msk->rcv_data_fin, 1);
26834 +
26835 + return true;
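__mptcp_expand_seq() reconstructs a 64-bit sequence number from a received 32-bit one by grafting the low word onto the last known upper word, then correcting by plus or minus 2^32 when the circular 32-bit comparison says the low word wrapped; unlike the removed expand_ack(), it also handles the reverse wrap. A standalone equivalent:

  #include <stdint.h>

  /* Circular 32-bit "a is before b", as in the kernel's before(). */
  static int before32(uint32_t a, uint32_t b)
  {
          return (int32_t)(a - b) < 0;
  }

  static uint64_t expand_seq(uint64_t old_seq, uint64_t cur_seq)
  {
          uint32_t old32 = (uint32_t)old_seq;
          uint32_t cur32 = (uint32_t)cur_seq;

          cur_seq = (old_seq & ~0xffffffffULL) + cur32;
          if (cur32 < old32 && before32(old32, cur32))
                  return cur_seq + (1ULL << 32);  /* low word wrapped forward */
          if (cur32 > old32 && before32(cur32, old32))
                  return cur_seq - (1ULL << 32);  /* reverse wrap */
          return cur_seq;
  }

For example, with old_seq = 0x1fffffff0 and a received low word of 0x10, the forward-wrap branch fires and the result is 0x200000010.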
26836 +diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
26837 +index 2469e06a3a9d6..3f5d90a20235a 100644
26838 +--- a/net/mptcp/pm_netlink.c
26839 ++++ b/net/mptcp/pm_netlink.c
26840 +@@ -971,8 +971,14 @@ skip_family:
26841 + if (tb[MPTCP_PM_ADDR_ATTR_FLAGS])
26842 + entry->flags = nla_get_u32(tb[MPTCP_PM_ADDR_ATTR_FLAGS]);
26843 +
26844 +- if (tb[MPTCP_PM_ADDR_ATTR_PORT])
26845 ++ if (tb[MPTCP_PM_ADDR_ATTR_PORT]) {
26846 ++ if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) {
26847 ++ NL_SET_ERR_MSG_ATTR(info->extack, attr,
26848 ++ "flags must have signal when using port");
26849 ++ return -EINVAL;
26850 ++ }
26851 + entry->addr.port = htons(nla_get_u16(tb[MPTCP_PM_ADDR_ATTR_PORT]));
26852 ++ }
26853 +
26854 + return 0;
26855 + }
26856 +diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
26857 +index 632350018fb66..8ead550df8b1e 100644
26858 +--- a/net/mptcp/protocol.c
26859 ++++ b/net/mptcp/protocol.c
26860 +@@ -2946,6 +2946,11 @@ static void mptcp_release_cb(struct sock *sk)
26861 + spin_lock_bh(&sk->sk_lock.slock);
26862 + }
26863 +
26864 ++ /* be sure to set the current sk state before taking actions
26865 ++ * depending on sk_state
26866 ++ */
26867 ++ if (test_and_clear_bit(MPTCP_CONNECTED, &mptcp_sk(sk)->flags))
26868 ++ __mptcp_set_connected(sk);
26869 + if (test_and_clear_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->flags))
26870 + __mptcp_clean_una_wakeup(sk);
26871 + if (test_and_clear_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->flags))
26872 +diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
26873 +index 385796f0ef19b..7b634568f49cf 100644
26874 +--- a/net/mptcp/protocol.h
26875 ++++ b/net/mptcp/protocol.h
26876 +@@ -109,6 +109,7 @@
26877 + #define MPTCP_ERROR_REPORT 8
26878 + #define MPTCP_RETRANSMIT 9
26879 + #define MPTCP_WORK_SYNC_SETSOCKOPT 10
26880 ++#define MPTCP_CONNECTED 11
26881 +
26882 + static inline bool before64(__u64 seq1, __u64 seq2)
26883 + {
26884 +@@ -579,6 +580,7 @@ void mptcp_get_options(const struct sk_buff *skb,
26885 + struct mptcp_options_received *mp_opt);
26886 +
26887 + void mptcp_finish_connect(struct sock *sk);
26888 ++void __mptcp_set_connected(struct sock *sk);
26889 + static inline bool mptcp_is_fully_established(struct sock *sk)
26890 + {
26891 + return inet_sk_state_load(sk) == TCP_ESTABLISHED &&
26892 +@@ -593,6 +595,14 @@ int mptcp_setsockopt(struct sock *sk, int level, int optname,
26893 + int mptcp_getsockopt(struct sock *sk, int level, int optname,
26894 + char __user *optval, int __user *option);
26895 +
26896 ++u64 __mptcp_expand_seq(u64 old_seq, u64 cur_seq);
26897 ++static inline u64 mptcp_expand_seq(u64 old_seq, u64 cur_seq, bool use_64bit)
26898 ++{
26899 ++ if (use_64bit)
26900 ++ return cur_seq;
26901 ++
26902 ++ return __mptcp_expand_seq(old_seq, cur_seq);
26903 ++}
26904 + void __mptcp_check_push(struct sock *sk, struct sock *ssk);
26905 + void __mptcp_data_acked(struct sock *sk);
26906 + void __mptcp_error_report(struct sock *sk);
26907 +diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
26908 +index be1de4084196b..cbc452d0901ec 100644
26909 +--- a/net/mptcp/subflow.c
26910 ++++ b/net/mptcp/subflow.c
26911 +@@ -371,6 +371,24 @@ static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct soc
26912 + return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport;
26913 + }
26914 +
26915 ++void __mptcp_set_connected(struct sock *sk)
26916 ++{
26917 ++ if (sk->sk_state == TCP_SYN_SENT) {
26918 ++ inet_sk_state_store(sk, TCP_ESTABLISHED);
26919 ++ sk->sk_state_change(sk);
26920 ++ }
26921 ++}
26922 ++
26923 ++static void mptcp_set_connected(struct sock *sk)
26924 ++{
26925 ++ mptcp_data_lock(sk);
26926 ++ if (!sock_owned_by_user(sk))
26927 ++ __mptcp_set_connected(sk);
26928 ++ else
26929 ++ set_bit(MPTCP_CONNECTED, &mptcp_sk(sk)->flags);
26930 ++ mptcp_data_unlock(sk);
26931 ++}
26932 ++
26933 + static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
26934 + {
26935 + struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
26936 +@@ -379,10 +397,6 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
26937 +
26938 + subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);
26939 +
26940 +- if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
26941 +- inet_sk_state_store(parent, TCP_ESTABLISHED);
26942 +- parent->sk_state_change(parent);
26943 +- }
26944 +
26945 + /* be sure no special action on any packet other than syn-ack */
26946 + if (subflow->conn_finished)
26947 +@@ -411,6 +425,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
26948 + subflow->remote_key);
26949 + MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK);
26950 + mptcp_finish_connect(sk);
26951 ++ mptcp_set_connected(parent);
26952 + } else if (subflow->request_join) {
26953 + u8 hmac[SHA256_DIGEST_SIZE];
26954 +
26955 +@@ -430,15 +445,15 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
26956 + goto do_reset;
26957 + }
26958 +
26959 ++ if (!mptcp_finish_join(sk))
26960 ++ goto do_reset;
26961 ++
26962 + subflow_generate_hmac(subflow->local_key, subflow->remote_key,
26963 + subflow->local_nonce,
26964 + subflow->remote_nonce,
26965 + hmac);
26966 + memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);
26967 +
26968 +- if (!mptcp_finish_join(sk))
26969 +- goto do_reset;
26970 +-
26971 + subflow->mp_join = 1;
26972 + MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
26973 +
26974 +@@ -451,6 +466,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
26975 + } else if (mptcp_check_fallback(sk)) {
26976 + fallback:
26977 + mptcp_rcv_space_init(mptcp_sk(parent), sk);
26978 ++ mptcp_set_connected(parent);
26979 + }
26980 + return;
26981 +
26982 +@@ -558,6 +574,7 @@ static void mptcp_sock_destruct(struct sock *sk)
26983 +
26984 + static void mptcp_force_close(struct sock *sk)
26985 + {
26986 ++ /* the msk is not yet exposed to user-space */
26987 + inet_sk_state_store(sk, TCP_CLOSE);
26988 + sk_common_release(sk);
26989 + }
26990 +@@ -775,15 +792,6 @@ enum mapping_status {
26991 + MAPPING_DUMMY
26992 + };
26993 +
26994 +-static u64 expand_seq(u64 old_seq, u16 old_data_len, u64 seq)
26995 +-{
26996 +- if ((u32)seq == (u32)old_seq)
26997 +- return old_seq;
26998 +-
26999 +- /* Assume map covers data not mapped yet. */
27000 +- return seq | ((old_seq + old_data_len + 1) & GENMASK_ULL(63, 32));
27001 +-}
27002 +-
27003 + static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
27004 + {
27005 + pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
27006 +@@ -907,13 +915,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
27007 + data_len--;
27008 + }
27009 +
27010 +- if (!mpext->dsn64) {
27011 +- map_seq = expand_seq(subflow->map_seq, subflow->map_data_len,
27012 +- mpext->data_seq);
27013 +- pr_debug("expanded seq=%llu", subflow->map_seq);
27014 +- } else {
27015 +- map_seq = mpext->data_seq;
27016 +- }
27017 ++ map_seq = mptcp_expand_seq(READ_ONCE(msk->ack_seq), mpext->data_seq, mpext->dsn64);
27018 + WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64);
27019 +
27020 + if (subflow->map_valid) {
27021 +@@ -1489,10 +1491,7 @@ static void subflow_state_change(struct sock *sk)
27022 + mptcp_rcv_space_init(mptcp_sk(parent), sk);
27023 + pr_fallback(mptcp_sk(parent));
27024 + subflow->conn_finished = 1;
27025 +- if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
27026 +- inet_sk_state_store(parent, TCP_ESTABLISHED);
27027 +- parent->sk_state_change(parent);
27028 +- }
27029 ++ mptcp_set_connected(parent);
27030 + }
27031 +
27032 + /* as recvmsg() does not acquire the subflow socket for ssk selection
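The new mptcp_set_connected() is an instance of the socket-lock deferral idiom: if user context currently owns the socket, the state change cannot be applied immediately, so a flag bit records it and mptcp_release_cb() replays it when the lock is released. That is also why the release path above must set the state before acting on anything that depends on it. Kernel pseudo-C of the idiom, with flags() standing in for &mptcp_sk(sk)->flags:

  static void set_connected(struct sock *sk)
  {
          mptcp_data_lock(sk);
          if (!sock_owned_by_user(sk))
                  __set_connected(sk);    /* safe: we own the state now */
          else
                  set_bit(MPTCP_CONNECTED, flags(sk));    /* replay later */
          mptcp_data_unlock(sk);
  }

  static void release_cb(struct sock *sk) /* runs from release_sock() */
  {
          if (test_and_clear_bit(MPTCP_CONNECTED, flags(sk)))
                  __set_connected(sk);
  }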
27033 +diff --git a/net/mptcp/token.c b/net/mptcp/token.c
27034 +index 8f0270a780ce5..72a24e63b1314 100644
27035 +--- a/net/mptcp/token.c
27036 ++++ b/net/mptcp/token.c
27037 +@@ -156,9 +156,6 @@ int mptcp_token_new_connect(struct sock *sk)
27038 + int retries = TOKEN_MAX_RETRIES;
27039 + struct token_bucket *bucket;
27040 +
27041 +- pr_debug("ssk=%p, local_key=%llu, token=%u, idsn=%llu\n",
27042 +- sk, subflow->local_key, subflow->token, subflow->idsn);
27043 +-
27044 + again:
27045 + mptcp_crypto_key_gen_sha(&subflow->local_key, &subflow->token,
27046 + &subflow->idsn);
27047 +@@ -172,6 +169,9 @@ again:
27048 + goto again;
27049 + }
27050 +
27051 ++ pr_debug("ssk=%p, local_key=%llu, token=%u, idsn=%llu\n",
27052 ++ sk, subflow->local_key, subflow->token, subflow->idsn);
27053 ++
27054 + WRITE_ONCE(msk->token, subflow->token);
27055 + __sk_nulls_add_node_rcu((struct sock *)msk, &bucket->msk_chain);
27056 + bucket->chain_len++;
27057 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
27058 +index bf4d6ec9fc55c..fcb15b8904e87 100644
27059 +--- a/net/netfilter/nf_tables_api.c
27060 ++++ b/net/netfilter/nf_tables_api.c
27061 +@@ -571,7 +571,7 @@ static struct nft_table *nft_table_lookup(const struct net *net,
27062 + table->family == family &&
27063 + nft_active_genmask(table, genmask)) {
27064 + if (nft_table_has_owner(table) &&
27065 +- table->nlpid != nlpid)
27066 ++ nlpid && table->nlpid != nlpid)
27067 + return ERR_PTR(-EPERM);
27068 +
27069 + return table;
27070 +@@ -583,7 +583,7 @@ static struct nft_table *nft_table_lookup(const struct net *net,
27071 +
27072 + static struct nft_table *nft_table_lookup_byhandle(const struct net *net,
27073 + const struct nlattr *nla,
27074 +- u8 genmask)
27075 ++ u8 genmask, u32 nlpid)
27076 + {
27077 + struct nftables_pernet *nft_net;
27078 + struct nft_table *table;
27079 +@@ -591,8 +591,13 @@ static struct nft_table *nft_table_lookup_byhandle(const struct net *net,
27080 + nft_net = nft_pernet(net);
27081 + list_for_each_entry(table, &nft_net->tables, list) {
27082 + if (be64_to_cpu(nla_get_be64(nla)) == table->handle &&
27083 +- nft_active_genmask(table, genmask))
27084 ++ nft_active_genmask(table, genmask)) {
27085 ++ if (nft_table_has_owner(table) &&
27086 ++ nlpid && table->nlpid != nlpid)
27087 ++ return ERR_PTR(-EPERM);
27088 ++
27089 + return table;
27090 ++ }
27091 + }
27092 +
27093 + return ERR_PTR(-ENOENT);
27094 +@@ -1279,7 +1284,8 @@ static int nf_tables_deltable(struct sk_buff *skb, const struct nfnl_info *info,
27095 +
27096 + if (nla[NFTA_TABLE_HANDLE]) {
27097 + attr = nla[NFTA_TABLE_HANDLE];
27098 +- table = nft_table_lookup_byhandle(net, attr, genmask);
27099 ++ table = nft_table_lookup_byhandle(net, attr, genmask,
27100 ++ NETLINK_CB(skb).portid);
27101 + } else {
27102 + attr = nla[NFTA_TABLE_NAME];
27103 + table = nft_table_lookup(net, attr, family, genmask,
27104 +@@ -3243,9 +3249,9 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
27105 + u8 genmask = nft_genmask_next(info->net);
27106 + struct nft_rule *rule, *old_rule = NULL;
27107 + struct nft_expr_info *expr_info = NULL;
27108 ++ struct nft_flow_rule *flow = NULL;
27109 + int family = nfmsg->nfgen_family;
27110 + struct net *net = info->net;
27111 +- struct nft_flow_rule *flow;
27112 + struct nft_userdata *udata;
27113 + struct nft_table *table;
27114 + struct nft_chain *chain;
27115 +@@ -3340,13 +3346,13 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
27116 + nla_for_each_nested(tmp, nla[NFTA_RULE_EXPRESSIONS], rem) {
27117 + err = -EINVAL;
27118 + if (nla_type(tmp) != NFTA_LIST_ELEM)
27119 +- goto err1;
27120 ++ goto err_release_expr;
27121 + if (n == NFT_RULE_MAXEXPRS)
27122 +- goto err1;
27123 ++ goto err_release_expr;
27124 + err = nf_tables_expr_parse(&ctx, tmp, &expr_info[n]);
27125 + if (err < 0) {
27126 + NL_SET_BAD_ATTR(extack, tmp);
27127 +- goto err1;
27128 ++ goto err_release_expr;
27129 + }
27130 + size += expr_info[n].ops->size;
27131 + n++;
27132 +@@ -3355,7 +3361,7 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
27133 + /* Check for overflow of dlen field */
27134 + err = -EFBIG;
27135 + if (size >= 1 << 12)
27136 +- goto err1;
27137 ++ goto err_release_expr;
27138 +
27139 + if (nla[NFTA_RULE_USERDATA]) {
27140 + ulen = nla_len(nla[NFTA_RULE_USERDATA]);
27141 +@@ -3366,7 +3372,7 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
27142 + err = -ENOMEM;
27143 + rule = kzalloc(sizeof(*rule) + size + usize, GFP_KERNEL);
27144 + if (rule == NULL)
27145 +- goto err1;
27146 ++ goto err_release_expr;
27147 +
27148 + nft_activate_next(net, rule);
27149 +
27150 +@@ -3385,7 +3391,7 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
27151 + err = nf_tables_newexpr(&ctx, &expr_info[i], expr);
27152 + if (err < 0) {
27153 + NL_SET_BAD_ATTR(extack, expr_info[i].attr);
27154 +- goto err2;
27155 ++ goto err_release_rule;
27156 + }
27157 +
27158 + if (expr_info[i].ops->validate)
27159 +@@ -3395,16 +3401,24 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
27160 + expr = nft_expr_next(expr);
27161 + }
27162 +
27163 ++ if (chain->flags & NFT_CHAIN_HW_OFFLOAD) {
27164 ++ flow = nft_flow_rule_create(net, rule);
27165 ++ if (IS_ERR(flow)) {
27166 ++ err = PTR_ERR(flow);
27167 ++ goto err_release_rule;
27168 ++ }
27169 ++ }
27170 ++
27171 + if (info->nlh->nlmsg_flags & NLM_F_REPLACE) {
27172 + trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule);
27173 + if (trans == NULL) {
27174 + err = -ENOMEM;
27175 +- goto err2;
27176 ++ goto err_destroy_flow_rule;
27177 + }
27178 + err = nft_delrule(&ctx, old_rule);
27179 + if (err < 0) {
27180 + nft_trans_destroy(trans);
27181 +- goto err2;
27182 ++ goto err_destroy_flow_rule;
27183 + }
27184 +
27185 + list_add_tail_rcu(&rule->list, &old_rule->list);
27186 +@@ -3412,7 +3426,7 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
27187 + trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule);
27188 + if (!trans) {
27189 + err = -ENOMEM;
27190 +- goto err2;
27191 ++ goto err_destroy_flow_rule;
27192 + }
27193 +
27194 + if (info->nlh->nlmsg_flags & NLM_F_APPEND) {
27195 +@@ -3430,21 +3444,19 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
27196 + kvfree(expr_info);
27197 + chain->use++;
27198 +
27199 ++ if (flow)
27200 ++ nft_trans_flow_rule(trans) = flow;
27201 ++
27202 + if (nft_net->validate_state == NFT_VALIDATE_DO)
27203 + return nft_table_validate(net, table);
27204 +
27205 +- if (chain->flags & NFT_CHAIN_HW_OFFLOAD) {
27206 +- flow = nft_flow_rule_create(net, rule);
27207 +- if (IS_ERR(flow))
27208 +- return PTR_ERR(flow);
27209 +-
27210 +- nft_trans_flow_rule(trans) = flow;
27211 +- }
27212 +-
27213 + return 0;
27214 +-err2:
27215 ++
27216 ++err_destroy_flow_rule:
27217 ++ nft_flow_rule_destroy(flow);
27218 ++err_release_rule:
27219 + nf_tables_rule_release(&ctx, rule);
27220 +-err1:
27221 ++err_release_expr:
27222 + for (i = 0; i < n; i++) {
27223 + if (expr_info[i].ops) {
27224 + module_put(expr_info[i].ops->type->owner);
27225 +@@ -8839,11 +8851,16 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
27226 + nft_rule_expr_deactivate(&trans->ctx,
27227 + nft_trans_rule(trans),
27228 + NFT_TRANS_ABORT);
27229 ++ if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)
27230 ++ nft_flow_rule_destroy(nft_trans_flow_rule(trans));
27231 + break;
27232 + case NFT_MSG_DELRULE:
27233 + trans->ctx.chain->use++;
27234 + nft_clear(trans->ctx.net, nft_trans_rule(trans));
27235 + nft_rule_expr_activate(&trans->ctx, nft_trans_rule(trans));
27236 ++ if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)
27237 ++ nft_flow_rule_destroy(nft_trans_flow_rule(trans));
27238 ++
27239 + nft_trans_destroy(trans);
27240 + break;
27241 + case NFT_MSG_NEWSET:
27242 +diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
27243 +index a48c5fd53a80a..b58d73a965232 100644
27244 +--- a/net/netfilter/nf_tables_offload.c
27245 ++++ b/net/netfilter/nf_tables_offload.c
27246 +@@ -54,15 +54,10 @@ static void nft_flow_rule_transfer_vlan(struct nft_offload_ctx *ctx,
27247 + struct nft_flow_rule *flow)
27248 + {
27249 + struct nft_flow_match *match = &flow->match;
27250 +- struct nft_offload_ethertype ethertype;
27251 +-
27252 +- if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL) &&
27253 +- match->key.basic.n_proto != htons(ETH_P_8021Q) &&
27254 +- match->key.basic.n_proto != htons(ETH_P_8021AD))
27255 +- return;
27256 +-
27257 +- ethertype.value = match->key.basic.n_proto;
27258 +- ethertype.mask = match->mask.basic.n_proto;
27259 ++ struct nft_offload_ethertype ethertype = {
27260 ++ .value = match->key.basic.n_proto,
27261 ++ .mask = match->mask.basic.n_proto,
27262 ++ };
27263 +
27264 + if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_VLAN) &&
27265 + (match->key.vlan.vlan_tpid == htons(ETH_P_8021Q) ||
27266 +@@ -76,7 +71,9 @@ static void nft_flow_rule_transfer_vlan(struct nft_offload_ctx *ctx,
27267 + match->dissector.offset[FLOW_DISSECTOR_KEY_CVLAN] =
27268 + offsetof(struct nft_flow_key, cvlan);
27269 + match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CVLAN);
27270 +- } else {
27271 ++ } else if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC) &&
27272 ++ (match->key.basic.n_proto == htons(ETH_P_8021Q) ||
27273 ++ match->key.basic.n_proto == htons(ETH_P_8021AD))) {
27274 + match->key.basic.n_proto = match->key.vlan.vlan_tpid;
27275 + match->mask.basic.n_proto = match->mask.vlan.vlan_tpid;
27276 + match->key.vlan.vlan_tpid = ethertype.value;
27277 +@@ -594,23 +591,6 @@ int nft_flow_rule_offload_commit(struct net *net)
27278 + }
27279 + }
27280 +
27281 +- list_for_each_entry(trans, &nft_net->commit_list, list) {
27282 +- if (trans->ctx.family != NFPROTO_NETDEV)
27283 +- continue;
27284 +-
27285 +- switch (trans->msg_type) {
27286 +- case NFT_MSG_NEWRULE:
27287 +- case NFT_MSG_DELRULE:
27288 +- if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
27289 +- continue;
27290 +-
27291 +- nft_flow_rule_destroy(nft_trans_flow_rule(trans));
27292 +- break;
27293 +- default:
27294 +- break;
27295 +- }
27296 +- }
27297 +-
27298 + return err;
27299 + }
27300 +
27301 +diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
27302 +index f64f0017e9a53..670dd146fb2b1 100644
27303 +--- a/net/netfilter/nft_exthdr.c
27304 ++++ b/net/netfilter/nft_exthdr.c
27305 +@@ -42,6 +42,9 @@ static void nft_exthdr_ipv6_eval(const struct nft_expr *expr,
27306 + unsigned int offset = 0;
27307 + int err;
27308 +
27309 ++ if (pkt->skb->protocol != htons(ETH_P_IPV6))
27310 ++ goto err;
27311 ++
27312 + err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);
27313 + if (priv->flags & NFT_EXTHDR_F_PRESENT) {
27314 + nft_reg_store8(dest, err >= 0);
27315 +diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
27316 +index ac61f708b82d2..d82677e83400b 100644
27317 +--- a/net/netfilter/nft_osf.c
27318 ++++ b/net/netfilter/nft_osf.c
27319 +@@ -28,6 +28,11 @@ static void nft_osf_eval(const struct nft_expr *expr, struct nft_regs *regs,
27320 + struct nf_osf_data data;
27321 + struct tcphdr _tcph;
27322 +
27323 ++ if (pkt->tprot != IPPROTO_TCP) {
27324 ++ regs->verdict.code = NFT_BREAK;
27325 ++ return;
27326 ++ }
27327 ++
27328 + tcp = skb_header_pointer(skb, ip_hdrlen(skb),
27329 + sizeof(struct tcphdr), &_tcph);
27330 + if (!tcp) {
27331 +diff --git a/net/netfilter/nft_tproxy.c b/net/netfilter/nft_tproxy.c
27332 +index accef672088c7..5cb4d575d47ff 100644
27333 +--- a/net/netfilter/nft_tproxy.c
27334 ++++ b/net/netfilter/nft_tproxy.c
27335 +@@ -30,6 +30,12 @@ static void nft_tproxy_eval_v4(const struct nft_expr *expr,
27336 + __be16 tport = 0;
27337 + struct sock *sk;
27338 +
27339 ++ if (pkt->tprot != IPPROTO_TCP &&
27340 ++ pkt->tprot != IPPROTO_UDP) {
27341 ++ regs->verdict.code = NFT_BREAK;
27342 ++ return;
27343 ++ }
27344 ++
27345 + hp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_hdr), &_hdr);
27346 + if (!hp) {
27347 + regs->verdict.code = NFT_BREAK;
27348 +@@ -91,7 +97,8 @@ static void nft_tproxy_eval_v6(const struct nft_expr *expr,
27349 +
27350 + memset(&taddr, 0, sizeof(taddr));
27351 +
27352 +- if (!pkt->tprot_set) {
27353 ++ if (pkt->tprot != IPPROTO_TCP &&
27354 ++ pkt->tprot != IPPROTO_UDP) {
27355 + regs->verdict.code = NFT_BREAK;
27356 + return;
27357 + }
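nft_osf and nft_tproxy gain the same guard: both expressions parsed a TCP/UDP header at the transport offset regardless of pkt->tprot, reading unrelated bytes for other protocols; checking the protocol first and returning NFT_BREAK makes the rule simply not match. Reduced to a standalone check (both TCP and UDP headers begin with the source and destination ports):

  #include <stdbool.h>
  #include <stdint.h>

  #define IPPROTO_TCP 6
  #define IPPROTO_UDP 17

  /* Only dereference the transport header when it is really TCP/UDP;
   * callers treat false as "break out of this rule" (NFT_BREAK). */
  static bool transport_ports(uint8_t tprot, const uint8_t *th,
                              uint16_t *sport, uint16_t *dport)
  {
          if (tprot != IPPROTO_TCP && tprot != IPPROTO_UDP)
                  return false;
          *sport = (uint16_t)th[0] << 8 | th[1];
          *dport = (uint16_t)th[2] << 8 | th[3];
          return true;
  }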
27358 +diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
27359 +index ca52f50859899..e51ab37bbb038 100644
27360 +--- a/net/netlabel/netlabel_mgmt.c
27361 ++++ b/net/netlabel/netlabel_mgmt.c
27362 +@@ -76,6 +76,7 @@ static const struct nla_policy netlbl_mgmt_genl_policy[NLBL_MGMT_A_MAX + 1] = {
27363 + static int netlbl_mgmt_add_common(struct genl_info *info,
27364 + struct netlbl_audit *audit_info)
27365 + {
27366 ++ void *pmap = NULL;
27367 + int ret_val = -EINVAL;
27368 + struct netlbl_domaddr_map *addrmap = NULL;
27369 + struct cipso_v4_doi *cipsov4 = NULL;
27370 +@@ -175,6 +176,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
27371 + ret_val = -ENOMEM;
27372 + goto add_free_addrmap;
27373 + }
27374 ++ pmap = map;
27375 + map->list.addr = addr->s_addr & mask->s_addr;
27376 + map->list.mask = mask->s_addr;
27377 + map->list.valid = 1;
27378 +@@ -183,10 +185,8 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
27379 + map->def.cipso = cipsov4;
27380 +
27381 + ret_val = netlbl_af4list_add(&map->list, &addrmap->list4);
27382 +- if (ret_val != 0) {
27383 +- kfree(map);
27384 +- goto add_free_addrmap;
27385 +- }
27386 ++ if (ret_val != 0)
27387 ++ goto add_free_map;
27388 +
27389 + entry->family = AF_INET;
27390 + entry->def.type = NETLBL_NLTYPE_ADDRSELECT;
27391 +@@ -223,6 +223,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
27392 + ret_val = -ENOMEM;
27393 + goto add_free_addrmap;
27394 + }
27395 ++ pmap = map;
27396 + map->list.addr = *addr;
27397 + map->list.addr.s6_addr32[0] &= mask->s6_addr32[0];
27398 + map->list.addr.s6_addr32[1] &= mask->s6_addr32[1];
27399 +@@ -235,10 +236,8 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
27400 + map->def.calipso = calipso;
27401 +
27402 + ret_val = netlbl_af6list_add(&map->list, &addrmap->list6);
27403 +- if (ret_val != 0) {
27404 +- kfree(map);
27405 +- goto add_free_addrmap;
27406 +- }
27407 ++ if (ret_val != 0)
27408 ++ goto add_free_map;
27409 +
27410 + entry->family = AF_INET6;
27411 + entry->def.type = NETLBL_NLTYPE_ADDRSELECT;
27412 +@@ -248,10 +247,12 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
27413 +
27414 + ret_val = netlbl_domhsh_add(entry, audit_info);
27415 + if (ret_val != 0)
27416 +- goto add_free_addrmap;
27417 ++ goto add_free_map;
27418 +
27419 + return 0;
27420 +
27421 ++add_free_map:
27422 ++ kfree(pmap);
27423 + add_free_addrmap:
27424 + kfree(addrmap);
27425 + add_doi_put_def:
27426 +diff --git a/net/qrtr/ns.c b/net/qrtr/ns.c
27427 +index 8d00dfe8139e8..1990d496fcfc0 100644
27428 +--- a/net/qrtr/ns.c
27429 ++++ b/net/qrtr/ns.c
27430 +@@ -775,8 +775,10 @@ int qrtr_ns_init(void)
27431 + }
27432 +
27433 + qrtr_ns.workqueue = alloc_workqueue("qrtr_ns_handler", WQ_UNBOUND, 1);
27434 +- if (!qrtr_ns.workqueue)
27435 ++ if (!qrtr_ns.workqueue) {
27436 ++ ret = -ENOMEM;
27437 + goto err_sock;
27438 ++ }
27439 +
27440 + qrtr_ns.sock->sk->sk_data_ready = qrtr_ns_data_ready;
27441 +
27442 +diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
27443 +index 1cac3c6fbb49c..a108469c664f7 100644
27444 +--- a/net/sched/act_vlan.c
27445 ++++ b/net/sched/act_vlan.c
27446 +@@ -70,7 +70,7 @@ static int tcf_vlan_act(struct sk_buff *skb, const struct tc_action *a,
27447 + /* replace the vid */
27448 + tci = (tci & ~VLAN_VID_MASK) | p->tcfv_push_vid;
27449 + /* replace prio bits, if tcfv_push_prio specified */
27450 +- if (p->tcfv_push_prio) {
27451 ++ if (p->tcfv_push_prio_exists) {
27452 + tci &= ~VLAN_PRIO_MASK;
27453 + tci |= p->tcfv_push_prio << VLAN_PRIO_SHIFT;
27454 + }
27455 +@@ -121,6 +121,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
27456 + struct tc_action_net *tn = net_generic(net, vlan_net_id);
27457 + struct nlattr *tb[TCA_VLAN_MAX + 1];
27458 + struct tcf_chain *goto_ch = NULL;
27459 ++ bool push_prio_exists = false;
27460 + struct tcf_vlan_params *p;
27461 + struct tc_vlan *parm;
27462 + struct tcf_vlan *v;
27463 +@@ -189,7 +190,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
27464 + push_proto = htons(ETH_P_8021Q);
27465 + }
27466 +
27467 +- if (tb[TCA_VLAN_PUSH_VLAN_PRIORITY])
27468 ++ push_prio_exists = !!tb[TCA_VLAN_PUSH_VLAN_PRIORITY];
27469 ++ if (push_prio_exists)
27470 + push_prio = nla_get_u8(tb[TCA_VLAN_PUSH_VLAN_PRIORITY]);
27471 + break;
27472 + case TCA_VLAN_ACT_POP_ETH:
27473 +@@ -241,6 +243,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
27474 + p->tcfv_action = action;
27475 + p->tcfv_push_vid = push_vid;
27476 + p->tcfv_push_prio = push_prio;
27477 ++ p->tcfv_push_prio_exists = push_prio_exists || action == TCA_VLAN_ACT_PUSH;
27478 + p->tcfv_push_proto = push_proto;
27479 +
27480 + if (action == TCA_VLAN_ACT_PUSH_ETH) {
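The act_vlan fix is the classic "zero is a valid value" bug: priority 0 could never be pushed because the fast path tested the value itself, so presence of TCA_VLAN_PUSH_VLAN_PRIORITY is now recorded in its own flag (and implied for plain pushes). A standalone sketch of the rewrite step:

  #include <stdbool.h>
  #include <stdint.h>

  #define VLAN_PRIO_MASK  0xe000
  #define VLAN_PRIO_SHIFT 13

  struct vlan_params {
          uint8_t push_prio;
          bool    push_prio_exists;       /* attribute supplied, even if 0 */
  };

  static uint16_t rewrite_tci(uint16_t tci, const struct vlan_params *p)
  {
          if (p->push_prio_exists) {      /* not: if (p->push_prio) */
                  tci &= ~VLAN_PRIO_MASK;
                  tci |= (uint16_t)p->push_prio << VLAN_PRIO_SHIFT;
          }
          return tci;
  }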
27481 +diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
27482 +index c4007b9cd16d6..5b274534264c2 100644
27483 +--- a/net/sched/cls_tcindex.c
27484 ++++ b/net/sched/cls_tcindex.c
27485 +@@ -304,7 +304,7 @@ static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
27486 + int i, err = 0;
27487 +
27488 + cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result),
27489 +- GFP_KERNEL);
27490 ++ GFP_KERNEL | __GFP_NOWARN);
27491 + if (!cp->perfect)
27492 + return -ENOMEM;
27493 +
27494 +diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
27495 +index 1db9d4a2ef5ef..b692a0de1ad5e 100644
27496 +--- a/net/sched/sch_qfq.c
27497 ++++ b/net/sched/sch_qfq.c
27498 +@@ -485,11 +485,6 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
27499 +
27500 + if (cl->qdisc != &noop_qdisc)
27501 + qdisc_hash_add(cl->qdisc, true);
27502 +- sch_tree_lock(sch);
27503 +- qdisc_class_hash_insert(&q->clhash, &cl->common);
27504 +- sch_tree_unlock(sch);
27505 +-
27506 +- qdisc_class_hash_grow(sch, &q->clhash);
27507 +
27508 + set_change_agg:
27509 + sch_tree_lock(sch);
27510 +@@ -507,8 +502,11 @@ set_change_agg:
27511 + }
27512 + if (existing)
27513 + qfq_deact_rm_from_agg(q, cl);
27514 ++ else
27515 ++ qdisc_class_hash_insert(&q->clhash, &cl->common);
27516 + qfq_add_to_agg(q, new_agg, cl);
27517 + sch_tree_unlock(sch);
27518 ++ qdisc_class_hash_grow(sch, &q->clhash);
27519 +
27520 + *arg = (unsigned long)cl;
27521 + return 0;
27522 +diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
27523 +index 39ed0e0afe6d9..c045f63d11fa6 100644
27524 +--- a/net/sunrpc/sched.c
27525 ++++ b/net/sunrpc/sched.c
27526 +@@ -591,11 +591,21 @@ static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *q
27527 + struct list_head *q;
27528 + struct rpc_task *task;
27529 +
27530 ++ /*
27531 ++ * Service the privileged queue.
27532 ++ */
27533 ++ q = &queue->tasks[RPC_NR_PRIORITY - 1];
27534 ++ if (queue->maxpriority > RPC_PRIORITY_PRIVILEGED && !list_empty(q)) {
27535 ++ task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
27536 ++ goto out;
27537 ++ }
27538 ++
27539 + /*
27540 + * Service a batch of tasks from a single owner.
27541 + */
27542 + q = &queue->tasks[queue->priority];
27543 +- if (!list_empty(q) && --queue->nr) {
27544 ++ if (!list_empty(q) && queue->nr) {
27545 ++ queue->nr--;
27546 + task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
27547 + goto out;
27548 + }
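Besides adding a pass over the privileged queue, the scheduler hunk fixes a pre-decrement-in-condition bug: "--queue->nr" both tests and consumes a batch slot, so the final slot always tests as zero and its task is skipped; the fix tests first, then decrements. A condensed demonstration (the kernel condition also checks list_empty(), omitted here):

  #include <stdio.h>

  static int take_buggy(int *nr) { return *nr && --*nr; } /* consumes while testing */

  static int take_fixed(int *nr)
  {
          if (!*nr)
                  return 0;
          (*nr)--;
          return 1;
  }

  int main(void)
  {
          int a = 3, b = 3, buggy = 0, fixed = 0;

          while (take_buggy(&a))
                  buggy++;
          while (take_fixed(&b))
                  fixed++;
          printf("buggy=%d fixed=%d\n", buggy, fixed);    /* buggy=2 fixed=3 */
          return 0;
  }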
27549 +diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
27550 +index d4beca895992d..593846d252143 100644
27551 +--- a/net/tipc/bcast.c
27552 ++++ b/net/tipc/bcast.c
27553 +@@ -699,7 +699,7 @@ int tipc_bcast_init(struct net *net)
27554 + spin_lock_init(&tipc_net(net)->bclock);
27555 +
27556 + if (!tipc_link_bc_create(net, 0, 0, NULL,
27557 +- FB_MTU,
27558 ++ one_page_mtu,
27559 + BCLINK_WIN_DEFAULT,
27560 + BCLINK_WIN_DEFAULT,
27561 + 0,
27562 +diff --git a/net/tipc/msg.c b/net/tipc/msg.c
27563 +index ce6ab54822d8d..7053c22e393e7 100644
27564 +--- a/net/tipc/msg.c
27565 ++++ b/net/tipc/msg.c
27566 +@@ -44,12 +44,15 @@
27567 + #define MAX_FORWARD_SIZE 1024
27568 + #ifdef CONFIG_TIPC_CRYPTO
27569 + #define BUF_HEADROOM ALIGN(((LL_MAX_HEADER + 48) + EHDR_MAX_SIZE), 16)
27570 +-#define BUF_TAILROOM (TIPC_AES_GCM_TAG_SIZE)
27571 ++#define BUF_OVERHEAD (BUF_HEADROOM + TIPC_AES_GCM_TAG_SIZE)
27572 + #else
27573 + #define BUF_HEADROOM (LL_MAX_HEADER + 48)
27574 +-#define BUF_TAILROOM 16
27575 ++#define BUF_OVERHEAD BUF_HEADROOM
27576 + #endif
27577 +
27578 ++const int one_page_mtu = PAGE_SIZE - SKB_DATA_ALIGN(BUF_OVERHEAD) -
27579 ++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
27580 ++
27581 + static unsigned int align(unsigned int i)
27582 + {
27583 + return (i + 3) & ~3u;
27584 +@@ -69,13 +72,8 @@ static unsigned int align(unsigned int i)
27585 + struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
27586 + {
27587 + struct sk_buff *skb;
27588 +-#ifdef CONFIG_TIPC_CRYPTO
27589 +- unsigned int buf_size = (BUF_HEADROOM + size + BUF_TAILROOM + 3) & ~3u;
27590 +-#else
27591 +- unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
27592 +-#endif
27593 +
27594 +- skb = alloc_skb_fclone(buf_size, gfp);
27595 ++ skb = alloc_skb_fclone(BUF_OVERHEAD + size, gfp);
27596 + if (skb) {
27597 + skb_reserve(skb, BUF_HEADROOM);
27598 + skb_put(skb, size);
27599 +@@ -395,7 +393,8 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
27600 + if (unlikely(!skb)) {
27601 + if (pktmax != MAX_MSG_SIZE)
27602 + return -ENOMEM;
27603 +- rc = tipc_msg_build(mhdr, m, offset, dsz, FB_MTU, list);
27604 ++ rc = tipc_msg_build(mhdr, m, offset, dsz,
27605 ++ one_page_mtu, list);
27606 + if (rc != dsz)
27607 + return rc;
27608 + if (tipc_msg_assemble(list))
27609 +diff --git a/net/tipc/msg.h b/net/tipc/msg.h
27610 +index 5d64596ba9877..64ae4c4c44f8c 100644
27611 +--- a/net/tipc/msg.h
27612 ++++ b/net/tipc/msg.h
27613 +@@ -99,9 +99,10 @@ struct plist;
27614 + #define MAX_H_SIZE 60 /* Largest possible TIPC header size */
27615 +
27616 + #define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE)
27617 +-#define FB_MTU 3744
27618 + #define TIPC_MEDIA_INFO_OFFSET 5
27619 +
27620 ++extern const int one_page_mtu;
27621 ++
27622 + struct tipc_skb_cb {
27623 + union {
27624 + struct {
27625 +diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
27626 +index 694de024d0ee6..74e5701034aa6 100644
27627 +--- a/net/tls/tls_sw.c
27628 ++++ b/net/tls/tls_sw.c
27629 +@@ -1153,7 +1153,7 @@ static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
27630 + int ret = 0;
27631 + bool eor;
27632 +
27633 +- eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));
27634 ++ eor = !(flags & MSG_SENDPAGE_NOTLAST);
27635 + sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
27636 +
27637 + /* Call the sk_stream functions to manage the sndbuf mem. */
27638 +diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
27639 +index 9d2a89d793c01..9ae13cccfb28d 100644
27640 +--- a/net/xdp/xsk_queue.h
27641 ++++ b/net/xdp/xsk_queue.h
27642 +@@ -128,12 +128,15 @@ static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
27643 + static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
27644 + struct xdp_desc *desc)
27645 + {
27646 +- u64 chunk;
27647 +-
27648 +- if (desc->len > pool->chunk_size)
27649 +- return false;
27650 ++ u64 chunk, chunk_end;
27651 +
27652 + chunk = xp_aligned_extract_addr(pool, desc->addr);
27653 ++ if (likely(desc->len)) {
27654 ++ chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len - 1);
27655 ++ if (chunk != chunk_end)
27656 ++ return false;
27657 ++ }
27658 ++
27659 + if (chunk >= pool->addrs_cnt)
27660 + return false;
27661 +
27662 +diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
27663 +index 6d6917b68856f..e843b0d9e2a61 100644
27664 +--- a/net/xfrm/xfrm_device.c
27665 ++++ b/net/xfrm/xfrm_device.c
27666 +@@ -268,6 +268,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
27667 + xso->num_exthdrs = 0;
27668 + xso->flags = 0;
27669 + xso->dev = NULL;
27670 ++ xso->real_dev = NULL;
27671 + dev_put(dev);
27672 +
27673 + if (err != -EOPNOTSUPP)
27674 +diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
27675 +index e4cb0ff4dcf41..ac907b9d32d1e 100644
27676 +--- a/net/xfrm/xfrm_output.c
27677 ++++ b/net/xfrm/xfrm_output.c
27678 +@@ -711,15 +711,8 @@ out:
27679 + static int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb)
27680 + {
27681 + #if IS_ENABLED(CONFIG_IPV6)
27682 +- unsigned int ptr = 0;
27683 + int err;
27684 +
27685 +- if (x->outer_mode.encap == XFRM_MODE_BEET &&
27686 +- ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL, NULL) >= 0) {
27687 +- net_warn_ratelimited("BEET mode doesn't support inner IPv6 fragments\n");
27688 +- return -EAFNOSUPPORT;
27689 +- }
27690 +-
27691 + err = xfrm6_tunnel_check_size(skb);
27692 + if (err)
27693 + return err;
27694 +diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
27695 +index 4496f7efa2200..c25586156c6a7 100644
27696 +--- a/net/xfrm/xfrm_state.c
27697 ++++ b/net/xfrm/xfrm_state.c
27698 +@@ -2518,7 +2518,7 @@ void xfrm_state_delete_tunnel(struct xfrm_state *x)
27699 + }
27700 + EXPORT_SYMBOL(xfrm_state_delete_tunnel);
27701 +
27702 +-u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
27703 ++u32 __xfrm_state_mtu(struct xfrm_state *x, int mtu)
27704 + {
27705 + const struct xfrm_type *type = READ_ONCE(x->type);
27706 + struct crypto_aead *aead;
27707 +@@ -2549,7 +2549,17 @@ u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
27708 + return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
27709 + net_adj) & ~(blksize - 1)) + net_adj - 2;
27710 + }
27711 +-EXPORT_SYMBOL_GPL(xfrm_state_mtu);
27712 ++EXPORT_SYMBOL_GPL(__xfrm_state_mtu);
27713 ++
27714 ++u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
27715 ++{
27716 ++ mtu = __xfrm_state_mtu(x, mtu);
27717 ++
27718 ++ if (x->props.family == AF_INET6 && mtu < IPV6_MIN_MTU)
27719 ++ return IPV6_MIN_MTU;
27720 ++
27721 ++ return mtu;
27722 ++}
27723 +
27724 + int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload)
27725 + {
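xfrm_state_mtu() is split in two: the raw per-state computation becomes __xfrm_state_mtu(), used by the ESP TFC-padding callers changed earlier in this patch, which must not be clamped, while the public wrapper now refuses to return less than IPV6_MIN_MTU (1280) for IPv6 states. The shape of the wrapper, with the inner computation stubbed to an illustrative overhead:

  #include <stdint.h>

  #define IPV6_MIN_MTU 1280
  #define AF_INET6     10

  /* Stub for the real per-state overhead computation. */
  static uint32_t __state_mtu(int mtu)
  {
          return (uint32_t)(mtu - 60);    /* illustrative header/trailer cost */
  }

  static uint32_t state_mtu(int family, int mtu)
  {
          uint32_t m = __state_mtu(mtu);

          if (family == AF_INET6 && m < IPV6_MIN_MTU)
                  return IPV6_MIN_MTU;    /* IPv6 links must carry 1280 bytes */
          return m;
  }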
27726 +diff --git a/samples/bpf/xdp_redirect_user.c b/samples/bpf/xdp_redirect_user.c
27727 +index 41d705c3a1f7f..93854e135134c 100644
27728 +--- a/samples/bpf/xdp_redirect_user.c
27729 ++++ b/samples/bpf/xdp_redirect_user.c
27730 +@@ -130,7 +130,7 @@ int main(int argc, char **argv)
27731 + if (!(xdp_flags & XDP_FLAGS_SKB_MODE))
27732 + xdp_flags |= XDP_FLAGS_DRV_MODE;
27733 +
27734 +- if (optind == argc) {
27735 ++ if (optind + 2 != argc) {
27736 + printf("usage: %s <IFNAME|IFINDEX>_IN <IFNAME|IFINDEX>_OUT\n", argv[0]);
27737 + return 1;
27738 + }
27739 +@@ -213,5 +213,5 @@ int main(int argc, char **argv)
27740 + poll_stats(2, ifindex_out);
27741 +
27742 + out:
27743 +- return 0;
27744 ++ return ret;
27745 + }
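Two fixes in one hunk: the argument check now demands exactly two positional arguments (the old optind == argc test only caught zero), and the exit path returns the accumulated error instead of a hard-coded 0. A small sketch of the corrected getopt pattern (the option letter is invented):

#include <stdio.h>
#include <unistd.h>

/* Require exactly two positional arguments after the option flags;
 * "optind == argc" would only have rejected the zero-argument case. */
int main(int argc, char **argv)
{
    int opt, ret = 0;

    while ((opt = getopt(argc, argv, "S")) != -1)
        ; /* parse flags; positional args start at optind */

    if (optind + 2 != argc) {
        fprintf(stderr, "usage: %s [-S] IFIN IFOUT\n", argv[0]);
        return 1;
    }

    printf("in=%s out=%s\n", argv[optind], argv[optind + 1]);
    return ret; /* propagate failures instead of a hard-coded 0 */
}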
27746 +diff --git a/scripts/Makefile.build b/scripts/Makefile.build
27747 +index 949f723efe538..34d257653fb47 100644
27748 +--- a/scripts/Makefile.build
27749 ++++ b/scripts/Makefile.build
27750 +@@ -268,7 +268,8 @@ define rule_as_o_S
27751 + endef
27752 +
27753 + # Built-in and composite module parts
27754 +-$(obj)/%.o: $(src)/%.c $(recordmcount_source) $(objtool_dep) FORCE
27755 ++.SECONDEXPANSION:
27756 ++$(obj)/%.o: $(src)/%.c $(recordmcount_source) $$(objtool_dep) FORCE
27757 + $(call if_changed_rule,cc_o_c)
27758 + $(call cmd,force_checksrc)
27759 +
27760 +@@ -349,7 +350,7 @@ cmd_modversions_S = \
27761 + fi
27762 + endif
27763 +
27764 +-$(obj)/%.o: $(src)/%.S $(objtool_dep) FORCE
27765 ++$(obj)/%.o: $(src)/%.S $$(objtool_dep) FORCE
27766 + $(call if_changed_rule,as_o_S)
27767 +
27768 + targets += $(filter-out $(subdir-builtin), $(real-obj-y))
27769 +diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
27770 +index 0e0f6466b18d6..475faa15854e6 100755
27771 +--- a/scripts/link-vmlinux.sh
27772 ++++ b/scripts/link-vmlinux.sh
27773 +@@ -235,6 +235,10 @@ gen_btf()
27774 +
27775 + vmlinux_link ${1}
27776 +
27777 ++ if [ "${pahole_ver}" -ge "118" ] && [ "${pahole_ver}" -le "121" ]; then
27778 ++ # pahole 1.18 through 1.21 can't handle zero-sized per-CPU vars
27779 ++ extra_paholeopt="${extra_paholeopt} --skip_encoding_btf_vars"
27780 ++ fi
27781 + if [ "${pahole_ver}" -ge "121" ]; then
27782 + extra_paholeopt="${extra_paholeopt} --btf_gen_floats"
27783 + fi
27784 +diff --git a/scripts/tools-support-relr.sh b/scripts/tools-support-relr.sh
27785 +index 45e8aa360b457..cb55878bd5b81 100755
27786 +--- a/scripts/tools-support-relr.sh
27787 ++++ b/scripts/tools-support-relr.sh
27788 +@@ -7,7 +7,8 @@ trap "rm -f $tmp_file.o $tmp_file $tmp_file.bin" EXIT
27789 + cat << "END" | $CC -c -x c - -o $tmp_file.o >/dev/null 2>&1
27790 + void *p = &p;
27791 + END
27792 +-$LD $tmp_file.o -shared -Bsymbolic --pack-dyn-relocs=relr -o $tmp_file
27793 ++$LD $tmp_file.o -shared -Bsymbolic --pack-dyn-relocs=relr \
27794 ++ --use-android-relr-tags -o $tmp_file
27795 +
27796 + # Despite printing an error message, GNU nm still exits with exit code 0 if it
27797 + # sees a relr section. So we need to check that nothing is printed to stderr.
27798 +diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
27799 +index 0de367aaa2d31..7ac5204c8d1f2 100644
27800 +--- a/security/integrity/evm/evm_main.c
27801 ++++ b/security/integrity/evm/evm_main.c
27802 +@@ -521,7 +521,7 @@ void evm_inode_post_setattr(struct dentry *dentry, int ia_valid)
27803 + }
27804 +
27805 + /*
27806 +- * evm_inode_init_security - initializes security.evm
27807 ++ * evm_inode_init_security - initializes security.evm HMAC value
27808 + */
27809 + int evm_inode_init_security(struct inode *inode,
27810 + const struct xattr *lsm_xattr,
27811 +@@ -530,7 +530,8 @@ int evm_inode_init_security(struct inode *inode,
27812 + struct evm_xattr *xattr_data;
27813 + int rc;
27814 +
27815 +- if (!evm_key_loaded() || !evm_protected_xattr(lsm_xattr->name))
27816 ++ if (!(evm_initialized & EVM_INIT_HMAC) ||
27817 ++ !evm_protected_xattr(lsm_xattr->name))
27818 + return 0;
27819 +
27820 + xattr_data = kzalloc(sizeof(*xattr_data), GFP_NOFS);
27821 +diff --git a/security/integrity/evm/evm_secfs.c b/security/integrity/evm/evm_secfs.c
27822 +index bbc85637e18b2..5f0da41bccd07 100644
27823 +--- a/security/integrity/evm/evm_secfs.c
27824 ++++ b/security/integrity/evm/evm_secfs.c
27825 +@@ -66,12 +66,13 @@ static ssize_t evm_read_key(struct file *filp, char __user *buf,
27826 + static ssize_t evm_write_key(struct file *file, const char __user *buf,
27827 + size_t count, loff_t *ppos)
27828 + {
27829 +- int i, ret;
27830 ++ unsigned int i;
27831 ++ int ret;
27832 +
27833 + if (!capable(CAP_SYS_ADMIN) || (evm_initialized & EVM_SETUP_COMPLETE))
27834 + return -EPERM;
27835 +
27836 +- ret = kstrtoint_from_user(buf, count, 0, &i);
27837 ++ ret = kstrtouint_from_user(buf, count, 0, &i);
27838 +
27839 + if (ret)
27840 + return ret;
27841 +@@ -80,12 +81,12 @@ static ssize_t evm_write_key(struct file *file, const char __user *buf,
27842 + if (!i || (i & ~EVM_INIT_MASK) != 0)
27843 + return -EINVAL;
27844 +
27845 +- /* Don't allow a request to freshly enable metadata writes if
27846 +- * keys are loaded.
27847 ++ /*
27848 ++ * Don't allow a request to enable metadata writes if
27849 ++ * an HMAC key is loaded.
27850 + */
27851 + if ((i & EVM_ALLOW_METADATA_WRITES) &&
27852 +- ((evm_initialized & EVM_KEY_MASK) != 0) &&
27853 +- !(evm_initialized & EVM_ALLOW_METADATA_WRITES))
27854 ++ (evm_initialized & EVM_INIT_HMAC) != 0)
27855 + return -EPERM;
27856 +
27857 + if (i & EVM_INIT_HMAC) {
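This rewrite makes the EVM initialization flags behave additively while forbidding the re-enabling of metadata writes once an HMAC key is loaded. A sketch of that flag arithmetic with illustrative values (the real EVM_* constants and mask may differ):

#include <stdio.h>

#define INIT_HMAC               0x1
#define INIT_X509               0x2
#define ALLOW_METADATA_WRITES   0x4
#define INIT_MASK (INIT_HMAC | INIT_X509 | ALLOW_METADATA_WRITES)

static unsigned int evm_initialized;

/* Mirrors the fixed evm_write_key() policy: writes are additive, and
 * metadata writes cannot be enabled once an HMAC key is loaded. */
static int write_key(unsigned int i)
{
    if (!i || (i & ~INIT_MASK))
        return -1; /* -EINVAL */

    if ((i & ALLOW_METADATA_WRITES) && (evm_initialized & INIT_HMAC))
        return -2; /* -EPERM */

    if (i & INIT_HMAC)
        evm_initialized &= ~ALLOW_METADATA_WRITES; /* HMAC disables it */

    evm_initialized |= i;
    return 0;
}

int main(void)
{
    write_key(0x2);                     /* echo 2 */
    write_key(0x1);                     /* echo 1 */
    printf("0x%x\n", evm_initialized);  /* 0x3: additive */
    printf("%d\n", write_key(0x4));     /* -2: HMAC key already loaded */
    return 0;
}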
27858 +diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
27859 +index 4e5eb0236278a..55dac618f2a12 100644
27860 +--- a/security/integrity/ima/ima_appraise.c
27861 ++++ b/security/integrity/ima/ima_appraise.c
27862 +@@ -522,8 +522,6 @@ void ima_inode_post_setattr(struct user_namespace *mnt_userns,
27863 + return;
27864 +
27865 + action = ima_must_appraise(mnt_userns, inode, MAY_ACCESS, POST_SETATTR);
27866 +- if (!action)
27867 +- __vfs_removexattr(&init_user_ns, dentry, XATTR_NAME_IMA);
27868 + iint = integrity_iint_find(inode);
27869 + if (iint) {
27870 + set_bit(IMA_CHANGE_ATTR, &iint->atomic_flags);
27871 +diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
27872 +index 5805c5de39fbf..7a282d8e71485 100644
27873 +--- a/sound/firewire/amdtp-stream.c
27874 ++++ b/sound/firewire/amdtp-stream.c
27875 +@@ -1404,14 +1404,17 @@ int amdtp_domain_start(struct amdtp_domain *d, unsigned int ir_delay_cycle)
27876 + unsigned int queue_size;
27877 + struct amdtp_stream *s;
27878 + int cycle;
27879 ++ bool found = false;
27880 + int err;
27881 +
27882 + // Select an IT context as IRQ target.
27883 + list_for_each_entry(s, &d->streams, list) {
27884 +- if (s->direction == AMDTP_OUT_STREAM)
27885 ++ if (s->direction == AMDTP_OUT_STREAM) {
27886 ++ found = true;
27887 + break;
27888 ++ }
27889 + }
27890 +- if (!s)
27891 ++ if (!found)
27892 + return -ENXIO;
27893 + d->irq_target = s;
27894 +
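The underlying bug: when list_for_each_entry() walks the whole list without hitting a break, the cursor is left pointing at a fictitious entry computed from the list head, never NULL, so the old "if (!s)" test could not fire. A self-contained demonstration with a miniature version of the kernel list macro (GCC typeof required):

#include <stddef.h>
#include <stdio.h>

/* Minimal circular list in the style of include/linux/list.h. */
struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry(pos, head, member)                          \
    for (pos = container_of((head)->next, typeof(*pos), member);        \
         &pos->member != (head);                                        \
         pos = container_of(pos->member.next, typeof(*pos), member))

struct stream { int direction; struct list_head list; };

int main(void)
{
    struct list_head head = { &head, &head }; /* empty list */
    struct stream *s;
    int found = 0;

    list_for_each_entry(s, &head, list) {
        if (s->direction == 1) {
            found = 1;
            break;
        }
    }

    /* After a full (here: empty) walk, s is a bogus pointer derived
     * from &head, never NULL -- so test the flag, not the cursor. */
    printf("s=%p found=%d\n", (void *)s, found);
    return 0;
}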
27895 +diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c
27896 +index b612ee3e33b65..317a4242cfe9f 100644
27897 +--- a/sound/firewire/bebob/bebob_stream.c
27898 ++++ b/sound/firewire/bebob/bebob_stream.c
27899 +@@ -883,6 +883,11 @@ static int detect_midi_ports(struct snd_bebob *bebob,
27900 + err = avc_bridgeco_get_plug_ch_count(bebob->unit, addr, &ch_count);
27901 + if (err < 0)
27902 + break;
27903 ++	// Yamaha GO44, GO46, Terratec Phase 24 and Phase x24 report 0 for the number of
27904 ++	// channels in external output plug 3 (MIDI type) even if they have a pair of physical
27905 ++	// MIDI jacks. As a workaround, assume one channel.
27906 ++ if (ch_count == 0)
27907 ++ ch_count = 1;
27908 + *midi_ports += ch_count;
27909 + }
27910 +
27911 +@@ -961,12 +966,12 @@ int snd_bebob_stream_discover(struct snd_bebob *bebob)
27912 + if (err < 0)
27913 + goto end;
27914 +
27915 +- err = detect_midi_ports(bebob, bebob->rx_stream_formations, addr, AVC_BRIDGECO_PLUG_DIR_IN,
27916 ++ err = detect_midi_ports(bebob, bebob->tx_stream_formations, addr, AVC_BRIDGECO_PLUG_DIR_IN,
27917 + plugs[2], &bebob->midi_input_ports);
27918 + if (err < 0)
27919 + goto end;
27920 +
27921 +- err = detect_midi_ports(bebob, bebob->tx_stream_formations, addr, AVC_BRIDGECO_PLUG_DIR_OUT,
27922 ++ err = detect_midi_ports(bebob, bebob->rx_stream_formations, addr, AVC_BRIDGECO_PLUG_DIR_OUT,
27923 + plugs[3], &bebob->midi_output_ports);
27924 + if (err < 0)
27925 + goto end;
27926 +diff --git a/sound/firewire/motu/motu-protocol-v2.c b/sound/firewire/motu/motu-protocol-v2.c
27927 +index e59e69ab1538b..784073aa10265 100644
27928 +--- a/sound/firewire/motu/motu-protocol-v2.c
27929 ++++ b/sound/firewire/motu/motu-protocol-v2.c
27930 +@@ -353,6 +353,7 @@ const struct snd_motu_spec snd_motu_spec_8pre = {
27931 + .protocol_version = SND_MOTU_PROTOCOL_V2,
27932 + .flags = SND_MOTU_SPEC_RX_MIDI_2ND_Q |
27933 + SND_MOTU_SPEC_TX_MIDI_2ND_Q,
27934 +- .tx_fixed_pcm_chunks = {10, 6, 0},
27935 +- .rx_fixed_pcm_chunks = {10, 6, 0},
27936 ++ // Two dummy chunks always in the end of data block.
27937 ++ .tx_fixed_pcm_chunks = {10, 10, 0},
27938 ++ .rx_fixed_pcm_chunks = {6, 6, 0},
27939 + };
27940 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
27941 +index ab5113cccffae..1ca320fef670f 100644
27942 +--- a/sound/pci/hda/patch_realtek.c
27943 ++++ b/sound/pci/hda/patch_realtek.c
27944 +@@ -385,6 +385,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
27945 + alc_update_coef_idx(codec, 0x67, 0xf000, 0x3000);
27946 + fallthrough;
27947 + case 0x10ec0215:
27948 ++ case 0x10ec0230:
27949 + case 0x10ec0233:
27950 + case 0x10ec0235:
27951 + case 0x10ec0236:
27952 +@@ -3153,6 +3154,7 @@ static void alc_disable_headset_jack_key(struct hda_codec *codec)
27953 + alc_update_coef_idx(codec, 0x49, 0x0045, 0x0);
27954 + alc_update_coef_idx(codec, 0x44, 0x0045 << 8, 0x0);
27955 + break;
27956 ++ case 0x10ec0230:
27957 + case 0x10ec0236:
27958 + case 0x10ec0256:
27959 + alc_write_coef_idx(codec, 0x48, 0x0);
27960 +@@ -3180,6 +3182,7 @@ static void alc_enable_headset_jack_key(struct hda_codec *codec)
27961 + alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045);
27962 + alc_update_coef_idx(codec, 0x44, 0x007f << 8, 0x0045 << 8);
27963 + break;
27964 ++ case 0x10ec0230:
27965 + case 0x10ec0236:
27966 + case 0x10ec0256:
27967 + alc_write_coef_idx(codec, 0x48, 0xd011);
27968 +@@ -4744,6 +4747,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
27969 + case 0x10ec0255:
27970 + alc_process_coef_fw(codec, coef0255);
27971 + break;
27972 ++ case 0x10ec0230:
27973 + case 0x10ec0236:
27974 + case 0x10ec0256:
27975 + alc_process_coef_fw(codec, coef0256);
27976 +@@ -4858,6 +4862,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
27977 + alc_process_coef_fw(codec, coef0255);
27978 + snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
27979 + break;
27980 ++ case 0x10ec0230:
27981 + case 0x10ec0236:
27982 + case 0x10ec0256:
27983 + alc_write_coef_idx(codec, 0x45, 0xc489);
27984 +@@ -5007,6 +5012,7 @@ static void alc_headset_mode_default(struct hda_codec *codec)
27985 + case 0x10ec0255:
27986 + alc_process_coef_fw(codec, coef0255);
27987 + break;
27988 ++ case 0x10ec0230:
27989 + case 0x10ec0236:
27990 + case 0x10ec0256:
27991 + alc_write_coef_idx(codec, 0x1b, 0x0e4b);
27992 +@@ -5105,6 +5111,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
27993 + case 0x10ec0255:
27994 + alc_process_coef_fw(codec, coef0255);
27995 + break;
27996 ++ case 0x10ec0230:
27997 + case 0x10ec0236:
27998 + case 0x10ec0256:
27999 + alc_process_coef_fw(codec, coef0256);
28000 +@@ -5218,6 +5225,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
28001 + case 0x10ec0255:
28002 + alc_process_coef_fw(codec, coef0255);
28003 + break;
28004 ++ case 0x10ec0230:
28005 + case 0x10ec0236:
28006 + case 0x10ec0256:
28007 + alc_process_coef_fw(codec, coef0256);
28008 +@@ -5318,6 +5326,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
28009 + val = alc_read_coef_idx(codec, 0x46);
28010 + is_ctia = (val & 0x0070) == 0x0070;
28011 + break;
28012 ++ case 0x10ec0230:
28013 + case 0x10ec0236:
28014 + case 0x10ec0256:
28015 + alc_write_coef_idx(codec, 0x1b, 0x0e4b);
28016 +@@ -5611,6 +5620,7 @@ static void alc255_set_default_jack_type(struct hda_codec *codec)
28017 + case 0x10ec0255:
28018 + alc_process_coef_fw(codec, alc255fw);
28019 + break;
28020 ++ case 0x10ec0230:
28021 + case 0x10ec0236:
28022 + case 0x10ec0256:
28023 + alc_process_coef_fw(codec, alc256fw);
28024 +@@ -6211,6 +6221,7 @@ static void alc_combo_jack_hp_jd_restart(struct hda_codec *codec)
28025 + alc_update_coef_idx(codec, 0x4a, 0x8000, 1 << 15); /* Reset HP JD */
28026 + alc_update_coef_idx(codec, 0x4a, 0x8000, 0 << 15);
28027 + break;
28028 ++ case 0x10ec0230:
28029 + case 0x10ec0235:
28030 + case 0x10ec0236:
28031 + case 0x10ec0255:
28032 +@@ -6343,6 +6354,24 @@ static void alc_fixup_no_int_mic(struct hda_codec *codec,
28033 + }
28034 + }
28035 +
28036 ++static void alc285_fixup_hp_spectre_x360(struct hda_codec *codec,
28037 ++ const struct hda_fixup *fix, int action)
28038 ++{
28039 ++ static const hda_nid_t conn[] = { 0x02 };
28040 ++ static const struct hda_pintbl pincfgs[] = {
28041 ++ { 0x14, 0x90170110 }, /* rear speaker */
28042 ++ { }
28043 ++ };
28044 ++
28045 ++ switch (action) {
28046 ++ case HDA_FIXUP_ACT_PRE_PROBE:
28047 ++ snd_hda_apply_pincfgs(codec, pincfgs);
28048 ++ /* force front speaker to DAC1 */
28049 ++ snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn);
28050 ++ break;
28051 ++ }
28052 ++}
28053 ++
28054 + /* for hda_fixup_thinkpad_acpi() */
28055 + #include "thinkpad_helper.c"
28056 +
28057 +@@ -7810,6 +7839,8 @@ static const struct hda_fixup alc269_fixups[] = {
28058 + { 0x20, AC_VERB_SET_PROC_COEF, 0x4e4b },
28059 + { }
28060 + },
28061 ++ .chained = true,
28062 ++ .chain_id = ALC289_FIXUP_ASUS_GA401,
28063 + },
28064 + [ALC285_FIXUP_HP_GPIO_LED] = {
28065 + .type = HDA_FIXUP_FUNC,
28066 +@@ -8127,13 +8158,8 @@ static const struct hda_fixup alc269_fixups[] = {
28067 + .chain_id = ALC269_FIXUP_HP_LINE1_MIC1_LED,
28068 + },
28069 + [ALC285_FIXUP_HP_SPECTRE_X360] = {
28070 +- .type = HDA_FIXUP_PINS,
28071 +- .v.pins = (const struct hda_pintbl[]) {
28072 +- { 0x14, 0x90170110 }, /* enable top speaker */
28073 +- {}
28074 +- },
28075 +- .chained = true,
28076 +- .chain_id = ALC285_FIXUP_SPEAKER2_TO_DAC1,
28077 ++ .type = HDA_FIXUP_FUNC,
28078 ++ .v.func = alc285_fixup_hp_spectre_x360,
28079 + },
28080 + [ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP] = {
28081 + .type = HDA_FIXUP_FUNC,
28082 +@@ -8319,6 +8345,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
28083 + SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
28084 + SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
28085 + SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360),
28086 ++ SND_PCI_QUIRK(0x103c, 0x861f, "HP Elite Dragonfly G1", ALC285_FIXUP_HP_GPIO_AMP_INIT),
28087 + SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
28088 + SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
28089 + SND_PCI_QUIRK(0x103c, 0x8716, "HP Elite Dragonfly G2 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
28090 +@@ -8336,19 +8363,26 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
28091 + ALC285_FIXUP_HP_GPIO_AMP_INIT),
28092 + SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED),
28093 + SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
28094 ++ SND_PCI_QUIRK(0x103c, 0x87e7, "HP ProBook 450 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
28095 ++ SND_PCI_QUIRK(0x103c, 0x87f1, "HP ProBook 630 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
28096 + SND_PCI_QUIRK(0x103c, 0x87f2, "HP ProBook 640 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
28097 + SND_PCI_QUIRK(0x103c, 0x87f4, "HP", ALC287_FIXUP_HP_GPIO_LED),
28098 + SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
28099 + SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
28100 ++ SND_PCI_QUIRK(0x103c, 0x880d, "HP EliteBook 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
28101 + SND_PCI_QUIRK(0x103c, 0x8846, "HP EliteBook 850 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
28102 ++ SND_PCI_QUIRK(0x103c, 0x8847, "HP EliteBook x360 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
28103 + SND_PCI_QUIRK(0x103c, 0x884b, "HP EliteBook 840 Aero G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
28104 + SND_PCI_QUIRK(0x103c, 0x884c, "HP EliteBook 840 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
28105 ++ SND_PCI_QUIRK(0x103c, 0x8862, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
28106 ++ SND_PCI_QUIRK(0x103c, 0x8863, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
28107 + SND_PCI_QUIRK(0x103c, 0x886d, "HP ZBook Fury 17.3 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
28108 + SND_PCI_QUIRK(0x103c, 0x8870, "HP ZBook Fury 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
28109 + SND_PCI_QUIRK(0x103c, 0x8873, "HP ZBook Studio 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
28110 + SND_PCI_QUIRK(0x103c, 0x888d, "HP ZBook Power 15.6 inch G8 Mobile Workstation PC", ALC236_FIXUP_HP_GPIO_LED),
28111 + SND_PCI_QUIRK(0x103c, 0x8896, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_MUTE_LED),
28112 + SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
28113 ++ SND_PCI_QUIRK(0x103c, 0x88d0, "HP Pavilion 15-eh1xxx (mainboard 88D0)", ALC287_FIXUP_HP_GPIO_LED),
28114 + SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
28115 + SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
28116 + SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
28117 +@@ -9341,6 +9375,7 @@ static int patch_alc269(struct hda_codec *codec)
28118 + spec->shutup = alc256_shutup;
28119 + spec->init_hook = alc256_init;
28120 + break;
28121 ++ case 0x10ec0230:
28122 + case 0x10ec0236:
28123 + case 0x10ec0256:
28124 + spec->codec_variant = ALC269_TYPE_ALC256;
28125 +@@ -10632,6 +10667,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
28126 + HDA_CODEC_ENTRY(0x10ec0221, "ALC221", patch_alc269),
28127 + HDA_CODEC_ENTRY(0x10ec0222, "ALC222", patch_alc269),
28128 + HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269),
28129 ++ HDA_CODEC_ENTRY(0x10ec0230, "ALC236", patch_alc269),
28130 + HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269),
28131 + HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
28132 + HDA_CODEC_ENTRY(0x10ec0234, "ALC234", patch_alc269),
28133 +diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
28134 +index 5b124c4ad5725..11b398be0954f 100644
28135 +--- a/sound/pci/intel8x0.c
28136 ++++ b/sound/pci/intel8x0.c
28137 +@@ -692,7 +692,7 @@ static inline void snd_intel8x0_update(struct intel8x0 *chip, struct ichdev *ich
28138 + int status, civ, i, step;
28139 + int ack = 0;
28140 +
28141 +- if (!ichdev->prepared || ichdev->suspended)
28142 ++ if (!(ichdev->prepared || chip->in_measurement) || ichdev->suspended)
28143 + return;
28144 +
28145 + spin_lock_irqsave(&chip->reg_lock, flags);
28146 +diff --git a/sound/soc/atmel/atmel-i2s.c b/sound/soc/atmel/atmel-i2s.c
28147 +index 584656cc7d3cb..e5c4625b7771f 100644
28148 +--- a/sound/soc/atmel/atmel-i2s.c
28149 ++++ b/sound/soc/atmel/atmel-i2s.c
28150 +@@ -200,6 +200,7 @@ struct atmel_i2s_dev {
28151 + unsigned int fmt;
28152 + const struct atmel_i2s_gck_param *gck_param;
28153 + const struct atmel_i2s_caps *caps;
28154 ++ int clk_use_no;
28155 + };
28156 +
28157 + static irqreturn_t atmel_i2s_interrupt(int irq, void *dev_id)
28158 +@@ -321,9 +322,16 @@ static int atmel_i2s_hw_params(struct snd_pcm_substream *substream,
28159 + {
28160 + struct atmel_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
28161 + bool is_playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
28162 +- unsigned int mr = 0;
28163 ++ unsigned int mr = 0, mr_mask;
28164 + int ret;
28165 +
28166 ++ mr_mask = ATMEL_I2SC_MR_FORMAT_MASK | ATMEL_I2SC_MR_MODE_MASK |
28167 ++ ATMEL_I2SC_MR_DATALENGTH_MASK;
28168 ++ if (is_playback)
28169 ++ mr_mask |= ATMEL_I2SC_MR_TXMONO;
28170 ++ else
28171 ++ mr_mask |= ATMEL_I2SC_MR_RXMONO;
28172 ++
28173 + switch (dev->fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
28174 + case SND_SOC_DAIFMT_I2S:
28175 + mr |= ATMEL_I2SC_MR_FORMAT_I2S;
28176 +@@ -402,7 +410,7 @@ static int atmel_i2s_hw_params(struct snd_pcm_substream *substream,
28177 + return -EINVAL;
28178 + }
28179 +
28180 +- return regmap_write(dev->regmap, ATMEL_I2SC_MR, mr);
28181 ++ return regmap_update_bits(dev->regmap, ATMEL_I2SC_MR, mr_mask, mr);
28182 + }
28183 +
28184 + static int atmel_i2s_switch_mck_generator(struct atmel_i2s_dev *dev,
28185 +@@ -495,18 +503,28 @@ static int atmel_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
28186 + is_master = (mr & ATMEL_I2SC_MR_MODE_MASK) == ATMEL_I2SC_MR_MODE_MASTER;
28187 +
28188 + /* If master starts, enable the audio clock. */
28189 +- if (is_master && mck_enabled)
28190 +- err = atmel_i2s_switch_mck_generator(dev, true);
28191 +- if (err)
28192 +- return err;
28193 ++ if (is_master && mck_enabled) {
28194 ++ if (!dev->clk_use_no) {
28195 ++ err = atmel_i2s_switch_mck_generator(dev, true);
28196 ++ if (err)
28197 ++ return err;
28198 ++ }
28199 ++ dev->clk_use_no++;
28200 ++ }
28201 +
28202 + err = regmap_write(dev->regmap, ATMEL_I2SC_CR, cr);
28203 + if (err)
28204 + return err;
28205 +
28206 + /* If master stops, disable the audio clock. */
28207 +- if (is_master && !mck_enabled)
28208 +- err = atmel_i2s_switch_mck_generator(dev, false);
28209 ++ if (is_master && !mck_enabled) {
28210 ++ if (dev->clk_use_no == 1) {
28211 ++ err = atmel_i2s_switch_mck_generator(dev, false);
28212 ++ if (err)
28213 ++ return err;
28214 ++ }
28215 ++ dev->clk_use_no--;
28216 ++ }
28217 +
28218 + return err;
28219 + }
28220 +@@ -542,6 +560,7 @@ static struct snd_soc_dai_driver atmel_i2s_dai = {
28221 + },
28222 + .ops = &atmel_i2s_dai_ops,
28223 + .symmetric_rate = 1,
28224 ++ .symmetric_sample_bits = 1,
28225 + };
28226 +
28227 + static const struct snd_soc_component_driver atmel_i2s_component = {
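The new clk_use_no counter turns the master-clock switch into a refcount: only the first trigger-start enables the generator and only the last trigger-stop disables it, so stopping one of two concurrent streams no longer cuts the clock from under the other. A toy model of the pattern (all names hypothetical):

#include <stdio.h>

/* Hypothetical device with a shared master-clock generator that the
 * playback and capture streams both start and stop. */
struct dev { int clk_use_no; int mck_on; };

static void switch_mck(struct dev *d, int on) { d->mck_on = on; }

/* Enable on the 0 -> 1 transition only. */
static void clk_get_ref(struct dev *d)
{
    if (!d->clk_use_no)
        switch_mck(d, 1);
    d->clk_use_no++;
}

/* Disable on the 1 -> 0 transition only. */
static void clk_put_ref(struct dev *d)
{
    if (d->clk_use_no == 1)
        switch_mck(d, 0);
    d->clk_use_no--;
}

int main(void)
{
    struct dev d = { 0, 0 };

    clk_get_ref(&d);              /* playback starts: mck on */
    clk_get_ref(&d);              /* capture starts: still on */
    clk_put_ref(&d);              /* playback stops */
    printf("mck=%d\n", d.mck_on); /* 1: capture still running */
    clk_put_ref(&d);
    printf("mck=%d\n", d.mck_on); /* 0: last user gone */
    return 0;
}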
28228 +diff --git a/sound/soc/codecs/cs42l42.h b/sound/soc/codecs/cs42l42.h
28229 +index 36b763f0d1a06..386c40f9ed311 100644
28230 +--- a/sound/soc/codecs/cs42l42.h
28231 ++++ b/sound/soc/codecs/cs42l42.h
28232 +@@ -79,7 +79,7 @@
28233 + #define CS42L42_HP_PDN_SHIFT 3
28234 + #define CS42L42_HP_PDN_MASK (1 << CS42L42_HP_PDN_SHIFT)
28235 + #define CS42L42_ADC_PDN_SHIFT 2
28236 +-#define CS42L42_ADC_PDN_MASK (1 << CS42L42_HP_PDN_SHIFT)
28237 ++#define CS42L42_ADC_PDN_MASK (1 << CS42L42_ADC_PDN_SHIFT)
28238 + #define CS42L42_PDN_ALL_SHIFT 0
28239 + #define CS42L42_PDN_ALL_MASK (1 << CS42L42_PDN_ALL_SHIFT)
28240 +
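The one-line fix above corrects a copy-paste bug where the ADC power-down mask was built from the headphone field's shift, making the two masks alias. A tiny sketch of the convention plus a compile-time guard against recurrence (constants are illustrative):

#include <stdio.h>

/* Always derive a field's mask from that field's own shift. */
#define HP_PDN_SHIFT   3
#define HP_PDN_MASK    (1 << HP_PDN_SHIFT)
#define ADC_PDN_SHIFT  2
#define ADC_PDN_MASK   (1 << ADC_PDN_SHIFT)

/* Cheap compile-time guard: distinct fields must not share a mask. */
_Static_assert(HP_PDN_MASK != ADC_PDN_MASK, "copy-paste mask aliasing");

int main(void)
{
    printf("hp=0x%x adc=0x%x\n", HP_PDN_MASK, ADC_PDN_MASK);
    return 0;
}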
28241 +diff --git a/sound/soc/codecs/max98373-sdw.c b/sound/soc/codecs/max98373-sdw.c
28242 +index f3a12205cd484..dc520effc61cb 100644
28243 +--- a/sound/soc/codecs/max98373-sdw.c
28244 ++++ b/sound/soc/codecs/max98373-sdw.c
28245 +@@ -271,7 +271,7 @@ static __maybe_unused int max98373_resume(struct device *dev)
28246 + struct max98373_priv *max98373 = dev_get_drvdata(dev);
28247 + unsigned long time;
28248 +
28249 +- if (!max98373->hw_init)
28250 ++ if (!max98373->first_hw_init)
28251 + return 0;
28252 +
28253 + if (!slave->unattach_request)
28254 +@@ -362,7 +362,7 @@ static int max98373_io_init(struct sdw_slave *slave)
28255 + struct device *dev = &slave->dev;
28256 + struct max98373_priv *max98373 = dev_get_drvdata(dev);
28257 +
28258 +- if (max98373->pm_init_once) {
28259 ++ if (max98373->first_hw_init) {
28260 + regcache_cache_only(max98373->regmap, false);
28261 + regcache_cache_bypass(max98373->regmap, true);
28262 + }
28263 +@@ -370,7 +370,7 @@ static int max98373_io_init(struct sdw_slave *slave)
28264 + /*
28265 + * PM runtime is only enabled when a Slave reports as Attached
28266 + */
28267 +- if (!max98373->pm_init_once) {
28268 ++ if (!max98373->first_hw_init) {
28269 + /* set autosuspend parameters */
28270 + pm_runtime_set_autosuspend_delay(dev, 3000);
28271 + pm_runtime_use_autosuspend(dev);
28272 +@@ -462,12 +462,12 @@ static int max98373_io_init(struct sdw_slave *slave)
28273 + regmap_write(max98373->regmap, MAX98373_R20B5_BDE_EN, 1);
28274 + regmap_write(max98373->regmap, MAX98373_R20E2_LIMITER_EN, 1);
28275 +
28276 +- if (max98373->pm_init_once) {
28277 ++ if (max98373->first_hw_init) {
28278 + regcache_cache_bypass(max98373->regmap, false);
28279 + regcache_mark_dirty(max98373->regmap);
28280 + }
28281 +
28282 +- max98373->pm_init_once = true;
28283 ++ max98373->first_hw_init = true;
28284 + max98373->hw_init = true;
28285 +
28286 + pm_runtime_mark_last_busy(dev);
28287 +@@ -787,6 +787,8 @@ static int max98373_init(struct sdw_slave *slave, struct regmap *regmap)
28288 + max98373->cache = devm_kcalloc(dev, max98373->cache_num,
28289 + sizeof(*max98373->cache),
28290 + GFP_KERNEL);
28291 ++ if (!max98373->cache)
28292 ++ return -ENOMEM;
28293 +
28294 + for (i = 0; i < max98373->cache_num; i++)
28295 + max98373->cache[i].reg = max98373_sdw_cache_reg[i];
28296 +@@ -795,7 +797,7 @@ static int max98373_init(struct sdw_slave *slave, struct regmap *regmap)
28297 + max98373_slot_config(dev, max98373);
28298 +
28299 + max98373->hw_init = false;
28300 +- max98373->pm_init_once = false;
28301 ++ max98373->first_hw_init = false;
28302 +
28303 + /* codec registration */
28304 + ret = devm_snd_soc_register_component(dev, &soc_codec_dev_max98373_sdw,
28305 +diff --git a/sound/soc/codecs/max98373.h b/sound/soc/codecs/max98373.h
28306 +index 73a2cf69d84ad..e1810b3b1620b 100644
28307 +--- a/sound/soc/codecs/max98373.h
28308 ++++ b/sound/soc/codecs/max98373.h
28309 +@@ -226,7 +226,7 @@ struct max98373_priv {
28310 + /* variables to support soundwire */
28311 + struct sdw_slave *slave;
28312 + bool hw_init;
28313 +- bool pm_init_once;
28314 ++ bool first_hw_init;
28315 + int slot;
28316 + unsigned int rx_mask;
28317 + };
28318 +diff --git a/sound/soc/codecs/rk3328_codec.c b/sound/soc/codecs/rk3328_codec.c
28319 +index bfefefcc76d81..758d439e8c7a5 100644
28320 +--- a/sound/soc/codecs/rk3328_codec.c
28321 ++++ b/sound/soc/codecs/rk3328_codec.c
28322 +@@ -474,7 +474,8 @@ static int rk3328_platform_probe(struct platform_device *pdev)
28323 + rk3328->pclk = devm_clk_get(&pdev->dev, "pclk");
28324 + if (IS_ERR(rk3328->pclk)) {
28325 + dev_err(&pdev->dev, "can't get acodec pclk\n");
28326 +- return PTR_ERR(rk3328->pclk);
28327 ++ ret = PTR_ERR(rk3328->pclk);
28328 ++ goto err_unprepare_mclk;
28329 + }
28330 +
28331 + ret = clk_prepare_enable(rk3328->pclk);
28332 +@@ -484,19 +485,34 @@ static int rk3328_platform_probe(struct platform_device *pdev)
28333 + }
28334 +
28335 + base = devm_platform_ioremap_resource(pdev, 0);
28336 +- if (IS_ERR(base))
28337 +- return PTR_ERR(base);
28338 ++ if (IS_ERR(base)) {
28339 ++ ret = PTR_ERR(base);
28340 ++ goto err_unprepare_pclk;
28341 ++ }
28342 +
28343 + rk3328->regmap = devm_regmap_init_mmio(&pdev->dev, base,
28344 + &rk3328_codec_regmap_config);
28345 +- if (IS_ERR(rk3328->regmap))
28346 +- return PTR_ERR(rk3328->regmap);
28347 ++ if (IS_ERR(rk3328->regmap)) {
28348 ++ ret = PTR_ERR(rk3328->regmap);
28349 ++ goto err_unprepare_pclk;
28350 ++ }
28351 +
28352 + platform_set_drvdata(pdev, rk3328);
28353 +
28354 +- return devm_snd_soc_register_component(&pdev->dev, &soc_codec_rk3328,
28355 ++ ret = devm_snd_soc_register_component(&pdev->dev, &soc_codec_rk3328,
28356 + rk3328_dai,
28357 + ARRAY_SIZE(rk3328_dai));
28358 ++ if (ret)
28359 ++ goto err_unprepare_pclk;
28360 ++
28361 ++ return 0;
28362 ++
28363 ++err_unprepare_pclk:
28364 ++ clk_disable_unprepare(rk3328->pclk);
28365 ++
28366 ++err_unprepare_mclk:
28367 ++ clk_disable_unprepare(rk3328->mclk);
28368 ++ return ret;
28369 + }
28370 +
28371 + static const struct of_device_id rk3328_codec_of_match[] __maybe_unused = {
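The reworked probe converts bare early returns into a goto ladder so that every failure point releases exactly the clocks prepared before it, in reverse order. A skeleton of the pattern with simulated acquire/release functions (all names invented):

#include <stdio.h>

static int enable_mclk(void)   { return 0; }
static int enable_pclk(void)   { return 0; }
static int map_regs(void)      { return -1; /* simulate failure */ }
static void disable_mclk(void) { puts("mclk off"); }
static void disable_pclk(void) { puts("pclk off"); }

static int probe(void)
{
    int ret;

    ret = enable_mclk();
    if (ret)
        return ret;

    ret = enable_pclk();
    if (ret)
        goto err_unprepare_mclk;

    ret = map_regs();
    if (ret)
        goto err_unprepare_pclk;

    return 0;

/* Each label undoes one step; falling through unwinds the rest. */
err_unprepare_pclk:
    disable_pclk();
err_unprepare_mclk:
    disable_mclk();
    return ret;
}

int main(void)
{
    printf("probe=%d\n", probe()); /* pclk off, mclk off, then -1 */
    return 0;
}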
28372 +diff --git a/sound/soc/codecs/rt1308-sdw.c b/sound/soc/codecs/rt1308-sdw.c
28373 +index 1c226994aebd8..f716668de6400 100644
28374 +--- a/sound/soc/codecs/rt1308-sdw.c
28375 ++++ b/sound/soc/codecs/rt1308-sdw.c
28376 +@@ -709,7 +709,7 @@ static int __maybe_unused rt1308_dev_resume(struct device *dev)
28377 + struct rt1308_sdw_priv *rt1308 = dev_get_drvdata(dev);
28378 + unsigned long time;
28379 +
28380 +- if (!rt1308->hw_init)
28381 ++ if (!rt1308->first_hw_init)
28382 + return 0;
28383 +
28384 + if (!slave->unattach_request)
28385 +diff --git a/sound/soc/codecs/rt1316-sdw.c b/sound/soc/codecs/rt1316-sdw.c
28386 +index 3b029c56467de..09b4914bba1bf 100644
28387 +--- a/sound/soc/codecs/rt1316-sdw.c
28388 ++++ b/sound/soc/codecs/rt1316-sdw.c
28389 +@@ -701,7 +701,7 @@ static int __maybe_unused rt1316_dev_resume(struct device *dev)
28390 + struct rt1316_sdw_priv *rt1316 = dev_get_drvdata(dev);
28391 + unsigned long time;
28392 +
28393 +- if (!rt1316->hw_init)
28394 ++ if (!rt1316->first_hw_init)
28395 + return 0;
28396 +
28397 + if (!slave->unattach_request)
28398 +diff --git a/sound/soc/codecs/rt5682-i2c.c b/sound/soc/codecs/rt5682-i2c.c
28399 +index 8ea9f1d9fec0e..cd964e023d96e 100644
28400 +--- a/sound/soc/codecs/rt5682-i2c.c
28401 ++++ b/sound/soc/codecs/rt5682-i2c.c
28402 +@@ -273,6 +273,7 @@ static void rt5682_i2c_shutdown(struct i2c_client *client)
28403 + {
28404 + struct rt5682_priv *rt5682 = i2c_get_clientdata(client);
28405 +
28406 ++ disable_irq(client->irq);
28407 + cancel_delayed_work_sync(&rt5682->jack_detect_work);
28408 + cancel_delayed_work_sync(&rt5682->jd_check_work);
28409 +
28410 +diff --git a/sound/soc/codecs/rt5682-sdw.c b/sound/soc/codecs/rt5682-sdw.c
28411 +index e78ba3b064c4f..54873730bec55 100644
28412 +--- a/sound/soc/codecs/rt5682-sdw.c
28413 ++++ b/sound/soc/codecs/rt5682-sdw.c
28414 +@@ -400,6 +400,11 @@ static int rt5682_io_init(struct device *dev, struct sdw_slave *slave)
28415 +
28416 + pm_runtime_get_noresume(&slave->dev);
28417 +
28418 ++ if (rt5682->first_hw_init) {
28419 ++ regcache_cache_only(rt5682->regmap, false);
28420 ++ regcache_cache_bypass(rt5682->regmap, true);
28421 ++ }
28422 ++
28423 + while (loop > 0) {
28424 + regmap_read(rt5682->regmap, RT5682_DEVICE_ID, &val);
28425 + if (val == DEVICE_ID)
28426 +@@ -408,14 +413,11 @@ static int rt5682_io_init(struct device *dev, struct sdw_slave *slave)
28427 + usleep_range(30000, 30005);
28428 + loop--;
28429 + }
28430 ++
28431 + if (val != DEVICE_ID) {
28432 + dev_err(dev, "Device with ID register %x is not rt5682\n", val);
28433 +- return -ENODEV;
28434 +- }
28435 +-
28436 +- if (rt5682->first_hw_init) {
28437 +- regcache_cache_only(rt5682->regmap, false);
28438 +- regcache_cache_bypass(rt5682->regmap, true);
28439 ++ ret = -ENODEV;
28440 ++ goto err_nodev;
28441 + }
28442 +
28443 + rt5682_calibrate(rt5682);
28444 +@@ -486,10 +488,11 @@ reinit:
28445 + rt5682->hw_init = true;
28446 + rt5682->first_hw_init = true;
28447 +
28448 ++err_nodev:
28449 + pm_runtime_mark_last_busy(&slave->dev);
28450 + pm_runtime_put_autosuspend(&slave->dev);
28451 +
28452 +- dev_dbg(&slave->dev, "%s hw_init complete\n", __func__);
28453 ++ dev_dbg(&slave->dev, "%s hw_init complete: %d\n", __func__, ret);
28454 +
28455 + return ret;
28456 + }
28457 +@@ -743,7 +746,7 @@ static int __maybe_unused rt5682_dev_resume(struct device *dev)
28458 + struct rt5682_priv *rt5682 = dev_get_drvdata(dev);
28459 + unsigned long time;
28460 +
28461 +- if (!rt5682->hw_init)
28462 ++ if (!rt5682->first_hw_init)
28463 + return 0;
28464 +
28465 + if (!slave->unattach_request)
28466 +diff --git a/sound/soc/codecs/rt700-sdw.c b/sound/soc/codecs/rt700-sdw.c
28467 +index ff9c081fd52af..d1d9c0f455b43 100644
28468 +--- a/sound/soc/codecs/rt700-sdw.c
28469 ++++ b/sound/soc/codecs/rt700-sdw.c
28470 +@@ -498,7 +498,7 @@ static int __maybe_unused rt700_dev_resume(struct device *dev)
28471 + struct rt700_priv *rt700 = dev_get_drvdata(dev);
28472 + unsigned long time;
28473 +
28474 +- if (!rt700->hw_init)
28475 ++ if (!rt700->first_hw_init)
28476 + return 0;
28477 +
28478 + if (!slave->unattach_request)
28479 +diff --git a/sound/soc/codecs/rt711-sdca-sdw.c b/sound/soc/codecs/rt711-sdca-sdw.c
28480 +index 9685c8905468a..03cd3e0142f99 100644
28481 +--- a/sound/soc/codecs/rt711-sdca-sdw.c
28482 ++++ b/sound/soc/codecs/rt711-sdca-sdw.c
28483 +@@ -75,6 +75,16 @@ static bool rt711_sdca_mbq_readable_register(struct device *dev, unsigned int re
28484 + case 0x5b00000 ... 0x5b000ff:
28485 + case 0x5f00000 ... 0x5f000ff:
28486 + case 0x6100000 ... 0x61000ff:
28487 ++ case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT711_SDCA_ENT_USER_FU05, RT711_SDCA_CTL_FU_VOLUME, CH_L):
28488 ++ case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT711_SDCA_ENT_USER_FU05, RT711_SDCA_CTL_FU_VOLUME, CH_R):
28489 ++ case SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT711_SDCA_ENT_USER_FU1E, RT711_SDCA_CTL_FU_VOLUME, CH_L):
28490 ++ case SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT711_SDCA_ENT_USER_FU1E, RT711_SDCA_CTL_FU_VOLUME, CH_R):
28491 ++ case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT711_SDCA_ENT_USER_FU0F, RT711_SDCA_CTL_FU_VOLUME, CH_L):
28492 ++ case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT711_SDCA_ENT_USER_FU0F, RT711_SDCA_CTL_FU_VOLUME, CH_R):
28493 ++ case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT711_SDCA_ENT_PLATFORM_FU44, RT711_SDCA_CTL_FU_CH_GAIN, CH_L):
28494 ++ case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT711_SDCA_ENT_PLATFORM_FU44, RT711_SDCA_CTL_FU_CH_GAIN, CH_R):
28495 ++ case SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT711_SDCA_ENT_PLATFORM_FU15, RT711_SDCA_CTL_FU_CH_GAIN, CH_L):
28496 ++ case SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT711_SDCA_ENT_PLATFORM_FU15, RT711_SDCA_CTL_FU_CH_GAIN, CH_R):
28497 + return true;
28498 + default:
28499 + return false;
28500 +@@ -380,7 +390,7 @@ static int __maybe_unused rt711_sdca_dev_resume(struct device *dev)
28501 + struct rt711_sdca_priv *rt711 = dev_get_drvdata(dev);
28502 + unsigned long time;
28503 +
28504 +- if (!rt711->hw_init)
28505 ++ if (!rt711->first_hw_init)
28506 + return 0;
28507 +
28508 + if (!slave->unattach_request)
28509 +diff --git a/sound/soc/codecs/rt711-sdca.c b/sound/soc/codecs/rt711-sdca.c
28510 +index 24a084e0b48a1..0b0c230dcf716 100644
28511 +--- a/sound/soc/codecs/rt711-sdca.c
28512 ++++ b/sound/soc/codecs/rt711-sdca.c
28513 +@@ -1500,6 +1500,8 @@ int rt711_sdca_io_init(struct device *dev, struct sdw_slave *slave)
28514 + if (rt711->first_hw_init) {
28515 + regcache_cache_only(rt711->regmap, false);
28516 + regcache_cache_bypass(rt711->regmap, true);
28517 ++ regcache_cache_only(rt711->mbq_regmap, false);
28518 ++ regcache_cache_bypass(rt711->mbq_regmap, true);
28519 + } else {
28520 + /*
28521 + * PM runtime is only enabled when a Slave reports as Attached
28522 +@@ -1565,6 +1567,8 @@ int rt711_sdca_io_init(struct device *dev, struct sdw_slave *slave)
28523 + if (rt711->first_hw_init) {
28524 + regcache_cache_bypass(rt711->regmap, false);
28525 + regcache_mark_dirty(rt711->regmap);
28526 ++ regcache_cache_bypass(rt711->mbq_regmap, false);
28527 ++ regcache_mark_dirty(rt711->mbq_regmap);
28528 + } else
28529 + rt711->first_hw_init = true;
28530 +
28531 +diff --git a/sound/soc/codecs/rt711-sdw.c b/sound/soc/codecs/rt711-sdw.c
28532 +index 8f5ebe92d4076..15299084429f2 100644
28533 +--- a/sound/soc/codecs/rt711-sdw.c
28534 ++++ b/sound/soc/codecs/rt711-sdw.c
28535 +@@ -501,7 +501,7 @@ static int __maybe_unused rt711_dev_resume(struct device *dev)
28536 + struct rt711_priv *rt711 = dev_get_drvdata(dev);
28537 + unsigned long time;
28538 +
28539 +- if (!rt711->hw_init)
28540 ++ if (!rt711->first_hw_init)
28541 + return 0;
28542 +
28543 + if (!slave->unattach_request)
28544 +diff --git a/sound/soc/codecs/rt715-sdca-sdw.c b/sound/soc/codecs/rt715-sdca-sdw.c
28545 +index 1350798406f0a..a5c673f43d824 100644
28546 +--- a/sound/soc/codecs/rt715-sdca-sdw.c
28547 ++++ b/sound/soc/codecs/rt715-sdca-sdw.c
28548 +@@ -70,6 +70,7 @@ static bool rt715_sdca_mbq_readable_register(struct device *dev, unsigned int re
28549 + case 0x2000036:
28550 + case 0x2000037:
28551 + case 0x2000039:
28552 ++ case 0x2000044:
28553 + case 0x6100000:
28554 + return true;
28555 + default:
28556 +@@ -224,7 +225,7 @@ static int __maybe_unused rt715_dev_resume(struct device *dev)
28557 + struct rt715_sdca_priv *rt715 = dev_get_drvdata(dev);
28558 + unsigned long time;
28559 +
28560 +- if (!rt715->hw_init)
28561 ++ if (!rt715->first_hw_init)
28562 + return 0;
28563 +
28564 + if (!slave->unattach_request)
28565 +diff --git a/sound/soc/codecs/rt715-sdca-sdw.h b/sound/soc/codecs/rt715-sdca-sdw.h
28566 +index cd365bb60747e..0cbc14844f8c2 100644
28567 +--- a/sound/soc/codecs/rt715-sdca-sdw.h
28568 ++++ b/sound/soc/codecs/rt715-sdca-sdw.h
28569 +@@ -113,6 +113,7 @@ static const struct reg_default rt715_mbq_reg_defaults_sdca[] = {
28570 + { 0x2000036, 0x0000 },
28571 + { 0x2000037, 0x0000 },
28572 + { 0x2000039, 0xaa81 },
28573 ++ { 0x2000044, 0x0202 },
28574 + { 0x6100000, 0x0100 },
28575 + { SDW_SDCA_CTL(FUN_MIC_ARRAY, RT715_SDCA_FU_ADC8_9_VOL,
28576 + RT715_SDCA_FU_VOL_CTRL, CH_01), 0x00 },
28577 +diff --git a/sound/soc/codecs/rt715-sdca.c b/sound/soc/codecs/rt715-sdca.c
28578 +index 7db76c19e0480..66e166568c508 100644
28579 +--- a/sound/soc/codecs/rt715-sdca.c
28580 ++++ b/sound/soc/codecs/rt715-sdca.c
28581 +@@ -997,7 +997,7 @@ int rt715_sdca_init(struct device *dev, struct regmap *mbq_regmap,
28582 + * HW init will be performed when device reports present
28583 + */
28584 + rt715->hw_init = false;
28585 +- rt715->first_init = false;
28586 ++ rt715->first_hw_init = false;
28587 +
28588 + ret = devm_snd_soc_register_component(dev,
28589 + &soc_codec_dev_rt715_sdca,
28590 +@@ -1018,7 +1018,7 @@ int rt715_sdca_io_init(struct device *dev, struct sdw_slave *slave)
28591 + /*
28592 + * PM runtime is only enabled when a Slave reports as Attached
28593 + */
28594 +- if (!rt715->first_init) {
28595 ++ if (!rt715->first_hw_init) {
28596 + /* set autosuspend parameters */
28597 + pm_runtime_set_autosuspend_delay(&slave->dev, 3000);
28598 + pm_runtime_use_autosuspend(&slave->dev);
28599 +@@ -1031,7 +1031,7 @@ int rt715_sdca_io_init(struct device *dev, struct sdw_slave *slave)
28600 +
28601 + pm_runtime_enable(&slave->dev);
28602 +
28603 +- rt715->first_init = true;
28604 ++ rt715->first_hw_init = true;
28605 + }
28606 +
28607 + pm_runtime_get_noresume(&slave->dev);
28608 +@@ -1054,6 +1054,9 @@ int rt715_sdca_io_init(struct device *dev, struct sdw_slave *slave)
28609 + rt715_sdca_index_update_bits(rt715, RT715_VENDOR_REG,
28610 + RT715_REV_1, 0x40, 0x40);
28611 + }
28612 ++ /* DFLL Calibration trigger */
28613 ++ rt715_sdca_index_update_bits(rt715, RT715_VENDOR_REG,
28614 ++ RT715_DFLL_VAD, 0x1, 0x1);
28615 + /* trigger mode = VAD enable */
28616 + regmap_write(rt715->regmap,
28617 + SDW_SDCA_CTL(FUN_MIC_ARRAY, RT715_SDCA_SMPU_TRIG_ST_EN,
28618 +diff --git a/sound/soc/codecs/rt715-sdca.h b/sound/soc/codecs/rt715-sdca.h
28619 +index 85ce4d95e5eb2..90881b455ece9 100644
28620 +--- a/sound/soc/codecs/rt715-sdca.h
28621 ++++ b/sound/soc/codecs/rt715-sdca.h
28622 +@@ -27,7 +27,7 @@ struct rt715_sdca_priv {
28623 + enum sdw_slave_status status;
28624 + struct sdw_bus_params params;
28625 + bool hw_init;
28626 +- bool first_init;
28627 ++ bool first_hw_init;
28628 + int l_is_unmute;
28629 + int r_is_unmute;
28630 + int hw_sdw_ver;
28631 +@@ -81,6 +81,7 @@ struct rt715_sdca_kcontrol_private {
28632 + #define RT715_AD_FUNC_EN 0x36
28633 + #define RT715_REV_1 0x37
28634 + #define RT715_SDW_INPUT_SEL 0x39
28635 ++#define RT715_DFLL_VAD 0x44
28636 + #define RT715_EXT_DMIC_CLK_CTRL2 0x54
28637 +
28638 + /* Index (NID:61h) */
28639 +diff --git a/sound/soc/codecs/rt715-sdw.c b/sound/soc/codecs/rt715-sdw.c
28640 +index 81a1dd77b6f69..a7b21b03c08bb 100644
28641 +--- a/sound/soc/codecs/rt715-sdw.c
28642 ++++ b/sound/soc/codecs/rt715-sdw.c
28643 +@@ -541,7 +541,7 @@ static int __maybe_unused rt715_dev_resume(struct device *dev)
28644 + struct rt715_priv *rt715 = dev_get_drvdata(dev);
28645 + unsigned long time;
28646 +
28647 +- if (!rt715->hw_init)
28648 ++ if (!rt715->first_hw_init)
28649 + return 0;
28650 +
28651 + if (!slave->unattach_request)
28652 +diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c
28653 +index c631de325a6e0..53499bc71fa99 100644
28654 +--- a/sound/soc/fsl/fsl_spdif.c
28655 ++++ b/sound/soc/fsl/fsl_spdif.c
28656 +@@ -1375,14 +1375,27 @@ static int fsl_spdif_probe(struct platform_device *pdev)
28657 + &spdif_priv->cpu_dai_drv, 1);
28658 + if (ret) {
28659 + dev_err(&pdev->dev, "failed to register DAI: %d\n", ret);
28660 +- return ret;
28661 ++ goto err_pm_disable;
28662 + }
28663 +
28664 + ret = imx_pcm_dma_init(pdev, IMX_SPDIF_DMABUF_SIZE);
28665 +- if (ret && ret != -EPROBE_DEFER)
28666 +- dev_err(&pdev->dev, "imx_pcm_dma_init failed: %d\n", ret);
28667 ++ if (ret) {
28668 ++ dev_err_probe(&pdev->dev, ret, "imx_pcm_dma_init failed\n");
28669 ++ goto err_pm_disable;
28670 ++ }
28671 +
28672 + return ret;
28673 ++
28674 ++err_pm_disable:
28675 ++ pm_runtime_disable(&pdev->dev);
28676 ++ return ret;
28677 ++}
28678 ++
28679 ++static int fsl_spdif_remove(struct platform_device *pdev)
28680 ++{
28681 ++ pm_runtime_disable(&pdev->dev);
28682 ++
28683 ++ return 0;
28684 + }
28685 +
28686 + #ifdef CONFIG_PM
28687 +@@ -1391,6 +1404,9 @@ static int fsl_spdif_runtime_suspend(struct device *dev)
28688 + struct fsl_spdif_priv *spdif_priv = dev_get_drvdata(dev);
28689 + int i;
28690 +
28691 ++ /* Disable all the interrupts */
28692 ++ regmap_update_bits(spdif_priv->regmap, REG_SPDIF_SIE, 0xffffff, 0);
28693 ++
28694 + regmap_read(spdif_priv->regmap, REG_SPDIF_SRPC,
28695 + &spdif_priv->regcache_srpc);
28696 + regcache_cache_only(spdif_priv->regmap, true);
28697 +@@ -1487,6 +1503,7 @@ static struct platform_driver fsl_spdif_driver = {
28698 + .pm = &fsl_spdif_pm,
28699 + },
28700 + .probe = fsl_spdif_probe,
28701 ++ .remove = fsl_spdif_remove,
28702 + };
28703 +
28704 + module_platform_driver(fsl_spdif_driver);
28705 +diff --git a/sound/soc/fsl/fsl_xcvr.c b/sound/soc/fsl/fsl_xcvr.c
28706 +index 6cb5581658485..46f3f2c687566 100644
28707 +--- a/sound/soc/fsl/fsl_xcvr.c
28708 ++++ b/sound/soc/fsl/fsl_xcvr.c
28709 +@@ -1233,6 +1233,16 @@ static __maybe_unused int fsl_xcvr_runtime_suspend(struct device *dev)
28710 + struct fsl_xcvr *xcvr = dev_get_drvdata(dev);
28711 + int ret;
28712 +
28713 ++ /*
28714 ++	 * Clear interrupts. When a stream starts or resumes after
28715 ++	 * suspend, interrupts are enabled in prepare(), so there is
28716 ++	 * no need to enable them in resume().
28717 ++ */
28718 ++ ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_EXT_IER0,
28719 ++ FSL_XCVR_IRQ_EARC_ALL, 0);
28720 ++ if (ret < 0)
28721 ++ dev_err(dev, "Failed to clear IER0: %d\n", ret);
28722 ++
28723 + /* Assert M0+ reset */
28724 + ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_EXT_CTRL,
28725 + FSL_XCVR_EXT_CTRL_CORE_RESET,
28726 +diff --git a/sound/soc/hisilicon/hi6210-i2s.c b/sound/soc/hisilicon/hi6210-i2s.c
28727 +index 907f5f1f7b445..ff05b9779e4be 100644
28728 +--- a/sound/soc/hisilicon/hi6210-i2s.c
28729 ++++ b/sound/soc/hisilicon/hi6210-i2s.c
28730 +@@ -102,18 +102,15 @@ static int hi6210_i2s_startup(struct snd_pcm_substream *substream,
28731 +
28732 + for (n = 0; n < i2s->clocks; n++) {
28733 + ret = clk_prepare_enable(i2s->clk[n]);
28734 +- if (ret) {
28735 +- while (n--)
28736 +- clk_disable_unprepare(i2s->clk[n]);
28737 +- return ret;
28738 +- }
28739 ++ if (ret)
28740 ++ goto err_unprepare_clk;
28741 + }
28742 +
28743 + ret = clk_set_rate(i2s->clk[CLK_I2S_BASE], 49152000);
28744 + if (ret) {
28745 + dev_err(i2s->dev, "%s: setting 49.152MHz base rate failed %d\n",
28746 + __func__, ret);
28747 +- return ret;
28748 ++ goto err_unprepare_clk;
28749 + }
28750 +
28751 + /* enable clock before frequency division */
28752 +@@ -165,6 +162,11 @@ static int hi6210_i2s_startup(struct snd_pcm_substream *substream,
28753 + hi6210_write_reg(i2s, HII2S_SW_RST_N, val);
28754 +
28755 + return 0;
28756 ++
28757 ++err_unprepare_clk:
28758 ++ while (n--)
28759 ++ clk_disable_unprepare(i2s->clk[n]);
28760 ++ return ret;
28761 + }
28762 +
28763 + static void hi6210_i2s_shutdown(struct snd_pcm_substream *substream,
28764 +diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
28765 +index ecd3f90f4bbea..dfad2ad129abb 100644
28766 +--- a/sound/soc/intel/boards/sof_sdw.c
28767 ++++ b/sound/soc/intel/boards/sof_sdw.c
28768 +@@ -196,6 +196,7 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
28769 + },
28770 + .driver_data = (void *)(SOF_RT711_JD_SRC_JD1 |
28771 + SOF_SDW_TGL_HDMI |
28772 ++ SOF_RT715_DAI_ID_FIX |
28773 + SOF_SDW_PCH_DMIC),
28774 + },
28775 + {}
28776 +diff --git a/sound/soc/mediatek/common/mtk-btcvsd.c b/sound/soc/mediatek/common/mtk-btcvsd.c
28777 +index f85b5ea180ec0..d884bb7c0fc74 100644
28778 +--- a/sound/soc/mediatek/common/mtk-btcvsd.c
28779 ++++ b/sound/soc/mediatek/common/mtk-btcvsd.c
28780 +@@ -1281,7 +1281,7 @@ static const struct snd_soc_component_driver mtk_btcvsd_snd_platform = {
28781 +
28782 + static int mtk_btcvsd_snd_probe(struct platform_device *pdev)
28783 + {
28784 +- int ret = 0;
28785 ++ int ret;
28786 + int irq_id;
28787 + u32 offset[5] = {0, 0, 0, 0, 0};
28788 + struct mtk_btcvsd_snd *btcvsd;
28789 +@@ -1337,7 +1337,8 @@ static int mtk_btcvsd_snd_probe(struct platform_device *pdev)
28790 + btcvsd->bt_sram_bank2_base = of_iomap(dev->of_node, 1);
28791 + if (!btcvsd->bt_sram_bank2_base) {
28792 + dev_err(dev, "iomap bt_sram_bank2_base fail\n");
28793 +- return -EIO;
28794 ++ ret = -EIO;
28795 ++ goto unmap_pkv_err;
28796 + }
28797 +
28798 + btcvsd->infra = syscon_regmap_lookup_by_phandle(dev->of_node,
28799 +@@ -1345,7 +1346,8 @@ static int mtk_btcvsd_snd_probe(struct platform_device *pdev)
28800 + if (IS_ERR(btcvsd->infra)) {
28801 + dev_err(dev, "cannot find infra controller: %ld\n",
28802 + PTR_ERR(btcvsd->infra));
28803 +- return PTR_ERR(btcvsd->infra);
28804 ++ ret = PTR_ERR(btcvsd->infra);
28805 ++ goto unmap_bank2_err;
28806 + }
28807 +
28808 + /* get offset */
28809 +@@ -1354,7 +1356,7 @@ static int mtk_btcvsd_snd_probe(struct platform_device *pdev)
28810 + ARRAY_SIZE(offset));
28811 + if (ret) {
28812 + dev_warn(dev, "%s(), get offset fail, ret %d\n", __func__, ret);
28813 +- return ret;
28814 ++ goto unmap_bank2_err;
28815 + }
28816 + btcvsd->infra_misc_offset = offset[0];
28817 + btcvsd->conn_bt_cvsd_mask = offset[1];
28818 +@@ -1373,8 +1375,18 @@ static int mtk_btcvsd_snd_probe(struct platform_device *pdev)
28819 + mtk_btcvsd_snd_set_state(btcvsd, btcvsd->tx, BT_SCO_STATE_IDLE);
28820 + mtk_btcvsd_snd_set_state(btcvsd, btcvsd->rx, BT_SCO_STATE_IDLE);
28821 +
28822 +- return devm_snd_soc_register_component(dev, &mtk_btcvsd_snd_platform,
28823 +- NULL, 0);
28824 ++ ret = devm_snd_soc_register_component(dev, &mtk_btcvsd_snd_platform,
28825 ++ NULL, 0);
28826 ++ if (ret)
28827 ++ goto unmap_bank2_err;
28828 ++
28829 ++ return 0;
28830 ++
28831 ++unmap_bank2_err:
28832 ++ iounmap(btcvsd->bt_sram_bank2_base);
28833 ++unmap_pkv_err:
28834 ++ iounmap(btcvsd->bt_pkv_base);
28835 ++ return ret;
28836 + }
28837 +
28838 + static int mtk_btcvsd_snd_remove(struct platform_device *pdev)
28839 +diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
28840 +index 0b8ae3eee148f..93751099465d2 100644
28841 +--- a/sound/soc/sh/rcar/adg.c
28842 ++++ b/sound/soc/sh/rcar/adg.c
28843 +@@ -290,7 +290,6 @@ static void rsnd_adg_set_ssi_clk(struct rsnd_mod *ssi_mod, u32 val)
28844 + int rsnd_adg_clk_query(struct rsnd_priv *priv, unsigned int rate)
28845 + {
28846 + struct rsnd_adg *adg = rsnd_priv_to_adg(priv);
28847 +- struct clk *clk;
28848 + int i;
28849 + int sel_table[] = {
28850 + [CLKA] = 0x1,
28851 +@@ -303,10 +302,9 @@ int rsnd_adg_clk_query(struct rsnd_priv *priv, unsigned int rate)
28852 + * find suitable clock from
28853 + * AUDIO_CLKA/AUDIO_CLKB/AUDIO_CLKC/AUDIO_CLKI.
28854 + */
28855 +- for_each_rsnd_clk(clk, adg, i) {
28856 ++ for (i = 0; i < CLKMAX; i++)
28857 + if (rate == adg->clk_rate[i])
28858 + return sel_table[i];
28859 +- }
28860 +
28861 + /*
28862 + * find divided clock from BRGA/BRGB
28863 +diff --git a/sound/usb/format.c b/sound/usb/format.c
28864 +index 2287f8c653150..eb216fef4ba75 100644
28865 +--- a/sound/usb/format.c
28866 ++++ b/sound/usb/format.c
28867 +@@ -223,9 +223,11 @@ static int parse_audio_format_rates_v1(struct snd_usb_audio *chip, struct audiof
28868 + continue;
28869 + /* C-Media CM6501 mislabels its 96 kHz altsetting */
28870 + /* Terratec Aureon 7.1 USB C-Media 6206, too */
28871 ++ /* Ozone Z90 USB C-Media, too */
28872 + if (rate == 48000 && nr_rates == 1 &&
28873 + (chip->usb_id == USB_ID(0x0d8c, 0x0201) ||
28874 + chip->usb_id == USB_ID(0x0d8c, 0x0102) ||
28875 ++ chip->usb_id == USB_ID(0x0d8c, 0x0078) ||
28876 + chip->usb_id == USB_ID(0x0ccd, 0x00b1)) &&
28877 + fp->altsetting == 5 && fp->maxpacksize == 392)
28878 + rate = 96000;
28879 +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
28880 +index 428d581f988fe..30b3e128e28d8 100644
28881 +--- a/sound/usb/mixer.c
28882 ++++ b/sound/usb/mixer.c
28883 +@@ -3294,8 +3294,9 @@ static void snd_usb_mixer_dump_cval(struct snd_info_buffer *buffer,
28884 + struct usb_mixer_elem_list *list)
28885 + {
28886 + struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list);
28887 +- static const char * const val_types[] = {"BOOLEAN", "INV_BOOLEAN",
28888 +- "S8", "U8", "S16", "U16"};
28889 ++ static const char * const val_types[] = {
28890 ++ "BOOLEAN", "INV_BOOLEAN", "S8", "U8", "S16", "U16", "S32", "U32",
28891 ++ };
28892 + snd_iprintf(buffer, " Info: id=%i, control=%i, cmask=0x%x, "
28893 + "channels=%i, type=\"%s\"\n", cval->head.id,
28894 + cval->control, cval->cmask, cval->channels,
28895 +@@ -3605,6 +3606,9 @@ static int restore_mixer_value(struct usb_mixer_elem_list *list)
28896 + struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list);
28897 + int c, err, idx;
28898 +
28899 ++ if (cval->val_type == USB_MIXER_BESPOKEN)
28900 ++ return 0;
28901 ++
28902 + if (cval->cmask) {
28903 + idx = 0;
28904 + for (c = 0; c < MAX_CHANNELS; c++) {
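Two independent fixes above: the val_types name table gains the S32/U32 strings that the corresponding enum already had, closing an out-of-bounds read in the proc dump, and restore_mixer_value() learns to skip bespoke controls. A sketch of keeping such a table in lockstep with its enum via a compile-time size check (names are ours):

#include <stdio.h>

enum val_type { T_BOOLEAN, T_INV_BOOLEAN, T_S8, T_U8,
                T_S16, T_U16, T_S32, T_U32, T_COUNT };

static const char * const val_names[] = {
    "BOOLEAN", "INV_BOOLEAN", "S8", "U8", "S16", "U16", "S32", "U32",
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Fails the build the moment the enum grows without the table. */
_Static_assert(ARRAY_SIZE(val_names) == T_COUNT,
               "val_names out of sync with enum val_type");

int main(void)
{
    printf("%s\n", val_names[T_U32]); /* safe: table covers the enum */
    return 0;
}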
28905 +diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
28906 +index e5a01f17bf3c9..ea41e7a1f7bf2 100644
28907 +--- a/sound/usb/mixer.h
28908 ++++ b/sound/usb/mixer.h
28909 +@@ -55,6 +55,7 @@ enum {
28910 + USB_MIXER_U16,
28911 + USB_MIXER_S32,
28912 + USB_MIXER_U32,
28913 ++ USB_MIXER_BESPOKEN, /* non-standard type */
28914 + };
28915 +
28916 + typedef void (*usb_mixer_elem_dump_func_t)(struct snd_info_buffer *buffer,
28917 +diff --git a/sound/usb/mixer_scarlett_gen2.c b/sound/usb/mixer_scarlett_gen2.c
28918 +index 4caf379d5b991..bca3e7fe27df6 100644
28919 +--- a/sound/usb/mixer_scarlett_gen2.c
28920 ++++ b/sound/usb/mixer_scarlett_gen2.c
28921 +@@ -949,10 +949,15 @@ static int scarlett2_add_new_ctl(struct usb_mixer_interface *mixer,
28922 + if (!elem)
28923 + return -ENOMEM;
28924 +
28925 ++	/* We set the USB_MIXER_BESPOKEN type so that the core USB mixer code
28926 ++	 * ignores these controls for resume and other operations.
28927 ++ * Also, the head.id field is set to 0, as we don't use this field.
28928 ++ */
28929 + elem->head.mixer = mixer;
28930 + elem->control = index;
28931 +- elem->head.id = index;
28932 ++ elem->head.id = 0;
28933 + elem->channels = channels;
28934 ++ elem->val_type = USB_MIXER_BESPOKEN;
28935 +
28936 + kctl = snd_ctl_new1(ncontrol, elem);
28937 + if (!kctl) {
28938 +diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
28939 +index d9afb730136a4..0f36b9edd3f55 100644
28940 +--- a/tools/bpf/bpftool/main.c
28941 ++++ b/tools/bpf/bpftool/main.c
28942 +@@ -340,8 +340,10 @@ static int do_batch(int argc, char **argv)
28943 + n_argc = make_args(buf, n_argv, BATCH_ARG_NB_MAX, lines);
28944 + if (!n_argc)
28945 + continue;
28946 +- if (n_argc < 0)
28947 ++ if (n_argc < 0) {
28948 ++ err = n_argc;
28949 + goto err_close;
28950 ++ }
28951 +
28952 + if (json_output) {
28953 + jsonw_start_object(json_wtr);
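The do_batch() fix captures the negative parse result into err before jumping to the cleanup label; previously the goto skipped the assignment and the function reported success. A condensed model of the pattern (parse_line() and its error value are invented):

#include <stdio.h>

static int parse_line(int i) { return i == 2 ? -22 : 1; }

static int run_batch(void)
{
    int err = 0;

    for (int i = 0; i < 4; i++) {
        int n = parse_line(i);

        if (n < 0) {
            err = n;        /* the assignment the fix adds */
            goto err_close;
        }
    }
err_close:
    return err;
}

int main(void)
{
    printf("%d\n", run_batch()); /* -22, not 0 */
    return 0;
}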
28954 +diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
28955 +index 7550fd9c31883..3ad9301b0f00c 100644
28956 +--- a/tools/bpf/resolve_btfids/main.c
28957 ++++ b/tools/bpf/resolve_btfids/main.c
28958 +@@ -655,6 +655,9 @@ static int symbols_patch(struct object *obj)
28959 + if (sets_patch(obj))
28960 + return -1;
28961 +
28962 ++ /* Set type to ensure endian translation occurs. */
28963 ++ obj->efile.idlist->d_type = ELF_T_WORD;
28964 ++
28965 + elf_flagdata(obj->efile.idlist, ELF_C_SET, ELF_F_DIRTY);
28966 +
28967 + err = elf_update(obj->efile.elf, ELF_C_WRITE);
28968 +diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c
28969 +index 9de084b1c6993..f44f8a37f780e 100644
28970 +--- a/tools/lib/bpf/linker.c
28971 ++++ b/tools/lib/bpf/linker.c
28972 +@@ -1780,7 +1780,7 @@ static void sym_update_visibility(Elf64_Sym *sym, int sym_vis)
28973 + /* libelf doesn't provide setters for ST_VISIBILITY,
28974 + * but it is stored in the lower 2 bits of st_other
28975 + */
28976 +- sym->st_other &= 0x03;
28977 ++ sym->st_other &= ~0x03;
28978 + sym->st_other |= sym_vis;
28979 + }
28980 +
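A one-character logic bug: "sym->st_other &= 0x03" keeps only the old visibility bits instead of clearing them, so the subsequent OR can never change the value and the upper bits of st_other are lost. A quick demonstration of the difference (values chosen for illustration):

#include <stdio.h>

int main(void)
{
    unsigned char st_other = 0xf2; /* high bits carry other info */
    unsigned char vis = 0x01;      /* new 2-bit visibility value */

    /* "& 0x03" keeps the stale visibility and drops the high bits;
     * "& ~0x03" clears the field so the new value can be OR-ed in. */
    unsigned char wrong = (st_other & 0x03) | vis;  /* 0x03 */
    unsigned char right = (st_other & ~0x03) | vis; /* 0xf1 */

    printf("wrong=0x%02x right=0x%02x\n", wrong, right);
    return 0;
}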
28981 +diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
28982 +index 523aa4157f801..bc821056aba90 100644
28983 +--- a/tools/objtool/arch/x86/decode.c
28984 ++++ b/tools/objtool/arch/x86/decode.c
28985 +@@ -684,7 +684,7 @@ static int elf_add_alternative(struct elf *elf,
28986 + sec = find_section_by_name(elf, ".altinstructions");
28987 + if (!sec) {
28988 + sec = elf_create_section(elf, ".altinstructions",
28989 +- SHF_WRITE, size, 0);
28990 ++ SHF_ALLOC, size, 0);
28991 +
28992 + if (!sec) {
28993 + WARN_ELF("elf_create_section");
28994 +diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c
28995 +index 3ceaf7ef33013..cbd9b268f1684 100644
28996 +--- a/tools/perf/util/llvm-utils.c
28997 ++++ b/tools/perf/util/llvm-utils.c
28998 +@@ -504,6 +504,7 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
28999 + goto errout;
29000 + }
29001 +
29002 ++ err = -ENOMEM;
29003 + if (asprintf(&pipe_template, "%s -emit-llvm | %s -march=bpf %s -filetype=obj -o -",
29004 + template, llc_path, opts) < 0) {
29005 + pr_err("ERROR:\tnot enough memory to setup command line\n");
29006 +@@ -524,6 +525,7 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
29007 +
29008 + pr_debug("llvm compiling command template: %s\n", template);
29009 +
29010 ++ err = -ENOMEM;
29011 + if (asprintf(&command_echo, "echo -n \"%s\"", template) < 0)
29012 + goto errout;
29013 +
29014 +diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
29015 +index 4e4aa4c97ac59..3dfc543327af8 100644
29016 +--- a/tools/perf/util/scripting-engines/trace-event-python.c
29017 ++++ b/tools/perf/util/scripting-engines/trace-event-python.c
29018 +@@ -934,7 +934,7 @@ static PyObject *tuple_new(unsigned int sz)
29019 + return t;
29020 + }
29021 +
29022 +-static int tuple_set_u64(PyObject *t, unsigned int pos, u64 val)
29023 ++static int tuple_set_s64(PyObject *t, unsigned int pos, s64 val)
29024 + {
29025 + #if BITS_PER_LONG == 64
29026 + return PyTuple_SetItem(t, pos, _PyLong_FromLong(val));
29027 +@@ -944,6 +944,22 @@ static int tuple_set_u64(PyObject *t, unsigned int pos, u64 val)
29028 + #endif
29029 + }
29030 +
29031 ++/*
29032 ++ * Databases support only signed 64-bit numbers, so even though we are
29033 ++ * exporting a u64, it must be exported as an s64.
29034 ++ */
29035 ++#define tuple_set_d64 tuple_set_s64
29036 ++
29037 ++static int tuple_set_u64(PyObject *t, unsigned int pos, u64 val)
29038 ++{
29039 ++#if BITS_PER_LONG == 64
29040 ++ return PyTuple_SetItem(t, pos, PyLong_FromUnsignedLong(val));
29041 ++#endif
29042 ++#if BITS_PER_LONG == 32
29043 ++ return PyTuple_SetItem(t, pos, PyLong_FromUnsignedLongLong(val));
29044 ++#endif
29045 ++}
29046 ++
29047 + static int tuple_set_s32(PyObject *t, unsigned int pos, s32 val)
29048 + {
29049 + return PyTuple_SetItem(t, pos, _PyLong_FromLong(val));
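The rename to tuple_set_s64() plus the new unsigned variant separates two intents: database-bound u64 ids are deliberately funneled through the signed constructor so values with the top bit set round-trip through a signed 64-bit column, while genuine unsigned exports use PyLong_FromUnsignedLong*. A hedged embedding sketch showing the observable difference (the build command is an assumption about your CPython setup):

/* Build against CPython, e.g.:
 *   cc demo.c $(python3-config --cflags --embed --ldflags)
 */
#include <Python.h>

int main(void)
{
    Py_Initialize();

    unsigned long long v = 0x8000000000000000ULL;

    /* Cast wraps to LLONG_MIN on common ABIs: the "database" view. */
    PyObject *as_s64 = PyLong_FromLongLong((long long)v);
    /* The true unsigned view: a huge positive Python int. */
    PyObject *as_u64 = PyLong_FromUnsignedLongLong(v);

    PyObject_Print(as_s64, stdout, 0); putchar('\n');
    PyObject_Print(as_u64, stdout, 0); putchar('\n');

    Py_DECREF(as_s64);
    Py_DECREF(as_u64);
    Py_FinalizeEx();
    return 0;
}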
29050 +@@ -967,7 +983,7 @@ static int python_export_evsel(struct db_export *dbe, struct evsel *evsel)
29051 +
29052 + t = tuple_new(2);
29053 +
29054 +- tuple_set_u64(t, 0, evsel->db_id);
29055 ++ tuple_set_d64(t, 0, evsel->db_id);
29056 + tuple_set_string(t, 1, evsel__name(evsel));
29057 +
29058 + call_object(tables->evsel_handler, t, "evsel_table");
29059 +@@ -985,7 +1001,7 @@ static int python_export_machine(struct db_export *dbe,
29060 +
29061 + t = tuple_new(3);
29062 +
29063 +- tuple_set_u64(t, 0, machine->db_id);
29064 ++ tuple_set_d64(t, 0, machine->db_id);
29065 + tuple_set_s32(t, 1, machine->pid);
29066 + tuple_set_string(t, 2, machine->root_dir ? machine->root_dir : "");
29067 +
29068 +@@ -1004,9 +1020,9 @@ static int python_export_thread(struct db_export *dbe, struct thread *thread,
29069 +
29070 + t = tuple_new(5);
29071 +
29072 +- tuple_set_u64(t, 0, thread->db_id);
29073 +- tuple_set_u64(t, 1, machine->db_id);
29074 +- tuple_set_u64(t, 2, main_thread_db_id);
29075 ++ tuple_set_d64(t, 0, thread->db_id);
29076 ++ tuple_set_d64(t, 1, machine->db_id);
29077 ++ tuple_set_d64(t, 2, main_thread_db_id);
29078 + tuple_set_s32(t, 3, thread->pid_);
29079 + tuple_set_s32(t, 4, thread->tid);
29080 +
29081 +@@ -1025,10 +1041,10 @@ static int python_export_comm(struct db_export *dbe, struct comm *comm,
29082 +
29083 + t = tuple_new(5);
29084 +
29085 +- tuple_set_u64(t, 0, comm->db_id);
29086 ++ tuple_set_d64(t, 0, comm->db_id);
29087 + tuple_set_string(t, 1, comm__str(comm));
29088 +- tuple_set_u64(t, 2, thread->db_id);
29089 +- tuple_set_u64(t, 3, comm->start);
29090 ++ tuple_set_d64(t, 2, thread->db_id);
29091 ++ tuple_set_d64(t, 3, comm->start);
29092 + tuple_set_s32(t, 4, comm->exec);
29093 +
29094 + call_object(tables->comm_handler, t, "comm_table");
29095 +@@ -1046,9 +1062,9 @@ static int python_export_comm_thread(struct db_export *dbe, u64 db_id,
29096 +
29097 + t = tuple_new(3);
29098 +
29099 +- tuple_set_u64(t, 0, db_id);
29100 +- tuple_set_u64(t, 1, comm->db_id);
29101 +- tuple_set_u64(t, 2, thread->db_id);
29102 ++ tuple_set_d64(t, 0, db_id);
29103 ++ tuple_set_d64(t, 1, comm->db_id);
29104 ++ tuple_set_d64(t, 2, thread->db_id);
29105 +
29106 + call_object(tables->comm_thread_handler, t, "comm_thread_table");
29107 +
29108 +@@ -1068,8 +1084,8 @@ static int python_export_dso(struct db_export *dbe, struct dso *dso,
29109 +
29110 + t = tuple_new(5);
29111 +
29112 +- tuple_set_u64(t, 0, dso->db_id);
29113 +- tuple_set_u64(t, 1, machine->db_id);
29114 ++ tuple_set_d64(t, 0, dso->db_id);
29115 ++ tuple_set_d64(t, 1, machine->db_id);
29116 + tuple_set_string(t, 2, dso->short_name);
29117 + tuple_set_string(t, 3, dso->long_name);
29118 + tuple_set_string(t, 4, sbuild_id);
29119 +@@ -1090,10 +1106,10 @@ static int python_export_symbol(struct db_export *dbe, struct symbol *sym,
29120 +
29121 + t = tuple_new(6);
29122 +
29123 +- tuple_set_u64(t, 0, *sym_db_id);
29124 +- tuple_set_u64(t, 1, dso->db_id);
29125 +- tuple_set_u64(t, 2, sym->start);
29126 +- tuple_set_u64(t, 3, sym->end);
29127 ++ tuple_set_d64(t, 0, *sym_db_id);
29128 ++ tuple_set_d64(t, 1, dso->db_id);
29129 ++ tuple_set_d64(t, 2, sym->start);
29130 ++ tuple_set_d64(t, 3, sym->end);
29131 + tuple_set_s32(t, 4, sym->binding);
29132 + tuple_set_string(t, 5, sym->name);
29133 +
29134 +@@ -1130,30 +1146,30 @@ static void python_export_sample_table(struct db_export *dbe,
29135 +
29136 + t = tuple_new(24);
29137 +
29138 +- tuple_set_u64(t, 0, es->db_id);
29139 +- tuple_set_u64(t, 1, es->evsel->db_id);
29140 +- tuple_set_u64(t, 2, es->al->maps->machine->db_id);
29141 +- tuple_set_u64(t, 3, es->al->thread->db_id);
29142 +- tuple_set_u64(t, 4, es->comm_db_id);
29143 +- tuple_set_u64(t, 5, es->dso_db_id);
29144 +- tuple_set_u64(t, 6, es->sym_db_id);
29145 +- tuple_set_u64(t, 7, es->offset);
29146 +- tuple_set_u64(t, 8, es->sample->ip);
29147 +- tuple_set_u64(t, 9, es->sample->time);
29148 ++ tuple_set_d64(t, 0, es->db_id);
29149 ++ tuple_set_d64(t, 1, es->evsel->db_id);
29150 ++ tuple_set_d64(t, 2, es->al->maps->machine->db_id);
29151 ++ tuple_set_d64(t, 3, es->al->thread->db_id);
29152 ++ tuple_set_d64(t, 4, es->comm_db_id);
29153 ++ tuple_set_d64(t, 5, es->dso_db_id);
29154 ++ tuple_set_d64(t, 6, es->sym_db_id);
29155 ++ tuple_set_d64(t, 7, es->offset);
29156 ++ tuple_set_d64(t, 8, es->sample->ip);
29157 ++ tuple_set_d64(t, 9, es->sample->time);
29158 + tuple_set_s32(t, 10, es->sample->cpu);
29159 +- tuple_set_u64(t, 11, es->addr_dso_db_id);
29160 +- tuple_set_u64(t, 12, es->addr_sym_db_id);
29161 +- tuple_set_u64(t, 13, es->addr_offset);
29162 +- tuple_set_u64(t, 14, es->sample->addr);
29163 +- tuple_set_u64(t, 15, es->sample->period);
29164 +- tuple_set_u64(t, 16, es->sample->weight);
29165 +- tuple_set_u64(t, 17, es->sample->transaction);
29166 +- tuple_set_u64(t, 18, es->sample->data_src);
29167 ++ tuple_set_d64(t, 11, es->addr_dso_db_id);
29168 ++ tuple_set_d64(t, 12, es->addr_sym_db_id);
29169 ++ tuple_set_d64(t, 13, es->addr_offset);
29170 ++ tuple_set_d64(t, 14, es->sample->addr);
29171 ++ tuple_set_d64(t, 15, es->sample->period);
29172 ++ tuple_set_d64(t, 16, es->sample->weight);
29173 ++ tuple_set_d64(t, 17, es->sample->transaction);
29174 ++ tuple_set_d64(t, 18, es->sample->data_src);
29175 + tuple_set_s32(t, 19, es->sample->flags & PERF_BRANCH_MASK);
29176 + tuple_set_s32(t, 20, !!(es->sample->flags & PERF_IP_FLAG_IN_TX));
29177 +- tuple_set_u64(t, 21, es->call_path_id);
29178 +- tuple_set_u64(t, 22, es->sample->insn_cnt);
29179 +- tuple_set_u64(t, 23, es->sample->cyc_cnt);
29180 ++ tuple_set_d64(t, 21, es->call_path_id);
29181 ++ tuple_set_d64(t, 22, es->sample->insn_cnt);
29182 ++ tuple_set_d64(t, 23, es->sample->cyc_cnt);
29183 +
29184 + call_object(tables->sample_handler, t, "sample_table");
29185 +
29186 +@@ -1167,8 +1183,8 @@ static void python_export_synth(struct db_export *dbe, struct export_sample *es)
29187 +
29188 + t = tuple_new(3);
29189 +
29190 +- tuple_set_u64(t, 0, es->db_id);
29191 +- tuple_set_u64(t, 1, es->evsel->core.attr.config);
29192 ++ tuple_set_d64(t, 0, es->db_id);
29193 ++ tuple_set_d64(t, 1, es->evsel->core.attr.config);
29194 + tuple_set_bytes(t, 2, es->sample->raw_data, es->sample->raw_size);
29195 +
29196 + call_object(tables->synth_handler, t, "synth_data");
29197 +@@ -1200,10 +1216,10 @@ static int python_export_call_path(struct db_export *dbe, struct call_path *cp)
29198 +
29199 + t = tuple_new(4);
29200 +
29201 +- tuple_set_u64(t, 0, cp->db_id);
29202 +- tuple_set_u64(t, 1, parent_db_id);
29203 +- tuple_set_u64(t, 2, sym_db_id);
29204 +- tuple_set_u64(t, 3, cp->ip);
29205 ++ tuple_set_d64(t, 0, cp->db_id);
29206 ++ tuple_set_d64(t, 1, parent_db_id);
29207 ++ tuple_set_d64(t, 2, sym_db_id);
29208 ++ tuple_set_d64(t, 3, cp->ip);
29209 +
29210 + call_object(tables->call_path_handler, t, "call_path_table");
29211 +
29212 +@@ -1221,20 +1237,20 @@ static int python_export_call_return(struct db_export *dbe,
29213 +
29214 + t = tuple_new(14);
29215 +
29216 +- tuple_set_u64(t, 0, cr->db_id);
29217 +- tuple_set_u64(t, 1, cr->thread->db_id);
29218 +- tuple_set_u64(t, 2, comm_db_id);
29219 +- tuple_set_u64(t, 3, cr->cp->db_id);
29220 +- tuple_set_u64(t, 4, cr->call_time);
29221 +- tuple_set_u64(t, 5, cr->return_time);
29222 +- tuple_set_u64(t, 6, cr->branch_count);
29223 +- tuple_set_u64(t, 7, cr->call_ref);
29224 +- tuple_set_u64(t, 8, cr->return_ref);
29225 +- tuple_set_u64(t, 9, cr->cp->parent->db_id);
29226 ++ tuple_set_d64(t, 0, cr->db_id);
29227 ++ tuple_set_d64(t, 1, cr->thread->db_id);
29228 ++ tuple_set_d64(t, 2, comm_db_id);
29229 ++ tuple_set_d64(t, 3, cr->cp->db_id);
29230 ++ tuple_set_d64(t, 4, cr->call_time);
29231 ++ tuple_set_d64(t, 5, cr->return_time);
29232 ++ tuple_set_d64(t, 6, cr->branch_count);
29233 ++ tuple_set_d64(t, 7, cr->call_ref);
29234 ++ tuple_set_d64(t, 8, cr->return_ref);
29235 ++ tuple_set_d64(t, 9, cr->cp->parent->db_id);
29236 + tuple_set_s32(t, 10, cr->flags);
29237 +- tuple_set_u64(t, 11, cr->parent_db_id);
29238 +- tuple_set_u64(t, 12, cr->insn_count);
29239 +- tuple_set_u64(t, 13, cr->cyc_count);
29240 ++ tuple_set_d64(t, 11, cr->parent_db_id);
29241 ++ tuple_set_d64(t, 12, cr->insn_count);
29242 ++ tuple_set_d64(t, 13, cr->cyc_count);
29243 +
29244 + call_object(tables->call_return_handler, t, "call_return_table");
29245 +
29246 +@@ -1254,14 +1270,14 @@ static int python_export_context_switch(struct db_export *dbe, u64 db_id,
29247 +
29248 + t = tuple_new(9);
29249 +
29250 +- tuple_set_u64(t, 0, db_id);
29251 +- tuple_set_u64(t, 1, machine->db_id);
29252 +- tuple_set_u64(t, 2, sample->time);
29253 ++ tuple_set_d64(t, 0, db_id);
29254 ++ tuple_set_d64(t, 1, machine->db_id);
29255 ++ tuple_set_d64(t, 2, sample->time);
29256 + tuple_set_s32(t, 3, sample->cpu);
29257 +- tuple_set_u64(t, 4, th_out_id);
29258 +- tuple_set_u64(t, 5, comm_out_id);
29259 +- tuple_set_u64(t, 6, th_in_id);
29260 +- tuple_set_u64(t, 7, comm_in_id);
29261 ++ tuple_set_d64(t, 4, th_out_id);
29262 ++ tuple_set_d64(t, 5, comm_out_id);
29263 ++ tuple_set_d64(t, 6, th_in_id);
29264 ++ tuple_set_d64(t, 7, comm_in_id);
29265 + tuple_set_s32(t, 8, flags);
29266 +
29267 + call_object(tables->context_switch_handler, t, "context_switch");
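tuple_set_d64 exists because, as the new comment in the hunk says, the databases the export scripts feed store only signed 64-bit integers, so a u64 is handed over as its s64 two's-complement bit pattern and can be reinterpreted on the consumer side. A small sketch of that round trip; the cast back is implementation-defined before C23 but is two's complement on the platforms involved:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t db_id = 0xfedcba9876543210ULL;	/* above INT64_MAX */
	int64_t stored = (int64_t)db_id;	/* what the database sees */
	uint64_t back = (uint64_t)stored;	/* what a reader recovers */

	printf("u64=%" PRIu64 " s64=%" PRId64 " back=%" PRIu64 "\n",
	       db_id, stored, back);
	return back == db_id ? 0 : 1;
}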
29268 +diff --git a/tools/power/x86/intel-speed-select/isst-config.c b/tools/power/x86/intel-speed-select/isst-config.c
29269 +index ab940c508ef0c..d4f0a7872e493 100644
29270 +--- a/tools/power/x86/intel-speed-select/isst-config.c
29271 ++++ b/tools/power/x86/intel-speed-select/isst-config.c
29272 +@@ -106,6 +106,22 @@ int is_skx_based_platform(void)
29273 + return 0;
29274 + }
29275 +
29276 ++int is_spr_platform(void)
29277 ++{
29278 ++ if (cpu_model == 0x8F)
29279 ++ return 1;
29280 ++
29281 ++ return 0;
29282 ++}
29283 ++
29284 ++int is_icx_platform(void)
29285 ++{
29286 ++ if (cpu_model == 0x6A || cpu_model == 0x6C)
29287 ++ return 1;
29288 ++
29289 ++ return 0;
29290 ++}
29291 ++
29292 + static int update_cpu_model(void)
29293 + {
29294 + unsigned int ebx, ecx, edx;
29295 +diff --git a/tools/power/x86/intel-speed-select/isst-core.c b/tools/power/x86/intel-speed-select/isst-core.c
29296 +index 6a26d57699845..4431c8a0d40ae 100644
29297 +--- a/tools/power/x86/intel-speed-select/isst-core.c
29298 ++++ b/tools/power/x86/intel-speed-select/isst-core.c
29299 +@@ -201,6 +201,7 @@ void isst_get_uncore_mem_freq(int cpu, int config_index,
29300 + {
29301 + unsigned int resp;
29302 + int ret;
29303 ++
29304 + ret = isst_send_mbox_command(cpu, CONFIG_TDP, CONFIG_TDP_GET_MEM_FREQ,
29305 + 0, config_index, &resp);
29306 + if (ret) {
29307 +@@ -209,6 +210,20 @@ void isst_get_uncore_mem_freq(int cpu, int config_index,
29308 + }
29309 +
29310 + ctdp_level->mem_freq = resp & GENMASK(7, 0);
29311 ++ if (is_spr_platform()) {
29312 ++ ctdp_level->mem_freq *= 200;
29313 ++ } else if (is_icx_platform()) {
29314 ++ if (ctdp_level->mem_freq < 7) {
29315 ++ ctdp_level->mem_freq = (12 - ctdp_level->mem_freq) * 133.33 * 2 * 10;
29316 ++ ctdp_level->mem_freq /= 10;
29317 ++ if (ctdp_level->mem_freq % 10 > 5)
29318 ++ ctdp_level->mem_freq++;
29319 ++ } else {
29320 ++ ctdp_level->mem_freq = 0;
29321 ++ }
29322 ++ } else {
29323 ++ ctdp_level->mem_freq = 0;
29324 ++ }
29325 + debug_printf(
29326 + "cpu:%d ctdp:%d CONFIG_TDP_GET_MEM_FREQ resp:%x uncore mem_freq:%d\n",
29327 + cpu, config_index, resp, ctdp_level->mem_freq);
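The hunk above converts the raw mailbox value into MHz: on SPR the field is a multiple of 200 MHz, while on ICX it encodes a divisor, giving roughly (12 - code) * 133.33 * 2 MHz with a scale-by-ten pass and a last-digit check that snaps values such as 1599 up to 1600. This is also why the DISP_FREQ_MULTIPLIER is dropped from the display path below. A standalone sketch mirroring the ICX branch:

#include <stdio.h>

static unsigned int decode_icx_mem_freq(unsigned int code)
{
	unsigned int mhz;

	if (code >= 7)
		return 0;	/* out-of-range encoding */

	mhz = (12 - code) * 133.33 * 2 * 10;	/* tenths of MHz, truncated */
	mhz /= 10;
	if (mhz % 10 > 5)	/* snap e.g. 1599 up to 1600 */
		mhz++;
	return mhz;
}

int main(void)
{
	for (unsigned int code = 0; code < 8; code++)
		printf("code %u -> %u MHz\n", code, decode_icx_mem_freq(code));
	return 0;
}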
29328 +diff --git a/tools/power/x86/intel-speed-select/isst-display.c b/tools/power/x86/intel-speed-select/isst-display.c
29329 +index 3bf1820c0da11..f97d8859ada72 100644
29330 +--- a/tools/power/x86/intel-speed-select/isst-display.c
29331 ++++ b/tools/power/x86/intel-speed-select/isst-display.c
29332 +@@ -446,7 +446,7 @@ void isst_ctdp_display_information(int cpu, FILE *outf, int tdp_level,
29333 + if (ctdp_level->mem_freq) {
29334 + snprintf(header, sizeof(header), "mem-frequency(MHz)");
29335 + snprintf(value, sizeof(value), "%d",
29336 +- ctdp_level->mem_freq * DISP_FREQ_MULTIPLIER);
29337 ++ ctdp_level->mem_freq);
29338 + format_and_print(outf, level + 2, header, value);
29339 + }
29340 +
29341 +diff --git a/tools/power/x86/intel-speed-select/isst.h b/tools/power/x86/intel-speed-select/isst.h
29342 +index 0cac6c54be873..1aa15d5ea57ce 100644
29343 +--- a/tools/power/x86/intel-speed-select/isst.h
29344 ++++ b/tools/power/x86/intel-speed-select/isst.h
29345 +@@ -257,5 +257,7 @@ extern int get_cpufreq_base_freq(int cpu);
29346 + extern int isst_read_pm_config(int cpu, int *cp_state, int *cp_cap);
29347 + extern void isst_display_error_info_message(int error, char *msg, int arg_valid, int arg);
29348 + extern int is_skx_based_platform(void);
29349 ++extern int is_spr_platform(void);
29350 ++extern int is_icx_platform(void);
29351 + extern void isst_trl_display_information(int cpu, FILE *outf, unsigned long long trl);
29352 + #endif
29353 +diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
29354 +index 4866f6a219018..d89efd9785d89 100644
29355 +--- a/tools/testing/selftests/bpf/.gitignore
29356 ++++ b/tools/testing/selftests/bpf/.gitignore
29357 +@@ -10,6 +10,7 @@ FEATURE-DUMP.libbpf
29358 + fixdep
29359 + test_dev_cgroup
29360 + /test_progs*
29361 ++!test_progs.h
29362 + test_verifier_log
29363 + feature
29364 + test_sock
29365 +diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf.c b/tools/testing/selftests/bpf/prog_tests/ringbuf.c
29366 +index f9a8ae331963d..2a0549ae13f31 100644
29367 +--- a/tools/testing/selftests/bpf/prog_tests/ringbuf.c
29368 ++++ b/tools/testing/selftests/bpf/prog_tests/ringbuf.c
29369 +@@ -102,7 +102,7 @@ void test_ringbuf(void)
29370 + if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
29371 + goto cleanup;
29372 +
29373 +- rb_fd = bpf_map__fd(skel->maps.ringbuf);
29374 ++ rb_fd = skel->maps.ringbuf.map_fd;
29375 + /* good read/write cons_pos */
29376 + mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rb_fd, 0);
29377 + ASSERT_OK_PTR(mmap_ptr, "rw_cons_pos");
29378 +diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
29379 +index 648d9ae898d2f..01ab11259809e 100644
29380 +--- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
29381 ++++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
29382 +@@ -1610,6 +1610,7 @@ static void udp_redir_to_connected(int family, int sotype, int sock_mapfd,
29383 + struct sockaddr_storage addr;
29384 + int c0, c1, p0, p1;
29385 + unsigned int pass;
29386 ++ int retries = 100;
29387 + socklen_t len;
29388 + int err, n;
29389 + u64 value;
29390 +@@ -1686,9 +1687,13 @@ static void udp_redir_to_connected(int family, int sotype, int sock_mapfd,
29391 + if (pass != 1)
29392 + FAIL("%s: want pass count 1, have %d", log_prefix, pass);
29393 +
29394 ++again:
29395 + n = read(mode == REDIR_INGRESS ? p0 : c0, &b, 1);
29396 +- if (n < 0)
29397 ++ if (n < 0) {
29398 ++ if (errno == EAGAIN && retries--)
29399 ++ goto again;
29400 + FAIL_ERRNO("%s: read", log_prefix);
29401 ++ }
29402 + if (n == 0)
29403 + FAIL("%s: incomplete read", log_prefix);
29404 +
29405 +diff --git a/tools/testing/selftests/ftrace/test.d/event/event-no-pid.tc b/tools/testing/selftests/ftrace/test.d/event/event-no-pid.tc
29406 +index e6eb78f0b9545..9933ed24f9012 100644
29407 +--- a/tools/testing/selftests/ftrace/test.d/event/event-no-pid.tc
29408 ++++ b/tools/testing/selftests/ftrace/test.d/event/event-no-pid.tc
29409 +@@ -57,6 +57,10 @@ enable_events() {
29410 + echo 1 > tracing_on
29411 + }
29412 +
29413 ++other_task() {
29414 ++ sleep .001 || usleep 1 || sleep 1
29415 ++}
29416 ++
29417 + echo 0 > options/event-fork
29418 +
29419 + do_reset
29420 +@@ -94,6 +98,9 @@ child=$!
29421 + echo "child = $child"
29422 + wait $child
29423 +
29424 ++# Be sure some other events will happen for small systems (e.g. 1 core)
29425 ++other_task
29426 ++
29427 + echo 0 > tracing_on
29428 +
29429 + cnt=`count_pid $mypid`
29430 +diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
29431 +index 81edbd23d371c..b4d24f50aca62 100644
29432 +--- a/tools/testing/selftests/kvm/dirty_log_test.c
29433 ++++ b/tools/testing/selftests/kvm/dirty_log_test.c
29434 +@@ -16,7 +16,6 @@
29435 + #include <errno.h>
29436 + #include <linux/bitmap.h>
29437 + #include <linux/bitops.h>
29438 +-#include <asm/barrier.h>
29439 + #include <linux/atomic.h>
29440 +
29441 + #include "kvm_util.h"
29442 +diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
29443 +index a2b732cf96ea4..8ea854d7822d9 100644
29444 +--- a/tools/testing/selftests/kvm/lib/kvm_util.c
29445 ++++ b/tools/testing/selftests/kvm/lib/kvm_util.c
29446 +@@ -375,10 +375,6 @@ struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
29447 + uint32_t vcpuid = vcpuids ? vcpuids[i] : i;
29448 +
29449 + vm_vcpu_add_default(vm, vcpuid, guest_code);
29450 +-
29451 +-#ifdef __x86_64__
29452 +- vcpu_set_cpuid(vm, vcpuid, kvm_get_supported_cpuid());
29453 +-#endif
29454 + }
29455 +
29456 + return vm;
29457 +diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
29458 +index efe2350444213..595322b24e4cb 100644
29459 +--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
29460 ++++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
29461 +@@ -600,6 +600,9 @@ void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
29462 + /* Setup the MP state */
29463 + mp_state.mp_state = 0;
29464 + vcpu_set_mp_state(vm, vcpuid, &mp_state);
29465 ++
29466 ++ /* Setup supported CPUIDs */
29467 ++ vcpu_set_cpuid(vm, vcpuid, kvm_get_supported_cpuid());
29468 + }
29469 +
29470 + /*
29471 +diff --git a/tools/testing/selftests/kvm/steal_time.c b/tools/testing/selftests/kvm/steal_time.c
29472 +index fcc840088c919..a6fe75cb9a6eb 100644
29473 +--- a/tools/testing/selftests/kvm/steal_time.c
29474 ++++ b/tools/testing/selftests/kvm/steal_time.c
29475 +@@ -73,8 +73,6 @@ static void steal_time_init(struct kvm_vm *vm)
29476 + for (i = 0; i < NR_VCPUS; ++i) {
29477 + int ret;
29478 +
29479 +- vcpu_set_cpuid(vm, i, kvm_get_supported_cpuid());
29480 +-
29481 + /* ST_GPA_BASE is identity mapped */
29482 + st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
29483 + sync_global_to_guest(vm, st_gva[i]);
29484 +diff --git a/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c b/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c
29485 +index 12c558fc8074a..c8d2bbe202d0e 100644
29486 +--- a/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c
29487 ++++ b/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c
29488 +@@ -106,8 +106,6 @@ static void add_x86_vcpu(struct kvm_vm *vm, uint32_t vcpuid, bool bsp_code)
29489 + vm_vcpu_add_default(vm, vcpuid, guest_bsp_vcpu);
29490 + else
29491 + vm_vcpu_add_default(vm, vcpuid, guest_not_bsp_vcpu);
29492 +-
29493 +- vcpu_set_cpuid(vm, vcpuid, kvm_get_supported_cpuid());
29494 + }
29495 +
29496 + static void run_vm_bsp(uint32_t bsp_vcpu)
29497 +diff --git a/tools/testing/selftests/lkdtm/run.sh b/tools/testing/selftests/lkdtm/run.sh
29498 +index bb7a1775307b8..e95e79bd31268 100755
29499 +--- a/tools/testing/selftests/lkdtm/run.sh
29500 ++++ b/tools/testing/selftests/lkdtm/run.sh
29501 +@@ -76,10 +76,14 @@ fi
29502 + # Save existing dmesg so we can detect new content below
29503 + dmesg > "$DMESG"
29504 +
29505 +-# Most shells yell about signals and we're expecting the "cat" process
29506 +-# to usually be killed by the kernel. So we have to run it in a sub-shell
29507 +-# and silence errors.
29508 +-($SHELL -c 'cat <(echo '"$test"') >'"$TRIGGER" 2>/dev/null) || true
29509 ++# Since the kernel is likely killing the process writing to the trigger
29510 ++# file, it must not be the script's shell itself. i.e. we cannot do:
29511 ++# echo "$test" >"$TRIGGER"
29512 ++# Instead, use "cat" to take the signal. Since the shell will yell about
29513 ++# the signal that killed the subprocess, we must ignore the failure and
29514 ++# continue. However we don't silence stderr since there might be other
29515 ++# useful details reported there in the case of other unexpected conditions.
29516 ++echo "$test" | cat >"$TRIGGER" || true
29517 +
29518 + # Record and dump the results
29519 + dmesg | comm --nocheck-order -13 "$DMESG" - > "$LOG" || true
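The rewritten comment explains the trick: writing the test name into the lkdtm trigger usually gets the writing process killed, so the write must come from a disposable child ("cat") rather than the script's own shell. A C sketch of the same idea, forking a child to do the possibly fatal write while the parent reports how it ended; the trigger path is the usual lkdtm location but is an assumption here:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

#define TRIGGER_PATH "/sys/kernel/debug/provoke-crash/DIRECT" /* assumed */

int main(void)
{
	int status;
	pid_t pid = fork();

	if (pid < 0)
		return 1;
	if (pid == 0) {		/* disposable child: kernel may kill it */
		int fd = open(TRIGGER_PATH, O_WRONLY);

		if (fd >= 0)
			write(fd, "WARNING", strlen("WARNING"));
		_exit(0);
	}

	waitpid(pid, &status, 0);
	if (WIFSIGNALED(status))
		printf("child killed by signal %d\n", WTERMSIG(status));
	else
		printf("child exited with status %d\n", WEXITSTATUS(status));
	return 0;
}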
29520 +diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
29521 +index 426d07875a48e..112d41d01b12d 100644
29522 +--- a/tools/testing/selftests/net/tls.c
29523 ++++ b/tools/testing/selftests/net/tls.c
29524 +@@ -25,6 +25,47 @@
29525 + #define TLS_PAYLOAD_MAX_LEN 16384
29526 + #define SOL_TLS 282
29527 +
29528 ++struct tls_crypto_info_keys {
29529 ++ union {
29530 ++ struct tls12_crypto_info_aes_gcm_128 aes128;
29531 ++ struct tls12_crypto_info_chacha20_poly1305 chacha20;
29532 ++ };
29533 ++ size_t len;
29534 ++};
29535 ++
29536 ++static void tls_crypto_info_init(uint16_t tls_version, uint16_t cipher_type,
29537 ++ struct tls_crypto_info_keys *tls12)
29538 ++{
29539 ++ memset(tls12, 0, sizeof(*tls12));
29540 ++
29541 ++ switch (cipher_type) {
29542 ++ case TLS_CIPHER_CHACHA20_POLY1305:
29543 ++ tls12->len = sizeof(struct tls12_crypto_info_chacha20_poly1305);
29544 ++ tls12->chacha20.info.version = tls_version;
29545 ++ tls12->chacha20.info.cipher_type = cipher_type;
29546 ++ break;
29547 ++ case TLS_CIPHER_AES_GCM_128:
29548 ++ tls12->len = sizeof(struct tls12_crypto_info_aes_gcm_128);
29549 ++ tls12->aes128.info.version = tls_version;
29550 ++ tls12->aes128.info.cipher_type = cipher_type;
29551 ++ break;
29552 ++ default:
29553 ++ break;
29554 ++ }
29555 ++}
29556 ++
29557 ++static void memrnd(void *s, size_t n)
29558 ++{
29559 ++ int *dword = s;
29560 ++ char *byte;
29561 ++
29562 ++ for (; n >= 4; n -= 4)
29563 ++ *dword++ = rand();
29564 ++ byte = (void *)dword;
29565 ++ while (n--)
29566 ++ *byte++ = rand();
29567 ++}
29568 ++
29569 + FIXTURE(tls_basic)
29570 + {
29571 + int fd, cfd;
29572 +@@ -133,33 +174,16 @@ FIXTURE_VARIANT_ADD(tls, 13_chacha)
29573 +
29574 + FIXTURE_SETUP(tls)
29575 + {
29576 +- union {
29577 +- struct tls12_crypto_info_aes_gcm_128 aes128;
29578 +- struct tls12_crypto_info_chacha20_poly1305 chacha20;
29579 +- } tls12;
29580 ++ struct tls_crypto_info_keys tls12;
29581 + struct sockaddr_in addr;
29582 + socklen_t len;
29583 + int sfd, ret;
29584 +- size_t tls12_sz;
29585 +
29586 + self->notls = false;
29587 + len = sizeof(addr);
29588 +
29589 +- memset(&tls12, 0, sizeof(tls12));
29590 +- switch (variant->cipher_type) {
29591 +- case TLS_CIPHER_CHACHA20_POLY1305:
29592 +- tls12_sz = sizeof(struct tls12_crypto_info_chacha20_poly1305);
29593 +- tls12.chacha20.info.version = variant->tls_version;
29594 +- tls12.chacha20.info.cipher_type = variant->cipher_type;
29595 +- break;
29596 +- case TLS_CIPHER_AES_GCM_128:
29597 +- tls12_sz = sizeof(struct tls12_crypto_info_aes_gcm_128);
29598 +- tls12.aes128.info.version = variant->tls_version;
29599 +- tls12.aes128.info.cipher_type = variant->cipher_type;
29600 +- break;
29601 +- default:
29602 +- tls12_sz = 0;
29603 +- }
29604 ++ tls_crypto_info_init(variant->tls_version, variant->cipher_type,
29605 ++ &tls12);
29606 +
29607 + addr.sin_family = AF_INET;
29608 + addr.sin_addr.s_addr = htonl(INADDR_ANY);
29609 +@@ -187,7 +211,7 @@ FIXTURE_SETUP(tls)
29610 +
29611 + if (!self->notls) {
29612 + ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12,
29613 +- tls12_sz);
29614 ++ tls12.len);
29615 + ASSERT_EQ(ret, 0);
29616 + }
29617 +
29618 +@@ -200,7 +224,7 @@ FIXTURE_SETUP(tls)
29619 + ASSERT_EQ(ret, 0);
29620 +
29621 + ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12,
29622 +- tls12_sz);
29623 ++ tls12.len);
29624 + ASSERT_EQ(ret, 0);
29625 + }
29626 +
29627 +@@ -308,6 +332,8 @@ TEST_F(tls, recv_max)
29628 + char recv_mem[TLS_PAYLOAD_MAX_LEN];
29629 + char buf[TLS_PAYLOAD_MAX_LEN];
29630 +
29631 ++ memrnd(buf, sizeof(buf));
29632 ++
29633 + EXPECT_GE(send(self->fd, buf, send_len, 0), 0);
29634 + EXPECT_NE(recv(self->cfd, recv_mem, send_len, 0), -1);
29635 + EXPECT_EQ(memcmp(buf, recv_mem, send_len), 0);
29636 +@@ -588,6 +614,8 @@ TEST_F(tls, recvmsg_single_max)
29637 + struct iovec vec;
29638 + struct msghdr hdr;
29639 +
29640 ++ memrnd(send_mem, sizeof(send_mem));
29641 ++
29642 + EXPECT_EQ(send(self->fd, send_mem, send_len, 0), send_len);
29643 + vec.iov_base = (char *)recv_mem;
29644 + vec.iov_len = TLS_PAYLOAD_MAX_LEN;
29645 +@@ -610,6 +638,8 @@ TEST_F(tls, recvmsg_multiple)
29646 + struct msghdr hdr;
29647 + int i;
29648 +
29649 ++ memrnd(buf, sizeof(buf));
29650 ++
29651 + EXPECT_EQ(send(self->fd, buf, send_len, 0), send_len);
29652 + for (i = 0; i < msg_iovlen; i++) {
29653 + iov_base[i] = (char *)malloc(iov_len);
29654 +@@ -634,6 +664,8 @@ TEST_F(tls, single_send_multiple_recv)
29655 + char send_mem[TLS_PAYLOAD_MAX_LEN * 2];
29656 + char recv_mem[TLS_PAYLOAD_MAX_LEN * 2];
29657 +
29658 ++ memrnd(send_mem, sizeof(send_mem));
29659 ++
29660 + EXPECT_GE(send(self->fd, send_mem, total_len, 0), 0);
29661 + memset(recv_mem, 0, total_len);
29662 +
29663 +@@ -834,18 +866,17 @@ TEST_F(tls, bidir)
29664 + int ret;
29665 +
29666 + if (!self->notls) {
29667 +- struct tls12_crypto_info_aes_gcm_128 tls12;
29668 ++ struct tls_crypto_info_keys tls12;
29669 +
29670 +- memset(&tls12, 0, sizeof(tls12));
29671 +- tls12.info.version = variant->tls_version;
29672 +- tls12.info.cipher_type = TLS_CIPHER_AES_GCM_128;
29673 ++ tls_crypto_info_init(variant->tls_version, variant->cipher_type,
29674 ++ &tls12);
29675 +
29676 + ret = setsockopt(self->fd, SOL_TLS, TLS_RX, &tls12,
29677 +- sizeof(tls12));
29678 ++ tls12.len);
29679 + ASSERT_EQ(ret, 0);
29680 +
29681 + ret = setsockopt(self->cfd, SOL_TLS, TLS_TX, &tls12,
29682 +- sizeof(tls12));
29683 ++ tls12.len);
29684 + ASSERT_EQ(ret, 0);
29685 + }
29686 +
29687 +diff --git a/tools/testing/selftests/resctrl/README b/tools/testing/selftests/resctrl/README
29688 +index 4b36b25b6ac05..3d2bbd4fa3aa1 100644
29689 +--- a/tools/testing/selftests/resctrl/README
29690 ++++ b/tools/testing/selftests/resctrl/README
29691 +@@ -47,7 +47,7 @@ Parameter '-h' shows usage information.
29692 +
29693 + usage: resctrl_tests [-h] [-b "benchmark_cmd [options]"] [-t test list] [-n no_of_bits]
29694 + -b benchmark_cmd [options]: run specified benchmark for MBM, MBA and CMT default benchmark is builtin fill_buf
29695 +- -t test list: run tests specified in the test list, e.g. -t mbm, mba, cmt, cat
29696 ++ -t test list: run tests specified in the test list, e.g. -t mbm,mba,cmt,cat
29697 + -n no_of_bits: run cache tests using specified no of bits in cache bit mask
29698 + -p cpu_no: specify CPU number to run the test. 1 is default
29699 + -h: help
29700 +diff --git a/tools/testing/selftests/resctrl/resctrl_tests.c b/tools/testing/selftests/resctrl/resctrl_tests.c
29701 +index f51b5fc066a32..973f09a66e1ee 100644
29702 +--- a/tools/testing/selftests/resctrl/resctrl_tests.c
29703 ++++ b/tools/testing/selftests/resctrl/resctrl_tests.c
29704 +@@ -40,7 +40,7 @@ static void cmd_help(void)
29705 + printf("\t-b benchmark_cmd [options]: run specified benchmark for MBM, MBA and CMT\n");
29706 + printf("\t default benchmark is builtin fill_buf\n");
29707 + printf("\t-t test list: run tests specified in the test list, ");
29708 +- printf("e.g. -t mbm, mba, cmt, cat\n");
29709 ++ printf("e.g. -t mbm,mba,cmt,cat\n");
29710 + printf("\t-n no_of_bits: run cache tests using specified no of bits in cache bit mask\n");
29711 + printf("\t-p cpu_no: specify CPU number to run the test. 1 is default\n");
29712 + printf("\t-h: help\n");
29713 +@@ -173,7 +173,7 @@ int main(int argc, char **argv)
29714 +
29715 + return -1;
29716 + }
29717 +- token = strtok(NULL, ":\t");
29718 ++ token = strtok(NULL, ",");
29719 + }
29720 + break;
29721 + case 'p':
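The strtok() fix above is the heart of this change: the first call split on "," but the continuation calls split on ":\t", so only the first name in a list like "-t mbm,mba,cmt,cat" was ever parsed. Continuation calls must pass the same delimiter set, as in this small sketch:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char list[] = "mbm,mba,cmt,cat";
	char *token = strtok(list, ",");

	while (token) {
		printf("test: %s\n", token);
		token = strtok(NULL, ",");	/* was ":\t" in the bug */
	}
	return 0;
}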
29722 +diff --git a/tools/testing/selftests/sgx/load.c b/tools/testing/selftests/sgx/load.c
29723 +index f441ac34b4d44..bae78c3263d94 100644
29724 +--- a/tools/testing/selftests/sgx/load.c
29725 ++++ b/tools/testing/selftests/sgx/load.c
29726 +@@ -150,16 +150,6 @@ bool encl_load(const char *path, struct encl *encl)
29727 + goto err;
29728 + }
29729 +
29730 +- /*
29731 +- * This just checks if the /dev file has these permission
29732 +- * bits set. It does not check that the current user is
29733 +- * the owner or in the owning group.
29734 +- */
29735 +- if (!(sb.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) {
29736 +- fprintf(stderr, "no execute permissions on device file %s\n", device_path);
29737 +- goto err;
29738 +- }
29739 +-
29740 + ptr = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED, fd, 0);
29741 + if (ptr == (void *)-1) {
29742 + perror("mmap for read");
29743 +@@ -169,13 +159,13 @@ bool encl_load(const char *path, struct encl *encl)
29744 +
29745 + #define ERR_MSG \
29746 + "mmap() succeeded for PROT_READ, but failed for PROT_EXEC.\n" \
29747 +-" Check that current user has execute permissions on %s and \n" \
29748 +-" that /dev does not have noexec set: mount | grep \"/dev .*noexec\"\n" \
29749 ++" Check that /dev does not have noexec set:\n" \
29750 ++" \tmount | grep \"/dev .*noexec\"\n" \
29751 + " If so, remount it executable: mount -o remount,exec /dev\n\n"
29752 +
29753 + ptr = mmap(NULL, PAGE_SIZE, PROT_EXEC, MAP_SHARED, fd, 0);
29754 + if (ptr == (void *)-1) {
29755 +- fprintf(stderr, ERR_MSG, device_path);
29756 ++ fprintf(stderr, ERR_MSG);
29757 + goto err;
29758 + }
29759 + munmap(ptr, PAGE_SIZE);
29760 +diff --git a/tools/testing/selftests/splice/short_splice_read.sh b/tools/testing/selftests/splice/short_splice_read.sh
29761 +index 7810d3589d9ab..22b6c8910b182 100755
29762 +--- a/tools/testing/selftests/splice/short_splice_read.sh
29763 ++++ b/tools/testing/selftests/splice/short_splice_read.sh
29764 +@@ -1,21 +1,87 @@
29765 + #!/bin/sh
29766 + # SPDX-License-Identifier: GPL-2.0
29767 ++#
29768 ++# Test for mishandling of splice() on pseudofilesystems, which should catch
29769 ++# bugs like 11990a5bd7e5 ("module: Correctly truncate sysfs sections output")
29770 ++#
29771 ++# Since splice fallback was removed as part of the set_fs() rework, many of these
29772 ++# tests expect to fail now. See https://lore.kernel.org/lkml/202009181443.C2179FB@keescook/
29773 + set -e
29774 +
29775 ++DIR=$(dirname "$0")
29776 ++
29777 + ret=0
29778 +
29779 ++expect_success()
29780 ++{
29781 ++ title="$1"
29782 ++ shift
29783 ++
29784 ++ echo "" >&2
29785 ++ echo "$title ..." >&2
29786 ++
29787 ++ set +e
29788 ++ "$@"
29789 ++ rc=$?
29790 ++ set -e
29791 ++
29792 ++ case "$rc" in
29793 ++ 0)
29794 ++ echo "ok: $title succeeded" >&2
29795 ++ ;;
29796 ++ 1)
29797 ++ echo "FAIL: $title should work" >&2
29798 ++ ret=$(( ret + 1 ))
29799 ++ ;;
29800 ++ *)
29801 ++ echo "FAIL: something else went wrong" >&2
29802 ++ ret=$(( ret + 1 ))
29803 ++ ;;
29804 ++ esac
29805 ++}
29806 ++
29807 ++expect_failure()
29808 ++{
29809 ++ title="$1"
29810 ++ shift
29811 ++
29812 ++ echo "" >&2
29813 ++ echo "$title ..." >&2
29814 ++
29815 ++ set +e
29816 ++ "$@"
29817 ++ rc=$?
29818 ++ set -e
29819 ++
29820 ++ case "$rc" in
29821 ++ 0)
29822 ++ echo "FAIL: $title unexpectedly worked" >&2
29823 ++ ret=$(( ret + 1 ))
29824 ++ ;;
29825 ++ 1)
29826 ++ echo "ok: $title correctly failed" >&2
29827 ++ ;;
29828 ++ *)
29829 ++ echo "FAIL: something else went wrong" >&2
29830 ++ ret=$(( ret + 1 ))
29831 ++ ;;
29832 ++ esac
29833 ++}
29834 ++
29835 + do_splice()
29836 + {
29837 + filename="$1"
29838 + bytes="$2"
29839 + expected="$3"
29840 ++ report="$4"
29841 +
29842 +- out=$(./splice_read "$filename" "$bytes" | cat)
29843 ++ out=$("$DIR"/splice_read "$filename" "$bytes" | cat)
29844 + if [ "$out" = "$expected" ] ; then
29845 +- echo "ok: $filename $bytes"
29846 ++ echo " matched $report" >&2
29847 ++ return 0
29848 + else
29849 +- echo "FAIL: $filename $bytes"
29850 +- ret=1
29851 ++ echo " no match: '$out' vs $report" >&2
29852 ++ return 1
29853 + fi
29854 + }
29855 +
29856 +@@ -23,34 +89,45 @@ test_splice()
29857 + {
29858 + filename="$1"
29859 +
29860 ++ echo " checking $filename ..." >&2
29861 ++
29862 + full=$(cat "$filename")
29863 ++ rc=$?
29864 ++ if [ $rc -ne 0 ] ; then
29865 ++ return 2
29866 ++ fi
29867 ++
29868 + two=$(echo "$full" | grep -m1 . | cut -c-2)
29869 +
29870 + # Make sure full splice has the same contents as a standard read.
29871 +- do_splice "$filename" 4096 "$full"
29872 ++ echo " splicing 4096 bytes ..." >&2
29873 ++ if ! do_splice "$filename" 4096 "$full" "full read" ; then
29874 ++ return 1
29875 ++ fi
29876 +
29877 + # Make sure a partial splice sees the first two characters.
29878 +- do_splice "$filename" 2 "$two"
29879 ++ echo " splicing 2 bytes ..." >&2
29880 ++ if ! do_splice "$filename" 2 "$two" "'$two'" ; then
29881 ++ return 1
29882 ++ fi
29883 ++
29884 ++ return 0
29885 + }
29886 +
29887 +-# proc_single_open(), seq_read()
29888 +-test_splice /proc/$$/limits
29889 +-# special open, seq_read()
29890 +-test_splice /proc/$$/comm
29891 ++### /proc/$pid/ has no splice interface; these should all fail.
29892 ++expect_failure "proc_single_open(), seq_read() splice" test_splice /proc/$$/limits
29893 ++expect_failure "special open(), seq_read() splice" test_splice /proc/$$/comm
29894 +
29895 +-# proc_handler, proc_dointvec_minmax
29896 +-test_splice /proc/sys/fs/nr_open
29897 +-# proc_handler, proc_dostring
29898 +-test_splice /proc/sys/kernel/modprobe
29899 +-# proc_handler, special read
29900 +-test_splice /proc/sys/kernel/version
29901 ++### /proc/sys/ has a splice interface; these should all succeed.
29902 ++expect_success "proc_handler: proc_dointvec_minmax() splice" test_splice /proc/sys/fs/nr_open
29903 ++expect_success "proc_handler: proc_dostring() splice" test_splice /proc/sys/kernel/modprobe
29904 ++expect_success "proc_handler: special read splice" test_splice /proc/sys/kernel/version
29905 +
29906 ++### /sys/ has no splice interface; these should all fail.
29907 + if ! [ -d /sys/module/test_module/sections ] ; then
29908 +- modprobe test_module
29909 ++ expect_success "test_module kernel module load" modprobe test_module
29910 + fi
29911 +-# kernfs, attr
29912 +-test_splice /sys/module/test_module/coresize
29913 +-# kernfs, binattr
29914 +-test_splice /sys/module/test_module/sections/.init.text
29915 ++expect_failure "kernfs attr splice" test_splice /sys/module/test_module/coresize
29916 ++expect_failure "kernfs binattr splice" test_splice /sys/module/test_module/sections/.init.text
29917 +
29918 + exit $ret
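The rewritten script drives a splice_read helper binary (built next to it, not shown in this hunk) and now distinguishes files whose splice support should work from those where it should fail. That helper presumably splices the file into a pipe, since splice(2) requires one side to be a pipe; a hedged sketch of such a helper, not the actual test source:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	char buf[4096];
	int pfd[2];
	ssize_t n;
	int fd = open(argc > 1 ? argv[1] : "/proc/sys/kernel/version",
		      O_RDONLY);

	if (fd < 0 || pipe(pfd) < 0)
		return 1;

	/* splice(2) requires one side to be a pipe */
	n = splice(fd, NULL, pfd[1], NULL, sizeof(buf), 0);
	if (n < 0) {
		perror("splice");	/* the expect_failure cases land here */
		return 1;
	}
	n = read(pfd[0], buf, sizeof(buf));
	if (n > 0)
		write(STDOUT_FILENO, buf, n);
	return 0;
}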
29919 +diff --git a/tools/testing/selftests/tc-testing/plugin-lib/scapyPlugin.py b/tools/testing/selftests/tc-testing/plugin-lib/scapyPlugin.py
29920 +index 229ee185b27e1..a7b21658af9b4 100644
29921 +--- a/tools/testing/selftests/tc-testing/plugin-lib/scapyPlugin.py
29922 ++++ b/tools/testing/selftests/tc-testing/plugin-lib/scapyPlugin.py
29923 +@@ -36,7 +36,7 @@ class SubPlugin(TdcPlugin):
29924 + for k in scapy_keys:
29925 + if k not in scapyinfo:
29926 + keyfail = True
29927 +- missing_keys.add(k)
29928 ++ missing_keys.append(k)
29929 + if keyfail:
29930 + print('{}: Scapy block present in the test, but is missing info:'
29931 + .format(self.sub_class))
29932 +diff --git a/tools/testing/selftests/vm/protection_keys.c b/tools/testing/selftests/vm/protection_keys.c
29933 +index fdbb602ecf325..87eecd5ba577b 100644
29934 +--- a/tools/testing/selftests/vm/protection_keys.c
29935 ++++ b/tools/testing/selftests/vm/protection_keys.c
29936 +@@ -510,7 +510,7 @@ int alloc_pkey(void)
29937 + " shadow: 0x%016llx\n",
29938 + __func__, __LINE__, ret, __read_pkey_reg(),
29939 + shadow_pkey_reg);
29940 +- if (ret) {
29941 ++ if (ret > 0) {
29942 + /* clear both the bits: */
29943 + shadow_pkey_reg = set_pkey_bits(shadow_pkey_reg, ret,
29944 + ~PKEY_MASK);
29945 +@@ -561,7 +561,6 @@ int alloc_random_pkey(void)
29946 + int nr_alloced = 0;
29947 + int random_index;
29948 + memset(alloced_pkeys, 0, sizeof(alloced_pkeys));
29949 +- srand((unsigned int)time(NULL));
29950 +
29951 + /* allocate every possible key and make a note of which ones we got */
29952 + max_nr_pkey_allocs = NR_PKEYS;
29953 +@@ -1449,6 +1448,13 @@ void test_implicit_mprotect_exec_only_memory(int *ptr, u16 pkey)
29954 + ret = mprotect(p1, PAGE_SIZE, PROT_EXEC);
29955 + pkey_assert(!ret);
29956 +
29957 ++ /*
29958 ++ * Reset the shadow, assuming that the above mprotect()
29959 ++ * correctly changed PKRU, but to an unknown value since
29960 ++ * the actual alllocated pkey is unknown.
29961 ++ */
29962 ++ shadow_pkey_reg = __read_pkey_reg();
29963 ++
29964 + dprintf2("pkey_reg: %016llx\n", read_pkey_reg());
29965 +
29966 + /* Make sure this is an *instruction* fault */
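The hunks around here also move srand() out of alloc_random_pkey() and into main() (shown just below): reseeding with time(NULL) before every call replays the same rand() sequence whenever calls land within the same second. A small demonstration of the difference:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static int pick_reseeding(void)
{
	srand((unsigned int)time(NULL));	/* the old, buggy placement */
	return rand() % 16;
}

int main(void)
{
	/* typically prints one value five times: time() has not ticked */
	for (int i = 0; i < 5; i++)
		printf("%d ", pick_reseeding());
	printf("\n");

	srand((unsigned int)time(NULL));	/* seed once, as main() now does */
	for (int i = 0; i < 5; i++)
		printf("%d ", rand() % 16);
	printf("\n");
	return 0;
}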
29967 +@@ -1552,6 +1558,8 @@ int main(void)
29968 + int nr_iterations = 22;
29969 + int pkeys_supported = is_pkeys_supported();
29970 +
29971 ++ srand((unsigned int)time(NULL));
29972 ++
29973 + setup_handlers();
29974 +
29975 + printf("has pkeys: %d\n", pkeys_supported);