Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.5 commit in: /
Date: Wed, 19 Feb 2020 23:49:33
Message-Id: 1582156147.463e67dc5a2521fdc2e4ceafe2f42ac32f680752.mpagano@gentoo
1 commit: 463e67dc5a2521fdc2e4ceafe2f42ac32f680752
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Feb 19 23:49:07 2020 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Feb 19 23:49:07 2020 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=463e67dc
7
8 Linux patch 5.5.5
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo <DOT> org>
11
12 0000_README | 4 +
13 1004_linux-5.5.5.patch | 3180 ++++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 3184 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 567c784..7eb2076 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -59,6 +59,10 @@ Patch: 1003_linux-5.5.4.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.5.4
23
24 +Patch: 1004_linux-5.5.5.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.5.5
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1004_linux-5.5.5.patch b/1004_linux-5.5.5.patch
33 new file mode 100644
34 index 0000000..1da35c5
35 --- /dev/null
36 +++ b/1004_linux-5.5.5.patch
37 @@ -0,0 +1,3180 @@
38 +diff --git a/Makefile b/Makefile
39 +index 62f956e9c81d..1f7dc3a2e1dd 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 5
45 + PATCHLEVEL = 5
46 +-SUBLEVEL = 4
47 ++SUBLEVEL = 5
48 + EXTRAVERSION =
49 + NAME = Kleptomaniac Octopus
50 +
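
The only functional change in the Makefile hunk is the SUBLEVEL bump, which in turn changes LINUX_VERSION_CODE. As a minimal sketch, out-of-tree code typically gates on a point release like this, using only the standard <linux/version.h> macros (the HAVE_* name is illustrative):

	#include <linux/version.h>

	/* KERNEL_VERSION(a, b, c) packs a version as (a << 16) + (b << 8) + c,
	 * so 5.5.5 compares numerically against the running tree's code. */
	#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 5)
	#define HAVE_LINUX_5_5_5_FIXES 1
	#else
	#define HAVE_LINUX_5_5_5_FIXES 0
	#endif
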
51 +diff --git a/arch/arm/mach-npcm/Kconfig b/arch/arm/mach-npcm/Kconfig
52 +index 880bc2a5cada..7f7002dc2b21 100644
53 +--- a/arch/arm/mach-npcm/Kconfig
54 ++++ b/arch/arm/mach-npcm/Kconfig
55 +@@ -11,7 +11,7 @@ config ARCH_NPCM7XX
56 + depends on ARCH_MULTI_V7
57 + select PINCTRL_NPCM7XX
58 + select NPCM7XX_TIMER
59 +- select ARCH_REQUIRE_GPIOLIB
60 ++ select GPIOLIB
61 + select CACHE_L2X0
62 + select ARM_GIC
63 + select HAVE_ARM_TWD if SMP
64 +diff --git a/arch/arm64/boot/dts/arm/fvp-base-revc.dts b/arch/arm64/boot/dts/arm/fvp-base-revc.dts
65 +index 62ab0d54ff71..335fff762451 100644
66 +--- a/arch/arm64/boot/dts/arm/fvp-base-revc.dts
67 ++++ b/arch/arm64/boot/dts/arm/fvp-base-revc.dts
68 +@@ -161,10 +161,10 @@
69 + bus-range = <0x0 0x1>;
70 + reg = <0x0 0x40000000 0x0 0x10000000>;
71 + ranges = <0x2000000 0x0 0x50000000 0x0 0x50000000 0x0 0x10000000>;
72 +- interrupt-map = <0 0 0 1 &gic GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
73 +- <0 0 0 2 &gic GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
74 +- <0 0 0 3 &gic GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
75 +- <0 0 0 4 &gic GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>;
76 ++ interrupt-map = <0 0 0 1 &gic 0 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
77 ++ <0 0 0 2 &gic 0 0 GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
78 ++ <0 0 0 3 &gic 0 0 GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
79 ++ <0 0 0 4 &gic 0 0 GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>;
80 + interrupt-map-mask = <0x0 0x0 0x0 0x7>;
81 + msi-map = <0x0 &its 0x0 0x10000>;
82 + iommu-map = <0x0 &smmu 0x0 0x10000>;
83 +diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
84 +index d54586d5b031..fab013c5ee8c 100644
85 +--- a/arch/arm64/kernel/process.c
86 ++++ b/arch/arm64/kernel/process.c
87 +@@ -466,6 +466,13 @@ static void ssbs_thread_switch(struct task_struct *next)
88 + if (unlikely(next->flags & PF_KTHREAD))
89 + return;
90 +
91 ++ /*
92 ++ * If all CPUs implement the SSBS extension, then we just need to
93 ++ * context-switch the PSTATE field.
94 ++ */
95 ++ if (cpu_have_feature(cpu_feature(SSBS)))
96 ++ return;
97 ++
98 + /* If the mitigation is enabled, then we leave SSBS clear. */
99 + if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
100 + test_tsk_thread_flag(next, TIF_SSBD))
101 +diff --git a/arch/s390/boot/uv.c b/arch/s390/boot/uv.c
102 +index ed007f4a6444..3f501159ee9f 100644
103 +--- a/arch/s390/boot/uv.c
104 ++++ b/arch/s390/boot/uv.c
105 +@@ -15,7 +15,8 @@ void uv_query_info(void)
106 + if (!test_facility(158))
107 + return;
108 +
109 +- if (uv_call(0, (uint64_t)&uvcb))
110 ++ /* rc==0x100 means that there is additional data we do not process */
111 ++ if (uv_call(0, (uint64_t)&uvcb) && uvcb.header.rc != 0x100)
112 + return;
113 +
114 + if (test_bit_inv(BIT_UVC_CMD_SET_SHARED_ACCESS, (unsigned long *)uvcb.inst_calls_list) &&
115 +diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
116 +index 670f14a228e5..6bf3a45ccfec 100644
117 +--- a/arch/s390/include/asm/timex.h
118 ++++ b/arch/s390/include/asm/timex.h
119 +@@ -155,7 +155,7 @@ static inline void get_tod_clock_ext(char *clk)
120 +
121 + static inline unsigned long long get_tod_clock(void)
122 + {
123 +- unsigned char clk[STORE_CLOCK_EXT_SIZE];
124 ++ char clk[STORE_CLOCK_EXT_SIZE];
125 +
126 + get_tod_clock_ext(clk);
127 + return *((unsigned long long *)&clk[1]);
128 +diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
129 +index a7752cd78b89..dede714b46e8 100644
130 +--- a/arch/x86/events/amd/core.c
131 ++++ b/arch/x86/events/amd/core.c
132 +@@ -246,6 +246,7 @@ static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
133 + [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
134 + [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
135 + [PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60,
136 ++ [PERF_COUNT_HW_CACHE_MISSES] = 0x0964,
137 + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
138 + [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
139 + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x0287,
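
The new table entry wires the generic cache-misses event to raw PMC event 0x0964 on AMD Family 17h, so userspace keeps using the portable alias while the kernel does the translation. A hedged userspace sketch with perf_event_open(2) (the workload marker is a placeholder):

	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <string.h>
	#include <stdio.h>

	int main(void)
	{
		struct perf_event_attr attr;
		long long misses = 0;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;           /* generic, arch-neutral */
		attr.config = PERF_COUNT_HW_CACHE_MISSES; /* -> 0x0964 on Family 17h */
		attr.disabled = 1;
		attr.exclude_kernel = 1;

		/* perf_event_open() has no glibc wrapper; call it via syscall(2). */
		fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0)
			return 1;
		ioctl(fd, PERF_EVENT_IOC_RESET, 0);
		ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
		/* ... workload under measurement goes here ... */
		ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
		read(fd, &misses, sizeof(misses));
		printf("cache misses: %lld\n", misses);
		close(fd);
		return 0;
	}
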
140 +diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
141 +index ce83950036c5..e5ad97a82342 100644
142 +--- a/arch/x86/events/intel/ds.c
143 ++++ b/arch/x86/events/intel/ds.c
144 +@@ -1713,6 +1713,8 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
145 + old = ((s64)(prev_raw_count << shift) >> shift);
146 + local64_add(new - old + count * period, &event->count);
147 +
148 ++ local64_set(&hwc->period_left, -new);
149 ++
150 + perf_event_update_userpage(event);
151 +
152 + return 0;
153 +diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
154 +index c1d7b866a03f..4e3f137ffa8c 100644
155 +--- a/arch/x86/kvm/mmu/paging_tmpl.h
156 ++++ b/arch/x86/kvm/mmu/paging_tmpl.h
157 +@@ -33,7 +33,7 @@
158 + #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
159 + #define PT_HAVE_ACCESSED_DIRTY(mmu) true
160 + #ifdef CONFIG_X86_64
161 +- #define PT_MAX_FULL_LEVELS 4
162 ++ #define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
163 + #define CMPXCHG cmpxchg
164 + #else
165 + #define CMPXCHG cmpxchg64
166 +diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
167 +index 5bfa8228f0c7..3babe5e29429 100644
168 +--- a/arch/x86/kvm/vmx/nested.c
169 ++++ b/arch/x86/kvm/vmx/nested.c
170 +@@ -3583,6 +3583,33 @@ static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
171 + nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
172 + }
173 +
174 ++/*
175 ++ * Returns true if a debug trap is pending delivery.
176 ++ *
177 ++ * In KVM, debug traps bear an exception payload. As such, the class of a #DB
178 ++ * exception may be inferred from the presence of an exception payload.
179 ++ */
180 ++static inline bool vmx_pending_dbg_trap(struct kvm_vcpu *vcpu)
181 ++{
182 ++ return vcpu->arch.exception.pending &&
183 ++ vcpu->arch.exception.nr == DB_VECTOR &&
184 ++ vcpu->arch.exception.payload;
185 ++}
186 ++
187 ++/*
188 ++ * Certain VM-exits set the 'pending debug exceptions' field to indicate a
189 ++ * recognized #DB (data or single-step) that has yet to be delivered. Since KVM
190 ++ * represents these debug traps with a payload that is said to be compatible
191 ++ * with the 'pending debug exceptions' field, write the payload to the VMCS
192 ++ * field if a VM-exit is delivered before the debug trap.
193 ++ */
194 ++static void nested_vmx_update_pending_dbg(struct kvm_vcpu *vcpu)
195 ++{
196 ++ if (vmx_pending_dbg_trap(vcpu))
197 ++ vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
198 ++ vcpu->arch.exception.payload);
199 ++}
200 ++
201 + static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
202 + {
203 + struct vcpu_vmx *vmx = to_vmx(vcpu);
204 +@@ -3595,6 +3622,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
205 + test_bit(KVM_APIC_INIT, &apic->pending_events)) {
206 + if (block_nested_events)
207 + return -EBUSY;
208 ++ nested_vmx_update_pending_dbg(vcpu);
209 + clear_bit(KVM_APIC_INIT, &apic->pending_events);
210 + nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0);
211 + return 0;
212 +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
213 +index 78e01e2524bc..c0d837c37f34 100644
214 +--- a/arch/x86/kvm/vmx/vmx.c
215 ++++ b/arch/x86/kvm/vmx/vmx.c
216 +@@ -2968,6 +2968,9 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
217 +
218 + static int get_ept_level(struct kvm_vcpu *vcpu)
219 + {
220 ++ /* Nested EPT currently only supports 4-level walks. */
221 ++ if (is_guest_mode(vcpu) && nested_cpu_has_ept(get_vmcs12(vcpu)))
222 ++ return 4;
223 + if (cpu_has_vmx_ept_5levels() && (cpuid_maxphyaddr(vcpu) > 48))
224 + return 5;
225 + return 4;
226 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
227 +index d744c1bf4dc8..e594fd2719dd 100644
228 +--- a/arch/x86/kvm/x86.c
229 ++++ b/arch/x86/kvm/x86.c
230 +@@ -437,6 +437,14 @@ void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu)
231 + * for #DB exceptions under VMX.
232 + */
233 + vcpu->arch.dr6 ^= payload & DR6_RTM;
234 ++
235 ++ /*
236 ++ * The #DB payload is defined as compatible with the 'pending
237 ++ * debug exceptions' field under VMX, not DR6. While bit 12 is
238 ++ * defined in the 'pending debug exceptions' field (enabled
239 ++ * breakpoint), it is reserved and must be zero in DR6.
240 ++ */
241 ++ vcpu->arch.dr6 &= ~BIT(12);
242 + break;
243 + case PF_VECTOR:
244 + vcpu->arch.cr2 = payload;
245 +diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
246 +index bcf8f7501db7..a74c1a0e892d 100644
247 +--- a/drivers/acpi/acpica/achware.h
248 ++++ b/drivers/acpi/acpica/achware.h
249 +@@ -101,6 +101,8 @@ acpi_status acpi_hw_enable_all_runtime_gpes(void);
250 +
251 + acpi_status acpi_hw_enable_all_wakeup_gpes(void);
252 +
253 ++u8 acpi_hw_check_all_gpes(void);
254 ++
255 + acpi_status
256 + acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
257 + struct acpi_gpe_block_info *gpe_block,
258 +diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
259 +index 04a40d563dd6..84b0b410310e 100644
260 +--- a/drivers/acpi/acpica/evxfgpe.c
261 ++++ b/drivers/acpi/acpica/evxfgpe.c
262 +@@ -795,6 +795,38 @@ acpi_status acpi_enable_all_wakeup_gpes(void)
263 +
264 + ACPI_EXPORT_SYMBOL(acpi_enable_all_wakeup_gpes)
265 +
266 ++/******************************************************************************
267 ++ *
268 ++ * FUNCTION: acpi_any_gpe_status_set
269 ++ *
270 ++ * PARAMETERS: None
271 ++ *
272 ++ * RETURN: Whether or not the status bit is set for any GPE
273 ++ *
274 ++ * DESCRIPTION: Check the status bits of all enabled GPEs and return TRUE if any
275 ++ * of them is set or FALSE otherwise.
276 ++ *
277 ++ ******************************************************************************/
278 ++u32 acpi_any_gpe_status_set(void)
279 ++{
280 ++ acpi_status status;
281 ++ u8 ret;
282 ++
283 ++ ACPI_FUNCTION_TRACE(acpi_any_gpe_status_set);
284 ++
285 ++ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
286 ++ if (ACPI_FAILURE(status)) {
287 ++ return (FALSE);
288 ++ }
289 ++
290 ++ ret = acpi_hw_check_all_gpes();
291 ++ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
292 ++
293 ++ return (ret);
294 ++}
295 ++
296 ++ACPI_EXPORT_SYMBOL(acpi_any_gpe_status_set)
297 ++
298 + /*******************************************************************************
299 + *
300 + * FUNCTION: acpi_install_gpe_block
301 +diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
302 +index 565bd3f29f31..b1d7d5f92495 100644
303 +--- a/drivers/acpi/acpica/hwgpe.c
304 ++++ b/drivers/acpi/acpica/hwgpe.c
305 +@@ -444,6 +444,53 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
306 + return (AE_OK);
307 + }
308 +
309 ++/******************************************************************************
310 ++ *
311 ++ * FUNCTION: acpi_hw_get_gpe_block_status
312 ++ *
313 ++ * PARAMETERS: gpe_xrupt_info - GPE Interrupt info
314 ++ * gpe_block - Gpe Block info
315 ++ *
316 ++ * RETURN: Success
317 ++ *
318 ++ * DESCRIPTION: Produce a combined GPE status bits mask for the given block.
319 ++ *
320 ++ ******************************************************************************/
321 ++
322 ++static acpi_status
323 ++acpi_hw_get_gpe_block_status(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
324 ++ struct acpi_gpe_block_info *gpe_block,
325 ++ void *ret_ptr)
326 ++{
327 ++ struct acpi_gpe_register_info *gpe_register_info;
328 ++ u64 in_enable, in_status;
329 ++ acpi_status status;
330 ++ u8 *ret = ret_ptr;
331 ++ u32 i;
332 ++
333 ++ /* Examine each GPE Register within the block */
334 ++
335 ++ for (i = 0; i < gpe_block->register_count; i++) {
336 ++ gpe_register_info = &gpe_block->register_info[i];
337 ++
338 ++ status = acpi_hw_read(&in_enable,
339 ++ &gpe_register_info->enable_address);
340 ++ if (ACPI_FAILURE(status)) {
341 ++ continue;
342 ++ }
343 ++
344 ++ status = acpi_hw_read(&in_status,
345 ++ &gpe_register_info->status_address);
346 ++ if (ACPI_FAILURE(status)) {
347 ++ continue;
348 ++ }
349 ++
350 ++ *ret |= in_enable & in_status;
351 ++ }
352 ++
353 ++ return (AE_OK);
354 ++}
355 ++
356 + /******************************************************************************
357 + *
358 + * FUNCTION: acpi_hw_disable_all_gpes
359 +@@ -510,4 +557,28 @@ acpi_status acpi_hw_enable_all_wakeup_gpes(void)
360 + return_ACPI_STATUS(status);
361 + }
362 +
363 ++/******************************************************************************
364 ++ *
365 ++ * FUNCTION: acpi_hw_check_all_gpes
366 ++ *
367 ++ * PARAMETERS: None
368 ++ *
369 ++ * RETURN: Combined status of all GPEs
370 ++ *
371 ++ * DESCRIPTION: Check all enabled GPEs in all GPE blocks and return TRUE if the
372 ++ * status bit is set for at least one of them or FALSE otherwise.
373 ++ *
374 ++ ******************************************************************************/
375 ++
376 ++u8 acpi_hw_check_all_gpes(void)
377 ++{
378 ++ u8 ret = 0;
379 ++
380 ++ ACPI_FUNCTION_TRACE(acpi_hw_check_all_gpes);
381 ++
382 ++ (void)acpi_ev_walk_gpe_list(acpi_hw_get_gpe_block_status, &ret);
383 ++
384 ++ return (ret != 0);
385 ++}
386 ++
387 + #endif /* !ACPI_REDUCED_HARDWARE */
388 +diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
389 +index d05be13c1022..bd74c7836675 100644
390 +--- a/drivers/acpi/ec.c
391 ++++ b/drivers/acpi/ec.c
392 +@@ -179,6 +179,7 @@ EXPORT_SYMBOL(first_ec);
393 +
394 + static struct acpi_ec *boot_ec;
395 + static bool boot_ec_is_ecdt = false;
396 ++static struct workqueue_struct *ec_wq;
397 + static struct workqueue_struct *ec_query_wq;
398 +
399 + static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
400 +@@ -469,7 +470,7 @@ static void acpi_ec_submit_query(struct acpi_ec *ec)
401 + ec_dbg_evt("Command(%s) submitted/blocked",
402 + acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
403 + ec->nr_pending_queries++;
404 +- schedule_work(&ec->work);
405 ++ queue_work(ec_wq, &ec->work);
406 + }
407 + }
408 +
409 +@@ -535,7 +536,7 @@ static void acpi_ec_enable_event(struct acpi_ec *ec)
410 + #ifdef CONFIG_PM_SLEEP
411 + static void __acpi_ec_flush_work(void)
412 + {
413 +- flush_scheduled_work(); /* flush ec->work */
414 ++ drain_workqueue(ec_wq); /* flush ec->work */
415 + flush_workqueue(ec_query_wq); /* flush queries */
416 + }
417 +
418 +@@ -556,8 +557,8 @@ static void acpi_ec_disable_event(struct acpi_ec *ec)
419 +
420 + void acpi_ec_flush_work(void)
421 + {
422 +- /* Without ec_query_wq there is nothing to flush. */
423 +- if (!ec_query_wq)
424 ++ /* Without ec_wq there is nothing to flush. */
425 ++ if (!ec_wq)
426 + return;
427 +
428 + __acpi_ec_flush_work();
429 +@@ -2115,25 +2116,33 @@ static struct acpi_driver acpi_ec_driver = {
430 + .drv.pm = &acpi_ec_pm,
431 + };
432 +
433 +-static inline int acpi_ec_query_init(void)
434 ++static void acpi_ec_destroy_workqueues(void)
435 + {
436 +- if (!ec_query_wq) {
437 +- ec_query_wq = alloc_workqueue("kec_query", 0,
438 +- ec_max_queries);
439 +- if (!ec_query_wq)
440 +- return -ENODEV;
441 ++ if (ec_wq) {
442 ++ destroy_workqueue(ec_wq);
443 ++ ec_wq = NULL;
444 + }
445 +- return 0;
446 +-}
447 +-
448 +-static inline void acpi_ec_query_exit(void)
449 +-{
450 + if (ec_query_wq) {
451 + destroy_workqueue(ec_query_wq);
452 + ec_query_wq = NULL;
453 + }
454 + }
455 +
456 ++static int acpi_ec_init_workqueues(void)
457 ++{
458 ++ if (!ec_wq)
459 ++ ec_wq = alloc_ordered_workqueue("kec", 0);
460 ++
461 ++ if (!ec_query_wq)
462 ++ ec_query_wq = alloc_workqueue("kec_query", 0, ec_max_queries);
463 ++
464 ++ if (!ec_wq || !ec_query_wq) {
465 ++ acpi_ec_destroy_workqueues();
466 ++ return -ENODEV;
467 ++ }
468 ++ return 0;
469 ++}
470 ++
471 + static const struct dmi_system_id acpi_ec_no_wakeup[] = {
472 + {
473 + .ident = "Thinkpad X1 Carbon 6th",
474 +@@ -2164,8 +2173,7 @@ int __init acpi_ec_init(void)
475 + int result;
476 + int ecdt_fail, dsdt_fail;
477 +
478 +- /* register workqueue for _Qxx evaluations */
479 +- result = acpi_ec_query_init();
480 ++ result = acpi_ec_init_workqueues();
481 + if (result)
482 + return result;
483 +
484 +@@ -2196,6 +2204,6 @@ static void __exit acpi_ec_exit(void)
485 + {
486 +
487 + acpi_bus_unregister_driver(&acpi_ec_driver);
488 +- acpi_ec_query_exit();
489 ++ acpi_ec_destroy_workqueues();
490 + }
491 + #endif /* 0 */
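
The EC rework swaps schedule_work() on the shared system workqueue for a dedicated ordered queue, so acpi_ec_flush_work() can drain exactly the EC's items without flushing unrelated system-wide work. The pattern in isolation, sketched with illustrative names:

	#include <linux/workqueue.h>
	#include <linux/errno.h>

	static struct workqueue_struct *ev_wq;
	static struct work_struct ev_work;

	static void ev_work_fn(struct work_struct *work)
	{
		/* process one queued event */
	}

	static int ev_init(void)
	{
		/* Ordered queue: items run one at a time, in submission order. */
		ev_wq = alloc_ordered_workqueue("ev", 0);
		if (!ev_wq)
			return -ENOMEM;
		INIT_WORK(&ev_work, ev_work_fn);
		queue_work(ev_wq, &ev_work);	/* instead of schedule_work() */
		return 0;
	}

	static void ev_exit(void)
	{
		drain_workqueue(ev_wq);		/* flush only this queue's items */
		destroy_workqueue(ev_wq);
	}
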
492 +diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
493 +index 6747a279621b..5672fa8cb300 100644
494 +--- a/drivers/acpi/sleep.c
495 ++++ b/drivers/acpi/sleep.c
496 +@@ -987,21 +987,34 @@ static void acpi_s2idle_sync(void)
497 + acpi_os_wait_events_complete(); /* synchronize Notify handling */
498 + }
499 +
500 +-static void acpi_s2idle_wake(void)
501 ++static bool acpi_s2idle_wake(void)
502 + {
503 +- /*
504 +- * If IRQD_WAKEUP_ARMED is set for the SCI at this point, the SCI has
505 +- * not triggered while suspended, so bail out.
506 +- */
507 +- if (!acpi_sci_irq_valid() ||
508 +- irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
509 +- return;
510 ++ if (!acpi_sci_irq_valid())
511 ++ return pm_wakeup_pending();
512 ++
513 ++ while (pm_wakeup_pending()) {
514 ++ /*
515 ++ * If IRQD_WAKEUP_ARMED is set for the SCI at this point, the
516 ++ * SCI has not triggered while suspended, so bail out (the
517 ++ * wakeup is pending anyway and the SCI is not the source of
518 ++ * it).
519 ++ */
520 ++ if (irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
521 ++ return true;
522 ++
523 ++ /*
524 ++ * If there are no EC events to process and at least one of the
525 ++ * other enabled GPEs is active, the wakeup is regarded as a
526 ++ * genuine one.
527 ++ *
528 ++ * Note that the checks below must be carried out in this order
529 ++ * to avoid returning prematurely due to a change of the EC GPE
530 ++ * status bit from unset to set between the checks with the
531 ++ * status bits of all the other GPEs unset.
532 ++ */
533 ++ if (acpi_any_gpe_status_set() && !acpi_ec_dispatch_gpe())
534 ++ return true;
535 +
536 +- /*
537 +- * If there are EC events to process, the wakeup may be a spurious one
538 +- * coming from the EC.
539 +- */
540 +- if (acpi_ec_dispatch_gpe()) {
541 + /*
542 + * Cancel the wakeup and process all pending events in case
543 + * there are any wakeup ones in there.
544 +@@ -1014,8 +1027,19 @@ static void acpi_s2idle_wake(void)
545 +
546 + acpi_s2idle_sync();
547 +
548 ++ /*
549 ++ * The SCI is in the "suspended" state now and it cannot produce
550 ++ * new wakeup events till the rearming below, so if any of them
551 ++ * are pending here, they must be resulting from the processing
552 ++ * of EC events above or coming from somewhere else.
553 ++ */
554 ++ if (pm_wakeup_pending())
555 ++ return true;
556 ++
557 + rearm_wake_irq(acpi_sci_irq);
558 + }
559 ++
560 ++ return false;
561 + }
562 +
563 + static void acpi_s2idle_restore_early(void)
564 +diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
565 +index 36cf13eee6b8..68413bf9cf87 100644
566 +--- a/drivers/bus/moxtet.c
567 ++++ b/drivers/bus/moxtet.c
568 +@@ -466,7 +466,7 @@ static ssize_t input_read(struct file *file, char __user *buf, size_t len,
569 + {
570 + struct moxtet *moxtet = file->private_data;
571 + u8 bin[TURRIS_MOX_MAX_MODULES];
572 +- u8 hex[sizeof(buf) * 2 + 1];
573 ++ u8 hex[sizeof(bin) * 2 + 1];
574 + int ret, n;
575 +
576 + ret = moxtet_spi_read(moxtet, bin);
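
The one-character fix above is the classic sizeof-on-a-pointer bug: buf is a char __user * function parameter, so sizeof(buf) evaluates to the pointer size (8 on 64-bit), while sizeof(bin) gives the intended array length, leaving hex[] far too small. A self-contained userspace illustration (the module count is illustrative):

	#include <stdio.h>

	#define TURRIS_MOX_MAX_MODULES 24	/* illustrative value */

	static void sizes(const char *buf)	/* array argument decays to pointer */
	{
		char hex[sizeof(buf) * 2 + 1];	/* BUG shape: 2 * sizeof(char *) + 1 */

		printf("sizeof(buf) = %zu, sizeof(hex) = %zu\n",
		       sizeof(buf), sizeof(hex));	/* e.g. 8 and 17, not 24 and 49 */
	}

	int main(void)
	{
		char bin[TURRIS_MOX_MAX_MODULES] = "";

		sizes(bin);
		return 0;
	}
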
577 +diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c
578 +index 1ff4fb1def7c..800532595ea7 100644
579 +--- a/drivers/char/ipmi/ipmb_dev_int.c
580 ++++ b/drivers/char/ipmi/ipmb_dev_int.c
581 +@@ -253,7 +253,7 @@ static int ipmb_slave_cb(struct i2c_client *client,
582 + break;
583 +
584 + case I2C_SLAVE_WRITE_RECEIVED:
585 +- if (ipmb_dev->msg_idx >= sizeof(struct ipmb_msg))
586 ++ if (ipmb_dev->msg_idx >= sizeof(struct ipmb_msg) - 1)
587 + break;
588 +
589 + buf[++ipmb_dev->msg_idx] = *val;
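
The off-by-one comes from the pre-increment store buf[++msg_idx]: when msg_idx already equals sizeof(struct ipmb_msg) - 1, the increment would write one byte past the buffer, so that is where the bound must sit. In miniature (sizes and names are illustrative):

	#include <stdint.h>
	#include <stddef.h>

	#define MSG_SIZE 16	/* stand-in for sizeof(struct ipmb_msg) */

	struct rx {
		size_t msg_idx;		/* slot 0 already holds the length byte */
		uint8_t buf[MSG_SIZE];
	};

	/* Returns 0 if the byte was stored, -1 once the buffer is full. */
	static int rx_byte(struct rx *r, uint8_t val)
	{
		/* buf[++msg_idx] pre-increments, so msg_idx == MSG_SIZE - 1
		 * already names the last element; >= MSG_SIZE let one escape. */
		if (r->msg_idx >= MSG_SIZE - 1)
			return -1;
		r->buf[++r->msg_idx] = val;
		return 0;
	}

	int main(void)
	{
		struct rx r = { 0 };
		int stored = 0;

		while (rx_byte(&r, 0xab) == 0)
			stored++;
		return stored == MSG_SIZE - 1 ? 0 : 1;	/* buf[MSG_SIZE] never written */
	}
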
590 +diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
591 +index 7243b88f81d8..69e0d90460e6 100644
592 +--- a/drivers/edac/edac_mc.c
593 ++++ b/drivers/edac/edac_mc.c
594 +@@ -505,16 +505,10 @@ void edac_mc_free(struct mem_ctl_info *mci)
595 + {
596 + edac_dbg(1, "\n");
597 +
598 +- /* If we're not yet registered with sysfs free only what was allocated
599 +- * in edac_mc_alloc().
600 +- */
601 +- if (!device_is_registered(&mci->dev)) {
602 +- _edac_mc_free(mci);
603 +- return;
604 +- }
605 ++ if (device_is_registered(&mci->dev))
606 ++ edac_unregister_sysfs(mci);
607 +
608 +- /* the mci instance is freed here, when the sysfs object is dropped */
609 +- edac_unregister_sysfs(mci);
610 ++ _edac_mc_free(mci);
611 + }
612 + EXPORT_SYMBOL_GPL(edac_mc_free);
613 +
614 +diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
615 +index 0367554e7437..c70ec0a306d8 100644
616 +--- a/drivers/edac/edac_mc_sysfs.c
617 ++++ b/drivers/edac/edac_mc_sysfs.c
618 +@@ -276,10 +276,7 @@ static const struct attribute_group *csrow_attr_groups[] = {
619 +
620 + static void csrow_attr_release(struct device *dev)
621 + {
622 +- struct csrow_info *csrow = container_of(dev, struct csrow_info, dev);
623 +-
624 +- edac_dbg(1, "device %s released\n", dev_name(dev));
625 +- kfree(csrow);
626 ++ /* release device with _edac_mc_free() */
627 + }
628 +
629 + static const struct device_type csrow_attr_type = {
630 +@@ -447,8 +444,7 @@ error:
631 + csrow = mci->csrows[i];
632 + if (!nr_pages_per_csrow(csrow))
633 + continue;
634 +-
635 +- device_del(&mci->csrows[i]->dev);
636 ++ device_unregister(&mci->csrows[i]->dev);
637 + }
638 +
639 + return err;
640 +@@ -608,10 +604,7 @@ static const struct attribute_group *dimm_attr_groups[] = {
641 +
642 + static void dimm_attr_release(struct device *dev)
643 + {
644 +- struct dimm_info *dimm = container_of(dev, struct dimm_info, dev);
645 +-
646 +- edac_dbg(1, "device %s released\n", dev_name(dev));
647 +- kfree(dimm);
648 ++ /* release device with _edac_mc_free() */
649 + }
650 +
651 + static const struct device_type dimm_attr_type = {
652 +@@ -893,10 +886,7 @@ static const struct attribute_group *mci_attr_groups[] = {
653 +
654 + static void mci_attr_release(struct device *dev)
655 + {
656 +- struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
657 +-
658 +- edac_dbg(1, "device %s released\n", dev_name(dev));
659 +- kfree(mci);
660 ++ /* release device with _edac_mc_free() */
661 + }
662 +
663 + static const struct device_type mci_attr_type = {
664 +diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
665 +index a9748b5198e6..67f9f82e0db0 100644
666 +--- a/drivers/gpio/gpio-xilinx.c
667 ++++ b/drivers/gpio/gpio-xilinx.c
668 +@@ -147,9 +147,10 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
669 + for (i = 0; i < gc->ngpio; i++) {
670 + if (*mask == 0)
671 + break;
672 ++ /* Once finished with an index write it out to the register */
673 + if (index != xgpio_index(chip, i)) {
674 + xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
675 +- xgpio_regoffset(chip, i),
676 ++ index * XGPIO_CHANNEL_OFFSET,
677 + chip->gpio_state[index]);
678 + spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
679 + index = xgpio_index(chip, i);
680 +@@ -165,7 +166,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
681 + }
682 +
683 + xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
684 +- xgpio_regoffset(chip, i), chip->gpio_state[index]);
685 ++ index * XGPIO_CHANNEL_OFFSET, chip->gpio_state[index]);
686 +
687 + spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
688 + }
689 +diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
690 +index b696e4598a24..b0e79bed5952 100644
691 +--- a/drivers/gpio/gpiolib-of.c
692 ++++ b/drivers/gpio/gpiolib-of.c
693 +@@ -147,10 +147,6 @@ static void of_gpio_flags_quirks(struct device_node *np,
694 + if (of_property_read_bool(np, "cd-inverted"))
695 + *flags ^= OF_GPIO_ACTIVE_LOW;
696 + }
697 +- if (!strcmp(propname, "wp-gpios")) {
698 +- if (of_property_read_bool(np, "wp-inverted"))
699 +- *flags ^= OF_GPIO_ACTIVE_LOW;
700 +- }
701 + }
702 + /*
703 + * Some GPIO fixed regulator quirks.
704 +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
705 +index 78a16e42f222..bcfbfded9ba3 100644
706 +--- a/drivers/gpio/gpiolib.c
707 ++++ b/drivers/gpio/gpiolib.c
708 +@@ -3371,6 +3371,17 @@ int gpiod_is_active_low(const struct gpio_desc *desc)
709 + }
710 + EXPORT_SYMBOL_GPL(gpiod_is_active_low);
711 +
712 ++/**
713 ++ * gpiod_toggle_active_low - toggle whether a GPIO is active-low or not
714 ++ * @desc: the gpio descriptor to change
715 ++ */
716 ++void gpiod_toggle_active_low(struct gpio_desc *desc)
717 ++{
718 ++ VALIDATE_DESC_VOID(desc);
719 ++ change_bit(FLAG_ACTIVE_LOW, &desc->flags);
720 ++}
721 ++EXPORT_SYMBOL_GPL(gpiod_toggle_active_low);
722 ++
723 + /* I/O calls are only valid after configuration completed; the relevant
724 + * "is this a valid GPIO" error checks should already have been done.
725 + *
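
The new helper replaces the "wp-inverted" quirk deleted from gpiolib-of.c above: rather than gpiolib silently flipping polarity during lookup, the consumer flips it explicitly afterwards. A hedged usage sketch -- the property handling here is illustrative, not necessarily how the MMC core consumes it:

	#include <linux/gpio/consumer.h>
	#include <linux/property.h>
	#include <linux/err.h>

	static int wp_gpio_setup(struct device *dev, struct gpio_desc **out)
	{
		struct gpio_desc *wp;

		wp = devm_gpiod_get_optional(dev, "wp", GPIOD_IN);
		if (IS_ERR(wp))
			return PTR_ERR(wp);

		/* Polarity quirk now lives with the consumer, not in gpiolib. */
		if (wp && device_property_read_bool(dev, "wp-inverted"))
			gpiod_toggle_active_low(wp);

		*out = wp;
		return 0;
	}
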
726 +diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
727 +index b2f96a101124..7a63cf8e85ed 100644
728 +--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
729 ++++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
730 +@@ -39,21 +39,39 @@
731 + #define SMU_11_0_PP_OVERDRIVE_VERSION 0x0800
732 + #define SMU_11_0_PP_POWERSAVINGCLOCK_VERSION 0x0100
733 +
734 ++enum SMU_11_0_ODFEATURE_CAP {
735 ++ SMU_11_0_ODCAP_GFXCLK_LIMITS = 0,
736 ++ SMU_11_0_ODCAP_GFXCLK_CURVE,
737 ++ SMU_11_0_ODCAP_UCLK_MAX,
738 ++ SMU_11_0_ODCAP_POWER_LIMIT,
739 ++ SMU_11_0_ODCAP_FAN_ACOUSTIC_LIMIT,
740 ++ SMU_11_0_ODCAP_FAN_SPEED_MIN,
741 ++ SMU_11_0_ODCAP_TEMPERATURE_FAN,
742 ++ SMU_11_0_ODCAP_TEMPERATURE_SYSTEM,
743 ++ SMU_11_0_ODCAP_MEMORY_TIMING_TUNE,
744 ++ SMU_11_0_ODCAP_FAN_ZERO_RPM_CONTROL,
745 ++ SMU_11_0_ODCAP_AUTO_UV_ENGINE,
746 ++ SMU_11_0_ODCAP_AUTO_OC_ENGINE,
747 ++ SMU_11_0_ODCAP_AUTO_OC_MEMORY,
748 ++ SMU_11_0_ODCAP_FAN_CURVE,
749 ++ SMU_11_0_ODCAP_COUNT,
750 ++};
751 ++
752 + enum SMU_11_0_ODFEATURE_ID {
753 +- SMU_11_0_ODFEATURE_GFXCLK_LIMITS = 1 << 0, //GFXCLK Limit feature
754 +- SMU_11_0_ODFEATURE_GFXCLK_CURVE = 1 << 1, //GFXCLK Curve feature
755 +- SMU_11_0_ODFEATURE_UCLK_MAX = 1 << 2, //UCLK Limit feature
756 +- SMU_11_0_ODFEATURE_POWER_LIMIT = 1 << 3, //Power Limit feature
757 +- SMU_11_0_ODFEATURE_FAN_ACOUSTIC_LIMIT = 1 << 4, //Fan Acoustic RPM feature
758 +- SMU_11_0_ODFEATURE_FAN_SPEED_MIN = 1 << 5, //Minimum Fan Speed feature
759 +- SMU_11_0_ODFEATURE_TEMPERATURE_FAN = 1 << 6, //Fan Target Temperature Limit feature
760 +- SMU_11_0_ODFEATURE_TEMPERATURE_SYSTEM = 1 << 7, //Operating Temperature Limit feature
761 +- SMU_11_0_ODFEATURE_MEMORY_TIMING_TUNE = 1 << 8, //AC Timing Tuning feature
762 +- SMU_11_0_ODFEATURE_FAN_ZERO_RPM_CONTROL = 1 << 9, //Zero RPM feature
763 +- SMU_11_0_ODFEATURE_AUTO_UV_ENGINE = 1 << 10, //Auto Under Volt GFXCLK feature
764 +- SMU_11_0_ODFEATURE_AUTO_OC_ENGINE = 1 << 11, //Auto Over Clock GFXCLK feature
765 +- SMU_11_0_ODFEATURE_AUTO_OC_MEMORY = 1 << 12, //Auto Over Clock MCLK feature
766 +- SMU_11_0_ODFEATURE_FAN_CURVE = 1 << 13, //VICTOR TODO
767 ++ SMU_11_0_ODFEATURE_GFXCLK_LIMITS = 1 << SMU_11_0_ODCAP_GFXCLK_LIMITS, //GFXCLK Limit feature
768 ++ SMU_11_0_ODFEATURE_GFXCLK_CURVE = 1 << SMU_11_0_ODCAP_GFXCLK_CURVE, //GFXCLK Curve feature
769 ++ SMU_11_0_ODFEATURE_UCLK_MAX = 1 << SMU_11_0_ODCAP_UCLK_MAX, //UCLK Limit feature
770 ++ SMU_11_0_ODFEATURE_POWER_LIMIT = 1 << SMU_11_0_ODCAP_POWER_LIMIT, //Power Limit feature
771 ++ SMU_11_0_ODFEATURE_FAN_ACOUSTIC_LIMIT = 1 << SMU_11_0_ODCAP_FAN_ACOUSTIC_LIMIT, //Fan Acoustic RPM feature
772 ++ SMU_11_0_ODFEATURE_FAN_SPEED_MIN = 1 << SMU_11_0_ODCAP_FAN_SPEED_MIN, //Minimum Fan Speed feature
773 ++ SMU_11_0_ODFEATURE_TEMPERATURE_FAN = 1 << SMU_11_0_ODCAP_TEMPERATURE_FAN, //Fan Target Temperature Limit feature
774 ++ SMU_11_0_ODFEATURE_TEMPERATURE_SYSTEM = 1 << SMU_11_0_ODCAP_TEMPERATURE_SYSTEM, //Operating Temperature Limit feature
775 ++ SMU_11_0_ODFEATURE_MEMORY_TIMING_TUNE = 1 << SMU_11_0_ODCAP_MEMORY_TIMING_TUNE, //AC Timing Tuning feature
776 ++ SMU_11_0_ODFEATURE_FAN_ZERO_RPM_CONTROL = 1 << SMU_11_0_ODCAP_FAN_ZERO_RPM_CONTROL, //Zero RPM feature
777 ++ SMU_11_0_ODFEATURE_AUTO_UV_ENGINE = 1 << SMU_11_0_ODCAP_AUTO_UV_ENGINE, //Auto Under Volt GFXCLK feature
778 ++ SMU_11_0_ODFEATURE_AUTO_OC_ENGINE = 1 << SMU_11_0_ODCAP_AUTO_OC_ENGINE, //Auto Over Clock GFXCLK feature
779 ++ SMU_11_0_ODFEATURE_AUTO_OC_MEMORY = 1 << SMU_11_0_ODCAP_AUTO_OC_MEMORY, //Auto Over Clock MCLK feature
780 ++ SMU_11_0_ODFEATURE_FAN_CURVE = 1 << SMU_11_0_ODCAP_FAN_CURVE, //Fan Curve feature
781 + SMU_11_0_ODFEATURE_COUNT = 14,
782 + };
783 + #define SMU_11_0_MAX_ODFEATURE 32 //Maximum Number of OD Features
784 +diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
785 +index e3f8c45e7467..2cf81cafc669 100644
786 +--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
787 ++++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
788 +@@ -705,9 +705,9 @@ static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu
789 + return dpm_desc->SnapToDiscrete == 0 ? true : false;
790 + }
791 +
792 +-static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_ID feature)
793 ++static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_CAP cap)
794 + {
795 +- return od_table->cap[feature];
796 ++ return od_table->cap[cap];
797 + }
798 +
799 + static void navi10_od_setting_get_range(struct smu_11_0_overdrive_table *od_table,
800 +@@ -815,7 +815,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
801 + case SMU_OD_SCLK:
802 + if (!smu->od_enabled || !od_table || !od_settings)
803 + break;
804 +- if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS))
805 ++ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS))
806 + break;
807 + size += sprintf(buf + size, "OD_SCLK:\n");
808 + size += sprintf(buf + size, "0: %uMhz\n1: %uMhz\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
809 +@@ -823,7 +823,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
810 + case SMU_OD_MCLK:
811 + if (!smu->od_enabled || !od_table || !od_settings)
812 + break;
813 +- if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX))
814 ++ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX))
815 + break;
816 + size += sprintf(buf + size, "OD_MCLK:\n");
817 + size += sprintf(buf + size, "1: %uMHz\n", od_table->UclkFmax);
818 +@@ -831,7 +831,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
819 + case SMU_OD_VDDC_CURVE:
820 + if (!smu->od_enabled || !od_table || !od_settings)
821 + break;
822 +- if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE))
823 ++ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE))
824 + break;
825 + size += sprintf(buf + size, "OD_VDDC_CURVE:\n");
826 + for (i = 0; i < 3; i++) {
827 +@@ -856,7 +856,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
828 + break;
829 + size = sprintf(buf, "%s:\n", "OD_RANGE");
830 +
831 +- if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS)) {
832 ++ if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
833 + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN,
834 + &min_value, NULL);
835 + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMAX,
836 +@@ -865,14 +865,14 @@ static int navi10_print_clk_levels(struct smu_context *smu,
837 + min_value, max_value);
838 + }
839 +
840 +- if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX)) {
841 ++ if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
842 + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX,
843 + &min_value, &max_value);
844 + size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
845 + min_value, max_value);
846 + }
847 +
848 +- if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE)) {
849 ++ if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
850 + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1,
851 + &min_value, &max_value);
852 + size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
853 +@@ -1956,7 +1956,7 @@ static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABL
854 +
855 + switch (type) {
856 + case PP_OD_EDIT_SCLK_VDDC_TABLE:
857 +- if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS)) {
858 ++ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
859 + pr_warn("GFXCLK_LIMITS not supported!\n");
860 + return -ENOTSUPP;
861 + }
862 +@@ -2002,7 +2002,7 @@ static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABL
863 + }
864 + break;
865 + case PP_OD_EDIT_MCLK_VDDC_TABLE:
866 +- if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX)) {
867 ++ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
868 + pr_warn("UCLK_MAX not supported!\n");
869 + return -ENOTSUPP;
870 + }
871 +@@ -2043,7 +2043,7 @@ static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABL
872 + }
873 + break;
874 + case PP_OD_EDIT_VDDC_CURVE:
875 +- if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE)) {
876 ++ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
877 + pr_warn("GFXCLK_CURVE not supported!\n");
878 + return -ENOTSUPP;
879 + }
880 +diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
881 +index 141ba31cf548..6cd90cb4b6b1 100644
882 +--- a/drivers/gpu/drm/drm_dp_mst_topology.c
883 ++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
884 +@@ -3772,7 +3772,8 @@ drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
885 + else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
886 + guid = msg->u.resource_stat.guid;
887 +
888 +- mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
889 ++ if (guid)
890 ++ mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
891 + } else {
892 + mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
893 + }
894 +diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
895 +index d6d2e6fb8674..40036eff709c 100644
896 +--- a/drivers/gpu/drm/i915/i915_pmu.c
897 ++++ b/drivers/gpu/drm/i915/i915_pmu.c
898 +@@ -594,8 +594,10 @@ static void i915_pmu_enable(struct perf_event *event)
899 + container_of(event->pmu, typeof(*i915), pmu.base);
900 + unsigned int bit = event_enabled_bit(event);
901 + struct i915_pmu *pmu = &i915->pmu;
902 ++ intel_wakeref_t wakeref;
903 + unsigned long flags;
904 +
905 ++ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
906 + spin_lock_irqsave(&pmu->lock, flags);
907 +
908 + /*
909 +@@ -605,6 +607,14 @@ static void i915_pmu_enable(struct perf_event *event)
910 + BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS);
911 + GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
912 + GEM_BUG_ON(pmu->enable_count[bit] == ~0);
913 ++
914 ++ if (pmu->enable_count[bit] == 0 &&
915 ++ config_enabled_mask(I915_PMU_RC6_RESIDENCY) & BIT_ULL(bit)) {
916 ++ pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = 0;
917 ++ pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
918 ++ pmu->sleep_last = ktime_get();
919 ++ }
920 ++
921 + pmu->enable |= BIT_ULL(bit);
922 + pmu->enable_count[bit]++;
923 +
924 +@@ -645,6 +655,8 @@ static void i915_pmu_enable(struct perf_event *event)
925 + * an existing non-zero value.
926 + */
927 + local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
928 ++
929 ++ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
930 + }
931 +
932 + static void i915_pmu_disable(struct perf_event *event)
933 +diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
934 +index 88b431a267af..273d67e251c2 100644
935 +--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
936 ++++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
937 +@@ -166,6 +166,7 @@ panfrost_lookup_bos(struct drm_device *dev,
938 + break;
939 + }
940 +
941 ++ atomic_inc(&bo->gpu_usecount);
942 + job->mappings[i] = mapping;
943 + }
944 +
945 +diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
946 +index ca1bc9019600..b3517ff9630c 100644
947 +--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
948 ++++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
949 +@@ -30,6 +30,12 @@ struct panfrost_gem_object {
950 + struct mutex lock;
951 + } mappings;
952 +
953 ++ /*
954 ++ * Count the number of jobs referencing this BO so we don't let the
955 ++ * shrinker reclaim this object prematurely.
956 ++ */
957 ++ atomic_t gpu_usecount;
958 ++
959 + bool noexec :1;
960 + bool is_heap :1;
961 + };
962 +diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
963 +index f5dd7b29bc95..288e46c40673 100644
964 +--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
965 ++++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
966 +@@ -41,6 +41,9 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj)
967 + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
968 + struct panfrost_gem_object *bo = to_panfrost_bo(obj);
969 +
970 ++ if (atomic_read(&bo->gpu_usecount))
971 ++ return false;
972 ++
973 + if (!mutex_trylock(&shmem->pages_lock))
974 + return false;
975 +
976 +diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
977 +index e364ee00f3d0..4d383831c1fc 100644
978 +--- a/drivers/gpu/drm/panfrost/panfrost_job.c
979 ++++ b/drivers/gpu/drm/panfrost/panfrost_job.c
980 +@@ -269,8 +269,13 @@ static void panfrost_job_cleanup(struct kref *ref)
981 + dma_fence_put(job->render_done_fence);
982 +
983 + if (job->mappings) {
984 +- for (i = 0; i < job->bo_count; i++)
985 ++ for (i = 0; i < job->bo_count; i++) {
986 ++ if (!job->mappings[i])
987 ++ break;
988 ++
989 ++ atomic_dec(&job->mappings[i]->obj->gpu_usecount);
990 + panfrost_gem_mapping_put(job->mappings[i]);
991 ++ }
992 + kvfree(job->mappings);
993 + }
994 +
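
Taken together, the three panfrost hunks implement a plain pin count: submission increments gpu_usecount for every mapping a job references, the shrinker declines to purge any BO with a non-zero count, and job cleanup decrements it again. The shape of that pattern, sketched generically:

	#include <linux/atomic.h>
	#include <linux/types.h>

	struct pinned_bo {
		atomic_t gpu_usecount;	/* jobs currently referencing this BO */
	};

	static void job_pin(struct pinned_bo *bo)
	{
		atomic_inc(&bo->gpu_usecount);	/* in the submit/lookup path */
	}

	static void job_unpin(struct pinned_bo *bo)
	{
		atomic_dec(&bo->gpu_usecount);	/* in job cleanup */
	}

	static bool shrinker_can_purge(struct pinned_bo *bo)
	{
		/* Never reclaim while a queued or running job still needs it. */
		return atomic_read(&bo->gpu_usecount) == 0;
	}
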
995 +diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
996 +index a5757b11b730..5b54eff12cc0 100644
997 +--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
998 ++++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
999 +@@ -85,7 +85,6 @@ static int sun4i_drv_bind(struct device *dev)
1000 + }
1001 +
1002 + drm_mode_config_init(drm);
1003 +- drm->mode_config.allow_fb_modifiers = true;
1004 +
1005 + ret = component_bind_all(drm->dev, drm);
1006 + if (ret) {
1007 +diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
1008 +index 5bd60ded3d81..909eba43664a 100644
1009 +--- a/drivers/gpu/drm/vgem/vgem_drv.c
1010 ++++ b/drivers/gpu/drm/vgem/vgem_drv.c
1011 +@@ -196,9 +196,10 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
1012 + return ERR_CAST(obj);
1013 +
1014 + ret = drm_gem_handle_create(file, &obj->base, handle);
1015 +- drm_gem_object_put_unlocked(&obj->base);
1016 +- if (ret)
1017 ++ if (ret) {
1018 ++ drm_gem_object_put_unlocked(&obj->base);
1019 + return ERR_PTR(ret);
1020 ++ }
1021 +
1022 + return &obj->base;
1023 + }
1024 +@@ -221,7 +222,9 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
1025 + args->size = gem_object->size;
1026 + args->pitch = pitch;
1027 +
1028 +- DRM_DEBUG("Created object of size %lld\n", size);
1029 ++ drm_gem_object_put_unlocked(gem_object);
1030 ++
1031 ++ DRM_DEBUG("Created object of size %llu\n", args->size);
1032 +
1033 + return 0;
1034 + }
1035 +diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c
1036 +index f01f4887fb2e..a91ed01abb68 100644
1037 +--- a/drivers/hwmon/pmbus/ltc2978.c
1038 ++++ b/drivers/hwmon/pmbus/ltc2978.c
1039 +@@ -82,8 +82,8 @@ enum chips { ltc2974, ltc2975, ltc2977, ltc2978, ltc2980, ltc3880, ltc3882,
1040 +
1041 + #define LTC_POLL_TIMEOUT 100 /* in milli-seconds */
1042 +
1043 +-#define LTC_NOT_BUSY BIT(5)
1044 +-#define LTC_NOT_PENDING BIT(4)
1045 ++#define LTC_NOT_BUSY BIT(6)
1046 ++#define LTC_NOT_PENDING BIT(5)
1047 +
1048 + /*
1049 + * LTC2978 clears peak data whenever the CLEAR_FAULTS command is executed, which
1050 +diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
1051 +index 6eb6d2717ca5..2b4d80393bd0 100644
1052 +--- a/drivers/infiniband/core/security.c
1053 ++++ b/drivers/infiniband/core/security.c
1054 +@@ -339,22 +339,16 @@ static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
1055 + if (!new_pps)
1056 + return NULL;
1057 +
1058 +- if (qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) {
1059 +- if (!qp_pps) {
1060 +- new_pps->main.port_num = qp_attr->port_num;
1061 +- new_pps->main.pkey_index = qp_attr->pkey_index;
1062 +- } else {
1063 +- new_pps->main.port_num = (qp_attr_mask & IB_QP_PORT) ?
1064 +- qp_attr->port_num :
1065 +- qp_pps->main.port_num;
1066 +-
1067 +- new_pps->main.pkey_index =
1068 +- (qp_attr_mask & IB_QP_PKEY_INDEX) ?
1069 +- qp_attr->pkey_index :
1070 +- qp_pps->main.pkey_index;
1071 +- }
1072 ++ if (qp_attr_mask & IB_QP_PORT)
1073 ++ new_pps->main.port_num =
1074 ++ (qp_pps) ? qp_pps->main.port_num : qp_attr->port_num;
1075 ++ if (qp_attr_mask & IB_QP_PKEY_INDEX)
1076 ++ new_pps->main.pkey_index = (qp_pps) ? qp_pps->main.pkey_index :
1077 ++ qp_attr->pkey_index;
1078 ++ if ((qp_attr_mask & IB_QP_PKEY_INDEX) && (qp_attr_mask & IB_QP_PORT))
1079 + new_pps->main.state = IB_PORT_PKEY_VALID;
1080 +- } else if (qp_pps) {
1081 ++
1082 ++ if (!(qp_attr_mask & (IB_QP_PKEY_INDEX || IB_QP_PORT)) && qp_pps) {
1083 + new_pps->main.port_num = qp_pps->main.port_num;
1084 + new_pps->main.pkey_index = qp_pps->main.pkey_index;
1085 + if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
1086 +diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
1087 +index d1407fa378e8..1235ffb2389b 100644
1088 +--- a/drivers/infiniband/core/user_mad.c
1089 ++++ b/drivers/infiniband/core/user_mad.c
1090 +@@ -1312,6 +1312,9 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
1091 + struct ib_umad_file *file;
1092 + int id;
1093 +
1094 ++ cdev_device_del(&port->sm_cdev, &port->sm_dev);
1095 ++ cdev_device_del(&port->cdev, &port->dev);
1096 ++
1097 + mutex_lock(&port->file_mutex);
1098 +
1099 + /* Mark ib_dev NULL and block ioctl or other file ops to progress
1100 +@@ -1331,8 +1334,6 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
1101 +
1102 + mutex_unlock(&port->file_mutex);
1103 +
1104 +- cdev_device_del(&port->sm_cdev, &port->sm_dev);
1105 +- cdev_device_del(&port->cdev, &port->dev);
1106 + ida_free(&umad_ida, port->dev_num);
1107 +
1108 + /* balances device_initialize() */
1109 +diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
1110 +index 06ed32c8662f..86e93ac46d75 100644
1111 +--- a/drivers/infiniband/core/uverbs_cmd.c
1112 ++++ b/drivers/infiniband/core/uverbs_cmd.c
1113 +@@ -2720,12 +2720,6 @@ static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs,
1114 + return 0;
1115 + }
1116 +
1117 +-static size_t kern_spec_filter_sz(const struct ib_uverbs_flow_spec_hdr *spec)
1118 +-{
1119 +- /* Returns user space filter size, includes padding */
1120 +- return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
1121 +-}
1122 +-
1123 + static ssize_t spec_filter_size(const void *kern_spec_filter, u16 kern_filter_size,
1124 + u16 ib_real_filter_sz)
1125 + {
1126 +@@ -2869,11 +2863,16 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
1127 + static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
1128 + union ib_flow_spec *ib_spec)
1129 + {
1130 +- ssize_t kern_filter_sz;
1131 ++ size_t kern_filter_sz;
1132 + void *kern_spec_mask;
1133 + void *kern_spec_val;
1134 +
1135 +- kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);
1136 ++ if (check_sub_overflow((size_t)kern_spec->hdr.size,
1137 ++ sizeof(struct ib_uverbs_flow_spec_hdr),
1138 ++ &kern_filter_sz))
1139 ++ return -EINVAL;
1140 ++
1141 ++ kern_filter_sz /= 2;
1142 +
1143 + kern_spec_val = (void *)kern_spec +
1144 + sizeof(struct ib_uverbs_flow_spec_hdr);
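
check_sub_overflow() is the <linux/overflow.h> wrapper around the compiler's checked arithmetic: it stores a - b through the third argument and returns true if the subtraction wrapped, which is precisely what happened in the removed helper when a user-supplied hdr.size was smaller than the header itself. A userspace stand-in (the value 16 is a hypothetical header size):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	/* Userspace stand-in for the kernel's check_sub_overflow() macro:
	 * stores a - b in *d and returns true if the subtraction wrapped. */
	#define check_sub_overflow(a, b, d) __builtin_sub_overflow(a, b, d)

	int main(void)
	{
		size_t hdr_size = 4;	/* user-controlled, smaller than the header */
		size_t filter_sz;

		if (check_sub_overflow(hdr_size, (size_t)16, &filter_sz)) {
			puts("rejected: spec size smaller than its header");
			return 1;	/* the old helper would have wrapped instead */
		}
		filter_sz /= 2;		/* filter and mask each take half */
		printf("filter bytes: %zu\n", filter_sz);
		return 0;
	}
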
1145 +diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
1146 +index ee1182f9b627..d69dece3b1d5 100644
1147 +--- a/drivers/infiniband/hw/cxgb4/cm.c
1148 ++++ b/drivers/infiniband/hw/cxgb4/cm.c
1149 +@@ -3036,6 +3036,10 @@ static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
1150 + C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1151 + }
1152 +
1153 ++ /* As per draft-hilland-iwarp-verbs-v1.0, sec 6.2.3,
1154 ++ * when entering the TERM state the RNIC MUST initiate a CLOSE.
1155 ++ */
1156 ++ c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
1157 + c4iw_put_ep(&ep->com);
1158 + } else
1159 + pr_warn("TERM received tid %u no ep/qp\n", tid);
1160 +diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
1161 +index bbcac539777a..89ac2f9ae6dd 100644
1162 +--- a/drivers/infiniband/hw/cxgb4/qp.c
1163 ++++ b/drivers/infiniband/hw/cxgb4/qp.c
1164 +@@ -1948,10 +1948,10 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1165 + qhp->attr.layer_etype = attrs->layer_etype;
1166 + qhp->attr.ecode = attrs->ecode;
1167 + ep = qhp->ep;
1168 +- c4iw_get_ep(&ep->com);
1169 +- disconnect = 1;
1170 + if (!internal) {
1171 ++ c4iw_get_ep(&ep->com);
1172 + terminate = 1;
1173 ++ disconnect = 1;
1174 + } else {
1175 + terminate = qhp->attr.send_term;
1176 + ret = rdma_fini(rhp, qhp, ep);
1177 +diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
1178 +index c142b23bb401..1aeea5d65c01 100644
1179 +--- a/drivers/infiniband/hw/hfi1/affinity.c
1180 ++++ b/drivers/infiniband/hw/hfi1/affinity.c
1181 +@@ -479,6 +479,8 @@ static int _dev_comp_vect_mappings_create(struct hfi1_devdata *dd,
1182 + rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), i, cpu);
1183 + }
1184 +
1185 ++ free_cpumask_var(available_cpus);
1186 ++ free_cpumask_var(non_intr_cpus);
1187 + return 0;
1188 +
1189 + fail:
1190 +diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
1191 +index 7c5e3fb22413..b7bb55b57889 100644
1192 +--- a/drivers/infiniband/hw/hfi1/file_ops.c
1193 ++++ b/drivers/infiniband/hw/hfi1/file_ops.c
1194 +@@ -200,23 +200,24 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
1195 +
1196 + fd = kzalloc(sizeof(*fd), GFP_KERNEL);
1197 +
1198 +- if (fd) {
1199 +- fd->rec_cpu_num = -1; /* no cpu affinity by default */
1200 +- fd->mm = current->mm;
1201 +- mmgrab(fd->mm);
1202 +- fd->dd = dd;
1203 +- kobject_get(&fd->dd->kobj);
1204 +- fp->private_data = fd;
1205 +- } else {
1206 +- fp->private_data = NULL;
1207 +-
1208 +- if (atomic_dec_and_test(&dd->user_refcount))
1209 +- complete(&dd->user_comp);
1210 +-
1211 +- return -ENOMEM;
1212 +- }
1213 +-
1214 ++ if (!fd || init_srcu_struct(&fd->pq_srcu))
1215 ++ goto nomem;
1216 ++ spin_lock_init(&fd->pq_rcu_lock);
1217 ++ spin_lock_init(&fd->tid_lock);
1218 ++ spin_lock_init(&fd->invalid_lock);
1219 ++ fd->rec_cpu_num = -1; /* no cpu affinity by default */
1220 ++ fd->mm = current->mm;
1221 ++ mmgrab(fd->mm);
1222 ++ fd->dd = dd;
1223 ++ kobject_get(&fd->dd->kobj);
1224 ++ fp->private_data = fd;
1225 + return 0;
1226 ++nomem:
1227 ++ kfree(fd);
1228 ++ fp->private_data = NULL;
1229 ++ if (atomic_dec_and_test(&dd->user_refcount))
1230 ++ complete(&dd->user_comp);
1231 ++ return -ENOMEM;
1232 + }
1233 +
1234 + static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
1235 +@@ -301,21 +302,30 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
1236 + static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
1237 + {
1238 + struct hfi1_filedata *fd = kiocb->ki_filp->private_data;
1239 +- struct hfi1_user_sdma_pkt_q *pq = fd->pq;
1240 ++ struct hfi1_user_sdma_pkt_q *pq;
1241 + struct hfi1_user_sdma_comp_q *cq = fd->cq;
1242 + int done = 0, reqs = 0;
1243 + unsigned long dim = from->nr_segs;
1244 ++ int idx;
1245 +
1246 +- if (!cq || !pq)
1247 ++ idx = srcu_read_lock(&fd->pq_srcu);
1248 ++ pq = srcu_dereference(fd->pq, &fd->pq_srcu);
1249 ++ if (!cq || !pq) {
1250 ++ srcu_read_unlock(&fd->pq_srcu, idx);
1251 + return -EIO;
1252 ++ }
1253 +
1254 +- if (!iter_is_iovec(from) || !dim)
1255 ++ if (!iter_is_iovec(from) || !dim) {
1256 ++ srcu_read_unlock(&fd->pq_srcu, idx);
1257 + return -EINVAL;
1258 ++ }
1259 +
1260 + trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim);
1261 +
1262 +- if (atomic_read(&pq->n_reqs) == pq->n_max_reqs)
1263 ++ if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) {
1264 ++ srcu_read_unlock(&fd->pq_srcu, idx);
1265 + return -ENOSPC;
1266 ++ }
1267 +
1268 + while (dim) {
1269 + int ret;
1270 +@@ -333,6 +343,7 @@ static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
1271 + reqs++;
1272 + }
1273 +
1274 ++ srcu_read_unlock(&fd->pq_srcu, idx);
1275 + return reqs;
1276 + }
1277 +
1278 +@@ -707,6 +718,7 @@ done:
1279 + if (atomic_dec_and_test(&dd->user_refcount))
1280 + complete(&dd->user_comp);
1281 +
1282 ++ cleanup_srcu_struct(&fdata->pq_srcu);
1283 + kfree(fdata);
1284 + return 0;
1285 + }
1286 +diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
1287 +index fc10d65fc3e1..27dea5e1e201 100644
1288 +--- a/drivers/infiniband/hw/hfi1/hfi.h
1289 ++++ b/drivers/infiniband/hw/hfi1/hfi.h
1290 +@@ -1436,10 +1436,13 @@ struct mmu_rb_handler;
1291 +
1292 + /* Private data for file operations */
1293 + struct hfi1_filedata {
1294 ++ struct srcu_struct pq_srcu;
1295 + struct hfi1_devdata *dd;
1296 + struct hfi1_ctxtdata *uctxt;
1297 + struct hfi1_user_sdma_comp_q *cq;
1298 +- struct hfi1_user_sdma_pkt_q *pq;
1299 ++ /* update side lock for SRCU */
1300 ++ spinlock_t pq_rcu_lock;
1301 ++ struct hfi1_user_sdma_pkt_q __rcu *pq;
1302 + u16 subctxt;
1303 + /* for cpu affinity; -1 if none */
1304 + int rec_cpu_num;
1305 +diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
1306 +index f05742ac0949..4da03f823474 100644
1307 +--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
1308 ++++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
1309 +@@ -87,9 +87,6 @@ int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
1310 + {
1311 + int ret = 0;
1312 +
1313 +- spin_lock_init(&fd->tid_lock);
1314 +- spin_lock_init(&fd->invalid_lock);
1315 +-
1316 + fd->entry_to_rb = kcalloc(uctxt->expected_count,
1317 + sizeof(struct rb_node *),
1318 + GFP_KERNEL);
1319 +@@ -142,10 +139,12 @@ void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
1320 + {
1321 + struct hfi1_ctxtdata *uctxt = fd->uctxt;
1322 +
1323 ++ mutex_lock(&uctxt->exp_mutex);
1324 + if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
1325 + unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
1326 + if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
1327 + unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
1328 ++ mutex_unlock(&uctxt->exp_mutex);
1329 +
1330 + kfree(fd->invalid_tids);
1331 + fd->invalid_tids = NULL;
1332 +diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
1333 +index fd754a16475a..c2f0d9ba93de 100644
1334 +--- a/drivers/infiniband/hw/hfi1/user_sdma.c
1335 ++++ b/drivers/infiniband/hw/hfi1/user_sdma.c
1336 +@@ -179,7 +179,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
1337 + pq = kzalloc(sizeof(*pq), GFP_KERNEL);
1338 + if (!pq)
1339 + return -ENOMEM;
1340 +-
1341 + pq->dd = dd;
1342 + pq->ctxt = uctxt->ctxt;
1343 + pq->subctxt = fd->subctxt;
1344 +@@ -236,7 +235,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
1345 + goto pq_mmu_fail;
1346 + }
1347 +
1348 +- fd->pq = pq;
1349 ++ rcu_assign_pointer(fd->pq, pq);
1350 + fd->cq = cq;
1351 +
1352 + return 0;
1353 +@@ -264,8 +263,14 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
1354 +
1355 + trace_hfi1_sdma_user_free_queues(uctxt->dd, uctxt->ctxt, fd->subctxt);
1356 +
1357 +- pq = fd->pq;
1358 ++ spin_lock(&fd->pq_rcu_lock);
1359 ++ pq = srcu_dereference_check(fd->pq, &fd->pq_srcu,
1360 ++ lockdep_is_held(&fd->pq_rcu_lock));
1361 + if (pq) {
1362 ++ rcu_assign_pointer(fd->pq, NULL);
1363 ++ spin_unlock(&fd->pq_rcu_lock);
1364 ++ synchronize_srcu(&fd->pq_srcu);
1365 ++ /* at this point there can be no more new requests */
1366 + if (pq->handler)
1367 + hfi1_mmu_rb_unregister(pq->handler);
1368 + iowait_sdma_drain(&pq->busy);
1369 +@@ -277,7 +282,8 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
1370 + kfree(pq->req_in_use);
1371 + kmem_cache_destroy(pq->txreq_cache);
1372 + kfree(pq);
1373 +- fd->pq = NULL;
1374 ++ } else {
1375 ++ spin_unlock(&fd->pq_rcu_lock);
1376 + }
1377 + if (fd->cq) {
1378 + vfree(fd->cq->comps);
1379 +@@ -321,7 +327,8 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
1380 + {
1381 + int ret = 0, i;
1382 + struct hfi1_ctxtdata *uctxt = fd->uctxt;
1383 +- struct hfi1_user_sdma_pkt_q *pq = fd->pq;
1384 ++ struct hfi1_user_sdma_pkt_q *pq =
1385 ++ srcu_dereference(fd->pq, &fd->pq_srcu);
1386 + struct hfi1_user_sdma_comp_q *cq = fd->cq;
1387 + struct hfi1_devdata *dd = pq->dd;
1388 + unsigned long idx = 0;
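
The hfi1 conversion is a standard SRCU retire sequence: every reader brackets its use of fd->pq with srcu_read_lock()/srcu_read_unlock() and fetches the pointer via srcu_dereference(), while the free path publishes NULL and then waits in synchronize_srcu() before tearing the queue down. A skeleton of the pattern with illustrative types:

	#include <linux/srcu.h>
	#include <linux/slab.h>
	#include <linux/errno.h>

	struct queue { int busy; };	/* stand-in for the real packet queue */

	struct ctx {
		struct srcu_struct srcu;	/* init_srcu_struct() at open time */
		struct queue __rcu *q;
	};

	static int reader(struct ctx *c)
	{
		struct queue *q;
		int idx, ret = -EIO;

		idx = srcu_read_lock(&c->srcu);
		q = srcu_dereference(c->q, &c->srcu);
		if (q)
			ret = q->busy;	/* q stays valid until the unlock below */
		srcu_read_unlock(&c->srcu, idx);
		return ret;
	}

	static void teardown(struct ctx *c)
	{
		struct queue *q = rcu_dereference_protected(c->q, 1);

		rcu_assign_pointer(c->q, NULL);	/* no new reader can find it */
		synchronize_srcu(&c->srcu);	/* wait out every in-flight reader */
		kfree(q);			/* now safe to free */
	}
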
1389 +diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
1390 +index 7e51870e9e01..89ba2f6cd815 100644
1391 +--- a/drivers/infiniband/hw/mlx5/qp.c
1392 ++++ b/drivers/infiniband/hw/mlx5/qp.c
1393 +@@ -3394,9 +3394,6 @@ static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
1394 + struct mlx5_ib_qp_base *base;
1395 + u32 set_id;
1396 +
1397 +- if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id))
1398 +- return 0;
1399 +-
1400 + if (counter)
1401 + set_id = counter->id;
1402 + else
1403 +@@ -6529,6 +6526,7 @@ void mlx5_ib_drain_rq(struct ib_qp *qp)
1404 + */
1405 + int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter)
1406 + {
1407 ++ struct mlx5_ib_dev *dev = to_mdev(qp->device);
1408 + struct mlx5_ib_qp *mqp = to_mqp(qp);
1409 + int err = 0;
1410 +
1411 +@@ -6538,6 +6536,11 @@ int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter)
1412 + goto out;
1413 + }
1414 +
1415 ++ if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id)) {
1416 ++ err = -EOPNOTSUPP;
1417 ++ goto out;
1418 ++ }
1419 ++
1420 + if (mqp->state == IB_QPS_RTS) {
1421 + err = __mlx5_ib_qp_set_counter(qp, counter);
1422 + if (!err)
1423 +diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
1424 +index 3cdf75d0c7a4..7858d499db03 100644
1425 +--- a/drivers/infiniband/sw/rdmavt/qp.c
1426 ++++ b/drivers/infiniband/sw/rdmavt/qp.c
1427 +@@ -61,6 +61,8 @@
1428 + #define RVT_RWQ_COUNT_THRESHOLD 16
1429 +
1430 + static void rvt_rc_timeout(struct timer_list *t);
1431 ++static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
1432 ++ enum ib_qp_type type);
1433 +
1434 + /*
1435 + * Convert the AETH RNR timeout code into the number of microseconds.
1436 +@@ -452,40 +454,41 @@ no_qp_table:
1437 + }
1438 +
1439 + /**
1440 +- * free_all_qps - check for QPs still in use
1441 ++ * rvt_free_qp_cb - callback function to reset a qp
1442 ++ * @qp: the qp to reset
1443 ++ * @v: a 64-bit value
1444 ++ *
1445 ++ * This function resets the qp and removes it from the
1446 ++ * qp hash table.
1447 ++ */
1448 ++static void rvt_free_qp_cb(struct rvt_qp *qp, u64 v)
1449 ++{
1450 ++ unsigned int *qp_inuse = (unsigned int *)v;
1451 ++ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
1452 ++
1453 ++ /* Reset the qp and remove it from the qp hash list */
1454 ++ rvt_reset_qp(rdi, qp, qp->ibqp.qp_type);
1455 ++
1456 ++ /* Increment the qp_inuse count */
1457 ++ (*qp_inuse)++;
1458 ++}
1459 ++
1460 ++/**
1461 ++ * rvt_free_all_qps - check for QPs still in use
1462 + * @rdi: rvt device info structure
1463 + *
1464 + * There should not be any QPs still in use.
1465 + * Free memory for table.
1466 ++ * Return the number of QPs still in use.
1467 + */
1468 + static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
1469 + {
1470 +- unsigned long flags;
1471 +- struct rvt_qp *qp;
1472 +- unsigned n, qp_inuse = 0;
1473 +- spinlock_t *ql; /* work around too long line below */
1474 +-
1475 +- if (rdi->driver_f.free_all_qps)
1476 +- qp_inuse = rdi->driver_f.free_all_qps(rdi);
1477 ++ unsigned int qp_inuse = 0;
1478 +
1479 + qp_inuse += rvt_mcast_tree_empty(rdi);
1480 +
1481 +- if (!rdi->qp_dev)
1482 +- return qp_inuse;
1483 +-
1484 +- ql = &rdi->qp_dev->qpt_lock;
1485 +- spin_lock_irqsave(ql, flags);
1486 +- for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
1487 +- qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
1488 +- lockdep_is_held(ql));
1489 +- RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);
1490 ++ rvt_qp_iter(rdi, (u64)&qp_inuse, rvt_free_qp_cb);
1491 +
1492 +- for (; qp; qp = rcu_dereference_protected(qp->next,
1493 +- lockdep_is_held(ql)))
1494 +- qp_inuse++;
1495 +- }
1496 +- spin_unlock_irqrestore(ql, flags);
1497 +- synchronize_rcu();
1498 + return qp_inuse;
1499 + }
1500 +
1501 +@@ -902,14 +905,14 @@ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
1502 + }
1503 +
1504 + /**
1505 +- * rvt_reset_qp - initialize the QP state to the reset state
1506 ++ * _rvt_reset_qp - initialize the QP state to the reset state
1507 + * @qp: the QP to reset
1508 + * @type: the QP type
1509 + *
1510 + * r_lock, s_hlock, and s_lock are required to be held by the caller
1511 + */
1512 +-static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
1513 +- enum ib_qp_type type)
1514 ++static void _rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
1515 ++ enum ib_qp_type type)
1516 + __must_hold(&qp->s_lock)
1517 + __must_hold(&qp->s_hlock)
1518 + __must_hold(&qp->r_lock)
1519 +@@ -955,6 +958,27 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
1520 + lockdep_assert_held(&qp->s_lock);
1521 + }
1522 +
1523 ++/**
1524 ++ * rvt_reset_qp - initialize the QP state to the reset state
1525 ++ * @rdi: the device info
1526 ++ * @qp: the QP to reset
1527 ++ * @type: the QP type
1528 ++ *
1529 ++ * This is the wrapper function to acquire the r_lock, s_hlock, and s_lock
1530 ++ * before calling _rvt_reset_qp().
1531 ++ */
1532 ++static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
1533 ++ enum ib_qp_type type)
1534 ++{
1535 ++ spin_lock_irq(&qp->r_lock);
1536 ++ spin_lock(&qp->s_hlock);
1537 ++ spin_lock(&qp->s_lock);
1538 ++ _rvt_reset_qp(rdi, qp, type);
1539 ++ spin_unlock(&qp->s_lock);
1540 ++ spin_unlock(&qp->s_hlock);
1541 ++ spin_unlock_irq(&qp->r_lock);
1542 ++}
1543 ++
1544 + /** rvt_free_qpn - Free a qpn from the bit map
1545 + * @qpt: QP table
1546 + * @qpn: queue pair number to free
1547 +@@ -1546,7 +1570,7 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1548 + switch (new_state) {
1549 + case IB_QPS_RESET:
1550 + if (qp->state != IB_QPS_RESET)
1551 +- rvt_reset_qp(rdi, qp, ibqp->qp_type);
1552 ++ _rvt_reset_qp(rdi, qp, ibqp->qp_type);
1553 + break;
1554 +
1555 + case IB_QPS_RTR:
1556 +@@ -1695,13 +1719,7 @@ int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
1557 + struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1558 + struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1559 +
1560 +- spin_lock_irq(&qp->r_lock);
1561 +- spin_lock(&qp->s_hlock);
1562 +- spin_lock(&qp->s_lock);
1563 + rvt_reset_qp(rdi, qp, ibqp->qp_type);
1564 +- spin_unlock(&qp->s_lock);
1565 +- spin_unlock(&qp->s_hlock);
1566 +- spin_unlock_irq(&qp->r_lock);
1567 +
1568 + wait_event(qp->wait, !atomic_read(&qp->refcount));
1569 + /* qpn is now available for use again */
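
The rdmavt change is a pure refactor: the lock-free body becomes _rvt_reset_qp() and a new rvt_reset_qp() wrapper takes r_lock, s_hlock and s_lock around it, so both rvt_destroy_qp() and the new rvt_free_qp_cb() iterator callback can reset a QP without open-coding the locking. The shape of that wrapper pattern, reduced to a hypothetical pthread sketch:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t r_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t s_lock = PTHREAD_MUTEX_INITIALIZER;
    static int qp_state = 3;

    /* caller must hold both locks -- the _rvt_reset_qp() analogue */
    static void _reset_qp(void)
    {
        qp_state = 0;
    }

    /* wrapper takes the locks, mirroring the new rvt_reset_qp() */
    static void reset_qp(void)
    {
        pthread_mutex_lock(&r_lock);
        pthread_mutex_lock(&s_lock);
        _reset_qp();
        pthread_mutex_unlock(&s_lock);
        pthread_mutex_unlock(&r_lock);
    }

    int main(void)
    {
        reset_qp();              /* callable from any context, e.g. an iterator */
        printf("state=%d\n", qp_state);
        return 0;
    }
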
1570 +diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
1571 +index 116cafc9afcf..4bc88708b355 100644
1572 +--- a/drivers/infiniband/sw/rxe/rxe_comp.c
1573 ++++ b/drivers/infiniband/sw/rxe/rxe_comp.c
1574 +@@ -329,7 +329,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
1575 + qp->comp.psn = pkt->psn;
1576 + if (qp->req.wait_psn) {
1577 + qp->req.wait_psn = 0;
1578 +- rxe_run_task(&qp->req.task, 1);
1579 ++ rxe_run_task(&qp->req.task, 0);
1580 + }
1581 + }
1582 + return COMPST_ERROR_RETRY;
1583 +@@ -463,7 +463,7 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
1584 + */
1585 + if (qp->req.wait_fence) {
1586 + qp->req.wait_fence = 0;
1587 +- rxe_run_task(&qp->req.task, 1);
1588 ++ rxe_run_task(&qp->req.task, 0);
1589 + }
1590 + }
1591 +
1592 +@@ -479,7 +479,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
1593 + if (qp->req.need_rd_atomic) {
1594 + qp->comp.timeout_retry = 0;
1595 + qp->req.need_rd_atomic = 0;
1596 +- rxe_run_task(&qp->req.task, 1);
1597 ++ rxe_run_task(&qp->req.task, 0);
1598 + }
1599 + }
1600 +
1601 +@@ -725,7 +725,7 @@ int rxe_completer(void *arg)
1602 + RXE_CNT_COMP_RETRY);
1603 + qp->req.need_retry = 1;
1604 + qp->comp.started_retry = 1;
1605 +- rxe_run_task(&qp->req.task, 1);
1606 ++ rxe_run_task(&qp->req.task, 0);
1607 + }
1608 +
1609 + if (pkt) {
1610 +diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
1611 +index 1ae6f8bba9ae..2c666fb34625 100644
1612 +--- a/drivers/input/mouse/synaptics.c
1613 ++++ b/drivers/input/mouse/synaptics.c
1614 +@@ -146,7 +146,6 @@ static const char * const topbuttonpad_pnp_ids[] = {
1615 + "LEN0042", /* Yoga */
1616 + "LEN0045",
1617 + "LEN0047",
1618 +- "LEN0049",
1619 + "LEN2000", /* S540 */
1620 + "LEN2001", /* Edge E431 */
1621 + "LEN2002", /* Edge E531 */
1622 +@@ -166,9 +165,11 @@ static const char * const smbus_pnp_ids[] = {
1623 + /* all of the topbuttonpad_pnp_ids are valid, we just add some extras */
1624 + "LEN0048", /* X1 Carbon 3 */
1625 + "LEN0046", /* X250 */
1626 ++ "LEN0049", /* Yoga 11e */
1627 + "LEN004a", /* W541 */
1628 + "LEN005b", /* P50 */
1629 + "LEN005e", /* T560 */
1630 ++ "LEN006c", /* T470s */
1631 + "LEN0071", /* T480 */
1632 + "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
1633 + "LEN0073", /* X1 Carbon G5 (Elantech) */
1634 +@@ -179,6 +180,7 @@ static const char * const smbus_pnp_ids[] = {
1635 + "LEN0097", /* X280 -> ALPS trackpoint */
1636 + "LEN009b", /* T580 */
1637 + "LEN200f", /* T450s */
1638 ++ "LEN2044", /* L470 */
1639 + "LEN2054", /* E480 */
1640 + "LEN2055", /* E580 */
1641 + "SYN3052", /* HP EliteBook 840 G4 */
1642 +diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
1643 +index 4a17096e83e1..84bf51d79888 100644
1644 +--- a/drivers/input/touchscreen/ili210x.c
1645 ++++ b/drivers/input/touchscreen/ili210x.c
1646 +@@ -321,7 +321,7 @@ static umode_t ili210x_calibrate_visible(struct kobject *kobj,
1647 + struct i2c_client *client = to_i2c_client(dev);
1648 + struct ili210x *priv = i2c_get_clientdata(client);
1649 +
1650 +- return priv->chip->has_calibrate_reg;
1651 ++ return priv->chip->has_calibrate_reg ? attr->mode : 0;
1652 + }
1653 +
1654 + static const struct attribute_group ili210x_attr_group = {
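
The ili210x fix is worth generalizing: a sysfs .is_visible callback must return a mode (usually attr->mode) or 0 to hide the attribute; returning the bool has_calibrate_reg quietly produced mode 0001. A toy version of the corrected contract, with hypothetical names:

    #include <stdbool.h>
    #include <stdio.h>

    struct attr { unsigned int mode; };

    /* return the mode to expose the attribute, 0 to hide it -- never a bool */
    static unsigned int calibrate_visible(const struct attr *a, bool has_reg)
    {
        return has_reg ? a->mode : 0;
    }

    int main(void)
    {
        struct attr a = { 0200 };
        printf("%o %o\n", calibrate_visible(&a, true),
               calibrate_visible(&a, false));
        return 0;
    }
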
1655 +diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
1656 +index 105b7a7c0251..b3484def0a8b 100644
1657 +--- a/drivers/mmc/core/host.c
1658 ++++ b/drivers/mmc/core/host.c
1659 +@@ -176,7 +176,6 @@ int mmc_of_parse(struct mmc_host *host)
1660 + u32 bus_width, drv_type, cd_debounce_delay_ms;
1661 + int ret;
1662 + bool cd_cap_invert, cd_gpio_invert = false;
1663 +- bool ro_cap_invert, ro_gpio_invert = false;
1664 +
1665 + if (!dev || !dev_fwnode(dev))
1666 + return 0;
1667 +@@ -255,9 +254,11 @@ int mmc_of_parse(struct mmc_host *host)
1668 + }
1669 +
1670 + /* Parse Write Protection */
1671 +- ro_cap_invert = device_property_read_bool(dev, "wp-inverted");
1672 +
1673 +- ret = mmc_gpiod_request_ro(host, "wp", 0, 0, &ro_gpio_invert);
1674 ++ if (device_property_read_bool(dev, "wp-inverted"))
1675 ++ host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
1676 ++
1677 ++ ret = mmc_gpiod_request_ro(host, "wp", 0, 0, NULL);
1678 + if (!ret)
1679 + dev_info(host->parent, "Got WP GPIO\n");
1680 + else if (ret != -ENOENT && ret != -ENOSYS)
1681 +@@ -266,10 +267,6 @@ int mmc_of_parse(struct mmc_host *host)
1682 + if (device_property_read_bool(dev, "disable-wp"))
1683 + host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
1684 +
1685 +- /* See the comment on CD inversion above */
1686 +- if (ro_cap_invert ^ ro_gpio_invert)
1687 +- host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
1688 +-
1689 + if (device_property_read_bool(dev, "cap-sd-highspeed"))
1690 + host->caps |= MMC_CAP_SD_HIGHSPEED;
1691 + if (device_property_read_bool(dev, "cap-mmc-highspeed"))
1692 +diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
1693 +index da2596c5fa28..582ec3d720f6 100644
1694 +--- a/drivers/mmc/core/slot-gpio.c
1695 ++++ b/drivers/mmc/core/slot-gpio.c
1696 +@@ -241,6 +241,9 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
1697 + return ret;
1698 + }
1699 +
1700 ++ if (host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH)
1701 ++ gpiod_toggle_active_low(desc);
1702 ++
1703 + if (gpio_invert)
1704 + *gpio_invert = !gpiod_is_active_low(desc);
1705 +
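
The MMC hunks above replace the old two-flag dance (ro_cap_invert ^ ro_gpio_invert) with a single source of truth: callers set MMC_CAP2_RO_ACTIVE_HIGH first, and mmc_gpiod_request_ro() folds it into the GPIO via the new gpiod_toggle_active_low(). The polarity algebra being consolidated, as a small self-checking sketch with hypothetical names:

    #include <stdbool.h>
    #include <stdio.h>

    /* old logic: RO is active-high when exactly one inversion applies */
    static bool ro_active_high(bool cap_invert, bool gpio_active_low)
    {
        bool gpio_invert = !gpio_active_low;  /* as the old host.c computed it */
        return cap_invert ^ gpio_invert;
    }

    int main(void)
    {
        /* a wp-inverted property and an active-low GPIO flag cancel out */
        printf("%d\n", ro_active_high(true,  true));   /* 1 */
        printf("%d\n", ro_active_high(true,  false));  /* 0 */
        printf("%d\n", ro_active_high(false, true));   /* 0 */
        return 0;
    }
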
1706 +diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
1707 +index 024acc1b0a2e..b2bbcb09a49e 100644
1708 +--- a/drivers/mmc/host/pxamci.c
1709 ++++ b/drivers/mmc/host/pxamci.c
1710 +@@ -740,16 +740,16 @@ static int pxamci_probe(struct platform_device *pdev)
1711 + goto out;
1712 + }
1713 +
1714 ++ if (!host->pdata->gpio_card_ro_invert)
1715 ++ mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
1716 ++
1717 + ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL);
1718 + if (ret && ret != -ENOENT) {
1719 + dev_err(dev, "Failed requesting gpio_ro\n");
1720 + goto out;
1721 + }
1722 +- if (!ret) {
1723 ++ if (!ret)
1724 + host->use_ro_gpio = true;
1725 +- mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
1726 +- 0 : MMC_CAP2_RO_ACTIVE_HIGH;
1727 +- }
1728 +
1729 + if (host->pdata->init)
1730 + host->pdata->init(dev, pxamci_detect_irq, mmc);
1731 +diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
1732 +index 1c988d6a2433..dccb4df46512 100644
1733 +--- a/drivers/mmc/host/sdhci-esdhc-imx.c
1734 ++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
1735 +@@ -1381,13 +1381,14 @@ static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev,
1736 + host->mmc->parent->platform_data);
1737 + /* write_protect */
1738 + if (boarddata->wp_type == ESDHC_WP_GPIO) {
1739 ++ host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
1740 ++
1741 + err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL);
1742 + if (err) {
1743 + dev_err(mmc_dev(host->mmc),
1744 + "failed to request write-protect gpio!\n");
1745 + return err;
1746 + }
1747 +- host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
1748 + }
1749 +
1750 + /* card_detect */
1751 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
1752 +index 5dc32b72e7fa..641c07347e8d 100644
1753 +--- a/drivers/nvme/host/core.c
1754 ++++ b/drivers/nvme/host/core.c
1755 +@@ -3867,7 +3867,7 @@ static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
1756 + if (!log)
1757 + return;
1758 +
1759 +- if (nvme_get_log(ctrl, NVME_NSID_ALL, 0, NVME_LOG_FW_SLOT, log,
1760 ++ if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, log,
1761 + sizeof(*log), 0))
1762 + dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
1763 + kfree(log);
1764 +diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
1765 +index d78d77686d7b..cda75118ccdb 100644
1766 +--- a/drivers/s390/crypto/pkey_api.c
1767 ++++ b/drivers/s390/crypto/pkey_api.c
1768 +@@ -774,7 +774,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
1769 + return -EFAULT;
1770 + rc = cca_sec2protkey(ksp.cardnr, ksp.domain,
1771 + ksp.seckey.seckey, ksp.protkey.protkey,
1772 +- NULL, &ksp.protkey.type);
1773 ++ &ksp.protkey.len, &ksp.protkey.type);
1774 + DEBUG_DBG("%s cca_sec2protkey()=%d\n", __func__, rc);
1775 + if (rc)
1776 + break;
1777 +diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
1778 +index 97acc2ba2912..de844b412110 100644
1779 +--- a/drivers/spmi/spmi-pmic-arb.c
1780 ++++ b/drivers/spmi/spmi-pmic-arb.c
1781 +@@ -731,6 +731,7 @@ static int qpnpint_irq_domain_translate(struct irq_domain *d,
1782 + return 0;
1783 + }
1784 +
1785 ++static struct lock_class_key qpnpint_irq_lock_class, qpnpint_irq_request_class;
1786 +
1787 + static void qpnpint_irq_domain_map(struct spmi_pmic_arb *pmic_arb,
1788 + struct irq_domain *domain, unsigned int virq,
1789 +@@ -746,6 +747,9 @@ static void qpnpint_irq_domain_map(struct spmi_pmic_arb *pmic_arb,
1790 + else
1791 + handler = handle_level_irq;
1792 +
1793 ++
1794 ++ irq_set_lockdep_class(virq, &qpnpint_irq_lock_class,
1795 ++ &qpnpint_irq_request_class);
1796 + irq_domain_set_info(domain, virq, hwirq, &pmic_arb_irqchip, pmic_arb,
1797 + handler, NULL, NULL);
1798 + }
1799 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
1800 +index 0c3c6450d1df..c1e47db439e2 100644
1801 +--- a/fs/btrfs/disk-io.c
1802 ++++ b/fs/btrfs/disk-io.c
1803 +@@ -3164,6 +3164,7 @@ int __cold open_ctree(struct super_block *sb,
1804 + /* do not make disk changes in broken FS or nologreplay is given */
1805 + if (btrfs_super_log_root(disk_super) != 0 &&
1806 + !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
1807 ++ btrfs_info(fs_info, "start tree-log replay");
1808 + ret = btrfs_replay_log(fs_info, fs_devices);
1809 + if (ret) {
1810 + err = ret;
1811 +diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
1812 +index 6f417ff68980..bd6229fb2b6f 100644
1813 +--- a/fs/btrfs/extent_map.c
1814 ++++ b/fs/btrfs/extent_map.c
1815 +@@ -237,6 +237,17 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
1816 + struct extent_map *merge = NULL;
1817 + struct rb_node *rb;
1818 +
1819 ++ /*
1820 ++ * We can't modify an extent map that is in the tree and that is being
1821 ++ * used by another task, as it can cause that other task to see it in an
1822 ++ * inconsistent state during the merging. We always have 1 reference for
1823 ++ * the tree and 1 for this task (which is unpinning the extent map or
1824 ++ * clearing the logging flag), so anything > 2 means it's being used by
1825 ++ * other tasks too.
1826 ++ */
1827 ++ if (refcount_read(&em->refs) > 2)
1828 ++ return;
1829 ++
1830 + if (em->start != 0) {
1831 + rb = rb_prev(&em->rb_node);
1832 + if (rb)
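
The btrfs guard uses reference-count accounting as a visibility test: one ref belongs to the tree and one to the current task, so refs > 2 proves some other task can still observe the extent map and an in-place merge could expose a half-updated object. A minimal userspace analogue of that check, illustrative only:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct em { atomic_int refs; long start, len; };

    /* tree holds one ref, we hold one: refs > 2 means someone else does too */
    static bool safe_to_merge(struct em *em)
    {
        return atomic_load(&em->refs) <= 2;
    }

    int main(void)
    {
        struct em e = { .refs = 2, .start = 0, .len = 4096 };
        printf("merge ok: %d\n", safe_to_merge(&e));   /* 1 */
        atomic_fetch_add(&e.refs, 1);                  /* concurrent user appears */
        printf("merge ok: %d\n", safe_to_merge(&e));   /* 0 */
        return 0;
    }
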
1833 +diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
1834 +index b57f3618e58e..454a1015d026 100644
1835 +--- a/fs/btrfs/ref-verify.c
1836 ++++ b/fs/btrfs/ref-verify.c
1837 +@@ -744,6 +744,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
1838 + */
1839 + be = add_block_entry(fs_info, bytenr, num_bytes, ref_root);
1840 + if (IS_ERR(be)) {
1841 ++ kfree(ref);
1842 + kfree(ra);
1843 + ret = PTR_ERR(be);
1844 + goto out;
1845 +@@ -757,6 +758,8 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
1846 + "re-allocated a block that still has references to it!");
1847 + dump_block_entry(fs_info, be);
1848 + dump_ref_action(fs_info, ra);
1849 ++ kfree(ref);
1850 ++ kfree(ra);
1851 + goto out_unlock;
1852 + }
1853 +
1854 +@@ -819,6 +822,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
1855 + "dropping a ref for an existing root that doesn't have a ref on the block");
1856 + dump_block_entry(fs_info, be);
1857 + dump_ref_action(fs_info, ra);
1858 ++ kfree(ref);
1859 + kfree(ra);
1860 + goto out_unlock;
1861 + }
1862 +@@ -834,6 +838,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
1863 + "attempting to add another ref for an existing ref on a tree block");
1864 + dump_block_entry(fs_info, be);
1865 + dump_ref_action(fs_info, ra);
1866 ++ kfree(ref);
1867 + kfree(ra);
1868 + goto out_unlock;
1869 + }
1870 +diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
1871 +index 6ccfef72d0e1..c6557d44907a 100644
1872 +--- a/fs/btrfs/super.c
1873 ++++ b/fs/btrfs/super.c
1874 +@@ -1803,6 +1803,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
1875 + }
1876 +
1877 + if (btrfs_super_log_root(fs_info->super_copy) != 0) {
1878 ++ btrfs_warn(fs_info,
1879 ++ "mount required to replay tree-log, cannot remount read-write");
1880 + ret = -EINVAL;
1881 + goto restore;
1882 + }
1883 +diff --git a/fs/ceph/super.c b/fs/ceph/super.c
1884 +index 29a795f975df..9b5536451528 100644
1885 +--- a/fs/ceph/super.c
1886 ++++ b/fs/ceph/super.c
1887 +@@ -1020,10 +1020,6 @@ static int ceph_get_tree(struct fs_context *fc)
1888 + if (!fc->source)
1889 + return invalf(fc, "ceph: No source");
1890 +
1891 +-#ifdef CONFIG_CEPH_FS_POSIX_ACL
1892 +- fc->sb_flags |= SB_POSIXACL;
1893 +-#endif
1894 +-
1895 + /* create client (which we may/may not use) */
1896 + fsc = create_fs_client(pctx->opts, pctx->copts);
1897 + pctx->opts = NULL;
1898 +@@ -1141,6 +1137,10 @@ static int ceph_init_fs_context(struct fs_context *fc)
1899 + fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
1900 + fsopt->congestion_kb = default_congestion_kb();
1901 +
1902 ++#ifdef CONFIG_CEPH_FS_POSIX_ACL
1903 ++ fc->sb_flags |= SB_POSIXACL;
1904 ++#endif
1905 ++
1906 + fc->fs_private = pctx;
1907 + fc->ops = &ceph_context_ops;
1908 + return 0;
1909 +diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
1910 +index 5492b9860baa..92b9c8221f07 100644
1911 +--- a/fs/cifs/cifsfs.c
1912 ++++ b/fs/cifs/cifsfs.c
1913 +@@ -414,7 +414,7 @@ cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
1914 + seq_puts(s, "ntlm");
1915 + break;
1916 + case Kerberos:
1917 +- seq_printf(s, "krb5,cruid=%u", from_kuid_munged(&init_user_ns,ses->cred_uid));
1918 ++ seq_puts(s, "krb5");
1919 + break;
1920 + case RawNTLMSSP:
1921 + seq_puts(s, "ntlmssp");
1922 +@@ -427,6 +427,10 @@ cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
1923 +
1924 + if (ses->sign)
1925 + seq_puts(s, "i");
1926 ++
1927 ++ if (ses->sectype == Kerberos)
1928 ++ seq_printf(s, ",cruid=%u",
1929 ++ from_kuid_munged(&init_user_ns, ses->cred_uid));
1930 + }
1931 +
1932 + static void
1933 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
1934 +index 4b9c805ae5e1..65f76be0f454 100644
1935 +--- a/fs/cifs/smb2ops.c
1936 ++++ b/fs/cifs/smb2ops.c
1937 +@@ -1115,7 +1115,8 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
1938 + void *data[1];
1939 + struct smb2_file_full_ea_info *ea = NULL;
1940 + struct kvec close_iov[1];
1941 +- int rc;
1942 ++ struct smb2_query_info_rsp *rsp;
1943 ++ int rc, used_len = 0;
1944 +
1945 + if (smb3_encryption_required(tcon))
1946 + flags |= CIFS_TRANSFORM_REQ;
1947 +@@ -1138,6 +1139,38 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
1948 + cifs_sb);
1949 + if (rc == -ENODATA)
1950 + goto sea_exit;
1951 ++ } else {
1952 ++ /* If we are adding an attribute we should first check
1953 ++ * if there will be enough space available to store
1954 ++ * the new EA. If not we should not add it since we
1955 ++ * would not be able to even read the EAs back.
1956 ++ */
1957 ++ rc = smb2_query_info_compound(xid, tcon, utf16_path,
1958 ++ FILE_READ_EA,
1959 ++ FILE_FULL_EA_INFORMATION,
1960 ++ SMB2_O_INFO_FILE,
1961 ++ CIFSMaxBufSize -
1962 ++ MAX_SMB2_CREATE_RESPONSE_SIZE -
1963 ++ MAX_SMB2_CLOSE_RESPONSE_SIZE,
1964 ++ &rsp_iov[1], &resp_buftype[1], cifs_sb);
1965 ++ if (rc == 0) {
1966 ++ rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
1967 ++ used_len = le32_to_cpu(rsp->OutputBufferLength);
1968 ++ }
1969 ++ free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1970 ++ resp_buftype[1] = CIFS_NO_BUFFER;
1971 ++ memset(&rsp_iov[1], 0, sizeof(rsp_iov[1]));
1972 ++ rc = 0;
1973 ++
1974 ++ /* Use a fudge factor of 256 bytes in case we collide
1975 ++ * with a different set_EAs command.
1976 ++ */
1977 ++ if (CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
1978 ++ MAX_SMB2_CLOSE_RESPONSE_SIZE - 256 <
1979 ++ used_len + ea_name_len + ea_value_len + 1) {
1980 ++ rc = -ENOSPC;
1981 ++ goto sea_exit;
1982 ++ }
1983 + }
1984 + }
1985 +
1986 +diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
1987 +index 1ee04e76bbe0..0a734ffb4310 100644
1988 +--- a/fs/ext4/block_validity.c
1989 ++++ b/fs/ext4/block_validity.c
1990 +@@ -207,6 +207,7 @@ static int ext4_protect_reserved_inode(struct super_block *sb,
1991 + return PTR_ERR(inode);
1992 + num = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
1993 + while (i < num) {
1994 ++ cond_resched();
1995 + map.m_lblk = i;
1996 + map.m_len = num - i;
1997 + n = ext4_map_blocks(NULL, inode, &map, 0);
1998 +diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
1999 +index 060f8a6a5da9..4dc2615ab289 100644
2000 +--- a/fs/ext4/dir.c
2001 ++++ b/fs/ext4/dir.c
2002 +@@ -129,12 +129,14 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
2003 + if (err != ERR_BAD_DX_DIR) {
2004 + return err;
2005 + }
2006 +- /*
2007 +- * We don't set the inode dirty flag since it's not
2008 +- * critical that it get flushed back to the disk.
2009 +- */
2010 +- ext4_clear_inode_flag(file_inode(file),
2011 +- EXT4_INODE_INDEX);
2012 ++ /* Can we just clear INDEX flag to ignore htree information? */
2013 ++ if (!ext4_has_metadata_csum(sb)) {
2014 ++ /*
2015 ++ * We don't set the inode dirty flag since it's not
2016 ++ * critical that it gets flushed back to the disk.
2017 ++ */
2018 ++ ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
2019 ++ }
2020 + }
2021 +
2022 + if (ext4_has_inline_data(inode)) {
2023 +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
2024 +index f8578caba40d..1fd6c1e2ce2a 100644
2025 +--- a/fs/ext4/ext4.h
2026 ++++ b/fs/ext4/ext4.h
2027 +@@ -2482,8 +2482,11 @@ void ext4_insert_dentry(struct inode *inode,
2028 + struct ext4_filename *fname);
2029 + static inline void ext4_update_dx_flag(struct inode *inode)
2030 + {
2031 +- if (!ext4_has_feature_dir_index(inode->i_sb))
2032 ++ if (!ext4_has_feature_dir_index(inode->i_sb)) {
2033 ++ /* ext4_iget() should have caught this... */
2034 ++ WARN_ON_ONCE(ext4_has_feature_metadata_csum(inode->i_sb));
2035 + ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
2036 ++ }
2037 + }
2038 + static const unsigned char ext4_filetype_table[] = {
2039 + DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
2040 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
2041 +index 629a25d999f0..25191201ccdc 100644
2042 +--- a/fs/ext4/inode.c
2043 ++++ b/fs/ext4/inode.c
2044 +@@ -4615,6 +4615,18 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
2045 + ret = -EFSCORRUPTED;
2046 + goto bad_inode;
2047 + }
2048 ++ /*
2049 ++ * If dir_index is not enabled but there's a dir with the INDEX flag set,
2050 ++ * we'd normally treat htree data as empty space. But with metadata
2051 ++ * checksumming that would corrupt the checksums, so forbid it.
2052 ++ */
2053 ++ if (!ext4_has_feature_dir_index(sb) && ext4_has_metadata_csum(sb) &&
2054 ++ ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) {
2055 ++ ext4_error_inode(inode, function, line, 0,
2056 ++ "iget: Dir with htree data on filesystem without dir_index feature.");
2057 ++ ret = -EFSCORRUPTED;
2058 ++ goto bad_inode;
2059 ++ }
2060 + ei->i_disksize = inode->i_size;
2061 + #ifdef CONFIG_QUOTA
2062 + ei->i_reserved_quota = 0;
2063 +diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
2064 +index 2305b4374fd3..9d00e0dd2ba9 100644
2065 +--- a/fs/ext4/mmp.c
2066 ++++ b/fs/ext4/mmp.c
2067 +@@ -120,10 +120,10 @@ void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
2068 + {
2069 + __ext4_warning(sb, function, line, "%s", msg);
2070 + __ext4_warning(sb, function, line,
2071 +- "MMP failure info: last update time: %llu, last update "
2072 +- "node: %s, last update device: %s",
2073 +- (long long unsigned int) le64_to_cpu(mmp->mmp_time),
2074 +- mmp->mmp_nodename, mmp->mmp_bdevname);
2075 ++ "MMP failure info: last update time: %llu, last update node: %.*s, last update device: %.*s",
2076 ++ (unsigned long long)le64_to_cpu(mmp->mmp_time),
2077 ++ (int)sizeof(mmp->mmp_nodename), mmp->mmp_nodename,
2078 ++ (int)sizeof(mmp->mmp_bdevname), mmp->mmp_bdevname);
2079 + }
2080 +
2081 + /*
2082 +@@ -154,6 +154,7 @@ static int kmmpd(void *data)
2083 + mmp_check_interval = max(EXT4_MMP_CHECK_MULT * mmp_update_interval,
2084 + EXT4_MMP_MIN_CHECK_INTERVAL);
2085 + mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
2086 ++ BUILD_BUG_ON(sizeof(mmp->mmp_bdevname) < BDEVNAME_SIZE);
2087 + bdevname(bh->b_bdev, mmp->mmp_bdevname);
2088 +
2089 + memcpy(mmp->mmp_nodename, init_utsname()->nodename,
2090 +@@ -375,7 +376,8 @@ skip:
2091 + /*
2092 + * Start a kernel thread to update the MMP block periodically.
2093 + */
2094 +- EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, mmpd_data, "kmmpd-%s",
2095 ++ EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, mmpd_data, "kmmpd-%.*s",
2096 ++ (int)sizeof(mmp->mmp_bdevname),
2097 + bdevname(bh->b_bdev,
2098 + mmp->mmp_bdevname));
2099 + if (IS_ERR(EXT4_SB(sb)->s_mmp_tsk)) {
2100 +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
2101 +index 1cb42d940784..deb9f7a02976 100644
2102 +--- a/fs/ext4/namei.c
2103 ++++ b/fs/ext4/namei.c
2104 +@@ -2207,6 +2207,13 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
2105 + retval = ext4_dx_add_entry(handle, &fname, dir, inode);
2106 + if (!retval || (retval != ERR_BAD_DX_DIR))
2107 + goto out;
2108 ++ /* Can we just ignore htree data? */
2109 ++ if (ext4_has_metadata_csum(sb)) {
2110 ++ EXT4_ERROR_INODE(dir,
2111 ++ "Directory has corrupted htree index.");
2112 ++ retval = -EFSCORRUPTED;
2113 ++ goto out;
2114 ++ }
2115 + ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
2116 + dx_fallback++;
2117 + ext4_mark_inode_dirty(handle, dir);
2118 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
2119 +index 2937a8873fe1..c51d7ef2e467 100644
2120 +--- a/fs/ext4/super.c
2121 ++++ b/fs/ext4/super.c
2122 +@@ -2964,17 +2964,11 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly)
2123 + return 0;
2124 + }
2125 +
2126 +-#ifndef CONFIG_QUOTA
2127 +- if (ext4_has_feature_quota(sb) && !readonly) {
2128 ++#if !defined(CONFIG_QUOTA) || !defined(CONFIG_QFMT_V2)
2129 ++ if (!readonly && (ext4_has_feature_quota(sb) ||
2130 ++ ext4_has_feature_project(sb))) {
2131 + ext4_msg(sb, KERN_ERR,
2132 +- "Filesystem with quota feature cannot be mounted RDWR "
2133 +- "without CONFIG_QUOTA");
2134 +- return 0;
2135 +- }
2136 +- if (ext4_has_feature_project(sb) && !readonly) {
2137 +- ext4_msg(sb, KERN_ERR,
2138 +- "Filesystem with project quota feature cannot be mounted RDWR "
2139 +- "without CONFIG_QUOTA");
2140 ++ "The kernel was not built with CONFIG_QUOTA and CONFIG_QFMT_V2");
2141 + return 0;
2142 + }
2143 + #endif /* CONFIG_QUOTA */
2144 +@@ -3768,6 +3762,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2145 + */
2146 + sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
2147 +
2148 ++ blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
2149 ++ if (blocksize < EXT4_MIN_BLOCK_SIZE ||
2150 ++ blocksize > EXT4_MAX_BLOCK_SIZE) {
2151 ++ ext4_msg(sb, KERN_ERR,
2152 ++ "Unsupported filesystem blocksize %d (%d log_block_size)",
2153 ++ blocksize, le32_to_cpu(es->s_log_block_size));
2154 ++ goto failed_mount;
2155 ++ }
2156 ++
2157 + if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
2158 + sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
2159 + sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
2160 +@@ -3785,6 +3788,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2161 + ext4_msg(sb, KERN_ERR,
2162 + "unsupported inode size: %d",
2163 + sbi->s_inode_size);
2164 ++ ext4_msg(sb, KERN_ERR, "blocksize: %d", blocksize);
2165 + goto failed_mount;
2166 + }
2167 + /*
2168 +@@ -3988,14 +3992,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2169 + if (!ext4_feature_set_ok(sb, (sb_rdonly(sb))))
2170 + goto failed_mount;
2171 +
2172 +- blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
2173 +- if (blocksize < EXT4_MIN_BLOCK_SIZE ||
2174 +- blocksize > EXT4_MAX_BLOCK_SIZE) {
2175 +- ext4_msg(sb, KERN_ERR,
2176 +- "Unsupported filesystem blocksize %d (%d log_block_size)",
2177 +- blocksize, le32_to_cpu(es->s_log_block_size));
2178 +- goto failed_mount;
2179 +- }
2180 + if (le32_to_cpu(es->s_log_block_size) >
2181 + (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
2182 + ext4_msg(sb, KERN_ERR,
2183 +@@ -5540,9 +5536,15 @@ static int ext4_statfs_project(struct super_block *sb,
2184 + return PTR_ERR(dquot);
2185 + spin_lock(&dquot->dq_dqb_lock);
2186 +
2187 +- limit = (dquot->dq_dqb.dqb_bsoftlimit ?
2188 +- dquot->dq_dqb.dqb_bsoftlimit :
2189 +- dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
2190 ++ limit = 0;
2191 ++ if (dquot->dq_dqb.dqb_bsoftlimit &&
2192 ++ (!limit || dquot->dq_dqb.dqb_bsoftlimit < limit))
2193 ++ limit = dquot->dq_dqb.dqb_bsoftlimit;
2194 ++ if (dquot->dq_dqb.dqb_bhardlimit &&
2195 ++ (!limit || dquot->dq_dqb.dqb_bhardlimit < limit))
2196 ++ limit = dquot->dq_dqb.dqb_bhardlimit;
2197 ++ limit >>= sb->s_blocksize_bits;
2198 ++
2199 + if (limit && buf->f_blocks > limit) {
2200 + curblock = (dquot->dq_dqb.dqb_curspace +
2201 + dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
2202 +@@ -5552,9 +5554,14 @@ static int ext4_statfs_project(struct super_block *sb,
2203 + (buf->f_blocks - curblock) : 0;
2204 + }
2205 +
2206 +- limit = dquot->dq_dqb.dqb_isoftlimit ?
2207 +- dquot->dq_dqb.dqb_isoftlimit :
2208 +- dquot->dq_dqb.dqb_ihardlimit;
2209 ++ limit = 0;
2210 ++ if (dquot->dq_dqb.dqb_isoftlimit &&
2211 ++ (!limit || dquot->dq_dqb.dqb_isoftlimit < limit))
2212 ++ limit = dquot->dq_dqb.dqb_isoftlimit;
2213 ++ if (dquot->dq_dqb.dqb_ihardlimit &&
2214 ++ (!limit || dquot->dq_dqb.dqb_ihardlimit < limit))
2215 ++ limit = dquot->dq_dqb.dqb_ihardlimit;
2216 ++
2217 + if (limit && buf->f_files > limit) {
2218 + buf->f_files = limit;
2219 + buf->f_ffree =
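
Both ext4_statfs_project() hunks switch from "soft limit if set, else hard limit" to "the smaller of whichever limits are nonzero", so a hard limit below the soft limit is no longer ignored. The selection logic in isolation:

    #include <stdio.h>

    /* report the tighter of the soft and hard limits, treating 0 as unset */
    static unsigned long long pick_limit(unsigned long long soft,
                                         unsigned long long hard)
    {
        unsigned long long limit = 0;

        if (soft && (!limit || soft < limit))
            limit = soft;
        if (hard && (!limit || hard < limit))
            limit = hard;
        return limit;
    }

    int main(void)
    {
        printf("%llu\n", pick_limit(0, 100));   /* 100: soft unset */
        printf("%llu\n", pick_limit(200, 100)); /* 100: hard is tighter */
        return 0;
    }
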
2220 +diff --git a/fs/io-wq.c b/fs/io-wq.c
2221 +index 5147d2213b01..0dc4bb6de656 100644
2222 +--- a/fs/io-wq.c
2223 ++++ b/fs/io-wq.c
2224 +@@ -16,6 +16,7 @@
2225 + #include <linux/slab.h>
2226 + #include <linux/kthread.h>
2227 + #include <linux/rculist_nulls.h>
2228 ++#include <linux/fs_struct.h>
2229 +
2230 + #include "io-wq.h"
2231 +
2232 +@@ -58,6 +59,7 @@ struct io_worker {
2233 + struct mm_struct *mm;
2234 + const struct cred *creds;
2235 + struct files_struct *restore_files;
2236 ++ struct fs_struct *restore_fs;
2237 + };
2238 +
2239 + #if BITS_PER_LONG == 64
2240 +@@ -150,6 +152,9 @@ static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
2241 + task_unlock(current);
2242 + }
2243 +
2244 ++ if (current->fs != worker->restore_fs)
2245 ++ current->fs = worker->restore_fs;
2246 ++
2247 + /*
2248 + * If we have an active mm, we need to drop the wq lock before unusing
2249 + * it. If we do, return true and let the caller retry the idle loop.
2250 +@@ -310,6 +315,7 @@ static void io_worker_start(struct io_wqe *wqe, struct io_worker *worker)
2251 +
2252 + worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
2253 + worker->restore_files = current->files;
2254 ++ worker->restore_fs = current->fs;
2255 + io_wqe_inc_running(wqe, worker);
2256 + }
2257 +
2258 +@@ -456,6 +462,8 @@ next:
2259 + }
2260 + if (!worker->creds)
2261 + worker->creds = override_creds(wq->creds);
2262 ++ if (work->fs && current->fs != work->fs)
2263 ++ current->fs = work->fs;
2264 + if (test_bit(IO_WQ_BIT_CANCEL, &wq->state))
2265 + work->flags |= IO_WQ_WORK_CANCEL;
2266 + if (worker->mm)
2267 +diff --git a/fs/io-wq.h b/fs/io-wq.h
2268 +index 3f5e356de980..bbab98d1d328 100644
2269 +--- a/fs/io-wq.h
2270 ++++ b/fs/io-wq.h
2271 +@@ -72,6 +72,7 @@ struct io_wq_work {
2272 + };
2273 + void (*func)(struct io_wq_work **);
2274 + struct files_struct *files;
2275 ++ struct fs_struct *fs;
2276 + unsigned flags;
2277 + };
2278 +
2279 +@@ -79,8 +80,9 @@ struct io_wq_work {
2280 + do { \
2281 + (work)->list.next = NULL; \
2282 + (work)->func = _func; \
2283 +- (work)->flags = 0; \
2284 + (work)->files = NULL; \
2285 ++ (work)->fs = NULL; \
2286 ++ (work)->flags = 0; \
2287 + } while (0) \
2288 +
2289 + typedef void (get_work_fn)(struct io_wq_work *);
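
The io-wq change threads a struct fs_struct through work items the same way it already handles files: the worker records its own current->fs once at startup (restore_fs), adopts work->fs while executing the item, and restores its own state on unuse. The save/adopt/restore shape, as a hypothetical plain-C sketch:

    #include <stdio.h>

    struct fs { const char *root; };

    static struct fs worker_fs = { "/" };
    static struct fs *current_fs = &worker_fs;   /* current->fs stand-in */

    static void run_work(struct fs *work_fs)
    {
        struct fs *restore = current_fs;         /* restore_fs analogue */

        if (work_fs && current_fs != work_fs)
            current_fs = work_fs;                /* adopt submitter's fs */
        printf("running with root %s\n", current_fs->root);
        current_fs = restore;                    /* __io_worker_unuse() analogue */
    }

    int main(void)
    {
        struct fs task_fs = { "/srv/jail" };
        run_work(&task_fs);
        run_work(NULL);                          /* work without its own fs */
        return 0;
    }
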
2290 +diff --git a/fs/io_uring.c b/fs/io_uring.c
2291 +index f470fb21467e..6ae692b02980 100644
2292 +--- a/fs/io_uring.c
2293 ++++ b/fs/io_uring.c
2294 +@@ -1786,17 +1786,6 @@ static int io_alloc_async_ctx(struct io_kiocb *req)
2295 + return req->io == NULL;
2296 + }
2297 +
2298 +-static void io_rw_async(struct io_wq_work **workptr)
2299 +-{
2300 +- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
2301 +- struct iovec *iov = NULL;
2302 +-
2303 +- if (req->io->rw.iov != req->io->rw.fast_iov)
2304 +- iov = req->io->rw.iov;
2305 +- io_wq_submit_work(workptr);
2306 +- kfree(iov);
2307 +-}
2308 +-
2309 + static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
2310 + struct iovec *iovec, struct iovec *fast_iov,
2311 + struct iov_iter *iter)
2312 +@@ -1810,7 +1799,6 @@ static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
2313 +
2314 + io_req_map_rw(req, io_size, iovec, fast_iov, iter);
2315 + }
2316 +- req->work.func = io_rw_async;
2317 + return 0;
2318 + }
2319 +
2320 +@@ -1897,8 +1885,7 @@ copy_iov:
2321 + }
2322 + }
2323 + out_free:
2324 +- if (!io_wq_current_is_worker())
2325 +- kfree(iovec);
2326 ++ kfree(iovec);
2327 + return ret;
2328 + }
2329 +
2330 +@@ -1991,6 +1978,12 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
2331 + ret2 = call_write_iter(req->file, kiocb, &iter);
2332 + else
2333 + ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);
2334 ++ /*
2335 ++ * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
2336 ++ * retry them without IOCB_NOWAIT.
2337 ++ */
2338 ++ if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
2339 ++ ret2 = -EAGAIN;
2340 + if (!force_nonblock || ret2 != -EAGAIN) {
2341 + kiocb_done(kiocb, ret2, nxt, req->in_async);
2342 + } else {
2343 +@@ -2003,8 +1996,7 @@ copy_iov:
2344 + }
2345 + }
2346 + out_free:
2347 +- if (!io_wq_current_is_worker())
2348 +- kfree(iovec);
2349 ++ kfree(iovec);
2350 + return ret;
2351 + }
2352 +
2353 +@@ -2174,19 +2166,6 @@ static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt,
2354 + return 0;
2355 + }
2356 +
2357 +-#if defined(CONFIG_NET)
2358 +-static void io_sendrecv_async(struct io_wq_work **workptr)
2359 +-{
2360 +- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
2361 +- struct iovec *iov = NULL;
2362 +-
2363 +- if (req->io->rw.iov != req->io->rw.fast_iov)
2364 +- iov = req->io->msg.iov;
2365 +- io_wq_submit_work(workptr);
2366 +- kfree(iov);
2367 +-}
2368 +-#endif
2369 +-
2370 + static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2371 + {
2372 + #if defined(CONFIG_NET)
2373 +@@ -2254,17 +2233,19 @@ static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
2374 + if (force_nonblock && ret == -EAGAIN) {
2375 + if (req->io)
2376 + return -EAGAIN;
2377 +- if (io_alloc_async_ctx(req))
2378 ++ if (io_alloc_async_ctx(req)) {
2379 ++ if (kmsg && kmsg->iov != kmsg->fast_iov)
2380 ++ kfree(kmsg->iov);
2381 + return -ENOMEM;
2382 ++ }
2383 + memcpy(&req->io->msg, &io.msg, sizeof(io.msg));
2384 +- req->work.func = io_sendrecv_async;
2385 + return -EAGAIN;
2386 + }
2387 + if (ret == -ERESTARTSYS)
2388 + ret = -EINTR;
2389 + }
2390 +
2391 +- if (!io_wq_current_is_worker() && kmsg && kmsg->iov != kmsg->fast_iov)
2392 ++ if (kmsg && kmsg->iov != kmsg->fast_iov)
2393 + kfree(kmsg->iov);
2394 + io_cqring_add_event(req, ret);
2395 + if (ret < 0)
2396 +@@ -2346,17 +2327,19 @@ static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
2397 + if (force_nonblock && ret == -EAGAIN) {
2398 + if (req->io)
2399 + return -EAGAIN;
2400 +- if (io_alloc_async_ctx(req))
2401 ++ if (io_alloc_async_ctx(req)) {
2402 ++ if (kmsg && kmsg->iov != kmsg->fast_iov)
2403 ++ kfree(kmsg->iov);
2404 + return -ENOMEM;
2405 ++ }
2406 + memcpy(&req->io->msg, &io.msg, sizeof(io.msg));
2407 +- req->work.func = io_sendrecv_async;
2408 + return -EAGAIN;
2409 + }
2410 + if (ret == -ERESTARTSYS)
2411 + ret = -EINTR;
2412 + }
2413 +
2414 +- if (!io_wq_current_is_worker() && kmsg && kmsg->iov != kmsg->fast_iov)
2415 ++ if (kmsg && kmsg->iov != kmsg->fast_iov)
2416 + kfree(kmsg->iov);
2417 + io_cqring_add_event(req, ret);
2418 + if (ret < 0)
2419 +diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
2420 +index 7f0b362b3842..3845750f70ec 100644
2421 +--- a/fs/jbd2/commit.c
2422 ++++ b/fs/jbd2/commit.c
2423 +@@ -976,29 +976,33 @@ restart_loop:
2424 + * it. */
2425 +
2426 + /*
2427 +- * A buffer which has been freed while still being journaled by
2428 +- * a previous transaction.
2429 +- */
2430 +- if (buffer_freed(bh)) {
2431 ++ * A buffer which has been freed while still being journaled
2432 ++ * by a previous transaction, refile the buffer to BJ_Forget of
2433 ++ * the running transaction. If the just committed transaction
2434 ++ * contains an "add to orphan" operation, we can completely
2435 ++ * invalidate the buffer now. We are rather thorough in that,
2436 ++ * since the buffer may still be accessible when blocksize <
2437 ++ * pagesize and it is attached to the last partial page.
2438 ++ */
2439 ++ if (buffer_freed(bh) && !jh->b_next_transaction) {
2440 ++ struct address_space *mapping;
2441 ++
2442 ++ clear_buffer_freed(bh);
2443 ++ clear_buffer_jbddirty(bh);
2444 ++
2445 + /*
2446 +- * If the running transaction is the one containing
2447 +- * "add to orphan" operation (b_next_transaction !=
2448 +- * NULL), we have to wait for that transaction to
2449 +- * commit before we can really get rid of the buffer.
2450 +- * So just clear b_modified to not confuse transaction
2451 +- * credit accounting and refile the buffer to
2452 +- * BJ_Forget of the running transaction. If the just
2453 +- * committed transaction contains "add to orphan"
2454 +- * operation, we can completely invalidate the buffer
2455 +- * now. We are rather through in that since the
2456 +- * buffer may be still accessible when blocksize <
2457 +- * pagesize and it is attached to the last partial
2458 +- * page.
2459 ++ * Block device buffers need to stay mapped all the
2460 ++ * time, so it is enough to clear buffer_jbddirty and
2461 ++ * buffer_freed bits. For the file mapping buffers (i.e.
2462 ++ * journalled data) we need to unmap buffer and clear
2463 ++ * more bits. We also need to be careful about the check
2464 ++ * because the data page mapping can get cleared under
2465 ++ * our hands, in which case we need not clear more bits
2466 ++ * because the page and buffers will be freed and can
2467 ++ * never be reused once we are done with them.
2468 + */
2469 +- jh->b_modified = 0;
2470 +- if (!jh->b_next_transaction) {
2471 +- clear_buffer_freed(bh);
2472 +- clear_buffer_jbddirty(bh);
2473 ++ mapping = READ_ONCE(bh->b_page->mapping);
2474 ++ if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) {
2475 + clear_buffer_mapped(bh);
2476 + clear_buffer_new(bh);
2477 + clear_buffer_req(bh);
2478 +diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
2479 +index 27b9f9dee434..0603dfa9ad90 100644
2480 +--- a/fs/jbd2/transaction.c
2481 ++++ b/fs/jbd2/transaction.c
2482 +@@ -2329,14 +2329,16 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
2483 + return -EBUSY;
2484 + }
2485 + /*
2486 +- * OK, buffer won't be reachable after truncate. We just set
2487 +- * j_next_transaction to the running transaction (if there is
2488 +- * one) and mark buffer as freed so that commit code knows it
2489 +- * should clear dirty bits when it is done with the buffer.
2490 ++ * OK, buffer won't be reachable after truncate. We just clear
2491 ++ * b_modified to not confuse transaction credit accounting, and
2492 ++ * set b_next_transaction to the running transaction (if there
2493 ++ * is one) and mark buffer as freed so that commit code knows
2494 ++ * it should clear dirty bits when it is done with the buffer.
2495 + */
2496 + set_buffer_freed(bh);
2497 + if (journal->j_running_transaction && buffer_jbddirty(bh))
2498 + jh->b_next_transaction = journal->j_running_transaction;
2499 ++ jh->b_modified = 0;
2500 + spin_unlock(&journal->j_list_lock);
2501 + spin_unlock(&jh->b_state_lock);
2502 + write_unlock(&journal->j_state_lock);
2503 +diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
2504 +index fe57b2b5314a..8e322bacde69 100644
2505 +--- a/fs/nfs/delegation.c
2506 ++++ b/fs/nfs/delegation.c
2507 +@@ -25,13 +25,29 @@
2508 + #include "internal.h"
2509 + #include "nfs4trace.h"
2510 +
2511 +-static void nfs_free_delegation(struct nfs_delegation *delegation)
2512 ++static atomic_long_t nfs_active_delegations;
2513 ++
2514 ++static void __nfs_free_delegation(struct nfs_delegation *delegation)
2515 + {
2516 + put_cred(delegation->cred);
2517 + delegation->cred = NULL;
2518 + kfree_rcu(delegation, rcu);
2519 + }
2520 +
2521 ++static void nfs_mark_delegation_revoked(struct nfs_delegation *delegation)
2522 ++{
2523 ++ if (!test_and_set_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
2524 ++ delegation->stateid.type = NFS4_INVALID_STATEID_TYPE;
2525 ++ atomic_long_dec(&nfs_active_delegations);
2526 ++ }
2527 ++}
2528 ++
2529 ++static void nfs_free_delegation(struct nfs_delegation *delegation)
2530 ++{
2531 ++ nfs_mark_delegation_revoked(delegation);
2532 ++ __nfs_free_delegation(delegation);
2533 ++}
2534 ++
2535 + /**
2536 + * nfs_mark_delegation_referenced - set delegation's REFERENCED flag
2537 + * @delegation: delegation to process
2538 +@@ -222,13 +238,18 @@ void nfs_inode_reclaim_delegation(struct inode *inode, const struct cred *cred,
2539 +
2540 + static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)
2541 + {
2542 ++ const struct cred *cred;
2543 + int res = 0;
2544 +
2545 +- if (!test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
2546 +- res = nfs4_proc_delegreturn(inode,
2547 +- delegation->cred,
2548 ++ if (!test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
2549 ++ spin_lock(&delegation->lock);
2550 ++ cred = get_cred(delegation->cred);
2551 ++ spin_unlock(&delegation->lock);
2552 ++ res = nfs4_proc_delegreturn(inode, cred,
2553 + &delegation->stateid,
2554 + issync);
2555 ++ put_cred(cred);
2556 ++ }
2557 + return res;
2558 + }
2559 +
2560 +@@ -343,7 +364,8 @@ nfs_update_inplace_delegation(struct nfs_delegation *delegation,
2561 + delegation->stateid.seqid = update->stateid.seqid;
2562 + smp_wmb();
2563 + delegation->type = update->type;
2564 +- clear_bit(NFS_DELEGATION_REVOKED, &delegation->flags);
2565 ++ if (test_and_clear_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
2566 ++ atomic_long_inc(&nfs_active_delegations);
2567 + }
2568 + }
2569 +
2570 +@@ -423,6 +445,8 @@ add_new:
2571 + rcu_assign_pointer(nfsi->delegation, delegation);
2572 + delegation = NULL;
2573 +
2574 ++ atomic_long_inc(&nfs_active_delegations);
2575 ++
2576 + trace_nfs4_set_delegation(inode, type);
2577 +
2578 + spin_lock(&inode->i_lock);
2579 +@@ -432,7 +456,7 @@ add_new:
2580 + out:
2581 + spin_unlock(&clp->cl_lock);
2582 + if (delegation != NULL)
2583 +- nfs_free_delegation(delegation);
2584 ++ __nfs_free_delegation(delegation);
2585 + if (freeme != NULL) {
2586 + nfs_do_return_delegation(inode, freeme, 0);
2587 + nfs_free_delegation(freeme);
2588 +@@ -760,13 +784,6 @@ static void nfs_client_mark_return_unused_delegation_types(struct nfs_client *cl
2589 + rcu_read_unlock();
2590 + }
2591 +
2592 +-static void nfs_mark_delegation_revoked(struct nfs_server *server,
2593 +- struct nfs_delegation *delegation)
2594 +-{
2595 +- set_bit(NFS_DELEGATION_REVOKED, &delegation->flags);
2596 +- delegation->stateid.type = NFS4_INVALID_STATEID_TYPE;
2597 +-}
2598 +-
2599 + static void nfs_revoke_delegation(struct inode *inode,
2600 + const nfs4_stateid *stateid)
2601 + {
2602 +@@ -794,7 +811,7 @@ static void nfs_revoke_delegation(struct inode *inode,
2603 + }
2604 + spin_unlock(&delegation->lock);
2605 + }
2606 +- nfs_mark_delegation_revoked(NFS_SERVER(inode), delegation);
2607 ++ nfs_mark_delegation_revoked(delegation);
2608 + ret = true;
2609 + out:
2610 + rcu_read_unlock();
2611 +@@ -833,7 +850,7 @@ void nfs_delegation_mark_returned(struct inode *inode,
2612 + delegation->stateid.seqid = stateid->seqid;
2613 + }
2614 +
2615 +- nfs_mark_delegation_revoked(NFS_SERVER(inode), delegation);
2616 ++ nfs_mark_delegation_revoked(delegation);
2617 +
2618 + out_clear_returning:
2619 + clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
2620 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
2621 +index a2759b4062ae..6ddb4f517d37 100644
2622 +--- a/fs/nfs/nfs4proc.c
2623 ++++ b/fs/nfs/nfs4proc.c
2624 +@@ -5295,7 +5295,7 @@ static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
2625 + hdr->timestamp = jiffies;
2626 +
2627 + msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
2628 +- nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 1, 0);
2629 ++ nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
2630 + nfs4_state_protect_write(server->nfs_client, clnt, msg, hdr);
2631 + }
2632 +
2633 +diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
2634 +index 18790b9e16b5..11fdb0cc9a83 100644
2635 +--- a/include/acpi/acpixf.h
2636 ++++ b/include/acpi/acpixf.h
2637 +@@ -752,6 +752,7 @@ ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_dispatch_gpe(acpi_handle gpe_device, u3
2638 + ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void))
2639 + ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void))
2640 + ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void))
2641 ++ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_gpe_status_set(void))
2642 +
2643 + ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
2644 + acpi_get_gpe_device(u32 gpe_index,
2645 +diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
2646 +index 5215fdba6b9a..bf2d017dd7b7 100644
2647 +--- a/include/linux/gpio/consumer.h
2648 ++++ b/include/linux/gpio/consumer.h
2649 +@@ -158,6 +158,7 @@ int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
2650 +
2651 + int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
2652 + int gpiod_set_transitory(struct gpio_desc *desc, bool transitory);
2653 ++void gpiod_toggle_active_low(struct gpio_desc *desc);
2654 +
2655 + int gpiod_is_active_low(const struct gpio_desc *desc);
2656 + int gpiod_cansleep(const struct gpio_desc *desc);
2657 +@@ -483,6 +484,12 @@ static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
2658 + return -ENOSYS;
2659 + }
2660 +
2661 ++static inline void gpiod_toggle_active_low(struct gpio_desc *desc)
2662 ++{
2663 ++ /* GPIO can never have been requested */
2664 ++ WARN_ON(desc);
2665 ++}
2666 ++
2667 + static inline int gpiod_is_active_low(const struct gpio_desc *desc)
2668 + {
2669 + /* GPIO can never have been requested */
2670 +diff --git a/include/linux/suspend.h b/include/linux/suspend.h
2671 +index 6fc8843f1c9e..cd97d2c8840c 100644
2672 +--- a/include/linux/suspend.h
2673 ++++ b/include/linux/suspend.h
2674 +@@ -191,7 +191,7 @@ struct platform_s2idle_ops {
2675 + int (*begin)(void);
2676 + int (*prepare)(void);
2677 + int (*prepare_late)(void);
2678 +- void (*wake)(void);
2679 ++ bool (*wake)(void);
2680 + void (*restore_early)(void);
2681 + void (*restore)(void);
2682 + void (*end)(void);
2683 +diff --git a/include/net/mac80211.h b/include/net/mac80211.h
2684 +index aa145808e57a..77e6b5a83b06 100644
2685 +--- a/include/net/mac80211.h
2686 ++++ b/include/net/mac80211.h
2687 +@@ -1004,12 +1004,11 @@ ieee80211_rate_get_vht_nss(const struct ieee80211_tx_rate *rate)
2688 + struct ieee80211_tx_info {
2689 + /* common information */
2690 + u32 flags;
2691 +- u8 band;
2692 +-
2693 +- u8 hw_queue;
2694 +-
2695 +- u16 ack_frame_id:6;
2696 +- u16 tx_time_est:10;
2697 ++ u32 band:3,
2698 ++ ack_frame_id:13,
2699 ++ hw_queue:4,
2700 ++ tx_time_est:10;
2701 ++ /* 2 free bits */
2702 +
2703 + union {
2704 + struct {
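
The ieee80211_tx_info repack trades two u8s and a u16 of bitfields for one u32: band:3 + ack_frame_id:13 + hw_queue:4 + tx_time_est:10 = 30 bits, and widening ack_frame_id to 13 bits is exactly what lets the idr_alloc() ceilings below grow from 0x40 to 0x2000 (2^13). A compile-time restatement of that sizing:

    #include <assert.h>
    #include <stdio.h>

    struct tx_info_bits {
        unsigned int band:3, ack_frame_id:13, hw_queue:4, tx_time_est:10;
    };

    int main(void)
    {
        static_assert(sizeof(struct tx_info_bits) == 4,
                      "30 bits of fields must fit one 32-bit word");
        printf("max ack_frame_id: %u\n", (1u << 13) - 1);  /* 8191 = 0x1fff */
        return 0;
    }
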
2705 +diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
2706 +index 1e12e6928bca..30892c4759b4 100644
2707 +--- a/kernel/cgroup/cgroup.c
2708 ++++ b/kernel/cgroup/cgroup.c
2709 +@@ -5932,11 +5932,14 @@ void cgroup_post_fork(struct task_struct *child)
2710 +
2711 + spin_lock_irq(&css_set_lock);
2712 +
2713 +- WARN_ON_ONCE(!list_empty(&child->cg_list));
2714 +- cset = task_css_set(current); /* current is @child's parent */
2715 +- get_css_set(cset);
2716 +- cset->nr_tasks++;
2717 +- css_set_move_task(child, NULL, cset, false);
2718 ++ /* init tasks are special, only link regular threads */
2719 ++ if (likely(child->pid)) {
2720 ++ WARN_ON_ONCE(!list_empty(&child->cg_list));
2721 ++ cset = task_css_set(current); /* current is @child's parent */
2722 ++ get_css_set(cset);
2723 ++ cset->nr_tasks++;
2724 ++ css_set_move_task(child, NULL, cset, false);
2725 ++ }
2726 +
2727 + /*
2728 + * If the cgroup has to be frozen, the new task has too. Let's set
2729 +diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
2730 +index f3b7239f1892..27f149f5d4a9 100644
2731 +--- a/kernel/power/suspend.c
2732 ++++ b/kernel/power/suspend.c
2733 +@@ -131,11 +131,12 @@ static void s2idle_loop(void)
2734 + * to avoid them upfront.
2735 + */
2736 + for (;;) {
2737 +- if (s2idle_ops && s2idle_ops->wake)
2738 +- s2idle_ops->wake();
2739 +-
2740 +- if (pm_wakeup_pending())
2741 ++ if (s2idle_ops && s2idle_ops->wake) {
2742 ++ if (s2idle_ops->wake())
2743 ++ break;
2744 ++ } else if (pm_wakeup_pending()) {
2745 + break;
2746 ++ }
2747 +
2748 + pm_wakeup_clear(false);
2749 +
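
platform_s2idle_ops->wake() now returns bool, so a platform that knows a real wake event arrived can break the loop itself; only platforms without a wake hook fall back to pm_wakeup_pending(). The restructured control flow, with stand-in hooks:

    #include <stdbool.h>
    #include <stdio.h>

    static int events = 2;

    /* platform hook stand-in: returns true once a real wake event arrives */
    static bool platform_wake(void)
    {
        return --events == 0;
    }

    int main(void)
    {
        bool (*wake)(void) = platform_wake;  /* NULL on hookless platforms */

        for (;;) {
            if (wake) {
                if (wake())
                    break;                   /* platform saw a real event */
            } else /* if (pm_wakeup_pending()) */ {
                break;
            }
            printf("spurious wakeup, re-entering idle\n");
        }
        printf("resuming\n");
        return 0;
    }
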
2750 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
2751 +index bfe756dee129..894fb81313fd 100644
2752 +--- a/kernel/sched/core.c
2753 ++++ b/kernel/sched/core.c
2754 +@@ -7260,7 +7260,7 @@ capacity_from_percent(char *buf)
2755 + &req.percent);
2756 + if (req.ret)
2757 + return req;
2758 +- if (req.percent > UCLAMP_PERCENT_SCALE) {
2759 ++ if ((u64)req.percent > UCLAMP_PERCENT_SCALE) {
2760 + req.ret = -ERANGE;
2761 + return req;
2762 + }
2763 +diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
2764 +index 000c742d0527..6aee699deb28 100644
2765 +--- a/net/mac80211/cfg.c
2766 ++++ b/net/mac80211/cfg.c
2767 +@@ -3450,7 +3450,7 @@ int ieee80211_attach_ack_skb(struct ieee80211_local *local, struct sk_buff *skb,
2768 +
2769 + spin_lock_irqsave(&local->ack_status_lock, spin_flags);
2770 + id = idr_alloc(&local->ack_status_frames, ack_skb,
2771 +- 1, 0x40, GFP_ATOMIC);
2772 ++ 1, 0x2000, GFP_ATOMIC);
2773 + spin_unlock_irqrestore(&local->ack_status_lock, spin_flags);
2774 +
2775 + if (id < 0) {
2776 +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
2777 +index 5fa13176036f..e041af2f021a 100644
2778 +--- a/net/mac80211/mlme.c
2779 ++++ b/net/mac80211/mlme.c
2780 +@@ -8,7 +8,7 @@
2781 + * Copyright 2007, Michael Wu <flamingice@××××××××.net>
2782 + * Copyright 2013-2014 Intel Mobile Communications GmbH
2783 + * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
2784 +- * Copyright (C) 2018 - 2019 Intel Corporation
2785 ++ * Copyright (C) 2018 - 2020 Intel Corporation
2786 + */
2787 +
2788 + #include <linux/delay.h>
2789 +@@ -1311,7 +1311,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
2790 + if (!res) {
2791 + ch_switch.timestamp = timestamp;
2792 + ch_switch.device_timestamp = device_timestamp;
2793 +- ch_switch.block_tx = beacon ? csa_ie.mode : 0;
2794 ++ ch_switch.block_tx = csa_ie.mode;
2795 + ch_switch.chandef = csa_ie.chandef;
2796 + ch_switch.count = csa_ie.count;
2797 + ch_switch.delay = csa_ie.max_switch_time;
2798 +@@ -1404,7 +1404,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
2799 +
2800 + sdata->vif.csa_active = true;
2801 + sdata->csa_chandef = csa_ie.chandef;
2802 +- sdata->csa_block_tx = ch_switch.block_tx;
2803 ++ sdata->csa_block_tx = csa_ie.mode;
2804 + ifmgd->csa_ignored_same_chan = false;
2805 +
2806 + if (sdata->csa_block_tx)
2807 +@@ -1438,7 +1438,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
2808 + * reset when the disconnection worker runs.
2809 + */
2810 + sdata->vif.csa_active = true;
2811 +- sdata->csa_block_tx = ch_switch.block_tx;
2812 ++ sdata->csa_block_tx = csa_ie.mode;
2813 +
2814 + ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work);
2815 + mutex_unlock(&local->chanctx_mtx);
2816 +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
2817 +index a8a7306a1f56..b0444e4aba2a 100644
2818 +--- a/net/mac80211/tx.c
2819 ++++ b/net/mac80211/tx.c
2820 +@@ -2442,7 +2442,7 @@ static int ieee80211_store_ack_skb(struct ieee80211_local *local,
2821 +
2822 + spin_lock_irqsave(&local->ack_status_lock, flags);
2823 + id = idr_alloc(&local->ack_status_frames, ack_skb,
2824 +- 1, 0x40, GFP_ATOMIC);
2825 ++ 1, 0x2000, GFP_ATOMIC);
2826 + spin_unlock_irqrestore(&local->ack_status_lock, flags);
2827 +
2828 + if (id >= 0) {
2829 +diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
2830 +index 523722be6a16..45366570ea65 100644
2831 +--- a/net/sunrpc/xprtrdma/frwr_ops.c
2832 ++++ b/net/sunrpc/xprtrdma/frwr_ops.c
2833 +@@ -298,8 +298,8 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
2834 + {
2835 + struct rpcrdma_ia *ia = &r_xprt->rx_ia;
2836 + struct ib_reg_wr *reg_wr;
2837 ++ int i, n, dma_nents;
2838 + struct ib_mr *ibmr;
2839 +- int i, n;
2840 + u8 key;
2841 +
2842 + if (nsegs > ia->ri_max_frwr_depth)
2843 +@@ -323,15 +323,16 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
2844 + break;
2845 + }
2846 + mr->mr_dir = rpcrdma_data_dir(writing);
2847 ++ mr->mr_nents = i;
2848 +
2849 +- mr->mr_nents =
2850 +- ib_dma_map_sg(ia->ri_id->device, mr->mr_sg, i, mr->mr_dir);
2851 +- if (!mr->mr_nents)
2852 ++ dma_nents = ib_dma_map_sg(ia->ri_id->device, mr->mr_sg, mr->mr_nents,
2853 ++ mr->mr_dir);
2854 ++ if (!dma_nents)
2855 + goto out_dmamap_err;
2856 +
2857 + ibmr = mr->frwr.fr_mr;
2858 +- n = ib_map_mr_sg(ibmr, mr->mr_sg, mr->mr_nents, NULL, PAGE_SIZE);
2859 +- if (unlikely(n != mr->mr_nents))
2860 ++ n = ib_map_mr_sg(ibmr, mr->mr_sg, dma_nents, NULL, PAGE_SIZE);
2861 ++ if (n != dma_nents)
2862 + goto out_mapmr_err;
2863 +
2864 + ibmr->iova &= 0x00000000ffffffff;
2865 +diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
2866 +index c9b7e1a073ee..df40d38f6e29 100644
2867 +--- a/sound/core/pcm_native.c
2868 ++++ b/sound/core/pcm_native.c
2869 +@@ -2474,7 +2474,8 @@ void snd_pcm_release_substream(struct snd_pcm_substream *substream)
2870 +
2871 + snd_pcm_drop(substream);
2872 + if (substream->hw_opened) {
2873 +- do_hw_free(substream);
2874 ++ if (substream->runtime->status->state != SNDRV_PCM_STATE_OPEN)
2875 ++ do_hw_free(substream);
2876 + substream->ops->close(substream);
2877 + substream->hw_opened = 0;
2878 + }
2879 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2880 +index f2ea3528bfb1..128d81b4140b 100644
2881 +--- a/sound/pci/hda/patch_realtek.c
2882 ++++ b/sound/pci/hda/patch_realtek.c
2883 +@@ -2447,6 +2447,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
2884 + SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
2885 + SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
2886 + SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
2887 ++ SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
2888 + SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
2889 + SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
2890 + SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
2891 +@@ -5701,8 +5702,11 @@ static void alc_fixup_headset_jack(struct hda_codec *codec,
2892 + break;
2893 + case HDA_FIXUP_ACT_INIT:
2894 + switch (codec->core.vendor_id) {
2895 ++ case 0x10ec0215:
2896 + case 0x10ec0225:
2897 ++ case 0x10ec0285:
2898 + case 0x10ec0295:
2899 ++ case 0x10ec0289:
2900 + case 0x10ec0299:
2901 + alc_write_coef_idx(codec, 0x48, 0xd011);
2902 + alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045);
2903 +diff --git a/sound/usb/clock.c b/sound/usb/clock.c
2904 +index 018b1ecb5404..a48313dfa967 100644
2905 +--- a/sound/usb/clock.c
2906 ++++ b/sound/usb/clock.c
2907 +@@ -151,8 +151,34 @@ static int uac_clock_selector_set_val(struct snd_usb_audio *chip, int selector_i
2908 + return ret;
2909 + }
2910 +
2911 ++/*
2912 ++ * Assume the clock is valid if the clock source supports only a single
2913 ++ * sample rate, the terminal is connected directly to it (there is no
2914 ++ * clock selector) and the clock type is internal. This deals with some
2915 ++ * Denon DJ controllers that always report that the clock is invalid.
2916 ++ */
2917 ++static bool uac_clock_source_is_valid_quirk(struct snd_usb_audio *chip,
2918 ++ struct audioformat *fmt,
2919 ++ int source_id)
2920 ++{
2921 ++ if (fmt->protocol == UAC_VERSION_2) {
2922 ++ struct uac_clock_source_descriptor *cs_desc =
2923 ++ snd_usb_find_clock_source(chip->ctrl_intf, source_id);
2924 ++
2925 ++ if (!cs_desc)
2926 ++ return false;
2927 ++
2928 ++ return (fmt->nr_rates == 1 &&
2929 ++ (fmt->clock & 0xff) == cs_desc->bClockID &&
2930 ++ (cs_desc->bmAttributes & 0x3) !=
2931 ++ UAC_CLOCK_SOURCE_TYPE_EXT);
2932 ++ }
2933 ++
2934 ++ return false;
2935 ++}
2936 ++
2937 + static bool uac_clock_source_is_valid(struct snd_usb_audio *chip,
2938 +- int protocol,
2939 ++ struct audioformat *fmt,
2940 + int source_id)
2941 + {
2942 + int err;
2943 +@@ -160,7 +186,7 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip,
2944 + struct usb_device *dev = chip->dev;
2945 + u32 bmControls;
2946 +
2947 +- if (protocol == UAC_VERSION_3) {
2948 ++ if (fmt->protocol == UAC_VERSION_3) {
2949 + struct uac3_clock_source_descriptor *cs_desc =
2950 + snd_usb_find_clock_source_v3(chip->ctrl_intf, source_id);
2951 +
2952 +@@ -194,10 +220,14 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip,
2953 + return false;
2954 + }
2955 +
2956 +- return data ? true : false;
2957 ++ if (data)
2958 ++ return true;
2959 ++ else
2960 ++ return uac_clock_source_is_valid_quirk(chip, fmt, source_id);
2961 + }
2962 +
2963 +-static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
2964 ++static int __uac_clock_find_source(struct snd_usb_audio *chip,
2965 ++ struct audioformat *fmt, int entity_id,
2966 + unsigned long *visited, bool validate)
2967 + {
2968 + struct uac_clock_source_descriptor *source;
2969 +@@ -217,7 +247,7 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
2970 + source = snd_usb_find_clock_source(chip->ctrl_intf, entity_id);
2971 + if (source) {
2972 + entity_id = source->bClockID;
2973 +- if (validate && !uac_clock_source_is_valid(chip, UAC_VERSION_2,
2974 ++ if (validate && !uac_clock_source_is_valid(chip, fmt,
2975 + entity_id)) {
2976 + usb_audio_err(chip,
2977 + "clock source %d is not valid, cannot use\n",
2978 +@@ -248,8 +278,9 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
2979 + }
2980 +
2981 + cur = ret;
2982 +- ret = __uac_clock_find_source(chip, selector->baCSourceID[ret - 1],
2983 +- visited, validate);
2984 ++ ret = __uac_clock_find_source(chip, fmt,
2985 ++ selector->baCSourceID[ret - 1],
2986 ++ visited, validate);
2987 + if (!validate || ret > 0 || !chip->autoclock)
2988 + return ret;
2989 +
2990 +@@ -260,8 +291,9 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
2991 + if (i == cur)
2992 + continue;
2993 +
2994 +- ret = __uac_clock_find_source(chip, selector->baCSourceID[i - 1],
2995 +- visited, true);
2996 ++ ret = __uac_clock_find_source(chip, fmt,
2997 ++ selector->baCSourceID[i - 1],
2998 ++ visited, true);
2999 + if (ret < 0)
3000 + continue;
3001 +
3002 +@@ -281,14 +313,16 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
3003 + /* FIXME: multipliers only act as pass-thru element for now */
3004 + multiplier = snd_usb_find_clock_multiplier(chip->ctrl_intf, entity_id);
3005 + if (multiplier)
3006 +- return __uac_clock_find_source(chip, multiplier->bCSourceID,
3007 +- visited, validate);
3008 ++ return __uac_clock_find_source(chip, fmt,
3009 ++ multiplier->bCSourceID,
3010 ++ visited, validate);
3011 +
3012 + return -EINVAL;
3013 + }
3014 +
3015 +-static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
3016 +- unsigned long *visited, bool validate)
3017 ++static int __uac3_clock_find_source(struct snd_usb_audio *chip,
3018 ++ struct audioformat *fmt, int entity_id,
3019 ++ unsigned long *visited, bool validate)
3020 + {
3021 + struct uac3_clock_source_descriptor *source;
3022 + struct uac3_clock_selector_descriptor *selector;
3023 +@@ -307,7 +341,7 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
3024 + source = snd_usb_find_clock_source_v3(chip->ctrl_intf, entity_id);
3025 + if (source) {
3026 + entity_id = source->bClockID;
3027 +- if (validate && !uac_clock_source_is_valid(chip, UAC_VERSION_3,
3028 ++ if (validate && !uac_clock_source_is_valid(chip, fmt,
3029 + entity_id)) {
3030 + usb_audio_err(chip,
3031 + "clock source %d is not valid, cannot use\n",
3032 +@@ -338,7 +372,8 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
3033 + }
3034 +
3035 + cur = ret;
3036 +- ret = __uac3_clock_find_source(chip, selector->baCSourceID[ret - 1],
3037 ++ ret = __uac3_clock_find_source(chip, fmt,
3038 ++ selector->baCSourceID[ret - 1],
3039 + visited, validate);
3040 + if (!validate || ret > 0 || !chip->autoclock)
3041 + return ret;
3042 +@@ -350,8 +385,9 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
3043 + if (i == cur)
3044 + continue;
3045 +
3046 +- ret = __uac3_clock_find_source(chip, selector->baCSourceID[i - 1],
3047 +- visited, true);
3048 ++ ret = __uac3_clock_find_source(chip, fmt,
3049 ++ selector->baCSourceID[i - 1],
3050 ++ visited, true);
3051 + if (ret < 0)
3052 + continue;
3053 +
3054 +@@ -372,7 +408,8 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
3055 + multiplier = snd_usb_find_clock_multiplier_v3(chip->ctrl_intf,
3056 + entity_id);
3057 + if (multiplier)
3058 +- return __uac3_clock_find_source(chip, multiplier->bCSourceID,
3059 ++ return __uac3_clock_find_source(chip, fmt,
3060 ++ multiplier->bCSourceID,
3061 + visited, validate);
3062 +
3063 + return -EINVAL;
3064 +@@ -389,18 +426,18 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
3065 + *
3066 + * Returns the clock source UnitID (>=0) on success, or an error.
3067 + */
3068 +-int snd_usb_clock_find_source(struct snd_usb_audio *chip, int protocol,
3069 +- int entity_id, bool validate)
3070 ++int snd_usb_clock_find_source(struct snd_usb_audio *chip,
3071 ++ struct audioformat *fmt, bool validate)
3072 + {
3073 + DECLARE_BITMAP(visited, 256);
3074 + memset(visited, 0, sizeof(visited));
3075 +
3076 +- switch (protocol) {
3077 ++ switch (fmt->protocol) {
3078 + case UAC_VERSION_2:
3079 +- return __uac_clock_find_source(chip, entity_id, visited,
3080 ++ return __uac_clock_find_source(chip, fmt, fmt->clock, visited,
3081 + validate);
3082 + case UAC_VERSION_3:
3083 +- return __uac3_clock_find_source(chip, entity_id, visited,
3084 ++ return __uac3_clock_find_source(chip, fmt, fmt->clock, visited,
3085 + validate);
3086 + default:
3087 + return -EINVAL;
3088 +@@ -501,8 +538,7 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip, int iface,
3089 + * automatic clock selection if the current clock is not
3090 + * valid.
3091 + */
3092 +- clock = snd_usb_clock_find_source(chip, fmt->protocol,
3093 +- fmt->clock, true);
3094 ++ clock = snd_usb_clock_find_source(chip, fmt, true);
3095 + if (clock < 0) {
3096 + /* We did not find a valid clock, but that might be
3097 + * because the current sample rate does not match an
3098 +@@ -510,8 +546,7 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip, int iface,
3099 + * and we will do another validation after setting the
3100 + * rate.
3101 + */
3102 +- clock = snd_usb_clock_find_source(chip, fmt->protocol,
3103 +- fmt->clock, false);
3104 ++ clock = snd_usb_clock_find_source(chip, fmt, false);
3105 + if (clock < 0)
3106 + return clock;
3107 + }
3108 +@@ -577,7 +612,7 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip, int iface,
3109 +
3110 + validation:
3111 + /* validate clock after rate change */
3112 +- if (!uac_clock_source_is_valid(chip, fmt->protocol, clock))
3113 ++ if (!uac_clock_source_is_valid(chip, fmt, clock))
3114 + return -ENXIO;
3115 + return 0;
3116 + }
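
Threading the audioformat through the clock-tree walk is what enables the
Denon DJ workaround above: when a UAC2 device answers "invalid" for its clock
source, the source is still trusted if the format has exactly one sample
rate, its clock field points directly at this source, and the source type is
internal. A compact sketch of that check-then-quirk fallback, with stub
predicates standing in for the USB control transfers (names are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    struct fmt { int nr_rates; int clock_id; bool clock_is_internal; };

    /* stub: what the device reports through its validity control */
    static bool device_reports_valid(int source_id) { (void)source_id; return false; }

    /* quirk: a single-rate format wired directly to an internal clock
     * is assumed valid even if the device claims otherwise */
    static bool valid_by_quirk(const struct fmt *f, int source_id)
    {
        return f->nr_rates == 1 &&
               f->clock_id == source_id &&
               f->clock_is_internal;
    }

    static bool clock_source_is_valid(const struct fmt *f, int source_id)
    {
        if (device_reports_valid(source_id))
            return true;
        return valid_by_quirk(f, source_id);
    }

    int main(void)
    {
        struct fmt f = { 1, 5, true };
        printf("valid: %d\n", clock_source_is_valid(&f, 5)); /* 1, via the quirk */
        return 0;
    }
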
3117 +diff --git a/sound/usb/clock.h b/sound/usb/clock.h
3118 +index 076e31b79ee0..68df0fbe09d0 100644
3119 +--- a/sound/usb/clock.h
3120 ++++ b/sound/usb/clock.h
3121 +@@ -6,7 +6,7 @@ int snd_usb_init_sample_rate(struct snd_usb_audio *chip, int iface,
3122 + struct usb_host_interface *alts,
3123 + struct audioformat *fmt, int rate);
3124 +
3125 +-int snd_usb_clock_find_source(struct snd_usb_audio *chip, int protocol,
3126 +- int entity_id, bool validate);
3127 ++int snd_usb_clock_find_source(struct snd_usb_audio *chip,
3128 ++ struct audioformat *fmt, bool validate);
3129 +
3130 + #endif /* __USBAUDIO_CLOCK_H */
3131 +diff --git a/sound/usb/format.c b/sound/usb/format.c
3132 +index d79db71305f6..25668ba5e68e 100644
3133 +--- a/sound/usb/format.c
3134 ++++ b/sound/usb/format.c
3135 +@@ -322,8 +322,7 @@ static int parse_audio_format_rates_v2v3(struct snd_usb_audio *chip,
3136 + struct usb_device *dev = chip->dev;
3137 + unsigned char tmp[2], *data;
3138 + int nr_triplets, data_size, ret = 0, ret_l6;
3139 +- int clock = snd_usb_clock_find_source(chip, fp->protocol,
3140 +- fp->clock, false);
3141 ++ int clock = snd_usb_clock_find_source(chip, fp, false);
3142 +
3143 + if (clock < 0) {
3144 + dev_err(&dev->dev,
3145 +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
3146 +index 6cd4ff09c5ee..d2a050bb8341 100644
3147 +--- a/sound/usb/mixer.c
3148 ++++ b/sound/usb/mixer.c
3149 +@@ -897,6 +897,15 @@ static int parse_term_proc_unit(struct mixer_build *state,
3150 + return 0;
3151 + }
3152 +
3153 ++static int parse_term_effect_unit(struct mixer_build *state,
3154 ++ struct usb_audio_term *term,
3155 ++ void *p1, int id)
3156 ++{
3157 ++ term->type = UAC3_EFFECT_UNIT << 16; /* virtual type */
3158 ++ term->id = id;
3159 ++ return 0;
3160 ++}
3161 ++
3162 + static int parse_term_uac2_clock_source(struct mixer_build *state,
3163 + struct usb_audio_term *term,
3164 + void *p1, int id)
3165 +@@ -981,8 +990,7 @@ static int __check_input_term(struct mixer_build *state, int id,
3166 + UAC3_PROCESSING_UNIT);
3167 + case PTYPE(UAC_VERSION_2, UAC2_EFFECT_UNIT):
3168 + case PTYPE(UAC_VERSION_3, UAC3_EFFECT_UNIT):
3169 +- return parse_term_proc_unit(state, term, p1, id,
3170 +- UAC3_EFFECT_UNIT);
3171 ++ return parse_term_effect_unit(state, term, p1, id);
3172 + case PTYPE(UAC_VERSION_1, UAC1_EXTENSION_UNIT):
3173 + case PTYPE(UAC_VERSION_2, UAC2_EXTENSION_UNIT_V2):
3174 + case PTYPE(UAC_VERSION_3, UAC3_EXTENSION_UNIT):
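
Effect units previously went through parse_term_proc_unit(), which reads
processing-unit descriptor fields that effect-unit descriptors do not share;
the new parse_term_effect_unit() only synthesizes a virtual terminal type and
records the unit id. A minimal sketch of that virtual-type convention, where
unit-derived types are shifted into the high 16 bits to keep them disjoint
from real terminal types (the constant is a stand-in, not the UAC code):

    #include <stdio.h>

    #define EFFECT_UNIT 0x07  /* illustrative stand-in for UAC3_EFFECT_UNIT */

    struct term { unsigned int type; int id; };

    /* synthesize a virtual terminal type without touching the descriptor */
    static void mark_effect_unit(struct term *t, int id)
    {
        t->type = EFFECT_UNIT << 16;
        t->id = id;
    }

    int main(void)
    {
        struct term t;
        mark_effect_unit(&t, 12);
        printf("type=0x%x id=%d\n", t.type, t.id);
        return 0;
    }
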
3175 +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
3176 +index 82184036437b..1ed25b1d2a6a 100644
3177 +--- a/sound/usb/quirks.c
3178 ++++ b/sound/usb/quirks.c
3179 +@@ -1402,6 +1402,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
3180 + case USB_ID(0x1395, 0x740a): /* Sennheiser DECT */
3181 + case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
3182 + case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
3183 ++ case USB_ID(0x2912, 0x30c8): /* Audioengine D1 */
3184 + return true;
3185 + }
3186 +
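
The sample-rate quirk list keys each device on a packed vendor:product id, so
covering the Audioengine D1 is a one-line case. A self-contained sketch of the
same packing-and-switch pattern (the macro mirrors the driver's USB_ID()):

    #include <stdio.h>

    #define USB_ID(vendor, product) (((unsigned int)(vendor) << 16) | (product))

    static int needs_rate_quirk(unsigned int id)
    {
        switch (id) {
        case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
        case USB_ID(0x2912, 0x30c8): /* Audioengine D1 */
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        printf("%d\n", needs_rate_quirk(USB_ID(0x2912, 0x30c8))); /* 1 */
        return 0;
    }
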
3187 +diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
3188 +index 2c41d47f6f83..90d23cc3c8d4 100644
3189 +--- a/tools/perf/util/stat-shadow.c
3190 ++++ b/tools/perf/util/stat-shadow.c
3191 +@@ -18,7 +18,6 @@
3192 + * AGGR_NONE: Use matching CPU
3193 + * AGGR_THREAD: Not supported?
3194 + */
3195 +-static bool have_frontend_stalled;
3196 +
3197 + struct runtime_stat rt_stat;
3198 + struct stats walltime_nsecs_stats;
3199 +@@ -144,7 +143,6 @@ void runtime_stat__exit(struct runtime_stat *st)
3200 +
3201 + void perf_stat__init_shadow_stats(void)
3202 + {
3203 +- have_frontend_stalled = pmu_have_event("cpu", "stalled-cycles-frontend");
3204 + runtime_stat__init(&rt_stat);
3205 + }
3206 +
3207 +@@ -853,10 +851,6 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
3208 + print_metric(config, ctxp, NULL, "%7.2f ",
3209 + "stalled cycles per insn",
3210 + ratio);
3211 +- } else if (have_frontend_stalled) {
3212 +- out->new_line(config, ctxp);
3213 +- print_metric(config, ctxp, NULL, "%7.2f ",
3214 +- "stalled cycles per insn", 0);
3215 + }
3216 + } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) {
3217 + if (runtime_stat_n(st, STAT_BRANCHES, ctx, cpu) != 0)
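
Finally, the stat-shadow change removes the have_frontend_stalled fallback,
which printed a placeholder "0.00 stalled cycles per insn" whenever the CPU
merely advertises a stalled-cycles-frontend event that was not actually
counted in the run; the metric is now emitted only from measured values. A
sketch of that print-only-when-measured rule (struct and names illustrative):

    #include <stdio.h>

    struct counts { double stalled; double insns; int stalled_counted; };

    /* print a derived metric only when its inputs were really measured,
     * instead of emitting a misleading zero placeholder */
    static void print_stalled_per_insn(const struct counts *c)
    {
        if (c->stalled_counted && c->insns > 0)
            printf("%7.2f stalled cycles per insn\n", c->stalled / c->insns);
    }

    int main(void)
    {
        struct counts c = { 0.0, 1e6, 0 };
        print_stalled_per_insn(&c);  /* prints nothing: event not counted */

        c.stalled = 2.5e6;
        c.stalled_counted = 1;
        print_stalled_per_insn(&c);  /* "   2.50 stalled cycles per insn" */
        return 0;
    }
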