Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Wed, 19 Feb 2020 23:48:31
Message-Id: 1582156094.bbd411a2c268089f5cb7368a9f2fbc096ba7aa60.mpagano@gentoo
commit: bbd411a2c268089f5cb7368a9f2fbc096ba7aa60
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Feb 19 23:48:14 2020 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Feb 19 23:48:14 2020 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=bbd411a2

Linux patch 5.4.21

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1020_linux-5.4.21.patch | 2512 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 2516 insertions(+)

diff --git a/0000_README b/0000_README
index 48dbccb..f62ef6c 100644
--- a/0000_README
+++ b/0000_README
@@ -123,6 +123,10 @@ Patch: 1019_linux-5.4.20.patch
From: http://www.kernel.org
Desc: Linux 5.4.20

+Patch: 1020_linux-5.4.21.patch
+From: http://www.kernel.org
+Desc: Linux 5.4.21
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1020_linux-5.4.21.patch b/1020_linux-5.4.21.patch
new file mode 100644
index 0000000..dd5a76c
--- /dev/null
+++ b/1020_linux-5.4.21.patch
@@ -0,0 +1,2512 @@
+diff --git a/Makefile b/Makefile
+index 21e58bd54715..adfc88f00f07 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 20
++SUBLEVEL = 21
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/arm/mach-npcm/Kconfig b/arch/arm/mach-npcm/Kconfig
+index 880bc2a5cada..7f7002dc2b21 100644
+--- a/arch/arm/mach-npcm/Kconfig
++++ b/arch/arm/mach-npcm/Kconfig
+@@ -11,7 +11,7 @@ config ARCH_NPCM7XX
+ depends on ARCH_MULTI_V7
+ select PINCTRL_NPCM7XX
+ select NPCM7XX_TIMER
+- select ARCH_REQUIRE_GPIOLIB
++ select GPIOLIB
+ select CACHE_L2X0
+ select ARM_GIC
+ select HAVE_ARM_TWD if SMP
+diff --git a/arch/arm64/boot/dts/arm/fvp-base-revc.dts b/arch/arm64/boot/dts/arm/fvp-base-revc.dts
+index 62ab0d54ff71..335fff762451 100644
+--- a/arch/arm64/boot/dts/arm/fvp-base-revc.dts
++++ b/arch/arm64/boot/dts/arm/fvp-base-revc.dts
+@@ -161,10 +161,10 @@
+ bus-range = <0x0 0x1>;
+ reg = <0x0 0x40000000 0x0 0x10000000>;
+ ranges = <0x2000000 0x0 0x50000000 0x0 0x50000000 0x0 0x10000000>;
+- interrupt-map = <0 0 0 1 &gic GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
+- <0 0 0 2 &gic GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
+- <0 0 0 3 &gic GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+- <0 0 0 4 &gic GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-map = <0 0 0 1 &gic 0 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
++ <0 0 0 2 &gic 0 0 GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
++ <0 0 0 3 &gic 0 0 GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
++ <0 0 0 4 &gic 0 0 GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ msi-map = <0x0 &its 0x0 0x10000>;
+ iommu-map = <0x0 &smmu 0x0 0x10000>;
+diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
+index d54586d5b031..fab013c5ee8c 100644
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -466,6 +466,13 @@ static void ssbs_thread_switch(struct task_struct *next)
+ if (unlikely(next->flags & PF_KTHREAD))
+ return;
+
++ /*
++ * If all CPUs implement the SSBS extension, then we just need to
++ * context-switch the PSTATE field.
++ */
++ if (cpu_have_feature(cpu_feature(SSBS)))
++ return;
++
+ /* If the mitigation is enabled, then we leave SSBS clear. */
+ if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
+ test_tsk_thread_flag(next, TIF_SSBD))
+diff --git a/arch/s390/boot/uv.c b/arch/s390/boot/uv.c
+index ed007f4a6444..3f501159ee9f 100644
+--- a/arch/s390/boot/uv.c
++++ b/arch/s390/boot/uv.c
+@@ -15,7 +15,8 @@ void uv_query_info(void)
+ if (!test_facility(158))
+ return;
+
+- if (uv_call(0, (uint64_t)&uvcb))
++ /* rc==0x100 means that there is additional data we do not process */
++ if (uv_call(0, (uint64_t)&uvcb) && uvcb.header.rc != 0x100)
+ return;
+
+ if (test_bit_inv(BIT_UVC_CMD_SET_SHARED_ACCESS, (unsigned long *)uvcb.inst_calls_list) &&
+diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
+index 2dc9eb4e1acc..b6a4ce9dafaf 100644
+--- a/arch/s390/include/asm/timex.h
++++ b/arch/s390/include/asm/timex.h
+@@ -155,7 +155,7 @@ static inline void get_tod_clock_ext(char *clk)
+
+ static inline unsigned long long get_tod_clock(void)
+ {
+- unsigned char clk[STORE_CLOCK_EXT_SIZE];
++ char clk[STORE_CLOCK_EXT_SIZE];
+
+ get_tod_clock_ext(clk);
+ return *((unsigned long long *)&clk[1]);
+diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
+index 64c3e70b0556..beffafd7dcc3 100644
+--- a/arch/x86/events/amd/core.c
++++ b/arch/x86/events/amd/core.c
+@@ -246,6 +246,7 @@ static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
+ [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60,
++ [PERF_COUNT_HW_CACHE_MISSES] = 0x0964,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x0287,
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index ce83950036c5..e5ad97a82342 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -1713,6 +1713,8 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
+ old = ((s64)(prev_raw_count << shift) >> shift);
+ local64_add(new - old + count * period, &event->count);
+
++ local64_set(&hwc->period_left, -new);
++
+ perf_event_update_userpage(event);
+
+ return 0;
+diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
+index c1d7b866a03f..4e3f137ffa8c 100644
+--- a/arch/x86/kvm/paging_tmpl.h
++++ b/arch/x86/kvm/paging_tmpl.h
+@@ -33,7 +33,7 @@
+ #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
+ #define PT_HAVE_ACCESSED_DIRTY(mmu) true
+ #ifdef CONFIG_X86_64
+- #define PT_MAX_FULL_LEVELS 4
++ #define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
+ #define CMPXCHG cmpxchg
+ #else
+ #define CMPXCHG cmpxchg64
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index dc7c166c4335..84b57b461ad6 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -2975,6 +2975,9 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+
+ static int get_ept_level(struct kvm_vcpu *vcpu)
+ {
++ /* Nested EPT currently only supports 4-level walks. */
++ if (is_guest_mode(vcpu) && nested_cpu_has_ept(get_vmcs12(vcpu)))
++ return 4;
+ if (cpu_has_vmx_ept_5levels() && (cpuid_maxphyaddr(vcpu) > 48))
+ return 5;
+ return 4;
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index edde5ee8c6f5..95180d67d570 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -445,6 +445,14 @@ void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu)
+ * for #DB exceptions under VMX.
+ */
+ vcpu->arch.dr6 ^= payload & DR6_RTM;
++
++ /*
++ * The #DB payload is defined as compatible with the 'pending
++ * debug exceptions' field under VMX, not DR6. While bit 12 is
++ * defined in the 'pending debug exceptions' field (enabled
++ * breakpoint), it is reserved and must be zero in DR6.
++ */
++ vcpu->arch.dr6 &= ~BIT(12);
+ break;
+ case PF_VECTOR:
+ vcpu->arch.cr2 = payload;
+diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
+index bcf8f7501db7..a74c1a0e892d 100644
+--- a/drivers/acpi/acpica/achware.h
++++ b/drivers/acpi/acpica/achware.h
+@@ -101,6 +101,8 @@ acpi_status acpi_hw_enable_all_runtime_gpes(void);
+
+ acpi_status acpi_hw_enable_all_wakeup_gpes(void);
+
++u8 acpi_hw_check_all_gpes(void);
++
+ acpi_status
+ acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+ struct acpi_gpe_block_info *gpe_block,
+diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
+index 04a40d563dd6..84b0b410310e 100644
+--- a/drivers/acpi/acpica/evxfgpe.c
++++ b/drivers/acpi/acpica/evxfgpe.c
+@@ -795,6 +795,38 @@ acpi_status acpi_enable_all_wakeup_gpes(void)
+
+ ACPI_EXPORT_SYMBOL(acpi_enable_all_wakeup_gpes)
+
++/******************************************************************************
++ *
++ * FUNCTION: acpi_any_gpe_status_set
++ *
++ * PARAMETERS: None
++ *
++ * RETURN: Whether or not the status bit is set for any GPE
++ *
++ * DESCRIPTION: Check the status bits of all enabled GPEs and return TRUE if any
++ * of them is set or FALSE otherwise.
++ *
++ ******************************************************************************/
++u32 acpi_any_gpe_status_set(void)
++{
++ acpi_status status;
++ u8 ret;
++
++ ACPI_FUNCTION_TRACE(acpi_any_gpe_status_set);
++
++ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
++ if (ACPI_FAILURE(status)) {
++ return (FALSE);
++ }
++
++ ret = acpi_hw_check_all_gpes();
++ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
++
++ return (ret);
++}
++
++ACPI_EXPORT_SYMBOL(acpi_any_gpe_status_set)
++
+ /*******************************************************************************
+ *
+ * FUNCTION: acpi_install_gpe_block
+diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
+index 565bd3f29f31..b1d7d5f92495 100644
+--- a/drivers/acpi/acpica/hwgpe.c
++++ b/drivers/acpi/acpica/hwgpe.c
+@@ -444,6 +444,53 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+ return (AE_OK);
+ }
+
++/******************************************************************************
++ *
++ * FUNCTION: acpi_hw_get_gpe_block_status
++ *
++ * PARAMETERS: gpe_xrupt_info - GPE Interrupt info
++ * gpe_block - Gpe Block info
++ *
++ * RETURN: Success
++ *
++ * DESCRIPTION: Produce a combined GPE status bits mask for the given block.
++ *
++ ******************************************************************************/
++
++static acpi_status
++acpi_hw_get_gpe_block_status(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
++ struct acpi_gpe_block_info *gpe_block,
++ void *ret_ptr)
++{
++ struct acpi_gpe_register_info *gpe_register_info;
++ u64 in_enable, in_status;
++ acpi_status status;
++ u8 *ret = ret_ptr;
++ u32 i;
++
++ /* Examine each GPE Register within the block */
++
++ for (i = 0; i < gpe_block->register_count; i++) {
++ gpe_register_info = &gpe_block->register_info[i];
++
++ status = acpi_hw_read(&in_enable,
++ &gpe_register_info->enable_address);
++ if (ACPI_FAILURE(status)) {
++ continue;
++ }
++
++ status = acpi_hw_read(&in_status,
++ &gpe_register_info->status_address);
++ if (ACPI_FAILURE(status)) {
++ continue;
++ }
++
++ *ret |= in_enable & in_status;
++ }
++
++ return (AE_OK);
++}
++
+ /******************************************************************************
+ *
+ * FUNCTION: acpi_hw_disable_all_gpes
+@@ -510,4 +557,28 @@ acpi_status acpi_hw_enable_all_wakeup_gpes(void)
+ return_ACPI_STATUS(status);
+ }
+
++/******************************************************************************
++ *
++ * FUNCTION: acpi_hw_check_all_gpes
++ *
++ * PARAMETERS: None
++ *
++ * RETURN: Combined status of all GPEs
++ *
++ * DESCRIPTION: Check all enabled GPEs in all GPE blocks and return TRUE if the
++ * status bit is set for at least one of them of FALSE otherwise.
++ *
++ ******************************************************************************/
++
++u8 acpi_hw_check_all_gpes(void)
++{
++ u8 ret = 0;
++
++ ACPI_FUNCTION_TRACE(acpi_hw_check_all_gpes);
++
++ (void)acpi_ev_walk_gpe_list(acpi_hw_get_gpe_block_status, &ret);
++
++ return (ret != 0);
++}
++
+ #endif /* !ACPI_REDUCED_HARDWARE */
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index bd75caff8322..ca5cdb621c2a 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -179,6 +179,7 @@ EXPORT_SYMBOL(first_ec);
+
+ static struct acpi_ec *boot_ec;
+ static bool boot_ec_is_ecdt = false;
++static struct workqueue_struct *ec_wq;
+ static struct workqueue_struct *ec_query_wq;
+
+ static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
+@@ -461,7 +462,7 @@ static void acpi_ec_submit_query(struct acpi_ec *ec)
+ ec_dbg_evt("Command(%s) submitted/blocked",
+ acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
+ ec->nr_pending_queries++;
+- schedule_work(&ec->work);
++ queue_work(ec_wq, &ec->work);
+ }
+ }
+
+@@ -527,7 +528,7 @@ static void acpi_ec_enable_event(struct acpi_ec *ec)
+ #ifdef CONFIG_PM_SLEEP
+ static void __acpi_ec_flush_work(void)
+ {
+- flush_scheduled_work(); /* flush ec->work */
++ drain_workqueue(ec_wq); /* flush ec->work */
+ flush_workqueue(ec_query_wq); /* flush queries */
+ }
+
+@@ -548,8 +549,8 @@ static void acpi_ec_disable_event(struct acpi_ec *ec)
+
+ void acpi_ec_flush_work(void)
+ {
+- /* Without ec_query_wq there is nothing to flush. */
+- if (!ec_query_wq)
++ /* Without ec_wq there is nothing to flush. */
++ if (!ec_wq)
+ return;
+
+ __acpi_ec_flush_work();
+@@ -2032,25 +2033,33 @@ static struct acpi_driver acpi_ec_driver = {
+ .drv.pm = &acpi_ec_pm,
+ };
+
+-static inline int acpi_ec_query_init(void)
++static void acpi_ec_destroy_workqueues(void)
+ {
+- if (!ec_query_wq) {
+- ec_query_wq = alloc_workqueue("kec_query", 0,
+- ec_max_queries);
+- if (!ec_query_wq)
+- return -ENODEV;
++ if (ec_wq) {
++ destroy_workqueue(ec_wq);
++ ec_wq = NULL;
+ }
+- return 0;
+-}
+-
+-static inline void acpi_ec_query_exit(void)
+-{
+ if (ec_query_wq) {
+ destroy_workqueue(ec_query_wq);
+ ec_query_wq = NULL;
+ }
+ }
+
++static int acpi_ec_init_workqueues(void)
++{
++ if (!ec_wq)
++ ec_wq = alloc_ordered_workqueue("kec", 0);
++
++ if (!ec_query_wq)
++ ec_query_wq = alloc_workqueue("kec_query", 0, ec_max_queries);
++
++ if (!ec_wq || !ec_query_wq) {
++ acpi_ec_destroy_workqueues();
++ return -ENODEV;
++ }
++ return 0;
++}
++
+ static const struct dmi_system_id acpi_ec_no_wakeup[] = {
+ {
+ .ident = "Thinkpad X1 Carbon 6th",
+@@ -2081,8 +2090,7 @@ int __init acpi_ec_init(void)
+ int result;
+ int ecdt_fail, dsdt_fail;
+
+- /* register workqueue for _Qxx evaluations */
+- result = acpi_ec_query_init();
++ result = acpi_ec_init_workqueues();
+ if (result)
+ return result;
+
+@@ -2113,6 +2121,6 @@ static void __exit acpi_ec_exit(void)
+ {
+
+ acpi_bus_unregister_driver(&acpi_ec_driver);
+- acpi_ec_query_exit();
++ acpi_ec_destroy_workqueues();
+ }
+ #endif /* 0 */
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index 2af937a8b1c5..62348ec2a807 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -977,21 +977,34 @@ static int acpi_s2idle_prepare_late(void)
+ return 0;
+ }
+
+-static void acpi_s2idle_wake(void)
++static bool acpi_s2idle_wake(void)
+ {
+- /*
+- * If IRQD_WAKEUP_ARMED is set for the SCI at this point, the SCI has
+- * not triggered while suspended, so bail out.
+- */
+- if (!acpi_sci_irq_valid() ||
+- irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
+- return;
++ if (!acpi_sci_irq_valid())
++ return pm_wakeup_pending();
++
++ while (pm_wakeup_pending()) {
++ /*
++ * If IRQD_WAKEUP_ARMED is set for the SCI at this point, the
++ * SCI has not triggered while suspended, so bail out (the
++ * wakeup is pending anyway and the SCI is not the source of
++ * it).
++ */
++ if (irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
++ return true;
++
++ /*
++ * If there are no EC events to process and at least one of the
++ * other enabled GPEs is active, the wakeup is regarded as a
++ * genuine one.
++ *
++ * Note that the checks below must be carried out in this order
++ * to avoid returning prematurely due to a change of the EC GPE
++ * status bit from unset to set between the checks with the
++ * status bits of all the other GPEs unset.
++ */
++ if (acpi_any_gpe_status_set() && !acpi_ec_dispatch_gpe())
++ return true;
+
+- /*
+- * If there are EC events to process, the wakeup may be a spurious one
+- * coming from the EC.
+- */
+- if (acpi_ec_dispatch_gpe()) {
+ /*
+ * Cancel the wakeup and process all pending events in case
+ * there are any wakeup ones in there.
+@@ -1009,8 +1022,19 @@ static void acpi_s2idle_wake(void)
+ acpi_ec_flush_work();
+ acpi_os_wait_events_complete(); /* synchronize Notify handling */
+
++ /*
++ * The SCI is in the "suspended" state now and it cannot produce
++ * new wakeup events till the rearming below, so if any of them
++ * are pending here, they must be resulting from the processing
++ * of EC events above or coming from somewhere else.
++ */
++ if (pm_wakeup_pending())
++ return true;
++
+ rearm_wake_irq(acpi_sci_irq);
+ }
++
++ return false;
+ }
+
+ static void acpi_s2idle_restore_early(void)
+diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
+index 36cf13eee6b8..68413bf9cf87 100644
+--- a/drivers/bus/moxtet.c
++++ b/drivers/bus/moxtet.c
+@@ -466,7 +466,7 @@ static ssize_t input_read(struct file *file, char __user *buf, size_t len,
+ {
+ struct moxtet *moxtet = file->private_data;
+ u8 bin[TURRIS_MOX_MAX_MODULES];
+- u8 hex[sizeof(buf) * 2 + 1];
++ u8 hex[sizeof(bin) * 2 + 1];
+ int ret, n;
+
+ ret = moxtet_spi_read(moxtet, bin);
+diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c
+index 285e0b8f9a97..09e3e25562a8 100644
+--- a/drivers/char/ipmi/ipmb_dev_int.c
++++ b/drivers/char/ipmi/ipmb_dev_int.c
+@@ -265,7 +265,7 @@ static int ipmb_slave_cb(struct i2c_client *client,
+ break;
+
+ case I2C_SLAVE_WRITE_RECEIVED:
+- if (ipmb_dev->msg_idx >= sizeof(struct ipmb_msg))
++ if (ipmb_dev->msg_idx >= sizeof(struct ipmb_msg) - 1)
+ break;
+
+ buf[++ipmb_dev->msg_idx] = *val;
+diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
+index e6fd079783bd..e73ca303f1a7 100644
+--- a/drivers/edac/edac_mc.c
++++ b/drivers/edac/edac_mc.c
+@@ -503,16 +503,10 @@ void edac_mc_free(struct mem_ctl_info *mci)
+ {
+ edac_dbg(1, "\n");
+
+- /* If we're not yet registered with sysfs free only what was allocated
+- * in edac_mc_alloc().
+- */
+- if (!device_is_registered(&mci->dev)) {
+- _edac_mc_free(mci);
+- return;
+- }
++ if (device_is_registered(&mci->dev))
++ edac_unregister_sysfs(mci);
+
+- /* the mci instance is freed here, when the sysfs object is dropped */
+- edac_unregister_sysfs(mci);
++ _edac_mc_free(mci);
+ }
+ EXPORT_SYMBOL_GPL(edac_mc_free);
+
+diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
+index 32d016f1ecd1..0287884ae28c 100644
+--- a/drivers/edac/edac_mc_sysfs.c
++++ b/drivers/edac/edac_mc_sysfs.c
+@@ -276,10 +276,7 @@ static const struct attribute_group *csrow_attr_groups[] = {
+
+ static void csrow_attr_release(struct device *dev)
+ {
+- struct csrow_info *csrow = container_of(dev, struct csrow_info, dev);
+-
+- edac_dbg(1, "device %s released\n", dev_name(dev));
+- kfree(csrow);
++ /* release device with _edac_mc_free() */
+ }
+
+ static const struct device_type csrow_attr_type = {
+@@ -447,8 +444,7 @@ error:
+ csrow = mci->csrows[i];
+ if (!nr_pages_per_csrow(csrow))
+ continue;
+-
+- device_del(&mci->csrows[i]->dev);
++ device_unregister(&mci->csrows[i]->dev);
+ }
+
+ return err;
+@@ -620,10 +616,7 @@ static const struct attribute_group *dimm_attr_groups[] = {
+
+ static void dimm_attr_release(struct device *dev)
+ {
+- struct dimm_info *dimm = container_of(dev, struct dimm_info, dev);
+-
+- edac_dbg(1, "device %s released\n", dev_name(dev));
+- kfree(dimm);
++ /* release device with _edac_mc_free() */
+ }
+
+ static const struct device_type dimm_attr_type = {
+@@ -906,10 +899,7 @@ static const struct attribute_group *mci_attr_groups[] = {
+
+ static void mci_attr_release(struct device *dev)
+ {
+- struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
+-
+- edac_dbg(1, "device %s released\n", dev_name(dev));
+- kfree(mci);
++ /* release device with _edac_mc_free() */
+ }
+
+ static const struct device_type mci_attr_type = {
+diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
+index a9748b5198e6..67f9f82e0db0 100644
+--- a/drivers/gpio/gpio-xilinx.c
++++ b/drivers/gpio/gpio-xilinx.c
+@@ -147,9 +147,10 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ for (i = 0; i < gc->ngpio; i++) {
+ if (*mask == 0)
+ break;
++ /* Once finished with an index write it out to the register */
+ if (index != xgpio_index(chip, i)) {
+ xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
+- xgpio_regoffset(chip, i),
++ index * XGPIO_CHANNEL_OFFSET,
+ chip->gpio_state[index]);
+ spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
+ index = xgpio_index(chip, i);
+@@ -165,7 +166,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ }
+
+ xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
+- xgpio_regoffset(chip, i), chip->gpio_state[index]);
++ index * XGPIO_CHANNEL_OFFSET, chip->gpio_state[index]);
+
+ spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
+ }
+diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
+index 7ee5b7f53aeb..3ece59185d37 100644
+--- a/drivers/gpio/gpiolib-of.c
++++ b/drivers/gpio/gpiolib-of.c
+@@ -146,10 +146,6 @@ static void of_gpio_flags_quirks(struct device_node *np,
+ if (of_property_read_bool(np, "cd-inverted"))
+ *flags ^= OF_GPIO_ACTIVE_LOW;
+ }
+- if (!strcmp(propname, "wp-gpios")) {
+- if (of_property_read_bool(np, "wp-inverted"))
+- *flags ^= OF_GPIO_ACTIVE_LOW;
+- }
+ }
+ /*
+ * Some GPIO fixed regulator quirks.
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 2476306e7030..22506e4614b3 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -3220,6 +3220,17 @@ int gpiod_is_active_low(const struct gpio_desc *desc)
+ }
+ EXPORT_SYMBOL_GPL(gpiod_is_active_low);
+
++/**
++ * gpiod_toggle_active_low - toggle whether a GPIO is active-low or not
++ * @desc: the gpio descriptor to change
++ */
++void gpiod_toggle_active_low(struct gpio_desc *desc)
++{
++ VALIDATE_DESC_VOID(desc);
++ change_bit(FLAG_ACTIVE_LOW, &desc->flags);
++}
++EXPORT_SYMBOL_GPL(gpiod_toggle_active_low);
++
+ /* I/O calls are only valid after configuration completed; the relevant
+ * "is this a valid GPIO" error checks should already have been done.
+ *
+diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
+index 5906c80c4b2c..f57dd195dfb8 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
++++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
+@@ -166,6 +166,7 @@ panfrost_lookup_bos(struct drm_device *dev,
+ break;
+ }
+
++ atomic_inc(&bo->gpu_usecount);
+ job->mappings[i] = mapping;
+ }
+
+diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
+index ca1bc9019600..b3517ff9630c 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
++++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
+@@ -30,6 +30,12 @@ struct panfrost_gem_object {
+ struct mutex lock;
+ } mappings;
+
++ /*
++ * Count the number of jobs referencing this BO so we don't let the
++ * shrinker reclaim this object prematurely.
++ */
++ atomic_t gpu_usecount;
++
+ bool noexec :1;
+ bool is_heap :1;
+ };
+diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+index f5dd7b29bc95..288e46c40673 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
++++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+@@ -41,6 +41,9 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj)
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+
++ if (atomic_read(&bo->gpu_usecount))
++ return false;
++
+ if (!mutex_trylock(&shmem->pages_lock))
+ return false;
+
+diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
+index bbb0c5e3ca6f..9f770d454684 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_job.c
++++ b/drivers/gpu/drm/panfrost/panfrost_job.c
+@@ -270,8 +270,13 @@ static void panfrost_job_cleanup(struct kref *ref)
+ dma_fence_put(job->render_done_fence);
+
+ if (job->mappings) {
+- for (i = 0; i < job->bo_count; i++)
++ for (i = 0; i < job->bo_count; i++) {
++ if (!job->mappings[i])
++ break;
++
++ atomic_dec(&job->mappings[i]->obj->gpu_usecount);
+ panfrost_gem_mapping_put(job->mappings[i]);
++ }
+ kvfree(job->mappings);
+ }
+
+diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
+index a5757b11b730..5b54eff12cc0 100644
+--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
++++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
+@@ -85,7 +85,6 @@ static int sun4i_drv_bind(struct device *dev)
+ }
+
+ drm_mode_config_init(drm);
+- drm->mode_config.allow_fb_modifiers = true;
+
+ ret = component_bind_all(drm->dev, drm);
+ if (ret) {
+diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
+index 5bd60ded3d81..909eba43664a 100644
+--- a/drivers/gpu/drm/vgem/vgem_drv.c
++++ b/drivers/gpu/drm/vgem/vgem_drv.c
+@@ -196,9 +196,10 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
+ return ERR_CAST(obj);
+
+ ret = drm_gem_handle_create(file, &obj->base, handle);
+- drm_gem_object_put_unlocked(&obj->base);
+- if (ret)
++ if (ret) {
++ drm_gem_object_put_unlocked(&obj->base);
+ return ERR_PTR(ret);
++ }
+
+ return &obj->base;
+ }
+@@ -221,7 +222,9 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ args->size = gem_object->size;
+ args->pitch = pitch;
+
+- DRM_DEBUG("Created object of size %lld\n", size);
++ drm_gem_object_put_unlocked(gem_object);
++
++ DRM_DEBUG("Created object of size %llu\n", args->size);
+
+ return 0;
+ }
+diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c
+index f01f4887fb2e..a91ed01abb68 100644
+--- a/drivers/hwmon/pmbus/ltc2978.c
++++ b/drivers/hwmon/pmbus/ltc2978.c
+@@ -82,8 +82,8 @@ enum chips { ltc2974, ltc2975, ltc2977, ltc2978, ltc2980, ltc3880, ltc3882,
+
+ #define LTC_POLL_TIMEOUT 100 /* in milli-seconds */
+
+-#define LTC_NOT_BUSY BIT(5)
+-#define LTC_NOT_PENDING BIT(4)
++#define LTC_NOT_BUSY BIT(6)
++#define LTC_NOT_PENDING BIT(5)
+
+ /*
+ * LTC2978 clears peak data whenever the CLEAR_FAULTS command is executed, which
+diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
+index 6eb6d2717ca5..2b4d80393bd0 100644
+--- a/drivers/infiniband/core/security.c
++++ b/drivers/infiniband/core/security.c
+@@ -339,22 +339,16 @@ static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
+ if (!new_pps)
+ return NULL;
+
+- if (qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) {
+- if (!qp_pps) {
+- new_pps->main.port_num = qp_attr->port_num;
+- new_pps->main.pkey_index = qp_attr->pkey_index;
+- } else {
+- new_pps->main.port_num = (qp_attr_mask & IB_QP_PORT) ?
+- qp_attr->port_num :
+- qp_pps->main.port_num;
+-
+- new_pps->main.pkey_index =
+- (qp_attr_mask & IB_QP_PKEY_INDEX) ?
+- qp_attr->pkey_index :
+- qp_pps->main.pkey_index;
+- }
++ if (qp_attr_mask & IB_QP_PORT)
++ new_pps->main.port_num =
++ (qp_pps) ? qp_pps->main.port_num : qp_attr->port_num;
++ if (qp_attr_mask & IB_QP_PKEY_INDEX)
++ new_pps->main.pkey_index = (qp_pps) ? qp_pps->main.pkey_index :
++ qp_attr->pkey_index;
++ if ((qp_attr_mask & IB_QP_PKEY_INDEX) && (qp_attr_mask & IB_QP_PORT))
+ new_pps->main.state = IB_PORT_PKEY_VALID;
+- } else if (qp_pps) {
++
++ if (!(qp_attr_mask & (IB_QP_PKEY_INDEX || IB_QP_PORT)) && qp_pps) {
+ new_pps->main.port_num = qp_pps->main.port_num;
+ new_pps->main.pkey_index = qp_pps->main.pkey_index;
+ if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
+diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
+index d1407fa378e8..1235ffb2389b 100644
+--- a/drivers/infiniband/core/user_mad.c
++++ b/drivers/infiniband/core/user_mad.c
+@@ -1312,6 +1312,9 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
+ struct ib_umad_file *file;
+ int id;
+
++ cdev_device_del(&port->sm_cdev, &port->sm_dev);
++ cdev_device_del(&port->cdev, &port->dev);
++
+ mutex_lock(&port->file_mutex);
+
+ /* Mark ib_dev NULL and block ioctl or other file ops to progress
+@@ -1331,8 +1334,6 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
+
+ mutex_unlock(&port->file_mutex);
+
+- cdev_device_del(&port->sm_cdev, &port->sm_dev);
+- cdev_device_del(&port->cdev, &port->dev);
+ ida_free(&umad_ida, port->dev_num);
+
+ /* balances device_initialize() */
+diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
+index 14a80fd9f464..300353c1e5f1 100644
+--- a/drivers/infiniband/core/uverbs_cmd.c
++++ b/drivers/infiniband/core/uverbs_cmd.c
+@@ -2718,12 +2718,6 @@ static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs,
+ return 0;
+ }
+
+-static size_t kern_spec_filter_sz(const struct ib_uverbs_flow_spec_hdr *spec)
+-{
+- /* Returns user space filter size, includes padding */
+- return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
+-}
+-
+ static ssize_t spec_filter_size(const void *kern_spec_filter, u16 kern_filter_size,
+ u16 ib_real_filter_sz)
+ {
+@@ -2867,11 +2861,16 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
+ static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
+ union ib_flow_spec *ib_spec)
+ {
+- ssize_t kern_filter_sz;
++ size_t kern_filter_sz;
+ void *kern_spec_mask;
+ void *kern_spec_val;
+
+- kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);
++ if (check_sub_overflow((size_t)kern_spec->hdr.size,
++ sizeof(struct ib_uverbs_flow_spec_hdr),
++ &kern_filter_sz))
++ return -EINVAL;
++
++ kern_filter_sz /= 2;
+
+ kern_spec_val = (void *)kern_spec +
+ sizeof(struct ib_uverbs_flow_spec_hdr);
+diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
+index 347dc242fb88..d82e0589cfd2 100644
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -3036,6 +3036,10 @@ static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
+ C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+ }
+
++ /* As per draft-hilland-iwarp-verbs-v1.0, sec 6.2.3,
++ * when entering the TERM state the RNIC MUST initiate a CLOSE.
++ */
++ c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
+ c4iw_put_ep(&ep->com);
+ } else
+ pr_warn("TERM received tid %u no ep/qp\n", tid);
+diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
+index bbcac539777a..89ac2f9ae6dd 100644
+--- a/drivers/infiniband/hw/cxgb4/qp.c
++++ b/drivers/infiniband/hw/cxgb4/qp.c
+@@ -1948,10 +1948,10 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
+ qhp->attr.layer_etype = attrs->layer_etype;
+ qhp->attr.ecode = attrs->ecode;
+ ep = qhp->ep;
+- c4iw_get_ep(&ep->com);
+- disconnect = 1;
+ if (!internal) {
++ c4iw_get_ep(&ep->com);
+ terminate = 1;
++ disconnect = 1;
+ } else {
+ terminate = qhp->attr.send_term;
+ ret = rdma_fini(rhp, qhp, ep);
+diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
+index c142b23bb401..1aeea5d65c01 100644
+--- a/drivers/infiniband/hw/hfi1/affinity.c
++++ b/drivers/infiniband/hw/hfi1/affinity.c
+@@ -479,6 +479,8 @@ static int _dev_comp_vect_mappings_create(struct hfi1_devdata *dd,
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), i, cpu);
+ }
+
++ free_cpumask_var(available_cpus);
++ free_cpumask_var(non_intr_cpus);
+ return 0;
+
+ fail:
+diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
+index f9a7e9d29c8b..89e1dfd07a1b 100644
+--- a/drivers/infiniband/hw/hfi1/file_ops.c
++++ b/drivers/infiniband/hw/hfi1/file_ops.c
+@@ -200,23 +200,24 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
+
+ fd = kzalloc(sizeof(*fd), GFP_KERNEL);
+
+- if (fd) {
+- fd->rec_cpu_num = -1; /* no cpu affinity by default */
+- fd->mm = current->mm;
+- mmgrab(fd->mm);
+- fd->dd = dd;
+- kobject_get(&fd->dd->kobj);
+- fp->private_data = fd;
+- } else {
+- fp->private_data = NULL;
+-
+- if (atomic_dec_and_test(&dd->user_refcount))
+- complete(&dd->user_comp);
+-
+- return -ENOMEM;
+- }
+-
++ if (!fd || init_srcu_struct(&fd->pq_srcu))
++ goto nomem;
++ spin_lock_init(&fd->pq_rcu_lock);
++ spin_lock_init(&fd->tid_lock);
++ spin_lock_init(&fd->invalid_lock);
++ fd->rec_cpu_num = -1; /* no cpu affinity by default */
++ fd->mm = current->mm;
++ mmgrab(fd->mm);
++ fd->dd = dd;
++ kobject_get(&fd->dd->kobj);
++ fp->private_data = fd;
+ return 0;
++nomem:
++ kfree(fd);
++ fp->private_data = NULL;
++ if (atomic_dec_and_test(&dd->user_refcount))
++ complete(&dd->user_comp);
++ return -ENOMEM;
+ }
+
+ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
+@@ -301,21 +302,30 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
+ static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
+ {
+ struct hfi1_filedata *fd = kiocb->ki_filp->private_data;
+- struct hfi1_user_sdma_pkt_q *pq = fd->pq;
++ struct hfi1_user_sdma_pkt_q *pq;
+ struct hfi1_user_sdma_comp_q *cq = fd->cq;
+ int done = 0, reqs = 0;
+ unsigned long dim = from->nr_segs;
++ int idx;
+
+- if (!cq || !pq)
++ idx = srcu_read_lock(&fd->pq_srcu);
++ pq = srcu_dereference(fd->pq, &fd->pq_srcu);
++ if (!cq || !pq) {
++ srcu_read_unlock(&fd->pq_srcu, idx);
+ return -EIO;
++ }
+
+- if (!iter_is_iovec(from) || !dim)
++ if (!iter_is_iovec(from) || !dim) {
++ srcu_read_unlock(&fd->pq_srcu, idx);
+ return -EINVAL;
++ }
+
+ trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim);
+
+- if (atomic_read(&pq->n_reqs) == pq->n_max_reqs)
++ if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) {
++ srcu_read_unlock(&fd->pq_srcu, idx);
+ return -ENOSPC;
++ }
+
+ while (dim) {
+ int ret;
+@@ -333,6 +343,7 @@ static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
+ reqs++;
+ }
+
++ srcu_read_unlock(&fd->pq_srcu, idx);
+ return reqs;
+ }
+
+@@ -707,6 +718,7 @@ done:
+ if (atomic_dec_and_test(&dd->user_refcount))
+ complete(&dd->user_comp);
+
++ cleanup_srcu_struct(&fdata->pq_srcu);
+ kfree(fdata);
+ return 0;
+ }
+diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
+index fa45350a9a1d..1af94650bd84 100644
+--- a/drivers/infiniband/hw/hfi1/hfi.h
++++ b/drivers/infiniband/hw/hfi1/hfi.h
+@@ -1436,10 +1436,13 @@ struct mmu_rb_handler;
+
+ /* Private data for file operations */
+ struct hfi1_filedata {
++ struct srcu_struct pq_srcu;
+ struct hfi1_devdata *dd;
+ struct hfi1_ctxtdata *uctxt;
+ struct hfi1_user_sdma_comp_q *cq;
+- struct hfi1_user_sdma_pkt_q *pq;
++ /* update side lock for SRCU */
++ spinlock_t pq_rcu_lock;
++ struct hfi1_user_sdma_pkt_q __rcu *pq;
+ u16 subctxt;
+ /* for cpu affinity; -1 if none */
+ int rec_cpu_num;
+diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+index 3592a9ec155e..4d732353379d 100644
+--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
++++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+@@ -90,9 +90,6 @@ int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
+ struct hfi1_devdata *dd = uctxt->dd;
+ int ret = 0;
+
+- spin_lock_init(&fd->tid_lock);
+- spin_lock_init(&fd->invalid_lock);
+-
+ fd->entry_to_rb = kcalloc(uctxt->expected_count,
+ sizeof(struct rb_node *),
+ GFP_KERNEL);
+@@ -165,10 +162,12 @@ void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
+ if (fd->handler) {
+ hfi1_mmu_rb_unregister(fd->handler);
+ } else {
++ mutex_lock(&uctxt->exp_mutex);
+ if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
+ unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
+ if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
+ unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
++ mutex_unlock(&uctxt->exp_mutex);
+ }
+
+ kfree(fd->invalid_tids);
+diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
+index fd754a16475a..c2f0d9ba93de 100644
+--- a/drivers/infiniband/hw/hfi1/user_sdma.c
++++ b/drivers/infiniband/hw/hfi1/user_sdma.c
+@@ -179,7 +179,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
+ pq = kzalloc(sizeof(*pq), GFP_KERNEL);
+ if (!pq)
+ return -ENOMEM;
+-
+ pq->dd = dd;
+ pq->ctxt = uctxt->ctxt;
+ pq->subctxt = fd->subctxt;
+@@ -236,7 +235,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
+ goto pq_mmu_fail;
+ }
+
+- fd->pq = pq;
++ rcu_assign_pointer(fd->pq, pq);
+ fd->cq = cq;
+
+ return 0;
+@@ -264,8 +263,14 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
+
+ trace_hfi1_sdma_user_free_queues(uctxt->dd, uctxt->ctxt, fd->subctxt);
+
+- pq = fd->pq;
++ spin_lock(&fd->pq_rcu_lock);
++ pq = srcu_dereference_check(fd->pq, &fd->pq_srcu,
++ lockdep_is_held(&fd->pq_rcu_lock));
+ if (pq) {
++ rcu_assign_pointer(fd->pq, NULL);
++ spin_unlock(&fd->pq_rcu_lock);
++ synchronize_srcu(&fd->pq_srcu);
++ /* at this point there can be no more new requests */
+ if (pq->handler)
+ hfi1_mmu_rb_unregister(pq->handler);
+ iowait_sdma_drain(&pq->busy);
+@@ -277,7 +282,8 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
+ kfree(pq->req_in_use);
+ kmem_cache_destroy(pq->txreq_cache);
+ kfree(pq);
+- fd->pq = NULL;
++ } else {
++ spin_unlock(&fd->pq_rcu_lock);
+ }
+ if (fd->cq) {
+ vfree(fd->cq->comps);
+@@ -321,7 +327,8 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
+ {
+ int ret = 0, i;
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+- struct hfi1_user_sdma_pkt_q *pq = fd->pq;
++ struct hfi1_user_sdma_pkt_q *pq =
++ srcu_dereference(fd->pq, &fd->pq_srcu);
+ struct hfi1_user_sdma_comp_q *cq = fd->cq;
+ struct hfi1_devdata *dd = pq->dd;
+ unsigned long idx = 0;
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index 5fd071c05944..0865373bd12d 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -3391,9 +3391,6 @@ static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
+ struct mlx5_ib_qp_base *base;
+ u32 set_id;
+
+- if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id))
+- return 0;
+-
+ if (counter)
+ set_id = counter->id;
+ else
+@@ -6503,6 +6500,7 @@ void mlx5_ib_drain_rq(struct ib_qp *qp)
+ */
+ int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter)
+ {
++ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_ib_qp *mqp = to_mqp(qp);
+ int err = 0;
+
+@@ -6512,6 +6510,11 @@ int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter)
+ goto out;
+ }
+
++ if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id)) {
++ err = -EOPNOTSUPP;
++ goto out;
++ }
++
+ if (mqp->state == IB_QPS_RTS) {
+ err = __mlx5_ib_qp_set_counter(qp, counter);
+ if (!err)
+diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
+index 0b0a241c57ff..799254a049ba 100644
+--- a/drivers/infiniband/sw/rdmavt/qp.c
++++ b/drivers/infiniband/sw/rdmavt/qp.c
+@@ -61,6 +61,8 @@
+ #define RVT_RWQ_COUNT_THRESHOLD 16
+
+ static void rvt_rc_timeout(struct timer_list *t);
++static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
++ enum ib_qp_type type);
+
+ /*
+ * Convert the AETH RNR timeout code into the number of microseconds.
+@@ -452,40 +454,41 @@ no_qp_table:
+ }
+
+ /**
+- * free_all_qps - check for QPs still in use
++ * rvt_free_qp_cb - callback function to reset a qp
++ * @qp: the qp to reset
++ * @v: a 64-bit value
++ *
++ * This function resets the qp and removes it from the
++ * qp hash table.
++ */
++static void rvt_free_qp_cb(struct rvt_qp *qp, u64 v)
++{
++ unsigned int *qp_inuse = (unsigned int *)v;
++ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
++
++ /* Reset the qp and remove it from the qp hash list */
++ rvt_reset_qp(rdi, qp, qp->ibqp.qp_type);
++
++ /* Increment the qp_inuse count */
++ (*qp_inuse)++;
++}
++
++/**
++ * rvt_free_all_qps - check for QPs still in use
+ * @rdi: rvt device info structure
+ *
+ * There should not be any QPs still in use.
+ * Free memory for table.
++ * Return the number of QPs still in use.
+ */
+ static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
+ {
+- unsigned long flags;
+- struct rvt_qp *qp;
+- unsigned n, qp_inuse = 0;
+- spinlock_t *ql; /* work around too long line below */
+-
+- if (rdi->driver_f.free_all_qps)
+- qp_inuse = rdi->driver_f.free_all_qps(rdi);
++ unsigned int qp_inuse = 0;
+
+ qp_inuse += rvt_mcast_tree_empty(rdi);
+
+- if (!rdi->qp_dev)
+- return qp_inuse;
++ rvt_qp_iter(rdi, (u64)&qp_inuse, rvt_free_qp_cb);
+
+- ql = &rdi->qp_dev->qpt_lock;
+- spin_lock_irqsave(ql, flags);
+- for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
+- qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
+- lockdep_is_held(ql));
+- RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);
+-
+- for (; qp; qp = rcu_dereference_protected(qp->next,
+- lockdep_is_held(ql)))
+- qp_inuse++;
+- }
+- spin_unlock_irqrestore(ql, flags);
+- synchronize_rcu();
+ return qp_inuse;
+ }
+
+@@ -902,14 +905,14 @@ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ }
+
+ /**
+- * rvt_reset_qp - initialize the QP state to the reset state
++ * _rvt_reset_qp - initialize the QP state to the reset state
+ * @qp: the QP to reset
+ * @type: the QP type
+ *
+ * r_lock, s_hlock, and s_lock are required to be held by the caller
+ */
+-static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+- enum ib_qp_type type)
++static void _rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
++ enum ib_qp_type type)
+ __must_hold(&qp->s_lock)
+ __must_hold(&qp->s_hlock)
+ __must_hold(&qp->r_lock)
+@@ -955,6 +958,27 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ lockdep_assert_held(&qp->s_lock);
+ }
+
++/**
++ * rvt_reset_qp - initialize the QP state to the reset state
++ * @rdi: the device info
++ * @qp: the QP to reset
++ * @type: the QP type
++ *
++ * This is the wrapper function to acquire the r_lock, s_hlock, and s_lock
++ * before calling _rvt_reset_qp().
++ */
++static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
++ enum ib_qp_type type)
++{
++ spin_lock_irq(&qp->r_lock);
++ spin_lock(&qp->s_hlock);
++ spin_lock(&qp->s_lock);
++ _rvt_reset_qp(rdi, qp, type);
++ spin_unlock(&qp->s_lock);
++ spin_unlock(&qp->s_hlock);
++ spin_unlock_irq(&qp->r_lock);
++}
++
+ /** rvt_free_qpn - Free a qpn from the bit map
+ * @qpt: QP table
+ * @qpn: queue pair number to free
+@@ -1546,7 +1570,7 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ switch (new_state) {
+ case IB_QPS_RESET:
+ if (qp->state != IB_QPS_RESET)
+- rvt_reset_qp(rdi, qp, ibqp->qp_type);
++ _rvt_reset_qp(rdi, qp, ibqp->qp_type);
+ break;
+
+ case IB_QPS_RTR:
+@@ -1695,13 +1719,7 @@ int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+ struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
+ struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+
+- spin_lock_irq(&qp->r_lock);
+- spin_lock(&qp->s_hlock);
+- spin_lock(&qp->s_lock);
+ rvt_reset_qp(rdi, qp, ibqp->qp_type);
+- spin_unlock(&qp->s_lock);
+- spin_unlock(&qp->s_hlock);
+- spin_unlock_irq(&qp->r_lock);
+
+ wait_event(qp->wait, !atomic_read(&qp->refcount));
+ /* qpn is now available for use again */
+diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
+index 116cafc9afcf..4bc88708b355 100644
+--- a/drivers/infiniband/sw/rxe/rxe_comp.c
++++ b/drivers/infiniband/sw/rxe/rxe_comp.c
+@@ -329,7 +329,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
+ qp->comp.psn = pkt->psn;
+ if (qp->req.wait_psn) {
+ qp->req.wait_psn = 0;
+- rxe_run_task(&qp->req.task, 1);
++ rxe_run_task(&qp->req.task, 0);
+ }
+ }
+ return COMPST_ERROR_RETRY;
+@@ -463,7 +463,7 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+ */
+ if (qp->req.wait_fence) {
+ qp->req.wait_fence = 0;
+- rxe_run_task(&qp->req.task, 1);
++ rxe_run_task(&qp->req.task, 0);
+ }
+ }
+
+@@ -479,7 +479,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
+ if (qp->req.need_rd_atomic) {
+ qp->comp.timeout_retry = 0;
+ qp->req.need_rd_atomic = 0;
+- rxe_run_task(&qp->req.task, 1);
++ rxe_run_task(&qp->req.task, 0);
+ }
+ }
+
+@@ -725,7 +725,7 @@ int rxe_completer(void *arg)
+ RXE_CNT_COMP_RETRY);
+ qp->req.need_retry = 1;
+ qp->comp.started_retry = 1;
+- rxe_run_task(&qp->req.task, 1);
++ rxe_run_task(&qp->req.task, 0);
+ }
+
+ if (pkt) {
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 1ae6f8bba9ae..2c666fb34625 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -146,7 +146,6 @@ static const char * const topbuttonpad_pnp_ids[] = {
+ "LEN0042", /* Yoga */
+ "LEN0045",
+ "LEN0047",
+- "LEN0049",
+ "LEN2000", /* S540 */
+ "LEN2001", /* Edge E431 */
+ "LEN2002", /* Edge E531 */
+@@ -166,9 +165,11 @@ static const char * const smbus_pnp_ids[] = {
+ /* all of the topbuttonpad_pnp_ids are valid, we just add some extras */
+ "LEN0048", /* X1 Carbon 3 */
+ "LEN0046", /* X250 */
++ "LEN0049", /* Yoga 11e */
+ "LEN004a", /* W541 */
+ "LEN005b", /* P50 */
+ "LEN005e", /* T560 */
++ "LEN006c", /* T470s */
+ "LEN0071", /* T480 */
+ "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
+ "LEN0073", /* X1 Carbon G5 (Elantech) */
+@@ -179,6 +180,7 @@ static const char * const smbus_pnp_ids[] = {
+ "LEN0097", /* X280 -> ALPS trackpoint */
+ "LEN009b", /* T580 */
+ "LEN200f", /* T450s */
++ "LEN2044", /* L470 */
+ "LEN2054", /* E480 */
+ "LEN2055", /* E580 */
+ "SYN3052", /* HP EliteBook 840 G4 */
+diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
+index 105b7a7c0251..b3484def0a8b 100644
+--- a/drivers/mmc/core/host.c
++++ b/drivers/mmc/core/host.c
+@@ -176,7 +176,6 @@ int mmc_of_parse(struct mmc_host *host)
+ u32 bus_width, drv_type, cd_debounce_delay_ms;
+ int ret;
+ bool cd_cap_invert, cd_gpio_invert = false;
+- bool ro_cap_invert, ro_gpio_invert = false;
+
+ if (!dev || !dev_fwnode(dev))
+ return 0;
+@@ -255,9 +254,11 @@ int mmc_of_parse(struct mmc_host *host)
+ }
+
+ /* Parse Write Protection */
+- ro_cap_invert = device_property_read_bool(dev, "wp-inverted");
+
+- ret = mmc_gpiod_request_ro(host, "wp", 0, 0, &ro_gpio_invert);
++ if (device_property_read_bool(dev, "wp-inverted"))
++ host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
++
++ ret = mmc_gpiod_request_ro(host, "wp", 0, 0, NULL);
+ if (!ret)
+ dev_info(host->parent, "Got WP GPIO\n");
+ else if (ret != -ENOENT && ret != -ENOSYS)
+@@ -266,10 +267,6 @@ int mmc_of_parse(struct mmc_host *host)
+ if (device_property_read_bool(dev, "disable-wp"))
+ host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
+
+- /* See the comment on CD inversion above */
+- if (ro_cap_invert ^ ro_gpio_invert)
+- host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+-
+ if (device_property_read_bool(dev, "cap-sd-highspeed"))
+ host->caps |= MMC_CAP_SD_HIGHSPEED;
+ if (device_property_read_bool(dev, "cap-mmc-highspeed"))
+diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
+index da2596c5fa28..582ec3d720f6 100644
+--- a/drivers/mmc/core/slot-gpio.c
++++ b/drivers/mmc/core/slot-gpio.c
+@@ -241,6 +241,9 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
+ return ret;
+ }
+
++ if (host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH)
++ gpiod_toggle_active_low(desc);
++
+ if (gpio_invert)
+ *gpio_invert = !gpiod_is_active_low(desc);
+
+diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
+index 024acc1b0a2e..b2bbcb09a49e 100644
+--- a/drivers/mmc/host/pxamci.c
++++ b/drivers/mmc/host/pxamci.c
+@@ -740,16 +740,16 @@ static int pxamci_probe(struct platform_device *pdev)
+ goto out;
+ }
+
++ if (!host->pdata->gpio_card_ro_invert)
++ mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
++
+ ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL);
+ if (ret && ret != -ENOENT) {
+ dev_err(dev, "Failed requesting gpio_ro\n");
+ goto out;
+ }
+- if (!ret) {
++ if (!ret)
+ host->use_ro_gpio = true;
+- mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
+- 0 : MMC_CAP2_RO_ACTIVE_HIGH;
+- }
+
+ if (host->pdata->init)
+ host->pdata->init(dev, pxamci_detect_irq, mmc);
+diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
+index 1c988d6a2433..dccb4df46512 100644
+--- a/drivers/mmc/host/sdhci-esdhc-imx.c
++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
+@@ -1381,13 +1381,14 @@ static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev,
+ host->mmc->parent->platform_data);
+ /* write_protect */
+ if (boarddata->wp_type == ESDHC_WP_GPIO) {
++ host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
++
+ err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL);
+ if (err) {
+ dev_err(mmc_dev(host->mmc),
+ "failed to request write-protect gpio!\n");
+ return err;
+ }
+- host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+ }
+
+ /* card_detect */
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index a6b7b242d516..e703827d27e9 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -3853,7 +3853,7 @@ static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
+ if (!log)
+ return;
+
+- if (nvme_get_log(ctrl, NVME_NSID_ALL, 0, NVME_LOG_FW_SLOT, log,
++ if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, log,
+ sizeof(*log), 0))
+ dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
+ kfree(log);
+diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
+index e17fac20127e..5c9898e934d9 100644
+--- a/drivers/s390/crypto/pkey_api.c
++++ b/drivers/s390/crypto/pkey_api.c
+@@ -794,7 +794,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ return -EFAULT;
+ rc = cca_sec2protkey(ksp.cardnr, ksp.domain,
+ ksp.seckey.seckey, ksp.protkey.protkey,
+- NULL, &ksp.protkey.type);
++ &ksp.protkey.len, &ksp.protkey.type);
+ DEBUG_DBG("%s cca_sec2protkey()=%d\n", __func__, rc);
+ if (rc)
+ break;
+diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
+index 97acc2ba2912..de844b412110 100644
+--- a/drivers/spmi/spmi-pmic-arb.c
++++ b/drivers/spmi/spmi-pmic-arb.c
+@@ -731,6 +731,7 @@ static int qpnpint_irq_domain_translate(struct irq_domain *d,
+ return 0;
+ }
+
++static struct lock_class_key qpnpint_irq_lock_class, qpnpint_irq_request_class;
+
+ static void qpnpint_irq_domain_map(struct spmi_pmic_arb *pmic_arb,
+ struct irq_domain *domain, unsigned int virq,
+@@ -746,6 +747,9 @@ static void qpnpint_irq_domain_map(struct spmi_pmic_arb *pmic_arb,
+ else
+ handler = handle_level_irq;
+
++
++ irq_set_lockdep_class(virq, &qpnpint_irq_lock_class,
++ &qpnpint_irq_request_class);
+ irq_domain_set_info(domain, virq, hwirq, &pmic_arb_irqchip, pmic_arb,
+ handler, NULL, NULL);
+ }
1533 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
1534 +index 7becc5e96f92..b0ccca5d08b5 100644
1535 +--- a/fs/btrfs/disk-io.c
1536 ++++ b/fs/btrfs/disk-io.c
1537 +@@ -3167,6 +3167,7 @@ retry_root_backup:
1538 + /* do not make disk changes in broken FS or nologreplay is given */
1539 + if (btrfs_super_log_root(disk_super) != 0 &&
1540 + !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
1541 ++ btrfs_info(fs_info, "start tree-log replay");
1542 + ret = btrfs_replay_log(fs_info, fs_devices);
1543 + if (ret) {
1544 + err = ret;
1545 +diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
1546 +index 9d30acca55e1..043eec682ccd 100644
1547 +--- a/fs/btrfs/extent_map.c
1548 ++++ b/fs/btrfs/extent_map.c
1549 +@@ -233,6 +233,17 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
1550 + struct extent_map *merge = NULL;
1551 + struct rb_node *rb;
1552 +
1553 ++ /*
1554 ++ * We can't modify an extent map that is in the tree and that is being
1555 ++ * used by another task, as it can cause that other task to see it in an
1556 ++ * inconsistent state during the merging. We always have 1 reference for
1557 ++ * the tree and 1 for this task (which is unpinning the extent map or
1558 ++ * clearing the logging flag), so anything > 2 means it's being used by
1559 ++ * other tasks too.
1560 ++ */
1561 ++ if (refcount_read(&em->refs) > 2)
1562 ++ return;
1563 ++
1564 + if (em->start != 0) {
1565 + rb = rb_prev(&em->rb_node);
1566 + if (rb)
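
The guard added above rests on a simple rule: with one reference held by the
tree and one by the current task, any refcount above 2 means a third party may
be reading the extent map. A minimal userspace sketch of the same rule, using
C11 atomics and invented names (struct em, em_try_merge) rather than the
kernel's types:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Hypothetical stand-in for an extent map: one reference belongs to
     * the "tree", one to the task calling em_try_merge(). */
    struct em {
            atomic_int refs;
            int merged;
    };

    /* Same check as the patch: refuse to mutate the object while any
     * third party still holds a reference (refs > 2). */
    static void em_try_merge(struct em *em)
    {
            if (atomic_load(&em->refs) > 2)
                    return;         /* someone else may be reading it */
            em->merged = 1;         /* safe to modify in place */
    }

    int main(void)
    {
            struct em a = { .refs = 2 }, b = { .refs = 3 };

            em_try_merge(&a);
            em_try_merge(&b);
            printf("a merged=%d, b merged=%d\n", a.merged, b.merged);
            return 0;
    }
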
1567 +diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
1568 +index b57f3618e58e..454a1015d026 100644
1569 +--- a/fs/btrfs/ref-verify.c
1570 ++++ b/fs/btrfs/ref-verify.c
1571 +@@ -744,6 +744,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
1572 + */
1573 + be = add_block_entry(fs_info, bytenr, num_bytes, ref_root);
1574 + if (IS_ERR(be)) {
1575 ++ kfree(ref);
1576 + kfree(ra);
1577 + ret = PTR_ERR(be);
1578 + goto out;
1579 +@@ -757,6 +758,8 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
1580 + "re-allocated a block that still has references to it!");
1581 + dump_block_entry(fs_info, be);
1582 + dump_ref_action(fs_info, ra);
1583 ++ kfree(ref);
1584 ++ kfree(ra);
1585 + goto out_unlock;
1586 + }
1587 +
1588 +@@ -819,6 +822,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
1589 + "dropping a ref for a existing root that doesn't have a ref on the block");
1590 + dump_block_entry(fs_info, be);
1591 + dump_ref_action(fs_info, ra);
1592 ++ kfree(ref);
1593 + kfree(ra);
1594 + goto out_unlock;
1595 + }
1596 +@@ -834,6 +838,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
1597 + "attempting to add another ref for an existing ref on a tree block");
1598 + dump_block_entry(fs_info, be);
1599 + dump_ref_action(fs_info, ra);
1600 ++ kfree(ref);
1601 + kfree(ra);
1602 + goto out_unlock;
1603 + }
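
The three kfree(ref) additions above all plug the same leak pattern: error
paths that released only one of two allocations. A small self-contained sketch
of the single-exit cleanup idiom they restore (all names invented for the
illustration):

    #include <stdio.h>
    #include <stdlib.h>

    /* Two heap objects, several failure points, one exit path, so no
     * early bail-out can leak either allocation. */
    static int do_mod(int fail_at)
    {
            char *ref = malloc(16);
            char *ra = malloc(16);
            int ret = 0;

            if (!ref || !ra) {
                    ret = -1;
                    goto out;
            }
            if (fail_at == 1) {     /* e.g. add_block_entry() failing */
                    ret = -2;
                    goto out;
            }
            if (fail_at == 2) {     /* e.g. a consistency check tripping */
                    ret = -3;
                    goto out;
            }
            /* In the real function ownership transfers on success; this
             * sketch has no longer-lived structure, so it falls through. */
    out:
            free(ref);              /* free(NULL) is a harmless no-op */
            free(ra);
            return ret;
    }

    int main(void)
    {
            printf("%d %d %d\n", do_mod(0), do_mod(1), do_mod(2));
            return 0;
    }
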
1604 +diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
1605 +index abcd93a3ca1d..aea24202cd35 100644
1606 +--- a/fs/btrfs/super.c
1607 ++++ b/fs/btrfs/super.c
1608 +@@ -1804,6 +1804,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
1609 + }
1610 +
1611 + if (btrfs_super_log_root(fs_info->super_copy) != 0) {
1612 ++ btrfs_warn(fs_info,
1613 ++ "mount required to replay tree-log, cannot remount read-write");
1614 + ret = -EINVAL;
1615 + goto restore;
1616 + }
1617 +diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
1618 +index 07d8ace61f77..637624ab6e46 100644
1619 +--- a/fs/cifs/cifsfs.c
1620 ++++ b/fs/cifs/cifsfs.c
1621 +@@ -414,7 +414,7 @@ cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
1622 + seq_puts(s, "ntlm");
1623 + break;
1624 + case Kerberos:
1625 +- seq_printf(s, "krb5,cruid=%u", from_kuid_munged(&init_user_ns,ses->cred_uid));
1626 ++ seq_puts(s, "krb5");
1627 + break;
1628 + case RawNTLMSSP:
1629 + seq_puts(s, "ntlmssp");
1630 +@@ -427,6 +427,10 @@ cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
1631 +
1632 + if (ses->sign)
1633 + seq_puts(s, "i");
1634 ++
1635 ++ if (ses->sectype == Kerberos)
1636 ++ seq_printf(s, ",cruid=%u",
1637 ++ from_kuid_munged(&init_user_ns, ses->cred_uid));
1638 + }
1639 +
1640 + static void
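
The reordering above matters because the mount-option string is parsed token
by token: the signing "i" must land directly after the sec= value, and cruid=
must be its own comma-separated option. A toy userspace version of the fixed
ordering (show_security here is an invented helper, not the cifs code):

    #include <stdio.h>

    /* Print the security type, then the signing suffix, and only then the
     * Kerberos-specific option, so the result reads "sec=krb5i,cruid=..."
     * instead of splitting the sec= token. */
    static void show_security(int kerberos, int sign, unsigned cred_uid)
    {
            fputs(",sec=", stdout);
            fputs(kerberos ? "krb5" : "ntlmssp", stdout);
            if (sign)
                    fputs("i", stdout);
            if (kerberos)
                    printf(",cruid=%u", cred_uid);
            putchar('\n');
    }

    int main(void)
    {
            show_security(1, 1, 1000);      /* ,sec=krb5i,cruid=1000 */
            show_security(0, 1, 1000);      /* ,sec=ntlmsspi */
            return 0;
    }
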
1641 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
1642 +index 662256fa2a18..b75d208d4b2b 100644
1643 +--- a/fs/cifs/smb2ops.c
1644 ++++ b/fs/cifs/smb2ops.c
1645 +@@ -1087,7 +1087,8 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
1646 + void *data[1];
1647 + struct smb2_file_full_ea_info *ea = NULL;
1648 + struct kvec close_iov[1];
1649 +- int rc;
1650 ++ struct smb2_query_info_rsp *rsp;
1651 ++ int rc, used_len = 0;
1652 +
1653 + if (smb3_encryption_required(tcon))
1654 + flags |= CIFS_TRANSFORM_REQ;
1655 +@@ -1110,6 +1111,38 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
1656 + cifs_sb);
1657 + if (rc == -ENODATA)
1658 + goto sea_exit;
1659 ++ } else {
1660 ++ /* If we are adding an attribute we should first check
1661 ++ * if there will be enough space available to store
1662 ++ * the new EA. If not we should not add it since we
1663 ++ * would not be able to even read the EAs back.
1664 ++ */
1665 ++ rc = smb2_query_info_compound(xid, tcon, utf16_path,
1666 ++ FILE_READ_EA,
1667 ++ FILE_FULL_EA_INFORMATION,
1668 ++ SMB2_O_INFO_FILE,
1669 ++ CIFSMaxBufSize -
1670 ++ MAX_SMB2_CREATE_RESPONSE_SIZE -
1671 ++ MAX_SMB2_CLOSE_RESPONSE_SIZE,
1672 ++ &rsp_iov[1], &resp_buftype[1], cifs_sb);
1673 ++ if (rc == 0) {
1674 ++ rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
1675 ++ used_len = le32_to_cpu(rsp->OutputBufferLength);
1676 ++ }
1677 ++ free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1678 ++ resp_buftype[1] = CIFS_NO_BUFFER;
1679 ++ memset(&rsp_iov[1], 0, sizeof(rsp_iov[1]));
1680 ++ rc = 0;
1681 ++
1682 ++ /* Use a fudge factor of 256 bytes in case we collide
1683 ++ * with a different set_EAs command.
1684 ++ */
1685 ++ if (CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
1686 ++ MAX_SMB2_CLOSE_RESPONSE_SIZE - 256 <
1687 ++ used_len + ea_name_len + ea_value_len + 1) {
1688 ++ rc = -ENOSPC;
1689 ++ goto sea_exit;
1690 ++ }
1691 + }
1692 + }
1693 +
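
The added branch implements a capacity check: query how many bytes of EA data
already exist, then refuse the new EA unless it fits in the maximum response
buffer minus the create/close response overhead and a 256-byte safety margin.
The arithmetic, modeled with placeholder constants rather than the real SMB2
sizes:

    #include <stdio.h>

    #define MAX_BUF         65536   /* stand-in for CIFSMaxBufSize */
    #define CREATE_RSP      128     /* placeholder response overheads */
    #define CLOSE_RSP       64
    #define FUDGE           256     /* slack for a racing set-EA, as in the patch */

    /* Returns nonzero when the new EA still fits after existing EAs. */
    static int ea_fits(int used_len, int name_len, int value_len)
    {
            return MAX_BUF - CREATE_RSP - CLOSE_RSP - FUDGE >=
                   used_len + name_len + value_len + 1;
    }

    int main(void)
    {
            printf("small EA fits: %d\n", ea_fits(1024, 16, 64));
            printf("huge EA fits:  %d\n", ea_fits(60000, 16, 8000));
            return 0;
    }
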
1694 +diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
1695 +index d4d4fdfac1a6..ff8e1205127e 100644
1696 +--- a/fs/ext4/block_validity.c
1697 ++++ b/fs/ext4/block_validity.c
1698 +@@ -203,6 +203,7 @@ static int ext4_protect_reserved_inode(struct super_block *sb,
1699 + return PTR_ERR(inode);
1700 + num = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
1701 + while (i < num) {
1702 ++ cond_resched();
1703 + map.m_lblk = i;
1704 + map.m_len = num - i;
1705 + n = ext4_map_blocks(NULL, inode, &map, 0);
1706 +diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
1707 +index 5ef8d7ae231b..2743c6f8a457 100644
1708 +--- a/fs/ext4/dir.c
1709 ++++ b/fs/ext4/dir.c
1710 +@@ -130,12 +130,14 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
1711 + if (err != ERR_BAD_DX_DIR) {
1712 + return err;
1713 + }
1714 +- /*
1715 +- * We don't set the inode dirty flag since it's not
1716 +- * critical that it get flushed back to the disk.
1717 +- */
1718 +- ext4_clear_inode_flag(file_inode(file),
1719 +- EXT4_INODE_INDEX);
1720 ++ /* Can we just clear INDEX flag to ignore htree information? */
1721 ++ if (!ext4_has_metadata_csum(sb)) {
1722 ++ /*
1723 ++ * We don't set the inode dirty flag since it's not
1724 ++ * critical that it gets flushed back to the disk.
1725 ++ */
1726 ++ ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
1727 ++ }
1728 + }
1729 +
1730 + if (ext4_has_inline_data(inode)) {
1731 +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
1732 +index 03db3e71676c..e2f65b565c1f 100644
1733 +--- a/fs/ext4/ext4.h
1734 ++++ b/fs/ext4/ext4.h
1735 +@@ -2476,8 +2476,11 @@ void ext4_insert_dentry(struct inode *inode,
1736 + struct ext4_filename *fname);
1737 + static inline void ext4_update_dx_flag(struct inode *inode)
1738 + {
1739 +- if (!ext4_has_feature_dir_index(inode->i_sb))
1740 ++ if (!ext4_has_feature_dir_index(inode->i_sb)) {
1741 ++ /* ext4_iget() should have caught this... */
1742 ++ WARN_ON_ONCE(ext4_has_feature_metadata_csum(inode->i_sb));
1743 + ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
1744 ++ }
1745 + }
1746 + static const unsigned char ext4_filetype_table[] = {
1747 + DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
1748 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
1749 +index 8bba6cd5e870..76a38ef5f226 100644
1750 +--- a/fs/ext4/inode.c
1751 ++++ b/fs/ext4/inode.c
1752 +@@ -4972,6 +4972,18 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
1753 + ret = -EFSCORRUPTED;
1754 + goto bad_inode;
1755 + }
1756 ++ /*
1757 ++ * If dir_index is not enabled but there's dir with INDEX flag set,
1758 ++ * we'd normally treat htree data as empty space. But with metadata
1759 ++ * checksumming that corrupts checksums so forbid that.
1760 ++ */
1761 ++ if (!ext4_has_feature_dir_index(sb) && ext4_has_metadata_csum(sb) &&
1762 ++ ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) {
1763 ++ ext4_error_inode(inode, function, line, 0,
1764 ++ "iget: Dir with htree data on filesystem without dir_index feature.");
1765 ++ ret = -EFSCORRUPTED;
1766 ++ goto bad_inode;
1767 ++ }
1768 + ei->i_disksize = inode->i_size;
1769 + #ifdef CONFIG_QUOTA
1770 + ei->i_reserved_quota = 0;
1771 +diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
1772 +index 2305b4374fd3..9d00e0dd2ba9 100644
1773 +--- a/fs/ext4/mmp.c
1774 ++++ b/fs/ext4/mmp.c
1775 +@@ -120,10 +120,10 @@ void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
1776 + {
1777 + __ext4_warning(sb, function, line, "%s", msg);
1778 + __ext4_warning(sb, function, line,
1779 +- "MMP failure info: last update time: %llu, last update "
1780 +- "node: %s, last update device: %s",
1781 +- (long long unsigned int) le64_to_cpu(mmp->mmp_time),
1782 +- mmp->mmp_nodename, mmp->mmp_bdevname);
1783 ++ "MMP failure info: last update time: %llu, last update node: %.*s, last update device: %.*s",
1784 ++ (unsigned long long)le64_to_cpu(mmp->mmp_time),
1785 ++ (int)sizeof(mmp->mmp_nodename), mmp->mmp_nodename,
1786 ++ (int)sizeof(mmp->mmp_bdevname), mmp->mmp_bdevname);
1787 + }
1788 +
1789 + /*
1790 +@@ -154,6 +154,7 @@ static int kmmpd(void *data)
1791 + mmp_check_interval = max(EXT4_MMP_CHECK_MULT * mmp_update_interval,
1792 + EXT4_MMP_MIN_CHECK_INTERVAL);
1793 + mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
1794 ++ BUILD_BUG_ON(sizeof(mmp->mmp_bdevname) < BDEVNAME_SIZE);
1795 + bdevname(bh->b_bdev, mmp->mmp_bdevname);
1796 +
1797 + memcpy(mmp->mmp_nodename, init_utsname()->nodename,
1798 +@@ -375,7 +376,8 @@ skip:
1799 + /*
1800 + * Start a kernel thread to update the MMP block periodically.
1801 + */
1802 +- EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, mmpd_data, "kmmpd-%s",
1803 ++ EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, mmpd_data, "kmmpd-%.*s",
1804 ++ (int)sizeof(mmp->mmp_bdevname),
1805 + bdevname(bh->b_bdev,
1806 + mmp->mmp_bdevname));
1807 + if (IS_ERR(EXT4_SB(sb)->s_mmp_tsk)) {
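
The switch to the %.*s precision form is the whole fix here: mmp_nodename and
mmp_bdevname are fixed-size on-disk fields that need not be NUL-terminated, so
an unbounded %s could read past them. A tiny userspace demonstration of the
bounded form:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* A fixed-size field with no terminating NUL, like mmp_bdevname. */
            char bdevname[4];
            memcpy(bdevname, "sda1", 4);    /* fills the array completely */

            /* printf("%s", bdevname) would read past the array; the
             * precision form caps the read at sizeof(bdevname). */
            printf("device: %.*s\n", (int)sizeof(bdevname), bdevname);
            return 0;
    }
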
1808 +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
1809 +index f56402e9c11c..94d84910dc1e 100644
1810 +--- a/fs/ext4/namei.c
1811 ++++ b/fs/ext4/namei.c
1812 +@@ -2205,6 +2205,13 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
1813 + retval = ext4_dx_add_entry(handle, &fname, dir, inode);
1814 + if (!retval || (retval != ERR_BAD_DX_DIR))
1815 + goto out;
1816 ++ /* Can we just ignore htree data? */
1817 ++ if (ext4_has_metadata_csum(sb)) {
1818 ++ EXT4_ERROR_INODE(dir,
1819 ++ "Directory has corrupted htree index.");
1820 ++ retval = -EFSCORRUPTED;
1821 ++ goto out;
1822 ++ }
1823 + ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
1824 + dx_fallback++;
1825 + ext4_mark_inode_dirty(handle, dir);
1826 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
1827 +index 66162b430edc..914230e63054 100644
1828 +--- a/fs/ext4/super.c
1829 ++++ b/fs/ext4/super.c
1830 +@@ -2961,17 +2961,11 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly)
1831 + return 0;
1832 + }
1833 +
1834 +-#ifndef CONFIG_QUOTA
1835 +- if (ext4_has_feature_quota(sb) && !readonly) {
1836 ++#if !defined(CONFIG_QUOTA) || !defined(CONFIG_QFMT_V2)
1837 ++ if (!readonly && (ext4_has_feature_quota(sb) ||
1838 ++ ext4_has_feature_project(sb))) {
1839 + ext4_msg(sb, KERN_ERR,
1840 +- "Filesystem with quota feature cannot be mounted RDWR "
1841 +- "without CONFIG_QUOTA");
1842 +- return 0;
1843 +- }
1844 +- if (ext4_has_feature_project(sb) && !readonly) {
1845 +- ext4_msg(sb, KERN_ERR,
1846 +- "Filesystem with project quota feature cannot be mounted RDWR "
1847 +- "without CONFIG_QUOTA");
1848 ++ "The kernel was not built with CONFIG_QUOTA and CONFIG_QFMT_V2");
1849 + return 0;
1850 + }
1851 + #endif /* CONFIG_QUOTA */
1852 +@@ -3765,6 +3759,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
1853 + */
1854 + sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
1855 +
1856 ++ blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
1857 ++ if (blocksize < EXT4_MIN_BLOCK_SIZE ||
1858 ++ blocksize > EXT4_MAX_BLOCK_SIZE) {
1859 ++ ext4_msg(sb, KERN_ERR,
1860 ++ "Unsupported filesystem blocksize %d (%d log_block_size)",
1861 ++ blocksize, le32_to_cpu(es->s_log_block_size));
1862 ++ goto failed_mount;
1863 ++ }
1864 ++
1865 + if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
1866 + sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
1867 + sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
1868 +@@ -3782,6 +3785,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
1869 + ext4_msg(sb, KERN_ERR,
1870 + "unsupported inode size: %d",
1871 + sbi->s_inode_size);
1872 ++ ext4_msg(sb, KERN_ERR, "blocksize: %d", blocksize);
1873 + goto failed_mount;
1874 + }
1875 + /*
1876 +@@ -3985,14 +3989,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
1877 + if (!ext4_feature_set_ok(sb, (sb_rdonly(sb))))
1878 + goto failed_mount;
1879 +
1880 +- blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
1881 +- if (blocksize < EXT4_MIN_BLOCK_SIZE ||
1882 +- blocksize > EXT4_MAX_BLOCK_SIZE) {
1883 +- ext4_msg(sb, KERN_ERR,
1884 +- "Unsupported filesystem blocksize %d (%d log_block_size)",
1885 +- blocksize, le32_to_cpu(es->s_log_block_size));
1886 +- goto failed_mount;
1887 +- }
1888 + if (le32_to_cpu(es->s_log_block_size) >
1889 + (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
1890 + ext4_msg(sb, KERN_ERR,
1891 +@@ -5544,9 +5540,15 @@ static int ext4_statfs_project(struct super_block *sb,
1892 + return PTR_ERR(dquot);
1893 + spin_lock(&dquot->dq_dqb_lock);
1894 +
1895 +- limit = (dquot->dq_dqb.dqb_bsoftlimit ?
1896 +- dquot->dq_dqb.dqb_bsoftlimit :
1897 +- dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
1898 ++ limit = 0;
1899 ++ if (dquot->dq_dqb.dqb_bsoftlimit &&
1900 ++ (!limit || dquot->dq_dqb.dqb_bsoftlimit < limit))
1901 ++ limit = dquot->dq_dqb.dqb_bsoftlimit;
1902 ++ if (dquot->dq_dqb.dqb_bhardlimit &&
1903 ++ (!limit || dquot->dq_dqb.dqb_bhardlimit < limit))
1904 ++ limit = dquot->dq_dqb.dqb_bhardlimit;
1905 ++ limit >>= sb->s_blocksize_bits;
1906 ++
1907 + if (limit && buf->f_blocks > limit) {
1908 + curblock = (dquot->dq_dqb.dqb_curspace +
1909 + dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
1910 +@@ -5556,9 +5558,14 @@ static int ext4_statfs_project(struct super_block *sb,
1911 + (buf->f_blocks - curblock) : 0;
1912 + }
1913 +
1914 +- limit = dquot->dq_dqb.dqb_isoftlimit ?
1915 +- dquot->dq_dqb.dqb_isoftlimit :
1916 +- dquot->dq_dqb.dqb_ihardlimit;
1917 ++ limit = 0;
1918 ++ if (dquot->dq_dqb.dqb_isoftlimit &&
1919 ++ (!limit || dquot->dq_dqb.dqb_isoftlimit < limit))
1920 ++ limit = dquot->dq_dqb.dqb_isoftlimit;
1921 ++ if (dquot->dq_dqb.dqb_ihardlimit &&
1922 ++ (!limit || dquot->dq_dqb.dqb_ihardlimit < limit))
1923 ++ limit = dquot->dq_dqb.dqb_ihardlimit;
1924 ++
1925 + if (limit && buf->f_files > limit) {
1926 + buf->f_files = limit;
1927 + buf->f_ffree =
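
The statfs hunk above replaces "soft limit if set, else hard limit" with
"smaller of whichever limits are set", since a hard limit below the soft limit
was previously ignored. The selection logic, extracted into a standalone
sketch (pick_limit is an invented name):

    #include <stdio.h>

    typedef unsigned long long u64;

    /* 0 means "no limit"; when both limits are set, the smaller one wins. */
    static u64 pick_limit(u64 soft, u64 hard)
    {
            u64 limit = 0;

            if (soft && (!limit || soft < limit))
                    limit = soft;
            if (hard && (!limit || hard < limit))
                    limit = hard;
            return limit;
    }

    int main(void)
    {
            printf("%llu\n", pick_limit(0, 0));      /* 0: unlimited */
            printf("%llu\n", pick_limit(0, 500));    /* 500 */
            printf("%llu\n", pick_limit(300, 500));  /* 300: soft below hard */
            printf("%llu\n", pick_limit(800, 500));  /* 500: hard below soft */
            return 0;
    }
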
1928 +diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
1929 +index c43591cd70f1..2a42904bcd62 100644
1930 +--- a/fs/jbd2/commit.c
1931 ++++ b/fs/jbd2/commit.c
1932 +@@ -974,29 +974,33 @@ restart_loop:
1933 + * it. */
1934 +
1935 + /*
1936 +- * A buffer which has been freed while still being journaled by
1937 +- * a previous transaction.
1938 +- */
1939 +- if (buffer_freed(bh)) {
1940 ++ * A buffer which has been freed while still being journaled
1941 ++ * by a previous transaction, refile the buffer to BJ_Forget of
1942 ++ * the running transaction. If the just committed transaction
1943 ++ * contains "add to orphan" operation, we can completely
1944 ++ * invalidate the buffer now. We are rather thorough in that
1945 ++ * since the buffer may be still accessible when blocksize <
1946 ++ * pagesize and it is attached to the last partial page.
1947 ++ */
1948 ++ if (buffer_freed(bh) && !jh->b_next_transaction) {
1949 ++ struct address_space *mapping;
1950 ++
1951 ++ clear_buffer_freed(bh);
1952 ++ clear_buffer_jbddirty(bh);
1953 ++
1954 + /*
1955 +- * If the running transaction is the one containing
1956 +- * "add to orphan" operation (b_next_transaction !=
1957 +- * NULL), we have to wait for that transaction to
1958 +- * commit before we can really get rid of the buffer.
1959 +- * So just clear b_modified to not confuse transaction
1960 +- * credit accounting and refile the buffer to
1961 +- * BJ_Forget of the running transaction. If the just
1962 +- * committed transaction contains "add to orphan"
1963 +- * operation, we can completely invalidate the buffer
1964 +- * now. We are rather through in that since the
1965 +- * buffer may be still accessible when blocksize <
1966 +- * pagesize and it is attached to the last partial
1967 +- * page.
1968 ++ * Block device buffers need to stay mapped all the
1969 ++ * time, so it is enough to clear buffer_jbddirty and
1970 ++ * buffer_freed bits. For the file mapping buffers (i.e.
1971 ++ * journalled data) we need to unmap buffer and clear
1972 ++ * more bits. We also need to be careful about the check
1973 ++ * because the data page mapping can get cleared under
1974 ++ * our hands, in which case we also need not clear more bits
1975 ++ * because the page and buffers will be freed and can
1976 ++ * never be reused once we are done with them.
1977 + */
1978 +- jh->b_modified = 0;
1979 +- if (!jh->b_next_transaction) {
1980 +- clear_buffer_freed(bh);
1981 +- clear_buffer_jbddirty(bh);
1982 ++ mapping = READ_ONCE(bh->b_page->mapping);
1983 ++ if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) {
1984 + clear_buffer_mapped(bh);
1985 + clear_buffer_new(bh);
1986 + clear_buffer_req(bh);
1987 +diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
1988 +index bee8498d7792..3930c68a9c20 100644
1989 +--- a/fs/jbd2/transaction.c
1990 ++++ b/fs/jbd2/transaction.c
1991 +@@ -2296,14 +2296,16 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
1992 + return -EBUSY;
1993 + }
1994 + /*
1995 +- * OK, buffer won't be reachable after truncate. We just set
1996 +- * j_next_transaction to the running transaction (if there is
1997 +- * one) and mark buffer as freed so that commit code knows it
1998 +- * should clear dirty bits when it is done with the buffer.
1999 ++ * OK, buffer won't be reachable after truncate. We just clear
2000 ++ * b_modified to not confuse transaction credit accounting, and
2001 ++ * set j_next_transaction to the running transaction (if there
2002 ++ * is one) and mark buffer as freed so that commit code knows
2003 ++ * it should clear dirty bits when it is done with the buffer.
2004 + */
2005 + set_buffer_freed(bh);
2006 + if (journal->j_running_transaction && buffer_jbddirty(bh))
2007 + jh->b_next_transaction = journal->j_running_transaction;
2008 ++ jh->b_modified = 0;
2009 + jbd2_journal_put_journal_head(jh);
2010 + spin_unlock(&journal->j_list_lock);
2011 + jbd_unlock_bh_state(bh);
2012 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
2013 +index 423960d480f1..f808fb34b110 100644
2014 +--- a/fs/nfs/nfs4proc.c
2015 ++++ b/fs/nfs/nfs4proc.c
2016 +@@ -5293,7 +5293,7 @@ static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
2017 + hdr->timestamp = jiffies;
2018 +
2019 + msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
2020 +- nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 1, 0);
2021 ++ nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
2022 + nfs4_state_protect_write(server->nfs_client, clnt, msg, hdr);
2023 + }
2024 +
2025 +diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
2026 +index e5e041413581..d1fdf26ccb33 100644
2027 +--- a/include/acpi/acpixf.h
2028 ++++ b/include/acpi/acpixf.h
2029 +@@ -748,6 +748,7 @@ ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_dispatch_gpe(acpi_handle gpe_device, u3
2030 + ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void))
2031 + ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void))
2032 + ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void))
2033 ++ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_gpe_status_set(void))
2034 +
2035 + ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
2036 + acpi_get_gpe_device(u32 gpe_index,
2037 +diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
2038 +index b70af921c614..803bb63dd5ff 100644
2039 +--- a/include/linux/gpio/consumer.h
2040 ++++ b/include/linux/gpio/consumer.h
2041 +@@ -158,6 +158,7 @@ int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
2042 +
2043 + int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
2044 + int gpiod_set_transitory(struct gpio_desc *desc, bool transitory);
2045 ++void gpiod_toggle_active_low(struct gpio_desc *desc);
2046 +
2047 + int gpiod_is_active_low(const struct gpio_desc *desc);
2048 + int gpiod_cansleep(const struct gpio_desc *desc);
2049 +@@ -479,6 +480,12 @@ static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
2050 + return -ENOSYS;
2051 + }
2052 +
2053 ++static inline void gpiod_toggle_active_low(struct gpio_desc *desc)
2054 ++{
2055 ++ /* GPIO can never have been requested */
2056 ++ WARN_ON(desc);
2057 ++}
2058 ++
2059 + static inline int gpiod_is_active_low(const struct gpio_desc *desc)
2060 + {
2061 + /* GPIO can never have been requested */
2062 +diff --git a/include/linux/suspend.h b/include/linux/suspend.h
2063 +index 6fc8843f1c9e..cd97d2c8840c 100644
2064 +--- a/include/linux/suspend.h
2065 ++++ b/include/linux/suspend.h
2066 +@@ -191,7 +191,7 @@ struct platform_s2idle_ops {
2067 + int (*begin)(void);
2068 + int (*prepare)(void);
2069 + int (*prepare_late)(void);
2070 +- void (*wake)(void);
2071 ++ bool (*wake)(void);
2072 + void (*restore_early)(void);
2073 + void (*restore)(void);
2074 + void (*end)(void);
2075 +diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
2076 +index f3b7239f1892..27f149f5d4a9 100644
2077 +--- a/kernel/power/suspend.c
2078 ++++ b/kernel/power/suspend.c
2079 +@@ -131,11 +131,12 @@ static void s2idle_loop(void)
2080 + * to avoid them upfront.
2081 + */
2082 + for (;;) {
2083 +- if (s2idle_ops && s2idle_ops->wake)
2084 +- s2idle_ops->wake();
2085 +-
2086 +- if (pm_wakeup_pending())
2087 ++ if (s2idle_ops && s2idle_ops->wake) {
2088 ++ if (s2idle_ops->wake())
2089 ++ break;
2090 ++ } else if (pm_wakeup_pending()) {
2091 + break;
2092 ++ }
2093 +
2094 + pm_wakeup_clear(false);
2095 +
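
With the suspend.h change above, the platform wake hook now reports whether a
wakeup is genuine, and the loop only falls back to the generic pending check
when no hook exists. A compact model of the reworked loop, with a fake hook
that succeeds on the third poll (all names invented):

    #include <stdbool.h>
    #include <stdio.h>

    static int wake_calls;

    /* Stand-in for the platform ->wake() hook: true means "real wakeup". */
    static bool fake_wake(void)
    {
            return ++wake_calls == 3;
    }

    int main(void)
    {
            bool (*wake)(void) = fake_wake;

            for (;;) {
                    if (wake) {
                            if (wake())
                                    break;
                    } else if (0 /* pm_wakeup_pending() stand-in */) {
                            break;
                    }
                    printf("spurious wakeup %d, re-entering idle\n", wake_calls);
            }
            printf("real wakeup after %d polls\n", wake_calls);
            return 0;
    }
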
2096 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
2097 +index 00743684a549..dfaefb175ba0 100644
2098 +--- a/kernel/sched/core.c
2099 ++++ b/kernel/sched/core.c
2100 +@@ -7250,7 +7250,7 @@ capacity_from_percent(char *buf)
2101 + &req.percent);
2102 + if (req.ret)
2103 + return req;
2104 +- if (req.percent > UCLAMP_PERCENT_SCALE) {
2105 ++ if ((u64)req.percent > UCLAMP_PERCENT_SCALE) {
2106 + req.ret = -ERANGE;
2107 + return req;
2108 + }
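
The one-character-looking fix above is a signed/unsigned issue: if the parsed
percentage is a signed 64-bit value, a negative input compares as less than
the scale and slips past the range check, while casting to u64 first makes any
negative value enormous and rejects it. A demonstration:

    #include <stdint.h>
    #include <stdio.h>

    #define SCALE 10000     /* stand-in for UCLAMP_PERCENT_SCALE */

    int main(void)
    {
            int64_t percent = -1;   /* hostile input that parsed as negative */

            /* Signed comparison: -1 > 10000 is false, so -1 slips through. */
            printf("signed check rejects:   %d\n", percent > SCALE);

            /* Casting first, as the patch does, turns -1 into a huge
             * unsigned value, so the same comparison now catches it. */
            printf("unsigned check rejects: %d\n", (uint64_t)percent > SCALE);
            return 0;
    }
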
2109 +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
2110 +index 54dd8849d1cc..1e3b9d34aaa4 100644
2111 +--- a/net/mac80211/mlme.c
2112 ++++ b/net/mac80211/mlme.c
2113 +@@ -8,7 +8,7 @@
2114 + * Copyright 2007, Michael Wu <flamingice@××××××××.net>
2115 + * Copyright 2013-2014 Intel Mobile Communications GmbH
2116 + * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
2117 +- * Copyright (C) 2018 - 2019 Intel Corporation
2118 ++ * Copyright (C) 2018 - 2020 Intel Corporation
2119 + */
2120 +
2121 + #include <linux/delay.h>
2122 +@@ -1311,7 +1311,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
2123 + if (!res) {
2124 + ch_switch.timestamp = timestamp;
2125 + ch_switch.device_timestamp = device_timestamp;
2126 +- ch_switch.block_tx = beacon ? csa_ie.mode : 0;
2127 ++ ch_switch.block_tx = csa_ie.mode;
2128 + ch_switch.chandef = csa_ie.chandef;
2129 + ch_switch.count = csa_ie.count;
2130 + ch_switch.delay = csa_ie.max_switch_time;
2131 +@@ -1404,7 +1404,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
2132 +
2133 + sdata->vif.csa_active = true;
2134 + sdata->csa_chandef = csa_ie.chandef;
2135 +- sdata->csa_block_tx = ch_switch.block_tx;
2136 ++ sdata->csa_block_tx = csa_ie.mode;
2137 + ifmgd->csa_ignored_same_chan = false;
2138 +
2139 + if (sdata->csa_block_tx)
2140 +@@ -1438,7 +1438,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
2141 + * reset when the disconnection worker runs.
2142 + */
2143 + sdata->vif.csa_active = true;
2144 +- sdata->csa_block_tx = ch_switch.block_tx;
2145 ++ sdata->csa_block_tx = csa_ie.mode;
2146 +
2147 + ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work);
2148 + mutex_unlock(&local->chanctx_mtx);
2149 +diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
2150 +index 9901a811f598..0ad45a8fe3fb 100644
2151 +--- a/net/sunrpc/xprtrdma/frwr_ops.c
2152 ++++ b/net/sunrpc/xprtrdma/frwr_ops.c
2153 +@@ -326,8 +326,8 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
2154 + {
2155 + struct rpcrdma_ia *ia = &r_xprt->rx_ia;
2156 + struct ib_reg_wr *reg_wr;
2157 ++ int i, n, dma_nents;
2158 + struct ib_mr *ibmr;
2159 +- int i, n;
2160 + u8 key;
2161 +
2162 + if (nsegs > ia->ri_max_frwr_depth)
2163 +@@ -351,15 +351,16 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
2164 + break;
2165 + }
2166 + mr->mr_dir = rpcrdma_data_dir(writing);
2167 ++ mr->mr_nents = i;
2168 +
2169 +- mr->mr_nents =
2170 +- ib_dma_map_sg(ia->ri_id->device, mr->mr_sg, i, mr->mr_dir);
2171 +- if (!mr->mr_nents)
2172 ++ dma_nents = ib_dma_map_sg(ia->ri_id->device, mr->mr_sg, mr->mr_nents,
2173 ++ mr->mr_dir);
2174 ++ if (!dma_nents)
2175 + goto out_dmamap_err;
2176 +
2177 + ibmr = mr->frwr.fr_mr;
2178 +- n = ib_map_mr_sg(ibmr, mr->mr_sg, mr->mr_nents, NULL, PAGE_SIZE);
2179 +- if (unlikely(n != mr->mr_nents))
2180 ++ n = ib_map_mr_sg(ibmr, mr->mr_sg, dma_nents, NULL, PAGE_SIZE);
2181 ++ if (n != dma_nents)
2182 + goto out_mapmr_err;
2183 +
2184 + ibmr->iova &= 0x00000000ffffffff;
2185 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2186 +index 68832f52c1ad..a66d4be3516e 100644
2187 +--- a/sound/pci/hda/patch_realtek.c
2188 ++++ b/sound/pci/hda/patch_realtek.c
2189 +@@ -2447,6 +2447,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
2190 + SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
2191 + SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
2192 + SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
2193 ++ SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
2194 + SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
2195 + SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
2196 + SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
2197 +@@ -5701,8 +5702,11 @@ static void alc_fixup_headset_jack(struct hda_codec *codec,
2198 + break;
2199 + case HDA_FIXUP_ACT_INIT:
2200 + switch (codec->core.vendor_id) {
2201 ++ case 0x10ec0215:
2202 + case 0x10ec0225:
2203 ++ case 0x10ec0285:
2204 + case 0x10ec0295:
2205 ++ case 0x10ec0289:
2206 + case 0x10ec0299:
2207 + alc_write_coef_idx(codec, 0x48, 0xd011);
2208 + alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045);
2209 +diff --git a/sound/usb/clock.c b/sound/usb/clock.c
2210 +index 6b8c14f9b5d4..a48313dfa967 100644
2211 +--- a/sound/usb/clock.c
2212 ++++ b/sound/usb/clock.c
2213 +@@ -151,8 +151,34 @@ static int uac_clock_selector_set_val(struct snd_usb_audio *chip, int selector_i
2214 + return ret;
2215 + }
2216 +
2217 ++/*
2218 ++ * Assume the clock is valid if the clock source supports only a single sample
2219 ++ * rate, the terminal is connected directly to it (there is no clock selector),
2220 ++ * and the clock type is internal. This is to deal with some Denon DJ controllers
2221 ++ * that always report that the clock is invalid.
2222 ++ */
2223 ++static bool uac_clock_source_is_valid_quirk(struct snd_usb_audio *chip,
2224 ++ struct audioformat *fmt,
2225 ++ int source_id)
2226 ++{
2227 ++ if (fmt->protocol == UAC_VERSION_2) {
2228 ++ struct uac_clock_source_descriptor *cs_desc =
2229 ++ snd_usb_find_clock_source(chip->ctrl_intf, source_id);
2230 ++
2231 ++ if (!cs_desc)
2232 ++ return false;
2233 ++
2234 ++ return (fmt->nr_rates == 1 &&
2235 ++ (fmt->clock & 0xff) == cs_desc->bClockID &&
2236 ++ (cs_desc->bmAttributes & 0x3) !=
2237 ++ UAC_CLOCK_SOURCE_TYPE_EXT);
2238 ++ }
2239 ++
2240 ++ return false;
2241 ++}
2242 ++
2243 + static bool uac_clock_source_is_valid(struct snd_usb_audio *chip,
2244 +- int protocol,
2245 ++ struct audioformat *fmt,
2246 + int source_id)
2247 + {
2248 + int err;
2249 +@@ -160,26 +186,26 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip,
2250 + struct usb_device *dev = chip->dev;
2251 + u32 bmControls;
2252 +
2253 +- if (protocol == UAC_VERSION_3) {
2254 ++ if (fmt->protocol == UAC_VERSION_3) {
2255 + struct uac3_clock_source_descriptor *cs_desc =
2256 + snd_usb_find_clock_source_v3(chip->ctrl_intf, source_id);
2257 +
2258 + if (!cs_desc)
2259 +- return 0;
2260 ++ return false;
2261 + bmControls = le32_to_cpu(cs_desc->bmControls);
2262 + } else { /* UAC_VERSION_1/2 */
2263 + struct uac_clock_source_descriptor *cs_desc =
2264 + snd_usb_find_clock_source(chip->ctrl_intf, source_id);
2265 +
2266 + if (!cs_desc)
2267 +- return 0;
2268 ++ return false;
2269 + bmControls = cs_desc->bmControls;
2270 + }
2271 +
2272 + /* If a clock source can't tell us whether it's valid, we assume it is */
2273 + if (!uac_v2v3_control_is_readable(bmControls,
2274 + UAC2_CS_CONTROL_CLOCK_VALID))
2275 +- return 1;
2276 ++ return true;
2277 +
2278 + err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR,
2279 + USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
2280 +@@ -191,13 +217,17 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip,
2281 + dev_warn(&dev->dev,
2282 + "%s(): cannot get clock validity for id %d\n",
2283 + __func__, source_id);
2284 +- return 0;
2285 ++ return false;
2286 + }
2287 +
2288 +- return !!data;
2289 ++ if (data)
2290 ++ return true;
2291 ++ else
2292 ++ return uac_clock_source_is_valid_quirk(chip, fmt, source_id);
2293 + }
2294 +
2295 +-static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
2296 ++static int __uac_clock_find_source(struct snd_usb_audio *chip,
2297 ++ struct audioformat *fmt, int entity_id,
2298 + unsigned long *visited, bool validate)
2299 + {
2300 + struct uac_clock_source_descriptor *source;
2301 +@@ -217,7 +247,7 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
2302 + source = snd_usb_find_clock_source(chip->ctrl_intf, entity_id);
2303 + if (source) {
2304 + entity_id = source->bClockID;
2305 +- if (validate && !uac_clock_source_is_valid(chip, UAC_VERSION_2,
2306 ++ if (validate && !uac_clock_source_is_valid(chip, fmt,
2307 + entity_id)) {
2308 + usb_audio_err(chip,
2309 + "clock source %d is not valid, cannot use\n",
2310 +@@ -248,8 +278,9 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
2311 + }
2312 +
2313 + cur = ret;
2314 +- ret = __uac_clock_find_source(chip, selector->baCSourceID[ret - 1],
2315 +- visited, validate);
2316 ++ ret = __uac_clock_find_source(chip, fmt,
2317 ++ selector->baCSourceID[ret - 1],
2318 ++ visited, validate);
2319 + if (!validate || ret > 0 || !chip->autoclock)
2320 + return ret;
2321 +
2322 +@@ -260,8 +291,9 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
2323 + if (i == cur)
2324 + continue;
2325 +
2326 +- ret = __uac_clock_find_source(chip, selector->baCSourceID[i - 1],
2327 +- visited, true);
2328 ++ ret = __uac_clock_find_source(chip, fmt,
2329 ++ selector->baCSourceID[i - 1],
2330 ++ visited, true);
2331 + if (ret < 0)
2332 + continue;
2333 +
2334 +@@ -281,14 +313,16 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
2335 + /* FIXME: multipliers only act as pass-thru element for now */
2336 + multiplier = snd_usb_find_clock_multiplier(chip->ctrl_intf, entity_id);
2337 + if (multiplier)
2338 +- return __uac_clock_find_source(chip, multiplier->bCSourceID,
2339 +- visited, validate);
2340 ++ return __uac_clock_find_source(chip, fmt,
2341 ++ multiplier->bCSourceID,
2342 ++ visited, validate);
2343 +
2344 + return -EINVAL;
2345 + }
2346 +
2347 +-static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
2348 +- unsigned long *visited, bool validate)
2349 ++static int __uac3_clock_find_source(struct snd_usb_audio *chip,
2350 ++ struct audioformat *fmt, int entity_id,
2351 ++ unsigned long *visited, bool validate)
2352 + {
2353 + struct uac3_clock_source_descriptor *source;
2354 + struct uac3_clock_selector_descriptor *selector;
2355 +@@ -307,7 +341,7 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
2356 + source = snd_usb_find_clock_source_v3(chip->ctrl_intf, entity_id);
2357 + if (source) {
2358 + entity_id = source->bClockID;
2359 +- if (validate && !uac_clock_source_is_valid(chip, UAC_VERSION_3,
2360 ++ if (validate && !uac_clock_source_is_valid(chip, fmt,
2361 + entity_id)) {
2362 + usb_audio_err(chip,
2363 + "clock source %d is not valid, cannot use\n",
2364 +@@ -338,7 +372,8 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
2365 + }
2366 +
2367 + cur = ret;
2368 +- ret = __uac3_clock_find_source(chip, selector->baCSourceID[ret - 1],
2369 ++ ret = __uac3_clock_find_source(chip, fmt,
2370 ++ selector->baCSourceID[ret - 1],
2371 + visited, validate);
2372 + if (!validate || ret > 0 || !chip->autoclock)
2373 + return ret;
2374 +@@ -350,8 +385,9 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
2375 + if (i == cur)
2376 + continue;
2377 +
2378 +- ret = __uac3_clock_find_source(chip, selector->baCSourceID[i - 1],
2379 +- visited, true);
2380 ++ ret = __uac3_clock_find_source(chip, fmt,
2381 ++ selector->baCSourceID[i - 1],
2382 ++ visited, true);
2383 + if (ret < 0)
2384 + continue;
2385 +
2386 +@@ -372,7 +408,8 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
2387 + multiplier = snd_usb_find_clock_multiplier_v3(chip->ctrl_intf,
2388 + entity_id);
2389 + if (multiplier)
2390 +- return __uac3_clock_find_source(chip, multiplier->bCSourceID,
2391 ++ return __uac3_clock_find_source(chip, fmt,
2392 ++ multiplier->bCSourceID,
2393 + visited, validate);
2394 +
2395 + return -EINVAL;
2396 +@@ -389,18 +426,18 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
2397 + *
2398 + * Returns the clock source UnitID (>=0) on success, or an error.
2399 + */
2400 +-int snd_usb_clock_find_source(struct snd_usb_audio *chip, int protocol,
2401 +- int entity_id, bool validate)
2402 ++int snd_usb_clock_find_source(struct snd_usb_audio *chip,
2403 ++ struct audioformat *fmt, bool validate)
2404 + {
2405 + DECLARE_BITMAP(visited, 256);
2406 + memset(visited, 0, sizeof(visited));
2407 +
2408 +- switch (protocol) {
2409 ++ switch (fmt->protocol) {
2410 + case UAC_VERSION_2:
2411 +- return __uac_clock_find_source(chip, entity_id, visited,
2412 ++ return __uac_clock_find_source(chip, fmt, fmt->clock, visited,
2413 + validate);
2414 + case UAC_VERSION_3:
2415 +- return __uac3_clock_find_source(chip, entity_id, visited,
2416 ++ return __uac3_clock_find_source(chip, fmt, fmt->clock, visited,
2417 + validate);
2418 + default:
2419 + return -EINVAL;
2420 +@@ -501,8 +538,7 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip, int iface,
2421 + * automatic clock selection if the current clock is not
2422 + * valid.
2423 + */
2424 +- clock = snd_usb_clock_find_source(chip, fmt->protocol,
2425 +- fmt->clock, true);
2426 ++ clock = snd_usb_clock_find_source(chip, fmt, true);
2427 + if (clock < 0) {
2428 + /* We did not find a valid clock, but that might be
2429 + * because the current sample rate does not match an
2430 +@@ -510,8 +546,7 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip, int iface,
2431 + * and we will do another validation after setting the
2432 + * rate.
2433 + */
2434 +- clock = snd_usb_clock_find_source(chip, fmt->protocol,
2435 +- fmt->clock, false);
2436 ++ clock = snd_usb_clock_find_source(chip, fmt, false);
2437 + if (clock < 0)
2438 + return clock;
2439 + }
2440 +@@ -577,7 +612,7 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip, int iface,
2441 +
2442 + validation:
2443 + /* validate clock after rate change */
2444 +- if (!uac_clock_source_is_valid(chip, fmt->protocol, clock))
2445 ++ if (!uac_clock_source_is_valid(chip, fmt, clock))
2446 + return -ENXIO;
2447 + return 0;
2448 + }
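
The new quirk helper encodes three conditions under which an "invalid" clock
is trusted anyway: the format offers exactly one sample rate, its clock field
points straight at the clock source (no selector in between), and the source
is internal. A userspace caricature of that predicate, with invented struct
fields standing in for the USB descriptors:

    #include <stdbool.h>
    #include <stdio.h>

    struct fmt  { int protocol, nr_rates, clock; };
    struct csrc { int id, attrs; };

    #define UAC_V2  2
    #define EXT_CLK 0x2     /* pretend "external" clock type bits */

    static bool clock_valid_quirk(const struct fmt *f, const struct csrc *c)
    {
            if (f->protocol != UAC_V2 || !c)
                    return false;
            return f->nr_rates == 1 &&
                   (f->clock & 0xff) == c->id &&
                   (c->attrs & 0x3) != EXT_CLK;
    }

    int main(void)
    {
            struct csrc internal = { .id = 5, .attrs = 0x1 };
            struct fmt denon = { UAC_V2, 1, 5 }, multi = { UAC_V2, 8, 5 };

            printf("single-rate internal: %d\n",
                   clock_valid_quirk(&denon, &internal));
            printf("multi-rate:           %d\n",
                   clock_valid_quirk(&multi, &internal));
            return 0;
    }
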
2449 +diff --git a/sound/usb/clock.h b/sound/usb/clock.h
2450 +index 076e31b79ee0..68df0fbe09d0 100644
2451 +--- a/sound/usb/clock.h
2452 ++++ b/sound/usb/clock.h
2453 +@@ -6,7 +6,7 @@ int snd_usb_init_sample_rate(struct snd_usb_audio *chip, int iface,
2454 + struct usb_host_interface *alts,
2455 + struct audioformat *fmt, int rate);
2456 +
2457 +-int snd_usb_clock_find_source(struct snd_usb_audio *chip, int protocol,
2458 +- int entity_id, bool validate);
2459 ++int snd_usb_clock_find_source(struct snd_usb_audio *chip,
2460 ++ struct audioformat *fmt, bool validate);
2461 +
2462 + #endif /* __USBAUDIO_CLOCK_H */
2463 +diff --git a/sound/usb/format.c b/sound/usb/format.c
2464 +index d79db71305f6..25668ba5e68e 100644
2465 +--- a/sound/usb/format.c
2466 ++++ b/sound/usb/format.c
2467 +@@ -322,8 +322,7 @@ static int parse_audio_format_rates_v2v3(struct snd_usb_audio *chip,
2468 + struct usb_device *dev = chip->dev;
2469 + unsigned char tmp[2], *data;
2470 + int nr_triplets, data_size, ret = 0, ret_l6;
2471 +- int clock = snd_usb_clock_find_source(chip, fp->protocol,
2472 +- fp->clock, false);
2473 ++ int clock = snd_usb_clock_find_source(chip, fp, false);
2474 +
2475 + if (clock < 0) {
2476 + dev_err(&dev->dev,
2477 +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
2478 +index 6cd4ff09c5ee..d2a050bb8341 100644
2479 +--- a/sound/usb/mixer.c
2480 ++++ b/sound/usb/mixer.c
2481 +@@ -897,6 +897,15 @@ static int parse_term_proc_unit(struct mixer_build *state,
2482 + return 0;
2483 + }
2484 +
2485 ++static int parse_term_effect_unit(struct mixer_build *state,
2486 ++ struct usb_audio_term *term,
2487 ++ void *p1, int id)
2488 ++{
2489 ++ term->type = UAC3_EFFECT_UNIT << 16; /* virtual type */
2490 ++ term->id = id;
2491 ++ return 0;
2492 ++}
2493 ++
2494 + static int parse_term_uac2_clock_source(struct mixer_build *state,
2495 + struct usb_audio_term *term,
2496 + void *p1, int id)
2497 +@@ -981,8 +990,7 @@ static int __check_input_term(struct mixer_build *state, int id,
2498 + UAC3_PROCESSING_UNIT);
2499 + case PTYPE(UAC_VERSION_2, UAC2_EFFECT_UNIT):
2500 + case PTYPE(UAC_VERSION_3, UAC3_EFFECT_UNIT):
2501 +- return parse_term_proc_unit(state, term, p1, id,
2502 +- UAC3_EFFECT_UNIT);
2503 ++ return parse_term_effect_unit(state, term, p1, id);
2504 + case PTYPE(UAC_VERSION_1, UAC1_EXTENSION_UNIT):
2505 + case PTYPE(UAC_VERSION_2, UAC2_EXTENSION_UNIT_V2):
2506 + case PTYPE(UAC_VERSION_3, UAC3_EXTENSION_UNIT):
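
parse_term_effect_unit() stores a "virtual type" by shifting the unit type
into the high 16 bits of term->type, the same packed-key scheme the
surrounding PTYPE() cases use. A minimal illustration of the packing (the
constant values here are placeholders, not the real descriptor subtypes):

    #include <stdio.h>

    #define UAC3_EFFECT_UNIT        0x07            /* placeholder value */
    #define VIRT_TYPE(unit)         ((unit) << 16)  /* unit type in high bits */

    int main(void)
    {
            int type = VIRT_TYPE(UAC3_EFFECT_UNIT);

            printf("virtual type 0x%06x, unit %d\n", type, type >> 16);
            return 0;
    }
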
2507 +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
2508 +index 82184036437b..1ed25b1d2a6a 100644
2509 +--- a/sound/usb/quirks.c
2510 ++++ b/sound/usb/quirks.c
2511 +@@ -1402,6 +1402,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
2512 + case USB_ID(0x1395, 0x740a): /* Sennheiser DECT */
2513 + case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
2514 + case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
2515 ++ case USB_ID(0x2912, 0x30c8): /* Audioengine D1 */
2516 + return true;
2517 + }
2518 +
2519 +diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
2520 +index 2c41d47f6f83..90d23cc3c8d4 100644
2521 +--- a/tools/perf/util/stat-shadow.c
2522 ++++ b/tools/perf/util/stat-shadow.c
2523 +@@ -18,7 +18,6 @@
2524 + * AGGR_NONE: Use matching CPU
2525 + * AGGR_THREAD: Not supported?
2526 + */
2527 +-static bool have_frontend_stalled;
2528 +
2529 + struct runtime_stat rt_stat;
2530 + struct stats walltime_nsecs_stats;
2531 +@@ -144,7 +143,6 @@ void runtime_stat__exit(struct runtime_stat *st)
2532 +
2533 + void perf_stat__init_shadow_stats(void)
2534 + {
2535 +- have_frontend_stalled = pmu_have_event("cpu", "stalled-cycles-frontend");
2536 + runtime_stat__init(&rt_stat);
2537 + }
2538 +
2539 +@@ -853,10 +851,6 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
2540 + print_metric(config, ctxp, NULL, "%7.2f ",
2541 + "stalled cycles per insn",
2542 + ratio);
2543 +- } else if (have_frontend_stalled) {
2544 +- out->new_line(config, ctxp);
2545 +- print_metric(config, ctxp, NULL, "%7.2f ",
2546 +- "stalled cycles per insn", 0);
2547 + }
2548 + } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) {
2549 + if (runtime_stat_n(st, STAT_BRANCHES, ctx, cpu) != 0)