Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Thu, 12 May 2022 11:30:23
Message-Id: 1652355008.0b7372f26c8ae87988e75371d0973f9b69ffb1e3.mpagano@gentoo
1 commit: 0b7372f26c8ae87988e75371d0973f9b69ffb1e3
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Thu May 12 11:30:08 2022 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Thu May 12 11:30:08 2022 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0b7372f2
7
8 Linux patch 5.4.193
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1192_linux-5.4.193.patch | 2007 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 2011 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index cab1fe9c..b454bd62 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -811,6 +811,10 @@ Patch: 1191_linux-5.4.192.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.4.192
23
24 +Patch: 1192_linux-5.4.193.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.4.193
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1192_linux-5.4.193.patch b/1192_linux-5.4.193.patch
33 new file mode 100644
34 index 00000000..a3b2b985
35 --- /dev/null
36 +++ b/1192_linux-5.4.193.patch
37 @@ -0,0 +1,2007 @@
38 +diff --git a/Makefile b/Makefile
39 +index 968470cf368ee..888d896058553 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 5
45 + PATCHLEVEL = 4
46 +-SUBLEVEL = 192
47 ++SUBLEVEL = 193
48 + EXTRAVERSION =
49 + NAME = Kleptomaniac Octopus
50 +
51 +diff --git a/arch/mips/include/asm/timex.h b/arch/mips/include/asm/timex.h
52 +index b05bb70a2e46f..8026baf46e729 100644
53 +--- a/arch/mips/include/asm/timex.h
54 ++++ b/arch/mips/include/asm/timex.h
55 +@@ -40,9 +40,9 @@
56 + typedef unsigned int cycles_t;
57 +
58 + /*
59 +- * On R4000/R4400 before version 5.0 an erratum exists such that if the
60 +- * cycle counter is read in the exact moment that it is matching the
61 +- * compare register, no interrupt will be generated.
62 ++ * On R4000/R4400 an erratum exists such that if the cycle counter is
63 ++ * read in the exact moment that it is matching the compare register,
64 ++ * no interrupt will be generated.
65 + *
66 + * There is a suggested workaround and also the erratum can't strike if
67 + * the compare interrupt isn't being used as the clock source device.
68 +@@ -63,7 +63,7 @@ static inline int can_use_mips_counter(unsigned int prid)
69 + if (!__builtin_constant_p(cpu_has_counter))
70 + asm volatile("" : "=m" (cpu_data[0].options));
71 + if (likely(cpu_has_counter &&
72 +- prid >= (PRID_IMP_R4000 | PRID_REV_ENCODE_44(5, 0))))
73 ++ prid > (PRID_IMP_R4000 | PRID_REV_ENCODE_44(15, 15))))
74 + return 1;
75 + else
76 + return 0;
77 +diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
78 +index caa01457dce60..ed339d7979f3f 100644
79 +--- a/arch/mips/kernel/time.c
80 ++++ b/arch/mips/kernel/time.c
81 +@@ -141,15 +141,10 @@ static __init int cpu_has_mfc0_count_bug(void)
82 + case CPU_R4400MC:
83 + /*
84 + * The published errata for the R4400 up to 3.0 say the CPU
85 +- * has the mfc0 from count bug.
86 ++ * has the mfc0 from count bug. This seems the last version
87 ++ * produced.
88 + */
89 +- if ((current_cpu_data.processor_id & 0xff) <= 0x30)
90 +- return 1;
91 +-
92 +- /*
93 +- * we assume newer revisions are ok
94 +- */
95 +- return 0;
96 ++ return 1;
97 + }
98 +
99 + return 0;
100 +diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
101 +index 13f771f74ee3b..b0045889864c2 100644
102 +--- a/arch/parisc/kernel/processor.c
103 ++++ b/arch/parisc/kernel/processor.c
104 +@@ -419,8 +419,7 @@ show_cpuinfo (struct seq_file *m, void *v)
105 + }
106 + seq_printf(m, " (0x%02lx)\n", boot_cpu_data.pdc.capabilities);
107 +
108 +- seq_printf(m, "model\t\t: %s\n"
109 +- "model name\t: %s\n",
110 ++ seq_printf(m, "model\t\t: %s - %s\n",
111 + boot_cpu_data.pdc.sys_model_name,
112 + cpuinfo->dev ?
113 + cpuinfo->dev->name : "Unknown");
114 +diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
115 +index 408b51aba2930..f582dda8dd34f 100644
116 +--- a/arch/x86/kernel/kvm.c
117 ++++ b/arch/x86/kernel/kvm.c
118 +@@ -59,6 +59,7 @@ static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __align
119 + DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
120 + static int has_steal_clock = 0;
121 +
122 ++static int has_guest_poll = 0;
123 + /*
124 + * No need for any "IO delay" on KVM
125 + */
126 +@@ -584,14 +585,26 @@ static int kvm_cpu_down_prepare(unsigned int cpu)
127 +
128 + static int kvm_suspend(void)
129 + {
130 ++ u64 val = 0;
131 ++
132 + kvm_guest_cpu_offline(false);
133 +
134 ++#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
135 ++ if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
136 ++ rdmsrl(MSR_KVM_POLL_CONTROL, val);
137 ++ has_guest_poll = !(val & 1);
138 ++#endif
139 + return 0;
140 + }
141 +
142 + static void kvm_resume(void)
143 + {
144 + kvm_cpu_online(raw_smp_processor_id());
145 ++
146 ++#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
147 ++ if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL) && has_guest_poll)
148 ++ wrmsrl(MSR_KVM_POLL_CONTROL, 0);
149 ++#endif
150 + }
151 +
152 + static struct syscore_ops kvm_syscore_ops = {
153 +diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
154 +index 6a8db8eb0e943..62c7f771a7cf8 100644
155 +--- a/arch/x86/kvm/cpuid.c
156 ++++ b/arch/x86/kvm/cpuid.c
157 +@@ -592,6 +592,11 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
158 + union cpuid10_eax eax;
159 + union cpuid10_edx edx;
160 +
161 ++ if (!static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
162 ++ entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
163 ++ break;
164 ++ }
165 ++
166 + perf_get_x86_pmu_capability(&cap);
167 +
168 + /*
169 +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
170 +index afe3b8e615146..3696b4de9d99d 100644
171 +--- a/arch/x86/kvm/lapic.c
172 ++++ b/arch/x86/kvm/lapic.c
173 +@@ -118,7 +118,8 @@ static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
174 +
175 + bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu)
176 + {
177 +- return pi_inject_timer && kvm_vcpu_apicv_active(vcpu);
178 ++ return pi_inject_timer && kvm_vcpu_apicv_active(vcpu) &&
179 ++ (kvm_mwait_in_guest(vcpu->kvm) || kvm_hlt_in_guest(vcpu->kvm));
180 + }
181 + EXPORT_SYMBOL_GPL(kvm_can_post_timer_interrupt);
182 +
183 +diff --git a/block/bio.c b/block/bio.c
184 +index 1c52d0196e15c..40004a3631a80 100644
185 +--- a/block/bio.c
186 ++++ b/block/bio.c
187 +@@ -1627,7 +1627,7 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
188 + if (bytes > len)
189 + bytes = len;
190 +
191 +- page = alloc_page(q->bounce_gfp | gfp_mask);
192 ++ page = alloc_page(q->bounce_gfp | __GFP_ZERO | gfp_mask);
193 + if (!page)
194 + goto cleanup;
195 +
196 +diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
197 +index 3f045b5953b2e..a0c1a665dfc12 100644
198 +--- a/drivers/acpi/acpica/nsaccess.c
199 ++++ b/drivers/acpi/acpica/nsaccess.c
200 +@@ -99,13 +99,12 @@ acpi_status acpi_ns_root_initialize(void)
201 + * just create and link the new node(s) here.
202 + */
203 + new_node =
204 +- ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_namespace_node));
205 ++ acpi_ns_create_node(*ACPI_CAST_PTR(u32, init_val->name));
206 + if (!new_node) {
207 + status = AE_NO_MEMORY;
208 + goto unlock_and_exit;
209 + }
210 +
211 +- ACPI_COPY_NAMESEG(new_node->name.ascii, init_val->name);
212 + new_node->descriptor_type = ACPI_DESC_TYPE_NAMED;
213 + new_node->type = init_val->type;
214 +
215 +diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
216 +index 54be88167c60b..f3b3953cac834 100644
217 +--- a/drivers/firewire/core-card.c
218 ++++ b/drivers/firewire/core-card.c
219 +@@ -668,6 +668,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
220 + void fw_core_remove_card(struct fw_card *card)
221 + {
222 + struct fw_card_driver dummy_driver = dummy_driver_template;
223 ++ unsigned long flags;
224 +
225 + card->driver->update_phy_reg(card, 4,
226 + PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
227 +@@ -682,7 +683,9 @@ void fw_core_remove_card(struct fw_card *card)
228 + dummy_driver.stop_iso = card->driver->stop_iso;
229 + card->driver = &dummy_driver;
230 +
231 ++ spin_lock_irqsave(&card->lock, flags);
232 + fw_destroy_nodes(card);
233 ++ spin_unlock_irqrestore(&card->lock, flags);
234 +
235 + /* Wait for all users, especially device workqueue jobs, to finish. */
236 + fw_card_put(card);
237 +diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
238 +index 1da7ba18d3993..3a43e5d6ed3b2 100644
239 +--- a/drivers/firewire/core-cdev.c
240 ++++ b/drivers/firewire/core-cdev.c
241 +@@ -1482,6 +1482,7 @@ static void outbound_phy_packet_callback(struct fw_packet *packet,
242 + {
243 + struct outbound_phy_packet_event *e =
244 + container_of(packet, struct outbound_phy_packet_event, p);
245 ++ struct client *e_client;
246 +
247 + switch (status) {
248 + /* expected: */
249 +@@ -1498,9 +1499,10 @@ static void outbound_phy_packet_callback(struct fw_packet *packet,
250 + }
251 + e->phy_packet.data[0] = packet->timestamp;
252 +
253 ++ e_client = e->client;
254 + queue_event(e->client, &e->event, &e->phy_packet,
255 + sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
256 +- client_put(e->client);
257 ++ client_put(e_client);
258 + }
259 +
260 + static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
261 +diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
262 +index 94a13fca82673..5999dce11fc88 100644
263 +--- a/drivers/firewire/core-topology.c
264 ++++ b/drivers/firewire/core-topology.c
265 +@@ -374,16 +374,13 @@ static void report_found_node(struct fw_card *card,
266 + card->bm_retries = 0;
267 + }
268 +
269 ++/* Must be called with card->lock held */
270 + void fw_destroy_nodes(struct fw_card *card)
271 + {
272 +- unsigned long flags;
273 +-
274 +- spin_lock_irqsave(&card->lock, flags);
275 + card->color++;
276 + if (card->local_node != NULL)
277 + for_each_fw_node(card, card->local_node, report_lost_node);
278 + card->local_node = NULL;
279 +- spin_unlock_irqrestore(&card->lock, flags);
280 + }
281 +
282 + static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
283 +@@ -509,6 +506,8 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
284 + struct fw_node *local_node;
285 + unsigned long flags;
286 +
287 ++ spin_lock_irqsave(&card->lock, flags);
288 ++
289 + /*
290 + * If the selfID buffer is not the immediate successor of the
291 + * previously processed one, we cannot reliably compare the
292 +@@ -520,8 +519,6 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
293 + card->bm_retries = 0;
294 + }
295 +
296 +- spin_lock_irqsave(&card->lock, flags);
297 +-
298 + card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated;
299 + card->node_id = node_id;
300 + /*
301 +diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
302 +index 404a035f104d0..78574789a1872 100644
303 +--- a/drivers/firewire/core-transaction.c
304 ++++ b/drivers/firewire/core-transaction.c
305 +@@ -73,24 +73,25 @@ static int try_cancel_split_timeout(struct fw_transaction *t)
306 + static int close_transaction(struct fw_transaction *transaction,
307 + struct fw_card *card, int rcode)
308 + {
309 +- struct fw_transaction *t;
310 ++ struct fw_transaction *t = NULL, *iter;
311 + unsigned long flags;
312 +
313 + spin_lock_irqsave(&card->lock, flags);
314 +- list_for_each_entry(t, &card->transaction_list, link) {
315 +- if (t == transaction) {
316 +- if (!try_cancel_split_timeout(t)) {
317 ++ list_for_each_entry(iter, &card->transaction_list, link) {
318 ++ if (iter == transaction) {
319 ++ if (!try_cancel_split_timeout(iter)) {
320 + spin_unlock_irqrestore(&card->lock, flags);
321 + goto timed_out;
322 + }
323 +- list_del_init(&t->link);
324 +- card->tlabel_mask &= ~(1ULL << t->tlabel);
325 ++ list_del_init(&iter->link);
326 ++ card->tlabel_mask &= ~(1ULL << iter->tlabel);
327 ++ t = iter;
328 + break;
329 + }
330 + }
331 + spin_unlock_irqrestore(&card->lock, flags);
332 +
333 +- if (&t->link != &card->transaction_list) {
334 ++ if (t) {
335 + t->callback(card, rcode, NULL, 0, t->callback_data);
336 + return 0;
337 + }
338 +@@ -935,7 +936,7 @@ EXPORT_SYMBOL(fw_core_handle_request);
339 +
340 + void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
341 + {
342 +- struct fw_transaction *t;
343 ++ struct fw_transaction *t = NULL, *iter;
344 + unsigned long flags;
345 + u32 *data;
346 + size_t data_length;
347 +@@ -947,20 +948,21 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
348 + rcode = HEADER_GET_RCODE(p->header[1]);
349 +
350 + spin_lock_irqsave(&card->lock, flags);
351 +- list_for_each_entry(t, &card->transaction_list, link) {
352 +- if (t->node_id == source && t->tlabel == tlabel) {
353 +- if (!try_cancel_split_timeout(t)) {
354 ++ list_for_each_entry(iter, &card->transaction_list, link) {
355 ++ if (iter->node_id == source && iter->tlabel == tlabel) {
356 ++ if (!try_cancel_split_timeout(iter)) {
357 + spin_unlock_irqrestore(&card->lock, flags);
358 + goto timed_out;
359 + }
360 +- list_del_init(&t->link);
361 +- card->tlabel_mask &= ~(1ULL << t->tlabel);
362 ++ list_del_init(&iter->link);
363 ++ card->tlabel_mask &= ~(1ULL << iter->tlabel);
364 ++ t = iter;
365 + break;
366 + }
367 + }
368 + spin_unlock_irqrestore(&card->lock, flags);
369 +
370 +- if (&t->link == &card->transaction_list) {
371 ++ if (!t) {
372 + timed_out:
373 + fw_notice(card, "unsolicited response (source %x, tlabel %x)\n",
374 + source, tlabel);
375 +diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
376 +index 4d5054211550b..2ceed9287435f 100644
377 +--- a/drivers/firewire/sbp2.c
378 ++++ b/drivers/firewire/sbp2.c
379 +@@ -408,7 +408,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
380 + void *payload, size_t length, void *callback_data)
381 + {
382 + struct sbp2_logical_unit *lu = callback_data;
383 +- struct sbp2_orb *orb;
384 ++ struct sbp2_orb *orb = NULL, *iter;
385 + struct sbp2_status status;
386 + unsigned long flags;
387 +
388 +@@ -433,17 +433,18 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
389 +
390 + /* Lookup the orb corresponding to this status write. */
391 + spin_lock_irqsave(&lu->tgt->lock, flags);
392 +- list_for_each_entry(orb, &lu->orb_list, link) {
393 ++ list_for_each_entry(iter, &lu->orb_list, link) {
394 + if (STATUS_GET_ORB_HIGH(status) == 0 &&
395 +- STATUS_GET_ORB_LOW(status) == orb->request_bus) {
396 +- orb->rcode = RCODE_COMPLETE;
397 +- list_del(&orb->link);
398 ++ STATUS_GET_ORB_LOW(status) == iter->request_bus) {
399 ++ iter->rcode = RCODE_COMPLETE;
400 ++ list_del(&iter->link);
401 ++ orb = iter;
402 + break;
403 + }
404 + }
405 + spin_unlock_irqrestore(&lu->tgt->lock, flags);
406 +
407 +- if (&orb->link != &lu->orb_list) {
408 ++ if (orb) {
409 + orb->callback(orb, &status);
410 + kref_put(&orb->kref, free_orb); /* orb callback reference */
411 + } else {
412 +diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
413 +index 3ece59185d372..b1dcd2dd52e6b 100644
414 +--- a/drivers/gpio/gpiolib-of.c
415 ++++ b/drivers/gpio/gpiolib-of.c
416 +@@ -783,7 +783,7 @@ static void of_gpiochip_init_valid_mask(struct gpio_chip *chip)
417 + i, &start);
418 + of_property_read_u32_index(np, "gpio-reserved-ranges",
419 + i + 1, &count);
420 +- if (start >= chip->ngpio || start + count >= chip->ngpio)
421 ++ if (start >= chip->ngpio || start + count > chip->ngpio)
422 + continue;
423 +
424 + bitmap_clear(chip->valid_mask, start, count);
425 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
426 +index 25af45adc03e7..49b52ac3e4731 100644
427 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
428 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
429 +@@ -951,11 +951,15 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
430 + struct dma_fence **ef)
431 + {
432 + struct amdgpu_device *adev = get_amdgpu_device(kgd);
433 +- struct drm_file *drm_priv = filp->private_data;
434 +- struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
435 +- struct amdgpu_vm *avm = &drv_priv->vm;
436 ++ struct amdgpu_fpriv *drv_priv;
437 ++ struct amdgpu_vm *avm;
438 + int ret;
439 +
440 ++ ret = amdgpu_file_to_fpriv(filp, &drv_priv);
441 ++ if (ret)
442 ++ return ret;
443 ++ avm = &drv_priv->vm;
444 ++
445 + /* Already a compute VM? */
446 + if (avm->process_info)
447 + return -EINVAL;
448 +diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
449 +index a30f34cf512c2..95e06886991dc 100644
450 +--- a/drivers/hwmon/adt7470.c
451 ++++ b/drivers/hwmon/adt7470.c
452 +@@ -20,6 +20,7 @@
453 + #include <linux/kthread.h>
454 + #include <linux/slab.h>
455 + #include <linux/util_macros.h>
456 ++#include <linux/sched.h>
457 +
458 + /* Addresses to scan */
459 + static const unsigned short normal_i2c[] = { 0x2C, 0x2E, 0x2F, I2C_CLIENT_END };
460 +@@ -260,11 +261,10 @@ static int adt7470_update_thread(void *p)
461 + adt7470_read_temperatures(client, data);
462 + mutex_unlock(&data->lock);
463 +
464 +- set_current_state(TASK_INTERRUPTIBLE);
465 + if (kthread_should_stop())
466 + break;
467 +
468 +- schedule_timeout(msecs_to_jiffies(data->auto_update_interval));
469 ++ schedule_timeout_interruptible(msecs_to_jiffies(data->auto_update_interval));
470 + }
471 +
472 + return 0;
473 +diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
474 +index e3bac1a877bb7..3aed597103d3d 100644
475 +--- a/drivers/infiniband/sw/siw/siw_cm.c
476 ++++ b/drivers/infiniband/sw/siw/siw_cm.c
477 +@@ -976,14 +976,15 @@ static void siw_accept_newconn(struct siw_cep *cep)
478 +
479 + siw_cep_set_inuse(new_cep);
480 + rv = siw_proc_mpareq(new_cep);
481 +- siw_cep_set_free(new_cep);
482 +-
483 + if (rv != -EAGAIN) {
484 + siw_cep_put(cep);
485 + new_cep->listen_cep = NULL;
486 +- if (rv)
487 ++ if (rv) {
488 ++ siw_cep_set_free(new_cep);
489 + goto error;
490 ++ }
491 + }
492 ++ siw_cep_set_free(new_cep);
493 + }
494 + return;
495 +
496 +diff --git a/drivers/md/dm.c b/drivers/md/dm.c
497 +index 530c0fe142291..37b8bb4d80f0f 100644
498 +--- a/drivers/md/dm.c
499 ++++ b/drivers/md/dm.c
500 +@@ -676,19 +676,20 @@ static void start_io_acct(struct dm_io *io)
501 + false, 0, &io->stats_aux);
502 + }
503 +
504 +-static void end_io_acct(struct dm_io *io)
505 ++static void end_io_acct(struct mapped_device *md, struct bio *bio,
506 ++ unsigned long start_time, struct dm_stats_aux *stats_aux)
507 + {
508 +- struct mapped_device *md = io->md;
509 +- struct bio *bio = io->orig_bio;
510 +- unsigned long duration = jiffies - io->start_time;
511 +-
512 +- generic_end_io_acct(md->queue, bio_op(bio), &dm_disk(md)->part0,
513 +- io->start_time);
514 ++ unsigned long duration = jiffies - start_time;
515 +
516 + if (unlikely(dm_stats_used(&md->stats)))
517 + dm_stats_account_io(&md->stats, bio_data_dir(bio),
518 + bio->bi_iter.bi_sector, bio_sectors(bio),
519 +- true, duration, &io->stats_aux);
520 ++ true, duration, stats_aux);
521 ++
522 ++ smp_wmb();
523 ++
524 ++ generic_end_io_acct(md->queue, bio_op(bio), &dm_disk(md)->part0,
525 ++ start_time);
526 +
527 + /* nudge anyone waiting on suspend queue */
528 + if (unlikely(wq_has_sleeper(&md->wait)))
529 +@@ -909,6 +910,8 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
530 + blk_status_t io_error;
531 + struct bio *bio;
532 + struct mapped_device *md = io->md;
533 ++ unsigned long start_time = 0;
534 ++ struct dm_stats_aux stats_aux;
535 +
536 + /* Push-back supersedes any I/O errors */
537 + if (unlikely(error)) {
538 +@@ -935,8 +938,10 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
539 +
540 + io_error = io->status;
541 + bio = io->orig_bio;
542 +- end_io_acct(io);
543 ++ start_time = io->start_time;
544 ++ stats_aux = io->stats_aux;
545 + free_io(md, io);
546 ++ end_io_acct(md, bio, start_time, &stats_aux);
547 +
548 + if (io_error == BLK_STS_DM_REQUEUE)
549 + return;
550 +@@ -2491,6 +2496,8 @@ static int dm_wait_for_completion(struct mapped_device *md, long task_state)
551 + }
552 + finish_wait(&md->wait, &wait);
553 +
554 ++ smp_rmb();
555 ++
556 + return r;
557 + }
558 +
559 +diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
560 +index 9ff718b61c72e..e5ae3346b05a9 100644
561 +--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
562 ++++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
563 +@@ -37,10 +37,7 @@ struct realtek_pci_sdmmc {
564 + bool double_clk;
565 + bool eject;
566 + bool initial_mode;
567 +- int power_state;
568 +-#define SDMMC_POWER_ON 1
569 +-#define SDMMC_POWER_OFF 0
570 +-
571 ++ int prev_power_state;
572 + int sg_count;
573 + s32 cookie;
574 + int cookie_sg_count;
575 +@@ -902,14 +899,21 @@ static int sd_set_bus_width(struct realtek_pci_sdmmc *host,
576 + return err;
577 + }
578 +
579 +-static int sd_power_on(struct realtek_pci_sdmmc *host)
580 ++static int sd_power_on(struct realtek_pci_sdmmc *host, unsigned char power_mode)
581 + {
582 + struct rtsx_pcr *pcr = host->pcr;
583 + int err;
584 +
585 +- if (host->power_state == SDMMC_POWER_ON)
586 ++ if (host->prev_power_state == MMC_POWER_ON)
587 + return 0;
588 +
589 ++ if (host->prev_power_state == MMC_POWER_UP) {
590 ++ rtsx_pci_write_register(pcr, SD_BUS_STAT, SD_CLK_TOGGLE_EN, 0);
591 ++ goto finish;
592 ++ }
593 ++
594 ++ msleep(100);
595 ++
596 + rtsx_pci_init_cmd(pcr);
597 + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_SELECT, 0x07, SD_MOD_SEL);
598 + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_SHARE_MODE,
599 +@@ -928,11 +932,17 @@ static int sd_power_on(struct realtek_pci_sdmmc *host)
600 + if (err < 0)
601 + return err;
602 +
603 ++ mdelay(1);
604 ++
605 + err = rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN);
606 + if (err < 0)
607 + return err;
608 +
609 +- host->power_state = SDMMC_POWER_ON;
610 ++ /* send at least 74 clocks */
611 ++ rtsx_pci_write_register(pcr, SD_BUS_STAT, SD_CLK_TOGGLE_EN, SD_CLK_TOGGLE_EN);
612 ++
613 ++finish:
614 ++ host->prev_power_state = power_mode;
615 + return 0;
616 + }
617 +
618 +@@ -941,7 +951,7 @@ static int sd_power_off(struct realtek_pci_sdmmc *host)
619 + struct rtsx_pcr *pcr = host->pcr;
620 + int err;
621 +
622 +- host->power_state = SDMMC_POWER_OFF;
623 ++ host->prev_power_state = MMC_POWER_OFF;
624 +
625 + rtsx_pci_init_cmd(pcr);
626 +
627 +@@ -967,7 +977,7 @@ static int sd_set_power_mode(struct realtek_pci_sdmmc *host,
628 + if (power_mode == MMC_POWER_OFF)
629 + err = sd_power_off(host);
630 + else
631 +- err = sd_power_on(host);
632 ++ err = sd_power_on(host, power_mode);
633 +
634 + return err;
635 + }
636 +@@ -1402,10 +1412,11 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
637 +
638 + host = mmc_priv(mmc);
639 + host->pcr = pcr;
640 ++ mmc->ios.power_delay_ms = 5;
641 + host->mmc = mmc;
642 + host->pdev = pdev;
643 + host->cookie = -1;
644 +- host->power_state = SDMMC_POWER_OFF;
645 ++ host->prev_power_state = MMC_POWER_OFF;
646 + INIT_WORK(&host->work, sd_request);
647 + platform_set_drvdata(pdev, host);
648 + pcr->slots[RTSX_SD_CARD].p_dev = pdev;
649 +diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
650 +index b8f1f2b69dd3e..3496dfa1b5217 100644
651 +--- a/drivers/net/can/grcan.c
652 ++++ b/drivers/net/can/grcan.c
653 +@@ -248,6 +248,7 @@ struct grcan_device_config {
654 + struct grcan_priv {
655 + struct can_priv can; /* must be the first member */
656 + struct net_device *dev;
657 ++ struct device *ofdev_dev;
658 + struct napi_struct napi;
659 +
660 + struct grcan_registers __iomem *regs; /* ioremap'ed registers */
661 +@@ -924,7 +925,7 @@ static void grcan_free_dma_buffers(struct net_device *dev)
662 + struct grcan_priv *priv = netdev_priv(dev);
663 + struct grcan_dma *dma = &priv->dma;
664 +
665 +- dma_free_coherent(&dev->dev, dma->base_size, dma->base_buf,
666 ++ dma_free_coherent(priv->ofdev_dev, dma->base_size, dma->base_buf,
667 + dma->base_handle);
668 + memset(dma, 0, sizeof(*dma));
669 + }
670 +@@ -949,7 +950,7 @@ static int grcan_allocate_dma_buffers(struct net_device *dev,
671 +
672 + /* Extra GRCAN_BUFFER_ALIGNMENT to allow for alignment */
673 + dma->base_size = lsize + ssize + GRCAN_BUFFER_ALIGNMENT;
674 +- dma->base_buf = dma_alloc_coherent(&dev->dev,
675 ++ dma->base_buf = dma_alloc_coherent(priv->ofdev_dev,
676 + dma->base_size,
677 + &dma->base_handle,
678 + GFP_KERNEL);
679 +@@ -1113,8 +1114,10 @@ static int grcan_close(struct net_device *dev)
680 +
681 + priv->closing = true;
682 + if (priv->need_txbug_workaround) {
683 ++ spin_unlock_irqrestore(&priv->lock, flags);
684 + del_timer_sync(&priv->hang_timer);
685 + del_timer_sync(&priv->rr_timer);
686 ++ spin_lock_irqsave(&priv->lock, flags);
687 + }
688 + netif_stop_queue(dev);
689 + grcan_stop_hardware(dev);
690 +@@ -1600,6 +1603,7 @@ static int grcan_setup_netdev(struct platform_device *ofdev,
691 + memcpy(&priv->config, &grcan_module_config,
692 + sizeof(struct grcan_device_config));
693 + priv->dev = dev;
694 ++ priv->ofdev_dev = &ofdev->dev;
695 + priv->regs = base;
696 + priv->can.bittiming_const = &grcan_bittiming_const;
697 + priv->can.do_set_bittiming = grcan_set_bittiming;
698 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
699 +index 7f590a9e3af79..5a7d5e7f3b238 100644
700 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
701 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
702 +@@ -9791,7 +9791,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
703 +
704 + if (bp->flags & BNXT_FLAG_CHIP_P5)
705 + return bnxt_rfs_supported(bp);
706 +- if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
707 ++ if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
708 + return false;
709 +
710 + vnics = 1 + bp->rx_nr_rings;
711 +@@ -11725,10 +11725,9 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
712 + goto init_dflt_ring_err;
713 +
714 + bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
715 +- if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
716 +- bp->flags |= BNXT_FLAG_RFS;
717 +- bp->dev->features |= NETIF_F_NTUPLE;
718 +- }
719 ++
720 ++ bnxt_set_dflt_rfs(bp);
721 ++
722 + init_dflt_ring_err:
723 + bnxt_ulp_irq_restart(bp, rc);
724 + return rc;
725 +diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c
726 +index 4db27dfc7ec1f..6702d77030885 100644
727 +--- a/drivers/net/ethernet/mediatek/mtk_sgmii.c
728 ++++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c
729 +@@ -26,6 +26,7 @@ int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3)
730 + break;
731 +
732 + ss->regmap[i] = syscon_node_to_regmap(np);
733 ++ of_node_put(np);
734 + if (IS_ERR(ss->regmap[i]))
735 + return PTR_ERR(ss->regmap[i]);
736 + }
737 +diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
738 +index c7bdada4d1b97..7767d0ae9ebc1 100644
739 +--- a/drivers/net/ethernet/smsc/smsc911x.c
740 ++++ b/drivers/net/ethernet/smsc/smsc911x.c
741 +@@ -2433,7 +2433,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
742 + if (irq == -EPROBE_DEFER) {
743 + retval = -EPROBE_DEFER;
744 + goto out_0;
745 +- } else if (irq <= 0) {
746 ++ } else if (irq < 0) {
747 + pr_warn("Could not allocate irq resource\n");
748 + retval = -ENODEV;
749 + goto out_0;
750 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
751 +index 7c73d296b940d..497ce6e6b16ff 100644
752 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
753 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
754 +@@ -879,6 +879,7 @@ static int sun8i_dwmac_register_mdio_mux(struct stmmac_priv *priv)
755 +
756 + ret = mdio_mux_init(priv->device, mdio_mux, mdio_mux_syscon_switch_fn,
757 + &gmac->mux_handle, priv, priv->mii);
758 ++ of_node_put(mdio_mux);
759 + return ret;
760 + }
761 +
762 +diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
763 +index 63a2d1bcccfbc..bec09008997de 100644
764 +--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
765 ++++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
766 +@@ -820,10 +820,10 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
767 + static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
768 + {
769 + struct mii_bus *bus;
770 +- int rc;
771 + struct resource res;
772 + struct device_node *np = of_get_parent(lp->phy_node);
773 + struct device_node *npp;
774 ++ int rc, ret;
775 +
776 + /* Don't register the MDIO bus if the phy_node or its parent node
777 + * can't be found.
778 +@@ -833,8 +833,14 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
779 + return -ENODEV;
780 + }
781 + npp = of_get_parent(np);
782 +-
783 +- of_address_to_resource(npp, 0, &res);
784 ++ ret = of_address_to_resource(npp, 0, &res);
785 ++ of_node_put(npp);
786 ++ if (ret) {
787 ++ dev_err(dev, "%s resource error!\n",
788 ++ dev->of_node->full_name);
789 ++ of_node_put(np);
790 ++ return ret;
791 ++ }
792 + if (lp->ndev->mem_start != res.start) {
793 + struct phy_device *phydev;
794 + phydev = of_phy_find_device(lp->phy_node);
795 +@@ -843,6 +849,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
796 + "MDIO of the phy is not registered yet\n");
797 + else
798 + put_device(&phydev->mdio.dev);
799 ++ of_node_put(np);
800 + return 0;
801 + }
802 +
803 +@@ -855,6 +862,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
804 + bus = mdiobus_alloc();
805 + if (!bus) {
806 + dev_err(dev, "Failed to allocate mdiobus\n");
807 ++ of_node_put(np);
808 + return -ENOMEM;
809 + }
810 +
811 +@@ -867,6 +875,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
812 + bus->parent = dev;
813 +
814 + rc = of_mdiobus_register(bus, np);
815 ++ of_node_put(np);
816 + if (rc) {
817 + dev_err(dev, "Failed to register mdio bus.\n");
818 + goto err_register;
819 +diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c
820 +index 529be35ac1782..54d228acc0f5d 100644
821 +--- a/drivers/nfc/nfcmrvl/main.c
822 ++++ b/drivers/nfc/nfcmrvl/main.c
823 +@@ -194,6 +194,7 @@ void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv)
824 + {
825 + struct nci_dev *ndev = priv->ndev;
826 +
827 ++ nci_unregister_device(ndev);
828 + if (priv->ndev->nfc_dev->fw_download_in_progress)
829 + nfcmrvl_fw_dnld_abort(priv);
830 +
831 +@@ -202,7 +203,6 @@ void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv)
832 + if (gpio_is_valid(priv->config.reset_n_io))
833 + gpio_free(priv->config.reset_n_io);
834 +
835 +- nci_unregister_device(ndev);
836 + nci_free_device(ndev);
837 + kfree(priv);
838 + }
839 +diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
840 +index 721d2652319ce..7219ca39aa909 100644
841 +--- a/drivers/pci/controller/pci-aardvark.c
842 ++++ b/drivers/pci/controller/pci-aardvark.c
843 +@@ -108,6 +108,7 @@
844 + #define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54)
845 + #define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58)
846 + #define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C)
847 ++#define PCIE_MSI_ALL_MASK GENMASK(31, 0)
848 + #define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C)
849 + #define PCIE_MSI_DATA_MASK GENMASK(15, 0)
850 +
851 +@@ -561,6 +562,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
852 + advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
853 +
854 + /* Clear all interrupts */
855 ++ advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG);
856 + advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG);
857 + advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
858 + advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);
859 +@@ -573,7 +575,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
860 + advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
861 +
862 + /* Unmask all MSIs */
863 +- advk_writel(pcie, 0, PCIE_MSI_MASK_REG);
864 ++ advk_writel(pcie, ~(u32)PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG);
865 +
866 + /* Enable summary interrupt for GIC SPI source */
867 + reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
868 +@@ -1370,23 +1372,19 @@ static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie)
869 + static void advk_pcie_handle_msi(struct advk_pcie *pcie)
870 + {
871 + u32 msi_val, msi_mask, msi_status, msi_idx;
872 +- u16 msi_data;
873 ++ int virq;
874 +
875 + msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
876 + msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG);
877 +- msi_status = msi_val & ~msi_mask;
878 ++ msi_status = msi_val & ((~msi_mask) & PCIE_MSI_ALL_MASK);
879 +
880 + for (msi_idx = 0; msi_idx < MSI_IRQ_NUM; msi_idx++) {
881 + if (!(BIT(msi_idx) & msi_status))
882 + continue;
883 +
884 +- /*
885 +- * msi_idx contains bits [4:0] of the msi_data and msi_data
886 +- * contains 16bit MSI interrupt number
887 +- */
888 + advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG);
889 +- msi_data = advk_readl(pcie, PCIE_MSI_PAYLOAD_REG) & PCIE_MSI_DATA_MASK;
890 +- generic_handle_irq(msi_data);
891 ++ virq = irq_find_mapping(pcie->msi_inner_domain, msi_idx);
892 ++ generic_handle_irq(virq);
893 + }
894 +
895 + advk_writel(pcie, PCIE_ISR0_MSI_INT_PENDING,
896 +diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
897 +index b577c8f7e3462..e0570cd0e520c 100644
898 +--- a/drivers/s390/block/dasd.c
899 ++++ b/drivers/s390/block/dasd.c
900 +@@ -1462,6 +1462,13 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
901 + if (!cqr->lpm)
902 + cqr->lpm = dasd_path_get_opm(device);
903 + }
904 ++ /*
905 ++ * remember the amount of formatted tracks to prevent double format on
906 ++ * ESE devices
907 ++ */
908 ++ if (cqr->block)
909 ++ cqr->trkcount = atomic_read(&cqr->block->trkcount);
910 ++
911 + if (cqr->cpmode == 1) {
912 + rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
913 + (long) cqr, cqr->lpm);
914 +@@ -1680,6 +1687,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
915 + unsigned long now;
916 + int nrf_suppressed = 0;
917 + int fp_suppressed = 0;
918 ++ struct request *req;
919 + u8 *sense = NULL;
920 + int expires;
921 +
922 +@@ -1780,7 +1788,12 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
923 + }
924 +
925 + if (dasd_ese_needs_format(cqr->block, irb)) {
926 +- if (rq_data_dir((struct request *)cqr->callback_data) == READ) {
927 ++ req = dasd_get_callback_data(cqr);
928 ++ if (!req) {
929 ++ cqr->status = DASD_CQR_ERROR;
930 ++ return;
931 ++ }
932 ++ if (rq_data_dir(req) == READ) {
933 + device->discipline->ese_read(cqr, irb);
934 + cqr->status = DASD_CQR_SUCCESS;
935 + cqr->stopclk = now;
936 +@@ -2799,8 +2812,7 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
937 + * complete a request partially.
938 + */
939 + if (proc_bytes) {
940 +- blk_update_request(req, BLK_STS_OK,
941 +- blk_rq_bytes(req) - proc_bytes);
942 ++ blk_update_request(req, BLK_STS_OK, proc_bytes);
943 + blk_mq_requeue_request(req, true);
944 + } else {
945 + blk_mq_complete_request(req);
946 +diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
947 +index ad44d22e88591..7749deb614d75 100644
948 +--- a/drivers/s390/block/dasd_eckd.c
949 ++++ b/drivers/s390/block/dasd_eckd.c
950 +@@ -3026,13 +3026,24 @@ static int dasd_eckd_format_device(struct dasd_device *base,
951 + }
952 +
953 + static bool test_and_set_format_track(struct dasd_format_entry *to_format,
954 +- struct dasd_block *block)
955 ++ struct dasd_ccw_req *cqr)
956 + {
957 ++ struct dasd_block *block = cqr->block;
958 + struct dasd_format_entry *format;
959 + unsigned long flags;
960 + bool rc = false;
961 +
962 + spin_lock_irqsave(&block->format_lock, flags);
963 ++ if (cqr->trkcount != atomic_read(&block->trkcount)) {
964 ++ /*
965 ++ * The number of formatted tracks has changed after request
966 ++ * start and we can not tell if the current track was involved.
967 ++ * To avoid data corruption treat it as if the current track is
968 ++ * involved
969 ++ */
970 ++ rc = true;
971 ++ goto out;
972 ++ }
973 + list_for_each_entry(format, &block->format_list, list) {
974 + if (format->track == to_format->track) {
975 + rc = true;
976 +@@ -3052,6 +3063,7 @@ static void clear_format_track(struct dasd_format_entry *format,
977 + unsigned long flags;
978 +
979 + spin_lock_irqsave(&block->format_lock, flags);
980 ++ atomic_inc(&block->trkcount);
981 + list_del_init(&format->list);
982 + spin_unlock_irqrestore(&block->format_lock, flags);
983 + }
984 +@@ -3088,7 +3100,7 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr,
985 + sector_t curr_trk;
986 + int rc;
987 +
988 +- req = cqr->callback_data;
989 ++ req = dasd_get_callback_data(cqr);
990 + block = cqr->block;
991 + base = block->base;
992 + private = base->private;
993 +@@ -3113,8 +3125,11 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr,
994 + }
995 + format->track = curr_trk;
996 + /* test if track is already in formatting by another thread */
997 +- if (test_and_set_format_track(format, block))
998 ++ if (test_and_set_format_track(format, cqr)) {
999 ++ /* this is no real error so do not count down retries */
1000 ++ cqr->retries++;
1001 + return ERR_PTR(-EEXIST);
1002 ++ }
1003 +
1004 + fdata.start_unit = curr_trk;
1005 + fdata.stop_unit = curr_trk;
1006 +@@ -3213,12 +3228,11 @@ static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb)
1007 + cqr->proc_bytes = blk_count * blksize;
1008 + return 0;
1009 + }
1010 +- if (dst && !skip_block) {
1011 +- dst += off;
1012 ++ if (dst && !skip_block)
1013 + memset(dst, 0, blksize);
1014 +- } else {
1015 ++ else
1016 + skip_block--;
1017 +- }
1018 ++ dst += blksize;
1019 + blk_count++;
1020 + }
1021 + }
1022 +diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
1023 +index fa552f9f16667..9d9685c25253d 100644
1024 +--- a/drivers/s390/block/dasd_int.h
1025 ++++ b/drivers/s390/block/dasd_int.h
1026 +@@ -188,6 +188,7 @@ struct dasd_ccw_req {
1027 + void (*callback)(struct dasd_ccw_req *, void *data);
1028 + void *callback_data;
1029 + unsigned int proc_bytes; /* bytes for partial completion */
1030 ++ unsigned int trkcount; /* count formatted tracks */
1031 + };
1032 +
1033 + /*
1034 +@@ -575,6 +576,7 @@ struct dasd_block {
1035 +
1036 + struct list_head format_list;
1037 + spinlock_t format_lock;
1038 ++ atomic_t trkcount;
1039 + };
1040 +
1041 + struct dasd_attention_data {
1042 +@@ -723,6 +725,18 @@ dasd_check_blocksize(int bsize)
1043 + return 0;
1044 + }
1045 +
1046 ++/*
1047 ++ * return the callback data of the original request in case there are
1048 ++ * ERP requests build on top of it
1049 ++ */
1050 ++static inline void *dasd_get_callback_data(struct dasd_ccw_req *cqr)
1051 ++{
1052 ++ while (cqr->refers)
1053 ++ cqr = cqr->refers;
1054 ++
1055 ++ return cqr->callback_data;
1056 ++}
1057 ++
1058 + /* externals in dasd.c */
1059 + #define DASD_PROFILE_OFF 0
1060 + #define DASD_PROFILE_ON 1
1061 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
1062 +index 9b703c0db9796..b7bfecfc2ea33 100644
1063 +--- a/fs/btrfs/tree-log.c
1064 ++++ b/fs/btrfs/tree-log.c
1065 +@@ -5294,6 +5294,18 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
1066 + mutex_lock(&inode->log_mutex);
1067 + }
1068 +
1069 ++ /*
1070 ++ * For symlinks, we must always log their content, which is stored in an
1071 ++ * inline extent, otherwise we could end up with an empty symlink after
1072 ++ * log replay, which is invalid on linux (symlink(2) returns -ENOENT if
1073 ++ * one attempts to create an empty symlink).
1074 ++ * We don't need to worry about flushing delalloc, because when we create
1075 ++ * the inline extent when the symlink is created (we never have delalloc
1076 ++ * for symlinks).
1077 ++ */
1078 ++ if (S_ISLNK(inode->vfs_inode.i_mode))
1079 ++ inode_only = LOG_INODE_ALL;
1080 ++
1081 + /*
1082 + * a brute force approach to making sure we get the most uptodate
1083 + * copies of everything.
1084 +@@ -5707,7 +5719,7 @@ process_leaf:
1085 + }
1086 +
1087 + ctx->log_new_dentries = false;
1088 +- if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK)
1089 ++ if (type == BTRFS_FT_DIR)
1090 + log_mode = LOG_INODE_ALL;
1091 + ret = btrfs_log_inode(trans, root, BTRFS_I(di_inode),
1092 + log_mode, 0, LLONG_MAX, ctx);
1093 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
1094 +index 76baf7b441f3c..cf3b00751ff65 100644
1095 +--- a/fs/nfs/nfs4proc.c
1096 ++++ b/fs/nfs/nfs4proc.c
1097 +@@ -359,6 +359,14 @@ static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dent
1098 + kunmap_atomic(start);
1099 + }
1100 +
1101 ++static void nfs4_fattr_set_prechange(struct nfs_fattr *fattr, u64 version)
1102 ++{
1103 ++ if (!(fattr->valid & NFS_ATTR_FATTR_PRECHANGE)) {
1104 ++ fattr->pre_change_attr = version;
1105 ++ fattr->valid |= NFS_ATTR_FATTR_PRECHANGE;
1106 ++ }
1107 ++}
1108 ++
1109 + static void nfs4_test_and_free_stateid(struct nfs_server *server,
1110 + nfs4_stateid *stateid,
1111 + const struct cred *cred)
1112 +@@ -6307,7 +6315,9 @@ static void nfs4_delegreturn_release(void *calldata)
1113 + pnfs_roc_release(&data->lr.arg, &data->lr.res,
1114 + data->res.lr_ret);
1115 + if (inode) {
1116 +- nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
1117 ++ nfs4_fattr_set_prechange(&data->fattr,
1118 ++ inode_peek_iversion_raw(inode));
1119 ++ nfs_refresh_inode(inode, &data->fattr);
1120 + nfs_iput_and_deactive(inode);
1121 + }
1122 + kfree(calldata);
1123 +diff --git a/include/net/tcp.h b/include/net/tcp.h
1124 +index 9237362e56065..65be8bd1f0f4a 100644
1125 +--- a/include/net/tcp.h
1126 ++++ b/include/net/tcp.h
1127 +@@ -2015,6 +2015,11 @@ struct tcp_request_sock_ops {
1128 + enum tcp_synack_type synack_type);
1129 + };
1130 +
1131 ++extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
1132 ++#if IS_ENABLED(CONFIG_IPV6)
1133 ++extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops;
1134 ++#endif
1135 ++
1136 + #ifdef CONFIG_SYN_COOKIES
1137 + static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
1138 + const struct sock *sk, struct sk_buff *skb,
1139 +diff --git a/include/sound/pcm.h b/include/sound/pcm.h
1140 +index bbe6eb1ff5d22..f0045f842a604 100644
1141 +--- a/include/sound/pcm.h
1142 ++++ b/include/sound/pcm.h
1143 +@@ -395,6 +395,8 @@ struct snd_pcm_runtime {
1144 + wait_queue_head_t sleep; /* poll sleep */
1145 + wait_queue_head_t tsleep; /* transfer sleep */
1146 + struct fasync_struct *fasync;
1147 ++ struct mutex buffer_mutex; /* protect for buffer changes */
1148 ++ atomic_t buffer_accessing; /* >0: in r/w operation, <0: blocked */
1149 +
1150 + /* -- private section -- */
1151 + void *private_data;
1152 +diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
1153 +index c9d8eb7f5c029..ba4d742c1c655 100644
1154 +--- a/kernel/irq/internals.h
1155 ++++ b/kernel/irq/internals.h
1156 +@@ -29,12 +29,14 @@ extern struct irqaction chained_action;
1157 + * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
1158 + * IRQTF_AFFINITY - irq thread is requested to adjust affinity
1159 + * IRQTF_FORCED_THREAD - irq action is force threaded
1160 ++ * IRQTF_READY - signals that irq thread is ready
1161 + */
1162 + enum {
1163 + IRQTF_RUNTHREAD,
1164 + IRQTF_WARNED,
1165 + IRQTF_AFFINITY,
1166 + IRQTF_FORCED_THREAD,
1167 ++ IRQTF_READY,
1168 + };
1169 +
1170 + /*
1171 +diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
1172 +index 9be995fc3c5a1..172b5e6bc4c2f 100644
1173 +--- a/kernel/irq/irqdesc.c
1174 ++++ b/kernel/irq/irqdesc.c
1175 +@@ -405,6 +405,7 @@ static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
1176 + lockdep_set_class(&desc->lock, &irq_desc_lock_class);
1177 + mutex_init(&desc->request_mutex);
1178 + init_rcu_head(&desc->rcu);
1179 ++ init_waitqueue_head(&desc->wait_for_threads);
1180 +
1181 + desc_set_defaults(irq, desc, node, affinity, owner);
1182 + irqd_set(&desc->irq_data, flags);
1183 +@@ -573,6 +574,7 @@ int __init early_irq_init(void)
1184 + raw_spin_lock_init(&desc[i].lock);
1185 + lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
1186 + mutex_init(&desc[i].request_mutex);
1187 ++ init_waitqueue_head(&desc[i].wait_for_threads);
1188 + desc_set_defaults(i, &desc[i], node, NULL, NULL);
1189 + }
1190 + return arch_early_irq_init();
1191 +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
1192 +index 918fe05933862..79214f9836243 100644
1193 +--- a/kernel/irq/manage.c
1194 ++++ b/kernel/irq/manage.c
1195 +@@ -1102,6 +1102,31 @@ static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
1196 + raw_spin_unlock_irq(&desc->lock);
1197 + }
1198 +
1199 ++/*
1200 ++ * Internal function to notify that a interrupt thread is ready.
1201 ++ */
1202 ++static void irq_thread_set_ready(struct irq_desc *desc,
1203 ++ struct irqaction *action)
1204 ++{
1205 ++ set_bit(IRQTF_READY, &action->thread_flags);
1206 ++ wake_up(&desc->wait_for_threads);
1207 ++}
1208 ++
1209 ++/*
1210 ++ * Internal function to wake up a interrupt thread and wait until it is
1211 ++ * ready.
1212 ++ */
1213 ++static void wake_up_and_wait_for_irq_thread_ready(struct irq_desc *desc,
1214 ++ struct irqaction *action)
1215 ++{
1216 ++ if (!action || !action->thread)
1217 ++ return;
1218 ++
1219 ++ wake_up_process(action->thread);
1220 ++ wait_event(desc->wait_for_threads,
1221 ++ test_bit(IRQTF_READY, &action->thread_flags));
1222 ++}
1223 ++
1224 + /*
1225 + * Interrupt handler thread
1226 + */
1227 +@@ -1113,6 +1138,8 @@ static int irq_thread(void *data)
1228 + irqreturn_t (*handler_fn)(struct irq_desc *desc,
1229 + struct irqaction *action);
1230 +
1231 ++ irq_thread_set_ready(desc, action);
1232 ++
1233 + if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
1234 + &action->thread_flags))
1235 + handler_fn = irq_forced_thread_fn;
1236 +@@ -1541,8 +1568,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1237 + }
1238 +
1239 + if (!shared) {
1240 +- init_waitqueue_head(&desc->wait_for_threads);
1241 +-
1242 + /* Setup the type (level, edge polarity) if configured: */
1243 + if (new->flags & IRQF_TRIGGER_MASK) {
1244 + ret = __irq_set_trigger(desc,
1245 +@@ -1632,14 +1657,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1246 +
1247 + irq_setup_timings(desc, new);
1248 +
1249 +- /*
1250 +- * Strictly no need to wake it up, but hung_task complains
1251 +- * when no hard interrupt wakes the thread up.
1252 +- */
1253 +- if (new->thread)
1254 +- wake_up_process(new->thread);
1255 +- if (new->secondary)
1256 +- wake_up_process(new->secondary->thread);
1257 ++ wake_up_and_wait_for_irq_thread_ready(desc, new);
1258 ++ wake_up_and_wait_for_irq_thread_ready(desc, new->secondary);
1259 +
1260 + register_irq_proc(irq, desc);
1261 + new->dir = NULL;
1262 +diff --git a/mm/page_io.c b/mm/page_io.c
1263 +index bcf27d0572534..f0e3f2be7b44c 100644
1264 +--- a/mm/page_io.c
1265 ++++ b/mm/page_io.c
1266 +@@ -69,54 +69,6 @@ void end_swap_bio_write(struct bio *bio)
1267 + bio_put(bio);
1268 + }
1269 +
1270 +-static void swap_slot_free_notify(struct page *page)
1271 +-{
1272 +- struct swap_info_struct *sis;
1273 +- struct gendisk *disk;
1274 +- swp_entry_t entry;
1275 +-
1276 +- /*
1277 +- * There is no guarantee that the page is in swap cache - the software
1278 +- * suspend code (at least) uses end_swap_bio_read() against a non-
1279 +- * swapcache page. So we must check PG_swapcache before proceeding with
1280 +- * this optimization.
1281 +- */
1282 +- if (unlikely(!PageSwapCache(page)))
1283 +- return;
1284 +-
1285 +- sis = page_swap_info(page);
1286 +- if (!(sis->flags & SWP_BLKDEV))
1287 +- return;
1288 +-
1289 +- /*
1290 +- * The swap subsystem performs lazy swap slot freeing,
1291 +- * expecting that the page will be swapped out again.
1292 +- * So we can avoid an unnecessary write if the page
1293 +- * isn't redirtied.
1294 +- * This is good for real swap storage because we can
1295 +- * reduce unnecessary I/O and enhance wear-leveling
1296 +- * if an SSD is used as the as swap device.
1297 +- * But if in-memory swap device (eg zram) is used,
1298 +- * this causes a duplicated copy between uncompressed
1299 +- * data in VM-owned memory and compressed data in
1300 +- * zram-owned memory. So let's free zram-owned memory
1301 +- * and make the VM-owned decompressed page *dirty*,
1302 +- * so the page should be swapped out somewhere again if
1303 +- * we again wish to reclaim it.
1304 +- */
1305 +- disk = sis->bdev->bd_disk;
1306 +- entry.val = page_private(page);
1307 +- if (disk->fops->swap_slot_free_notify && __swap_count(entry) == 1) {
1308 +- unsigned long offset;
1309 +-
1310 +- offset = swp_offset(entry);
1311 +-
1312 +- SetPageDirty(page);
1313 +- disk->fops->swap_slot_free_notify(sis->bdev,
1314 +- offset);
1315 +- }
1316 +-}
1317 +-
1318 + static void end_swap_bio_read(struct bio *bio)
1319 + {
1320 + struct page *page = bio_first_page_all(bio);
1321 +@@ -132,7 +84,6 @@ static void end_swap_bio_read(struct bio *bio)
1322 + }
1323 +
1324 + SetPageUptodate(page);
1325 +- swap_slot_free_notify(page);
1326 + out:
1327 + unlock_page(page);
1328 + WRITE_ONCE(bio->bi_private, NULL);
1329 +@@ -371,11 +322,6 @@ int swap_readpage(struct page *page, bool synchronous)
1330 +
1331 + ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
1332 + if (!ret) {
1333 +- if (trylock_page(page)) {
1334 +- swap_slot_free_notify(page);
1335 +- unlock_page(page);
1336 +- }
1337 +-
1338 + count_vm_event(PSWPIN);
1339 + return 0;
1340 + }
1341 +diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
1342 +index b1ecc91955172..cac2fdd08df05 100644
1343 +--- a/net/ipv4/igmp.c
1344 ++++ b/net/ipv4/igmp.c
1345 +@@ -2403,9 +2403,10 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
1346 + newpsl->sl_addr[i] = psl->sl_addr[i];
1347 + /* decrease mem now to avoid the memleak warning */
1348 + atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
1349 +- kfree_rcu(psl, rcu);
1350 + }
1351 + rcu_assign_pointer(pmc->sflist, newpsl);
1352 ++ if (psl)
1353 ++ kfree_rcu(psl, rcu);
1354 + psl = newpsl;
1355 + }
1356 + rv = 1; /* > 0 for insert logic below if sl_count is 0 */
1357 +@@ -2503,11 +2504,13 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
1358 + psl->sl_count, psl->sl_addr, 0);
1359 + /* decrease mem now to avoid the memleak warning */
1360 + atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
1361 +- kfree_rcu(psl, rcu);
1362 +- } else
1363 ++ } else {
1364 + (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
1365 + 0, NULL, 0);
1366 ++ }
1367 + rcu_assign_pointer(pmc->sflist, newpsl);
1368 ++ if (psl)
1369 ++ kfree_rcu(psl, rcu);
1370 + pmc->sfmode = msf->imsf_fmode;
1371 + err = 0;
1372 + done:
1373 +diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
1374 +index 2b45d14555926..6811174ad5189 100644
1375 +--- a/net/ipv4/syncookies.c
1376 ++++ b/net/ipv4/syncookies.c
1377 +@@ -332,6 +332,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
1378 +
1379 + ireq = inet_rsk(req);
1380 + treq = tcp_rsk(req);
1381 ++ treq->af_specific = &tcp_request_sock_ipv4_ops;
1382 + treq->rcv_isn = ntohl(th->seq) - 1;
1383 + treq->snt_isn = cookie;
1384 + treq->ts_off = 0;
1385 +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
1386 +index 426d70d45eda4..72fe93ace7d73 100644
1387 +--- a/net/ipv4/tcp_ipv4.c
1388 ++++ b/net/ipv4/tcp_ipv4.c
1389 +@@ -1383,7 +1383,7 @@ struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1390 + .syn_ack_timeout = tcp_syn_ack_timeout,
1391 + };
1392 +
1393 +-static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1394 ++const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1395 + .mss_clamp = TCP_MSS_DEFAULT,
1396 + #ifdef CONFIG_TCP_MD5SIG
1397 + .req_md5_lookup = tcp_v4_md5_lookup,
1398 +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
1399 +index 69aef71f32ea7..92b32d131e1c3 100644
1400 +--- a/net/ipv6/addrconf.c
1401 ++++ b/net/ipv6/addrconf.c
1402 +@@ -3715,6 +3715,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
1403 + struct inet6_dev *idev;
1404 + struct inet6_ifaddr *ifa, *tmp;
1405 + bool keep_addr = false;
1406 ++ bool was_ready;
1407 + int state, i;
1408 +
1409 + ASSERT_RTNL();
1410 +@@ -3780,7 +3781,10 @@ restart:
1411 +
1412 + addrconf_del_rs_timer(idev);
1413 +
1414 +- /* Step 2: clear flags for stateless addrconf */
1415 ++ /* Step 2: clear flags for stateless addrconf, repeated down
1416 ++ * detection
1417 ++ */
1418 ++ was_ready = idev->if_flags & IF_READY;
1419 + if (!how)
1420 + idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);
1421 +
1422 +@@ -3854,7 +3858,7 @@ restart:
1423 + if (how) {
1424 + ipv6_ac_destroy_dev(idev);
1425 + ipv6_mc_destroy_dev(idev);
1426 +- } else {
1427 ++ } else if (was_ready) {
1428 + ipv6_mc_down(idev);
1429 + }
1430 +
1431 +diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
1432 +index ec155844012b2..37ab254f7b92d 100644
1433 +--- a/net/ipv6/syncookies.c
1434 ++++ b/net/ipv6/syncookies.c
1435 +@@ -176,6 +176,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
1436 +
1437 + ireq = inet_rsk(req);
1438 + treq = tcp_rsk(req);
1439 ++ treq->af_specific = &tcp_request_sock_ipv6_ops;
1440 + treq->tfo_listener = false;
1441 +
1442 + if (security_inet_conn_request(sk, skb, req))
1443 +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
1444 +index 51c900e9bfe20..063898cae3e5c 100644
1445 +--- a/net/ipv6/tcp_ipv6.c
1446 ++++ b/net/ipv6/tcp_ipv6.c
1447 +@@ -800,7 +800,7 @@ struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
1448 + .syn_ack_timeout = tcp_syn_ack_timeout,
1449 + };
1450 +
1451 +-static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
1452 ++const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
1453 + .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
1454 + sizeof(struct ipv6hdr),
1455 + #ifdef CONFIG_TCP_MD5SIG
1456 +diff --git a/net/nfc/core.c b/net/nfc/core.c
1457 +index e752692d36802..63701a980ee12 100644
1458 +--- a/net/nfc/core.c
1459 ++++ b/net/nfc/core.c
1460 +@@ -38,7 +38,7 @@ int nfc_fw_download(struct nfc_dev *dev, const char *firmware_name)
1461 +
1462 + device_lock(&dev->dev);
1463 +
1464 +- if (!device_is_registered(&dev->dev)) {
1465 ++ if (dev->shutting_down) {
1466 + rc = -ENODEV;
1467 + goto error;
1468 + }
1469 +@@ -94,7 +94,7 @@ int nfc_dev_up(struct nfc_dev *dev)
1470 +
1471 + device_lock(&dev->dev);
1472 +
1473 +- if (!device_is_registered(&dev->dev)) {
1474 ++ if (dev->shutting_down) {
1475 + rc = -ENODEV;
1476 + goto error;
1477 + }
1478 +@@ -142,7 +142,7 @@ int nfc_dev_down(struct nfc_dev *dev)
1479 +
1480 + device_lock(&dev->dev);
1481 +
1482 +- if (!device_is_registered(&dev->dev)) {
1483 ++ if (dev->shutting_down) {
1484 + rc = -ENODEV;
1485 + goto error;
1486 + }
1487 +@@ -206,7 +206,7 @@ int nfc_start_poll(struct nfc_dev *dev, u32 im_protocols, u32 tm_protocols)
1488 +
1489 + device_lock(&dev->dev);
1490 +
1491 +- if (!device_is_registered(&dev->dev)) {
1492 ++ if (dev->shutting_down) {
1493 + rc = -ENODEV;
1494 + goto error;
1495 + }
1496 +@@ -245,7 +245,7 @@ int nfc_stop_poll(struct nfc_dev *dev)
1497 +
1498 + device_lock(&dev->dev);
1499 +
1500 +- if (!device_is_registered(&dev->dev)) {
1501 ++ if (dev->shutting_down) {
1502 + rc = -ENODEV;
1503 + goto error;
1504 + }
1505 +@@ -290,7 +290,7 @@ int nfc_dep_link_up(struct nfc_dev *dev, int target_index, u8 comm_mode)
1506 +
1507 + device_lock(&dev->dev);
1508 +
1509 +- if (!device_is_registered(&dev->dev)) {
1510 ++ if (dev->shutting_down) {
1511 + rc = -ENODEV;
1512 + goto error;
1513 + }
1514 +@@ -334,7 +334,7 @@ int nfc_dep_link_down(struct nfc_dev *dev)
1515 +
1516 + device_lock(&dev->dev);
1517 +
1518 +- if (!device_is_registered(&dev->dev)) {
1519 ++ if (dev->shutting_down) {
1520 + rc = -ENODEV;
1521 + goto error;
1522 + }
1523 +@@ -400,7 +400,7 @@ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol)
1524 +
1525 + device_lock(&dev->dev);
1526 +
1527 +- if (!device_is_registered(&dev->dev)) {
1528 ++ if (dev->shutting_down) {
1529 + rc = -ENODEV;
1530 + goto error;
1531 + }
1532 +@@ -446,7 +446,7 @@ int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx, u8 mode)
1533 +
1534 + device_lock(&dev->dev);
1535 +
1536 +- if (!device_is_registered(&dev->dev)) {
1537 ++ if (dev->shutting_down) {
1538 + rc = -ENODEV;
1539 + goto error;
1540 + }
1541 +@@ -493,7 +493,7 @@ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb,
1542 +
1543 + device_lock(&dev->dev);
1544 +
1545 +- if (!device_is_registered(&dev->dev)) {
1546 ++ if (dev->shutting_down) {
1547 + rc = -ENODEV;
1548 + kfree_skb(skb);
1549 + goto error;
1550 +@@ -550,7 +550,7 @@ int nfc_enable_se(struct nfc_dev *dev, u32 se_idx)
1551 +
1552 + device_lock(&dev->dev);
1553 +
1554 +- if (!device_is_registered(&dev->dev)) {
1555 ++ if (dev->shutting_down) {
1556 + rc = -ENODEV;
1557 + goto error;
1558 + }
1559 +@@ -599,7 +599,7 @@ int nfc_disable_se(struct nfc_dev *dev, u32 se_idx)
1560 +
1561 + device_lock(&dev->dev);
1562 +
1563 +- if (!device_is_registered(&dev->dev)) {
1564 ++ if (dev->shutting_down) {
1565 + rc = -ENODEV;
1566 + goto error;
1567 + }
1568 +@@ -1127,6 +1127,7 @@ int nfc_register_device(struct nfc_dev *dev)
1569 + dev->rfkill = NULL;
1570 + }
1571 + }
1572 ++ dev->shutting_down = false;
1573 + device_unlock(&dev->dev);
1574 +
1575 + rc = nfc_genl_device_added(dev);
1576 +@@ -1159,12 +1160,10 @@ void nfc_unregister_device(struct nfc_dev *dev)
1577 + rfkill_unregister(dev->rfkill);
1578 + rfkill_destroy(dev->rfkill);
1579 + }
1580 ++ dev->shutting_down = true;
1581 + device_unlock(&dev->dev);
1582 +
1583 + if (dev->ops->check_presence) {
1584 +- device_lock(&dev->dev);
1585 +- dev->shutting_down = true;
1586 +- device_unlock(&dev->dev);
1587 + del_timer_sync(&dev->check_pres_timer);
1588 + cancel_work_sync(&dev->check_pres_work);
1589 + }
1590 +diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
1591 +index 4d90cbdc083b5..9e94f732e717c 100644
1592 +--- a/net/nfc/netlink.c
1593 ++++ b/net/nfc/netlink.c
1594 +@@ -1252,7 +1252,7 @@ int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name,
1595 + struct sk_buff *msg;
1596 + void *hdr;
1597 +
1598 +- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1599 ++ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
1600 + if (!msg)
1601 + return -ENOMEM;
1602 +
1603 +@@ -1268,7 +1268,7 @@ int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name,
1604 +
1605 + genlmsg_end(msg, hdr);
1606 +
1607 +- genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL);
1608 ++ genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_ATOMIC);
1609 +
1610 + return 0;
1611 +
1612 +diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
1613 +index 480e879e74ae5..43bc02dea80c8 100644
1614 +--- a/net/sunrpc/xprtsock.c
1615 ++++ b/net/sunrpc/xprtsock.c
1616 +@@ -2963,9 +2963,6 @@ static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
1617 + }
1618 + xprt_set_bound(xprt);
1619 + xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL);
1620 +- ret = ERR_PTR(xs_local_setup_socket(transport));
1621 +- if (ret)
1622 +- goto out_err;
1623 + break;
1624 + default:
1625 + ret = ERR_PTR(-EAFNOSUPPORT);
1626 +diff --git a/sound/core/pcm.c b/sound/core/pcm.c
1627 +index f8ce961c28d6e..3561cdceaadc2 100644
1628 +--- a/sound/core/pcm.c
1629 ++++ b/sound/core/pcm.c
1630 +@@ -969,6 +969,8 @@ int snd_pcm_attach_substream(struct snd_pcm *pcm, int stream,
1631 + init_waitqueue_head(&runtime->tsleep);
1632 +
1633 + runtime->status->state = SNDRV_PCM_STATE_OPEN;
1634 ++ mutex_init(&runtime->buffer_mutex);
1635 ++ atomic_set(&runtime->buffer_accessing, 0);
1636 +
1637 + substream->runtime = runtime;
1638 + substream->private_data = pcm->private_data;
1639 +@@ -1000,6 +1002,7 @@ void snd_pcm_detach_substream(struct snd_pcm_substream *substream)
1640 + substream->runtime = NULL;
1641 + if (substream->timer)
1642 + spin_unlock_irq(&substream->timer->lock);
1643 ++ mutex_destroy(&runtime->buffer_mutex);
1644 + kfree(runtime);
1645 + put_pid(substream->pid);
1646 + substream->pid = NULL;
1647 +diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
1648 +index fd300c3adddec..1bce55533519d 100644
1649 +--- a/sound/core/pcm_lib.c
1650 ++++ b/sound/core/pcm_lib.c
1651 +@@ -2211,10 +2211,15 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
1652 + err = -EINVAL;
1653 + goto _end_unlock;
1654 + }
1655 ++ if (!atomic_inc_unless_negative(&runtime->buffer_accessing)) {
1656 ++ err = -EBUSY;
1657 ++ goto _end_unlock;
1658 ++ }
1659 + snd_pcm_stream_unlock_irq(substream);
1660 + err = writer(substream, appl_ofs, data, offset, frames,
1661 + transfer);
1662 + snd_pcm_stream_lock_irq(substream);
1663 ++ atomic_dec(&runtime->buffer_accessing);
1664 + if (err < 0)
1665 + goto _end_unlock;
1666 + err = pcm_accessible_state(runtime);
1667 +diff --git a/sound/core/pcm_memory.c b/sound/core/pcm_memory.c
1668 +index 7600dcdf5fd4d..9aea1d6fb0547 100644
1669 +--- a/sound/core/pcm_memory.c
1670 ++++ b/sound/core/pcm_memory.c
1671 +@@ -133,19 +133,20 @@ static void snd_pcm_lib_preallocate_proc_write(struct snd_info_entry *entry,
1672 + size_t size;
1673 + struct snd_dma_buffer new_dmab;
1674 +
1675 ++ mutex_lock(&substream->pcm->open_mutex);
1676 + if (substream->runtime) {
1677 + buffer->error = -EBUSY;
1678 +- return;
1679 ++ goto unlock;
1680 + }
1681 + if (!snd_info_get_line(buffer, line, sizeof(line))) {
1682 + snd_info_get_str(str, line, sizeof(str));
1683 + size = simple_strtoul(str, NULL, 10) * 1024;
1684 + if ((size != 0 && size < 8192) || size > substream->dma_max) {
1685 + buffer->error = -EINVAL;
1686 +- return;
1687 ++ goto unlock;
1688 + }
1689 + if (substream->dma_buffer.bytes == size)
1690 +- return;
1691 ++ goto unlock;
1692 + memset(&new_dmab, 0, sizeof(new_dmab));
1693 + new_dmab.dev = substream->dma_buffer.dev;
1694 + if (size > 0) {
1695 +@@ -153,7 +154,7 @@ static void snd_pcm_lib_preallocate_proc_write(struct snd_info_entry *entry,
1696 + substream->dma_buffer.dev.dev,
1697 + size, &new_dmab) < 0) {
1698 + buffer->error = -ENOMEM;
1699 +- return;
1700 ++ goto unlock;
1701 + }
1702 + substream->buffer_bytes_max = size;
1703 + } else {
1704 +@@ -165,6 +166,8 @@ static void snd_pcm_lib_preallocate_proc_write(struct snd_info_entry *entry,
1705 + } else {
1706 + buffer->error = -EINVAL;
1707 + }
1708 ++ unlock:
1709 ++ mutex_unlock(&substream->pcm->open_mutex);
1710 + }
1711 +
1712 + static inline void preallocate_info_init(struct snd_pcm_substream *substream)
1713 +diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
1714 +index dbe9a65cc1d45..57a4991fa0f36 100644
1715 +--- a/sound/core/pcm_native.c
1716 ++++ b/sound/core/pcm_native.c
1717 +@@ -630,6 +630,30 @@ static int snd_pcm_hw_params_choose(struct snd_pcm_substream *pcm,
1718 + return 0;
1719 + }
1720 +
1721 ++/* acquire buffer_mutex; if it's in r/w operation, return -EBUSY, otherwise
1722 ++ * block the further r/w operations
1723 ++ */
1724 ++static int snd_pcm_buffer_access_lock(struct snd_pcm_runtime *runtime)
1725 ++{
1726 ++ if (!atomic_dec_unless_positive(&runtime->buffer_accessing))
1727 ++ return -EBUSY;
1728 ++ mutex_lock(&runtime->buffer_mutex);
1729 ++ return 0; /* keep buffer_mutex, unlocked by below */
1730 ++}
1731 ++
1732 ++/* release buffer_mutex and clear r/w access flag */
1733 ++static void snd_pcm_buffer_access_unlock(struct snd_pcm_runtime *runtime)
1734 ++{
1735 ++ mutex_unlock(&runtime->buffer_mutex);
1736 ++ atomic_inc(&runtime->buffer_accessing);
1737 ++}
1738 ++
1739 ++#if IS_ENABLED(CONFIG_SND_PCM_OSS)
1740 ++#define is_oss_stream(substream) ((substream)->oss.oss)
1741 ++#else
1742 ++#define is_oss_stream(substream) false
1743 ++#endif
1744 ++
1745 + static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
1746 + struct snd_pcm_hw_params *params)
1747 + {
1748 +@@ -641,22 +665,25 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
1749 + if (PCM_RUNTIME_CHECK(substream))
1750 + return -ENXIO;
1751 + runtime = substream->runtime;
1752 ++ err = snd_pcm_buffer_access_lock(runtime);
1753 ++ if (err < 0)
1754 ++ return err;
1755 + snd_pcm_stream_lock_irq(substream);
1756 + switch (runtime->status->state) {
1757 + case SNDRV_PCM_STATE_OPEN:
1758 + case SNDRV_PCM_STATE_SETUP:
1759 + case SNDRV_PCM_STATE_PREPARED:
1760 ++ if (!is_oss_stream(substream) &&
1761 ++ atomic_read(&substream->mmap_count))
1762 ++ err = -EBADFD;
1763 + break;
1764 + default:
1765 +- snd_pcm_stream_unlock_irq(substream);
1766 +- return -EBADFD;
1767 ++ err = -EBADFD;
1768 ++ break;
1769 + }
1770 + snd_pcm_stream_unlock_irq(substream);
1771 +-#if IS_ENABLED(CONFIG_SND_PCM_OSS)
1772 +- if (!substream->oss.oss)
1773 +-#endif
1774 +- if (atomic_read(&substream->mmap_count))
1775 +- return -EBADFD;
1776 ++ if (err)
1777 ++ goto unlock;
1778 +
1779 + params->rmask = ~0U;
1780 + err = snd_pcm_hw_refine(substream, params);
1781 +@@ -733,14 +760,19 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
1782 + if ((usecs = period_to_usecs(runtime)) >= 0)
1783 + pm_qos_add_request(&substream->latency_pm_qos_req,
1784 + PM_QOS_CPU_DMA_LATENCY, usecs);
1785 +- return 0;
1786 ++ err = 0;
1787 + _error:
1788 +- /* hardware might be unusable from this time,
1789 +- so we force application to retry to set
1790 +- the correct hardware parameter settings */
1791 +- snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
1792 +- if (substream->ops->hw_free != NULL)
1793 +- substream->ops->hw_free(substream);
1794 ++ if (err) {
1795 ++ /* hardware might be unusable from this time,
1796 ++ * so we force application to retry to set
1797 ++ * the correct hardware parameter settings
1798 ++ */
1799 ++ snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
1800 ++ if (substream->ops->hw_free != NULL)
1801 ++ substream->ops->hw_free(substream);
1802 ++ }
1803 ++ unlock:
1804 ++ snd_pcm_buffer_access_unlock(runtime);
1805 + return err;
1806 + }
1807 +
1808 +@@ -773,22 +805,29 @@ static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
1809 + if (PCM_RUNTIME_CHECK(substream))
1810 + return -ENXIO;
1811 + runtime = substream->runtime;
1812 ++ result = snd_pcm_buffer_access_lock(runtime);
1813 ++ if (result < 0)
1814 ++ return result;
1815 + snd_pcm_stream_lock_irq(substream);
1816 + switch (runtime->status->state) {
1817 + case SNDRV_PCM_STATE_SETUP:
1818 + case SNDRV_PCM_STATE_PREPARED:
1819 ++ if (atomic_read(&substream->mmap_count))
1820 ++ result = -EBADFD;
1821 + break;
1822 + default:
1823 +- snd_pcm_stream_unlock_irq(substream);
1824 +- return -EBADFD;
1825 ++ result = -EBADFD;
1826 ++ break;
1827 + }
1828 + snd_pcm_stream_unlock_irq(substream);
1829 +- if (atomic_read(&substream->mmap_count))
1830 +- return -EBADFD;
1831 ++ if (result)
1832 ++ goto unlock;
1833 + if (substream->ops->hw_free)
1834 + result = substream->ops->hw_free(substream);
1835 + snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
1836 + pm_qos_remove_request(&substream->latency_pm_qos_req);
1837 ++ unlock:
1838 ++ snd_pcm_buffer_access_unlock(runtime);
1839 + return result;
1840 + }
1841 +
1842 +@@ -1025,15 +1064,17 @@ struct action_ops {
1843 + */
1844 + static int snd_pcm_action_group(const struct action_ops *ops,
1845 + struct snd_pcm_substream *substream,
1846 +- int state, int do_lock)
1847 ++ int state, int stream_lock)
1848 + {
1849 + struct snd_pcm_substream *s = NULL;
1850 + struct snd_pcm_substream *s1;
1851 + int res = 0, depth = 1;
1852 +
1853 + snd_pcm_group_for_each_entry(s, substream) {
1854 +- if (do_lock && s != substream) {
1855 +- if (s->pcm->nonatomic)
1856 ++ if (s != substream) {
1857 ++ if (!stream_lock)
1858 ++ mutex_lock_nested(&s->runtime->buffer_mutex, depth);
1859 ++ else if (s->pcm->nonatomic)
1860 + mutex_lock_nested(&s->self_group.mutex, depth);
1861 + else
1862 + spin_lock_nested(&s->self_group.lock, depth);
1863 +@@ -1061,18 +1102,18 @@ static int snd_pcm_action_group(const struct action_ops *ops,
1864 + ops->post_action(s, state);
1865 + }
1866 + _unlock:
1867 +- if (do_lock) {
1868 +- /* unlock streams */
1869 +- snd_pcm_group_for_each_entry(s1, substream) {
1870 +- if (s1 != substream) {
1871 +- if (s1->pcm->nonatomic)
1872 +- mutex_unlock(&s1->self_group.mutex);
1873 +- else
1874 +- spin_unlock(&s1->self_group.lock);
1875 +- }
1876 +- if (s1 == s) /* end */
1877 +- break;
1878 ++ /* unlock streams */
1879 ++ snd_pcm_group_for_each_entry(s1, substream) {
1880 ++ if (s1 != substream) {
1881 ++ if (!stream_lock)
1882 ++ mutex_unlock(&s1->runtime->buffer_mutex);
1883 ++ else if (s1->pcm->nonatomic)
1884 ++ mutex_unlock(&s1->self_group.mutex);
1885 ++ else
1886 ++ spin_unlock(&s1->self_group.lock);
1887 + }
1888 ++ if (s1 == s) /* end */
1889 ++ break;
1890 + }
1891 + return res;
1892 + }
1893 +@@ -1202,10 +1243,15 @@ static int snd_pcm_action_nonatomic(const struct action_ops *ops,
1894 +
1895 + /* Guarantee the group members won't change during non-atomic action */
1896 + down_read(&snd_pcm_link_rwsem);
1897 ++ res = snd_pcm_buffer_access_lock(substream->runtime);
1898 ++ if (res < 0)
1899 ++ goto unlock;
1900 + if (snd_pcm_stream_linked(substream))
1901 + res = snd_pcm_action_group(ops, substream, state, 0);
1902 + else
1903 + res = snd_pcm_action_single(ops, substream, state);
1904 ++ snd_pcm_buffer_access_unlock(substream->runtime);
1905 ++ unlock:
1906 + up_read(&snd_pcm_link_rwsem);
1907 + return res;
1908 + }
1909 +diff --git a/sound/firewire/fireworks/fireworks_hwdep.c b/sound/firewire/fireworks/fireworks_hwdep.c
1910 +index e93eb4616c5f4..c739173c668f3 100644
1911 +--- a/sound/firewire/fireworks/fireworks_hwdep.c
1912 ++++ b/sound/firewire/fireworks/fireworks_hwdep.c
1913 +@@ -34,6 +34,7 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
1914 + type = SNDRV_FIREWIRE_EVENT_EFW_RESPONSE;
1915 + if (copy_to_user(buf, &type, sizeof(type)))
1916 + return -EFAULT;
1917 ++ count += sizeof(type);
1918 + remained -= sizeof(type);
1919 + buf += sizeof(type);
1920 +
1921 +diff --git a/sound/soc/codecs/da7219.c b/sound/soc/codecs/da7219.c
1922 +index f83a6eaba12cb..ef8bd9e046374 100644
1923 +--- a/sound/soc/codecs/da7219.c
1924 ++++ b/sound/soc/codecs/da7219.c
1925 +@@ -446,7 +446,7 @@ static int da7219_tonegen_freq_put(struct snd_kcontrol *kcontrol,
1926 + struct soc_mixer_control *mixer_ctrl =
1927 + (struct soc_mixer_control *) kcontrol->private_value;
1928 + unsigned int reg = mixer_ctrl->reg;
1929 +- __le16 val;
1930 ++ __le16 val_new, val_old;
1931 + int ret;
1932 +
1933 + /*
1934 +@@ -454,13 +454,19 @@ static int da7219_tonegen_freq_put(struct snd_kcontrol *kcontrol,
1935 + * Therefore we need to convert to little endian here to align with
1936 + * HW registers.
1937 + */
1938 +- val = cpu_to_le16(ucontrol->value.integer.value[0]);
1939 ++ val_new = cpu_to_le16(ucontrol->value.integer.value[0]);
1940 +
1941 + mutex_lock(&da7219->ctrl_lock);
1942 +- ret = regmap_raw_write(da7219->regmap, reg, &val, sizeof(val));
1943 ++ ret = regmap_raw_read(da7219->regmap, reg, &val_old, sizeof(val_old));
1944 ++ if (ret == 0 && (val_old != val_new))
1945 ++ ret = regmap_raw_write(da7219->regmap, reg,
1946 ++ &val_new, sizeof(val_new));
1947 + mutex_unlock(&da7219->ctrl_lock);
1948 +
1949 +- return ret;
1950 ++ if (ret < 0)
1951 ++ return ret;
1952 ++
1953 ++ return val_old != val_new;
1954 + }
1955 +
1956 +
1957 +diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c
1958 +index 04f23477039a5..c677c068b05ec 100644
1959 +--- a/sound/soc/codecs/wm8958-dsp2.c
1960 ++++ b/sound/soc/codecs/wm8958-dsp2.c
1961 +@@ -534,7 +534,7 @@ static int wm8958_mbc_put(struct snd_kcontrol *kcontrol,
1962 +
1963 + wm8958_dsp_apply(component, mbc, wm8994->mbc_ena[mbc]);
1964 +
1965 +- return 0;
1966 ++ return 1;
1967 + }
1968 +
1969 + #define WM8958_MBC_SWITCH(xname, xval) {\
1970 +@@ -660,7 +660,7 @@ static int wm8958_vss_put(struct snd_kcontrol *kcontrol,
1971 +
1972 + wm8958_dsp_apply(component, vss, wm8994->vss_ena[vss]);
1973 +
1974 +- return 0;
1975 ++ return 1;
1976 + }
1977 +
1978 +
1979 +@@ -734,7 +734,7 @@ static int wm8958_hpf_put(struct snd_kcontrol *kcontrol,
1980 +
1981 + wm8958_dsp_apply(component, hpf % 3, ucontrol->value.integer.value[0]);
1982 +
1983 +- return 0;
1984 ++ return 1;
1985 + }
1986 +
1987 + #define WM8958_HPF_SWITCH(xname, xval) {\
1988 +@@ -828,7 +828,7 @@ static int wm8958_enh_eq_put(struct snd_kcontrol *kcontrol,
1989 +
1990 + wm8958_dsp_apply(component, eq, ucontrol->value.integer.value[0]);
1991 +
1992 +- return 0;
1993 ++ return 1;
1994 + }
1995 +
1996 + #define WM8958_ENH_EQ_SWITCH(xname, xval) {\
1997 +diff --git a/sound/soc/meson/g12a-tohdmitx.c b/sound/soc/meson/g12a-tohdmitx.c
1998 +index 9cfbd343a00c8..cbe47e0cae426 100644
1999 +--- a/sound/soc/meson/g12a-tohdmitx.c
2000 ++++ b/sound/soc/meson/g12a-tohdmitx.c
2001 +@@ -127,7 +127,7 @@ static int g12a_tohdmitx_i2s_mux_put_enum(struct snd_kcontrol *kcontrol,
2002 +
2003 + snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL);
2004 +
2005 +- return 0;
2006 ++ return 1;
2007 + }
2008 +
2009 + static const struct snd_kcontrol_new g12a_tohdmitx_i2s_mux =
2010 +diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
2011 +index ca4b17bd95d14..5552c66ca6422 100644
2012 +--- a/sound/soc/soc-generic-dmaengine-pcm.c
2013 ++++ b/sound/soc/soc-generic-dmaengine-pcm.c
2014 +@@ -91,10 +91,10 @@ static int dmaengine_pcm_hw_params(struct snd_pcm_substream *substream,
2015 +
2016 + memset(&slave_config, 0, sizeof(slave_config));
2017 +
2018 +- if (pcm->config && pcm->config->prepare_slave_config)
2019 +- prepare_slave_config = pcm->config->prepare_slave_config;
2020 +- else
2021 ++ if (!pcm->config)
2022 + prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config;
2023 ++ else
2024 ++ prepare_slave_config = pcm->config->prepare_slave_config;
2025 +
2026 + if (prepare_slave_config) {
2027 + ret = prepare_slave_config(substream, params, &slave_config);
2028 +diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh
2029 +index a3402cd8d5b68..9ff22f28032dd 100755
2030 +--- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh
2031 ++++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh
2032 +@@ -61,9 +61,12 @@ setup_prepare()
2033 +
2034 + vrf_prepare
2035 + mirror_gre_topo_create
2036 ++ # Avoid changing br1's PVID while it is operational as a L3 interface.
2037 ++ ip link set dev br1 down
2038 +
2039 + ip link set dev $swp3 master br1
2040 + bridge vlan add dev br1 vid 555 pvid untagged self
2041 ++ ip link set dev br1 up
2042 + ip address add dev br1 192.0.2.129/28
2043 + ip address add dev br1 2001:db8:2::1/64
2044 +