
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.10 commit in: /
Date: Sun, 17 Oct 2021 13:11:35
Message-Id: 1634476278.875a8cc1cb1211803fb6a844855c96382f616dc1.mpagano@gentoo
commit: 875a8cc1cb1211803fb6a844855c96382f616dc1
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Oct 17 13:11:18 2021 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Oct 17 13:11:18 2021 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=875a8cc1

Linux patch 5.10.74

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1073_linux-5.10.74.patch | 1038 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1042 insertions(+)

diff --git a/0000_README b/0000_README
index 9e6befb..11f68cc 100644
--- a/0000_README
+++ b/0000_README
@@ -335,6 +335,10 @@ Patch: 1072_linux-5.10.73.patch
From: http://www.kernel.org
Desc: Linux 5.10.73

+Patch: 1073_linux-5.10.74.patch
+From: http://www.kernel.org
+Desc: Linux 5.10.74
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1073_linux-5.10.74.patch b/1073_linux-5.10.74.patch
new file mode 100644
index 0000000..d6bbfb1
--- /dev/null
+++ b/1073_linux-5.10.74.patch
@@ -0,0 +1,1038 @@
+diff --git a/Makefile b/Makefile
+index 3f62cea9afc0e..84d540aed24c9 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 10
+-SUBLEVEL = 73
++SUBLEVEL = 74
+ EXTRAVERSION =
+ NAME = Dare mighty things
+
+diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
+index 46f91e0f6a082..fd916844a683f 100644
+--- a/arch/m68k/kernel/signal.c
++++ b/arch/m68k/kernel/signal.c
+@@ -447,7 +447,7 @@ static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
+
+ if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
+ fpu_version = sc->sc_fpstate[0];
+- if (CPU_IS_020_OR_030 &&
++ if (CPU_IS_020_OR_030 && !regs->stkadj &&
+ regs->vector >= (VEC_FPBRUC * 4) &&
+ regs->vector <= (VEC_FPNAN * 4)) {
+ /* Clear pending exception in 68882 idle frame */
+@@ -510,7 +510,7 @@ static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *
+ if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
+ context_size = fpstate[1];
+ fpu_version = fpstate[0];
+- if (CPU_IS_020_OR_030 &&
++ if (CPU_IS_020_OR_030 && !regs->stkadj &&
+ regs->vector >= (VEC_FPBRUC * 4) &&
+ regs->vector <= (VEC_FPNAN * 4)) {
+ /* Clear pending exception in 68882 idle frame */
+@@ -828,18 +828,24 @@ badframe:
+ return 0;
+ }
+
++static inline struct pt_regs *rte_regs(struct pt_regs *regs)
++{
++ return (void *)regs + regs->stkadj;
++}
++
+ static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
+ unsigned long mask)
+ {
++ struct pt_regs *tregs = rte_regs(regs);
+ sc->sc_mask = mask;
+ sc->sc_usp = rdusp();
+ sc->sc_d0 = regs->d0;
+ sc->sc_d1 = regs->d1;
+ sc->sc_a0 = regs->a0;
+ sc->sc_a1 = regs->a1;
+- sc->sc_sr = regs->sr;
+- sc->sc_pc = regs->pc;
+- sc->sc_formatvec = regs->format << 12 | regs->vector;
++ sc->sc_sr = tregs->sr;
++ sc->sc_pc = tregs->pc;
++ sc->sc_formatvec = tregs->format << 12 | tregs->vector;
+ save_a5_state(sc, regs);
+ save_fpu_state(sc, regs);
+ }
+@@ -847,6 +853,7 @@ static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
+ static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
+ {
+ struct switch_stack *sw = (struct switch_stack *)regs - 1;
++ struct pt_regs *tregs = rte_regs(regs);
+ greg_t __user *gregs = uc->uc_mcontext.gregs;
+ int err = 0;
+
+@@ -867,9 +874,9 @@ static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *
+ err |= __put_user(sw->a5, &gregs[13]);
+ err |= __put_user(sw->a6, &gregs[14]);
+ err |= __put_user(rdusp(), &gregs[15]);
+- err |= __put_user(regs->pc, &gregs[16]);
+- err |= __put_user(regs->sr, &gregs[17]);
+- err |= __put_user((regs->format << 12) | regs->vector, &uc->uc_formatvec);
++ err |= __put_user(tregs->pc, &gregs[16]);
++ err |= __put_user(tregs->sr, &gregs[17]);
++ err |= __put_user((tregs->format << 12) | tregs->vector, &uc->uc_formatvec);
+ err |= rt_save_fpu_state(uc, regs);
+ return err;
+ }
+@@ -886,13 +893,14 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
+ struct pt_regs *regs)
+ {
+ struct sigframe __user *frame;
+- int fsize = frame_extra_sizes(regs->format);
++ struct pt_regs *tregs = rte_regs(regs);
++ int fsize = frame_extra_sizes(tregs->format);
+ struct sigcontext context;
+ int err = 0, sig = ksig->sig;
+
+ if (fsize < 0) {
+ pr_debug("setup_frame: Unknown frame format %#x\n",
+- regs->format);
++ tregs->format);
+ return -EFAULT;
+ }
+
+@@ -903,7 +911,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
+
+ err |= __put_user(sig, &frame->sig);
+
+- err |= __put_user(regs->vector, &frame->code);
++ err |= __put_user(tregs->vector, &frame->code);
+ err |= __put_user(&frame->sc, &frame->psc);
+
+ if (_NSIG_WORDS > 1)
+@@ -929,34 +937,28 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
+
+ push_cache ((unsigned long) &frame->retcode);
+
+- /*
+- * Set up registers for signal handler. All the state we are about
+- * to destroy is successfully copied to sigframe.
+- */
+- wrusp ((unsigned long) frame);
+- regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
+- adjustformat(regs);
+-
+ /*
+ * This is subtle; if we build more than one sigframe, all but the
+ * first one will see frame format 0 and have fsize == 0, so we won't
+ * screw stkadj.
+ */
+- if (fsize)
++ if (fsize) {
+ regs->stkadj = fsize;
+-
+- /* Prepare to skip over the extra stuff in the exception frame. */
+- if (regs->stkadj) {
+- struct pt_regs *tregs =
+- (struct pt_regs *)((ulong)regs + regs->stkadj);
++ tregs = rte_regs(regs);
+ pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);
+- /* This must be copied with decreasing addresses to
+- handle overlaps. */
+ tregs->vector = 0;
+ tregs->format = 0;
+- tregs->pc = regs->pc;
+ tregs->sr = regs->sr;
+ }
++
++ /*
++ * Set up registers for signal handler. All the state we are about
++ * to destroy is successfully copied to sigframe.
++ */
++ wrusp ((unsigned long) frame);
++ tregs->pc = (unsigned long) ksig->ka.sa.sa_handler;
++ adjustformat(regs);
++
+ return 0;
+ }
+
+@@ -964,7 +966,8 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+ struct pt_regs *regs)
+ {
+ struct rt_sigframe __user *frame;
+- int fsize = frame_extra_sizes(regs->format);
++ struct pt_regs *tregs = rte_regs(regs);
++ int fsize = frame_extra_sizes(tregs->format);
+ int err = 0, sig = ksig->sig;
+
+ if (fsize < 0) {
+@@ -1014,34 +1017,27 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+
+ push_cache ((unsigned long) &frame->retcode);
+
+- /*
+- * Set up registers for signal handler. All the state we are about
+- * to destroy is successfully copied to sigframe.
+- */
+- wrusp ((unsigned long) frame);
+- regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
+- adjustformat(regs);
+-
+ /*
+ * This is subtle; if we build more than one sigframe, all but the
+ * first one will see frame format 0 and have fsize == 0, so we won't
+ * screw stkadj.
+ */
+- if (fsize)
++ if (fsize) {
+ regs->stkadj = fsize;
+-
+- /* Prepare to skip over the extra stuff in the exception frame. */
+- if (regs->stkadj) {
+- struct pt_regs *tregs =
+- (struct pt_regs *)((ulong)regs + regs->stkadj);
++ tregs = rte_regs(regs);
+ pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);
+- /* This must be copied with decreasing addresses to
+- handle overlaps. */
+ tregs->vector = 0;
+ tregs->format = 0;
+- tregs->pc = regs->pc;
+ tregs->sr = regs->sr;
+ }
++
++ /*
++ * Set up registers for signal handler. All the state we are about
++ * to destroy is successfully copied to sigframe.
++ */
++ wrusp ((unsigned long) frame);
++ tregs->pc = (unsigned long) ksig->ka.sa.sa_handler;
++ adjustformat(regs);
+ return 0;
+ }
+
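The m68k hunks above all follow from one invariant: when the kernel has shrunk a hardware exception frame, regs->stkadj records how many bytes the frame was shifted, so the register image the RTE instruction will actually restore lives at (void *)regs + regs->stkadj -- exactly what the new rte_regs() helper computes, and why pc/sr must now be written through that adjusted pointer. A minimal userspace sketch of the pointer arithmetic, using a made-up, cut-down stand-in for pt_regs rather than the kernel's real layout:

  #include <stdio.h>
  #include <string.h>

  /* Hypothetical, cut-down stand-in for the m68k pt_regs layout. */
  struct fake_pt_regs {
          unsigned long pc;
          unsigned short sr;
          unsigned long stkadj; /* bytes the exception frame was shifted */
  };

  /* Mirrors rte_regs(): the frame RTE consumes sits stkadj bytes
   * above the pt_regs pointer the C code was handed. */
  static struct fake_pt_regs *rte_regs(struct fake_pt_regs *regs)
  {
          return (void *)((char *)regs + regs->stkadj);
  }

  int main(void)
  {
          union {
                  unsigned char bytes[128];
                  struct fake_pt_regs r;
          } stack;
          struct fake_pt_regs *regs = &stack.r;

          memset(&stack, 0, sizeof(stack));
          regs->stkadj = 32;               /* pretend the frame was shrunk */
          rte_regs(regs)->pc = 0xdeadbeef; /* write where RTE will look */
          printf("pc via adjusted frame: %#lx\n", rte_regs(regs)->pc);
          return 0;
  }

The fix in setup_frame()/setup_rt_frame() is precisely this: the handler address is stored through the adjusted pointer after stkadj is set, instead of into a slot the subsequent stack adjustment would discard.
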
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+index dbc8b76b9b78e..150fa5258fb6f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+@@ -1018,6 +1018,8 @@ static int gmc_v10_0_hw_fini(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
++ gmc_v10_0_gart_disable(adev);
++
+ if (amdgpu_sriov_vf(adev)) {
+ /* full access mode, so don't touch any GMC register */
+ DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
+@@ -1026,7 +1028,6 @@ static int gmc_v10_0_hw_fini(void *handle)
+
+ amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
+ amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
+- gmc_v10_0_gart_disable(adev);
+
+ return 0;
+ }
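
This hunk and the gmc_v9_0.c one below share a bug shape: an early return on the SR-IOV path skipped the GART teardown, which only ran at the end of hw_fini. Hoisting the call above the early return makes the teardown unconditional. A schematic of the corrected flow, with stub names standing in for the amdgpu functions:

  #include <stdbool.h>
  #include <stdio.h>

  static void gart_disable(void) { puts("gart disabled"); }

  /* After the fix: teardown runs before any early return, so the
   * SR-IOV path can no longer skip it. */
  static int hw_fini(bool sriov)
  {
          gart_disable();
          if (sriov)
                  return 0; /* full access mode: don't touch GMC registers */
          puts("irqs released");
          return 0;
  }

  int main(void)
  {
          hw_fini(true);
          hw_fini(false);
          return 0;
  }
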
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 3ebbddb63705c..3a864041968f6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -1677,6 +1677,8 @@ static int gmc_v9_0_hw_fini(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
++ gmc_v9_0_gart_disable(adev);
++
+ if (amdgpu_sriov_vf(adev)) {
+ /* full access mode, so don't touch any GMC register */
+ DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
+@@ -1685,7 +1687,6 @@ static int gmc_v9_0_hw_fini(void *handle)
+
+ amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
+ amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
+- gmc_v9_0_gart_disable(adev);
+
+ return 0;
+ }
+diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
+index 6b8f0d004d345..5c1d33cda863b 100644
+--- a/drivers/hid/hid-apple.c
++++ b/drivers/hid/hid-apple.c
+@@ -322,12 +322,19 @@ static int apple_event(struct hid_device *hdev, struct hid_field *field,
+
+ /*
+ * MacBook JIS keyboard has wrong logical maximum
++ * Magic Keyboard JIS has wrong logical maximum
+ */
+ static __u8 *apple_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+ {
+ struct apple_sc *asc = hid_get_drvdata(hdev);
+
++ if (*rsize >= 71 && rdesc[70] == 0x65 && rdesc[64] == 0x65) {
++ hid_info(hdev,
++ "fixing up Magic Keyboard JIS report descriptor\n");
++ rdesc[64] = rdesc[70] = 0xe7;
++ }
++
+ if ((asc->quirks & APPLE_RDESC_JIS) && *rsize >= 60 &&
+ rdesc[53] == 0x65 && rdesc[59] == 0x65) {
+ hid_info(hdev,
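
The new fixup patches the report descriptor in place before it is parsed: on the Magic Keyboard JIS, bytes 64 and 70 both carry a bogus maximum of 0x65, and rewriting them to 0xe7 restores the full key range, mirroring the existing APPLE_RDESC_JIS quirk at offsets 53/59. A standalone model of that byte patch, against a fake descriptor rather than a real HID report:

  #include <stdio.h>

  /* Patch a (fake) report descriptor the way apple_report_fixup() does
   * for the Magic Keyboard JIS; offsets and values from the hunk above. */
  static void fixup_jis(unsigned char *rdesc, unsigned int rsize)
  {
          if (rsize >= 71 && rdesc[70] == 0x65 && rdesc[64] == 0x65)
                  rdesc[64] = rdesc[70] = 0xe7;
  }

  int main(void)
  {
          unsigned char rdesc[71] = { 0 };

          rdesc[64] = rdesc[70] = 0x65; /* the broken maxima */
          fixup_jis(rdesc, sizeof(rdesc));
          printf("rdesc[64]=%#x rdesc[70]=%#x\n", rdesc[64], rdesc[70]);
          return 0;
  }
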
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 4228ddc3df0e6..b2719cf37aa52 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -4715,6 +4715,12 @@ static const struct wacom_features wacom_features_0x393 =
+ { "Wacom Intuos Pro S", 31920, 19950, 8191, 63,
+ INTUOSP2S_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 7,
+ .touch_max = 10 };
++static const struct wacom_features wacom_features_0x3c6 =
++ { "Wacom Intuos BT S", 15200, 9500, 4095, 63,
++ INTUOSHT3_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 };
++static const struct wacom_features wacom_features_0x3c8 =
++ { "Wacom Intuos BT M", 21600, 13500, 4095, 63,
++ INTUOSHT3_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 };
+
+ static const struct wacom_features wacom_features_HID_ANY_ID =
+ { "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID };
+@@ -4888,6 +4894,8 @@ const struct hid_device_id wacom_ids[] = {
+ { USB_DEVICE_WACOM(0x37A) },
+ { USB_DEVICE_WACOM(0x37B) },
+ { BT_DEVICE_WACOM(0x393) },
++ { BT_DEVICE_WACOM(0x3c6) },
++ { BT_DEVICE_WACOM(0x3c8) },
+ { USB_DEVICE_WACOM(0x4001) },
+ { USB_DEVICE_WACOM(0x4004) },
+ { USB_DEVICE_WACOM(0x5000) },
+diff --git a/drivers/hwmon/ltc2947-core.c b/drivers/hwmon/ltc2947-core.c
+index bb3f7749a0b00..5423466de697a 100644
+--- a/drivers/hwmon/ltc2947-core.c
++++ b/drivers/hwmon/ltc2947-core.c
+@@ -989,8 +989,12 @@ static int ltc2947_setup(struct ltc2947_data *st)
+ return ret;
+
+ /* check external clock presence */
+- extclk = devm_clk_get(st->dev, NULL);
+- if (!IS_ERR(extclk)) {
++ extclk = devm_clk_get_optional(st->dev, NULL);
++ if (IS_ERR(extclk))
++ return dev_err_probe(st->dev, PTR_ERR(extclk),
++ "Failed to get external clock\n");
++
++ if (extclk) {
+ unsigned long rate_hz;
+ u8 pre = 0, div, tbctl;
+ u64 aux;
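
The switch to devm_clk_get_optional() separates two cases the old IS_ERR() test conflated: a NULL return now means "no external clock wired up" (fine, fall back to the internal one), while an error pointer means the lookup itself failed and must be propagated, e.g. for probe deferral. A sketch of that calling convention with stub macros imitating the kernel's ERR_PTR scheme -- not the real clk API:

  #include <stdio.h>

  #define MAX_ERRNO 4095
  #define ERR_PTR(e) ((void *)(long)(e))
  #define IS_ERR(p) ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
  #define PTR_ERR(p) ((long)(p))

  /* Stand-in for devm_clk_get_optional(): NULL = absent, error = failure. */
  static void *get_optional_clk(int present, int fail)
  {
          if (fail)
                  return ERR_PTR(-517); /* e.g. probe deferral */
          return present ? (void *)0x1 : NULL;
  }

  static int setup(int present, int fail)
  {
          void *clk = get_optional_clk(present, fail);

          if (IS_ERR(clk))
                  return (int)PTR_ERR(clk); /* hard failure: bail out */
          if (clk)
                  puts("configure external clock");
          else
                  puts("no external clock, use internal");
          return 0;
  }

  int main(void)
  {
          setup(1, 0);
          setup(0, 0);
          printf("error path: %d\n", setup(1, 1));
          return 0;
  }
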
+diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c
+index 79bc2032dcb2a..da261d32450d0 100644
+--- a/drivers/hwmon/pmbus/ibm-cffps.c
++++ b/drivers/hwmon/pmbus/ibm-cffps.c
+@@ -171,8 +171,14 @@ static ssize_t ibm_cffps_debugfs_read(struct file *file, char __user *buf,
+ cmd = CFFPS_SN_CMD;
+ break;
+ case CFFPS_DEBUGFS_MAX_POWER_OUT:
+- rc = i2c_smbus_read_word_swapped(psu->client,
+- CFFPS_MAX_POWER_OUT_CMD);
++ if (psu->version == cffps1) {
++ rc = i2c_smbus_read_word_swapped(psu->client,
++ CFFPS_MAX_POWER_OUT_CMD);
++ } else {
++ rc = i2c_smbus_read_word_data(psu->client,
++ CFFPS_MAX_POWER_OUT_CMD);
++ }
++
+ if (rc < 0)
+ return rc;
+
+diff --git a/drivers/net/ethernet/sun/Kconfig b/drivers/net/ethernet/sun/Kconfig
+index 309de38a75304..b0d3f9a2950c0 100644
+--- a/drivers/net/ethernet/sun/Kconfig
++++ b/drivers/net/ethernet/sun/Kconfig
+@@ -73,6 +73,7 @@ config CASSINI
+ config SUNVNET_COMMON
+ tristate "Common routines to support Sun Virtual Networking"
+ depends on SUN_LDOMS
++ depends on INET
+ default m
+
+ config SUNVNET
+diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
+index 43e682297fd5f..0a1734f34587d 100644
+--- a/drivers/scsi/ses.c
++++ b/drivers/scsi/ses.c
+@@ -118,7 +118,7 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
+ static int ses_send_diag(struct scsi_device *sdev, int page_code,
+ void *buf, int bufflen)
+ {
+- u32 result;
++ int result;
+
+ unsigned char cmd[] = {
+ SEND_DIAGNOSTIC,
+diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
+index b9c86a7e3b97d..6dac58ae61206 100644
+--- a/drivers/scsi/virtio_scsi.c
++++ b/drivers/scsi/virtio_scsi.c
+@@ -302,7 +302,7 @@ static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
+ }
+ break;
+ default:
+- pr_info("Unsupport virtio scsi event reason %x\n", event->reason);
++ pr_info("Unsupported virtio scsi event reason %x\n", event->reason);
+ }
+ }
+
+@@ -394,7 +394,7 @@ static void virtscsi_handle_event(struct work_struct *work)
+ virtscsi_handle_param_change(vscsi, event);
+ break;
+ default:
+- pr_err("Unsupport virtio scsi event %x\n", event->event);
++ pr_err("Unsupported virtio scsi event %x\n", event->event);
+ }
+ virtscsi_kick_event(vscsi, event_node);
+ }
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index 0f7b53d5edea6..a96b688a0410f 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -733,18 +733,13 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
+ void *kaddr;
+ struct ext4_iloc iloc;
+
+- if (unlikely(copied < len)) {
+- if (!PageUptodate(page)) {
+- copied = 0;
+- goto out;
+- }
+- }
++ if (unlikely(copied < len) && !PageUptodate(page))
++ return 0;
+
+ ret = ext4_get_inode_loc(inode, &iloc);
+ if (ret) {
+ ext4_std_error(inode->i_sb, ret);
+- copied = 0;
+- goto out;
++ return ret;
+ }
+
+ ext4_write_lock_xattr(inode, &no_expand);
+@@ -757,7 +752,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
+ (void) ext4_find_inline_data_nolock(inode);
+
+ kaddr = kmap_atomic(page);
+- ext4_write_inline_data(inode, &iloc, kaddr, pos, len);
++ ext4_write_inline_data(inode, &iloc, kaddr, pos, copied);
+ kunmap_atomic(kaddr);
+ SetPageUptodate(page);
+ /* clear page dirty so that writepages wouldn't work for us. */
+@@ -766,7 +761,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
+ ext4_write_unlock_xattr(inode, &no_expand);
+ brelse(iloc.bh);
+ mark_inode_dirty(inode);
+-out:
++
+ return copied;
+ }
+
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 63a292db75877..317aa1b90fb95 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1296,6 +1296,7 @@ static int ext4_write_end(struct file *file,
+ goto errout;
+ }
+ copied = ret;
++ ret = 0;
+ } else
+ copied = block_write_end(file, mapping, pos,
+ len, copied, page, fsdata);
+@@ -1322,13 +1323,14 @@ static int ext4_write_end(struct file *file,
+ if (i_size_changed || inline_data)
+ ret = ext4_mark_inode_dirty(handle, inode);
+
++errout:
+ if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
+ /* if we have allocated more blocks and copied
+ * less. We will have blocks allocated outside
+ * inode->i_size. So truncate them
+ */
+ ext4_orphan_add(handle, inode);
+-errout:
+
+ ret2 = ext4_journal_stop(handle);
+ if (!ret)
+ ret = ret2;
+@@ -1411,6 +1413,7 @@ static int ext4_journalled_write_end(struct file *file,
+ goto errout;
+ }
+ copied = ret;
++ ret = 0;
+ } else if (unlikely(copied < len) && !PageUptodate(page)) {
+ copied = 0;
+ ext4_journalled_zero_new_buffers(handle, page, from, to);
+@@ -1440,6 +1443,7 @@ static int ext4_journalled_write_end(struct file *file,
+ ret = ret2;
+ }
+
++errout:
+ if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
+ /* if we have allocated more blocks and copied
+ * less. We will have blocks allocated outside
+@@ -1447,7 +1451,6 @@ static int ext4_journalled_write_end(struct file *file,
+ */
+ ext4_orphan_add(handle, inode);
+
+-errout:
+ ret2 = ext4_journal_stop(handle);
+ if (!ret)
+ ret = ret2;
+@@ -3090,35 +3093,37 @@ static int ext4_da_write_end(struct file *file,
+ end = start + copied - 1;
+
+ /*
+- * generic_write_end() will run mark_inode_dirty() if i_size
+- * changes. So let's piggyback the i_disksize mark_inode_dirty
+- * into that.
++ * Since we are holding inode lock, we are sure i_disksize <=
++ * i_size. We also know that if i_disksize < i_size, there are
++ * delalloc writes pending in the range upto i_size. If the end of
++ * the current write is <= i_size, there's no need to touch
++ * i_disksize since writeback will push i_disksize upto i_size
++ * eventually. If the end of the current write is > i_size and
++ * inside an allocated block (ext4_da_should_update_i_disksize()
++ * check), we need to update i_disksize here as neither
++ * ext4_writepage() nor certain ext4_writepages() paths not
++ * allocating blocks update i_disksize.
++ *
++ * Note that we defer inode dirtying to generic_write_end() /
++ * ext4_da_write_inline_data_end().
+ */
+ new_i_size = pos + copied;
+- if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
++ if (copied && new_i_size > inode->i_size) {
+ if (ext4_has_inline_data(inode) ||
+- ext4_da_should_update_i_disksize(page, end)) {
++ ext4_da_should_update_i_disksize(page, end))
+ ext4_update_i_disksize(inode, new_i_size);
+- /* We need to mark inode dirty even if
+- * new_i_size is less that inode->i_size
+- * bu greater than i_disksize.(hint delalloc)
+- */
+- ret = ext4_mark_inode_dirty(handle, inode);
+- }
+ }
+
+ if (write_mode != CONVERT_INLINE_DATA &&
+ ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
+ ext4_has_inline_data(inode))
+- ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied,
++ ret = ext4_da_write_inline_data_end(inode, pos, len, copied,
+ page);
+ else
+- ret2 = generic_write_end(file, mapping, pos, len, copied,
++ ret = generic_write_end(file, mapping, pos, len, copied,
+ page, fsdata);
+
+- copied = ret2;
+- if (ret2 < 0)
+- ret = ret2;
++ copied = ret;
+ ret2 = ext4_journal_stop(handle);
+ if (unlikely(ret2 && !ret))
+ ret = ret2;
+diff --git a/fs/vboxsf/super.c b/fs/vboxsf/super.c
+index d7816c01a4f62..c578e772cbd58 100644
+--- a/fs/vboxsf/super.c
++++ b/fs/vboxsf/super.c
+@@ -21,10 +21,7 @@
+
+ #define VBOXSF_SUPER_MAGIC 0x786f4256 /* 'VBox' little endian */
+
+-#define VBSF_MOUNT_SIGNATURE_BYTE_0 ('\000')
+-#define VBSF_MOUNT_SIGNATURE_BYTE_1 ('\377')
+-#define VBSF_MOUNT_SIGNATURE_BYTE_2 ('\376')
+-#define VBSF_MOUNT_SIGNATURE_BYTE_3 ('\375')
++static const unsigned char VBSF_MOUNT_SIGNATURE[4] = "\000\377\376\375";
+
+ static int follow_symlinks;
+ module_param(follow_symlinks, int, 0444);
+@@ -386,12 +383,7 @@ fail_nomem:
+
+ static int vboxsf_parse_monolithic(struct fs_context *fc, void *data)
+ {
+- unsigned char *options = data;
+-
+- if (options && options[0] == VBSF_MOUNT_SIGNATURE_BYTE_0 &&
+- options[1] == VBSF_MOUNT_SIGNATURE_BYTE_1 &&
+- options[2] == VBSF_MOUNT_SIGNATURE_BYTE_2 &&
+- options[3] == VBSF_MOUNT_SIGNATURE_BYTE_3) {
++ if (data && !memcmp(data, VBSF_MOUNT_SIGNATURE, 4)) {
+ vbg_err("vboxsf: Old binary mount data not supported, remove obsolete mount.vboxsf and/or update your VBoxService.\n");
+ return -EINVAL;
+ }
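
Collapsing the four per-byte signature macros into an array turns the legacy-mount check into a single memcmp() over the first four option bytes. A standalone sketch of the same check, against hypothetical buffers rather than the real mount path:

  #include <stdio.h>
  #include <string.h>

  static const unsigned char VBSF_MOUNT_SIGNATURE[4] = "\000\377\376\375";

  /* Nonzero when the mount data starts with the legacy binary signature,
   * i.e. it came from an obsolete mount.vboxsf. */
  static int is_old_binary_mount_data(const void *data)
  {
          return data && !memcmp(data, VBSF_MOUNT_SIGNATURE, 4);
  }

  int main(void)
  {
          unsigned char legacy[8] = { 0x00, 0xff, 0xfe, 0xfd };
          const char *text_opts = "sharename,uid=1000";

          printf("legacy blob: %d\n", is_old_binary_mount_data(legacy));
          printf("text opts: %d\n", is_old_binary_mount_data(text_opts));
          return 0;
  }
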
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index 072ac6c1ef2b6..c095e713cf08f 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -682,7 +682,9 @@ struct perf_event {
+ /*
+ * timestamp shadows the actual context timing but it can
+ * be safely used in NMI interrupt context. It reflects the
+- * context time as it was when the event was last scheduled in.
++ * context time as it was when the event was last scheduled in,
++ * or when ctx_sched_in failed to schedule the event because we
++ * run out of PMC.
+ *
+ * ctx_time already accounts for ctx->timestamp. Therefore to
+ * compute ctx_time for a sample, simply add perf_clock().
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 29c7ccd5ae42e..b85b26d9ccefe 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1589,7 +1589,7 @@ extern struct pid *cad_pid;
+ #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
+ #define used_math() tsk_used_math(current)
+
+-static inline bool is_percpu_thread(void)
++static __always_inline bool is_percpu_thread(void)
+ {
+ #ifdef CONFIG_SMP
+ return (current->flags & PF_NO_SETAFFINITY) &&
+diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
+index 2be90a54a4044..7e58b44705705 100644
+--- a/include/net/pkt_sched.h
++++ b/include/net/pkt_sched.h
+@@ -11,6 +11,7 @@
+ #include <uapi/linux/pkt_sched.h>
+
+ #define DEFAULT_TX_QUEUE_LEN 1000
++#define STAB_SIZE_LOG_MAX 30
+
+ struct qdisc_walker {
+ int stop;
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index c677f934353af..c811519261710 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -3695,6 +3695,29 @@ static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx,
+ return 0;
+ }
+
++static inline bool event_update_userpage(struct perf_event *event)
++{
++ if (likely(!atomic_read(&event->mmap_count)))
++ return false;
++
++ perf_event_update_time(event);
++ perf_set_shadow_time(event, event->ctx);
++ perf_event_update_userpage(event);
++
++ return true;
++}
++
++static inline void group_update_userpage(struct perf_event *group_event)
++{
++ struct perf_event *event;
++
++ if (!event_update_userpage(group_event))
++ return;
++
++ for_each_sibling_event(event, group_event)
++ event_update_userpage(event);
++}
++
+ static int merge_sched_in(struct perf_event *event, void *data)
+ {
+ struct perf_event_context *ctx = event->ctx;
+@@ -3713,14 +3736,15 @@ static int merge_sched_in(struct perf_event *event, void *data)
+ }
+
+ if (event->state == PERF_EVENT_STATE_INACTIVE) {
++ *can_add_hw = 0;
+ if (event->attr.pinned) {
+ perf_cgroup_event_disable(event, ctx);
+ perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
++ } else {
++ ctx->rotate_necessary = 1;
++ perf_mux_hrtimer_restart(cpuctx);
++ group_update_userpage(event);
+ }
+-
+- *can_add_hw = 0;
+- ctx->rotate_necessary = 1;
+- perf_mux_hrtimer_restart(cpuctx);
+ }
+
+ return 0;
+@@ -6239,6 +6263,8 @@ accounting:
+
+ ring_buffer_attach(event, rb);
+
++ perf_event_update_time(event);
++ perf_set_shadow_time(event, event->ctx);
+ perf_event_init_userpage(event);
+ perf_event_update_userpage(event);
+ } else {
+diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
+index eb2b5404806c6..d36168baf6776 100644
+--- a/net/ipv6/netfilter/ip6_tables.c
++++ b/net/ipv6/netfilter/ip6_tables.c
+@@ -273,6 +273,7 @@ ip6t_do_table(struct sk_buff *skb,
+ * things we don't know, ie. tcp syn flag or ports). If the
+ * rule is also a fragment-specific rule, non-fragments won't
+ * match it. */
++ acpar.fragoff = 0;
+ acpar.hotdrop = false;
+ acpar.state = state;
+
+diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
+index 620ecf922408b..870c8eafef929 100644
+--- a/net/mac80211/mesh_pathtbl.c
++++ b/net/mac80211/mesh_pathtbl.c
+@@ -60,7 +60,10 @@ static struct mesh_table *mesh_table_alloc(void)
+ atomic_set(&newtbl->entries, 0);
+ spin_lock_init(&newtbl->gates_lock);
+ spin_lock_init(&newtbl->walk_lock);
+- rhashtable_init(&newtbl->rhead, &mesh_rht_params);
++ if (rhashtable_init(&newtbl->rhead, &mesh_rht_params)) {
++ kfree(newtbl);
++ return NULL;
++ }
+
+ return newtbl;
+ }
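
The fixed allocation follows the standard pattern for a fallible init call: if rhashtable_init() fails, the half-constructed table must be freed and NULL returned instead of handing the caller a broken object. A generic sketch of that shape, with a hypothetical init function in place of rhashtable_init():

  #include <stdlib.h>

  struct table {
          int *slots;
          int nslots;
  };

  /* Stand-in for rhashtable_init(): can fail, e.g. on allocation. */
  static int table_init(struct table *t, int nslots)
  {
          t->slots = calloc(nslots, sizeof(*t->slots));
          if (!t->slots)
                  return -1;
          t->nslots = nslots;
          return 0;
  }

  /* Mirrors the fixed mesh_table_alloc(): propagate init failure and
   * free the outer allocation rather than returning a broken object. */
  static struct table *table_alloc(int nslots)
  {
          struct table *t = malloc(sizeof(*t));

          if (!t)
                  return NULL;
          if (table_init(t, nslots)) {
                  free(t);
                  return NULL;
          }
          return t;
  }

  int main(void)
  {
          struct table *t = table_alloc(16);

          if (t) {
                  free(t->slots);
                  free(t);
          }
          return 0;
  }
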
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 38b5695c2a0c8..b7979c0bffd0f 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -4064,7 +4064,8 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
+ if (!bssid)
+ return false;
+ if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
+- ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
++ ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2) ||
++ !is_valid_ether_addr(hdr->addr2))
+ return false;
+ if (ieee80211_is_beacon(hdr->frame_control))
+ return true;
+diff --git a/net/netfilter/nf_nat_masquerade.c b/net/netfilter/nf_nat_masquerade.c
+index 8e8a65d46345b..acd73f717a088 100644
+--- a/net/netfilter/nf_nat_masquerade.c
++++ b/net/netfilter/nf_nat_masquerade.c
+@@ -9,8 +9,19 @@
+
+ #include <net/netfilter/nf_nat_masquerade.h>
+
++struct masq_dev_work {
++ struct work_struct work;
++ struct net *net;
++ union nf_inet_addr addr;
++ int ifindex;
++ int (*iter)(struct nf_conn *i, void *data);
++};
++
++#define MAX_MASQ_WORKER_COUNT 16
++
+ static DEFINE_MUTEX(masq_mutex);
+ static unsigned int masq_refcnt __read_mostly;
++static atomic_t masq_worker_count __read_mostly;
+
+ unsigned int
+ nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
+@@ -63,13 +74,71 @@ nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
+ }
+ EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4);
+
+-static int device_cmp(struct nf_conn *i, void *ifindex)
++static void iterate_cleanup_work(struct work_struct *work)
++{
++ struct masq_dev_work *w;
++
++ w = container_of(work, struct masq_dev_work, work);
++
++ nf_ct_iterate_cleanup_net(w->net, w->iter, (void *)w, 0, 0);
++
++ put_net(w->net);
++ kfree(w);
++ atomic_dec(&masq_worker_count);
++ module_put(THIS_MODULE);
++}
++
++/* Iterate conntrack table in the background and remove conntrack entries
++ * that use the device/address being removed.
++ *
++ * In case too many work items have been queued already or memory allocation
++ * fails iteration is skipped, conntrack entries will time out eventually.
++ */
++static void nf_nat_masq_schedule(struct net *net, union nf_inet_addr *addr,
++ int ifindex,
++ int (*iter)(struct nf_conn *i, void *data),
++ gfp_t gfp_flags)
++{
++ struct masq_dev_work *w;
++
++ if (atomic_read(&masq_worker_count) > MAX_MASQ_WORKER_COUNT)
++ return;
++
++ net = maybe_get_net(net);
++ if (!net)
++ return;
++
++ if (!try_module_get(THIS_MODULE))
++ goto err_module;
++
++ w = kzalloc(sizeof(*w), gfp_flags);
++ if (w) {
++ /* We can overshoot MAX_MASQ_WORKER_COUNT, no big deal */
++ atomic_inc(&masq_worker_count);
++
++ INIT_WORK(&w->work, iterate_cleanup_work);
++ w->ifindex = ifindex;
++ w->net = net;
++ w->iter = iter;
++ if (addr)
++ w->addr = *addr;
++ schedule_work(&w->work);
++ return;
++ }
++
++ module_put(THIS_MODULE);
++ err_module:
++ put_net(net);
++}
++
++static int device_cmp(struct nf_conn *i, void *arg)
+ {
+ const struct nf_conn_nat *nat = nfct_nat(i);
++ const struct masq_dev_work *w = arg;
+
+ if (!nat)
+ return 0;
+- return nat->masq_index == (int)(long)ifindex;
++ return nat->masq_index == w->ifindex;
+ }
+
+ static int masq_device_event(struct notifier_block *this,
+@@ -85,8 +154,8 @@ static int masq_device_event(struct notifier_block *this,
+ * and forget them.
+ */
+
+- nf_ct_iterate_cleanup_net(net, device_cmp,
+- (void *)(long)dev->ifindex, 0, 0);
++ nf_nat_masq_schedule(net, NULL, dev->ifindex,
++ device_cmp, GFP_KERNEL);
+ }
+
+ return NOTIFY_DONE;
+@@ -94,35 +163,45 @@ static int masq_device_event(struct notifier_block *this,
+
+ static int inet_cmp(struct nf_conn *ct, void *ptr)
+ {
+- struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+- struct net_device *dev = ifa->ifa_dev->dev;
+ struct nf_conntrack_tuple *tuple;
++ struct masq_dev_work *w = ptr;
+
+- if (!device_cmp(ct, (void *)(long)dev->ifindex))
++ if (!device_cmp(ct, ptr))
+ return 0;
+
+ tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+
+- return ifa->ifa_address == tuple->dst.u3.ip;
++ return nf_inet_addr_cmp(&w->addr, &tuple->dst.u3);
+ }
+
+ static int masq_inet_event(struct notifier_block *this,
+ unsigned long event,
+ void *ptr)
+ {
+- struct in_device *idev = ((struct in_ifaddr *)ptr)->ifa_dev;
+- struct net *net = dev_net(idev->dev);
++ const struct in_ifaddr *ifa = ptr;
++ const struct in_device *idev;
++ const struct net_device *dev;
++ union nf_inet_addr addr;
++
++ if (event != NETDEV_DOWN)
++ return NOTIFY_DONE;
+
+ /* The masq_dev_notifier will catch the case of the device going
+ * down. So if the inetdev is dead and being destroyed we have
+ * no work to do. Otherwise this is an individual address removal
+ * and we have to perform the flush.
+ */
++ idev = ifa->ifa_dev;
+ if (idev->dead)
+ return NOTIFY_DONE;
+
+- if (event == NETDEV_DOWN)
+- nf_ct_iterate_cleanup_net(net, inet_cmp, ptr, 0, 0);
++ memset(&addr, 0, sizeof(addr));
++
++ addr.ip = ifa->ifa_address;
++
++ dev = idev->dev;
++ nf_nat_masq_schedule(dev_net(idev->dev), &addr, dev->ifindex,
++ inet_cmp, GFP_KERNEL);
+
+ return NOTIFY_DONE;
+ }
+@@ -136,8 +215,6 @@ static struct notifier_block masq_inet_notifier = {
+ };
+
+ #if IS_ENABLED(CONFIG_IPV6)
+-static atomic_t v6_worker_count __read_mostly;
+-
+ static int
+ nat_ipv6_dev_get_saddr(struct net *net, const struct net_device *dev,
+ const struct in6_addr *daddr, unsigned int srcprefs,
+@@ -187,40 +264,6 @@ nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
+ }
+ EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6);
+
+-struct masq_dev_work {
+- struct work_struct work;
+- struct net *net;
+- struct in6_addr addr;
+- int ifindex;
+-};
+-
+-static int inet6_cmp(struct nf_conn *ct, void *work)
+-{
+- struct masq_dev_work *w = (struct masq_dev_work *)work;
+- struct nf_conntrack_tuple *tuple;
+-
+- if (!device_cmp(ct, (void *)(long)w->ifindex))
+- return 0;
+-
+- tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+-
+- return ipv6_addr_equal(&w->addr, &tuple->dst.u3.in6);
+-}
+-
+-static void iterate_cleanup_work(struct work_struct *work)
+-{
+- struct masq_dev_work *w;
+-
+- w = container_of(work, struct masq_dev_work, work);
+-
+- nf_ct_iterate_cleanup_net(w->net, inet6_cmp, (void *)w, 0, 0);
+-
+- put_net(w->net);
+- kfree(w);
+- atomic_dec(&v6_worker_count);
+- module_put(THIS_MODULE);
+-}
+-
+ /* atomic notifier; can't call nf_ct_iterate_cleanup_net (it can sleep).
+ *
+ * Defer it to the system workqueue.
+@@ -233,36 +276,19 @@ static int masq_inet6_event(struct notifier_block *this,
+ {
+ struct inet6_ifaddr *ifa = ptr;
+ const struct net_device *dev;
+- struct masq_dev_work *w;
+- struct net *net;
++ union nf_inet_addr addr;
+
+- if (event != NETDEV_DOWN || atomic_read(&v6_worker_count) >= 16)
++ if (event != NETDEV_DOWN)
+ return NOTIFY_DONE;
+
+ dev = ifa->idev->dev;
+- net = maybe_get_net(dev_net(dev));
+- if (!net)
+- return NOTIFY_DONE;
+
+- if (!try_module_get(THIS_MODULE))
+- goto err_module;
++ memset(&addr, 0, sizeof(addr));
+
+- w = kmalloc(sizeof(*w), GFP_ATOMIC);
+- if (w) {
+- atomic_inc(&v6_worker_count);
+-
+- INIT_WORK(&w->work, iterate_cleanup_work);
+- w->ifindex = dev->ifindex;
+- w->net = net;
+- w->addr = ifa->addr;
+- schedule_work(&w->work);
++ addr.in6 = ifa->addr;
+
+- return NOTIFY_DONE;
+- }
+-
+- module_put(THIS_MODULE);
+- err_module:
+- put_net(net);
++ nf_nat_masq_schedule(dev_net(dev), &addr, dev->ifindex, inet_cmp,
++ GFP_ATOMIC);
+ return NOTIFY_DONE;
+ }
+
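The restructuring funnels the IPv4 and IPv6 notifiers through one nf_nat_masq_schedule() helper: the conntrack walk may sleep, so it is packaged into a work item, the number of in-flight jobs is soft-capped at MAX_MASQ_WORKER_COUNT (overshooting is tolerated; skipped entries simply time out), and the address is widened to union nf_inet_addr so a single inet_cmp() serves both families. A compressed userspace sketch of the capped-deferral pattern -- the names are stand-ins, and the "work" runs inline here instead of on a workqueue:

  #include <stdio.h>
  #include <stdlib.h>

  #define MAX_WORKERS 16

  static int worker_count; /* stands in for atomic_t masq_worker_count */

  struct cleanup_work {
          int ifindex;
  };

  /* Stand-in for the workqueue callback: walk, clean, drop references. */
  static void run_work(struct cleanup_work *w)
  {
          printf("cleanup conntrack entries for ifindex %d\n", w->ifindex);
          worker_count--;
          free(w);
  }

  /* Mirrors nf_nat_masq_schedule(): skip the walk when too many jobs
   * are queued or allocation fails -- entries will time out anyway. */
  static void schedule_cleanup(int ifindex)
  {
          struct cleanup_work *w;

          if (worker_count > MAX_WORKERS)
                  return;
          w = calloc(1, sizeof(*w));
          if (!w)
                  return;
          worker_count++; /* may overshoot the cap; tolerated */
          w->ifindex = ifindex;
          run_work(w); /* kernel: schedule_work(&w->work) */
  }

  int main(void)
  {
          schedule_cleanup(3);
          return 0;
  }
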
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 54a8c363bcdda..7b24582a8a164 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -513,6 +513,12 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
+ return stab;
+ }
+
++ if (s->size_log > STAB_SIZE_LOG_MAX ||
++ s->cell_log > STAB_SIZE_LOG_MAX) {
++ NL_SET_ERR_MSG(extack, "Invalid logarithmic size of size table");
++ return ERR_PTR(-EINVAL);
++ }
++
+ stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
+ if (!stab)
+ return ERR_PTR(-ENOMEM);
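
STAB_SIZE_LOG_MAX (30) bounds both netlink-supplied logarithms before they are ever used as shift counts; without the check, a value of 32 or more would make a later 1 << log shift undefined behavior on a 32-bit type. The guard in miniature:

  #include <stdio.h>

  #define STAB_SIZE_LOG_MAX 30

  /* Mirrors the qdisc_get_stab() check: reject logs that would later
   * overflow a 32-bit shift such as (1 << cell_log). */
  static int stab_logs_valid(unsigned int size_log, unsigned int cell_log)
  {
          return size_log <= STAB_SIZE_LOG_MAX &&
                 cell_log <= STAB_SIZE_LOG_MAX;
  }

  int main(void)
  {
          printf("8/4: %d\n", stab_logs_valid(8, 4));   /* accepted */
          printf("31/4: %d\n", stab_logs_valid(31, 4)); /* rejected */
          return 0;
  }
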
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index 2770e8179983a..25548555d8d79 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -847,6 +847,11 @@ static int create_sdw_dailink(struct device *dev, int *be_index,
+ cpus + *cpu_id, cpu_dai_num,
+ codecs, codec_num,
+ NULL, &sdw_ops);
++ /*
++ * SoundWire DAILINKs use 'stream' functions and Bank Switch operations
++ * based on wait_for_completion(), tag them as 'nonatomic'.
++ */
++ dai_links[*be_index].nonatomic = true;
+
+ ret = set_codec_init_func(link, dai_links + (*be_index)++,
+ playback, group_id);
+diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c
+index adc7c37145d64..feced9077dfe1 100644
+--- a/sound/soc/sof/core.c
++++ b/sound/soc/sof/core.c
+@@ -354,7 +354,6 @@ int snd_sof_device_remove(struct device *dev)
+ dev_warn(dev, "error: %d failed to prepare DSP for device removal",
+ ret);
+
+- snd_sof_fw_unload(sdev);
+ snd_sof_ipc_free(sdev);
+ snd_sof_free_debug(sdev);
+ snd_sof_free_trace(sdev);
+@@ -377,8 +376,7 @@ int snd_sof_device_remove(struct device *dev)
+ snd_sof_remove(sdev);
+
+ /* release firmware */
+- release_firmware(pdata->fw);
+- pdata->fw = NULL;
++ snd_sof_fw_unload(sdev);
+
+ return 0;
+ }
+diff --git a/sound/soc/sof/loader.c b/sound/soc/sof/loader.c
+index ba9ed66f98bc7..2d5c3fc93bc5c 100644
+--- a/sound/soc/sof/loader.c
++++ b/sound/soc/sof/loader.c
+@@ -830,5 +830,7 @@ EXPORT_SYMBOL(snd_sof_run_firmware);
+ void snd_sof_fw_unload(struct snd_sof_dev *sdev)
+ {
+ /* TODO: support module unloading at runtime */
++ release_firmware(sdev->pdata->fw);
++ sdev->pdata->fw = NULL;
+ }
+ EXPORT_SYMBOL(snd_sof_fw_unload);