Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.10 commit in: /
Date: Sat, 09 Jan 2021 17:58:36
Message-Id: 1610215057.6fb3f2bc4509c9c6cba565cc2c20b0fff199e0a9.mpagano@gentoo
1 commit: 6fb3f2bc4509c9c6cba565cc2c20b0fff199e0a9
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Sat Jan 9 17:57:37 2021 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Sat Jan 9 17:57:37 2021 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=6fb3f2bc
7
8 Linux patch 5.10.6
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1005_linux-5.10.6.patch | 1736 +++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 1740 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 2fb3d39..4881039 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -63,6 +63,10 @@ Patch: 1004_linux-5.10.5.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.10.5
23
24 +Patch: 1005_linux-5.10.6.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.10.6
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1005_linux-5.10.6.patch b/1005_linux-5.10.6.patch
33 new file mode 100644
34 index 0000000..f3e7f57
35 --- /dev/null
36 +++ b/1005_linux-5.10.6.patch
37 @@ -0,0 +1,1736 @@
38 +diff --git a/Documentation/devicetree/bindings/rtc/rtc.yaml b/Documentation/devicetree/bindings/rtc/rtc.yaml
39 +index 8acd2de3de3ad..d30dc045aac64 100644
40 +--- a/Documentation/devicetree/bindings/rtc/rtc.yaml
41 ++++ b/Documentation/devicetree/bindings/rtc/rtc.yaml
42 +@@ -63,6 +63,11 @@ properties:
43 + description:
44 + Enables wake up of host system on alarm.
45 +
46 ++ reset-source:
47 ++ $ref: /schemas/types.yaml#/definitions/flag
48 ++ description:
49 ++ The RTC is able to reset the machine.
50 ++
51 + additionalProperties: true
52 +
53 + ...
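
The new reset-source property is a flag, so drivers probe for it with device_property_read_bool(); the in-tree consumer is the rtc-pcf2127.c hunk further down in this patch. A minimal consumer sketch (helper name illustrative, not part of the patch):

	#include <linux/property.h>

	/* Sketch: expose reset/watchdog support only when the firmware
	 * node carries the new "reset-source" flag (illustrative helper;
	 * see the rtc-pcf2127.c hunk below for the in-tree use). */
	static bool rtc_can_reset_machine(struct device *dev)
	{
		return device_property_read_bool(dev, "reset-source");
	}
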
54 +diff --git a/Makefile b/Makefile
55 +index bb431fd473d2c..2b3f0d06b0054 100644
56 +--- a/Makefile
57 ++++ b/Makefile
58 +@@ -1,7 +1,7 @@
59 + # SPDX-License-Identifier: GPL-2.0
60 + VERSION = 5
61 + PATCHLEVEL = 10
62 +-SUBLEVEL = 5
63 ++SUBLEVEL = 6
64 + EXTRAVERSION =
65 + NAME = Kleptomaniac Octopus
66 +
67 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
68 +index 30c6b9edddb50..0f7749e9424d4 100644
69 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
70 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
71 +@@ -2278,8 +2278,7 @@ void amdgpu_dm_update_connector_after_detect(
72 +
73 + drm_connector_update_edid_property(connector,
74 + aconnector->edid);
75 +- aconnector->num_modes = drm_add_edid_modes(connector, aconnector->edid);
76 +- drm_connector_list_update(connector);
77 ++ drm_add_edid_modes(connector, aconnector->edid);
78 +
79 + if (aconnector->dc_link->aux_mode)
80 + drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
81 +diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
82 +index e08684e34078a..91b37b76618d2 100644
83 +--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
84 ++++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
85 +@@ -2622,11 +2622,22 @@ static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
86 + return true;
87 + }
88 +
89 ++/*
90 ++ * Display WA #22010492432: tgl
91 ++ * Program half of the nominal DCO divider fraction value.
92 ++ */
93 ++static bool
94 ++tgl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
95 ++{
96 ++ return IS_TIGERLAKE(i915) && i915->dpll.ref_clks.nssc == 38400;
97 ++}
98 ++
99 + static int __cnl_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
100 + const struct intel_shared_dpll *pll,
101 + int ref_clock)
102 + {
103 + const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
104 ++ u32 dco_fraction;
105 + u32 p0, p1, p2, dco_freq;
106 +
107 + p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
108 +@@ -2669,8 +2680,13 @@ static int __cnl_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
109 + dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
110 + ref_clock;
111 +
112 +- dco_freq += (((pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
113 +- DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000;
114 ++ dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
115 ++ DPLL_CFGCR0_DCO_FRACTION_SHIFT;
116 ++
117 ++ if (tgl_combo_pll_div_frac_wa_needed(dev_priv))
118 ++ dco_fraction *= 2;
119 ++
120 ++ dco_freq += (dco_fraction * ref_clock) / 0x8000;
121 +
122 + if (drm_WARN_ON(&dev_priv->drm, p0 == 0 || p1 == 0 || p2 == 0))
123 + return 0;
124 +@@ -2948,16 +2964,6 @@ static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
125 + /* the following params are unused */
126 + };
127 +
128 +-/*
129 +- * Display WA #22010492432: tgl
130 +- * Divide the nominal .dco_fraction value by 2.
131 +- */
132 +-static const struct skl_wrpll_params tgl_tbt_pll_38_4MHz_values = {
133 +- .dco_integer = 0x54, .dco_fraction = 0x1800,
134 +- /* the following params are unused */
135 +- .pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
136 +-};
137 +-
138 + static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
139 + struct skl_wrpll_params *pll_params)
140 + {
141 +@@ -2991,14 +2997,12 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
142 + MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
143 + fallthrough;
144 + case 19200:
145 ++ case 38400:
146 + *pll_params = tgl_tbt_pll_19_2MHz_values;
147 + break;
148 + case 24000:
149 + *pll_params = tgl_tbt_pll_24MHz_values;
150 + break;
151 +- case 38400:
152 +- *pll_params = tgl_tbt_pll_38_4MHz_values;
153 +- break;
154 + }
155 + } else {
156 + switch (dev_priv->dpll.ref_clks.nssc) {
157 +@@ -3065,9 +3069,14 @@ static void icl_calc_dpll_state(struct drm_i915_private *i915,
158 + const struct skl_wrpll_params *pll_params,
159 + struct intel_dpll_hw_state *pll_state)
160 + {
161 ++ u32 dco_fraction = pll_params->dco_fraction;
162 ++
163 + memset(pll_state, 0, sizeof(*pll_state));
164 +
165 +- pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(pll_params->dco_fraction) |
166 ++ if (tgl_combo_pll_div_frac_wa_needed(i915))
167 ++ dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
168 ++
169 ++ pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
170 + pll_params->dco_integer;
171 +
172 + pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
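
The workaround above is symmetric: icl_calc_dpll_state() halves the nominal DCO fraction before writing CFGCR0, and __cnl_ddi_wrpll_get_freq() doubles the stored value before computing the frequency, so the reported dco_freq is unchanged. A standalone sketch of that round trip (function names illustrative; the formula matches the hunks):

	/* Illustrative sketch of the WA #22010492432 round trip. */
	static u32 wa_pack_dco_fraction(u32 nominal_frac, bool wa_needed)
	{
		/* value programmed into DPLL_CFGCR0_DCO_FRACTION */
		return wa_needed ? DIV_ROUND_CLOSEST(nominal_frac, 2)
				 : nominal_frac;
	}

	static u64 wa_dco_freq(u32 dco_integer, u32 stored_frac,
			       u32 ref_clock, bool wa_needed)
	{
		u32 frac = wa_needed ? stored_frac * 2 : stored_frac;

		/* integer part plus 15-bit fraction, as in
		 * __cnl_ddi_wrpll_get_freq() above */
		return (u64)dco_integer * ref_clock +
		       ((u64)frac * ref_clock) / 0x8000;
	}
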
173 +diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
174 +index 4a041511b70ec..76b9c436edcd2 100644
175 +--- a/drivers/infiniband/core/device.c
176 ++++ b/drivers/infiniband/core/device.c
177 +@@ -1177,25 +1177,6 @@ out:
178 + return ret;
179 + }
180 +
181 +-static void setup_dma_device(struct ib_device *device,
182 +- struct device *dma_device)
183 +-{
184 +- /*
185 +- * If the caller does not provide a DMA capable device then the IB
186 +- * device will be used. In this case the caller should fully setup the
187 +- * ibdev for DMA. This usually means using dma_virt_ops.
188 +- */
189 +-#ifdef CONFIG_DMA_VIRT_OPS
190 +- if (!dma_device) {
191 +- device->dev.dma_ops = &dma_virt_ops;
192 +- dma_device = &device->dev;
193 +- }
194 +-#endif
195 +- WARN_ON(!dma_device);
196 +- device->dma_device = dma_device;
197 +- WARN_ON(!device->dma_device->dma_parms);
198 +-}
199 +-
200 + /*
201 + * setup_device() allocates memory and sets up data that requires calling the
202 + * device ops, this is the only reason these actions are not done during
203 +@@ -1341,7 +1322,14 @@ int ib_register_device(struct ib_device *device, const char *name,
204 + if (ret)
205 + return ret;
206 +
207 +- setup_dma_device(device, dma_device);
208 ++ /*
209 ++ * If the caller does not provide a DMA capable device then the IB core
210 ++ * will set up ib_sge and scatterlist structures that stash the kernel
211 ++ * virtual address into the address field.
212 ++ */
213 ++ WARN_ON(dma_device && !dma_device->dma_parms);
214 ++ device->dma_device = dma_device;
215 ++
216 + ret = setup_device(device);
217 + if (ret)
218 + return ret;
219 +@@ -2676,6 +2664,21 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
220 + }
221 + EXPORT_SYMBOL(ib_set_device_ops);
222 +
223 ++#ifdef CONFIG_INFINIBAND_VIRT_DMA
224 ++int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents)
225 ++{
226 ++ struct scatterlist *s;
227 ++ int i;
228 ++
229 ++ for_each_sg(sg, s, nents, i) {
230 ++ sg_dma_address(s) = (uintptr_t)sg_virt(s);
231 ++ sg_dma_len(s) = s->length;
232 ++ }
233 ++ return nents;
234 ++}
235 ++EXPORT_SYMBOL(ib_dma_virt_map_sg);
236 ++#endif /* CONFIG_INFINIBAND_VIRT_DMA */
237 ++
238 + static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
239 + [RDMA_NL_LS_OP_RESOLVE] = {
240 + .doit = ib_nl_handle_resolve_resp,
241 +diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
242 +index 13f43ab7220b0..a96030b784eb2 100644
243 +--- a/drivers/infiniband/core/rw.c
244 ++++ b/drivers/infiniband/core/rw.c
245 +@@ -285,8 +285,11 @@ static void rdma_rw_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
246 + static int rdma_rw_map_sg(struct ib_device *dev, struct scatterlist *sg,
247 + u32 sg_cnt, enum dma_data_direction dir)
248 + {
249 +- if (is_pci_p2pdma_page(sg_page(sg)))
250 ++ if (is_pci_p2pdma_page(sg_page(sg))) {
251 ++ if (WARN_ON_ONCE(ib_uses_virt_dma(dev)))
252 ++ return 0;
253 + return pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir);
254 ++ }
255 + return ib_dma_map_sg(dev, sg, sg_cnt, dir);
256 + }
257 +
258 +diff --git a/drivers/infiniband/sw/rdmavt/Kconfig b/drivers/infiniband/sw/rdmavt/Kconfig
259 +index c8e268082952b..0df48b3a6b56c 100644
260 +--- a/drivers/infiniband/sw/rdmavt/Kconfig
261 ++++ b/drivers/infiniband/sw/rdmavt/Kconfig
262 +@@ -4,6 +4,5 @@ config INFINIBAND_RDMAVT
263 + depends on INFINIBAND_VIRT_DMA
264 + depends on X86_64
265 + depends on PCI
266 +- select DMA_VIRT_OPS
267 + help
268 + This is a common software verbs provider for RDMA networks.
269 +diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
270 +index 8490fdb9c91e5..90fc234f489ac 100644
271 +--- a/drivers/infiniband/sw/rdmavt/mr.c
272 ++++ b/drivers/infiniband/sw/rdmavt/mr.c
273 +@@ -324,8 +324,6 @@ static void __rvt_free_mr(struct rvt_mr *mr)
274 + * @acc: access flags
275 + *
276 + * Return: the memory region on success, otherwise returns an errno.
277 +- * Note that all DMA addresses should be created via the functions in
278 +- * struct dma_virt_ops.
279 + */
280 + struct ib_mr *rvt_get_dma_mr(struct ib_pd *pd, int acc)
281 + {
282 +@@ -766,7 +764,7 @@ int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
283 +
284 + /*
285 + * We use LKEY == zero for kernel virtual addresses
286 +- * (see rvt_get_dma_mr() and dma_virt_ops).
287 ++ * (see rvt_get_dma_mr()).
288 + */
289 + if (sge->lkey == 0) {
290 + struct rvt_dev_info *dev = ib_to_rvt(pd->ibpd.device);
291 +@@ -877,7 +875,7 @@ int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
292 +
293 + /*
294 + * We use RKEY == zero for kernel virtual addresses
295 +- * (see rvt_get_dma_mr() and dma_virt_ops).
296 ++ * (see rvt_get_dma_mr()).
297 + */
298 + rcu_read_lock();
299 + if (rkey == 0) {
300 +diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
301 +index 670a9623b46e1..d1bbe66610cfe 100644
302 +--- a/drivers/infiniband/sw/rdmavt/vt.c
303 ++++ b/drivers/infiniband/sw/rdmavt/vt.c
304 +@@ -524,7 +524,6 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
305 + int rvt_register_device(struct rvt_dev_info *rdi)
306 + {
307 + int ret = 0, i;
308 +- u64 dma_mask;
309 +
310 + if (!rdi)
311 + return -EINVAL;
312 +@@ -579,13 +578,6 @@ int rvt_register_device(struct rvt_dev_info *rdi)
313 + /* Completion queues */
314 + spin_lock_init(&rdi->n_cqs_lock);
315 +
316 +- /* DMA Operations */
317 +- rdi->ibdev.dev.dma_parms = rdi->ibdev.dev.parent->dma_parms;
318 +- dma_mask = IS_ENABLED(CONFIG_64BIT) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
319 +- ret = dma_coerce_mask_and_coherent(&rdi->ibdev.dev, dma_mask);
320 +- if (ret)
321 +- goto bail_wss;
322 +-
323 + /* Protection Domain */
324 + spin_lock_init(&rdi->n_pds_lock);
325 + rdi->n_pds_allocated = 0;
326 +diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig
327 +index 8810bfa680495..4521490667925 100644
328 +--- a/drivers/infiniband/sw/rxe/Kconfig
329 ++++ b/drivers/infiniband/sw/rxe/Kconfig
330 +@@ -5,7 +5,6 @@ config RDMA_RXE
331 + depends on INFINIBAND_VIRT_DMA
332 + select NET_UDP_TUNNEL
333 + select CRYPTO_CRC32
334 +- select DMA_VIRT_OPS
335 + help
336 + This driver implements the InfiniBand RDMA transport over
337 + the Linux network stack. It enables a system with a
338 +diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
339 +index 34bef7d8e6b41..943914c2a50c7 100644
340 +--- a/drivers/infiniband/sw/rxe/rxe_net.c
341 ++++ b/drivers/infiniband/sw/rxe/rxe_net.c
342 +@@ -20,18 +20,6 @@
343 +
344 + static struct rxe_recv_sockets recv_sockets;
345 +
346 +-struct device *rxe_dma_device(struct rxe_dev *rxe)
347 +-{
348 +- struct net_device *ndev;
349 +-
350 +- ndev = rxe->ndev;
351 +-
352 +- if (is_vlan_dev(ndev))
353 +- ndev = vlan_dev_real_dev(ndev);
354 +-
355 +- return ndev->dev.parent;
356 +-}
357 +-
358 + int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
359 + {
360 + int err;
361 +diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
362 +index f9c832e82552f..512868c230238 100644
363 +--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
364 ++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
365 +@@ -1118,23 +1118,15 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
366 + int err;
367 + struct ib_device *dev = &rxe->ib_dev;
368 + struct crypto_shash *tfm;
369 +- u64 dma_mask;
370 +
371 + strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));
372 +
373 + dev->node_type = RDMA_NODE_IB_CA;
374 + dev->phys_port_cnt = 1;
375 + dev->num_comp_vectors = num_possible_cpus();
376 +- dev->dev.parent = rxe_dma_device(rxe);
377 + dev->local_dma_lkey = 0;
378 + addrconf_addr_eui48((unsigned char *)&dev->node_guid,
379 + rxe->ndev->dev_addr);
380 +- dev->dev.dma_parms = &rxe->dma_parms;
381 +- dma_set_max_seg_size(&dev->dev, UINT_MAX);
382 +- dma_mask = IS_ENABLED(CONFIG_64BIT) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
383 +- err = dma_coerce_mask_and_coherent(&dev->dev, dma_mask);
384 +- if (err)
385 +- return err;
386 +
387 + dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
388 + | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
389 +diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
390 +index 3414b341b7091..4bf5d85a1ab3c 100644
391 +--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
392 ++++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
393 +@@ -352,7 +352,6 @@ struct rxe_port {
394 + struct rxe_dev {
395 + struct ib_device ib_dev;
396 + struct ib_device_attr attr;
397 +- struct device_dma_parameters dma_parms;
398 + int max_ucontext;
399 + int max_inline_data;
400 + struct mutex usdev_lock;
401 +diff --git a/drivers/infiniband/sw/siw/Kconfig b/drivers/infiniband/sw/siw/Kconfig
402 +index 3450ba5081df5..1b5105cbabaee 100644
403 +--- a/drivers/infiniband/sw/siw/Kconfig
404 ++++ b/drivers/infiniband/sw/siw/Kconfig
405 +@@ -2,7 +2,6 @@ config RDMA_SIW
406 + tristate "Software RDMA over TCP/IP (iWARP) driver"
407 + depends on INET && INFINIBAND && LIBCRC32C
408 + depends on INFINIBAND_VIRT_DMA
409 +- select DMA_VIRT_OPS
410 + help
411 + This driver implements the iWARP RDMA transport over
412 + the Linux TCP/IP network stack. It enables a system with a
413 +diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
414 +index e9753831ac3f3..adda789962196 100644
415 +--- a/drivers/infiniband/sw/siw/siw.h
416 ++++ b/drivers/infiniband/sw/siw/siw.h
417 +@@ -69,7 +69,6 @@ struct siw_pd {
418 +
419 + struct siw_device {
420 + struct ib_device base_dev;
421 +- struct device_dma_parameters dma_parms;
422 + struct net_device *netdev;
423 + struct siw_dev_cap attrs;
424 +
425 +diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
426 +index 181e06c1c43d7..9d152e198a59b 100644
427 +--- a/drivers/infiniband/sw/siw/siw_main.c
428 ++++ b/drivers/infiniband/sw/siw/siw_main.c
429 +@@ -305,25 +305,8 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
430 + {
431 + struct siw_device *sdev = NULL;
432 + struct ib_device *base_dev;
433 +- struct device *parent = netdev->dev.parent;
434 +- u64 dma_mask;
435 + int rv;
436 +
437 +- if (!parent) {
438 +- /*
439 +- * The loopback device has no parent device,
440 +- * so it appears as a top-level device. To support
441 +- * loopback device connectivity, take this device
442 +- * as the parent device. Skip all other devices
443 +- * w/o parent device.
444 +- */
445 +- if (netdev->type != ARPHRD_LOOPBACK) {
446 +- pr_warn("siw: device %s error: no parent device\n",
447 +- netdev->name);
448 +- return NULL;
449 +- }
450 +- parent = &netdev->dev;
451 +- }
452 + sdev = ib_alloc_device(siw_device, base_dev);
453 + if (!sdev)
454 + return NULL;
455 +@@ -382,13 +365,6 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
456 + * per physical port.
457 + */
458 + base_dev->phys_port_cnt = 1;
459 +- base_dev->dev.parent = parent;
460 +- base_dev->dev.dma_parms = &sdev->dma_parms;
461 +- dma_set_max_seg_size(&base_dev->dev, UINT_MAX);
462 +- dma_mask = IS_ENABLED(CONFIG_64BIT) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
463 +- if (dma_coerce_mask_and_coherent(&base_dev->dev, dma_mask))
464 +- goto error;
465 +-
466 + base_dev->num_comp_vectors = num_possible_cpus();
467 +
468 + xa_init_flags(&sdev->qp_xa, XA_FLAGS_ALLOC1);
469 +@@ -430,7 +406,7 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
470 + atomic_set(&sdev->num_mr, 0);
471 + atomic_set(&sdev->num_pd, 0);
472 +
473 +- sdev->numa_node = dev_to_node(parent);
474 ++ sdev->numa_node = dev_to_node(&netdev->dev);
475 + spin_lock_init(&sdev->lock);
476 +
477 + return sdev;
478 +diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
479 +index 7900571fc85b3..c352217946455 100644
480 +--- a/drivers/mtd/nand/spi/core.c
481 ++++ b/drivers/mtd/nand/spi/core.c
482 +@@ -318,10 +318,6 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
483 + buf += ret;
484 + }
485 +
486 +- if (req->ooblen)
487 +- memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
488 +- req->ooblen);
489 +-
490 + return 0;
491 + }
492 +
493 +diff --git a/drivers/net/wireless/marvell/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c
494 +index 5934f71475477..173ccf79cbfcc 100644
495 +--- a/drivers/net/wireless/marvell/mwifiex/join.c
496 ++++ b/drivers/net/wireless/marvell/mwifiex/join.c
497 +@@ -877,6 +877,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
498 +
499 + memset(adhoc_start->ssid, 0, IEEE80211_MAX_SSID_LEN);
500 +
501 ++ if (req_ssid->ssid_len > IEEE80211_MAX_SSID_LEN)
502 ++ req_ssid->ssid_len = IEEE80211_MAX_SSID_LEN;
503 + memcpy(adhoc_start->ssid, req_ssid->ssid, req_ssid->ssid_len);
504 +
505 + mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: SSID = %s\n",
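
The clamp above bounds a user-influenced length before it reaches memcpy(): adhoc_start->ssid is a fixed IEEE80211_MAX_SSID_LEN (32) byte array. The patch clamps req_ssid->ssid_len itself rather than only the copy, presumably so later readers of the length see the bounded value; bounding just the copy would look like this (sketch only):

	/* Sketch: bounding only the copy with min_t(); the patch instead
	 * clamps req_ssid->ssid_len so subsequent users also see the
	 * bounded length. */
	memcpy(adhoc_start->ssid, req_ssid->ssid,
	       min_t(size_t, req_ssid->ssid_len, IEEE80211_MAX_SSID_LEN));
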
506 +diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
507 +index ae6620489457d..5c1e7cb7fe0de 100644
508 +--- a/drivers/nvme/target/rdma.c
509 ++++ b/drivers/nvme/target/rdma.c
510 +@@ -414,7 +414,8 @@ static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
511 + if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
512 + goto out_free_rsp;
513 +
514 +- r->req.p2p_client = &ndev->device->dev;
515 ++ if (!ib_uses_virt_dma(ndev->device))
516 ++ r->req.p2p_client = &ndev->device->dev;
517 + r->send_sge.length = sizeof(*r->req.cqe);
518 + r->send_sge.lkey = ndev->pd->local_dma_lkey;
519 +
520 +diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
521 +index 4d9711d51f8f3..f0a6861ff3aef 100644
522 +--- a/drivers/rtc/rtc-pcf2127.c
523 ++++ b/drivers/rtc/rtc-pcf2127.c
524 +@@ -331,6 +331,37 @@ static const struct watchdog_ops pcf2127_watchdog_ops = {
525 + .set_timeout = pcf2127_wdt_set_timeout,
526 + };
527 +
528 ++static int pcf2127_watchdog_init(struct device *dev, struct pcf2127 *pcf2127)
529 ++{
530 ++ u32 wdd_timeout;
531 ++ int ret;
532 ++
533 ++ if (!IS_ENABLED(CONFIG_WATCHDOG) ||
534 ++ !device_property_read_bool(dev, "reset-source"))
535 ++ return 0;
536 ++
537 ++ pcf2127->wdd.parent = dev;
538 ++ pcf2127->wdd.info = &pcf2127_wdt_info;
539 ++ pcf2127->wdd.ops = &pcf2127_watchdog_ops;
540 ++ pcf2127->wdd.min_timeout = PCF2127_WD_VAL_MIN;
541 ++ pcf2127->wdd.max_timeout = PCF2127_WD_VAL_MAX;
542 ++ pcf2127->wdd.timeout = PCF2127_WD_VAL_DEFAULT;
543 ++ pcf2127->wdd.min_hw_heartbeat_ms = 500;
544 ++ pcf2127->wdd.status = WATCHDOG_NOWAYOUT_INIT_STATUS;
545 ++
546 ++ watchdog_set_drvdata(&pcf2127->wdd, pcf2127);
547 ++
548 ++ /* Test if watchdog timer is started by bootloader */
549 ++ ret = regmap_read(pcf2127->regmap, PCF2127_REG_WD_VAL, &wdd_timeout);
550 ++ if (ret)
551 ++ return ret;
552 ++
553 ++ if (wdd_timeout)
554 ++ set_bit(WDOG_HW_RUNNING, &pcf2127->wdd.status);
555 ++
556 ++ return devm_watchdog_register_device(dev, &pcf2127->wdd);
557 ++}
558 ++
559 + /* Alarm */
560 + static int pcf2127_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
561 + {
562 +@@ -532,7 +563,6 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
563 + int alarm_irq, const char *name, bool has_nvmem)
564 + {
565 + struct pcf2127 *pcf2127;
566 +- u32 wdd_timeout;
567 + int ret = 0;
568 +
569 + dev_dbg(dev, "%s\n", __func__);
570 +@@ -571,17 +601,6 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
571 + pcf2127->rtc->ops = &pcf2127_rtc_alrm_ops;
572 + }
573 +
574 +- pcf2127->wdd.parent = dev;
575 +- pcf2127->wdd.info = &pcf2127_wdt_info;
576 +- pcf2127->wdd.ops = &pcf2127_watchdog_ops;
577 +- pcf2127->wdd.min_timeout = PCF2127_WD_VAL_MIN;
578 +- pcf2127->wdd.max_timeout = PCF2127_WD_VAL_MAX;
579 +- pcf2127->wdd.timeout = PCF2127_WD_VAL_DEFAULT;
580 +- pcf2127->wdd.min_hw_heartbeat_ms = 500;
581 +- pcf2127->wdd.status = WATCHDOG_NOWAYOUT_INIT_STATUS;
582 +-
583 +- watchdog_set_drvdata(&pcf2127->wdd, pcf2127);
584 +-
585 + if (has_nvmem) {
586 + struct nvmem_config nvmem_cfg = {
587 + .priv = pcf2127,
588 +@@ -611,19 +630,7 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
589 + return ret;
590 + }
591 +
592 +- /* Test if watchdog timer is started by bootloader */
593 +- ret = regmap_read(pcf2127->regmap, PCF2127_REG_WD_VAL, &wdd_timeout);
594 +- if (ret)
595 +- return ret;
596 +-
597 +- if (wdd_timeout)
598 +- set_bit(WDOG_HW_RUNNING, &pcf2127->wdd.status);
599 +-
600 +-#ifdef CONFIG_WATCHDOG
601 +- ret = devm_watchdog_register_device(dev, &pcf2127->wdd);
602 +- if (ret)
603 +- return ret;
604 +-#endif /* CONFIG_WATCHDOG */
605 ++ pcf2127_watchdog_init(dev, pcf2127);
606 +
607 + /*
608 + * Disable battery low/switch-over timestamp and interrupts.
609 +diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
610 +index 8df73bc2f8cb2..914a827a93ee8 100644
611 +--- a/drivers/scsi/ufs/ufs-mediatek.c
612 ++++ b/drivers/scsi/ufs/ufs-mediatek.c
613 +@@ -743,7 +743,7 @@ static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
614 + return ret;
615 + }
616 +
617 +-static void ufs_mtk_device_reset(struct ufs_hba *hba)
618 ++static int ufs_mtk_device_reset(struct ufs_hba *hba)
619 + {
620 + struct arm_smccc_res res;
621 +
622 +@@ -764,6 +764,8 @@ static void ufs_mtk_device_reset(struct ufs_hba *hba)
623 + usleep_range(10000, 15000);
624 +
625 + dev_info(hba->dev, "device reset done\n");
626 ++
627 ++ return 0;
628 + }
629 +
630 + static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
631 +diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
632 +index f9d6ef3565407..a244c8ae1b4eb 100644
633 +--- a/drivers/scsi/ufs/ufs-qcom.c
634 ++++ b/drivers/scsi/ufs/ufs-qcom.c
635 +@@ -1421,13 +1421,13 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
636 + *
637 + * Toggles the (optional) reset line to reset the attached device.
638 + */
639 +-static void ufs_qcom_device_reset(struct ufs_hba *hba)
640 ++static int ufs_qcom_device_reset(struct ufs_hba *hba)
641 + {
642 + struct ufs_qcom_host *host = ufshcd_get_variant(hba);
643 +
644 + /* reset gpio is optional */
645 + if (!host->device_reset)
646 +- return;
647 ++ return -EOPNOTSUPP;
648 +
649 + /*
650 + * The UFS device shall detect reset pulses of 1us, sleep for 10us to
651 +@@ -1438,6 +1438,8 @@ static void ufs_qcom_device_reset(struct ufs_hba *hba)
652 +
653 + gpiod_set_value_cansleep(host->device_reset, 0);
654 + usleep_range(10, 15);
655 ++
656 ++ return 0;
657 + }
658 +
659 + #if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
660 +diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
661 +index e0f00a42371c5..cd51553e522da 100644
662 +--- a/drivers/scsi/ufs/ufshcd.h
663 ++++ b/drivers/scsi/ufs/ufshcd.h
664 +@@ -318,7 +318,7 @@ struct ufs_hba_variant_ops {
665 + int (*resume)(struct ufs_hba *, enum ufs_pm_op);
666 + void (*dbg_register_dump)(struct ufs_hba *hba);
667 + int (*phy_initialization)(struct ufs_hba *);
668 +- void (*device_reset)(struct ufs_hba *hba);
669 ++ int (*device_reset)(struct ufs_hba *hba);
670 + void (*config_scaling_param)(struct ufs_hba *hba,
671 + struct devfreq_dev_profile *profile,
672 + void *data);
673 +@@ -1181,9 +1181,17 @@ static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
674 + static inline void ufshcd_vops_device_reset(struct ufs_hba *hba)
675 + {
676 + if (hba->vops && hba->vops->device_reset) {
677 +- hba->vops->device_reset(hba);
678 +- ufshcd_set_ufs_dev_active(hba);
679 +- ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, 0);
680 ++ int err = hba->vops->device_reset(hba);
681 ++
682 ++ if (!err) {
683 ++ ufshcd_set_ufs_dev_active(hba);
684 ++ if (ufshcd_is_wb_allowed(hba)) {
685 ++ hba->wb_enabled = false;
686 ++ hba->wb_buf_flush_enabled = false;
687 ++ }
688 ++ }
689 ++ if (err != -EOPNOTSUPP)
690 ++ ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, err);
691 + }
692 + }
693 +
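
With ->device_reset() now returning int, a variant driver reports "no reset line wired up" with -EOPNOTSUPP, which ufshcd_vops_device_reset() above deliberately keeps out of the dev_reset error history; any other return value is recorded. A minimal conforming implementation, modeled on the ufs-qcom hunk (driver and struct names hypothetical):

	/* Hypothetical variant driver: ->device_reset() under the new
	 * int-returning contract. */
	static int example_ufs_device_reset(struct ufs_hba *hba)
	{
		struct example_ufs_host *host = ufshcd_get_variant(hba);

		if (!host->reset_gpio)		/* optional GPIO absent */
			return -EOPNOTSUPP;

		/* the device detects reset pulses of >= 1us; 10us is safe */
		gpiod_set_value_cansleep(host->reset_gpio, 1);
		usleep_range(10, 15);
		gpiod_set_value_cansleep(host->reset_gpio, 0);
		usleep_range(10, 15);

		return 0;
	}
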
694 +diff --git a/fs/exec.c b/fs/exec.c
695 +index 547a2390baf54..ca89e0e3ef10f 100644
696 +--- a/fs/exec.c
697 ++++ b/fs/exec.c
698 +@@ -965,8 +965,8 @@ EXPORT_SYMBOL(read_code);
699 +
700 + /*
701 + * Maps the mm_struct mm into the current task struct.
702 +- * On success, this function returns with the mutex
703 +- * exec_update_mutex locked.
704 ++ * On success, this function returns with exec_update_lock
705 ++ * held for writing.
706 + */
707 + static int exec_mmap(struct mm_struct *mm)
708 + {
709 +@@ -981,7 +981,7 @@ static int exec_mmap(struct mm_struct *mm)
710 + if (old_mm)
711 + sync_mm_rss(old_mm);
712 +
713 +- ret = mutex_lock_killable(&tsk->signal->exec_update_mutex);
714 ++ ret = down_write_killable(&tsk->signal->exec_update_lock);
715 + if (ret)
716 + return ret;
717 +
718 +@@ -995,7 +995,7 @@ static int exec_mmap(struct mm_struct *mm)
719 + mmap_read_lock(old_mm);
720 + if (unlikely(old_mm->core_state)) {
721 + mmap_read_unlock(old_mm);
722 +- mutex_unlock(&tsk->signal->exec_update_mutex);
723 ++ up_write(&tsk->signal->exec_update_lock);
724 + return -EINTR;
725 + }
726 + }
727 +@@ -1382,7 +1382,7 @@ int begin_new_exec(struct linux_binprm * bprm)
728 + return 0;
729 +
730 + out_unlock:
731 +- mutex_unlock(&me->signal->exec_update_mutex);
732 ++ up_write(&me->signal->exec_update_lock);
733 + out:
734 + return retval;
735 + }
736 +@@ -1423,7 +1423,7 @@ void setup_new_exec(struct linux_binprm * bprm)
737 + * some architectures like powerpc
738 + */
739 + me->mm->task_size = TASK_SIZE;
740 +- mutex_unlock(&me->signal->exec_update_mutex);
741 ++ up_write(&me->signal->exec_update_lock);
742 + mutex_unlock(&me->signal->cred_guard_mutex);
743 + }
744 + EXPORT_SYMBOL(setup_new_exec);
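
The mutex-to-rwsem conversion that starts here follows one pattern through the rest of the series: exec remains the sole writer, while the credential readers (procfs, kcmp, mm_access, perf) move from mutex_lock_killable() to down_read_killable() and may now run concurrently. In outline (sketch only; the calls are the same ones used in the hunks):

	/* Writer side, as in exec_mmap(): exclusive while the task's
	 * credentials may be inconsistent. */
	static int example_writer(struct task_struct *tsk)
	{
		int ret = down_write_killable(&tsk->signal->exec_update_lock);

		if (ret)
			return ret;
		/* ... install new mm, update credentials ... */
		up_write(&tsk->signal->exec_update_lock);
		return 0;
	}

	/* Reader side, as in lock_trace()/mm_access(): shared, so
	 * independent readers no longer serialize against each other. */
	static int example_reader(struct task_struct *task)
	{
		int ret = down_read_killable(&task->signal->exec_update_lock);

		if (ret)
			return ret;
		/* ... ptrace_may_access() and similar checks ... */
		up_read(&task->signal->exec_update_lock);
		return 0;
	}
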
745 +diff --git a/fs/fuse/acl.c b/fs/fuse/acl.c
746 +index 5a48cee6d7d33..f529075a2ce87 100644
747 +--- a/fs/fuse/acl.c
748 ++++ b/fs/fuse/acl.c
749 +@@ -19,6 +19,9 @@ struct posix_acl *fuse_get_acl(struct inode *inode, int type)
750 + void *value = NULL;
751 + struct posix_acl *acl;
752 +
753 ++ if (fuse_is_bad(inode))
754 ++ return ERR_PTR(-EIO);
755 ++
756 + if (!fc->posix_acl || fc->no_getxattr)
757 + return NULL;
758 +
759 +@@ -53,6 +56,9 @@ int fuse_set_acl(struct inode *inode, struct posix_acl *acl, int type)
760 + const char *name;
761 + int ret;
762 +
763 ++ if (fuse_is_bad(inode))
764 ++ return -EIO;
765 ++
766 + if (!fc->posix_acl || fc->no_setxattr)
767 + return -EOPNOTSUPP;
768 +
769 +diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
770 +index ff7dbeb16f88d..ffa031fe52933 100644
771 +--- a/fs/fuse/dir.c
772 ++++ b/fs/fuse/dir.c
773 +@@ -202,7 +202,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
774 + int ret;
775 +
776 + inode = d_inode_rcu(entry);
777 +- if (inode && is_bad_inode(inode))
778 ++ if (inode && fuse_is_bad(inode))
779 + goto invalid;
780 + else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) ||
781 + (flags & LOOKUP_REVAL)) {
782 +@@ -463,6 +463,9 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
783 + bool outarg_valid = true;
784 + bool locked;
785 +
786 ++ if (fuse_is_bad(dir))
787 ++ return ERR_PTR(-EIO);
788 ++
789 + locked = fuse_lock_inode(dir);
790 + err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name,
791 + &outarg, &inode);
792 +@@ -606,6 +609,9 @@ static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
793 + struct fuse_conn *fc = get_fuse_conn(dir);
794 + struct dentry *res = NULL;
795 +
796 ++ if (fuse_is_bad(dir))
797 ++ return -EIO;
798 ++
799 + if (d_in_lookup(entry)) {
800 + res = fuse_lookup(dir, entry, 0);
801 + if (IS_ERR(res))
802 +@@ -654,6 +660,9 @@ static int create_new_entry(struct fuse_mount *fm, struct fuse_args *args,
803 + int err;
804 + struct fuse_forget_link *forget;
805 +
806 ++ if (fuse_is_bad(dir))
807 ++ return -EIO;
808 ++
809 + forget = fuse_alloc_forget();
810 + if (!forget)
811 + return -ENOMEM;
812 +@@ -781,6 +790,9 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry)
813 + struct fuse_mount *fm = get_fuse_mount(dir);
814 + FUSE_ARGS(args);
815 +
816 ++ if (fuse_is_bad(dir))
817 ++ return -EIO;
818 ++
819 + args.opcode = FUSE_UNLINK;
820 + args.nodeid = get_node_id(dir);
821 + args.in_numargs = 1;
822 +@@ -817,6 +829,9 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry)
823 + struct fuse_mount *fm = get_fuse_mount(dir);
824 + FUSE_ARGS(args);
825 +
826 ++ if (fuse_is_bad(dir))
827 ++ return -EIO;
828 ++
829 + args.opcode = FUSE_RMDIR;
830 + args.nodeid = get_node_id(dir);
831 + args.in_numargs = 1;
832 +@@ -895,6 +910,9 @@ static int fuse_rename2(struct inode *olddir, struct dentry *oldent,
833 + struct fuse_conn *fc = get_fuse_conn(olddir);
834 + int err;
835 +
836 ++ if (fuse_is_bad(olddir))
837 ++ return -EIO;
838 ++
839 + if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
840 + return -EINVAL;
841 +
842 +@@ -1030,7 +1048,7 @@ static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
843 + if (!err) {
844 + if (fuse_invalid_attr(&outarg.attr) ||
845 + (inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
846 +- make_bad_inode(inode);
847 ++ fuse_make_bad(inode);
848 + err = -EIO;
849 + } else {
850 + fuse_change_attributes(inode, &outarg.attr,
851 +@@ -1232,6 +1250,9 @@ static int fuse_permission(struct inode *inode, int mask)
852 + bool refreshed = false;
853 + int err = 0;
854 +
855 ++ if (fuse_is_bad(inode))
856 ++ return -EIO;
857 ++
858 + if (!fuse_allow_current_process(fc))
859 + return -EACCES;
860 +
861 +@@ -1327,7 +1348,7 @@ static const char *fuse_get_link(struct dentry *dentry, struct inode *inode,
862 + int err;
863 +
864 + err = -EIO;
865 +- if (is_bad_inode(inode))
866 ++ if (fuse_is_bad(inode))
867 + goto out_err;
868 +
869 + if (fc->cache_symlinks)
870 +@@ -1375,7 +1396,7 @@ static int fuse_dir_fsync(struct file *file, loff_t start, loff_t end,
871 + struct fuse_conn *fc = get_fuse_conn(inode);
872 + int err;
873 +
874 +- if (is_bad_inode(inode))
875 ++ if (fuse_is_bad(inode))
876 + return -EIO;
877 +
878 + if (fc->no_fsyncdir)
879 +@@ -1664,7 +1685,7 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
880 +
881 + if (fuse_invalid_attr(&outarg.attr) ||
882 + (inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
883 +- make_bad_inode(inode);
884 ++ fuse_make_bad(inode);
885 + err = -EIO;
886 + goto error;
887 + }
888 +@@ -1727,6 +1748,9 @@ static int fuse_setattr(struct dentry *entry, struct iattr *attr)
889 + struct file *file = (attr->ia_valid & ATTR_FILE) ? attr->ia_file : NULL;
890 + int ret;
891 +
892 ++ if (fuse_is_bad(inode))
893 ++ return -EIO;
894 ++
895 + if (!fuse_allow_current_process(get_fuse_conn(inode)))
896 + return -EACCES;
897 +
898 +@@ -1785,6 +1809,9 @@ static int fuse_getattr(const struct path *path, struct kstat *stat,
899 + struct inode *inode = d_inode(path->dentry);
900 + struct fuse_conn *fc = get_fuse_conn(inode);
901 +
902 ++ if (fuse_is_bad(inode))
903 ++ return -EIO;
904 ++
905 + if (!fuse_allow_current_process(fc)) {
906 + if (!request_mask) {
907 + /*
908 +diff --git a/fs/fuse/file.c b/fs/fuse/file.c
909 +index c03034e8c1529..8b306005453cc 100644
910 +--- a/fs/fuse/file.c
911 ++++ b/fs/fuse/file.c
912 +@@ -226,6 +226,9 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
913 + bool dax_truncate = (file->f_flags & O_TRUNC) &&
914 + fc->atomic_o_trunc && FUSE_IS_DAX(inode);
915 +
916 ++ if (fuse_is_bad(inode))
917 ++ return -EIO;
918 ++
919 + err = generic_file_open(inode, file);
920 + if (err)
921 + return err;
922 +@@ -463,7 +466,7 @@ static int fuse_flush(struct file *file, fl_owner_t id)
923 + FUSE_ARGS(args);
924 + int err;
925 +
926 +- if (is_bad_inode(inode))
927 ++ if (fuse_is_bad(inode))
928 + return -EIO;
929 +
930 + err = write_inode_now(inode, 1);
931 +@@ -535,7 +538,7 @@ static int fuse_fsync(struct file *file, loff_t start, loff_t end,
932 + struct fuse_conn *fc = get_fuse_conn(inode);
933 + int err;
934 +
935 +- if (is_bad_inode(inode))
936 ++ if (fuse_is_bad(inode))
937 + return -EIO;
938 +
939 + inode_lock(inode);
940 +@@ -859,7 +862,7 @@ static int fuse_readpage(struct file *file, struct page *page)
941 + int err;
942 +
943 + err = -EIO;
944 +- if (is_bad_inode(inode))
945 ++ if (fuse_is_bad(inode))
946 + goto out;
947 +
948 + err = fuse_do_readpage(file, page);
949 +@@ -952,7 +955,7 @@ static void fuse_readahead(struct readahead_control *rac)
950 + struct fuse_conn *fc = get_fuse_conn(inode);
951 + unsigned int i, max_pages, nr_pages = 0;
952 +
953 +- if (is_bad_inode(inode))
954 ++ if (fuse_is_bad(inode))
955 + return;
956 +
957 + max_pages = min_t(unsigned int, fc->max_pages,
958 +@@ -1555,7 +1558,7 @@ static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
959 + struct fuse_file *ff = file->private_data;
960 + struct inode *inode = file_inode(file);
961 +
962 +- if (is_bad_inode(inode))
963 ++ if (fuse_is_bad(inode))
964 + return -EIO;
965 +
966 + if (FUSE_IS_DAX(inode))
967 +@@ -1573,7 +1576,7 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
968 + struct fuse_file *ff = file->private_data;
969 + struct inode *inode = file_inode(file);
970 +
971 +- if (is_bad_inode(inode))
972 ++ if (fuse_is_bad(inode))
973 + return -EIO;
974 +
975 + if (FUSE_IS_DAX(inode))
976 +@@ -2172,7 +2175,7 @@ static int fuse_writepages(struct address_space *mapping,
977 + int err;
978 +
979 + err = -EIO;
980 +- if (is_bad_inode(inode))
981 ++ if (fuse_is_bad(inode))
982 + goto out;
983 +
984 + data.inode = inode;
985 +@@ -2954,7 +2957,7 @@ long fuse_ioctl_common(struct file *file, unsigned int cmd,
986 + if (!fuse_allow_current_process(fc))
987 + return -EACCES;
988 +
989 +- if (is_bad_inode(inode))
990 ++ if (fuse_is_bad(inode))
991 + return -EIO;
992 +
993 + return fuse_do_ioctl(file, cmd, arg, flags);
994 +diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
995 +index d51598017d133..404d66f01e8d7 100644
996 +--- a/fs/fuse/fuse_i.h
997 ++++ b/fs/fuse/fuse_i.h
998 +@@ -172,6 +172,8 @@ enum {
999 + FUSE_I_INIT_RDPLUS,
1000 + /** An operation changing file size is in progress */
1001 + FUSE_I_SIZE_UNSTABLE,
1002 ++ /* Bad inode */
1003 ++ FUSE_I_BAD,
1004 + };
1005 +
1006 + struct fuse_conn;
1007 +@@ -858,6 +860,16 @@ static inline u64 fuse_get_attr_version(struct fuse_conn *fc)
1008 + return atomic64_read(&fc->attr_version);
1009 + }
1010 +
1011 ++static inline void fuse_make_bad(struct inode *inode)
1012 ++{
1013 ++ set_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state);
1014 ++}
1015 ++
1016 ++static inline bool fuse_is_bad(struct inode *inode)
1017 ++{
1018 ++ return unlikely(test_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state));
1019 ++}
1020 ++
1021 + /** Device operations */
1022 + extern const struct file_operations fuse_dev_operations;
1023 +
1024 +diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
1025 +index 1a47afc95f800..f94b0bb57619c 100644
1026 +--- a/fs/fuse/inode.c
1027 ++++ b/fs/fuse/inode.c
1028 +@@ -132,7 +132,7 @@ static void fuse_evict_inode(struct inode *inode)
1029 + fi->forget = NULL;
1030 + }
1031 + }
1032 +- if (S_ISREG(inode->i_mode) && !is_bad_inode(inode)) {
1033 ++ if (S_ISREG(inode->i_mode) && !fuse_is_bad(inode)) {
1034 + WARN_ON(!list_empty(&fi->write_files));
1035 + WARN_ON(!list_empty(&fi->queued_writes));
1036 + }
1037 +@@ -342,7 +342,7 @@ retry:
1038 + unlock_new_inode(inode);
1039 + } else if ((inode->i_mode ^ attr->mode) & S_IFMT) {
1040 + /* Inode has changed type, any I/O on the old should fail */
1041 +- make_bad_inode(inode);
1042 ++ fuse_make_bad(inode);
1043 + iput(inode);
1044 + goto retry;
1045 + }
1046 +diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c
1047 +index 3b5e91045871a..3441ffa740f3d 100644
1048 +--- a/fs/fuse/readdir.c
1049 ++++ b/fs/fuse/readdir.c
1050 +@@ -207,7 +207,7 @@ retry:
1051 + dput(dentry);
1052 + goto retry;
1053 + }
1054 +- if (is_bad_inode(inode)) {
1055 ++ if (fuse_is_bad(inode)) {
1056 + dput(dentry);
1057 + return -EIO;
1058 + }
1059 +@@ -568,7 +568,7 @@ int fuse_readdir(struct file *file, struct dir_context *ctx)
1060 + struct inode *inode = file_inode(file);
1061 + int err;
1062 +
1063 +- if (is_bad_inode(inode))
1064 ++ if (fuse_is_bad(inode))
1065 + return -EIO;
1066 +
1067 + mutex_lock(&ff->readdir.lock);
1068 +diff --git a/fs/fuse/xattr.c b/fs/fuse/xattr.c
1069 +index 371bdcbc72337..cdea18de94f7e 100644
1070 +--- a/fs/fuse/xattr.c
1071 ++++ b/fs/fuse/xattr.c
1072 +@@ -113,6 +113,9 @@ ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size)
1073 + struct fuse_getxattr_out outarg;
1074 + ssize_t ret;
1075 +
1076 ++ if (fuse_is_bad(inode))
1077 ++ return -EIO;
1078 ++
1079 + if (!fuse_allow_current_process(fm->fc))
1080 + return -EACCES;
1081 +
1082 +@@ -178,6 +181,9 @@ static int fuse_xattr_get(const struct xattr_handler *handler,
1083 + struct dentry *dentry, struct inode *inode,
1084 + const char *name, void *value, size_t size)
1085 + {
1086 ++ if (fuse_is_bad(inode))
1087 ++ return -EIO;
1088 ++
1089 + return fuse_getxattr(inode, name, value, size);
1090 + }
1091 +
1092 +@@ -186,6 +192,9 @@ static int fuse_xattr_set(const struct xattr_handler *handler,
1093 + const char *name, const void *value, size_t size,
1094 + int flags)
1095 + {
1096 ++ if (fuse_is_bad(inode))
1097 ++ return -EIO;
1098 ++
1099 + if (!value)
1100 + return fuse_removexattr(inode, name);
1101 +
1102 +diff --git a/fs/proc/base.c b/fs/proc/base.c
1103 +index b362523a9829a..55ce0ee9c5c73 100644
1104 +--- a/fs/proc/base.c
1105 ++++ b/fs/proc/base.c
1106 +@@ -405,11 +405,11 @@ print0:
1107 +
1108 + static int lock_trace(struct task_struct *task)
1109 + {
1110 +- int err = mutex_lock_killable(&task->signal->exec_update_mutex);
1111 ++ int err = down_read_killable(&task->signal->exec_update_lock);
1112 + if (err)
1113 + return err;
1114 + if (!ptrace_may_access(task, PTRACE_MODE_ATTACH_FSCREDS)) {
1115 +- mutex_unlock(&task->signal->exec_update_mutex);
1116 ++ up_read(&task->signal->exec_update_lock);
1117 + return -EPERM;
1118 + }
1119 + return 0;
1120 +@@ -417,7 +417,7 @@ static int lock_trace(struct task_struct *task)
1121 +
1122 + static void unlock_trace(struct task_struct *task)
1123 + {
1124 +- mutex_unlock(&task->signal->exec_update_mutex);
1125 ++ up_read(&task->signal->exec_update_lock);
1126 + }
1127 +
1128 + #ifdef CONFIG_STACKTRACE
1129 +@@ -2930,7 +2930,7 @@ static int do_io_accounting(struct task_struct *task, struct seq_file *m, int wh
1130 + unsigned long flags;
1131 + int result;
1132 +
1133 +- result = mutex_lock_killable(&task->signal->exec_update_mutex);
1134 ++ result = down_read_killable(&task->signal->exec_update_lock);
1135 + if (result)
1136 + return result;
1137 +
1138 +@@ -2966,7 +2966,7 @@ static int do_io_accounting(struct task_struct *task, struct seq_file *m, int wh
1139 + result = 0;
1140 +
1141 + out_unlock:
1142 +- mutex_unlock(&task->signal->exec_update_mutex);
1143 ++ up_read(&task->signal->exec_update_lock);
1144 + return result;
1145 + }
1146 +
1147 +diff --git a/include/linux/kdev_t.h b/include/linux/kdev_t.h
1148 +index 85b5151911cfd..4856706fbfeb4 100644
1149 +--- a/include/linux/kdev_t.h
1150 ++++ b/include/linux/kdev_t.h
1151 +@@ -21,61 +21,61 @@
1152 + })
1153 +
1154 + /* acceptable for old filesystems */
1155 +-static inline bool old_valid_dev(dev_t dev)
1156 ++static __always_inline bool old_valid_dev(dev_t dev)
1157 + {
1158 + return MAJOR(dev) < 256 && MINOR(dev) < 256;
1159 + }
1160 +
1161 +-static inline u16 old_encode_dev(dev_t dev)
1162 ++static __always_inline u16 old_encode_dev(dev_t dev)
1163 + {
1164 + return (MAJOR(dev) << 8) | MINOR(dev);
1165 + }
1166 +
1167 +-static inline dev_t old_decode_dev(u16 val)
1168 ++static __always_inline dev_t old_decode_dev(u16 val)
1169 + {
1170 + return MKDEV((val >> 8) & 255, val & 255);
1171 + }
1172 +
1173 +-static inline u32 new_encode_dev(dev_t dev)
1174 ++static __always_inline u32 new_encode_dev(dev_t dev)
1175 + {
1176 + unsigned major = MAJOR(dev);
1177 + unsigned minor = MINOR(dev);
1178 + return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12);
1179 + }
1180 +
1181 +-static inline dev_t new_decode_dev(u32 dev)
1182 ++static __always_inline dev_t new_decode_dev(u32 dev)
1183 + {
1184 + unsigned major = (dev & 0xfff00) >> 8;
1185 + unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00);
1186 + return MKDEV(major, minor);
1187 + }
1188 +
1189 +-static inline u64 huge_encode_dev(dev_t dev)
1190 ++static __always_inline u64 huge_encode_dev(dev_t dev)
1191 + {
1192 + return new_encode_dev(dev);
1193 + }
1194 +
1195 +-static inline dev_t huge_decode_dev(u64 dev)
1196 ++static __always_inline dev_t huge_decode_dev(u64 dev)
1197 + {
1198 + return new_decode_dev(dev);
1199 + }
1200 +
1201 +-static inline int sysv_valid_dev(dev_t dev)
1202 ++static __always_inline int sysv_valid_dev(dev_t dev)
1203 + {
1204 + return MAJOR(dev) < (1<<14) && MINOR(dev) < (1<<18);
1205 + }
1206 +
1207 +-static inline u32 sysv_encode_dev(dev_t dev)
1208 ++static __always_inline u32 sysv_encode_dev(dev_t dev)
1209 + {
1210 + return MINOR(dev) | (MAJOR(dev) << 18);
1211 + }
1212 +
1213 +-static inline unsigned sysv_major(u32 dev)
1214 ++static __always_inline unsigned sysv_major(u32 dev)
1215 + {
1216 + return (dev >> 18) & 0x3fff;
1217 + }
1218 +
1219 +-static inline unsigned sysv_minor(u32 dev)
1220 ++static __always_inline unsigned sysv_minor(u32 dev)
1221 + {
1222 + return dev & 0x3ffff;
1223 + }
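
For reference, the helpers made __always_inline above implement a stable packing: new_encode_dev() keeps the old 16-bit encoding in the low bits and spills the minor's high bits above bit 19. A worked example (values illustrative):

	/*
	 * dev_t with major 259 (0x103) and minor 427 (0x1ab):
	 *
	 *   new_encode_dev(dev) = (0x1ab & 0xff)          -> 0x0000ab
	 *                       | (259 << 8)              -> 0x010300
	 *                       | ((0x1ab & ~0xff) << 12) -> 0x100000
	 *                       = 0x1103ab
	 *
	 * new_decode_dev() inverts this exactly:
	 *   major = (0x1103ab & 0xfff00) >> 8      = 259
	 *   minor = (0x1103ab & 0xff)
	 *         | ((0x1103ab >> 12) & 0xfff00)   = 0x1ab
	 *
	 * so the round trip is lossless for major < 2^12, minor < 2^20.
	 */
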
1224 +diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
1225 +index 25e3fde856178..4c715be487171 100644
1226 +--- a/include/linux/rwsem.h
1227 ++++ b/include/linux/rwsem.h
1228 +@@ -123,6 +123,7 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem)
1229 + * lock for reading
1230 + */
1231 + extern void down_read(struct rw_semaphore *sem);
1232 ++extern int __must_check down_read_interruptible(struct rw_semaphore *sem);
1233 + extern int __must_check down_read_killable(struct rw_semaphore *sem);
1234 +
1235 + /*
1236 +@@ -171,6 +172,7 @@ extern void downgrade_write(struct rw_semaphore *sem);
1237 + * See Documentation/locking/lockdep-design.rst for more details.)
1238 + */
1239 + extern void down_read_nested(struct rw_semaphore *sem, int subclass);
1240 ++extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass);
1241 + extern void down_write_nested(struct rw_semaphore *sem, int subclass);
1242 + extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
1243 + extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);
1244 +@@ -191,6 +193,7 @@ extern void down_read_non_owner(struct rw_semaphore *sem);
1245 + extern void up_read_non_owner(struct rw_semaphore *sem);
1246 + #else
1247 + # define down_read_nested(sem, subclass) down_read(sem)
1248 ++# define down_read_killable_nested(sem, subclass) down_read_killable(sem)
1249 + # define down_write_nest_lock(sem, nest_lock) down_write(sem)
1250 + # define down_write_nested(sem, subclass) down_write(sem)
1251 + # define down_write_killable_nested(sem, subclass) down_write_killable(sem)
1252 +diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
1253 +index 1bad18a1d8ba7..4b6a8234d7fc2 100644
1254 +--- a/include/linux/sched/signal.h
1255 ++++ b/include/linux/sched/signal.h
1256 +@@ -228,12 +228,13 @@ struct signal_struct {
1257 + * credential calculations
1258 + * (notably. ptrace)
1259 + * Deprecated do not use in new code.
1260 +- * Use exec_update_mutex instead.
1261 +- */
1262 +- struct mutex exec_update_mutex; /* Held while task_struct is being
1263 +- * updated during exec, and may have
1264 +- * inconsistent permissions.
1265 ++ * Use exec_update_lock instead.
1266 + */
1267 ++ struct rw_semaphore exec_update_lock; /* Held while task_struct is
1268 ++ * being updated during exec,
1269 ++ * and may have inconsistent
1270 ++ * permissions.
1271 ++ */
1272 + } __randomize_layout;
1273 +
1274 + /*
1275 +diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
1276 +index 9bf6c319a670e..65771bef5e654 100644
1277 +--- a/include/rdma/ib_verbs.h
1278 ++++ b/include/rdma/ib_verbs.h
1279 +@@ -3943,6 +3943,16 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
1280 + -ENOSYS;
1281 + }
1282 +
1283 ++/*
1284 ++ * Drivers that don't need a DMA mapping at the RDMA layer, set dma_device to
1285 ++ * NULL. This causes the ib_dma* helpers to just stash the kernel virtual
1286 ++ * address into the dma address.
1287 ++ */
1288 ++static inline bool ib_uses_virt_dma(struct ib_device *dev)
1289 ++{
1290 ++ return IS_ENABLED(CONFIG_INFINIBAND_VIRT_DMA) && !dev->dma_device;
1291 ++}
1292 ++
1293 + /**
1294 + * ib_dma_mapping_error - check a DMA addr for error
1295 + * @dev: The device for which the dma_addr was created
1296 +@@ -3950,6 +3960,8 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
1297 + */
1298 + static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
1299 + {
1300 ++ if (ib_uses_virt_dma(dev))
1301 ++ return 0;
1302 + return dma_mapping_error(dev->dma_device, dma_addr);
1303 + }
1304 +
1305 +@@ -3964,6 +3976,8 @@ static inline u64 ib_dma_map_single(struct ib_device *dev,
1306 + void *cpu_addr, size_t size,
1307 + enum dma_data_direction direction)
1308 + {
1309 ++ if (ib_uses_virt_dma(dev))
1310 ++ return (uintptr_t)cpu_addr;
1311 + return dma_map_single(dev->dma_device, cpu_addr, size, direction);
1312 + }
1313 +
1314 +@@ -3978,7 +3992,8 @@ static inline void ib_dma_unmap_single(struct ib_device *dev,
1315 + u64 addr, size_t size,
1316 + enum dma_data_direction direction)
1317 + {
1318 +- dma_unmap_single(dev->dma_device, addr, size, direction);
1319 ++ if (!ib_uses_virt_dma(dev))
1320 ++ dma_unmap_single(dev->dma_device, addr, size, direction);
1321 + }
1322 +
1323 + /**
1324 +@@ -3995,6 +4010,8 @@ static inline u64 ib_dma_map_page(struct ib_device *dev,
1325 + size_t size,
1326 + enum dma_data_direction direction)
1327 + {
1328 ++ if (ib_uses_virt_dma(dev))
1329 ++ return (uintptr_t)(page_address(page) + offset);
1330 + return dma_map_page(dev->dma_device, page, offset, size, direction);
1331 + }
1332 +
1333 +@@ -4009,7 +4026,30 @@ static inline void ib_dma_unmap_page(struct ib_device *dev,
1334 + u64 addr, size_t size,
1335 + enum dma_data_direction direction)
1336 + {
1337 +- dma_unmap_page(dev->dma_device, addr, size, direction);
1338 ++ if (!ib_uses_virt_dma(dev))
1339 ++ dma_unmap_page(dev->dma_device, addr, size, direction);
1340 ++}
1341 ++
1342 ++int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents);
1343 ++static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
1344 ++ struct scatterlist *sg, int nents,
1345 ++ enum dma_data_direction direction,
1346 ++ unsigned long dma_attrs)
1347 ++{
1348 ++ if (ib_uses_virt_dma(dev))
1349 ++ return ib_dma_virt_map_sg(dev, sg, nents);
1350 ++ return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
1351 ++ dma_attrs);
1352 ++}
1353 ++
1354 ++static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
1355 ++ struct scatterlist *sg, int nents,
1356 ++ enum dma_data_direction direction,
1357 ++ unsigned long dma_attrs)
1358 ++{
1359 ++ if (!ib_uses_virt_dma(dev))
1360 ++ dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction,
1361 ++ dma_attrs);
1362 + }
1363 +
1364 + /**
1365 +@@ -4023,7 +4063,7 @@ static inline int ib_dma_map_sg(struct ib_device *dev,
1366 + struct scatterlist *sg, int nents,
1367 + enum dma_data_direction direction)
1368 + {
1369 +- return dma_map_sg(dev->dma_device, sg, nents, direction);
1370 ++ return ib_dma_map_sg_attrs(dev, sg, nents, direction, 0);
1371 + }
1372 +
1373 + /**
1374 +@@ -4037,24 +4077,7 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev,
1375 + struct scatterlist *sg, int nents,
1376 + enum dma_data_direction direction)
1377 + {
1378 +- dma_unmap_sg(dev->dma_device, sg, nents, direction);
1379 +-}
1380 +-
1381 +-static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
1382 +- struct scatterlist *sg, int nents,
1383 +- enum dma_data_direction direction,
1384 +- unsigned long dma_attrs)
1385 +-{
1386 +- return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
1387 +- dma_attrs);
1388 +-}
1389 +-
1390 +-static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
1391 +- struct scatterlist *sg, int nents,
1392 +- enum dma_data_direction direction,
1393 +- unsigned long dma_attrs)
1394 +-{
1395 +- dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
1396 ++ ib_dma_unmap_sg_attrs(dev, sg, nents, direction, 0);
1397 + }
1398 +
1399 + /**
1400 +@@ -4065,6 +4088,8 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
1401 + */
1402 + static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
1403 + {
1404 ++ if (ib_uses_virt_dma(dev))
1405 ++ return UINT_MAX;
1406 + return dma_get_max_seg_size(dev->dma_device);
1407 + }
1408 +
1409 +@@ -4080,7 +4105,8 @@ static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
1410 + size_t size,
1411 + enum dma_data_direction dir)
1412 + {
1413 +- dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
1414 ++ if (!ib_uses_virt_dma(dev))
1415 ++ dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
1416 + }
1417 +
1418 + /**
1419 +@@ -4095,7 +4121,8 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
1420 + size_t size,
1421 + enum dma_data_direction dir)
1422 + {
1423 +- dma_sync_single_for_device(dev->dma_device, addr, size, dir);
1424 ++ if (!ib_uses_virt_dma(dev))
1425 ++ dma_sync_single_for_device(dev->dma_device, addr, size, dir);
1426 + }
1427 +
1428 + /**
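
Every hunk in this header has the same shape: when ib_uses_virt_dma() is true (a software RDMA device registered with a NULL dma_device), the map helpers degenerate to casts of kernel virtual addresses and the unmap/sync helpers become no-ops. A usage sketch (function name illustrative):

	/* Illustrative: on a virt-DMA device (rxe/siw/rdmavt) the
	 * "mapping" is the identity on kernel virtual addresses and can
	 * never fail. */
	static void example_virt_dma_roundtrip(struct ib_device *dev,
					       void *buf, size_t len)
	{
		u64 addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);

		if (ib_uses_virt_dma(dev))
			WARN_ON(addr != (uintptr_t)buf ||
				ib_dma_mapping_error(dev, addr));

		/* no-op when dma_device is NULL */
		ib_dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
	}
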
1429 +diff --git a/init/init_task.c b/init/init_task.c
1430 +index a56f0abb63e93..15f6eb93a04fa 100644
1431 +--- a/init/init_task.c
1432 ++++ b/init/init_task.c
1433 +@@ -26,7 +26,7 @@ static struct signal_struct init_signals = {
1434 + .multiprocess = HLIST_HEAD_INIT,
1435 + .rlim = INIT_RLIMITS,
1436 + .cred_guard_mutex = __MUTEX_INITIALIZER(init_signals.cred_guard_mutex),
1437 +- .exec_update_mutex = __MUTEX_INITIALIZER(init_signals.exec_update_mutex),
1438 ++ .exec_update_lock = __RWSEM_INITIALIZER(init_signals.exec_update_lock),
1439 + #ifdef CONFIG_POSIX_TIMERS
1440 + .posix_timers = LIST_HEAD_INIT(init_signals.posix_timers),
1441 + .cputimer = {
1442 +diff --git a/kernel/events/core.c b/kernel/events/core.c
1443 +index dc568ca295bdc..c3ba29d058b73 100644
1444 +--- a/kernel/events/core.c
1445 ++++ b/kernel/events/core.c
1446 +@@ -1325,7 +1325,7 @@ static void put_ctx(struct perf_event_context *ctx)
1447 + * function.
1448 + *
1449 + * Lock order:
1450 +- * exec_update_mutex
1451 ++ * exec_update_lock
1452 + * task_struct::perf_event_mutex
1453 + * perf_event_context::mutex
1454 + * perf_event::child_mutex;
1455 +@@ -11720,24 +11720,6 @@ SYSCALL_DEFINE5(perf_event_open,
1456 + goto err_task;
1457 + }
1458 +
1459 +- if (task) {
1460 +- err = mutex_lock_interruptible(&task->signal->exec_update_mutex);
1461 +- if (err)
1462 +- goto err_task;
1463 +-
1464 +- /*
1465 +- * Preserve ptrace permission check for backwards compatibility.
1466 +- *
1467 +- * We must hold exec_update_mutex across this and any potential
1468 +- * perf_install_in_context() call for this new event to
1469 +- * serialize against exec() altering our credentials (and the
1470 +- * perf_event_exit_task() that could imply).
1471 +- */
1472 +- err = -EACCES;
1473 +- if (!perfmon_capable() && !ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
1474 +- goto err_cred;
1475 +- }
1476 +-
1477 + if (flags & PERF_FLAG_PID_CGROUP)
1478 + cgroup_fd = pid;
1479 +
1480 +@@ -11745,7 +11727,7 @@ SYSCALL_DEFINE5(perf_event_open,
1481 + NULL, NULL, cgroup_fd);
1482 + if (IS_ERR(event)) {
1483 + err = PTR_ERR(event);
1484 +- goto err_cred;
1485 ++ goto err_task;
1486 + }
1487 +
1488 + if (is_sampling_event(event)) {
1489 +@@ -11864,6 +11846,24 @@ SYSCALL_DEFINE5(perf_event_open,
1490 + goto err_context;
1491 + }
1492 +
1493 ++ if (task) {
1494 ++ err = down_read_interruptible(&task->signal->exec_update_lock);
1495 ++ if (err)
1496 ++ goto err_file;
1497 ++
1498 ++ /*
1499 ++ * Preserve ptrace permission check for backwards compatibility.
1500 ++ *
1501 ++ * We must hold exec_update_lock across this and any potential
1502 ++ * perf_install_in_context() call for this new event to
1503 ++ * serialize against exec() altering our credentials (and the
1504 ++ * perf_event_exit_task() that could imply).
1505 ++ */
1506 ++ err = -EACCES;
1507 ++ if (!perfmon_capable() && !ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
1508 ++ goto err_cred;
1509 ++ }
1510 ++
1511 + if (move_group) {
1512 + gctx = __perf_event_ctx_lock_double(group_leader, ctx);
1513 +
1514 +@@ -12017,7 +12017,7 @@ SYSCALL_DEFINE5(perf_event_open,
1515 + mutex_unlock(&ctx->mutex);
1516 +
1517 + if (task) {
1518 +- mutex_unlock(&task->signal->exec_update_mutex);
1519 ++ up_read(&task->signal->exec_update_lock);
1520 + put_task_struct(task);
1521 + }
1522 +
1523 +@@ -12039,7 +12039,10 @@ err_locked:
1524 + if (move_group)
1525 + perf_event_ctx_unlock(group_leader, gctx);
1526 + mutex_unlock(&ctx->mutex);
1527 +-/* err_file: */
1528 ++err_cred:
1529 ++ if (task)
1530 ++ up_read(&task->signal->exec_update_lock);
1531 ++err_file:
1532 + fput(event_file);
1533 + err_context:
1534 + perf_unpin_context(ctx);
1535 +@@ -12051,9 +12054,6 @@ err_alloc:
1536 + */
1537 + if (!event_file)
1538 + free_event(event);
1539 +-err_cred:
1540 +- if (task)
1541 +- mutex_unlock(&task->signal->exec_update_mutex);
1542 + err_task:
1543 + if (task)
1544 + put_task_struct(task);
1545 +@@ -12358,7 +12358,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
1546 + /*
1547 + * When a child task exits, feed back event values to parent events.
1548 + *
1549 +- * Can be called with exec_update_mutex held when called from
1550 ++ * Can be called with exec_update_lock held when called from
1551 + * setup_new_exec().
1552 + */
1553 + void perf_event_exit_task(struct task_struct *child)
1554 +diff --git a/kernel/fork.c b/kernel/fork.c
1555 +index dc55f68a6ee36..c675fdbd3dce1 100644
1556 +--- a/kernel/fork.c
1557 ++++ b/kernel/fork.c
1558 +@@ -1222,7 +1222,7 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
1559 + struct mm_struct *mm;
1560 + int err;
1561 +
1562 +- err = mutex_lock_killable(&task->signal->exec_update_mutex);
1563 ++ err = down_read_killable(&task->signal->exec_update_lock);
1564 + if (err)
1565 + return ERR_PTR(err);
1566 +
1567 +@@ -1232,7 +1232,7 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
1568 + mmput(mm);
1569 + mm = ERR_PTR(-EACCES);
1570 + }
1571 +- mutex_unlock(&task->signal->exec_update_mutex);
1572 ++ up_read(&task->signal->exec_update_lock);
1573 +
1574 + return mm;
1575 + }
1576 +@@ -1592,7 +1592,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
1577 + sig->oom_score_adj_min = current->signal->oom_score_adj_min;
1578 +
1579 + mutex_init(&sig->cred_guard_mutex);
1580 +- mutex_init(&sig->exec_update_mutex);
1581 ++ init_rwsem(&sig->exec_update_lock);
1582 +
1583 + return 0;
1584 + }
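mm_access() keeps its shape across the conversion — acquire killable, check, release — and folds a failed acquisition into the returned pointer via ERR_PTR() rather than a separate out-parameter. For readers unfamiliar with that convention (the real macros live in include/linux/err.h), a minimal userspace sketch, illustrative only:

/* Errors are encoded as pointers in the top 4095 values of the
 * address space, so one return value carries either a resource or
 * an -errno, as in mm_access() above. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

static void *get_resource(int fail)
{
	static int resource = 42;

	if (fail)
		return ERR_PTR(-EINTR); /* e.g. down_read_killable() interrupted */
	return &resource;
}

int main(void)
{
	void *p = get_resource(1);

	if (IS_ERR(p))
		printf("failed: %ld\n", PTR_ERR(p));
	return 0;
}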
1585 +diff --git a/kernel/kcmp.c b/kernel/kcmp.c
1586 +index b3ff9288c6cc9..c0d2ad9b4705d 100644
1587 +--- a/kernel/kcmp.c
1588 ++++ b/kernel/kcmp.c
1589 +@@ -75,25 +75,25 @@ get_file_raw_ptr(struct task_struct *task, unsigned int idx)
1590 + return file;
1591 + }
1592 +
1593 +-static void kcmp_unlock(struct mutex *m1, struct mutex *m2)
1594 ++static void kcmp_unlock(struct rw_semaphore *l1, struct rw_semaphore *l2)
1595 + {
1596 +- if (likely(m2 != m1))
1597 +- mutex_unlock(m2);
1598 +- mutex_unlock(m1);
1599 ++ if (likely(l2 != l1))
1600 ++ up_read(l2);
1601 ++ up_read(l1);
1602 + }
1603 +
1604 +-static int kcmp_lock(struct mutex *m1, struct mutex *m2)
1605 ++static int kcmp_lock(struct rw_semaphore *l1, struct rw_semaphore *l2)
1606 + {
1607 + int err;
1608 +
1609 +- if (m2 > m1)
1610 +- swap(m1, m2);
1611 ++ if (l2 > l1)
1612 ++ swap(l1, l2);
1613 +
1614 +- err = mutex_lock_killable(m1);
1615 +- if (!err && likely(m1 != m2)) {
1616 +- err = mutex_lock_killable_nested(m2, SINGLE_DEPTH_NESTING);
1617 ++ err = down_read_killable(l1);
1618 ++ if (!err && likely(l1 != l2)) {
1619 ++ err = down_read_killable_nested(l2, SINGLE_DEPTH_NESTING);
1620 + if (err)
1621 +- mutex_unlock(m1);
1622 ++ up_read(l1);
1623 + }
1624 +
1625 + return err;
1626 +@@ -173,8 +173,8 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
1627 + /*
1628 + * One should have enough rights to inspect task details.
1629 + */
1630 +- ret = kcmp_lock(&task1->signal->exec_update_mutex,
1631 +- &task2->signal->exec_update_mutex);
1632 ++ ret = kcmp_lock(&task1->signal->exec_update_lock,
1633 ++ &task2->signal->exec_update_lock);
1634 + if (ret)
1635 + goto err;
1636 + if (!ptrace_may_access(task1, PTRACE_MODE_READ_REALCREDS) ||
1637 +@@ -229,8 +229,8 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
1638 + }
1639 +
1640 + err_unlock:
1641 +- kcmp_unlock(&task1->signal->exec_update_mutex,
1642 +- &task2->signal->exec_update_mutex);
1643 ++ kcmp_unlock(&task1->signal->exec_update_lock,
1644 ++ &task2->signal->exec_update_lock);
1645 + err:
1646 + put_task_struct(task1);
1647 + put_task_struct(task2);
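kcmp() may need to hold two tasks' exec_update_lock at once, so kcmp_lock() carries its classic ABBA-deadlock avoidance through the conversion: impose a global acquisition order by comparing lock addresses, and take the second lock with a nested annotation only when the two actually differ (both pids can name the same process). A userspace sketch of the same discipline with POSIX mutexes, illustrative names only:

#include <pthread.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

static void lock_pair(pthread_mutex_t *l1, pthread_mutex_t *l2)
{
	/* Order by address (as kcmp_lock() does): every caller agrees
	 * on which lock comes first, so two racing pairs cannot
	 * deadlock against each other. */
	if (l2 > l1) {
		pthread_mutex_t *tmp = l1;
		l1 = l2;
		l2 = tmp;
	}
	pthread_mutex_lock(l1);
	if (l1 != l2)             /* both tasks may share one lock */
		pthread_mutex_lock(l2);
}

static void unlock_pair(pthread_mutex_t *l1, pthread_mutex_t *l2)
{
	if (l1 != l2)
		pthread_mutex_unlock(l2);
	pthread_mutex_unlock(l1);
}

int main(void)
{
	lock_pair(&lock_a, &lock_b);
	unlock_pair(&lock_a, &lock_b);
	return 0;
}

Unlock order does not matter for deadlock avoidance, which is why kcmp_unlock() releases without re-sorting.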
1648 +diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
1649 +index f11b9bd3431d2..a163542d178ee 100644
1650 +--- a/kernel/locking/rwsem.c
1651 ++++ b/kernel/locking/rwsem.c
1652 +@@ -1345,6 +1345,18 @@ static inline void __down_read(struct rw_semaphore *sem)
1653 + }
1654 + }
1655 +
1656 ++static inline int __down_read_interruptible(struct rw_semaphore *sem)
1657 ++{
1658 ++ if (!rwsem_read_trylock(sem)) {
1659 ++ if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_INTERRUPTIBLE)))
1660 ++ return -EINTR;
1661 ++ DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1662 ++ } else {
1663 ++ rwsem_set_reader_owned(sem);
1664 ++ }
1665 ++ return 0;
1666 ++}
1667 ++
1668 + static inline int __down_read_killable(struct rw_semaphore *sem)
1669 + {
1670 + if (!rwsem_read_trylock(sem)) {
1671 +@@ -1495,6 +1507,20 @@ void __sched down_read(struct rw_semaphore *sem)
1672 + }
1673 + EXPORT_SYMBOL(down_read);
1674 +
1675 ++int __sched down_read_interruptible(struct rw_semaphore *sem)
1676 ++{
1677 ++ might_sleep();
1678 ++ rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1679 ++
1680 ++ if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_interruptible)) {
1681 ++ rwsem_release(&sem->dep_map, _RET_IP_);
1682 ++ return -EINTR;
1683 ++ }
1684 ++
1685 ++ return 0;
1686 ++}
1687 ++EXPORT_SYMBOL(down_read_interruptible);
1688 ++
1689 + int __sched down_read_killable(struct rw_semaphore *sem)
1690 + {
1691 + might_sleep();
1692 +@@ -1605,6 +1631,20 @@ void down_read_nested(struct rw_semaphore *sem, int subclass)
1693 + }
1694 + EXPORT_SYMBOL(down_read_nested);
1695 +
1696 ++int down_read_killable_nested(struct rw_semaphore *sem, int subclass)
1697 ++{
1698 ++ might_sleep();
1699 ++ rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
1700 ++
1701 ++ if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
1702 ++ rwsem_release(&sem->dep_map, _RET_IP_);
1703 ++ return -EINTR;
1704 ++ }
1705 ++
1706 ++ return 0;
1707 ++}
1708 ++EXPORT_SYMBOL(down_read_killable_nested);
1709 ++
1710 + void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
1711 + {
1712 + might_sleep();
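These rwsem.c hunks add the two primitives the perf change depends on: down_read_interruptible(), which backs off with -EINTR when any signal arrives, and down_read_killable_nested(), the lockdep-annotated variant kcmp_lock() uses for its second lock. Unlike plain down_read(), both can fail, so every call site must check the return value and unwind without touching the protected state. A compile-and-run sketch of that caller contract, with the primitive stubbed out to simulate an interrupting signal (the stub is not the kernel implementation):

#include <errno.h>
#include <stdio.h>

struct rw_semaphore { int dummy; };

/* Stub: pretend a signal arrived during the slow path. */
static int down_read_interruptible(struct rw_semaphore *sem)
{
	(void)sem;
	return -EINTR;
}

static void up_read(struct rw_semaphore *sem) { (void)sem; }

static int reader(struct rw_semaphore *sem, const int *shared)
{
	int err = down_read_interruptible(sem);

	if (err)
		return err;       /* no lock held, nothing to release */
	err = *shared;            /* safe: read lock held */
	up_read(sem);
	return err;
}

int main(void)
{
	struct rw_semaphore sem = { 0 };
	int data = 7;

	printf("reader returned %d\n", reader(&sem, &data));
	return 0;
}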
1713 +diff --git a/kernel/pid.c b/kernel/pid.c
1714 +index a96bc4bf4f869..4856818c9de1a 100644
1715 +--- a/kernel/pid.c
1716 ++++ b/kernel/pid.c
1717 +@@ -628,7 +628,7 @@ static struct file *__pidfd_fget(struct task_struct *task, int fd)
1718 + struct file *file;
1719 + int ret;
1720 +
1721 +- ret = mutex_lock_killable(&task->signal->exec_update_mutex);
1722 ++ ret = down_read_killable(&task->signal->exec_update_lock);
1723 + if (ret)
1724 + return ERR_PTR(ret);
1725 +
1726 +@@ -637,7 +637,7 @@ static struct file *__pidfd_fget(struct task_struct *task, int fd)
1727 + else
1728 + file = ERR_PTR(-EPERM);
1729 +
1730 +- mutex_unlock(&task->signal->exec_update_mutex);
1731 ++ up_read(&task->signal->exec_update_lock);
1732 +
1733 + return file ?: ERR_PTR(-EBADF);
1734 + }
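__pidfd_fget() is the same convert-to-shared-reader change as mm_access(); note in passing the `file ?: ERR_PTR(-EBADF)` return it keeps, a GCC/Clang extension where `a ?: b` evaluates a once and yields it if non-zero, b otherwise. A two-line demonstration of the extension (not standard C):

#include <stdio.h>

int main(void)
{
	const char *name = NULL;

	/* `name ?: "fallback"` behaves like name ? name : "fallback",
	 * but evaluates name only once. */
	printf("%s\n", name ?: "fallback");
	return 0;
}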
1735 +diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
1736 +index 502552d6e9aff..c4aa2cbb92697 100644
1737 +--- a/net/bluetooth/hci_core.c
1738 ++++ b/net/bluetooth/hci_core.c
1739 +@@ -763,7 +763,7 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt)
1740 + hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
1741 + }
1742 +
1743 +- if (hdev->commands[35] & 0x40) {
1744 ++ if (hdev->commands[35] & 0x04) {
1745 + __le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout);
1746 +
1747 + /* Set RPA timeout */
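The Bluetooth one-liner is a wrong-bit fix. The HCI Supported Commands field is a byte array in which each command is assigned an (octet, bit) position by the Bluetooth Core Specification; per the fix, HCI_LE_Set_Resolvable_Private_Address_Timeout sits at octet 35, bit 2, so the correct mask is 0x04 (1 << 2), while the old 0x40 tested bit 6 — a different command — and could issue Set RPA Timeout to controllers that do not support it. A small self-test of the mask arithmetic (illustrative helper, not the kernel's):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static int hci_cmd_supported(const uint8_t *commands, int octet, int bit)
{
	return (commands[octet] >> bit) & 1;
}

int main(void)
{
	uint8_t commands[64] = { 0 };

	commands[35] = 0x04;            /* controller sets octet 35, bit 2 */
	assert(hci_cmd_supported(commands, 35, 2));
	assert(!(commands[35] & 0x40)); /* the old check misses this controller */
	printf("RPA timeout supported: %d\n",
	       hci_cmd_supported(commands, 35, 2));
	return 0;
}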
1748 +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
1749 +index 2ddc27db8c012..d12b4799c3cb7 100644
1750 +--- a/sound/pci/hda/patch_hdmi.c
1751 ++++ b/sound/pci/hda/patch_hdmi.c
1752 +@@ -1736,7 +1736,7 @@ static void silent_stream_disable(struct hda_codec *codec,
1753 + per_pin->silent_stream = false;
1754 +
1755 + unlock_out:
1756 +- mutex_unlock(&spec->pcm_lock);
1757 ++ mutex_unlock(&per_pin->lock);
1758 + }
1759 +
1760 + /* update ELD and jack state via audio component */
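The patch_hdmi.c change fixes a lock/unlock mismatch: silent_stream_disable() acquires per_pin->lock on entry, but its unlock_out exit path released spec->pcm_lock, leaving per_pin->lock held and releasing a mutex that path never took. The goto-unlock idiom is only sound when every exit releases exactly what was acquired, as in this minimal sketch (illustrative names):

#include <pthread.h>

static pthread_mutex_t per_pin_lock = PTHREAD_MUTEX_INITIALIZER;

static int do_work(int fail)
{
	int err = 0;

	pthread_mutex_lock(&per_pin_lock);
	if (fail) {
		err = -1;
		goto unlock_out;  /* early exit still holds the lock */
	}
	/* ... work done under per_pin_lock ... */
unlock_out:
	pthread_mutex_unlock(&per_pin_lock); /* same lock, on every path */
	return err;
}

int main(void)
{
	return do_work(0);
}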
1761 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
1762 +index dde5ba2095415..006af6541dada 100644
1763 +--- a/sound/pci/hda/patch_realtek.c
1764 ++++ b/sound/pci/hda/patch_realtek.c
1765 +@@ -7885,7 +7885,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
1766 + SND_PCI_QUIRK(0x1028, 0x09bf, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
1767 + SND_PCI_QUIRK(0x1028, 0x0a2e, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
1768 + SND_PCI_QUIRK(0x1028, 0x0a30, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
1769 +- SND_PCI_QUIRK(0x1028, 0x0a58, "Dell Precision 3650 Tower", ALC255_FIXUP_DELL_HEADSET_MIC),
1770 ++ SND_PCI_QUIRK(0x1028, 0x0a58, "Dell", ALC255_FIXUP_DELL_HEADSET_MIC),
1771 + SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
1772 + SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
1773 + SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),