From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.10 commit in: /
Date: Tue, 08 Feb 2022 17:54:46
Message-Id: 1644342871.277602a0cea72da681393c0720e62637f700b541.mpagano@gentoo
1 commit: 277602a0cea72da681393c0720e62637f700b541
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Tue Feb 8 17:54:31 2022 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Tue Feb 8 17:54:31 2022 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=277602a0
7
8 Linux patch 5.10.99
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1098_linux-5.10.99.patch | 2812 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 2816 insertions(+)
15
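As a usage note: a minimal sketch, assuming cgit's plain-file URL layout on the 5.10 branch and an unpacked 5.10.98 source tree under /usr/src (both assumptions, not details from this commit), of fetching the incremental patch and dry-running it before applying:

import os
import subprocess
import urllib.request

# Assumptions (not stated in the commit): cgit's "plain" view serves the raw
# file from the 5.10 branch, and an unpacked 5.10.98 tree lives under /usr/src.
PATCH = "1098_linux-5.10.99.patch"
URL = f"https://gitweb.gentoo.org/proj/linux-patches.git/plain/{PATCH}?h=5.10"
SRC_TREE = "/usr/src/linux-5.10.98"

patch_file = os.path.abspath(PATCH)
urllib.request.urlretrieve(URL, patch_file)   # fetch the raw patch

# Dry-run first to confirm it applies cleanly, then apply for real.
for extra in (["--dry-run"], []):
    subprocess.run(["patch", "-p1", *extra, "-i", patch_file],
                   cwd=SRC_TREE, check=True)

Running the dry run from the top of the unpacked tree (hence -p1) is enough to confirm the 5.10.98 -> 5.10.99 delta applies before rebuilding.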
16 diff --git a/0000_README b/0000_README
17 index f1c5090c..c04d5d96 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -435,6 +435,10 @@ Patch: 1097_linux-5.10.98.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.10.98
23
24 +Patch: 1098_linux-5.10.99.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.10.99
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1098_linux-5.10.99.patch b/1098_linux-5.10.99.patch
33 new file mode 100644
34 index 00000000..9c87e134
35 --- /dev/null
36 +++ b/1098_linux-5.10.99.patch
37 @@ -0,0 +1,2812 @@
38 +diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
39 +index 7272a4bd74dd0..28841609aa4f8 100644
40 +--- a/Documentation/gpu/todo.rst
41 ++++ b/Documentation/gpu/todo.rst
42 +@@ -273,24 +273,6 @@ Contact: Daniel Vetter, Noralf Tronnes
43 +
44 + Level: Advanced
45 +
46 +-Garbage collect fbdev scrolling acceleration
47 +---------------------------------------------
48 +-
49 +-Scroll acceleration is disabled in fbcon by hard-wiring p->scrollmode =
50 +-SCROLL_REDRAW. There's a ton of code this will allow us to remove:
51 +-- lots of code in fbcon.c
52 +-- a bunch of the hooks in fbcon_ops, maybe the remaining hooks could be called
53 +- directly instead of the function table (with a switch on p->rotate)
54 +-- fb_copyarea is unused after this, and can be deleted from all drivers
55 +-
56 +-Note that not all acceleration code can be deleted, since clearing and cursor
57 +-support is still accelerated, which might be good candidates for further
58 +-deletion projects.
59 +-
60 +-Contact: Daniel Vetter
61 +-
62 +-Level: Intermediate
63 +-
64 + idr_init_base()
65 + ---------------
66 +
67 +diff --git a/Makefile b/Makefile
68 +index 10827bec74d8f..593638785d293 100644
69 +--- a/Makefile
70 ++++ b/Makefile
71 +@@ -1,7 +1,7 @@
72 + # SPDX-License-Identifier: GPL-2.0
73 + VERSION = 5
74 + PATCHLEVEL = 10
75 +-SUBLEVEL = 98
76 ++SUBLEVEL = 99
77 + EXTRAVERSION =
78 + NAME = Dare mighty things
79 +
80 +diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
81 +index 6525693e7aeaa..5ba13b00e3a71 100644
82 +--- a/arch/x86/events/intel/core.c
83 ++++ b/arch/x86/events/intel/core.c
84 +@@ -4353,6 +4353,19 @@ static __initconst const struct x86_pmu intel_pmu = {
85 + .lbr_read = intel_pmu_lbr_read_64,
86 + .lbr_save = intel_pmu_lbr_save,
87 + .lbr_restore = intel_pmu_lbr_restore,
88 ++
89 ++ /*
90 ++ * SMM has access to all 4 rings and while traditionally SMM code only
91 ++ * ran in CPL0, 2021-era firmware is starting to make use of CPL3 in SMM.
92 ++ *
93 ++ * Since the EVENTSEL.{USR,OS} CPL filtering makes no distinction
94 ++ * between SMM or not, this results in what should be pure userspace
95 ++ * counters including SMM data.
96 ++ *
97 ++ * This is a clear privilege issue, therefore globally disable
98 ++ * counting SMM by default.
99 ++ */
100 ++ .attr_freeze_on_smi = 1,
101 + };
102 +
103 + static __init void intel_clovertown_quirk(void)
104 +diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
105 +index 37129b76135a1..c084899e95825 100644
106 +--- a/arch/x86/events/intel/pt.c
107 ++++ b/arch/x86/events/intel/pt.c
108 +@@ -897,8 +897,9 @@ static void pt_handle_status(struct pt *pt)
109 + * means we are already losing data; need to let the decoder
110 + * know.
111 + */
112 +- if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) ||
113 +- buf->output_off == pt_buffer_region_size(buf)) {
114 ++ if (!buf->single &&
115 ++ (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) ||
116 ++ buf->output_off == pt_buffer_region_size(buf))) {
117 + perf_aux_output_flag(&pt->handle,
118 + PERF_AUX_FLAG_TRUNCATED);
119 + advance++;
120 +diff --git a/block/bio-integrity.c b/block/bio-integrity.c
121 +index 9ffd7e2895547..4f6f140a44e06 100644
122 +--- a/block/bio-integrity.c
123 ++++ b/block/bio-integrity.c
124 +@@ -384,7 +384,7 @@ void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
125 + struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
126 + unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
127 +
128 +- bip->bip_iter.bi_sector += bytes_done >> 9;
129 ++ bip->bip_iter.bi_sector += bio_integrity_intervals(bi, bytes_done >> 9);
130 + bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes);
131 + }
132 +
133 +diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
134 +index afd22c9dbdcfa..798f86fcd50fa 100644
135 +--- a/drivers/dma-buf/dma-heap.c
136 ++++ b/drivers/dma-buf/dma-heap.c
137 +@@ -14,6 +14,7 @@
138 + #include <linux/xarray.h>
139 + #include <linux/list.h>
140 + #include <linux/slab.h>
141 ++#include <linux/nospec.h>
142 + #include <linux/uaccess.h>
143 + #include <linux/syscalls.h>
144 + #include <linux/dma-heap.h>
145 +@@ -123,6 +124,7 @@ static long dma_heap_ioctl(struct file *file, unsigned int ucmd,
146 + if (nr >= ARRAY_SIZE(dma_heap_ioctl_cmds))
147 + return -EINVAL;
148 +
149 ++ nr = array_index_nospec(nr, ARRAY_SIZE(dma_heap_ioctl_cmds));
150 + /* Get the kernel ioctl cmd that matches */
151 + kcmd = dma_heap_ioctl_cmds[nr];
152 +
153 +diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
154 +index e91cf1147a4e0..be38fd71f731a 100644
155 +--- a/drivers/edac/altera_edac.c
156 ++++ b/drivers/edac/altera_edac.c
157 +@@ -349,7 +349,7 @@ static int altr_sdram_probe(struct platform_device *pdev)
158 + if (irq < 0) {
159 + edac_printk(KERN_ERR, EDAC_MC,
160 + "No irq %d in DT\n", irq);
161 +- return -ENODEV;
162 ++ return irq;
163 + }
164 +
165 + /* Arria10 has a 2nd IRQ */
166 +diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
167 +index 1d2c27a00a4a8..cd1eefeff1923 100644
168 +--- a/drivers/edac/xgene_edac.c
169 ++++ b/drivers/edac/xgene_edac.c
170 +@@ -1919,7 +1919,7 @@ static int xgene_edac_probe(struct platform_device *pdev)
171 + irq = platform_get_irq(pdev, i);
172 + if (irq < 0) {
173 + dev_err(&pdev->dev, "No IRQ resource\n");
174 +- rc = -EINVAL;
175 ++ rc = irq;
176 + goto out_err;
177 + }
178 + rc = devm_request_irq(&pdev->dev, irq,
179 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
180 +index a7f8caf1086b9..0e359a299f9ec 100644
181 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
182 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
183 +@@ -3587,6 +3587,26 @@ static bool retrieve_link_cap(struct dc_link *link)
184 + dp_hw_fw_revision.ieee_fw_rev,
185 + sizeof(dp_hw_fw_revision.ieee_fw_rev));
186 +
187 ++ /* Quirk for Apple MBP 2018 15" Retina panels: wrong DP_MAX_LINK_RATE */
188 ++ {
189 ++ uint8_t str_mbp_2018[] = { 101, 68, 21, 103, 98, 97 };
190 ++ uint8_t fwrev_mbp_2018[] = { 7, 4 };
191 ++ uint8_t fwrev_mbp_2018_vega[] = { 8, 4 };
192 ++
193 ++ /* We also check for the firmware revision as 16,1 models have an
194 ++ * identical device id and are incorrectly quirked otherwise.
195 ++ */
196 ++ if ((link->dpcd_caps.sink_dev_id == 0x0010fa) &&
197 ++ !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2018,
198 ++ sizeof(str_mbp_2018)) &&
199 ++ (!memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018,
200 ++ sizeof(fwrev_mbp_2018)) ||
201 ++ !memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018_vega,
202 ++ sizeof(fwrev_mbp_2018_vega)))) {
203 ++ link->reported_link_cap.link_rate = LINK_RATE_RBR2;
204 ++ }
205 ++ }
206 ++
207 + memset(&link->dpcd_caps.dsc_caps, '\0',
208 + sizeof(link->dpcd_caps.dsc_caps));
209 + memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap));
210 +diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
211 +index 0e60aec0bb191..b561e9e00153e 100644
212 +--- a/drivers/gpu/drm/i915/display/intel_overlay.c
213 ++++ b/drivers/gpu/drm/i915/display/intel_overlay.c
214 +@@ -932,6 +932,9 @@ static int check_overlay_dst(struct intel_overlay *overlay,
215 + const struct intel_crtc_state *pipe_config =
216 + overlay->crtc->config;
217 +
218 ++ if (rec->dst_height == 0 || rec->dst_width == 0)
219 ++ return -EINVAL;
220 ++
221 + if (rec->dst_x < pipe_config->pipe_src_w &&
222 + rec->dst_x + rec->dst_width <= pipe_config->pipe_src_w &&
223 + rec->dst_y < pipe_config->pipe_src_h &&
224 +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
225 +index f3c30b2a788e8..8bff14ae16b0e 100644
226 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
227 ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
228 +@@ -38,7 +38,7 @@ nvbios_addr(struct nvkm_bios *bios, u32 *addr, u8 size)
229 + *addr += bios->imaged_addr;
230 + }
231 +
232 +- if (unlikely(*addr + size >= bios->size)) {
233 ++ if (unlikely(*addr + size > bios->size)) {
234 + nvkm_error(&bios->subdev, "OOB %d %08x %08x\n", size, p, *addr);
235 + return false;
236 + }
237 +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
238 +index 4d4ba09f6cf93..ce492134c1e5c 100644
239 +--- a/drivers/infiniband/core/cma.c
240 ++++ b/drivers/infiniband/core/cma.c
241 +@@ -68,8 +68,8 @@ static const char * const cma_events[] = {
242 + [RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit",
243 + };
244 +
245 +-static void cma_set_mgid(struct rdma_id_private *id_priv, struct sockaddr *addr,
246 +- union ib_gid *mgid);
247 ++static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
248 ++ enum ib_gid_type gid_type);
249 +
250 + const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
251 + {
252 +@@ -1840,17 +1840,19 @@ static void destroy_mc(struct rdma_id_private *id_priv,
253 + if (dev_addr->bound_dev_if)
254 + ndev = dev_get_by_index(dev_addr->net,
255 + dev_addr->bound_dev_if);
256 +- if (ndev) {
257 ++ if (ndev && !send_only) {
258 ++ enum ib_gid_type gid_type;
259 + union ib_gid mgid;
260 +
261 +- cma_set_mgid(id_priv, (struct sockaddr *)&mc->addr,
262 +- &mgid);
263 +-
264 +- if (!send_only)
265 +- cma_igmp_send(ndev, &mgid, false);
266 +-
267 +- dev_put(ndev);
268 ++ gid_type = id_priv->cma_dev->default_gid_type
269 ++ [id_priv->id.port_num -
270 ++ rdma_start_port(
271 ++ id_priv->cma_dev->device)];
272 ++ cma_iboe_set_mgid((struct sockaddr *)&mc->addr, &mgid,
273 ++ gid_type);
274 ++ cma_igmp_send(ndev, &mgid, false);
275 + }
276 ++ dev_put(ndev);
277 +
278 + cancel_work_sync(&mc->iboe_join.work);
279 + }
280 +diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
281 +index 2cc785c1970b4..d12018c4c86e9 100644
282 +--- a/drivers/infiniband/core/ucma.c
283 ++++ b/drivers/infiniband/core/ucma.c
284 +@@ -95,6 +95,7 @@ struct ucma_context {
285 + u64 uid;
286 +
287 + struct list_head list;
288 ++ struct list_head mc_list;
289 + struct work_struct close_work;
290 + };
291 +
292 +@@ -105,6 +106,7 @@ struct ucma_multicast {
293 +
294 + u64 uid;
295 + u8 join_state;
296 ++ struct list_head list;
297 + struct sockaddr_storage addr;
298 + };
299 +
300 +@@ -198,6 +200,7 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
301 +
302 + INIT_WORK(&ctx->close_work, ucma_close_id);
303 + init_completion(&ctx->comp);
304 ++ INIT_LIST_HEAD(&ctx->mc_list);
305 + /* So list_del() will work if we don't do ucma_finish_ctx() */
306 + INIT_LIST_HEAD(&ctx->list);
307 + ctx->file = file;
308 +@@ -484,19 +487,19 @@ err1:
309 +
310 + static void ucma_cleanup_multicast(struct ucma_context *ctx)
311 + {
312 +- struct ucma_multicast *mc;
313 +- unsigned long index;
314 ++ struct ucma_multicast *mc, *tmp;
315 +
316 +- xa_for_each(&multicast_table, index, mc) {
317 +- if (mc->ctx != ctx)
318 +- continue;
319 ++ xa_lock(&multicast_table);
320 ++ list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
321 ++ list_del(&mc->list);
322 + /*
323 + * At this point mc->ctx->ref is 0 so the mc cannot leave the
324 + * lock on the reader and this is enough serialization
325 + */
326 +- xa_erase(&multicast_table, index);
327 ++ __xa_erase(&multicast_table, mc->id);
328 + kfree(mc);
329 + }
330 ++ xa_unlock(&multicast_table);
331 + }
332 +
333 + static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
334 +@@ -1469,12 +1472,16 @@ static ssize_t ucma_process_join(struct ucma_file *file,
335 + mc->uid = cmd->uid;
336 + memcpy(&mc->addr, addr, cmd->addr_size);
337 +
338 +- if (xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b,
339 ++ xa_lock(&multicast_table);
340 ++ if (__xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b,
341 + GFP_KERNEL)) {
342 + ret = -ENOMEM;
343 + goto err_free_mc;
344 + }
345 +
346 ++ list_add_tail(&mc->list, &ctx->mc_list);
347 ++ xa_unlock(&multicast_table);
348 ++
349 + mutex_lock(&ctx->mutex);
350 + ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
351 + join_state, mc);
352 +@@ -1500,8 +1507,11 @@ err_leave_multicast:
353 + mutex_unlock(&ctx->mutex);
354 + ucma_cleanup_mc_events(mc);
355 + err_xa_erase:
356 +- xa_erase(&multicast_table, mc->id);
357 ++ xa_lock(&multicast_table);
358 ++ list_del(&mc->list);
359 ++ __xa_erase(&multicast_table, mc->id);
360 + err_free_mc:
361 ++ xa_unlock(&multicast_table);
362 + kfree(mc);
363 + err_put_ctx:
364 + ucma_put_ctx(ctx);
365 +@@ -1569,15 +1579,17 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file,
366 + mc = ERR_PTR(-EINVAL);
367 + else if (!refcount_inc_not_zero(&mc->ctx->ref))
368 + mc = ERR_PTR(-ENXIO);
369 +- else
370 +- __xa_erase(&multicast_table, mc->id);
371 +- xa_unlock(&multicast_table);
372 +
373 + if (IS_ERR(mc)) {
374 ++ xa_unlock(&multicast_table);
375 + ret = PTR_ERR(mc);
376 + goto out;
377 + }
378 +
379 ++ list_del(&mc->list);
380 ++ __xa_erase(&multicast_table, mc->id);
381 ++ xa_unlock(&multicast_table);
382 ++
383 + mutex_lock(&mc->ctx->mutex);
384 + rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
385 + mutex_unlock(&mc->ctx->mutex);
386 +diff --git a/drivers/infiniband/hw/hfi1/ipoib_main.c b/drivers/infiniband/hw/hfi1/ipoib_main.c
387 +index 9f71b9d706bd9..22299b0b7df0e 100644
388 +--- a/drivers/infiniband/hw/hfi1/ipoib_main.c
389 ++++ b/drivers/infiniband/hw/hfi1/ipoib_main.c
390 +@@ -185,12 +185,6 @@ static void hfi1_ipoib_netdev_dtor(struct net_device *dev)
391 + free_percpu(priv->netstats);
392 + }
393 +
394 +-static void hfi1_ipoib_free_rdma_netdev(struct net_device *dev)
395 +-{
396 +- hfi1_ipoib_netdev_dtor(dev);
397 +- free_netdev(dev);
398 +-}
399 +-
400 + static void hfi1_ipoib_set_id(struct net_device *dev, int id)
401 + {
402 + struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
403 +@@ -227,24 +221,23 @@ static int hfi1_ipoib_setup_rn(struct ib_device *device,
404 + priv->port_num = port_num;
405 + priv->netdev_ops = netdev->netdev_ops;
406 +
407 +- netdev->netdev_ops = &hfi1_ipoib_netdev_ops;
408 +-
409 + ib_query_pkey(device, port_num, priv->pkey_index, &priv->pkey);
410 +
411 + rc = hfi1_ipoib_txreq_init(priv);
412 + if (rc) {
413 + dd_dev_err(dd, "IPoIB netdev TX init - failed(%d)\n", rc);
414 +- hfi1_ipoib_free_rdma_netdev(netdev);
415 + return rc;
416 + }
417 +
418 + rc = hfi1_ipoib_rxq_init(netdev);
419 + if (rc) {
420 + dd_dev_err(dd, "IPoIB netdev RX init - failed(%d)\n", rc);
421 +- hfi1_ipoib_free_rdma_netdev(netdev);
422 ++ hfi1_ipoib_txreq_deinit(priv);
423 + return rc;
424 + }
425 +
426 ++ netdev->netdev_ops = &hfi1_ipoib_netdev_ops;
427 ++
428 + netdev->priv_destructor = hfi1_ipoib_netdev_dtor;
429 + netdev->needs_free_netdev = true;
430 +
431 +diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
432 +index 7b11aff8a5ea7..05c7200751e50 100644
433 +--- a/drivers/infiniband/hw/mlx4/main.c
434 ++++ b/drivers/infiniband/hw/mlx4/main.c
435 +@@ -3273,7 +3273,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
436 + case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
437 + ew = kmalloc(sizeof *ew, GFP_ATOMIC);
438 + if (!ew)
439 +- break;
440 ++ return;
441 +
442 + INIT_WORK(&ew->work, handle_port_mgmt_change_event);
443 + memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
444 +diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
445 +index ee48befc89786..09f0dbf941c06 100644
446 +--- a/drivers/infiniband/sw/rdmavt/qp.c
447 ++++ b/drivers/infiniband/sw/rdmavt/qp.c
448 +@@ -3124,6 +3124,8 @@ do_write:
449 + case IB_WR_ATOMIC_FETCH_AND_ADD:
450 + if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
451 + goto inv_err;
452 ++ if (unlikely(wqe->atomic_wr.remote_addr & (sizeof(u64) - 1)))
453 ++ goto inv_err;
454 + if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
455 + wqe->atomic_wr.remote_addr,
456 + wqe->atomic_wr.rkey,
457 +diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
458 +index 368959ae9a8cc..df03d84c6868a 100644
459 +--- a/drivers/infiniband/sw/siw/siw.h
460 ++++ b/drivers/infiniband/sw/siw/siw.h
461 +@@ -644,14 +644,9 @@ static inline struct siw_sqe *orq_get_current(struct siw_qp *qp)
462 + return &qp->orq[qp->orq_get % qp->attrs.orq_size];
463 + }
464 +
465 +-static inline struct siw_sqe *orq_get_tail(struct siw_qp *qp)
466 +-{
467 +- return &qp->orq[qp->orq_put % qp->attrs.orq_size];
468 +-}
469 +-
470 + static inline struct siw_sqe *orq_get_free(struct siw_qp *qp)
471 + {
472 +- struct siw_sqe *orq_e = orq_get_tail(qp);
473 ++ struct siw_sqe *orq_e = &qp->orq[qp->orq_put % qp->attrs.orq_size];
474 +
475 + if (READ_ONCE(orq_e->flags) == 0)
476 + return orq_e;
477 +diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
478 +index 60116f20653c7..875ea6f1b04a2 100644
479 +--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
480 ++++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
481 +@@ -1153,11 +1153,12 @@ static int siw_check_tx_fence(struct siw_qp *qp)
482 +
483 + spin_lock_irqsave(&qp->orq_lock, flags);
484 +
485 +- rreq = orq_get_current(qp);
486 +-
487 + /* free current orq entry */
488 ++ rreq = orq_get_current(qp);
489 + WRITE_ONCE(rreq->flags, 0);
490 +
491 ++ qp->orq_get++;
492 ++
493 + if (qp->tx_ctx.orq_fence) {
494 + if (unlikely(tx_waiting->wr_status != SIW_WR_QUEUED)) {
495 + pr_warn("siw: [QP %u]: fence resume: bad status %d\n",
496 +@@ -1165,10 +1166,12 @@ static int siw_check_tx_fence(struct siw_qp *qp)
497 + rv = -EPROTO;
498 + goto out;
499 + }
500 +- /* resume SQ processing */
501 ++ /* resume SQ processing, if possible */
502 + if (tx_waiting->sqe.opcode == SIW_OP_READ ||
503 + tx_waiting->sqe.opcode == SIW_OP_READ_LOCAL_INV) {
504 +- rreq = orq_get_tail(qp);
505 ++
506 ++ /* SQ processing was stopped because of a full ORQ */
507 ++ rreq = orq_get_free(qp);
508 + if (unlikely(!rreq)) {
509 + pr_warn("siw: [QP %u]: no ORQE\n", qp_id(qp));
510 + rv = -EPROTO;
511 +@@ -1181,15 +1184,14 @@ static int siw_check_tx_fence(struct siw_qp *qp)
512 + resume_tx = 1;
513 +
514 + } else if (siw_orq_empty(qp)) {
515 ++ /*
516 ++ * SQ processing was stopped by fenced work request.
517 ++ * Resume since all previous Read's are now completed.
518 ++ */
519 + qp->tx_ctx.orq_fence = 0;
520 + resume_tx = 1;
521 +- } else {
522 +- pr_warn("siw: [QP %u]: fence resume: orq idx: %d:%d\n",
523 +- qp_id(qp), qp->orq_get, qp->orq_put);
524 +- rv = -EPROTO;
525 + }
526 + }
527 +- qp->orq_get++;
528 + out:
529 + spin_unlock_irqrestore(&qp->orq_lock, flags);
530 +
531 +diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
532 +index 3f31a52f7044f..502e6532dd549 100644
533 +--- a/drivers/iommu/amd/init.c
534 ++++ b/drivers/iommu/amd/init.c
535 +@@ -20,6 +20,7 @@
536 + #include <linux/export.h>
537 + #include <linux/kmemleak.h>
538 + #include <linux/mem_encrypt.h>
539 ++#include <linux/iopoll.h>
540 + #include <asm/pci-direct.h>
541 + #include <asm/iommu.h>
542 + #include <asm/apic.h>
543 +@@ -833,6 +834,7 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu)
544 + status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
545 + if (status & (MMIO_STATUS_GALOG_RUN_MASK))
546 + break;
547 ++ udelay(10);
548 + }
549 +
550 + if (WARN_ON(i >= LOOP_TIMEOUT))
551 +diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
552 +index aedaae4630bc8..b853888774e65 100644
553 +--- a/drivers/iommu/intel/irq_remapping.c
554 ++++ b/drivers/iommu/intel/irq_remapping.c
555 +@@ -576,9 +576,8 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
556 + fn, &intel_ir_domain_ops,
557 + iommu);
558 + if (!iommu->ir_domain) {
559 +- irq_domain_free_fwnode(fn);
560 + pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
561 +- goto out_free_bitmap;
562 ++ goto out_free_fwnode;
563 + }
564 + iommu->ir_msi_domain =
565 + arch_create_remap_msi_irq_domain(iommu->ir_domain,
566 +@@ -602,7 +601,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
567 +
568 + if (dmar_enable_qi(iommu)) {
569 + pr_err("Failed to enable queued invalidation\n");
570 +- goto out_free_bitmap;
571 ++ goto out_free_ir_domain;
572 + }
573 + }
574 +
575 +@@ -626,6 +625,14 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
576 +
577 + return 0;
578 +
579 ++out_free_ir_domain:
580 ++ if (iommu->ir_msi_domain)
581 ++ irq_domain_remove(iommu->ir_msi_domain);
582 ++ iommu->ir_msi_domain = NULL;
583 ++ irq_domain_remove(iommu->ir_domain);
584 ++ iommu->ir_domain = NULL;
585 ++out_free_fwnode:
586 ++ irq_domain_free_fwnode(fn);
587 + out_free_bitmap:
588 + bitmap_free(bitmap);
589 + out_free_pages:
590 +diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
591 +index 2451f61a38e4a..9e32ea9c11647 100644
592 +--- a/drivers/net/dsa/Kconfig
593 ++++ b/drivers/net/dsa/Kconfig
594 +@@ -36,6 +36,7 @@ config NET_DSA_MT7530
595 + tristate "MediaTek MT753x and MT7621 Ethernet switch support"
596 + depends on NET_DSA
597 + select NET_DSA_TAG_MTK
598 ++ select MEDIATEK_GE_PHY
599 + help
600 + This enables support for the MediaTek MT7530, MT7531, and MT7621
601 + Ethernet switch chips.
602 +diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
603 +index 6009d76e41fc4..67f2b9a61463a 100644
604 +--- a/drivers/net/ethernet/google/gve/gve_adminq.c
605 ++++ b/drivers/net/ethernet/google/gve/gve_adminq.c
606 +@@ -141,7 +141,7 @@ static int gve_adminq_parse_err(struct gve_priv *priv, u32 status)
607 + */
608 + static int gve_adminq_kick_and_wait(struct gve_priv *priv)
609 + {
610 +- u32 tail, head;
611 ++ int tail, head;
612 + int i;
613 +
614 + tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
615 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
616 +index e5dbd0bc257e7..82889c363c777 100644
617 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
618 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
619 +@@ -130,6 +130,7 @@
620 +
621 + #define NUM_DWMAC100_DMA_REGS 9
622 + #define NUM_DWMAC1000_DMA_REGS 23
623 ++#define NUM_DWMAC4_DMA_REGS 27
624 +
625 + void dwmac_enable_dma_transmission(void __iomem *ioaddr);
626 + void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
627 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
628 +index 9e54f953634b7..0c0f01f490057 100644
629 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
630 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
631 +@@ -21,10 +21,18 @@
632 + #include "dwxgmac2.h"
633 +
634 + #define REG_SPACE_SIZE 0x1060
635 ++#define GMAC4_REG_SPACE_SIZE 0x116C
636 + #define MAC100_ETHTOOL_NAME "st_mac100"
637 + #define GMAC_ETHTOOL_NAME "st_gmac"
638 + #define XGMAC_ETHTOOL_NAME "st_xgmac"
639 +
640 ++/* Same as DMA_CHAN_BASE_ADDR defined in dwmac4_dma.h
641 ++ *
642 ++ * It is here because dwmac_dma.h and dwmac4_dam.h can not be included at the
643 ++ * same time due to the conflicting macro names.
644 ++ */
645 ++#define GMAC4_DMA_CHAN_BASE_ADDR 0x00001100
646 ++
647 + #define ETHTOOL_DMA_OFFSET 55
648 +
649 + struct stmmac_stats {
650 +@@ -413,6 +421,8 @@ static int stmmac_ethtool_get_regs_len(struct net_device *dev)
651 +
652 + if (priv->plat->has_xgmac)
653 + return XGMAC_REGSIZE * 4;
654 ++ else if (priv->plat->has_gmac4)
655 ++ return GMAC4_REG_SPACE_SIZE;
656 + return REG_SPACE_SIZE;
657 + }
658 +
659 +@@ -425,8 +435,13 @@ static void stmmac_ethtool_gregs(struct net_device *dev,
660 + stmmac_dump_mac_regs(priv, priv->hw, reg_space);
661 + stmmac_dump_dma_regs(priv, priv->ioaddr, reg_space);
662 +
663 +- if (!priv->plat->has_xgmac) {
664 +- /* Copy DMA registers to where ethtool expects them */
665 ++ /* Copy DMA registers to where ethtool expects them */
666 ++ if (priv->plat->has_gmac4) {
667 ++ /* GMAC4 dumps its DMA registers at its DMA_CHAN_BASE_ADDR */
668 ++ memcpy(&reg_space[ETHTOOL_DMA_OFFSET],
669 ++ &reg_space[GMAC4_DMA_CHAN_BASE_ADDR / 4],
670 ++ NUM_DWMAC4_DMA_REGS * 4);
671 ++ } else if (!priv->plat->has_xgmac) {
672 + memcpy(&reg_space[ETHTOOL_DMA_OFFSET],
673 + &reg_space[DMA_BUS_MODE / 4],
674 + NUM_DWMAC1000_DMA_REGS * 4);
675 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
676 +index d291612eeafb9..07b1b8374cd26 100644
677 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
678 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
679 +@@ -142,15 +142,20 @@ static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
680 +
681 + static void get_systime(void __iomem *ioaddr, u64 *systime)
682 + {
683 +- u64 ns;
684 +-
685 +- /* Get the TSSS value */
686 +- ns = readl(ioaddr + PTP_STNSR);
687 +- /* Get the TSS and convert sec time value to nanosecond */
688 +- ns += readl(ioaddr + PTP_STSR) * 1000000000ULL;
689 ++ u64 ns, sec0, sec1;
690 ++
691 ++ /* Get the TSS value */
692 ++ sec1 = readl_relaxed(ioaddr + PTP_STSR);
693 ++ do {
694 ++ sec0 = sec1;
695 ++ /* Get the TSSS value */
696 ++ ns = readl_relaxed(ioaddr + PTP_STNSR);
697 ++ /* Get the TSS value */
698 ++ sec1 = readl_relaxed(ioaddr + PTP_STSR);
699 ++ } while (sec0 != sec1);
700 +
701 + if (systime)
702 +- *systime = ns;
703 ++ *systime = ns + (sec1 * 1000000000ULL);
704 + }
705 +
706 + const struct stmmac_hwtimestamp stmmac_ptp = {
707 +diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
708 +index 4eb64709d44cb..fea8b681f567c 100644
709 +--- a/drivers/net/ieee802154/ca8210.c
710 ++++ b/drivers/net/ieee802154/ca8210.c
711 +@@ -1771,6 +1771,7 @@ static int ca8210_async_xmit_complete(
712 + status
713 + );
714 + if (status != MAC_TRANSACTION_OVERFLOW) {
715 ++ dev_kfree_skb_any(priv->tx_skb);
716 + ieee802154_wake_queue(priv->hw);
717 + return 0;
718 + }
719 +diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
720 +index 080b15fc00601..97981cf7661ad 100644
721 +--- a/drivers/net/ieee802154/mac802154_hwsim.c
722 ++++ b/drivers/net/ieee802154/mac802154_hwsim.c
723 +@@ -786,6 +786,7 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
724 + goto err_pib;
725 + }
726 +
727 ++ pib->channel = 13;
728 + rcu_assign_pointer(phy->pib, pib);
729 + phy->idx = idx;
730 + INIT_LIST_HEAD(&phy->edges);
731 +diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
732 +index 8dc04e2590b18..383231b854642 100644
733 +--- a/drivers/net/ieee802154/mcr20a.c
734 ++++ b/drivers/net/ieee802154/mcr20a.c
735 +@@ -976,8 +976,8 @@ static void mcr20a_hw_setup(struct mcr20a_local *lp)
736 + dev_dbg(printdev(lp), "%s\n", __func__);
737 +
738 + phy->symbol_duration = 16;
739 +- phy->lifs_period = 40;
740 +- phy->sifs_period = 12;
741 ++ phy->lifs_period = 40 * phy->symbol_duration;
742 ++ phy->sifs_period = 12 * phy->symbol_duration;
743 +
744 + hw->flags = IEEE802154_HW_TX_OMIT_CKSUM |
745 + IEEE802154_HW_AFILT |
746 +diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
747 +index c601d3df27220..789a124809e3c 100644
748 +--- a/drivers/net/macsec.c
749 ++++ b/drivers/net/macsec.c
750 +@@ -3869,6 +3869,18 @@ static void macsec_common_dellink(struct net_device *dev, struct list_head *head
751 + struct macsec_dev *macsec = macsec_priv(dev);
752 + struct net_device *real_dev = macsec->real_dev;
753 +
754 ++ /* If h/w offloading is available, propagate to the device */
755 ++ if (macsec_is_offloaded(macsec)) {
756 ++ const struct macsec_ops *ops;
757 ++ struct macsec_context ctx;
758 ++
759 ++ ops = macsec_get_ops(netdev_priv(dev), &ctx);
760 ++ if (ops) {
761 ++ ctx.secy = &macsec->secy;
762 ++ macsec_offload(ops->mdo_del_secy, &ctx);
763 ++ }
764 ++ }
765 ++
766 + unregister_netdevice_queue(dev, head);
767 + list_del_rcu(&macsec->secys);
768 + macsec_del_dev(macsec);
769 +@@ -3883,18 +3895,6 @@ static void macsec_dellink(struct net_device *dev, struct list_head *head)
770 + struct net_device *real_dev = macsec->real_dev;
771 + struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
772 +
773 +- /* If h/w offloading is available, propagate to the device */
774 +- if (macsec_is_offloaded(macsec)) {
775 +- const struct macsec_ops *ops;
776 +- struct macsec_context ctx;
777 +-
778 +- ops = macsec_get_ops(netdev_priv(dev), &ctx);
779 +- if (ops) {
780 +- ctx.secy = &macsec->secy;
781 +- macsec_offload(ops->mdo_del_secy, &ctx);
782 +- }
783 +- }
784 +-
785 + macsec_common_dellink(dev, head);
786 +
787 + if (list_empty(&rxd->secys)) {
788 +@@ -4017,6 +4017,15 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
789 + !macsec_check_offload(macsec->offload, macsec))
790 + return -EOPNOTSUPP;
791 +
792 ++ /* send_sci must be set to true when transmit sci explicitly is set */
793 ++ if ((data && data[IFLA_MACSEC_SCI]) &&
794 ++ (data && data[IFLA_MACSEC_INC_SCI])) {
795 ++ u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);
796 ++
797 ++ if (!send_sci)
798 ++ return -EINVAL;
799 ++ }
800 ++
801 + if (data && data[IFLA_MACSEC_ICV_LEN])
802 + icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
803 + mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
804 +diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
805 +index a9c1e3b4585ec..78467cb3f343e 100644
806 +--- a/drivers/nvme/host/fabrics.h
807 ++++ b/drivers/nvme/host/fabrics.h
808 +@@ -153,6 +153,7 @@ nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
809 + struct nvmf_ctrl_options *opts)
810 + {
811 + if (ctrl->state == NVME_CTRL_DELETING ||
812 ++ ctrl->state == NVME_CTRL_DELETING_NOIO ||
813 + ctrl->state == NVME_CTRL_DEAD ||
814 + strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
815 + strcmp(opts->host->nqn, ctrl->opts->host->nqn) ||
816 +diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
817 +index 40ce18a0d0190..6768b2f03d685 100644
818 +--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
819 ++++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
820 +@@ -1264,16 +1264,18 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
821 + sizeof(*girq->parents),
822 + GFP_KERNEL);
823 + if (!girq->parents) {
824 +- pinctrl_remove_gpio_range(pc->pctl_dev, &pc->gpio_range);
825 +- return -ENOMEM;
826 ++ err = -ENOMEM;
827 ++ goto out_remove;
828 + }
829 +
830 + if (is_7211) {
831 + pc->wake_irq = devm_kcalloc(dev, BCM2835_NUM_IRQS,
832 + sizeof(*pc->wake_irq),
833 + GFP_KERNEL);
834 +- if (!pc->wake_irq)
835 +- return -ENOMEM;
836 ++ if (!pc->wake_irq) {
837 ++ err = -ENOMEM;
838 ++ goto out_remove;
839 ++ }
840 + }
841 +
842 + /*
843 +@@ -1297,8 +1299,10 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
844 +
845 + len = strlen(dev_name(pc->dev)) + 16;
846 + name = devm_kzalloc(pc->dev, len, GFP_KERNEL);
847 +- if (!name)
848 +- return -ENOMEM;
849 ++ if (!name) {
850 ++ err = -ENOMEM;
851 ++ goto out_remove;
852 ++ }
853 +
854 + snprintf(name, len, "%s:bank%d", dev_name(pc->dev), i);
855 +
856 +@@ -1317,11 +1321,14 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
857 + err = gpiochip_add_data(&pc->gpio_chip, pc);
858 + if (err) {
859 + dev_err(dev, "could not add GPIO chip\n");
860 +- pinctrl_remove_gpio_range(pc->pctl_dev, &pc->gpio_range);
861 +- return err;
862 ++ goto out_remove;
863 + }
864 +
865 + return 0;
866 ++
867 ++out_remove:
868 ++ pinctrl_remove_gpio_range(pc->pctl_dev, &pc->gpio_range);
869 ++ return err;
870 + }
871 +
872 + static struct platform_driver bcm2835_pinctrl_driver = {
873 +diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
874 +index b6ef1911c1dd1..348c670a7b07d 100644
875 +--- a/drivers/pinctrl/intel/pinctrl-intel.c
876 ++++ b/drivers/pinctrl/intel/pinctrl-intel.c
877 +@@ -441,8 +441,8 @@ static void intel_gpio_set_gpio_mode(void __iomem *padcfg0)
878 + value &= ~PADCFG0_PMODE_MASK;
879 + value |= PADCFG0_PMODE_GPIO;
880 +
881 +- /* Disable input and output buffers */
882 +- value |= PADCFG0_GPIORXDIS;
883 ++ /* Disable TX buffer and enable RX (this will be input) */
884 ++ value &= ~PADCFG0_GPIORXDIS;
885 + value |= PADCFG0_GPIOTXDIS;
886 +
887 + /* Disable SCI/SMI/NMI generation */
888 +@@ -487,9 +487,6 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
889 +
890 + intel_gpio_set_gpio_mode(padcfg0);
891 +
892 +- /* Disable TX buffer and enable RX (this will be input) */
893 +- __intel_gpio_set_direction(padcfg0, true);
894 +-
895 + raw_spin_unlock_irqrestore(&pctrl->lock, flags);
896 +
897 + return 0;
898 +@@ -1105,9 +1102,6 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned int type)
899 +
900 + intel_gpio_set_gpio_mode(reg);
901 +
902 +- /* Disable TX buffer and enable RX (this will be input) */
903 +- __intel_gpio_set_direction(reg, true);
904 +-
905 + value = readl(reg);
906 +
907 + value &= ~(PADCFG0_RXEVCFG_MASK | PADCFG0_RXINV);
908 +@@ -1207,6 +1201,39 @@ static irqreturn_t intel_gpio_irq(int irq, void *data)
909 + return IRQ_RETVAL(ret);
910 + }
911 +
912 ++static void intel_gpio_irq_init(struct intel_pinctrl *pctrl)
913 ++{
914 ++ int i;
915 ++
916 ++ for (i = 0; i < pctrl->ncommunities; i++) {
917 ++ const struct intel_community *community;
918 ++ void __iomem *base;
919 ++ unsigned int gpp;
920 ++
921 ++ community = &pctrl->communities[i];
922 ++ base = community->regs;
923 ++
924 ++ for (gpp = 0; gpp < community->ngpps; gpp++) {
925 ++ /* Mask and clear all interrupts */
926 ++ writel(0, base + community->ie_offset + gpp * 4);
927 ++ writel(0xffff, base + community->is_offset + gpp * 4);
928 ++ }
929 ++ }
930 ++}
931 ++
932 ++static int intel_gpio_irq_init_hw(struct gpio_chip *gc)
933 ++{
934 ++ struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
935 ++
936 ++ /*
937 ++ * Make sure the interrupt lines are in a proper state before
938 ++ * further configuration.
939 ++ */
940 ++ intel_gpio_irq_init(pctrl);
941 ++
942 ++ return 0;
943 ++}
944 ++
945 + static int intel_gpio_add_community_ranges(struct intel_pinctrl *pctrl,
946 + const struct intel_community *community)
947 + {
948 +@@ -1311,6 +1338,7 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
949 + girq->num_parents = 0;
950 + girq->default_type = IRQ_TYPE_NONE;
951 + girq->handler = handle_bad_irq;
952 ++ girq->init_hw = intel_gpio_irq_init_hw;
953 +
954 + ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl);
955 + if (ret) {
956 +@@ -1640,26 +1668,6 @@ int intel_pinctrl_suspend_noirq(struct device *dev)
957 + }
958 + EXPORT_SYMBOL_GPL(intel_pinctrl_suspend_noirq);
959 +
960 +-static void intel_gpio_irq_init(struct intel_pinctrl *pctrl)
961 +-{
962 +- size_t i;
963 +-
964 +- for (i = 0; i < pctrl->ncommunities; i++) {
965 +- const struct intel_community *community;
966 +- void __iomem *base;
967 +- unsigned int gpp;
968 +-
969 +- community = &pctrl->communities[i];
970 +- base = community->regs;
971 +-
972 +- for (gpp = 0; gpp < community->ngpps; gpp++) {
973 +- /* Mask and clear all interrupts */
974 +- writel(0, base + community->ie_offset + gpp * 4);
975 +- writel(0xffff, base + community->is_offset + gpp * 4);
976 +- }
977 +- }
978 +-}
979 +-
980 + static bool intel_gpio_update_reg(void __iomem *reg, u32 mask, u32 value)
981 + {
982 + u32 curr, updated;
983 +diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c
984 +index 2ecd8752b088b..5add637c9ad23 100644
985 +--- a/drivers/rtc/rtc-mc146818-lib.c
986 ++++ b/drivers/rtc/rtc-mc146818-lib.c
987 +@@ -83,7 +83,7 @@ unsigned int mc146818_get_time(struct rtc_time *time)
988 + time->tm_year += real_year - 72;
989 + #endif
990 +
991 +- if (century > 20)
992 ++ if (century > 19)
993 + time->tm_year += (century - 19) * 100;
994 +
995 + /*
996 +diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
997 +index 052e7879704a5..8f47bf83694f6 100644
998 +--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
999 ++++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
1000 +@@ -506,7 +506,8 @@ static int bnx2fc_l2_rcv_thread(void *arg)
1001 +
1002 + static void bnx2fc_recv_frame(struct sk_buff *skb)
1003 + {
1004 +- u32 fr_len;
1005 ++ u64 crc_err;
1006 ++ u32 fr_len, fr_crc;
1007 + struct fc_lport *lport;
1008 + struct fcoe_rcv_info *fr;
1009 + struct fc_stats *stats;
1010 +@@ -540,6 +541,11 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
1011 + skb_pull(skb, sizeof(struct fcoe_hdr));
1012 + fr_len = skb->len - sizeof(struct fcoe_crc_eof);
1013 +
1014 ++ stats = per_cpu_ptr(lport->stats, get_cpu());
1015 ++ stats->RxFrames++;
1016 ++ stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
1017 ++ put_cpu();
1018 ++
1019 + fp = (struct fc_frame *)skb;
1020 + fc_frame_init(fp);
1021 + fr_dev(fp) = lport;
1022 +@@ -622,16 +628,15 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
1023 + return;
1024 + }
1025 +
1026 +- stats = per_cpu_ptr(lport->stats, smp_processor_id());
1027 +- stats->RxFrames++;
1028 +- stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
1029 ++ fr_crc = le32_to_cpu(fr_crc(fp));
1030 +
1031 +- if (le32_to_cpu(fr_crc(fp)) !=
1032 +- ~crc32(~0, skb->data, fr_len)) {
1033 +- if (stats->InvalidCRCCount < 5)
1034 ++ if (unlikely(fr_crc != ~crc32(~0, skb->data, fr_len))) {
1035 ++ stats = per_cpu_ptr(lport->stats, get_cpu());
1036 ++ crc_err = (stats->InvalidCRCCount++);
1037 ++ put_cpu();
1038 ++ if (crc_err < 5)
1039 + printk(KERN_WARNING PFX "dropping frame with "
1040 + "CRC error\n");
1041 +- stats->InvalidCRCCount++;
1042 + kfree_skb(skb);
1043 + return;
1044 + }
1045 +diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
1046 +index 670cc82d17dc2..ca75b14931ec9 100644
1047 +--- a/drivers/soc/mediatek/mtk-scpsys.c
1048 ++++ b/drivers/soc/mediatek/mtk-scpsys.c
1049 +@@ -411,17 +411,12 @@ out:
1050 + return ret;
1051 + }
1052 +
1053 +-static int init_clks(struct platform_device *pdev, struct clk **clk)
1054 ++static void init_clks(struct platform_device *pdev, struct clk **clk)
1055 + {
1056 + int i;
1057 +
1058 +- for (i = CLK_NONE + 1; i < CLK_MAX; i++) {
1059 ++ for (i = CLK_NONE + 1; i < CLK_MAX; i++)
1060 + clk[i] = devm_clk_get(&pdev->dev, clk_names[i]);
1061 +- if (IS_ERR(clk[i]))
1062 +- return PTR_ERR(clk[i]);
1063 +- }
1064 +-
1065 +- return 0;
1066 + }
1067 +
1068 + static struct scp *init_scp(struct platform_device *pdev,
1069 +@@ -431,7 +426,7 @@ static struct scp *init_scp(struct platform_device *pdev,
1070 + {
1071 + struct genpd_onecell_data *pd_data;
1072 + struct resource *res;
1073 +- int i, j, ret;
1074 ++ int i, j;
1075 + struct scp *scp;
1076 + struct clk *clk[CLK_MAX];
1077 +
1078 +@@ -486,9 +481,7 @@ static struct scp *init_scp(struct platform_device *pdev,
1079 +
1080 + pd_data->num_domains = num;
1081 +
1082 +- ret = init_clks(pdev, clk);
1083 +- if (ret)
1084 +- return ERR_PTR(ret);
1085 ++ init_clks(pdev, clk);
1086 +
1087 + for (i = 0; i < num; i++) {
1088 + struct scp_domain *scpd = &scp->domains[i];
1089 +diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
1090 +index 3c0ae6dbc43e2..4a80f043b7b17 100644
1091 +--- a/drivers/spi/spi-bcm-qspi.c
1092 ++++ b/drivers/spi/spi-bcm-qspi.c
1093 +@@ -551,7 +551,7 @@ static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs)
1094 + u32 rd = 0;
1095 + u32 wr = 0;
1096 +
1097 +- if (qspi->base[CHIP_SELECT]) {
1098 ++ if (cs >= 0 && qspi->base[CHIP_SELECT]) {
1099 + rd = bcm_qspi_read(qspi, CHIP_SELECT, 0);
1100 + wr = (rd & ~0xff) | (1 << cs);
1101 + if (rd == wr)
1102 +diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
1103 +index c208efeadd184..0bc7daa7afc83 100644
1104 +--- a/drivers/spi/spi-meson-spicc.c
1105 ++++ b/drivers/spi/spi-meson-spicc.c
1106 +@@ -693,6 +693,11 @@ static int meson_spicc_probe(struct platform_device *pdev)
1107 + writel_relaxed(0, spicc->base + SPICC_INTREG);
1108 +
1109 + irq = platform_get_irq(pdev, 0);
1110 ++ if (irq < 0) {
1111 ++ ret = irq;
1112 ++ goto out_master;
1113 ++ }
1114 ++
1115 + ret = devm_request_irq(&pdev->dev, irq, meson_spicc_irq,
1116 + 0, NULL, spicc);
1117 + if (ret) {
1118 +diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
1119 +index 83e56ee62649d..92a09dfb99a8e 100644
1120 +--- a/drivers/spi/spi-mt65xx.c
1121 ++++ b/drivers/spi/spi-mt65xx.c
1122 +@@ -540,7 +540,7 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
1123 + else
1124 + mdata->state = MTK_SPI_IDLE;
1125 +
1126 +- if (!master->can_dma(master, master->cur_msg->spi, trans)) {
1127 ++ if (!master->can_dma(master, NULL, trans)) {
1128 + if (trans->rx_buf) {
1129 + cnt = mdata->xfer_len / 4;
1130 + ioread32_rep(mdata->base + SPI_RX_DATA_REG,
1131 +diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c
1132 +index e5c234aecf675..ad0088e394723 100644
1133 +--- a/drivers/spi/spi-uniphier.c
1134 ++++ b/drivers/spi/spi-uniphier.c
1135 +@@ -726,7 +726,7 @@ static int uniphier_spi_probe(struct platform_device *pdev)
1136 + if (ret) {
1137 + dev_err(&pdev->dev, "failed to get TX DMA capacities: %d\n",
1138 + ret);
1139 +- goto out_disable_clk;
1140 ++ goto out_release_dma;
1141 + }
1142 + dma_tx_burst = caps.max_burst;
1143 + }
1144 +@@ -735,7 +735,7 @@ static int uniphier_spi_probe(struct platform_device *pdev)
1145 + if (IS_ERR_OR_NULL(master->dma_rx)) {
1146 + if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) {
1147 + ret = -EPROBE_DEFER;
1148 +- goto out_disable_clk;
1149 ++ goto out_release_dma;
1150 + }
1151 + master->dma_rx = NULL;
1152 + dma_rx_burst = INT_MAX;
1153 +@@ -744,7 +744,7 @@ static int uniphier_spi_probe(struct platform_device *pdev)
1154 + if (ret) {
1155 + dev_err(&pdev->dev, "failed to get RX DMA capacities: %d\n",
1156 + ret);
1157 +- goto out_disable_clk;
1158 ++ goto out_release_dma;
1159 + }
1160 + dma_rx_burst = caps.max_burst;
1161 + }
1162 +@@ -753,10 +753,20 @@ static int uniphier_spi_probe(struct platform_device *pdev)
1163 +
1164 + ret = devm_spi_register_master(&pdev->dev, master);
1165 + if (ret)
1166 +- goto out_disable_clk;
1167 ++ goto out_release_dma;
1168 +
1169 + return 0;
1170 +
1171 ++out_release_dma:
1172 ++ if (!IS_ERR_OR_NULL(master->dma_rx)) {
1173 ++ dma_release_channel(master->dma_rx);
1174 ++ master->dma_rx = NULL;
1175 ++ }
1176 ++ if (!IS_ERR_OR_NULL(master->dma_tx)) {
1177 ++ dma_release_channel(master->dma_tx);
1178 ++ master->dma_tx = NULL;
1179 ++ }
1180 ++
1181 + out_disable_clk:
1182 + clk_disable_unprepare(priv->clk);
1183 +
1184 +diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
1185 +index ee33b8ec62bb2..47c4939577725 100644
1186 +--- a/drivers/video/console/Kconfig
1187 ++++ b/drivers/video/console/Kconfig
1188 +@@ -78,6 +78,26 @@ config FRAMEBUFFER_CONSOLE
1189 + help
1190 + Low-level framebuffer-based console driver.
1191 +
1192 ++config FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION
1193 ++ bool "Enable legacy fbcon hardware acceleration code"
1194 ++ depends on FRAMEBUFFER_CONSOLE
1195 ++ default y if PARISC
1196 ++ default n
1197 ++ help
1198 ++ This option enables the fbcon (framebuffer text-based) hardware
1199 ++ acceleration for graphics drivers which were written for the fbdev
1200 ++ graphics interface.
1201 ++
1202 ++ On modern machines, on mainstream machines (like x86-64) or when
1203 ++ using a modern Linux distribution those fbdev drivers usually aren't used.
1204 ++ So enabling this option wouldn't have any effect, which is why you want
1205 ++ to disable this option on such newer machines.
1206 ++
1207 ++ If you compile this kernel for older machines which still require the
1208 ++ fbdev drivers, you may want to say Y.
1209 ++
1210 ++ If unsure, select n.
1211 ++
1212 + config FRAMEBUFFER_CONSOLE_DETECT_PRIMARY
1213 + bool "Map the console to the primary display device"
1214 + depends on FRAMEBUFFER_CONSOLE
1215 +diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
1216 +index 42c72d051158f..f102519ccefb4 100644
1217 +--- a/drivers/video/fbdev/core/fbcon.c
1218 ++++ b/drivers/video/fbdev/core/fbcon.c
1219 +@@ -1033,7 +1033,7 @@ static void fbcon_init(struct vc_data *vc, int init)
1220 + struct vc_data *svc = *default_mode;
1221 + struct fbcon_display *t, *p = &fb_display[vc->vc_num];
1222 + int logo = 1, new_rows, new_cols, rows, cols, charcnt = 256;
1223 +- int ret;
1224 ++ int cap, ret;
1225 +
1226 + if (WARN_ON(info_idx == -1))
1227 + return;
1228 +@@ -1042,6 +1042,7 @@ static void fbcon_init(struct vc_data *vc, int init)
1229 + con2fb_map[vc->vc_num] = info_idx;
1230 +
1231 + info = registered_fb[con2fb_map[vc->vc_num]];
1232 ++ cap = info->flags;
1233 +
1234 + if (logo_shown < 0 && console_loglevel <= CONSOLE_LOGLEVEL_QUIET)
1235 + logo_shown = FBCON_LOGO_DONTSHOW;
1236 +@@ -1146,13 +1147,13 @@ static void fbcon_init(struct vc_data *vc, int init)
1237 +
1238 + ops->graphics = 0;
1239 +
1240 +- /*
1241 +- * No more hw acceleration for fbcon.
1242 +- *
1243 +- * FIXME: Garbage collect all the now dead code after sufficient time
1244 +- * has passed.
1245 +- */
1246 +- p->scrollmode = SCROLL_REDRAW;
1247 ++#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION
1248 ++ if ((cap & FBINFO_HWACCEL_COPYAREA) &&
1249 ++ !(cap & FBINFO_HWACCEL_DISABLED))
1250 ++ p->scrollmode = SCROLL_MOVE;
1251 ++ else /* default to something safe */
1252 ++ p->scrollmode = SCROLL_REDRAW;
1253 ++#endif
1254 +
1255 + /*
1256 + * ++guenther: console.c:vc_allocate() relies on initializing
1257 +@@ -1718,7 +1719,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
1258 + count = vc->vc_rows;
1259 + if (logo_shown >= 0)
1260 + goto redraw_up;
1261 +- switch (p->scrollmode) {
1262 ++ switch (fb_scrollmode(p)) {
1263 + case SCROLL_MOVE:
1264 + fbcon_redraw_blit(vc, info, p, t, b - t - count,
1265 + count);
1266 +@@ -1808,7 +1809,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
1267 + count = vc->vc_rows;
1268 + if (logo_shown >= 0)
1269 + goto redraw_down;
1270 +- switch (p->scrollmode) {
1271 ++ switch (fb_scrollmode(p)) {
1272 + case SCROLL_MOVE:
1273 + fbcon_redraw_blit(vc, info, p, b - 1, b - t - count,
1274 + -count);
1275 +@@ -1959,6 +1960,48 @@ static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy,
1276 + height, width);
1277 + }
1278 +
1279 ++static void updatescrollmode_accel(struct fbcon_display *p,
1280 ++ struct fb_info *info,
1281 ++ struct vc_data *vc)
1282 ++{
1283 ++#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION
1284 ++ struct fbcon_ops *ops = info->fbcon_par;
1285 ++ int cap = info->flags;
1286 ++ u16 t = 0;
1287 ++ int ypan = FBCON_SWAP(ops->rotate, info->fix.ypanstep,
1288 ++ info->fix.xpanstep);
1289 ++ int ywrap = FBCON_SWAP(ops->rotate, info->fix.ywrapstep, t);
1290 ++ int yres = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
1291 ++ int vyres = FBCON_SWAP(ops->rotate, info->var.yres_virtual,
1292 ++ info->var.xres_virtual);
1293 ++ int good_pan = (cap & FBINFO_HWACCEL_YPAN) &&
1294 ++ divides(ypan, vc->vc_font.height) && vyres > yres;
1295 ++ int good_wrap = (cap & FBINFO_HWACCEL_YWRAP) &&
1296 ++ divides(ywrap, vc->vc_font.height) &&
1297 ++ divides(vc->vc_font.height, vyres) &&
1298 ++ divides(vc->vc_font.height, yres);
1299 ++ int reading_fast = cap & FBINFO_READS_FAST;
1300 ++ int fast_copyarea = (cap & FBINFO_HWACCEL_COPYAREA) &&
1301 ++ !(cap & FBINFO_HWACCEL_DISABLED);
1302 ++ int fast_imageblit = (cap & FBINFO_HWACCEL_IMAGEBLIT) &&
1303 ++ !(cap & FBINFO_HWACCEL_DISABLED);
1304 ++
1305 ++ if (good_wrap || good_pan) {
1306 ++ if (reading_fast || fast_copyarea)
1307 ++ p->scrollmode = good_wrap ?
1308 ++ SCROLL_WRAP_MOVE : SCROLL_PAN_MOVE;
1309 ++ else
1310 ++ p->scrollmode = good_wrap ? SCROLL_REDRAW :
1311 ++ SCROLL_PAN_REDRAW;
1312 ++ } else {
1313 ++ if (reading_fast || (fast_copyarea && !fast_imageblit))
1314 ++ p->scrollmode = SCROLL_MOVE;
1315 ++ else
1316 ++ p->scrollmode = SCROLL_REDRAW;
1317 ++ }
1318 ++#endif
1319 ++}
1320 ++
1321 + static void updatescrollmode(struct fbcon_display *p,
1322 + struct fb_info *info,
1323 + struct vc_data *vc)
1324 +@@ -1974,6 +2017,9 @@ static void updatescrollmode(struct fbcon_display *p,
1325 + p->vrows -= (yres - (fh * vc->vc_rows)) / fh;
1326 + if ((yres % fh) && (vyres % fh < yres % fh))
1327 + p->vrows--;
1328 ++
1329 ++ /* update scrollmode in case hardware acceleration is used */
1330 ++ updatescrollmode_accel(p, info, vc);
1331 + }
1332 +
1333 + #define PITCH(w) (((w) + 7) >> 3)
1334 +@@ -2134,7 +2180,7 @@ static int fbcon_switch(struct vc_data *vc)
1335 +
1336 + updatescrollmode(p, info, vc);
1337 +
1338 +- switch (p->scrollmode) {
1339 ++ switch (fb_scrollmode(p)) {
1340 + case SCROLL_WRAP_MOVE:
1341 + scrollback_phys_max = p->vrows - vc->vc_rows;
1342 + break;
1343 +diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h
1344 +index 9315b360c8981..0f16cbc99e6a4 100644
1345 +--- a/drivers/video/fbdev/core/fbcon.h
1346 ++++ b/drivers/video/fbdev/core/fbcon.h
1347 +@@ -29,7 +29,9 @@ struct fbcon_display {
1348 + /* Filled in by the low-level console driver */
1349 + const u_char *fontdata;
1350 + int userfont; /* != 0 if fontdata kmalloc()ed */
1351 +- u_short scrollmode; /* Scroll Method */
1352 ++#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION
1353 ++ u_short scrollmode; /* Scroll Method, use fb_scrollmode() */
1354 ++#endif
1355 + u_short inverse; /* != 0 text black on white as default */
1356 + short yscroll; /* Hardware scrolling */
1357 + int vrows; /* number of virtual rows */
1358 +@@ -208,6 +210,17 @@ static inline int attr_col_ec(int shift, struct vc_data *vc,
1359 + #define SCROLL_REDRAW 0x004
1360 + #define SCROLL_PAN_REDRAW 0x005
1361 +
1362 ++static inline u_short fb_scrollmode(struct fbcon_display *fb)
1363 ++{
1364 ++#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION
1365 ++ return fb->scrollmode;
1366 ++#else
1367 ++ /* hardcoded to SCROLL_REDRAW if acceleration was disabled. */
1368 ++ return SCROLL_REDRAW;
1369 ++#endif
1370 ++}
1371 ++
1372 ++
1373 + #ifdef CONFIG_FB_TILEBLITTING
1374 + extern void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info);
1375 + #endif
1376 +diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c
1377 +index bbd869efd03bc..f75b24c32d497 100644
1378 +--- a/drivers/video/fbdev/core/fbcon_ccw.c
1379 ++++ b/drivers/video/fbdev/core/fbcon_ccw.c
1380 +@@ -65,7 +65,7 @@ static void ccw_bmove(struct vc_data *vc, struct fb_info *info, int sy,
1381 + {
1382 + struct fbcon_ops *ops = info->fbcon_par;
1383 + struct fb_copyarea area;
1384 +- u32 vyres = GETVYRES(ops->p->scrollmode, info);
1385 ++ u32 vyres = GETVYRES(ops->p, info);
1386 +
1387 + area.sx = sy * vc->vc_font.height;
1388 + area.sy = vyres - ((sx + width) * vc->vc_font.width);
1389 +@@ -83,7 +83,7 @@ static void ccw_clear(struct vc_data *vc, struct fb_info *info, int sy,
1390 + struct fbcon_ops *ops = info->fbcon_par;
1391 + struct fb_fillrect region;
1392 + int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
1393 +- u32 vyres = GETVYRES(ops->p->scrollmode, info);
1394 ++ u32 vyres = GETVYRES(ops->p, info);
1395 +
1396 + region.color = attr_bgcol_ec(bgshift,vc,info);
1397 + region.dx = sy * vc->vc_font.height;
1398 +@@ -140,7 +140,7 @@ static void ccw_putcs(struct vc_data *vc, struct fb_info *info,
1399 + u32 cnt, pitch, size;
1400 + u32 attribute = get_attribute(info, scr_readw(s));
1401 + u8 *dst, *buf = NULL;
1402 +- u32 vyres = GETVYRES(ops->p->scrollmode, info);
1403 ++ u32 vyres = GETVYRES(ops->p, info);
1404 +
1405 + if (!ops->fontbuffer)
1406 + return;
1407 +@@ -229,7 +229,7 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
1408 + int attribute, use_sw = vc->vc_cursor_type & CUR_SW;
1409 + int err = 1, dx, dy;
1410 + char *src;
1411 +- u32 vyres = GETVYRES(ops->p->scrollmode, info);
1412 ++ u32 vyres = GETVYRES(ops->p, info);
1413 +
1414 + if (!ops->fontbuffer)
1415 + return;
1416 +@@ -387,7 +387,7 @@ static int ccw_update_start(struct fb_info *info)
1417 + {
1418 + struct fbcon_ops *ops = info->fbcon_par;
1419 + u32 yoffset;
1420 +- u32 vyres = GETVYRES(ops->p->scrollmode, info);
1421 ++ u32 vyres = GETVYRES(ops->p, info);
1422 + int err;
1423 +
1424 + yoffset = (vyres - info->var.yres) - ops->var.xoffset;
1425 +diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c
1426 +index a34cbe8e98744..cf03dc62f35d3 100644
1427 +--- a/drivers/video/fbdev/core/fbcon_cw.c
1428 ++++ b/drivers/video/fbdev/core/fbcon_cw.c
1429 +@@ -50,7 +50,7 @@ static void cw_bmove(struct vc_data *vc, struct fb_info *info, int sy,
1430 + {
1431 + struct fbcon_ops *ops = info->fbcon_par;
1432 + struct fb_copyarea area;
1433 +- u32 vxres = GETVXRES(ops->p->scrollmode, info);
1434 ++ u32 vxres = GETVXRES(ops->p, info);
1435 +
1436 + area.sx = vxres - ((sy + height) * vc->vc_font.height);
1437 + area.sy = sx * vc->vc_font.width;
1438 +@@ -68,7 +68,7 @@ static void cw_clear(struct vc_data *vc, struct fb_info *info, int sy,
1439 + struct fbcon_ops *ops = info->fbcon_par;
1440 + struct fb_fillrect region;
1441 + int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
1442 +- u32 vxres = GETVXRES(ops->p->scrollmode, info);
1443 ++ u32 vxres = GETVXRES(ops->p, info);
1444 +
1445 + region.color = attr_bgcol_ec(bgshift,vc,info);
1446 + region.dx = vxres - ((sy + height) * vc->vc_font.height);
1447 +@@ -125,7 +125,7 @@ static void cw_putcs(struct vc_data *vc, struct fb_info *info,
1448 + u32 cnt, pitch, size;
1449 + u32 attribute = get_attribute(info, scr_readw(s));
1450 + u8 *dst, *buf = NULL;
1451 +- u32 vxres = GETVXRES(ops->p->scrollmode, info);
1452 ++ u32 vxres = GETVXRES(ops->p, info);
1453 +
1454 + if (!ops->fontbuffer)
1455 + return;
1456 +@@ -212,7 +212,7 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
1457 + int attribute, use_sw = vc->vc_cursor_type & CUR_SW;
1458 + int err = 1, dx, dy;
1459 + char *src;
1460 +- u32 vxres = GETVXRES(ops->p->scrollmode, info);
1461 ++ u32 vxres = GETVXRES(ops->p, info);
1462 +
1463 + if (!ops->fontbuffer)
1464 + return;
1465 +@@ -369,7 +369,7 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
1466 + static int cw_update_start(struct fb_info *info)
1467 + {
1468 + struct fbcon_ops *ops = info->fbcon_par;
1469 +- u32 vxres = GETVXRES(ops->p->scrollmode, info);
1470 ++ u32 vxres = GETVXRES(ops->p, info);
1471 + u32 xoffset;
1472 + int err;
1473 +
1474 +diff --git a/drivers/video/fbdev/core/fbcon_rotate.h b/drivers/video/fbdev/core/fbcon_rotate.h
1475 +index e233444cda664..01cbe303b8a29 100644
1476 +--- a/drivers/video/fbdev/core/fbcon_rotate.h
1477 ++++ b/drivers/video/fbdev/core/fbcon_rotate.h
1478 +@@ -12,11 +12,11 @@
1479 + #define _FBCON_ROTATE_H
1480 +
1481 + #define GETVYRES(s,i) ({ \
1482 +- (s == SCROLL_REDRAW || s == SCROLL_MOVE) ? \
1483 ++ (fb_scrollmode(s) == SCROLL_REDRAW || fb_scrollmode(s) == SCROLL_MOVE) ? \
1484 + (i)->var.yres : (i)->var.yres_virtual; })
1485 +
1486 + #define GETVXRES(s,i) ({ \
1487 +- (s == SCROLL_REDRAW || s == SCROLL_MOVE || !(i)->fix.xpanstep) ? \
1488 ++ (fb_scrollmode(s) == SCROLL_REDRAW || fb_scrollmode(s) == SCROLL_MOVE || !(i)->fix.xpanstep) ? \
1489 + (i)->var.xres : (i)->var.xres_virtual; })
1490 +
1491 +
1492 +diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c
1493 +index 199cbc7abe353..c5d2da731d686 100644
1494 +--- a/drivers/video/fbdev/core/fbcon_ud.c
1495 ++++ b/drivers/video/fbdev/core/fbcon_ud.c
1496 +@@ -50,8 +50,8 @@ static void ud_bmove(struct vc_data *vc, struct fb_info *info, int sy,
1497 + {
1498 + struct fbcon_ops *ops = info->fbcon_par;
1499 + struct fb_copyarea area;
1500 +- u32 vyres = GETVYRES(ops->p->scrollmode, info);
1501 +- u32 vxres = GETVXRES(ops->p->scrollmode, info);
1502 ++ u32 vyres = GETVYRES(ops->p, info);
1503 ++ u32 vxres = GETVXRES(ops->p, info);
1504 +
1505 + area.sy = vyres - ((sy + height) * vc->vc_font.height);
1506 + area.sx = vxres - ((sx + width) * vc->vc_font.width);
1507 +@@ -69,8 +69,8 @@ static void ud_clear(struct vc_data *vc, struct fb_info *info, int sy,
1508 + struct fbcon_ops *ops = info->fbcon_par;
1509 + struct fb_fillrect region;
1510 + int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
1511 +- u32 vyres = GETVYRES(ops->p->scrollmode, info);
1512 +- u32 vxres = GETVXRES(ops->p->scrollmode, info);
1513 ++ u32 vyres = GETVYRES(ops->p, info);
1514 ++ u32 vxres = GETVXRES(ops->p, info);
1515 +
1516 + region.color = attr_bgcol_ec(bgshift,vc,info);
1517 + region.dy = vyres - ((sy + height) * vc->vc_font.height);
1518 +@@ -162,8 +162,8 @@ static void ud_putcs(struct vc_data *vc, struct fb_info *info,
1519 + u32 mod = vc->vc_font.width % 8, cnt, pitch, size;
1520 + u32 attribute = get_attribute(info, scr_readw(s));
1521 + u8 *dst, *buf = NULL;
1522 +- u32 vyres = GETVYRES(ops->p->scrollmode, info);
1523 +- u32 vxres = GETVXRES(ops->p->scrollmode, info);
1524 ++ u32 vyres = GETVYRES(ops->p, info);
1525 ++ u32 vxres = GETVXRES(ops->p, info);
1526 +
1527 + if (!ops->fontbuffer)
1528 + return;
1529 +@@ -259,8 +259,8 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode,
1530 + int attribute, use_sw = vc->vc_cursor_type & CUR_SW;
1531 + int err = 1, dx, dy;
1532 + char *src;
1533 +- u32 vyres = GETVYRES(ops->p->scrollmode, info);
1534 +- u32 vxres = GETVXRES(ops->p->scrollmode, info);
1535 ++ u32 vyres = GETVYRES(ops->p, info);
1536 ++ u32 vxres = GETVXRES(ops->p, info);
1537 +
1538 + if (!ops->fontbuffer)
1539 + return;
1540 +@@ -410,8 +410,8 @@ static int ud_update_start(struct fb_info *info)
1541 + {
1542 + struct fbcon_ops *ops = info->fbcon_par;
1543 + int xoffset, yoffset;
1544 +- u32 vyres = GETVYRES(ops->p->scrollmode, info);
1545 +- u32 vxres = GETVXRES(ops->p->scrollmode, info);
1546 ++ u32 vyres = GETVYRES(ops->p, info);
1547 ++ u32 vxres = GETVXRES(ops->p, info);
1548 + int err;
1549 +
1550 + xoffset = vxres - info->var.xres - ops->var.xoffset;
1551 +diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
1552 +index f65aa4ed5ca1e..e39a12037b403 100644
1553 +--- a/fs/btrfs/qgroup.c
1554 ++++ b/fs/btrfs/qgroup.c
1555 +@@ -1186,9 +1186,24 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
1556 + struct btrfs_trans_handle *trans = NULL;
1557 + int ret = 0;
1558 +
1559 ++ /*
1560 ++ * We need to have subvol_sem write locked, to prevent races between
1561 ++ * concurrent tasks trying to disable quotas, because we will unlock
1562 ++ * and relock qgroup_ioctl_lock across BTRFS_FS_QUOTA_ENABLED changes.
1563 ++ */
1564 ++ lockdep_assert_held_write(&fs_info->subvol_sem);
1565 ++
1566 + mutex_lock(&fs_info->qgroup_ioctl_lock);
1567 + if (!fs_info->quota_root)
1568 + goto out;
1569 ++
1570 ++ /*
1571 ++ * Request qgroup rescan worker to complete and wait for it. This wait
1572 ++	 * Request the qgroup rescan worker to complete and wait for it. This
1573 ++	 * wait must be done before starting the transaction for quota disable,
1574 ++	 * since it may deadlock with a transaction held by the qgroup rescan worker.
1575 ++ clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1576 ++ btrfs_qgroup_wait_for_completion(fs_info, false);
1577 + mutex_unlock(&fs_info->qgroup_ioctl_lock);
1578 +
1579 + /*
1580 +@@ -1206,14 +1221,13 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
1581 + if (IS_ERR(trans)) {
1582 + ret = PTR_ERR(trans);
1583 + trans = NULL;
1584 ++ set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1585 + goto out;
1586 + }
1587 +
1588 + if (!fs_info->quota_root)
1589 + goto out;
1590 +
1591 +- clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1592 +- btrfs_qgroup_wait_for_completion(fs_info, false);
1593 + spin_lock(&fs_info->qgroup_lock);
1594 + quota_root = fs_info->quota_root;
1595 + fs_info->quota_root = NULL;
1596 +@@ -3390,6 +3404,9 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
1597 + btrfs_warn(fs_info,
1598 + "qgroup rescan init failed, qgroup is not enabled");
1599 + ret = -EINVAL;
1600 ++ } else if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
1601 ++ /* Quota disable is in progress */
1602 ++ ret = -EBUSY;
1603 + }
1604 +
1605 + if (ret) {
1606 +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
1607 +index 99d98d1010217..455eb349c76f8 100644
1608 +--- a/fs/ext4/ext4.h
1609 ++++ b/fs/ext4/ext4.h
1610 +@@ -2779,6 +2779,9 @@ void ext4_fc_replay_cleanup(struct super_block *sb);
1611 + int ext4_fc_commit(journal_t *journal, tid_t commit_tid);
1612 + int __init ext4_fc_init_dentry_cache(void);
1613 + void ext4_fc_destroy_dentry_cache(void);
1614 ++int ext4_fc_record_regions(struct super_block *sb, int ino,
1615 ++ ext4_lblk_t lblk, ext4_fsblk_t pblk,
1616 ++ int len, int replay);
1617 +
1618 + /* mballoc.c */
1619 + extern const struct seq_operations ext4_mb_seq_groups_ops;
1620 +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
1621 +index b297b14de7509..0fda3051760d1 100644
1622 +--- a/fs/ext4/extents.c
1623 ++++ b/fs/ext4/extents.c
1624 +@@ -6088,11 +6088,15 @@ int ext4_ext_clear_bb(struct inode *inode)
1625 +
1626 + ext4_mb_mark_bb(inode->i_sb,
1627 + path[j].p_block, 1, 0);
1628 ++ ext4_fc_record_regions(inode->i_sb, inode->i_ino,
1629 ++ 0, path[j].p_block, 1, 1);
1630 + }
1631 + ext4_ext_drop_refs(path);
1632 + kfree(path);
1633 + }
1634 + ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, 0);
1635 ++ ext4_fc_record_regions(inode->i_sb, inode->i_ino,
1636 ++ map.m_lblk, map.m_pblk, map.m_len, 1);
1637 + }
1638 + cur = cur + map.m_len;
1639 + }
1640 +diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
1641 +index f483abcd5213a..501e60713010e 100644
1642 +--- a/fs/ext4/fast_commit.c
1643 ++++ b/fs/ext4/fast_commit.c
1644 +@@ -1388,14 +1388,15 @@ static int ext4_fc_record_modified_inode(struct super_block *sb, int ino)
1645 + if (state->fc_modified_inodes[i] == ino)
1646 + return 0;
1647 + if (state->fc_modified_inodes_used == state->fc_modified_inodes_size) {
1648 +- state->fc_modified_inodes_size +=
1649 +- EXT4_FC_REPLAY_REALLOC_INCREMENT;
1650 + state->fc_modified_inodes = krealloc(
1651 +- state->fc_modified_inodes, sizeof(int) *
1652 +- state->fc_modified_inodes_size,
1653 +- GFP_KERNEL);
1654 ++ state->fc_modified_inodes,
1655 ++ sizeof(int) * (state->fc_modified_inodes_size +
1656 ++ EXT4_FC_REPLAY_REALLOC_INCREMENT),
1657 ++ GFP_KERNEL);
1658 + if (!state->fc_modified_inodes)
1659 + return -ENOMEM;
1660 ++ state->fc_modified_inodes_size +=
1661 ++ EXT4_FC_REPLAY_REALLOC_INCREMENT;
1662 + }
1663 + state->fc_modified_inodes[state->fc_modified_inodes_used++] = ino;
1664 + return 0;
1665 +@@ -1427,7 +1428,9 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl,
1666 + }
1667 + inode = NULL;
1668 +
1669 +- ext4_fc_record_modified_inode(sb, ino);
1670 ++ ret = ext4_fc_record_modified_inode(sb, ino);
1671 ++ if (ret)
1672 ++ goto out;
1673 +
1674 + raw_fc_inode = (struct ext4_inode *)
1675 + (val + offsetof(struct ext4_fc_inode, fc_raw_inode));
1676 +@@ -1558,16 +1561,23 @@ out:
1677 + }
1678 +
1679 + /*
1680 +- * Record physical disk regions which are in use as per fast commit area. Our
1681 +- * simple replay phase allocator excludes these regions from allocation.
1682 ++ * Record physical disk regions which are in use as per fast commit area,
1683 ++ * and used by inodes during replay phase. Our simple replay phase
1684 ++ * allocator excludes these regions from allocation.
1685 + */
1686 +-static int ext4_fc_record_regions(struct super_block *sb, int ino,
1687 +- ext4_lblk_t lblk, ext4_fsblk_t pblk, int len)
1688 ++int ext4_fc_record_regions(struct super_block *sb, int ino,
1689 ++ ext4_lblk_t lblk, ext4_fsblk_t pblk, int len, int replay)
1690 + {
1691 + struct ext4_fc_replay_state *state;
1692 + struct ext4_fc_alloc_region *region;
1693 +
1694 + state = &EXT4_SB(sb)->s_fc_replay_state;
1695 ++ /*
1696 ++	 * During the replay phase, fc_regions_valid may not be the same as
1697 ++	 * fc_regions_used; update it when making new additions.
1698 ++ */
1699 ++ if (replay && state->fc_regions_used != state->fc_regions_valid)
1700 ++ state->fc_regions_used = state->fc_regions_valid;
1701 + if (state->fc_regions_used == state->fc_regions_size) {
1702 + state->fc_regions_size +=
1703 + EXT4_FC_REPLAY_REALLOC_INCREMENT;
1704 +@@ -1585,6 +1595,9 @@ static int ext4_fc_record_regions(struct super_block *sb, int ino,
1705 + region->pblk = pblk;
1706 + region->len = len;
1707 +
1708 ++ if (replay)
1709 ++ state->fc_regions_valid++;
1710 ++
1711 + return 0;
1712 + }
1713 +
1714 +@@ -1616,6 +1629,8 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
1715 + }
1716 +
1717 + ret = ext4_fc_record_modified_inode(sb, inode->i_ino);
1718 ++ if (ret)
1719 ++ goto out;
1720 +
1721 + start = le32_to_cpu(ex->ee_block);
1722 + start_pblk = ext4_ext_pblock(ex);
1723 +@@ -1633,18 +1648,14 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
1724 + map.m_pblk = 0;
1725 + ret = ext4_map_blocks(NULL, inode, &map, 0);
1726 +
1727 +- if (ret < 0) {
1728 +- iput(inode);
1729 +- return 0;
1730 +- }
1731 ++ if (ret < 0)
1732 ++ goto out;
1733 +
1734 + if (ret == 0) {
1735 + /* Range is not mapped */
1736 + path = ext4_find_extent(inode, cur, NULL, 0);
1737 +- if (IS_ERR(path)) {
1738 +- iput(inode);
1739 +- return 0;
1740 +- }
1741 ++ if (IS_ERR(path))
1742 ++ goto out;
1743 + memset(&newex, 0, sizeof(newex));
1744 + newex.ee_block = cpu_to_le32(cur);
1745 + ext4_ext_store_pblock(
1746 +@@ -1658,10 +1669,8 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
1747 + up_write((&EXT4_I(inode)->i_data_sem));
1748 + ext4_ext_drop_refs(path);
1749 + kfree(path);
1750 +- if (ret) {
1751 +- iput(inode);
1752 +- return 0;
1753 +- }
1754 ++ if (ret)
1755 ++ goto out;
1756 + goto next;
1757 + }
1758 +
1759 +@@ -1674,10 +1683,8 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
1760 + ret = ext4_ext_replay_update_ex(inode, cur, map.m_len,
1761 + ext4_ext_is_unwritten(ex),
1762 + start_pblk + cur - start);
1763 +- if (ret) {
1764 +- iput(inode);
1765 +- return 0;
1766 +- }
1767 ++ if (ret)
1768 ++ goto out;
1769 + /*
1770 + * Mark the old blocks as free since they aren't used
1771 + * anymore. We maintain an array of all the modified
1772 +@@ -1697,10 +1704,8 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
1773 + ext4_ext_is_unwritten(ex), map.m_pblk);
1774 + ret = ext4_ext_replay_update_ex(inode, cur, map.m_len,
1775 + ext4_ext_is_unwritten(ex), map.m_pblk);
1776 +- if (ret) {
1777 +- iput(inode);
1778 +- return 0;
1779 +- }
1780 ++ if (ret)
1781 ++ goto out;
1782 + /*
1783 + * We may have split the extent tree while toggling the state.
1784 + * Try to shrink the extent tree now.
1785 +@@ -1712,6 +1717,7 @@ next:
1786 + }
1787 + ext4_ext_replay_shrink_inode(inode, i_size_read(inode) >>
1788 + sb->s_blocksize_bits);
1789 ++out:
1790 + iput(inode);
1791 + return 0;
1792 + }
1793 +@@ -1741,6 +1747,8 @@ ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl,
1794 + }
1795 +
1796 + ret = ext4_fc_record_modified_inode(sb, inode->i_ino);
1797 ++ if (ret)
1798 ++ goto out;
1799 +
1800 + jbd_debug(1, "DEL_RANGE, inode %ld, lblk %d, len %d\n",
1801 + inode->i_ino, le32_to_cpu(lrange.fc_lblk),
1802 +@@ -1750,10 +1758,8 @@ ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl,
1803 + map.m_len = remaining;
1804 +
1805 + ret = ext4_map_blocks(NULL, inode, &map, 0);
1806 +- if (ret < 0) {
1807 +- iput(inode);
1808 +- return 0;
1809 +- }
1810 ++ if (ret < 0)
1811 ++ goto out;
1812 + if (ret > 0) {
1813 + remaining -= ret;
1814 + cur += ret;
1815 +@@ -1765,18 +1771,17 @@ ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl,
1816 + }
1817 +
1818 + down_write(&EXT4_I(inode)->i_data_sem);
1819 +- ret = ext4_ext_remove_space(inode, lrange.fc_lblk,
1820 +- lrange.fc_lblk + lrange.fc_len - 1);
1821 ++ ret = ext4_ext_remove_space(inode, le32_to_cpu(lrange.fc_lblk),
1822 ++ le32_to_cpu(lrange.fc_lblk) +
1823 ++ le32_to_cpu(lrange.fc_len) - 1);
1824 + up_write(&EXT4_I(inode)->i_data_sem);
1825 +- if (ret) {
1826 +- iput(inode);
1827 +- return 0;
1828 +- }
1829 ++ if (ret)
1830 ++ goto out;
1831 + ext4_ext_replay_shrink_inode(inode,
1832 + i_size_read(inode) >> sb->s_blocksize_bits);
1833 + ext4_mark_inode_dirty(NULL, inode);
1834 ++out:
1835 + iput(inode);
1836 +-
1837 + return 0;
1838 + }
1839 +
1840 +@@ -1954,7 +1959,7 @@ static int ext4_fc_replay_scan(journal_t *journal,
1841 + ret = ext4_fc_record_regions(sb,
1842 + le32_to_cpu(ext.fc_ino),
1843 + le32_to_cpu(ex->ee_block), ext4_ext_pblock(ex),
1844 +- ext4_ext_get_actual_len(ex));
1845 ++ ext4_ext_get_actual_len(ex), 0);
1846 + if (ret < 0)
1847 + break;
1848 + ret = JBD2_FC_REPLAY_CONTINUE;
1849 +diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
1850 +index a96b688a0410f..ae1f0c57f54d2 100644
1851 +--- a/fs/ext4/inline.c
1852 ++++ b/fs/ext4/inline.c
1853 +@@ -1120,7 +1120,15 @@ static void ext4_restore_inline_data(handle_t *handle, struct inode *inode,
1854 + struct ext4_iloc *iloc,
1855 + void *buf, int inline_size)
1856 + {
1857 +- ext4_create_inline_data(handle, inode, inline_size);
1858 ++ int ret;
1859 ++
1860 ++ ret = ext4_create_inline_data(handle, inode, inline_size);
1861 ++ if (ret) {
1862 ++ ext4_msg(inode->i_sb, KERN_EMERG,
1863 ++ "error restoring inline_data for inode -- potential data loss! (inode %lu, error %d)",
1864 ++ inode->i_ino, ret);
1865 ++ return;
1866 ++ }
1867 + ext4_write_inline_data(inode, iloc, buf, 0, inline_size);
1868 + ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
1869 + }
1870 +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
1871 +index e40f87d07783a..110c25824a67f 100644
1872 +--- a/fs/ext4/mballoc.c
1873 ++++ b/fs/ext4/mballoc.c
1874 +@@ -5173,7 +5173,8 @@ static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
1875 + struct super_block *sb = ar->inode->i_sb;
1876 + ext4_group_t group;
1877 + ext4_grpblk_t blkoff;
1878 +- int i = sb->s_blocksize;
1879 ++ ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
1880 ++ ext4_grpblk_t i = 0;
1881 + ext4_fsblk_t goal, block;
1882 + struct ext4_super_block *es = EXT4_SB(sb)->s_es;
1883 +
1884 +@@ -5195,19 +5196,26 @@ static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
1885 + ext4_get_group_no_and_offset(sb,
1886 + max(ext4_group_first_block_no(sb, group), goal),
1887 + NULL, &blkoff);
1888 +- i = mb_find_next_zero_bit(bitmap_bh->b_data, sb->s_blocksize,
1889 ++ while (1) {
1890 ++ i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
1891 + blkoff);
1892 ++ if (i >= max)
1893 ++ break;
1894 ++ if (ext4_fc_replay_check_excluded(sb,
1895 ++ ext4_group_first_block_no(sb, group) + i)) {
1896 ++ blkoff = i + 1;
1897 ++ } else
1898 ++ break;
1899 ++ }
1900 + brelse(bitmap_bh);
1901 +- if (i >= sb->s_blocksize)
1902 +- continue;
1903 +- if (ext4_fc_replay_check_excluded(sb,
1904 +- ext4_group_first_block_no(sb, group) + i))
1905 +- continue;
1906 +- break;
1907 ++ if (i < max)
1908 ++ break;
1909 + }
1910 +
1911 +- if (group >= ext4_get_groups_count(sb) && i >= sb->s_blocksize)
1912 ++ if (group >= ext4_get_groups_count(sb) || i >= max) {
1913 ++ *errp = -ENOSPC;
1914 + return 0;
1915 ++ }
1916 +
1917 + block = ext4_group_first_block_no(sb, group) + i;
1918 + ext4_mb_mark_bb(sb, block, 1, 1);
1919 +diff --git a/fs/fs_context.c b/fs/fs_context.c
1920 +index b11677802ee13..740322dff4a30 100644
1921 +--- a/fs/fs_context.c
1922 ++++ b/fs/fs_context.c
1923 +@@ -231,7 +231,7 @@ static struct fs_context *alloc_fs_context(struct file_system_type *fs_type,
1924 + struct fs_context *fc;
1925 + int ret = -ENOMEM;
1926 +
1927 +- fc = kzalloc(sizeof(struct fs_context), GFP_KERNEL);
1928 ++ fc = kzalloc(sizeof(struct fs_context), GFP_KERNEL_ACCOUNT);
1929 + if (!fc)
1930 + return ERR_PTR(-ENOMEM);
1931 +
1932 +@@ -631,7 +631,7 @@ const struct fs_context_operations legacy_fs_context_ops = {
1933 + */
1934 + static int legacy_init_fs_context(struct fs_context *fc)
1935 + {
1936 +- fc->fs_private = kzalloc(sizeof(struct legacy_fs_context), GFP_KERNEL);
1937 ++ fc->fs_private = kzalloc(sizeof(struct legacy_fs_context), GFP_KERNEL_ACCOUNT);
1938 + if (!fc->fs_private)
1939 + return -ENOMEM;
1940 + fc->ops = &legacy_fs_context_ops;
1941 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
1942 +index 210147960c52e..d01d7929753ef 100644
1943 +--- a/fs/nfsd/nfs4state.c
1944 ++++ b/fs/nfsd/nfs4state.c
1945 +@@ -4047,8 +4047,10 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
1946 + status = nfserr_clid_inuse;
1947 + if (client_has_state(old)
1948 + && !same_creds(&unconf->cl_cred,
1949 +- &old->cl_cred))
1950 ++ &old->cl_cred)) {
1951 ++ old = NULL;
1952 + goto out;
1953 ++ }
1954 + status = mark_client_expired_locked(old);
1955 + if (status) {
1956 + old = NULL;
1957 +diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
1958 +index 7c869ea8dffc8..9def1ac19546b 100644
1959 +--- a/include/linux/pgtable.h
1960 ++++ b/include/linux/pgtable.h
1961 +@@ -44,6 +44,7 @@ static inline unsigned long pte_index(unsigned long address)
1962 + {
1963 + return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
1964 + }
1965 ++#define pte_index pte_index
1966 +
1967 + #ifndef pmd_index
1968 + static inline unsigned long pmd_index(unsigned long address)
1969 +diff --git a/kernel/audit.c b/kernel/audit.c
1970 +index 2a38cbaf3ddb7..aeec86ed47088 100644
1971 +--- a/kernel/audit.c
1972 ++++ b/kernel/audit.c
1973 +@@ -541,20 +541,22 @@ static void kauditd_printk_skb(struct sk_buff *skb)
1974 + /**
1975 + * kauditd_rehold_skb - Handle a audit record send failure in the hold queue
1976 + * @skb: audit record
1977 ++ * @error: error code (unused)
1978 + *
1979 + * Description:
1980 + * This should only be used by the kauditd_thread when it fails to flush the
1981 + * hold queue.
1982 + */
1983 +-static void kauditd_rehold_skb(struct sk_buff *skb)
1984 ++static void kauditd_rehold_skb(struct sk_buff *skb, __always_unused int error)
1985 + {
1986 +- /* put the record back in the queue at the same place */
1987 +- skb_queue_head(&audit_hold_queue, skb);
1988 ++ /* put the record back in the queue */
1989 ++ skb_queue_tail(&audit_hold_queue, skb);
1990 + }
1991 +
1992 + /**
1993 + * kauditd_hold_skb - Queue an audit record, waiting for auditd
1994 + * @skb: audit record
1995 ++ * @error: error code
1996 + *
1997 + * Description:
1998 + * Queue the audit record, waiting for an instance of auditd. When this
1999 +@@ -564,19 +566,31 @@ static void kauditd_rehold_skb(struct sk_buff *skb)
2000 + * and queue it, if we have room. If we want to hold on to the record, but we
2001 + * don't have room, record a record lost message.
2002 + */
2003 +-static void kauditd_hold_skb(struct sk_buff *skb)
2004 ++static void kauditd_hold_skb(struct sk_buff *skb, int error)
2005 + {
2006 + /* at this point it is uncertain if we will ever send this to auditd so
2007 + * try to send the message via printk before we go any further */
2008 + kauditd_printk_skb(skb);
2009 +
2010 + /* can we just silently drop the message? */
2011 +- if (!audit_default) {
2012 +- kfree_skb(skb);
2013 +- return;
2014 ++ if (!audit_default)
2015 ++ goto drop;
2016 ++
2017 ++ /* the hold queue is only for when the daemon goes away completely,
2018 ++ * not -EAGAIN failures; if we are in a -EAGAIN state requeue the
2019 ++ * record on the retry queue unless it's full, in which case drop it
2020 ++ */
2021 ++ if (error == -EAGAIN) {
2022 ++ if (!audit_backlog_limit ||
2023 ++ skb_queue_len(&audit_retry_queue) < audit_backlog_limit) {
2024 ++ skb_queue_tail(&audit_retry_queue, skb);
2025 ++ return;
2026 ++ }
2027 ++ audit_log_lost("kauditd retry queue overflow");
2028 ++ goto drop;
2029 + }
2030 +
2031 +- /* if we have room, queue the message */
2032 ++ /* if we have room in the hold queue, queue the message */
2033 + if (!audit_backlog_limit ||
2034 + skb_queue_len(&audit_hold_queue) < audit_backlog_limit) {
2035 + skb_queue_tail(&audit_hold_queue, skb);
2036 +@@ -585,24 +599,32 @@ static void kauditd_hold_skb(struct sk_buff *skb)
2037 +
2038 + /* we have no other options - drop the message */
2039 + audit_log_lost("kauditd hold queue overflow");
2040 ++drop:
2041 + kfree_skb(skb);
2042 + }
2043 +
2044 + /**
2045 + * kauditd_retry_skb - Queue an audit record, attempt to send again to auditd
2046 + * @skb: audit record
2047 ++ * @error: error code (unused)
2048 + *
2049 + * Description:
2050 + * Not as serious as kauditd_hold_skb() as we still have a connected auditd,
2051 + * but for some reason we are having problems sending it audit records so
2052 + * queue the given record and attempt to resend.
2053 + */
2054 +-static void kauditd_retry_skb(struct sk_buff *skb)
2055 ++static void kauditd_retry_skb(struct sk_buff *skb, __always_unused int error)
2056 + {
2057 +- /* NOTE: because records should only live in the retry queue for a
2058 +- * short period of time, before either being sent or moved to the hold
2059 +- * queue, we don't currently enforce a limit on this queue */
2060 +- skb_queue_tail(&audit_retry_queue, skb);
2061 ++ if (!audit_backlog_limit ||
2062 ++ skb_queue_len(&audit_retry_queue) < audit_backlog_limit) {
2063 ++ skb_queue_tail(&audit_retry_queue, skb);
2064 ++ return;
2065 ++ }
2066 ++
2067 ++ /* we have to drop the record, send it via printk as a last effort */
2068 ++ kauditd_printk_skb(skb);
2069 ++ audit_log_lost("kauditd retry queue overflow");
2070 ++ kfree_skb(skb);
2071 + }
2072 +
2073 + /**
2074 +@@ -640,7 +662,7 @@ static void auditd_reset(const struct auditd_connection *ac)
2075 + /* flush the retry queue to the hold queue, but don't touch the main
2076 + * queue since we need to process that normally for multicast */
2077 + while ((skb = skb_dequeue(&audit_retry_queue)))
2078 +- kauditd_hold_skb(skb);
2079 ++ kauditd_hold_skb(skb, -ECONNREFUSED);
2080 + }
2081 +
2082 + /**
2083 +@@ -714,16 +736,18 @@ static int kauditd_send_queue(struct sock *sk, u32 portid,
2084 + struct sk_buff_head *queue,
2085 + unsigned int retry_limit,
2086 + void (*skb_hook)(struct sk_buff *skb),
2087 +- void (*err_hook)(struct sk_buff *skb))
2088 ++ void (*err_hook)(struct sk_buff *skb, int error))
2089 + {
2090 + int rc = 0;
2091 +- struct sk_buff *skb;
2092 ++ struct sk_buff *skb = NULL;
2093 ++ struct sk_buff *skb_tail;
2094 + unsigned int failed = 0;
2095 +
2096 + /* NOTE: kauditd_thread takes care of all our locking, we just use
2097 + * the netlink info passed to us (e.g. sk and portid) */
2098 +
2099 +- while ((skb = skb_dequeue(queue))) {
2100 ++ skb_tail = skb_peek_tail(queue);
2101 ++ while ((skb != skb_tail) && (skb = skb_dequeue(queue))) {
2102 + /* call the skb_hook for each skb we touch */
2103 + if (skb_hook)
2104 + (*skb_hook)(skb);
2105 +@@ -731,7 +755,7 @@ static int kauditd_send_queue(struct sock *sk, u32 portid,
2106 + /* can we send to anyone via unicast? */
2107 + if (!sk) {
2108 + if (err_hook)
2109 +- (*err_hook)(skb);
2110 ++ (*err_hook)(skb, -ECONNREFUSED);
2111 + continue;
2112 + }
2113 +
2114 +@@ -745,7 +769,7 @@ retry:
2115 + rc == -ECONNREFUSED || rc == -EPERM) {
2116 + sk = NULL;
2117 + if (err_hook)
2118 +- (*err_hook)(skb);
2119 ++ (*err_hook)(skb, rc);
2120 + if (rc == -EAGAIN)
2121 + rc = 0;
2122 + /* continue to drain the queue */
2123 +diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
2124 +index f9913bc65ef8d..1e4bf23528a3d 100644
2125 +--- a/kernel/bpf/ringbuf.c
2126 ++++ b/kernel/bpf/ringbuf.c
2127 +@@ -108,7 +108,7 @@ static struct bpf_ringbuf *bpf_ringbuf_area_alloc(size_t data_sz, int numa_node)
2128 + }
2129 +
2130 + rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages,
2131 +- VM_ALLOC | VM_USERMAP, PAGE_KERNEL);
2132 ++ VM_MAP | VM_USERMAP, PAGE_KERNEL);
2133 + if (rb) {
2134 + kmemleak_not_leak(pages);
2135 + rb->pages = pages;
2136 +diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
2137 +index 7c7758a9e2c24..ef6b3a7f31c17 100644
2138 +--- a/kernel/cgroup/cpuset.c
2139 ++++ b/kernel/cgroup/cpuset.c
2140 +@@ -1481,10 +1481,15 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
2141 + struct cpuset *sibling;
2142 + struct cgroup_subsys_state *pos_css;
2143 +
2144 ++ percpu_rwsem_assert_held(&cpuset_rwsem);
2145 ++
2146 + /*
2147 + * Check all its siblings and call update_cpumasks_hier()
2148 + * if their use_parent_ecpus flag is set in order for them
2149 + * to use the right effective_cpus value.
2150 ++ *
2151 ++ * The update_cpumasks_hier() function may sleep. So we have to
2152 ++ * release the RCU read lock before calling it.
2153 + */
2154 + rcu_read_lock();
2155 + cpuset_for_each_child(sibling, pos_css, parent) {
2156 +@@ -1492,8 +1497,13 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
2157 + continue;
2158 + if (!sibling->use_parent_ecpus)
2159 + continue;
2160 ++ if (!css_tryget_online(&sibling->css))
2161 ++ continue;
2162 +
2163 ++ rcu_read_unlock();
2164 + update_cpumasks_hier(sibling, tmp);
2165 ++ rcu_read_lock();
2166 ++ css_put(&sibling->css);
2167 + }
2168 + rcu_read_unlock();
2169 + }
2170 +diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
2171 +index 12ebc97e8b435..d6fbf28ebf72c 100644
2172 +--- a/mm/debug_vm_pgtable.c
2173 ++++ b/mm/debug_vm_pgtable.c
2174 +@@ -128,6 +128,8 @@ static void __init pte_advanced_tests(struct mm_struct *mm,
2175 + ptep_test_and_clear_young(vma, vaddr, ptep);
2176 + pte = ptep_get(ptep);
2177 + WARN_ON(pte_young(pte));
2178 ++
2179 ++ ptep_get_and_clear_full(mm, vaddr, ptep, 1);
2180 + }
2181 +
2182 + static void __init pte_savedwrite_tests(unsigned long pfn, pgprot_t prot)
2183 +diff --git a/mm/kmemleak.c b/mm/kmemleak.c
2184 +index c0014d3b91c10..56fcfcb8e6173 100644
2185 +--- a/mm/kmemleak.c
2186 ++++ b/mm/kmemleak.c
2187 +@@ -1401,7 +1401,8 @@ static void kmemleak_scan(void)
2188 + {
2189 + unsigned long flags;
2190 + struct kmemleak_object *object;
2191 +- int i;
2192 ++ struct zone *zone;
2193 ++ int __maybe_unused i;
2194 + int new_leaks = 0;
2195 +
2196 + jiffies_last_scan = jiffies;
2197 +@@ -1441,9 +1442,9 @@ static void kmemleak_scan(void)
2198 + * Struct page scanning for each node.
2199 + */
2200 + get_online_mems();
2201 +- for_each_online_node(i) {
2202 +- unsigned long start_pfn = node_start_pfn(i);
2203 +- unsigned long end_pfn = node_end_pfn(i);
2204 ++ for_each_populated_zone(zone) {
2205 ++ unsigned long start_pfn = zone->zone_start_pfn;
2206 ++ unsigned long end_pfn = zone_end_pfn(zone);
2207 + unsigned long pfn;
2208 +
2209 + for (pfn = start_pfn; pfn < end_pfn; pfn++) {
2210 +@@ -1452,8 +1453,8 @@ static void kmemleak_scan(void)
2211 + if (!page)
2212 + continue;
2213 +
2214 +- /* only scan pages belonging to this node */
2215 +- if (page_to_nid(page) != i)
2216 ++ /* only scan pages belonging to this zone */
2217 ++ if (page_zone(page) != zone)
2218 + continue;
2219 + /* only scan if page is in use */
2220 + if (page_count(page) == 0)
2221 +diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
2222 +index b34e4f827e756..a493965f157f2 100644
2223 +--- a/net/ieee802154/nl802154.c
2224 ++++ b/net/ieee802154/nl802154.c
2225 +@@ -1441,7 +1441,7 @@ static int nl802154_send_key(struct sk_buff *msg, u32 cmd, u32 portid,
2226 +
2227 + hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
2228 + if (!hdr)
2229 +- return -1;
2230 ++ return -ENOBUFS;
2231 +
2232 + if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
2233 + goto nla_put_failure;
2234 +@@ -1634,7 +1634,7 @@ static int nl802154_send_device(struct sk_buff *msg, u32 cmd, u32 portid,
2235 +
2236 + hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
2237 + if (!hdr)
2238 +- return -1;
2239 ++ return -ENOBUFS;
2240 +
2241 + if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
2242 + goto nla_put_failure;
2243 +@@ -1812,7 +1812,7 @@ static int nl802154_send_devkey(struct sk_buff *msg, u32 cmd, u32 portid,
2244 +
2245 + hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
2246 + if (!hdr)
2247 +- return -1;
2248 ++ return -ENOBUFS;
2249 +
2250 + if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
2251 + goto nla_put_failure;
2252 +@@ -1988,7 +1988,7 @@ static int nl802154_send_seclevel(struct sk_buff *msg, u32 cmd, u32 portid,
2253 +
2254 + hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
2255 + if (!hdr)
2256 +- return -1;
2257 ++ return -ENOBUFS;
2258 +
2259 + if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
2260 + goto nla_put_failure;
2261 +diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c
2262 +index 1ef74c085f2b0..865611127357e 100644
2263 +--- a/security/selinux/ss/conditional.c
2264 ++++ b/security/selinux/ss/conditional.c
2265 +@@ -152,6 +152,8 @@ static void cond_list_destroy(struct policydb *p)
2266 + for (i = 0; i < p->cond_list_len; i++)
2267 + cond_node_destroy(&p->cond_list[i]);
2268 + kfree(p->cond_list);
2269 ++ p->cond_list = NULL;
2270 ++ p->cond_list_len = 0;
2271 + }
2272 +
2273 + void cond_policydb_destroy(struct policydb *p)
2274 +@@ -440,7 +442,6 @@ int cond_read_list(struct policydb *p, void *fp)
2275 + return 0;
2276 + err:
2277 + cond_list_destroy(p);
2278 +- p->cond_list = NULL;
2279 + return rc;
2280 + }
2281 +
2282 +diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
2283 +index 323df011b94a3..8ee3be7bbd24e 100644
2284 +--- a/sound/pci/hda/hda_generic.c
2285 ++++ b/sound/pci/hda/hda_generic.c
2286 +@@ -91,6 +91,12 @@ static void snd_hda_gen_spec_free(struct hda_gen_spec *spec)
2287 + free_kctls(spec);
2288 + snd_array_free(&spec->paths);
2289 + snd_array_free(&spec->loopback_list);
2290 ++#ifdef CONFIG_SND_HDA_GENERIC_LEDS
2291 ++ if (spec->led_cdevs[LED_AUDIO_MUTE])
2292 ++ led_classdev_unregister(spec->led_cdevs[LED_AUDIO_MUTE]);
2293 ++ if (spec->led_cdevs[LED_AUDIO_MICMUTE])
2294 ++ led_classdev_unregister(spec->led_cdevs[LED_AUDIO_MICMUTE]);
2295 ++#endif
2296 + }
2297 +
2298 + /*
2299 +@@ -3911,7 +3917,10 @@ static int create_mute_led_cdev(struct hda_codec *codec,
2300 + enum led_brightness),
2301 + bool micmute)
2302 + {
2303 ++ struct hda_gen_spec *spec = codec->spec;
2304 + struct led_classdev *cdev;
2305 ++ int idx = micmute ? LED_AUDIO_MICMUTE : LED_AUDIO_MUTE;
2306 ++ int err;
2307 +
2308 + cdev = devm_kzalloc(&codec->core.dev, sizeof(*cdev), GFP_KERNEL);
2309 + if (!cdev)
2310 +@@ -3921,10 +3930,14 @@ static int create_mute_led_cdev(struct hda_codec *codec,
2311 + cdev->max_brightness = 1;
2312 + cdev->default_trigger = micmute ? "audio-micmute" : "audio-mute";
2313 + cdev->brightness_set_blocking = callback;
2314 +- cdev->brightness = ledtrig_audio_get(micmute ? LED_AUDIO_MICMUTE : LED_AUDIO_MUTE);
2315 ++ cdev->brightness = ledtrig_audio_get(idx);
2316 + cdev->flags = LED_CORE_SUSPENDRESUME;
2317 +
2318 +- return devm_led_classdev_register(&codec->core.dev, cdev);
2319 ++ err = led_classdev_register(&codec->core.dev, cdev);
2320 ++ if (err < 0)
2321 ++ return err;
2322 ++ spec->led_cdevs[idx] = cdev;
2323 ++ return 0;
2324 + }
2325 +
2326 + static void vmaster_update_mute_led(void *private_data, int enabled)
2327 +diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
2328 +index 0886bc81f40be..578faa9adcdcd 100644
2329 +--- a/sound/pci/hda/hda_generic.h
2330 ++++ b/sound/pci/hda/hda_generic.h
2331 +@@ -305,6 +305,9 @@ struct hda_gen_spec {
2332 + struct hda_jack_callback *cb);
2333 + void (*mic_autoswitch_hook)(struct hda_codec *codec,
2334 + struct hda_jack_callback *cb);
2335 ++
2336 ++ /* leds */
2337 ++ struct led_classdev *led_cdevs[NUM_AUDIO_LEDS];
2338 + };
2339 +
2340 + /* values for add_stereo_mix_input flag */
2341 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2342 +index a858bb9e99270..aef017ba00708 100644
2343 +--- a/sound/pci/hda/patch_realtek.c
2344 ++++ b/sound/pci/hda/patch_realtek.c
2345 +@@ -97,6 +97,7 @@ struct alc_spec {
2346 + unsigned int gpio_mic_led_mask;
2347 + struct alc_coef_led mute_led_coef;
2348 + struct alc_coef_led mic_led_coef;
2349 ++ struct mutex coef_mutex;
2350 +
2351 + hda_nid_t headset_mic_pin;
2352 + hda_nid_t headphone_mic_pin;
2353 +@@ -133,8 +134,8 @@ struct alc_spec {
2354 + * COEF access helper functions
2355 + */
2356 +
2357 +-static int alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
2358 +- unsigned int coef_idx)
2359 ++static int __alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
2360 ++ unsigned int coef_idx)
2361 + {
2362 + unsigned int val;
2363 +
2364 +@@ -143,28 +144,61 @@ static int alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
2365 + return val;
2366 + }
2367 +
2368 ++static int alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
2369 ++ unsigned int coef_idx)
2370 ++{
2371 ++ struct alc_spec *spec = codec->spec;
2372 ++ unsigned int val;
2373 ++
2374 ++ mutex_lock(&spec->coef_mutex);
2375 ++ val = __alc_read_coefex_idx(codec, nid, coef_idx);
2376 ++ mutex_unlock(&spec->coef_mutex);
2377 ++ return val;
2378 ++}
2379 ++
2380 + #define alc_read_coef_idx(codec, coef_idx) \
2381 + alc_read_coefex_idx(codec, 0x20, coef_idx)
2382 +
2383 +-static void alc_write_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
2384 +- unsigned int coef_idx, unsigned int coef_val)
2385 ++static void __alc_write_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
2386 ++ unsigned int coef_idx, unsigned int coef_val)
2387 + {
2388 + snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_COEF_INDEX, coef_idx);
2389 + snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_PROC_COEF, coef_val);
2390 + }
2391 +
2392 ++static void alc_write_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
2393 ++ unsigned int coef_idx, unsigned int coef_val)
2394 ++{
2395 ++ struct alc_spec *spec = codec->spec;
2396 ++
2397 ++ mutex_lock(&spec->coef_mutex);
2398 ++ __alc_write_coefex_idx(codec, nid, coef_idx, coef_val);
2399 ++ mutex_unlock(&spec->coef_mutex);
2400 ++}
2401 ++
2402 + #define alc_write_coef_idx(codec, coef_idx, coef_val) \
2403 + alc_write_coefex_idx(codec, 0x20, coef_idx, coef_val)
2404 +
2405 ++static void __alc_update_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
2406 ++ unsigned int coef_idx, unsigned int mask,
2407 ++ unsigned int bits_set)
2408 ++{
2409 ++ unsigned int val = __alc_read_coefex_idx(codec, nid, coef_idx);
2410 ++
2411 ++ if (val != -1)
2412 ++ __alc_write_coefex_idx(codec, nid, coef_idx,
2413 ++ (val & ~mask) | bits_set);
2414 ++}
2415 ++
2416 + static void alc_update_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
2417 + unsigned int coef_idx, unsigned int mask,
2418 + unsigned int bits_set)
2419 + {
2420 +- unsigned int val = alc_read_coefex_idx(codec, nid, coef_idx);
2421 ++ struct alc_spec *spec = codec->spec;
2422 +
2423 +- if (val != -1)
2424 +- alc_write_coefex_idx(codec, nid, coef_idx,
2425 +- (val & ~mask) | bits_set);
2426 ++ mutex_lock(&spec->coef_mutex);
2427 ++ __alc_update_coefex_idx(codec, nid, coef_idx, mask, bits_set);
2428 ++ mutex_unlock(&spec->coef_mutex);
2429 + }
2430 +
2431 + #define alc_update_coef_idx(codec, coef_idx, mask, bits_set) \
2432 +@@ -197,13 +231,17 @@ struct coef_fw {
2433 + static void alc_process_coef_fw(struct hda_codec *codec,
2434 + const struct coef_fw *fw)
2435 + {
2436 ++ struct alc_spec *spec = codec->spec;
2437 ++
2438 ++ mutex_lock(&spec->coef_mutex);
2439 + for (; fw->nid; fw++) {
2440 + if (fw->mask == (unsigned short)-1)
2441 +- alc_write_coefex_idx(codec, fw->nid, fw->idx, fw->val);
2442 ++ __alc_write_coefex_idx(codec, fw->nid, fw->idx, fw->val);
2443 + else
2444 +- alc_update_coefex_idx(codec, fw->nid, fw->idx,
2445 +- fw->mask, fw->val);
2446 ++ __alc_update_coefex_idx(codec, fw->nid, fw->idx,
2447 ++ fw->mask, fw->val);
2448 + }
2449 ++ mutex_unlock(&spec->coef_mutex);
2450 + }
2451 +
2452 + /*
2453 +@@ -1160,6 +1198,7 @@ static int alc_alloc_spec(struct hda_codec *codec, hda_nid_t mixer_nid)
2454 + codec->spdif_status_reset = 1;
2455 + codec->forced_resume = 1;
2456 + codec->patch_ops = alc_patch_ops;
2457 ++ mutex_init(&spec->coef_mutex);
2458 +
2459 + err = alc_codec_rename_from_preset(codec);
2460 + if (err < 0) {
2461 +@@ -2132,6 +2171,7 @@ static void alc1220_fixup_gb_x570(struct hda_codec *codec,
2462 + {
2463 + static const hda_nid_t conn1[] = { 0x0c };
2464 + static const struct coef_fw gb_x570_coefs[] = {
2465 ++ WRITE_COEF(0x07, 0x03c0),
2466 + WRITE_COEF(0x1a, 0x01c1),
2467 + WRITE_COEF(0x1b, 0x0202),
2468 + WRITE_COEF(0x43, 0x3005),
2469 +@@ -2558,7 +2598,8 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
2470 + SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
2471 + SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
2472 + SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_GB_X570),
2473 +- SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_CLEVO_P950),
2474 ++ SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_GB_X570),
2475 ++ SND_PCI_QUIRK(0x1458, 0xa0d5, "Gigabyte X570S Aorus Master", ALC1220_FIXUP_GB_X570),
2476 + SND_PCI_QUIRK(0x1462, 0x11f7, "MSI-GE63", ALC1220_FIXUP_CLEVO_P950),
2477 + SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),
2478 + SND_PCI_QUIRK(0x1462, 0x1229, "MSI-GP73", ALC1220_FIXUP_CLEVO_P950),
2479 +@@ -2633,6 +2674,7 @@ static const struct hda_model_fixup alc882_fixup_models[] = {
2480 + {.id = ALC882_FIXUP_NO_PRIMARY_HP, .name = "no-primary-hp"},
2481 + {.id = ALC887_FIXUP_ASUS_BASS, .name = "asus-bass"},
2482 + {.id = ALC1220_FIXUP_GB_DUAL_CODECS, .name = "dual-codecs"},
2483 ++ {.id = ALC1220_FIXUP_GB_X570, .name = "gb-x570"},
2484 + {.id = ALC1220_FIXUP_CLEVO_P950, .name = "clevo-p950"},
2485 + {}
2486 + };
2487 +@@ -8750,6 +8792,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2488 + SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
2489 + SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
2490 + SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
2491 ++ SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
2492 + SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
2493 + SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
2494 + SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
2495 +diff --git a/sound/soc/codecs/cpcap.c b/sound/soc/codecs/cpcap.c
2496 +index c0425e3707d9c..a3597137fee3e 100644
2497 +--- a/sound/soc/codecs/cpcap.c
2498 ++++ b/sound/soc/codecs/cpcap.c
2499 +@@ -1544,6 +1544,8 @@ static int cpcap_codec_probe(struct platform_device *pdev)
2500 + {
2501 + struct device_node *codec_node =
2502 + of_get_child_by_name(pdev->dev.parent->of_node, "audio-codec");
2503 ++ if (!codec_node)
2504 ++ return -ENODEV;
2505 +
2506 + pdev->dev.of_node = codec_node;
2507 +
2508 +diff --git a/sound/soc/codecs/max9759.c b/sound/soc/codecs/max9759.c
2509 +index 00e9d4fd1651f..0c261335c8a16 100644
2510 +--- a/sound/soc/codecs/max9759.c
2511 ++++ b/sound/soc/codecs/max9759.c
2512 +@@ -64,7 +64,8 @@ static int speaker_gain_control_put(struct snd_kcontrol *kcontrol,
2513 + struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol);
2514 + struct max9759 *priv = snd_soc_component_get_drvdata(c);
2515 +
2516 +- if (ucontrol->value.integer.value[0] > 3)
2517 ++ if (ucontrol->value.integer.value[0] < 0 ||
2518 ++ ucontrol->value.integer.value[0] > 3)
2519 + return -EINVAL;
2520 +
2521 + priv->gain = ucontrol->value.integer.value[0];
2522 +diff --git a/sound/soc/fsl/pcm030-audio-fabric.c b/sound/soc/fsl/pcm030-audio-fabric.c
2523 +index af3c3b90c0aca..83b4a22bf15ac 100644
2524 +--- a/sound/soc/fsl/pcm030-audio-fabric.c
2525 ++++ b/sound/soc/fsl/pcm030-audio-fabric.c
2526 +@@ -93,16 +93,21 @@ static int pcm030_fabric_probe(struct platform_device *op)
2527 + dev_err(&op->dev, "platform_device_alloc() failed\n");
2528 +
2529 + ret = platform_device_add(pdata->codec_device);
2530 +- if (ret)
2531 ++ if (ret) {
2532 + dev_err(&op->dev, "platform_device_add() failed: %d\n", ret);
2533 ++ platform_device_put(pdata->codec_device);
2534 ++ }
2535 +
2536 + ret = snd_soc_register_card(card);
2537 +- if (ret)
2538 ++ if (ret) {
2539 + dev_err(&op->dev, "snd_soc_register_card() failed: %d\n", ret);
2540 ++ platform_device_del(pdata->codec_device);
2541 ++ platform_device_put(pdata->codec_device);
2542 ++ }
2543 +
2544 + platform_set_drvdata(op, pdata);
2545 +-
2546 + return ret;
2547 ++
2548 + }
2549 +
2550 + static int pcm030_fabric_remove(struct platform_device *op)
2551 +diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
2552 +index 10f48827bb0e0..f24f7354f46fe 100644
2553 +--- a/sound/soc/soc-ops.c
2554 ++++ b/sound/soc/soc-ops.c
2555 +@@ -316,13 +316,27 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol,
2556 + if (sign_bit)
2557 + mask = BIT(sign_bit + 1) - 1;
2558 +
2559 +- val = ((ucontrol->value.integer.value[0] + min) & mask);
2560 ++ val = ucontrol->value.integer.value[0];
2561 ++ if (mc->platform_max && val > mc->platform_max)
2562 ++ return -EINVAL;
2563 ++ if (val > max - min)
2564 ++ return -EINVAL;
2565 ++ if (val < 0)
2566 ++ return -EINVAL;
2567 ++ val = (val + min) & mask;
2568 + if (invert)
2569 + val = max - val;
2570 + val_mask = mask << shift;
2571 + val = val << shift;
2572 + if (snd_soc_volsw_is_stereo(mc)) {
2573 +- val2 = ((ucontrol->value.integer.value[1] + min) & mask);
2574 ++ val2 = ucontrol->value.integer.value[1];
2575 ++ if (mc->platform_max && val2 > mc->platform_max)
2576 ++ return -EINVAL;
2577 ++ if (val2 > max - min)
2578 ++ return -EINVAL;
2579 ++ if (val2 < 0)
2580 ++ return -EINVAL;
2581 ++ val2 = (val2 + min) & mask;
2582 + if (invert)
2583 + val2 = max - val2;
2584 + if (reg == reg2) {
2585 +@@ -409,8 +423,15 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
2586 + int err = 0;
2587 + unsigned int val, val_mask, val2 = 0;
2588 +
2589 ++ val = ucontrol->value.integer.value[0];
2590 ++ if (mc->platform_max && val > mc->platform_max)
2591 ++ return -EINVAL;
2592 ++ if (val > max - min)
2593 ++ return -EINVAL;
2594 ++ if (val < 0)
2595 ++ return -EINVAL;
2596 + val_mask = mask << shift;
2597 +- val = (ucontrol->value.integer.value[0] + min) & mask;
2598 ++ val = (val + min) & mask;
2599 + val = val << shift;
2600 +
2601 + err = snd_soc_component_update_bits(component, reg, val_mask, val);
2602 +@@ -859,6 +880,8 @@ int snd_soc_put_xr_sx(struct snd_kcontrol *kcontrol,
2603 + unsigned int i, regval, regmask;
2604 + int err;
2605 +
2606 ++ if (val < mc->min || val > mc->max)
2607 ++ return -EINVAL;
2608 + if (invert)
2609 + val = max - val;
2610 + val &= mask;
2611 +diff --git a/sound/soc/xilinx/xlnx_formatter_pcm.c b/sound/soc/xilinx/xlnx_formatter_pcm.c
2612 +index 91afea9d5de67..ce19a6058b279 100644
2613 +--- a/sound/soc/xilinx/xlnx_formatter_pcm.c
2614 ++++ b/sound/soc/xilinx/xlnx_formatter_pcm.c
2615 +@@ -37,6 +37,7 @@
2616 + #define XLNX_AUD_XFER_COUNT 0x28
2617 + #define XLNX_AUD_CH_STS_START 0x2C
2618 + #define XLNX_BYTES_PER_CH 0x44
2619 ++#define XLNX_AUD_ALIGN_BYTES 64
2620 +
2621 + #define AUD_STS_IOC_IRQ_MASK BIT(31)
2622 + #define AUD_STS_CH_STS_MASK BIT(29)
2623 +@@ -368,12 +369,32 @@ static int xlnx_formatter_pcm_open(struct snd_soc_component *component,
2624 + snd_soc_set_runtime_hwparams(substream, &xlnx_pcm_hardware);
2625 + runtime->private_data = stream_data;
2626 +
2627 +- /* Resize the period size divisible by 64 */
2628 ++ /* Resize the period bytes as divisible by 64 */
2629 + err = snd_pcm_hw_constraint_step(runtime, 0,
2630 +- SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 64);
2631 ++ SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
2632 ++ XLNX_AUD_ALIGN_BYTES);
2633 + if (err) {
2634 + dev_err(component->dev,
2635 +- "unable to set constraint on period bytes\n");
2636 ++ "Unable to set constraint on period bytes\n");
2637 ++ return err;
2638 ++ }
2639 ++
2640 ++ /* Resize the buffer bytes as divisible by 64 */
2641 ++ err = snd_pcm_hw_constraint_step(runtime, 0,
2642 ++ SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
2643 ++ XLNX_AUD_ALIGN_BYTES);
2644 ++ if (err) {
2645 ++ dev_err(component->dev,
2646 ++ "Unable to set constraint on buffer bytes\n");
2647 ++ return err;
2648 ++ }
2649 ++
2650 ++ /* Set periods as integer multiple */
2651 ++ err = snd_pcm_hw_constraint_integer(runtime,
2652 ++ SNDRV_PCM_HW_PARAM_PERIODS);
2653 ++ if (err < 0) {
2654 ++ dev_err(component->dev,
2655 ++ "Unable to set constraint on periods to be integer\n");
2656 + return err;
2657 + }
2658 +
2659 +diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
2660 +index 949c6d129f2a9..aabd3a10ec5b4 100644
2661 +--- a/sound/usb/quirks-table.h
2662 ++++ b/sound/usb/quirks-table.h
2663 +@@ -84,7 +84,7 @@
2664 + * combination.
2665 + */
2666 + {
2667 +- USB_DEVICE(0x041e, 0x4095),
2668 ++ USB_AUDIO_DEVICE(0x041e, 0x4095),
2669 + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
2670 + .ifnum = QUIRK_ANY_INTERFACE,
2671 + .type = QUIRK_COMPOSITE,
2672 +diff --git a/tools/bpf/resolve_btfids/Makefile b/tools/bpf/resolve_btfids/Makefile
2673 +index bb9fa8de7e625..af9f9d3534c96 100644
2674 +--- a/tools/bpf/resolve_btfids/Makefile
2675 ++++ b/tools/bpf/resolve_btfids/Makefile
2676 +@@ -9,7 +9,11 @@ ifeq ($(V),1)
2677 + msg =
2678 + else
2679 + Q = @
2680 +- msg = @printf ' %-8s %s%s\n' "$(1)" "$(notdir $(2))" "$(if $(3), $(3))";
2681 ++ ifeq ($(silent),1)
2682 ++ msg =
2683 ++ else
2684 ++ msg = @printf ' %-8s %s%s\n' "$(1)" "$(notdir $(2))" "$(if $(3), $(3))";
2685 ++ endif
2686 + MAKEFLAGS=--no-print-directory
2687 + endif
2688 +
2689 +diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
2690 +index a963b5b8eb724..96fe9c1af3364 100644
2691 +--- a/tools/perf/util/stat-display.c
2692 ++++ b/tools/perf/util/stat-display.c
2693 +@@ -555,15 +555,16 @@ static void collect_all_aliases(struct perf_stat_config *config, struct evsel *c
2694 +
2695 + alias = list_prepare_entry(counter, &(evlist->core.entries), core.node);
2696 + list_for_each_entry_continue (alias, &evlist->core.entries, core.node) {
2697 +- if (strcmp(evsel__name(alias), evsel__name(counter)) ||
2698 +- alias->scale != counter->scale ||
2699 +- alias->cgrp != counter->cgrp ||
2700 +- strcmp(alias->unit, counter->unit) ||
2701 +- evsel__is_clock(alias) != evsel__is_clock(counter) ||
2702 +- !strcmp(alias->pmu_name, counter->pmu_name))
2703 +- break;
2704 +- alias->merged_stat = true;
2705 +- cb(config, alias, data, false);
2706 ++ /* Merge events with the same name, etc. but on different PMUs. */
2707 ++ if (!strcmp(evsel__name(alias), evsel__name(counter)) &&
2708 ++ alias->scale == counter->scale &&
2709 ++ alias->cgrp == counter->cgrp &&
2710 ++ !strcmp(alias->unit, counter->unit) &&
2711 ++ evsel__is_clock(alias) == evsel__is_clock(counter) &&
2712 ++ strcmp(alias->pmu_name, counter->pmu_name)) {
2713 ++ alias->merged_stat = true;
2714 ++ cb(config, alias, data, false);
2715 ++ }
2716 + }
2717 + }
2718 +
2719 +diff --git a/tools/testing/selftests/exec/Makefile b/tools/testing/selftests/exec/Makefile
2720 +index dd61118df66ed..12c5e27d32c16 100644
2721 +--- a/tools/testing/selftests/exec/Makefile
2722 ++++ b/tools/testing/selftests/exec/Makefile
2723 +@@ -5,7 +5,7 @@ CFLAGS += -D_GNU_SOURCE
2724 +
2725 + TEST_PROGS := binfmt_script non-regular
2726 + TEST_GEN_PROGS := execveat load_address_4096 load_address_2097152 load_address_16777216
2727 +-TEST_GEN_FILES := execveat.symlink execveat.denatured script subdir pipe
2728 ++TEST_GEN_FILES := execveat.symlink execveat.denatured script subdir
2729 + # Makefile is a run-time dependency, since it's accessed by the execveat test
2730 + TEST_FILES := Makefile
2731 +
2732 +diff --git a/tools/testing/selftests/futex/Makefile b/tools/testing/selftests/futex/Makefile
2733 +index 12631f0076a10..11e157d7533b8 100644
2734 +--- a/tools/testing/selftests/futex/Makefile
2735 ++++ b/tools/testing/selftests/futex/Makefile
2736 +@@ -11,7 +11,7 @@ all:
2737 + @for DIR in $(SUBDIRS); do \
2738 + BUILD_TARGET=$(OUTPUT)/$$DIR; \
2739 + mkdir $$BUILD_TARGET -p; \
2740 +- make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
2741 ++ $(MAKE) OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
2742 + if [ -e $$DIR/$(TEST_PROGS) ]; then \
2743 + rsync -a $$DIR/$(TEST_PROGS) $$BUILD_TARGET/; \
2744 + fi \
2745 +@@ -32,6 +32,6 @@ override define CLEAN
2746 + @for DIR in $(SUBDIRS); do \
2747 + BUILD_TARGET=$(OUTPUT)/$$DIR; \
2748 + mkdir $$BUILD_TARGET -p; \
2749 +- make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
2750 ++ $(MAKE) OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
2751 + done
2752 + endef
2753 +diff --git a/tools/testing/selftests/netfilter/nft_concat_range.sh b/tools/testing/selftests/netfilter/nft_concat_range.sh
2754 +index 5a4938d6dcf25..9313fa32bef13 100755
2755 +--- a/tools/testing/selftests/netfilter/nft_concat_range.sh
2756 ++++ b/tools/testing/selftests/netfilter/nft_concat_range.sh
2757 +@@ -27,7 +27,7 @@ TYPES="net_port port_net net6_port port_proto net6_port_mac net6_port_mac_proto
2758 + net_port_mac_proto_net"
2759 +
2760 + # Reported bugs, also described by TYPE_ variables below
2761 +-BUGS="flush_remove_add"
2762 ++BUGS="flush_remove_add reload"
2763 +
2764 + # List of possible paths to pktgen script from kernel tree for performance tests
2765 + PKTGEN_SCRIPT_PATHS="
2766 +@@ -337,6 +337,23 @@ TYPE_flush_remove_add="
2767 + display Add two elements, flush, re-add
2768 + "
2769 +
2770 ++TYPE_reload="
2771 ++display net,mac with reload
2772 ++type_spec ipv4_addr . ether_addr
2773 ++chain_spec ip daddr . ether saddr
2774 ++dst addr4
2775 ++src mac
2776 ++start 1
2777 ++count 1
2778 ++src_delta 2000
2779 ++tools sendip nc bash
2780 ++proto udp
2781 ++
2782 ++race_repeat 0
2783 ++
2784 ++perf_duration 0
2785 ++"
2786 ++
2787 + # Set template for all tests, types and rules are filled in depending on test
2788 + set_template='
2789 + flush ruleset
2790 +@@ -1455,6 +1472,59 @@ test_bug_flush_remove_add() {
2791 + nft flush ruleset
2792 + }
2793 +
2794 ++# - add ranged element, check that packets match it
2795 ++# - reload the set, check packets still match
2796 ++test_bug_reload() {
2797 ++ setup veth send_"${proto}" set || return ${KSELFTEST_SKIP}
2798 ++ rstart=${start}
2799 ++
2800 ++ range_size=1
2801 ++ for i in $(seq "${start}" $((start + count))); do
2802 ++ end=$((start + range_size))
2803 ++
2804 ++ # Avoid negative or zero-sized port ranges
2805 ++ if [ $((end / 65534)) -gt $((start / 65534)) ]; then
2806 ++ start=${end}
2807 ++ end=$((end + 1))
2808 ++ fi
2809 ++ srcstart=$((start + src_delta))
2810 ++ srcend=$((end + src_delta))
2811 ++
2812 ++ add "$(format)" || return 1
2813 ++ range_size=$((range_size + 1))
2814 ++ start=$((end + range_size))
2815 ++ done
2816 ++
2817 ++	# check kernel does allocate pcpu scratch map
2818 ++	# for reload with no element add/delete
2819 ++ ( echo flush set inet filter test ;
2820 ++ nft list set inet filter test ) | nft -f -
2821 ++
2822 ++ start=${rstart}
2823 ++ range_size=1
2824 ++
2825 ++ for i in $(seq "${start}" $((start + count))); do
2826 ++ end=$((start + range_size))
2827 ++
2828 ++ # Avoid negative or zero-sized port ranges
2829 ++ if [ $((end / 65534)) -gt $((start / 65534)) ]; then
2830 ++ start=${end}
2831 ++ end=$((end + 1))
2832 ++ fi
2833 ++ srcstart=$((start + src_delta))
2834 ++ srcend=$((end + src_delta))
2835 ++
2836 ++ for j in $(seq ${start} $((range_size / 2 + 1)) ${end}); do
2837 ++ send_match "${j}" $((j + src_delta)) || return 1
2838 ++ done
2839 ++
2840 ++ range_size=$((range_size + 1))
2841 ++ start=$((end + range_size))
2842 ++ done
2843 ++
2844 ++ nft flush ruleset
2845 ++}
2846 ++
2847 + test_reported_issues() {
2848 + eval test_bug_"${subtest}"
2849 + }