Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.20 commit in: /
Date: Sun, 13 Jan 2019 19:30:19
Message-Id: 1547407792.255753c89d48f9d5e6428111ab1439c1ed6d1985.mpagano@gentoo
1 commit: 255753c89d48f9d5e6428111ab1439c1ed6d1985
2 Author: Mike Pagano <mpagano@gentoo.org>
3 AuthorDate: Sun Jan 13 19:29:52 2019 +0000
4 Commit: Mike Pagano <mpagano@gentoo.org>
5 CommitDate: Sun Jan 13 19:29:52 2019 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=255753c8
7
8 proj/linux-patches: Linux patch 4.20.2
9
10 Signed-off-by: Mike Pagano <mpagano@gentoo.org>
11
12 0000_README | 4 +
13 1001_linux-4.20.2.patch | 3086 +++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 3090 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 543d775..0e0dc28 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -47,6 +47,10 @@ Patch: 1000_linux-4.20.1.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.20.1
23
24 +Patch: 1001_linux-4.20.2.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.20.2
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1001_linux-4.20.2.patch b/1001_linux-4.20.2.patch
33 new file mode 100644
34 index 0000000..212bec9
35 --- /dev/null
36 +++ b/1001_linux-4.20.2.patch
37 @@ -0,0 +1,3086 @@
38 +diff --git a/Makefile b/Makefile
39 +index 84d2f8deea30..4ba3dd0bf35d 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 4
45 + PATCHLEVEL = 20
46 +-SUBLEVEL = 1
47 ++SUBLEVEL = 2
48 + EXTRAVERSION =
49 + NAME = Shy Crocodile
50 +
51 +diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
52 +index 6327fd79b0fb..fd59fef9931b 100644
53 +--- a/arch/powerpc/kernel/signal_32.c
54 ++++ b/arch/powerpc/kernel/signal_32.c
55 +@@ -848,7 +848,23 @@ static long restore_tm_user_regs(struct pt_regs *regs,
56 + /* If TM bits are set to the reserved value, it's an invalid context */
57 + if (MSR_TM_RESV(msr_hi))
58 + return 1;
59 +- /* Pull in the MSR TM bits from the user context */
60 ++
61 ++ /*
62 ++ * Disabling preemption, since it is unsafe to be preempted
63 ++ * with MSR[TS] set without recheckpointing.
64 ++ */
65 ++ preempt_disable();
66 ++
67 ++ /*
68 ++ * CAUTION:
69 ++ * After regs->MSR[TS] being updated, make sure that get_user(),
70 ++ * put_user() or similar functions are *not* called. These
71 ++ * functions can generate page faults which will cause the process
72 ++ * to be de-scheduled with MSR[TS] set but without calling
73 ++ * tm_recheckpoint(). This can cause a bug.
74 ++ *
75 ++ * Pull in the MSR TM bits from the user context
76 ++ */
77 + regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
78 + /* Now, recheckpoint. This loads up all of the checkpointed (older)
79 + * registers, including FP and V[S]Rs. After recheckpointing, the
80 +@@ -873,6 +889,8 @@ static long restore_tm_user_regs(struct pt_regs *regs,
81 + }
82 + #endif
83 +
84 ++ preempt_enable();
85 ++
86 + return 0;
87 + }
88 + #endif
89 +@@ -1140,11 +1158,11 @@ SYSCALL_DEFINE0(rt_sigreturn)
90 + {
91 + struct rt_sigframe __user *rt_sf;
92 + struct pt_regs *regs = current_pt_regs();
93 +- int tm_restore = 0;
94 + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
95 + struct ucontext __user *uc_transact;
96 + unsigned long msr_hi;
97 + unsigned long tmp;
98 ++ int tm_restore = 0;
99 + #endif
100 + /* Always make any pending restarted system calls return -EINTR */
101 + current->restart_block.fn = do_no_restart_syscall;
102 +@@ -1192,19 +1210,11 @@ SYSCALL_DEFINE0(rt_sigreturn)
103 + goto bad;
104 + }
105 + }
106 +- if (!tm_restore) {
107 +- /*
108 +- * Unset regs->msr because ucontext MSR TS is not
109 +- * set, and recheckpoint was not called. This avoid
110 +- * hitting a TM Bad thing at RFID
111 +- */
112 +- regs->msr &= ~MSR_TS_MASK;
113 +- }
114 +- /* Fall through, for non-TM restore */
115 +-#endif
116 + if (!tm_restore)
117 +- if (do_setcontext(&rt_sf->uc, regs, 1))
118 +- goto bad;
119 ++ /* Fall through, for non-TM restore */
120 ++#endif
121 ++ if (do_setcontext(&rt_sf->uc, regs, 1))
122 ++ goto bad;
123 +
124 + /*
125 + * It's not clear whether or why it is desirable to save the
126 +diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
127 +index daa28cb72272..bbd1c73243d7 100644
128 +--- a/arch/powerpc/kernel/signal_64.c
129 ++++ b/arch/powerpc/kernel/signal_64.c
130 +@@ -467,20 +467,6 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
131 + if (MSR_TM_RESV(msr))
132 + return -EINVAL;
133 +
134 +- /* pull in MSR TS bits from user context */
135 +- regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
136 +-
137 +- /*
138 +- * Ensure that TM is enabled in regs->msr before we leave the signal
139 +- * handler. It could be the case that (a) user disabled the TM bit
140 +- * through the manipulation of the MSR bits in uc_mcontext or (b) the
141 +- * TM bit was disabled because a sufficient number of context switches
142 +- * happened whilst in the signal handler and load_tm overflowed,
143 +- * disabling the TM bit. In either case we can end up with an illegal
144 +- * TM state leading to a TM Bad Thing when we return to userspace.
145 +- */
146 +- regs->msr |= MSR_TM;
147 +-
148 + /* pull in MSR LE from user context */
149 + regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
150 +
151 +@@ -572,6 +558,34 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
152 + tm_enable();
153 + /* Make sure the transaction is marked as failed */
154 + tsk->thread.tm_texasr |= TEXASR_FS;
155 ++
156 ++ /*
157 ++ * Disabling preemption, since it is unsafe to be preempted
158 ++ * with MSR[TS] set without recheckpointing.
159 ++ */
160 ++ preempt_disable();
161 ++
162 ++ /* pull in MSR TS bits from user context */
163 ++ regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
164 ++
165 ++ /*
166 ++ * Ensure that TM is enabled in regs->msr before we leave the signal
167 ++ * handler. It could be the case that (a) user disabled the TM bit
168 ++ * through the manipulation of the MSR bits in uc_mcontext or (b) the
169 ++ * TM bit was disabled because a sufficient number of context switches
170 ++ * happened whilst in the signal handler and load_tm overflowed,
171 ++ * disabling the TM bit. In either case we can end up with an illegal
172 ++ * TM state leading to a TM Bad Thing when we return to userspace.
173 ++ *
174 ++ * CAUTION:
175 ++ * After regs->MSR[TS] being updated, make sure that get_user(),
176 ++ * put_user() or similar functions are *not* called. These
177 ++ * functions can generate page faults which will cause the process
178 ++ * to be de-scheduled with MSR[TS] set but without calling
179 ++ * tm_recheckpoint(). This can cause a bug.
180 ++ */
181 ++ regs->msr |= MSR_TM;
182 ++
183 + /* This loads the checkpointed FP/VEC state, if used */
184 + tm_recheckpoint(&tsk->thread);
185 +
186 +@@ -585,6 +599,8 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
187 + regs->msr |= MSR_VEC;
188 + }
189 +
190 ++ preempt_enable();
191 ++
192 + return err;
193 + }
194 + #endif
195 +@@ -740,23 +756,11 @@ SYSCALL_DEFINE0(rt_sigreturn)
196 + &uc_transact->uc_mcontext))
197 + goto badframe;
198 + }
199 +-#endif
200 ++ else
201 + /* Fall through, for non-TM restore */
202 +- if (!MSR_TM_ACTIVE(msr)) {
203 +- /*
204 +- * Unset MSR[TS] on the thread regs since MSR from user
205 +- * context does not have MSR active, and recheckpoint was
206 +- * not called since restore_tm_sigcontexts() was not called
207 +- * also.
208 +- *
209 +- * If not unsetting it, the code can RFID to userspace with
210 +- * MSR[TS] set, but without CPU in the proper state,
211 +- * causing a TM bad thing.
212 +- */
213 +- current->thread.regs->msr &= ~MSR_TS_MASK;
214 +- if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
215 +- goto badframe;
216 +- }
217 ++#endif
218 ++ if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
219 ++ goto badframe;
220 +
221 + if (restore_altstack(&uc->uc_stack))
222 + goto badframe;
223 +diff --git a/arch/powerpc/platforms/4xx/ocm.c b/arch/powerpc/platforms/4xx/ocm.c
224 +index f5bbd4563342..3632de52db0a 100644
225 +--- a/arch/powerpc/platforms/4xx/ocm.c
226 ++++ b/arch/powerpc/platforms/4xx/ocm.c
227 +@@ -179,7 +179,7 @@ static void __init ocm_init_node(int count, struct device_node *node)
228 + /* ioremap the non-cached region */
229 + if (ocm->nc.memtotal) {
230 + ocm->nc.virt = __ioremap(ocm->nc.phys, ocm->nc.memtotal,
231 +- _PAGE_EXEC | PAGE_KERNEL_NCG);
232 ++ _PAGE_EXEC | pgprot_val(PAGE_KERNEL_NCG));
233 +
234 + if (!ocm->nc.virt) {
235 + printk(KERN_ERR
236 +@@ -194,7 +194,7 @@ static void __init ocm_init_node(int count, struct device_node *node)
237 +
238 + if (ocm->c.memtotal) {
239 + ocm->c.virt = __ioremap(ocm->c.phys, ocm->c.memtotal,
240 +- _PAGE_EXEC | PAGE_KERNEL);
241 ++ _PAGE_EXEC | pgprot_val(PAGE_KERNEL));
242 +
243 + if (!ocm->c.virt) {
244 + printk(KERN_ERR
245 +diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
246 +index 29bfe8017a2d..da1de190a3b1 100644
247 +--- a/block/blk-mq-sched.c
248 ++++ b/block/blk-mq-sched.c
249 +@@ -54,13 +54,14 @@ void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio)
250 + * Mark a hardware queue as needing a restart. For shared queues, maintain
251 + * a count of how many hardware queues are marked for restart.
252 + */
253 +-static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
254 ++void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
255 + {
256 + if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
257 + return;
258 +
259 + set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
260 + }
261 ++EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);
262 +
263 + void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
264 + {
265 +diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
266 +index 8a9544203173..38e06e23821f 100644
267 +--- a/block/blk-mq-sched.h
268 ++++ b/block/blk-mq-sched.h
269 +@@ -15,6 +15,7 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
270 + struct request **merged_request);
271 + bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
272 + bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
273 ++void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
274 + void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
275 +
276 + void blk_mq_sched_insert_request(struct request *rq, bool at_head,
277 +diff --git a/block/blk-stat.h b/block/blk-stat.h
278 +index f4a1568e81a4..17b47a86eefb 100644
279 +--- a/block/blk-stat.h
280 ++++ b/block/blk-stat.h
281 +@@ -145,6 +145,11 @@ static inline void blk_stat_activate_nsecs(struct blk_stat_callback *cb,
282 + mod_timer(&cb->timer, jiffies + nsecs_to_jiffies(nsecs));
283 + }
284 +
285 ++static inline void blk_stat_deactivate(struct blk_stat_callback *cb)
286 ++{
287 ++ del_timer_sync(&cb->timer);
288 ++}
289 ++
290 + /**
291 + * blk_stat_activate_msecs() - Gather block statistics during a time window in
292 + * milliseconds.
293 +diff --git a/block/blk-wbt.c b/block/blk-wbt.c
294 +index 8ac93fcbaa2e..0c62bf4eca75 100644
295 +--- a/block/blk-wbt.c
296 ++++ b/block/blk-wbt.c
297 +@@ -760,8 +760,10 @@ void wbt_disable_default(struct request_queue *q)
298 + if (!rqos)
299 + return;
300 + rwb = RQWB(rqos);
301 +- if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
302 ++ if (rwb->enable_state == WBT_STATE_ON_DEFAULT) {
303 ++ blk_stat_deactivate(rwb->cb);
304 + rwb->wb_normal = 0;
305 ++ }
306 + }
307 + EXPORT_SYMBOL_GPL(wbt_disable_default);
308 +
309 +diff --git a/block/mq-deadline.c b/block/mq-deadline.c
310 +index 099a9e05854c..d5e21ce44d2c 100644
311 +--- a/block/mq-deadline.c
312 ++++ b/block/mq-deadline.c
313 +@@ -373,9 +373,16 @@ done:
314 +
315 + /*
316 + * One confusing aspect here is that we get called for a specific
317 +- * hardware queue, but we return a request that may not be for a
318 ++ * hardware queue, but we may return a request that is for a
319 + * different hardware queue. This is because mq-deadline has shared
320 + * state for all hardware queues, in terms of sorting, FIFOs, etc.
321 ++ *
322 ++ * For a zoned block device, __dd_dispatch_request() may return NULL
323 ++ * if all the queued write requests are directed at zones that are already
324 ++ * locked due to on-going write requests. In this case, make sure to mark
325 ++ * the queue as needing a restart to ensure that the queue is run again
326 ++ * and the pending writes dispatched once the target zones for the ongoing
327 ++ * write requests are unlocked in dd_finish_request().
328 + */
329 + static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
330 + {
331 +@@ -384,6 +391,9 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
332 +
333 + spin_lock(&dd->lock);
334 + rq = __dd_dispatch_request(dd);
335 ++ if (!rq && blk_queue_is_zoned(hctx->queue) &&
336 ++ !list_empty(&dd->fifo_list[WRITE]))
337 ++ blk_mq_sched_mark_restart_hctx(hctx);
338 + spin_unlock(&dd->lock);
339 +
340 + return rq;
341 +diff --git a/drivers/base/dd.c b/drivers/base/dd.c
342 +index 169412ee4ae8..dbba123e058d 100644
343 +--- a/drivers/base/dd.c
344 ++++ b/drivers/base/dd.c
345 +@@ -933,11 +933,11 @@ static void __device_release_driver(struct device *dev, struct device *parent)
346 +
347 + while (device_links_busy(dev)) {
348 + device_unlock(dev);
349 +- if (parent)
350 ++ if (parent && dev->bus->need_parent_lock)
351 + device_unlock(parent);
352 +
353 + device_links_unbind_consumers(dev);
354 +- if (parent)
355 ++ if (parent && dev->bus->need_parent_lock)
356 + device_lock(parent);
357 +
358 + device_lock(dev);
359 +diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
360 +index 4879595200e1..8e6a0db6555f 100644
361 +--- a/drivers/block/zram/zram_drv.c
362 ++++ b/drivers/block/zram/zram_drv.c
363 +@@ -382,8 +382,10 @@ static ssize_t backing_dev_store(struct device *dev,
364 +
365 + bdev = bdgrab(I_BDEV(inode));
366 + err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
367 +- if (err < 0)
368 ++ if (err < 0) {
369 ++ bdev = NULL;
370 + goto out;
371 ++ }
372 +
373 + nr_pages = i_size_read(inode) >> PAGE_SHIFT;
374 + bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
375 +diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
376 +index 99e2aace8078..2c1f459c0c63 100644
377 +--- a/drivers/dax/pmem.c
378 ++++ b/drivers/dax/pmem.c
379 +@@ -48,9 +48,8 @@ static void dax_pmem_percpu_exit(void *data)
380 + percpu_ref_exit(ref);
381 + }
382 +
383 +-static void dax_pmem_percpu_kill(void *data)
384 ++static void dax_pmem_percpu_kill(struct percpu_ref *ref)
385 + {
386 +- struct percpu_ref *ref = data;
387 + struct dax_pmem *dax_pmem = to_dax_pmem(ref);
388 +
389 + dev_dbg(dax_pmem->dev, "trace\n");
390 +@@ -112,17 +111,10 @@ static int dax_pmem_probe(struct device *dev)
391 + }
392 +
393 + dax_pmem->pgmap.ref = &dax_pmem->ref;
394 ++ dax_pmem->pgmap.kill = dax_pmem_percpu_kill;
395 + addr = devm_memremap_pages(dev, &dax_pmem->pgmap);
396 +- if (IS_ERR(addr)) {
397 +- devm_remove_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref);
398 +- percpu_ref_exit(&dax_pmem->ref);
399 ++ if (IS_ERR(addr))
400 + return PTR_ERR(addr);
401 +- }
402 +-
403 +- rc = devm_add_action_or_reset(dev, dax_pmem_percpu_kill,
404 +- &dax_pmem->ref);
405 +- if (rc)
406 +- return rc;
407 +
408 + /* adjust the dax_region resource to the start of data */
409 + memcpy(&res, &dax_pmem->pgmap.res, sizeof(res));
410 +diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
411 +index db1bf7f88c1f..e0e6d66de745 100644
412 +--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
413 ++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
414 +@@ -1262,8 +1262,16 @@ nv50_mstm_fini(struct nv50_mstm *mstm)
415 + static void
416 + nv50_mstm_init(struct nv50_mstm *mstm)
417 + {
418 +- if (mstm && mstm->mgr.mst_state)
419 +- drm_dp_mst_topology_mgr_resume(&mstm->mgr);
420 ++ int ret;
421 ++
422 ++ if (!mstm || !mstm->mgr.mst_state)
423 ++ return;
424 ++
425 ++ ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr);
426 ++ if (ret == -1) {
427 ++ drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
428 ++ drm_kms_helper_hotplug_event(mstm->mgr.dev);
429 ++ }
430 + }
431 +
432 + static void
433 +diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
434 +index 79d00d861a31..01ff3c858875 100644
435 +--- a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
436 ++++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
437 +@@ -189,12 +189,14 @@ EXPORT_SYMBOL(rockchip_drm_psr_flush_all);
438 + int rockchip_drm_psr_register(struct drm_encoder *encoder,
439 + int (*psr_set)(struct drm_encoder *, bool enable))
440 + {
441 +- struct rockchip_drm_private *drm_drv = encoder->dev->dev_private;
442 ++ struct rockchip_drm_private *drm_drv;
443 + struct psr_drv *psr;
444 +
445 + if (!encoder || !psr_set)
446 + return -EINVAL;
447 +
448 ++ drm_drv = encoder->dev->dev_private;
449 ++
450 + psr = kzalloc(sizeof(struct psr_drv), GFP_KERNEL);
451 + if (!psr)
452 + return -ENOMEM;
453 +diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
454 +index c6635f23918a..ae6254b0b1ae 100644
455 +--- a/drivers/gpu/drm/vc4/vc4_plane.c
456 ++++ b/drivers/gpu/drm/vc4/vc4_plane.c
457 +@@ -321,6 +321,7 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
458 + if (vc4_state->is_unity)
459 + vc4_state->x_scaling[0] = VC4_SCALING_PPF;
460 + } else {
461 ++ vc4_state->is_yuv = false;
462 + vc4_state->x_scaling[1] = VC4_SCALING_NONE;
463 + vc4_state->y_scaling[1] = VC4_SCALING_NONE;
464 + }
465 +diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
466 +index d293e55553bd..ba7aaf421f36 100644
467 +--- a/drivers/hwtracing/intel_th/msu.c
468 ++++ b/drivers/hwtracing/intel_th/msu.c
469 +@@ -1423,7 +1423,8 @@ nr_pages_store(struct device *dev, struct device_attribute *attr,
470 + if (!end)
471 + break;
472 +
473 +- len -= end - p;
474 ++ /* consume the number and the following comma, hence +1 */
475 ++ len -= end - p + 1;
476 + p = end + 1;
477 + } while (len);
478 +
479 +diff --git a/drivers/hwtracing/stm/policy.c b/drivers/hwtracing/stm/policy.c
480 +index 0910ec807187..4b9e44b227d8 100644
481 +--- a/drivers/hwtracing/stm/policy.c
482 ++++ b/drivers/hwtracing/stm/policy.c
483 +@@ -440,10 +440,8 @@ stp_policy_make(struct config_group *group, const char *name)
484 +
485 + stm->policy = kzalloc(sizeof(*stm->policy), GFP_KERNEL);
486 + if (!stm->policy) {
487 +- mutex_unlock(&stm->policy_mutex);
488 +- stm_put_protocol(pdrv);
489 +- stm_put_device(stm);
490 +- return ERR_PTR(-ENOMEM);
491 ++ ret = ERR_PTR(-ENOMEM);
492 ++ goto unlock_policy;
493 + }
494 +
495 + config_group_init_type_name(&stm->policy->group, name,
496 +@@ -458,7 +456,11 @@ unlock_policy:
497 + mutex_unlock(&stm->policy_mutex);
498 +
499 + if (IS_ERR(ret)) {
500 +- stm_put_protocol(stm->pdrv);
501 ++ /*
502 ++ * pdrv and stm->pdrv at this point can be quite different,
503 ++ * and only one of them needs to be 'put'
504 ++ */
505 ++ stm_put_protocol(pdrv);
506 + stm_put_device(stm);
507 + }
508 +
509 +diff --git a/drivers/iio/adc/qcom-spmi-adc5.c b/drivers/iio/adc/qcom-spmi-adc5.c
510 +index f9af6b082916..6a866cc187f7 100644
511 +--- a/drivers/iio/adc/qcom-spmi-adc5.c
512 ++++ b/drivers/iio/adc/qcom-spmi-adc5.c
513 +@@ -423,6 +423,7 @@ struct adc5_channels {
514 + enum vadc_scale_fn_type scale_fn_type;
515 + };
516 +
517 ++/* In these definitions, _pre refers to an index into adc5_prescale_ratios. */
518 + #define ADC5_CHAN(_dname, _type, _mask, _pre, _scale) \
519 + { \
520 + .datasheet_name = _dname, \
521 +@@ -443,63 +444,63 @@ struct adc5_channels {
522 + _pre, _scale) \
523 +
524 + static const struct adc5_channels adc5_chans_pmic[ADC5_MAX_CHANNEL] = {
525 +- [ADC5_REF_GND] = ADC5_CHAN_VOLT("ref_gnd", 1,
526 ++ [ADC5_REF_GND] = ADC5_CHAN_VOLT("ref_gnd", 0,
527 + SCALE_HW_CALIB_DEFAULT)
528 +- [ADC5_1P25VREF] = ADC5_CHAN_VOLT("vref_1p25", 1,
529 ++ [ADC5_1P25VREF] = ADC5_CHAN_VOLT("vref_1p25", 0,
530 + SCALE_HW_CALIB_DEFAULT)
531 +- [ADC5_VPH_PWR] = ADC5_CHAN_VOLT("vph_pwr", 3,
532 ++ [ADC5_VPH_PWR] = ADC5_CHAN_VOLT("vph_pwr", 1,
533 + SCALE_HW_CALIB_DEFAULT)
534 +- [ADC5_VBAT_SNS] = ADC5_CHAN_VOLT("vbat_sns", 3,
535 ++ [ADC5_VBAT_SNS] = ADC5_CHAN_VOLT("vbat_sns", 1,
536 + SCALE_HW_CALIB_DEFAULT)
537 +- [ADC5_DIE_TEMP] = ADC5_CHAN_TEMP("die_temp", 1,
538 ++ [ADC5_DIE_TEMP] = ADC5_CHAN_TEMP("die_temp", 0,
539 + SCALE_HW_CALIB_PMIC_THERM)
540 +- [ADC5_USB_IN_I] = ADC5_CHAN_VOLT("usb_in_i_uv", 1,
541 ++ [ADC5_USB_IN_I] = ADC5_CHAN_VOLT("usb_in_i_uv", 0,
542 + SCALE_HW_CALIB_DEFAULT)
543 +- [ADC5_USB_IN_V_16] = ADC5_CHAN_VOLT("usb_in_v_div_16", 16,
544 ++ [ADC5_USB_IN_V_16] = ADC5_CHAN_VOLT("usb_in_v_div_16", 8,
545 + SCALE_HW_CALIB_DEFAULT)
546 +- [ADC5_CHG_TEMP] = ADC5_CHAN_TEMP("chg_temp", 1,
547 ++ [ADC5_CHG_TEMP] = ADC5_CHAN_TEMP("chg_temp", 0,
548 + SCALE_HW_CALIB_PM5_CHG_TEMP)
549 + /* Charger prescales SBUx and MID_CHG to fit within 1.8V upper unit */
550 +- [ADC5_SBUx] = ADC5_CHAN_VOLT("chg_sbux", 3,
551 ++ [ADC5_SBUx] = ADC5_CHAN_VOLT("chg_sbux", 1,
552 + SCALE_HW_CALIB_DEFAULT)
553 +- [ADC5_MID_CHG_DIV6] = ADC5_CHAN_VOLT("chg_mid_chg", 6,
554 ++ [ADC5_MID_CHG_DIV6] = ADC5_CHAN_VOLT("chg_mid_chg", 3,
555 + SCALE_HW_CALIB_DEFAULT)
556 +- [ADC5_XO_THERM_100K_PU] = ADC5_CHAN_TEMP("xo_therm", 1,
557 ++ [ADC5_XO_THERM_100K_PU] = ADC5_CHAN_TEMP("xo_therm", 0,
558 + SCALE_HW_CALIB_XOTHERM)
559 +- [ADC5_AMUX_THM1_100K_PU] = ADC5_CHAN_TEMP("amux_thm1_100k_pu", 1,
560 ++ [ADC5_AMUX_THM1_100K_PU] = ADC5_CHAN_TEMP("amux_thm1_100k_pu", 0,
561 + SCALE_HW_CALIB_THERM_100K_PULLUP)
562 +- [ADC5_AMUX_THM2_100K_PU] = ADC5_CHAN_TEMP("amux_thm2_100k_pu", 1,
563 ++ [ADC5_AMUX_THM2_100K_PU] = ADC5_CHAN_TEMP("amux_thm2_100k_pu", 0,
564 + SCALE_HW_CALIB_THERM_100K_PULLUP)
565 +- [ADC5_AMUX_THM3_100K_PU] = ADC5_CHAN_TEMP("amux_thm3_100k_pu", 1,
566 ++ [ADC5_AMUX_THM3_100K_PU] = ADC5_CHAN_TEMP("amux_thm3_100k_pu", 0,
567 + SCALE_HW_CALIB_THERM_100K_PULLUP)
568 +- [ADC5_AMUX_THM2] = ADC5_CHAN_TEMP("amux_thm2", 1,
569 ++ [ADC5_AMUX_THM2] = ADC5_CHAN_TEMP("amux_thm2", 0,
570 + SCALE_HW_CALIB_PM5_SMB_TEMP)
571 + };
572 +
573 + static const struct adc5_channels adc5_chans_rev2[ADC5_MAX_CHANNEL] = {
574 +- [ADC5_REF_GND] = ADC5_CHAN_VOLT("ref_gnd", 1,
575 ++ [ADC5_REF_GND] = ADC5_CHAN_VOLT("ref_gnd", 0,
576 + SCALE_HW_CALIB_DEFAULT)
577 +- [ADC5_1P25VREF] = ADC5_CHAN_VOLT("vref_1p25", 1,
578 ++ [ADC5_1P25VREF] = ADC5_CHAN_VOLT("vref_1p25", 0,
579 + SCALE_HW_CALIB_DEFAULT)
580 +- [ADC5_VPH_PWR] = ADC5_CHAN_VOLT("vph_pwr", 3,
581 ++ [ADC5_VPH_PWR] = ADC5_CHAN_VOLT("vph_pwr", 1,
582 + SCALE_HW_CALIB_DEFAULT)
583 +- [ADC5_VBAT_SNS] = ADC5_CHAN_VOLT("vbat_sns", 3,
584 ++ [ADC5_VBAT_SNS] = ADC5_CHAN_VOLT("vbat_sns", 1,
585 + SCALE_HW_CALIB_DEFAULT)
586 +- [ADC5_VCOIN] = ADC5_CHAN_VOLT("vcoin", 3,
587 ++ [ADC5_VCOIN] = ADC5_CHAN_VOLT("vcoin", 1,
588 + SCALE_HW_CALIB_DEFAULT)
589 +- [ADC5_DIE_TEMP] = ADC5_CHAN_TEMP("die_temp", 1,
590 ++ [ADC5_DIE_TEMP] = ADC5_CHAN_TEMP("die_temp", 0,
591 + SCALE_HW_CALIB_PMIC_THERM)
592 +- [ADC5_AMUX_THM1_100K_PU] = ADC5_CHAN_TEMP("amux_thm1_100k_pu", 1,
593 ++ [ADC5_AMUX_THM1_100K_PU] = ADC5_CHAN_TEMP("amux_thm1_100k_pu", 0,
594 + SCALE_HW_CALIB_THERM_100K_PULLUP)
595 +- [ADC5_AMUX_THM2_100K_PU] = ADC5_CHAN_TEMP("amux_thm2_100k_pu", 1,
596 ++ [ADC5_AMUX_THM2_100K_PU] = ADC5_CHAN_TEMP("amux_thm2_100k_pu", 0,
597 + SCALE_HW_CALIB_THERM_100K_PULLUP)
598 +- [ADC5_AMUX_THM3_100K_PU] = ADC5_CHAN_TEMP("amux_thm3_100k_pu", 1,
599 ++ [ADC5_AMUX_THM3_100K_PU] = ADC5_CHAN_TEMP("amux_thm3_100k_pu", 0,
600 + SCALE_HW_CALIB_THERM_100K_PULLUP)
601 +- [ADC5_AMUX_THM4_100K_PU] = ADC5_CHAN_TEMP("amux_thm4_100k_pu", 1,
602 ++ [ADC5_AMUX_THM4_100K_PU] = ADC5_CHAN_TEMP("amux_thm4_100k_pu", 0,
603 + SCALE_HW_CALIB_THERM_100K_PULLUP)
604 +- [ADC5_AMUX_THM5_100K_PU] = ADC5_CHAN_TEMP("amux_thm5_100k_pu", 1,
605 ++ [ADC5_AMUX_THM5_100K_PU] = ADC5_CHAN_TEMP("amux_thm5_100k_pu", 0,
606 + SCALE_HW_CALIB_THERM_100K_PULLUP)
607 +- [ADC5_XO_THERM_100K_PU] = ADC5_CHAN_TEMP("xo_therm_100k_pu", 1,
608 ++ [ADC5_XO_THERM_100K_PU] = ADC5_CHAN_TEMP("xo_therm_100k_pu", 0,
609 + SCALE_HW_CALIB_THERM_100K_PULLUP)
610 + };
611 +
612 +@@ -558,6 +559,9 @@ static int adc5_get_dt_channel_data(struct adc5_chip *adc,
613 + return ret;
614 + }
615 + prop->prescale = ret;
616 ++ } else {
617 ++ prop->prescale =
618 ++ adc->data->adc_chans[prop->channel].prescale_index;
619 + }
620 +
621 + ret = of_property_read_u32(node, "qcom,hw-settle-time", &value);
622 +diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
623 +index 0e134b13967a..eae740fceed9 100644
624 +--- a/drivers/iio/dac/ad5686.c
625 ++++ b/drivers/iio/dac/ad5686.c
626 +@@ -124,7 +124,8 @@ static int ad5686_read_raw(struct iio_dev *indio_dev,
627 + mutex_unlock(&indio_dev->mlock);
628 + if (ret < 0)
629 + return ret;
630 +- *val = ret;
631 ++ *val = (ret >> chan->scan_type.shift) &
632 ++ GENMASK(chan->scan_type.realbits - 1, 0);
633 + return IIO_VAL_INT;
634 + case IIO_CHAN_INFO_SCALE:
635 + *val = st->vref_mv;
636 +diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
637 +index ba668d49c751..476abc74178e 100644
638 +--- a/drivers/infiniband/core/iwcm.c
639 ++++ b/drivers/infiniband/core/iwcm.c
640 +@@ -502,17 +502,21 @@ static void iw_cm_check_wildcard(struct sockaddr_storage *pm_addr,
641 + */
642 + static int iw_cm_map(struct iw_cm_id *cm_id, bool active)
643 + {
644 ++ const char *devname = dev_name(&cm_id->device->dev);
645 ++ const char *ifname = cm_id->device->iwcm->ifname;
646 + struct iwpm_dev_data pm_reg_msg;
647 + struct iwpm_sa_data pm_msg;
648 + int status;
649 +
650 ++ if (strlen(devname) >= sizeof(pm_reg_msg.dev_name) ||
651 ++ strlen(ifname) >= sizeof(pm_reg_msg.if_name))
652 ++ return -EINVAL;
653 ++
654 + cm_id->m_local_addr = cm_id->local_addr;
655 + cm_id->m_remote_addr = cm_id->remote_addr;
656 +
657 +- memcpy(pm_reg_msg.dev_name, dev_name(&cm_id->device->dev),
658 +- sizeof(pm_reg_msg.dev_name));
659 +- memcpy(pm_reg_msg.if_name, cm_id->device->iwcm->ifname,
660 +- sizeof(pm_reg_msg.if_name));
661 ++ strncpy(pm_reg_msg.dev_name, devname, sizeof(pm_reg_msg.dev_name));
662 ++ strncpy(pm_reg_msg.if_name, ifname, sizeof(pm_reg_msg.if_name));
663 +
664 + if (iwpm_register_pid(&pm_reg_msg, RDMA_NL_IWCM) ||
665 + !iwpm_valid_pid())
666 +diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
667 +index c962160292f4..f0438bc6df88 100644
668 +--- a/drivers/infiniband/sw/rxe/rxe_resp.c
669 ++++ b/drivers/infiniband/sw/rxe/rxe_resp.c
670 +@@ -844,11 +844,16 @@ static enum resp_states do_complete(struct rxe_qp *qp,
671 +
672 + memset(&cqe, 0, sizeof(cqe));
673 +
674 +- wc->wr_id = wqe->wr_id;
675 +- wc->status = qp->resp.status;
676 +- wc->qp = &qp->ibqp;
677 ++ if (qp->rcq->is_user) {
678 ++ uwc->status = qp->resp.status;
679 ++ uwc->qp_num = qp->ibqp.qp_num;
680 ++ uwc->wr_id = wqe->wr_id;
681 ++ } else {
682 ++ wc->status = qp->resp.status;
683 ++ wc->qp = &qp->ibqp;
684 ++ wc->wr_id = wqe->wr_id;
685 ++ }
686 +
687 +- /* fields after status are not required for errors */
688 + if (wc->status == IB_WC_SUCCESS) {
689 + wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
690 + pkt->mask & RXE_WRITE_MASK) ?
691 +diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
692 +index 2357aa727dcf..96c767324575 100644
693 +--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
694 ++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
695 +@@ -2010,6 +2010,14 @@ static void srpt_free_ch(struct kref *kref)
696 + kfree_rcu(ch, rcu);
697 + }
698 +
699 ++/*
700 ++ * Shut down the SCSI target session, tell the connection manager to
701 ++ * disconnect the associated RDMA channel, transition the QP to the error
702 ++ * state and remove the channel from the channel list. This function is
703 ++ * typically called from inside srpt_zerolength_write_done(). Concurrent
704 ++ * srpt_zerolength_write() calls from inside srpt_close_ch() are possible
705 ++ * as long as the channel is on sport->nexus_list.
706 ++ */
707 + static void srpt_release_channel_work(struct work_struct *w)
708 + {
709 + struct srpt_rdma_ch *ch;
710 +@@ -2037,6 +2045,11 @@ static void srpt_release_channel_work(struct work_struct *w)
711 + else
712 + ib_destroy_cm_id(ch->ib_cm.cm_id);
713 +
714 ++ sport = ch->sport;
715 ++ mutex_lock(&sport->mutex);
716 ++ list_del_rcu(&ch->list);
717 ++ mutex_unlock(&sport->mutex);
718 ++
719 + srpt_destroy_ch_ib(ch);
720 +
721 + srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
722 +@@ -2047,11 +2060,6 @@ static void srpt_release_channel_work(struct work_struct *w)
723 + sdev, ch->rq_size,
724 + srp_max_req_size, DMA_FROM_DEVICE);
725 +
726 +- sport = ch->sport;
727 +- mutex_lock(&sport->mutex);
728 +- list_del_rcu(&ch->list);
729 +- mutex_unlock(&sport->mutex);
730 +-
731 + wake_up(&sport->ch_releaseQ);
732 +
733 + kref_put(&ch->kref, srpt_free_ch);
734 +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
735 +index 41a4b8808802..f3afab82f3ee 100644
736 +--- a/drivers/iommu/intel-iommu.c
737 ++++ b/drivers/iommu/intel-iommu.c
738 +@@ -2044,7 +2044,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
739 + * than default. Unnecessary for PT mode.
740 + */
741 + if (translation != CONTEXT_TT_PASS_THROUGH) {
742 +- for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
743 ++ for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
744 + ret = -ENOMEM;
745 + pgd = phys_to_virt(dma_pte_addr(pgd));
746 + if (!dma_pte_present(pgd))
747 +@@ -2058,7 +2058,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
748 + translation = CONTEXT_TT_MULTI_LEVEL;
749 +
750 + context_set_address_root(context, virt_to_phys(pgd));
751 +- context_set_address_width(context, iommu->agaw);
752 ++ context_set_address_width(context, agaw);
753 + } else {
754 + /*
755 + * In pass through mode, AW must be programmed to
756 +diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
757 +index 9038c302d5c2..44f180e47622 100644
758 +--- a/drivers/md/dm-table.c
759 ++++ b/drivers/md/dm-table.c
760 +@@ -1927,6 +1927,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
761 + */
762 + if (blk_queue_is_zoned(q))
763 + blk_revalidate_disk_zones(t->md->disk);
764 ++
765 ++ /* Allow reads to exceed readahead limits */
766 ++ q->backing_dev_info->io_pages = limits->max_sectors >> (PAGE_SHIFT - 9);
767 + }
768 +
769 + unsigned int dm_table_get_num_targets(struct dm_table *t)
770 +diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
771 +index 39804d830305..fd5c52b21436 100644
772 +--- a/drivers/media/pci/cx23885/cx23885-core.c
773 ++++ b/drivers/media/pci/cx23885/cx23885-core.c
774 +@@ -23,6 +23,7 @@
775 + #include <linux/moduleparam.h>
776 + #include <linux/kmod.h>
777 + #include <linux/kernel.h>
778 ++#include <linux/pci.h>
779 + #include <linux/slab.h>
780 + #include <linux/interrupt.h>
781 + #include <linux/delay.h>
782 +@@ -41,6 +42,18 @@ MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
783 + MODULE_LICENSE("GPL");
784 + MODULE_VERSION(CX23885_VERSION);
785 +
786 ++/*
787 ++ * Some platforms have been found to require periodic resetting of the DMA
788 ++ * engine. Ryzen and XEON platforms are known to be affected. The symptom
789 ++ * encountered is "mpeg risc op code error". Only Ryzen platforms employ
790 ++ * this workaround if the option equals 1. The workaround can be explicitly
791 ++ * disabled for all platforms by setting to 0, the workaround can be forced
792 ++ * on for any platform by setting to 2.
793 ++ */
794 ++static unsigned int dma_reset_workaround = 1;
795 ++module_param(dma_reset_workaround, int, 0644);
796 ++MODULE_PARM_DESC(dma_reset_workaround, "periodic RiSC dma engine reset; 0-force disable, 1-driver detect (default), 2-force enable");
797 ++
798 + static unsigned int debug;
799 + module_param(debug, int, 0644);
800 + MODULE_PARM_DESC(debug, "enable debug messages");
801 +@@ -603,8 +616,13 @@ static void cx23885_risc_disasm(struct cx23885_tsport *port,
802 +
803 + static void cx23885_clear_bridge_error(struct cx23885_dev *dev)
804 + {
805 +- uint32_t reg1_val = cx_read(TC_REQ); /* read-only */
806 +- uint32_t reg2_val = cx_read(TC_REQ_SET);
807 ++ uint32_t reg1_val, reg2_val;
808 ++
809 ++ if (!dev->need_dma_reset)
810 ++ return;
811 ++
812 ++ reg1_val = cx_read(TC_REQ); /* read-only */
813 ++ reg2_val = cx_read(TC_REQ_SET);
814 +
815 + if (reg1_val && reg2_val) {
816 + cx_write(TC_REQ, reg1_val);
817 +@@ -2058,6 +2076,37 @@ void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
818 + /* TODO: 23-19 */
819 + }
820 +
821 ++static struct {
822 ++ int vendor, dev;
823 ++} const broken_dev_id[] = {
824 ++ /* According with
825 ++ * https://openbenchmarking.org/system/1703021-RI-AMDZEN08075/Ryzen%207%201800X/lspci,
826 ++ * 0x1451 is PCI ID for the IOMMU found on Ryzen
827 ++ */
828 ++ { PCI_VENDOR_ID_AMD, 0x1451 },
829 ++};
830 ++
831 ++static bool cx23885_does_need_dma_reset(void)
832 ++{
833 ++ int i;
834 ++ struct pci_dev *pdev = NULL;
835 ++
836 ++ if (dma_reset_workaround == 0)
837 ++ return false;
838 ++ else if (dma_reset_workaround == 2)
839 ++ return true;
840 ++
841 ++ for (i = 0; i < ARRAY_SIZE(broken_dev_id); i++) {
842 ++ pdev = pci_get_device(broken_dev_id[i].vendor,
843 ++ broken_dev_id[i].dev, NULL);
844 ++ if (pdev) {
845 ++ pci_dev_put(pdev);
846 ++ return true;
847 ++ }
848 ++ }
849 ++ return false;
850 ++}
851 ++
852 + static int cx23885_initdev(struct pci_dev *pci_dev,
853 + const struct pci_device_id *pci_id)
854 + {
855 +@@ -2069,6 +2118,8 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
856 + if (NULL == dev)
857 + return -ENOMEM;
858 +
859 ++ dev->need_dma_reset = cx23885_does_need_dma_reset();
860 ++
861 + err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
862 + if (err < 0)
863 + goto fail_free;
864 +diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
865 +index d54c7ee1ab21..cf965efabe66 100644
866 +--- a/drivers/media/pci/cx23885/cx23885.h
867 ++++ b/drivers/media/pci/cx23885/cx23885.h
868 +@@ -451,6 +451,8 @@ struct cx23885_dev {
869 + /* Analog raw audio */
870 + struct cx23885_audio_dev *audio_dev;
871 +
872 ++ /* Does the system require periodic DMA resets? */
873 ++ unsigned int need_dma_reset:1;
874 + };
875 +
876 + static inline struct cx23885_dev *to_cx23885(struct v4l2_device *v4l2_dev)
877 +diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
878 +index 3fcb9a2fe1c9..efe2fb72d54b 100644
879 +--- a/drivers/misc/genwqe/card_utils.c
880 ++++ b/drivers/misc/genwqe/card_utils.c
881 +@@ -215,7 +215,7 @@ u32 genwqe_crc32(u8 *buff, size_t len, u32 init)
882 + void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
883 + dma_addr_t *dma_handle)
884 + {
885 +- if (get_order(size) > MAX_ORDER)
886 ++ if (get_order(size) >= MAX_ORDER)
887 + return NULL;
888 +
889 + return dma_zalloc_coherent(&cd->pci_dev->dev, size, dma_handle,
890 +diff --git a/drivers/net/wireless/broadcom/b43/phy_common.c b/drivers/net/wireless/broadcom/b43/phy_common.c
891 +index 85f2ca989565..ef3ffa5ad466 100644
892 +--- a/drivers/net/wireless/broadcom/b43/phy_common.c
893 ++++ b/drivers/net/wireless/broadcom/b43/phy_common.c
894 +@@ -616,7 +616,7 @@ struct b43_c32 b43_cordic(int theta)
895 + u8 i;
896 + s32 tmp;
897 + s8 signx = 1;
898 +- u32 angle = 0;
899 ++ s32 angle = 0;
900 + struct b43_c32 ret = { .i = 39797, .q = 0, };
901 +
902 + while (theta > (180 << 16))
903 +diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
904 +index 0e39e3d1846f..d28418b05a04 100644
905 +--- a/drivers/nvdimm/pmem.c
906 ++++ b/drivers/nvdimm/pmem.c
907 +@@ -309,8 +309,11 @@ static void pmem_release_queue(void *q)
908 + blk_cleanup_queue(q);
909 + }
910 +
911 +-static void pmem_freeze_queue(void *q)
912 ++static void pmem_freeze_queue(struct percpu_ref *ref)
913 + {
914 ++ struct request_queue *q;
915 ++
916 ++ q = container_of(ref, typeof(*q), q_usage_counter);
917 + blk_freeze_queue_start(q);
918 + }
919 +
920 +@@ -402,6 +405,7 @@ static int pmem_attach_disk(struct device *dev,
921 +
922 + pmem->pfn_flags = PFN_DEV;
923 + pmem->pgmap.ref = &q->q_usage_counter;
924 ++ pmem->pgmap.kill = pmem_freeze_queue;
925 + if (is_nd_pfn(dev)) {
926 + if (setup_pagemap_fsdax(dev, &pmem->pgmap))
927 + return -ENOMEM;
928 +@@ -427,13 +431,6 @@ static int pmem_attach_disk(struct device *dev,
929 + memcpy(&bb_res, &nsio->res, sizeof(bb_res));
930 + }
931 +
932 +- /*
933 +- * At release time the queue must be frozen before
934 +- * devm_memremap_pages is unwound
935 +- */
936 +- if (devm_add_action_or_reset(dev, pmem_freeze_queue, q))
937 +- return -ENOMEM;
938 +-
939 + if (IS_ERR(addr))
940 + return PTR_ERR(addr);
941 + pmem->virt_addr = addr;
942 +diff --git a/drivers/of/base.c b/drivers/of/base.c
943 +index 09692c9b32a7..6d20b6dcf034 100644
944 +--- a/drivers/of/base.c
945 ++++ b/drivers/of/base.c
946 +@@ -116,9 +116,6 @@ int __weak of_node_to_nid(struct device_node *np)
947 + }
948 + #endif
949 +
950 +-static struct device_node **phandle_cache;
951 +-static u32 phandle_cache_mask;
952 +-
953 + /*
954 + * Assumptions behind phandle_cache implementation:
955 + * - phandle property values are in a contiguous range of 1..n
956 +@@ -127,6 +124,66 @@ static u32 phandle_cache_mask;
957 + * - the phandle lookup overhead reduction provided by the cache
958 + * will likely be less
959 + */
960 ++
961 ++static struct device_node **phandle_cache;
962 ++static u32 phandle_cache_mask;
963 ++
964 ++/*
965 ++ * Caller must hold devtree_lock.
966 ++ */
967 ++static void __of_free_phandle_cache(void)
968 ++{
969 ++ u32 cache_entries = phandle_cache_mask + 1;
970 ++ u32 k;
971 ++
972 ++ if (!phandle_cache)
973 ++ return;
974 ++
975 ++ for (k = 0; k < cache_entries; k++)
976 ++ of_node_put(phandle_cache[k]);
977 ++
978 ++ kfree(phandle_cache);
979 ++ phandle_cache = NULL;
980 ++}
981 ++
982 ++int of_free_phandle_cache(void)
983 ++{
984 ++ unsigned long flags;
985 ++
986 ++ raw_spin_lock_irqsave(&devtree_lock, flags);
987 ++
988 ++ __of_free_phandle_cache();
989 ++
990 ++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
991 ++
992 ++ return 0;
993 ++}
994 ++#if !defined(CONFIG_MODULES)
995 ++late_initcall_sync(of_free_phandle_cache);
996 ++#endif
997 ++
998 ++/*
999 ++ * Caller must hold devtree_lock.
1000 ++ */
1001 ++void __of_free_phandle_cache_entry(phandle handle)
1002 ++{
1003 ++ phandle masked_handle;
1004 ++ struct device_node *np;
1005 ++
1006 ++ if (!handle)
1007 ++ return;
1008 ++
1009 ++ masked_handle = handle & phandle_cache_mask;
1010 ++
1011 ++ if (phandle_cache) {
1012 ++ np = phandle_cache[masked_handle];
1013 ++ if (np && handle == np->phandle) {
1014 ++ of_node_put(np);
1015 ++ phandle_cache[masked_handle] = NULL;
1016 ++ }
1017 ++ }
1018 ++}
1019 ++
1020 + void of_populate_phandle_cache(void)
1021 + {
1022 + unsigned long flags;
1023 +@@ -136,8 +193,7 @@ void of_populate_phandle_cache(void)
1024 +
1025 + raw_spin_lock_irqsave(&devtree_lock, flags);
1026 +
1027 +- kfree(phandle_cache);
1028 +- phandle_cache = NULL;
1029 ++ __of_free_phandle_cache();
1030 +
1031 + for_each_of_allnodes(np)
1032 + if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
1033 +@@ -155,30 +211,15 @@ void of_populate_phandle_cache(void)
1034 + goto out;
1035 +
1036 + for_each_of_allnodes(np)
1037 +- if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
1038 ++ if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) {
1039 ++ of_node_get(np);
1040 + phandle_cache[np->phandle & phandle_cache_mask] = np;
1041 ++ }
1042 +
1043 + out:
1044 + raw_spin_unlock_irqrestore(&devtree_lock, flags);
1045 + }
1046 +
1047 +-int of_free_phandle_cache(void)
1048 +-{
1049 +- unsigned long flags;
1050 +-
1051 +- raw_spin_lock_irqsave(&devtree_lock, flags);
1052 +-
1053 +- kfree(phandle_cache);
1054 +- phandle_cache = NULL;
1055 +-
1056 +- raw_spin_unlock_irqrestore(&devtree_lock, flags);
1057 +-
1058 +- return 0;
1059 +-}
1060 +-#if !defined(CONFIG_MODULES)
1061 +-late_initcall_sync(of_free_phandle_cache);
1062 +-#endif
1063 +-
1064 + void __init of_core_init(void)
1065 + {
1066 + struct device_node *np;
1067 +@@ -1190,13 +1231,23 @@ struct device_node *of_find_node_by_phandle(phandle handle)
1068 + if (phandle_cache[masked_handle] &&
1069 + handle == phandle_cache[masked_handle]->phandle)
1070 + np = phandle_cache[masked_handle];
1071 ++ if (np && of_node_check_flag(np, OF_DETACHED)) {
1072 ++ WARN_ON(1); /* did not uncache np on node removal */
1073 ++ of_node_put(np);
1074 ++ phandle_cache[masked_handle] = NULL;
1075 ++ np = NULL;
1076 ++ }
1077 + }
1078 +
1079 + if (!np) {
1080 + for_each_of_allnodes(np)
1081 +- if (np->phandle == handle) {
1082 +- if (phandle_cache)
1083 ++ if (np->phandle == handle &&
1084 ++ !of_node_check_flag(np, OF_DETACHED)) {
1085 ++ if (phandle_cache) {
1086 ++ /* will put when removed from cache */
1087 ++ of_node_get(np);
1088 + phandle_cache[masked_handle] = np;
1089 ++ }
1090 + break;
1091 + }
1092 + }
1093 +diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
1094 +index f4f8ed9b5454..ecea92f68c87 100644
1095 +--- a/drivers/of/dynamic.c
1096 ++++ b/drivers/of/dynamic.c
1097 +@@ -268,6 +268,9 @@ void __of_detach_node(struct device_node *np)
1098 + }
1099 +
1100 + of_node_set_flag(np, OF_DETACHED);
1101 ++
1102 ++ /* race with of_find_node_by_phandle() prevented by devtree_lock */
1103 ++ __of_free_phandle_cache_entry(np->phandle);
1104 + }
1105 +
1106 + /**
1107 +diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
1108 +index 5d1567025358..24786818e32e 100644
1109 +--- a/drivers/of/of_private.h
1110 ++++ b/drivers/of/of_private.h
1111 +@@ -84,6 +84,10 @@ static inline void __of_detach_node_sysfs(struct device_node *np) {}
1112 + int of_resolve_phandles(struct device_node *tree);
1113 + #endif
1114 +
1115 ++#if defined(CONFIG_OF_DYNAMIC)
1116 ++void __of_free_phandle_cache_entry(phandle handle);
1117 ++#endif
1118 ++
1119 + #if defined(CONFIG_OF_OVERLAY)
1120 + void of_overlay_mutex_lock(void);
1121 + void of_overlay_mutex_unlock(void);
1122 +diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
1123 +index ae3c5b25dcc7..a2eb25271c96 100644
1124 +--- a/drivers/pci/p2pdma.c
1125 ++++ b/drivers/pci/p2pdma.c
1126 +@@ -82,10 +82,8 @@ static void pci_p2pdma_percpu_release(struct percpu_ref *ref)
1127 + complete_all(&p2p->devmap_ref_done);
1128 + }
1129 +
1130 +-static void pci_p2pdma_percpu_kill(void *data)
1131 ++static void pci_p2pdma_percpu_kill(struct percpu_ref *ref)
1132 + {
1133 +- struct percpu_ref *ref = data;
1134 +-
1135 + /*
1136 + * pci_p2pdma_add_resource() may be called multiple times
1137 + * by a driver and may register the percpu_kill devm action multiple
1138 +@@ -198,6 +196,7 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
1139 + pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;
1140 + pgmap->pci_p2pdma_bus_offset = pci_bus_address(pdev, bar) -
1141 + pci_resource_start(pdev, bar);
1142 ++ pgmap->kill = pci_p2pdma_percpu_kill;
1143 +
1144 + addr = devm_memremap_pages(&pdev->dev, pgmap);
1145 + if (IS_ERR(addr)) {
1146 +@@ -211,11 +210,6 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
1147 + if (error)
1148 + goto pgmap_free;
1149 +
1150 +- error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_percpu_kill,
1151 +- &pdev->p2pdma->devmap_ref);
1152 +- if (error)
1153 +- goto pgmap_free;
1154 +-
1155 + pci_info(pdev, "added peer-to-peer DMA memory %pR\n",
1156 + &pgmap->res);
1157 +
1158 +diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
1159 +index bef17c3fca67..33f3f475e5c6 100644
1160 +--- a/drivers/pci/pci-driver.c
1161 ++++ b/drivers/pci/pci-driver.c
1162 +@@ -1251,30 +1251,29 @@ static int pci_pm_runtime_suspend(struct device *dev)
1163 + return 0;
1164 + }
1165 +
1166 +- if (!pm || !pm->runtime_suspend)
1167 +- return -ENOSYS;
1168 +-
1169 + pci_dev->state_saved = false;
1170 +- error = pm->runtime_suspend(dev);
1171 +- if (error) {
1172 ++ if (pm && pm->runtime_suspend) {
1173 ++ error = pm->runtime_suspend(dev);
1174 + /*
1175 + * -EBUSY and -EAGAIN is used to request the runtime PM core
1176 + * to schedule a new suspend, so log the event only with debug
1177 + * log level.
1178 + */
1179 +- if (error == -EBUSY || error == -EAGAIN)
1180 ++ if (error == -EBUSY || error == -EAGAIN) {
1181 + dev_dbg(dev, "can't suspend now (%pf returned %d)\n",
1182 + pm->runtime_suspend, error);
1183 +- else
1184 ++ return error;
1185 ++ } else if (error) {
1186 + dev_err(dev, "can't suspend (%pf returned %d)\n",
1187 + pm->runtime_suspend, error);
1188 +-
1189 +- return error;
1190 ++ return error;
1191 ++ }
1192 + }
1193 +
1194 + pci_fixup_device(pci_fixup_suspend, pci_dev);
1195 +
1196 +- if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
1197 ++ if (pm && pm->runtime_suspend
1198 ++ && !pci_dev->state_saved && pci_dev->current_state != PCI_D0
1199 + && pci_dev->current_state != PCI_UNKNOWN) {
1200 + WARN_ONCE(pci_dev->current_state != prev,
1201 + "PCI PM: State of device not saved by %pF\n",
1202 +@@ -1292,7 +1291,7 @@ static int pci_pm_runtime_suspend(struct device *dev)
1203 +
1204 + static int pci_pm_runtime_resume(struct device *dev)
1205 + {
1206 +- int rc;
1207 ++ int rc = 0;
1208 + struct pci_dev *pci_dev = to_pci_dev(dev);
1209 + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
1210 +
1211 +@@ -1306,14 +1305,12 @@ static int pci_pm_runtime_resume(struct device *dev)
1212 + if (!pci_dev->driver)
1213 + return 0;
1214 +
1215 +- if (!pm || !pm->runtime_resume)
1216 +- return -ENOSYS;
1217 +-
1218 + pci_fixup_device(pci_fixup_resume_early, pci_dev);
1219 + pci_enable_wake(pci_dev, PCI_D0, false);
1220 + pci_fixup_device(pci_fixup_resume, pci_dev);
1221 +
1222 +- rc = pm->runtime_resume(dev);
1223 ++ if (pm && pm->runtime_resume)
1224 ++ rc = pm->runtime_resume(dev);
1225 +
1226 + pci_dev->runtime_d3cold = false;
1227 +
1228 +diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
1229 +index 1b10ea05a914..69372e2bc93c 100644
1230 +--- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
1231 ++++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
1232 +@@ -30,8 +30,8 @@
1233 + #define DDRC_FLUX_RCMD 0x38c
1234 + #define DDRC_PRE_CMD 0x3c0
1235 + #define DDRC_ACT_CMD 0x3c4
1236 +-#define DDRC_BNK_CHG 0x3c8
1237 + #define DDRC_RNK_CHG 0x3cc
1238 ++#define DDRC_RW_CHG 0x3d0
1239 + #define DDRC_EVENT_CTRL 0x6C0
1240 + #define DDRC_INT_MASK 0x6c8
1241 + #define DDRC_INT_STATUS 0x6cc
1242 +@@ -51,7 +51,7 @@
1243 +
1244 + static const u32 ddrc_reg_off[] = {
1245 + DDRC_FLUX_WR, DDRC_FLUX_RD, DDRC_FLUX_WCMD, DDRC_FLUX_RCMD,
1246 +- DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_BNK_CHG, DDRC_RNK_CHG
1247 ++ DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_RNK_CHG, DDRC_RW_CHG
1248 + };
1249 +
1250 + /*
1251 +diff --git a/drivers/power/supply/olpc_battery.c b/drivers/power/supply/olpc_battery.c
1252 +index 6da79ae14860..5a97e42a3547 100644
1253 +--- a/drivers/power/supply/olpc_battery.c
1254 ++++ b/drivers/power/supply/olpc_battery.c
1255 +@@ -428,14 +428,14 @@ static int olpc_bat_get_property(struct power_supply *psy,
1256 + if (ret)
1257 + return ret;
1258 +
1259 +- val->intval = (s16)be16_to_cpu(ec_word) * 100 / 256;
1260 ++ val->intval = (s16)be16_to_cpu(ec_word) * 10 / 256;
1261 + break;
1262 + case POWER_SUPPLY_PROP_TEMP_AMBIENT:
1263 + ret = olpc_ec_cmd(EC_AMB_TEMP, NULL, 0, (void *)&ec_word, 2);
1264 + if (ret)
1265 + return ret;
1266 +
1267 +- val->intval = (int)be16_to_cpu(ec_word) * 100 / 256;
1268 ++ val->intval = (int)be16_to_cpu(ec_word) * 10 / 256;
1269 + break;
1270 + case POWER_SUPPLY_PROP_CHARGE_COUNTER:
1271 + ret = olpc_ec_cmd(EC_BAT_ACR, NULL, 0, (void *)&ec_word, 2);
1272 +diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
1273 +index 94f4d8fe85e0..d1b531fe9ada 100644
1274 +--- a/drivers/s390/scsi/zfcp_aux.c
1275 ++++ b/drivers/s390/scsi/zfcp_aux.c
1276 +@@ -275,16 +275,16 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
1277 + */
1278 + int zfcp_status_read_refill(struct zfcp_adapter *adapter)
1279 + {
1280 +- while (atomic_read(&adapter->stat_miss) > 0)
1281 ++ while (atomic_add_unless(&adapter->stat_miss, -1, 0))
1282 + if (zfcp_fsf_status_read(adapter->qdio)) {
1283 ++ atomic_inc(&adapter->stat_miss); /* undo add -1 */
1284 + if (atomic_read(&adapter->stat_miss) >=
1285 + adapter->stat_read_buf_num) {
1286 + zfcp_erp_adapter_reopen(adapter, 0, "axsref1");
1287 + return 1;
1288 + }
1289 + break;
1290 +- } else
1291 +- atomic_dec(&adapter->stat_miss);
1292 ++ }
1293 + return 0;
1294 + }
1295 +
1296 +diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
1297 +index b9e5cd79931a..462ed4ad21d2 100644
1298 +--- a/drivers/scsi/lpfc/lpfc_sli.c
1299 ++++ b/drivers/scsi/lpfc/lpfc_sli.c
1300 +@@ -14501,7 +14501,8 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
1301 + hw_page_size))/hw_page_size;
1302 +
1303 + /* If needed, Adjust page count to match the max the adapter supports */
1304 +- if (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt)
1305 ++ if (phba->sli4_hba.pc_sli4_params.wqpcnt &&
1306 ++ (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt))
1307 + queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt;
1308 +
1309 + INIT_LIST_HEAD(&queue->list);
1310 +diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
1311 +index bbed039617a4..d59c8a59f582 100644
1312 +--- a/drivers/video/fbdev/pxafb.c
1313 ++++ b/drivers/video/fbdev/pxafb.c
1314 +@@ -2234,10 +2234,8 @@ static struct pxafb_mach_info *of_pxafb_of_mach_info(struct device *dev)
1315 + if (!info)
1316 + return ERR_PTR(-ENOMEM);
1317 + ret = of_get_pxafb_mode_info(dev, info);
1318 +- if (ret) {
1319 +- kfree(info->modes);
1320 ++ if (ret)
1321 + return ERR_PTR(ret);
1322 +- }
1323 +
1324 + /*
1325 + * On purpose, neither lccrX registers nor video memory size can be
1326 +diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
1327 +index f3496db4bb3e..a58666a3f8dd 100644
1328 +--- a/fs/ceph/caps.c
1329 ++++ b/fs/ceph/caps.c
1330 +@@ -3569,7 +3569,6 @@ retry:
1331 + tcap->cap_id = t_cap_id;
1332 + tcap->seq = t_seq - 1;
1333 + tcap->issue_seq = t_seq - 1;
1334 +- tcap->mseq = t_mseq;
1335 + tcap->issued |= issued;
1336 + tcap->implemented |= issued;
1337 + if (cap == ci->i_auth_cap)
1338 +diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
1339 +index cc91963683de..a928ba008d7d 100644
1340 +--- a/fs/dlm/lock.c
1341 ++++ b/fs/dlm/lock.c
1342 +@@ -1209,6 +1209,7 @@ static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
1343 +
1344 + if (rv < 0) {
1345 + log_error(ls, "create_lkb idr error %d", rv);
1346 ++ dlm_free_lkb(lkb);
1347 + return rv;
1348 + }
1349 +
1350 +@@ -4179,6 +4180,7 @@ static int receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
1351 + (unsigned long long)lkb->lkb_recover_seq,
1352 + ms->m_header.h_nodeid, ms->m_lkid);
1353 + error = -ENOENT;
1354 ++ dlm_put_lkb(lkb);
1355 + goto fail;
1356 + }
1357 +
1358 +@@ -4232,6 +4234,7 @@ static int receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
1359 + lkb->lkb_id, lkb->lkb_remid,
1360 + ms->m_header.h_nodeid, ms->m_lkid);
1361 + error = -ENOENT;
1362 ++ dlm_put_lkb(lkb);
1363 + goto fail;
1364 + }
1365 +
1366 +@@ -5792,20 +5795,20 @@ int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
1367 + goto out;
1368 + }
1369 + }
1370 +-
1371 +- /* After ua is attached to lkb it will be freed by dlm_free_lkb().
1372 +- When DLM_IFL_USER is set, the dlm knows that this is a userspace
1373 +- lock and that lkb_astparam is the dlm_user_args structure. */
1374 +-
1375 + error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs,
1376 + fake_astfn, ua, fake_bastfn, &args);
1377 +- lkb->lkb_flags |= DLM_IFL_USER;
1378 +-
1379 + if (error) {
1380 ++ kfree(ua->lksb.sb_lvbptr);
1381 ++ ua->lksb.sb_lvbptr = NULL;
1382 ++ kfree(ua);
1383 + __put_lkb(ls, lkb);
1384 + goto out;
1385 + }
1386 +
1387 ++ /* After ua is attached to lkb it will be freed by dlm_free_lkb().
1388 ++ When DLM_IFL_USER is set, the dlm knows that this is a userspace
1389 ++ lock and that lkb_astparam is the dlm_user_args structure. */
1390 ++ lkb->lkb_flags |= DLM_IFL_USER;
1391 + error = request_lock(ls, lkb, name, namelen, &args);
1392 +
1393 + switch (error) {
1394 +diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
1395 +index 5ba94be006ee..6a1529e478f3 100644
1396 +--- a/fs/dlm/lockspace.c
1397 ++++ b/fs/dlm/lockspace.c
1398 +@@ -680,11 +680,11 @@ static int new_lockspace(const char *name, const char *cluster,
1399 + kfree(ls->ls_recover_buf);
1400 + out_lkbidr:
1401 + idr_destroy(&ls->ls_lkbidr);
1402 ++ out_rsbtbl:
1403 + for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) {
1404 + if (ls->ls_remove_names[i])
1405 + kfree(ls->ls_remove_names[i]);
1406 + }
1407 +- out_rsbtbl:
1408 + vfree(ls->ls_rsbtbl);
1409 + out_lsfree:
1410 + if (do_unreg)
1411 +diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
1412 +index 648f0ca1ad57..998051c4aea7 100644
1413 +--- a/fs/gfs2/inode.c
1414 ++++ b/fs/gfs2/inode.c
1415 +@@ -744,17 +744,19 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
1416 + the gfs2 structures. */
1417 + if (default_acl) {
1418 + error = __gfs2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
1419 ++ if (error)
1420 ++ goto fail_gunlock3;
1421 + posix_acl_release(default_acl);
1422 ++ default_acl = NULL;
1423 + }
1424 + if (acl) {
1425 +- if (!error)
1426 +- error = __gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS);
1427 ++ error = __gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS);
1428 ++ if (error)
1429 ++ goto fail_gunlock3;
1430 + posix_acl_release(acl);
1431 ++ acl = NULL;
1432 + }
1433 +
1434 +- if (error)
1435 +- goto fail_gunlock3;
1436 +-
1437 + error = security_inode_init_security(&ip->i_inode, &dip->i_inode, name,
1438 + &gfs2_initxattrs, NULL);
1439 + if (error)
1440 +@@ -789,10 +791,8 @@ fail_free_inode:
1441 + }
1442 + gfs2_rsqa_delete(ip, NULL);
1443 + fail_free_acls:
1444 +- if (default_acl)
1445 +- posix_acl_release(default_acl);
1446 +- if (acl)
1447 +- posix_acl_release(acl);
1448 ++ posix_acl_release(default_acl);
1449 ++ posix_acl_release(acl);
1450 + fail_gunlock:
1451 + gfs2_dir_no_add(&da);
1452 + gfs2_glock_dq_uninit(ghs);
1453 +diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
1454 +index b08a530433ad..8d7916570362 100644
1455 +--- a/fs/gfs2/rgrp.c
1456 ++++ b/fs/gfs2/rgrp.c
1457 +@@ -1780,9 +1780,9 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
1458 + goto next_iter;
1459 + }
1460 + if (ret == -E2BIG) {
1461 ++ n += rbm->bii - initial_bii;
1462 + rbm->bii = 0;
1463 + rbm->offset = 0;
1464 +- n += (rbm->bii - initial_bii);
1465 + goto res_covered_end_of_rgrp;
1466 + }
1467 + return ret;
1468 +diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
1469 +index d20b92f271c2..0a67dd4250e9 100644
1470 +--- a/fs/lockd/clntproc.c
1471 ++++ b/fs/lockd/clntproc.c
1472 +@@ -442,7 +442,7 @@ nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
1473 + fl->fl_start = req->a_res.lock.fl.fl_start;
1474 + fl->fl_end = req->a_res.lock.fl.fl_end;
1475 + fl->fl_type = req->a_res.lock.fl.fl_type;
1476 +- fl->fl_pid = 0;
1477 ++ fl->fl_pid = -req->a_res.lock.fl.fl_pid;
1478 + break;
1479 + default:
1480 + status = nlm_stat_to_errno(req->a_res.status);
1481 +diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c
1482 +index 7147e4aebecc..9846f7e95282 100644
1483 +--- a/fs/lockd/xdr.c
1484 ++++ b/fs/lockd/xdr.c
1485 +@@ -127,7 +127,7 @@ nlm_decode_lock(__be32 *p, struct nlm_lock *lock)
1486 +
1487 + locks_init_lock(fl);
1488 + fl->fl_owner = current->files;
1489 +- fl->fl_pid = (pid_t)lock->svid;
1490 ++ fl->fl_pid = current->tgid;
1491 + fl->fl_flags = FL_POSIX;
1492 + fl->fl_type = F_RDLCK; /* as good as anything else */
1493 + start = ntohl(*p++);
1494 +@@ -269,7 +269,7 @@ nlmsvc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p)
1495 + memset(lock, 0, sizeof(*lock));
1496 + locks_init_lock(&lock->fl);
1497 + lock->svid = ~(u32) 0;
1498 +- lock->fl.fl_pid = (pid_t)lock->svid;
1499 ++ lock->fl.fl_pid = current->tgid;
1500 +
1501 + if (!(p = nlm_decode_cookie(p, &argp->cookie))
1502 + || !(p = xdr_decode_string_inplace(p, &lock->caller,
1503 +diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c
1504 +index 7ed9edf9aed4..70154f376695 100644
1505 +--- a/fs/lockd/xdr4.c
1506 ++++ b/fs/lockd/xdr4.c
1507 +@@ -119,7 +119,7 @@ nlm4_decode_lock(__be32 *p, struct nlm_lock *lock)
1508 +
1509 + locks_init_lock(fl);
1510 + fl->fl_owner = current->files;
1511 +- fl->fl_pid = (pid_t)lock->svid;
1512 ++ fl->fl_pid = current->tgid;
1513 + fl->fl_flags = FL_POSIX;
1514 + fl->fl_type = F_RDLCK; /* as good as anything else */
1515 + p = xdr_decode_hyper(p, &start);
1516 +@@ -266,7 +266,7 @@ nlm4svc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p)
1517 + memset(lock, 0, sizeof(*lock));
1518 + locks_init_lock(&lock->fl);
1519 + lock->svid = ~(u32) 0;
1520 +- lock->fl.fl_pid = (pid_t)lock->svid;
1521 ++ lock->fl.fl_pid = current->tgid;
1522 +
1523 + if (!(p = nlm4_decode_cookie(p, &argp->cookie))
1524 + || !(p = xdr_decode_string_inplace(p, &lock->caller,
1525 +diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
1526 +index d505990dac7c..c364acbb6aba 100644
1527 +--- a/fs/nfsd/nfs4proc.c
1528 ++++ b/fs/nfsd/nfs4proc.c
1529 +@@ -1016,8 +1016,6 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
1530 +
1531 + nvecs = svc_fill_write_vector(rqstp, write->wr_pagelist,
1532 + &write->wr_head, write->wr_buflen);
1533 +- if (!nvecs)
1534 +- return nfserr_io;
1535 + WARN_ON_ONCE(nvecs > ARRAY_SIZE(rqstp->rq_vec));
1536 +
1537 + status = nfsd_vfs_write(rqstp, &cstate->current_fh, filp,
1538 +diff --git a/include/linux/hmm.h b/include/linux/hmm.h
1539 +index c6fb869a81c0..ed89fbc525d2 100644
1540 +--- a/include/linux/hmm.h
1541 ++++ b/include/linux/hmm.h
1542 +@@ -512,8 +512,7 @@ struct hmm_devmem {
1543 + * enough and allocate struct page for it.
1544 + *
1545 + * The device driver can wrap the hmm_devmem struct inside a private device
1546 +- * driver struct. The device driver must call hmm_devmem_remove() before the
1547 +- * device goes away and before freeing the hmm_devmem struct memory.
1548 ++ * driver struct.
1549 + */
1550 + struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
1551 + struct device *device,
1552 +@@ -521,7 +520,6 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
1553 + struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
1554 + struct device *device,
1555 + struct resource *res);
1556 +-void hmm_devmem_remove(struct hmm_devmem *devmem);
1557 +
1558 + /*
1559 + * hmm_devmem_page_set_drvdata - set per-page driver data field
1560 +diff --git a/include/linux/memremap.h b/include/linux/memremap.h
1561 +index 0ac69ddf5fc4..55db66b3716f 100644
1562 +--- a/include/linux/memremap.h
1563 ++++ b/include/linux/memremap.h
1564 +@@ -111,6 +111,7 @@ typedef void (*dev_page_free_t)(struct page *page, void *data);
1565 + * @altmap: pre-allocated/reserved memory for vmemmap allocations
1566 + * @res: physical address range covered by @ref
1567 + * @ref: reference count that pins the devm_memremap_pages() mapping
1568 ++ * @kill: callback to transition @ref to the dead state
1569 + * @dev: host device of the mapping for debug
1570 + * @data: private data pointer for page_free()
1571 + * @type: memory type: see MEMORY_* in memory_hotplug.h
1572 +@@ -122,6 +123,7 @@ struct dev_pagemap {
1573 + bool altmap_valid;
1574 + struct resource res;
1575 + struct percpu_ref *ref;
1576 ++ void (*kill)(struct percpu_ref *ref);
1577 + struct device *dev;
1578 + void *data;
1579 + enum memory_type type;
1580 +diff --git a/kernel/fork.c b/kernel/fork.c
1581 +index e2a5156bc9c3..3c16bc490583 100644
1582 +--- a/kernel/fork.c
1583 ++++ b/kernel/fork.c
1584 +@@ -1837,8 +1837,6 @@ static __latent_entropy struct task_struct *copy_process(
1585 +
1586 + posix_cpu_timers_init(p);
1587 +
1588 +- p->start_time = ktime_get_ns();
1589 +- p->real_start_time = ktime_get_boot_ns();
1590 + p->io_context = NULL;
1591 + audit_set_context(p, NULL);
1592 + cgroup_fork(p);
1593 +@@ -2004,6 +2002,17 @@ static __latent_entropy struct task_struct *copy_process(
1594 + if (retval)
1595 + goto bad_fork_free_pid;
1596 +
1597 ++ /*
1598 ++ * From this point on we must avoid any synchronous user-space
1599 ++ * communication until we take the tasklist-lock. In particular, we do
1600 ++ * not want user-space to be able to predict the process start-time by
1601 ++ * stalling fork(2) after we recorded the start_time but before it is
1602 ++ * visible to the system.
1603 ++ */
1604 ++
1605 ++ p->start_time = ktime_get_ns();
1606 ++ p->real_start_time = ktime_get_boot_ns();
1607 ++
1608 + /*
1609 + * Make it visible to the rest of the system, but dont wake it up yet.
1610 + * Need tasklist lock for parent etc handling!
1611 +diff --git a/kernel/memremap.c b/kernel/memremap.c
1612 +index 9eced2cc9f94..3eef989ef035 100644
1613 +--- a/kernel/memremap.c
1614 ++++ b/kernel/memremap.c
1615 +@@ -88,23 +88,25 @@ static void devm_memremap_pages_release(void *data)
1616 + resource_size_t align_start, align_size;
1617 + unsigned long pfn;
1618 +
1619 ++ pgmap->kill(pgmap->ref);
1620 + for_each_device_pfn(pfn, pgmap)
1621 + put_page(pfn_to_page(pfn));
1622 +
1623 +- if (percpu_ref_tryget_live(pgmap->ref)) {
1624 +- dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
1625 +- percpu_ref_put(pgmap->ref);
1626 +- }
1627 +-
1628 + /* pages are dead and unused, undo the arch mapping */
1629 + align_start = res->start & ~(SECTION_SIZE - 1);
1630 + align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
1631 + - align_start;
1632 +
1633 + mem_hotplug_begin();
1634 +- arch_remove_memory(align_start, align_size, pgmap->altmap_valid ?
1635 +- &pgmap->altmap : NULL);
1636 +- kasan_remove_zero_shadow(__va(align_start), align_size);
1637 ++ if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
1638 ++ pfn = align_start >> PAGE_SHIFT;
1639 ++ __remove_pages(page_zone(pfn_to_page(pfn)), pfn,
1640 ++ align_size >> PAGE_SHIFT, NULL);
1641 ++ } else {
1642 ++ arch_remove_memory(align_start, align_size,
1643 ++ pgmap->altmap_valid ? &pgmap->altmap : NULL);
1644 ++ kasan_remove_zero_shadow(__va(align_start), align_size);
1645 ++ }
1646 + mem_hotplug_done();
1647 +
1648 + untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
1649 +@@ -116,7 +118,7 @@ static void devm_memremap_pages_release(void *data)
1650 + /**
1651 + * devm_memremap_pages - remap and provide memmap backing for the given resource
1652 + * @dev: hosting device for @res
1653 +- * @pgmap: pointer to a struct dev_pgmap
1654 ++ * @pgmap: pointer to a struct dev_pagemap
1655 + *
1656 + * Notes:
1657 + * 1/ At a minimum the res, ref and type members of @pgmap must be initialized
1658 +@@ -125,11 +127,8 @@ static void devm_memremap_pages_release(void *data)
1659 + * 2/ The altmap field may optionally be initialized, in which case altmap_valid
1660 + * must be set to true
1661 + *
1662 +- * 3/ pgmap.ref must be 'live' on entry and 'dead' before devm_memunmap_pages()
1663 +- * time (or devm release event). The expected order of events is that ref has
1664 +- * been through percpu_ref_kill() before devm_memremap_pages_release(). The
1665 +- * wait for the completion of all references being dropped and
1666 +- * percpu_ref_exit() must occur after devm_memremap_pages_release().
1667 ++ * 3/ pgmap->ref must be 'live' on entry and will be killed at
1668 ++ * devm_memremap_pages_release() time, or if this routine fails.
1669 + *
1670 + * 4/ res is expected to be a host memory range that could feasibly be
1671 + * treated as a "System RAM" range, i.e. not a device mmio range, but
1672 +@@ -145,6 +144,9 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
1673 + pgprot_t pgprot = PAGE_KERNEL;
1674 + int error, nid, is_ram;
1675 +
1676 ++ if (!pgmap->ref || !pgmap->kill)
1677 ++ return ERR_PTR(-EINVAL);
1678 ++
1679 + align_start = res->start & ~(SECTION_SIZE - 1);
1680 + align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
1681 + - align_start;
1682 +@@ -167,18 +169,13 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
1683 + is_ram = region_intersects(align_start, align_size,
1684 + IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
1685 +
1686 +- if (is_ram == REGION_MIXED) {
1687 +- WARN_ONCE(1, "%s attempted on mixed region %pr\n",
1688 +- __func__, res);
1689 +- return ERR_PTR(-ENXIO);
1690 ++ if (is_ram != REGION_DISJOINT) {
1691 ++ WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
1692 ++ is_ram == REGION_MIXED ? "mixed" : "ram", res);
1693 ++ error = -ENXIO;
1694 ++ goto err_array;
1695 + }
1696 +
1697 +- if (is_ram == REGION_INTERSECTS)
1698 +- return __va(res->start);
1699 +-
1700 +- if (!pgmap->ref)
1701 +- return ERR_PTR(-EINVAL);
1702 +-
1703 + pgmap->dev = dev;
1704 +
1705 + error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start),
1706 +@@ -196,17 +193,40 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
1707 + goto err_pfn_remap;
1708 +
1709 + mem_hotplug_begin();
1710 +- error = kasan_add_zero_shadow(__va(align_start), align_size);
1711 +- if (error) {
1712 +- mem_hotplug_done();
1713 +- goto err_kasan;
1714 ++
1715 ++ /*
1716 ++ * For device private memory we call add_pages() as we only need to
1717 ++ * allocate and initialize struct page for the device memory. More-
1718 ++ * over the device memory is un-accessible thus we do not want to
1719 ++ * create a linear mapping for the memory like arch_add_memory()
1720 ++ * would do.
1721 ++ *
1722 ++ * For all other device memory types, which are accessible by
1723 ++ * the CPU, we do want the linear mapping and thus use
1724 ++ * arch_add_memory().
1725 ++ */
1726 ++ if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
1727 ++ error = add_pages(nid, align_start >> PAGE_SHIFT,
1728 ++ align_size >> PAGE_SHIFT, NULL, false);
1729 ++ } else {
1730 ++ error = kasan_add_zero_shadow(__va(align_start), align_size);
1731 ++ if (error) {
1732 ++ mem_hotplug_done();
1733 ++ goto err_kasan;
1734 ++ }
1735 ++
1736 ++ error = arch_add_memory(nid, align_start, align_size, altmap,
1737 ++ false);
1738 ++ }
1739 ++
1740 ++ if (!error) {
1741 ++ struct zone *zone;
1742 ++
1743 ++ zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
1744 ++ move_pfn_range_to_zone(zone, align_start >> PAGE_SHIFT,
1745 ++ align_size >> PAGE_SHIFT, altmap);
1746 + }
1747 +
1748 +- error = arch_add_memory(nid, align_start, align_size, altmap, false);
1749 +- if (!error)
1750 +- move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
1751 +- align_start >> PAGE_SHIFT,
1752 +- align_size >> PAGE_SHIFT, altmap);
1753 + mem_hotplug_done();
1754 + if (error)
1755 + goto err_add_memory;
1756 +@@ -220,7 +240,10 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
1757 + align_size >> PAGE_SHIFT, pgmap);
1758 + percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap));
1759 +
1760 +- devm_add_action(dev, devm_memremap_pages_release, pgmap);
1761 ++ error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
1762 ++ pgmap);
1763 ++ if (error)
1764 ++ return ERR_PTR(error);
1765 +
1766 + return __va(res->start);
1767 +
1768 +@@ -231,9 +254,10 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
1769 + err_pfn_remap:
1770 + pgmap_array_delete(res);
1771 + err_array:
1772 ++ pgmap->kill(pgmap->ref);
1773 + return ERR_PTR(error);
1774 + }
1775 +-EXPORT_SYMBOL(devm_memremap_pages);
1776 ++EXPORT_SYMBOL_GPL(devm_memremap_pages);
1777 +
1778 + unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
1779 + {
1780 +diff --git a/kernel/pid.c b/kernel/pid.c
1781 +index b2f6c506035d..20881598bdfa 100644
1782 +--- a/kernel/pid.c
1783 ++++ b/kernel/pid.c
1784 +@@ -233,8 +233,10 @@ out_unlock:
1785 +
1786 + out_free:
1787 + spin_lock_irq(&pidmap_lock);
1788 +- while (++i <= ns->level)
1789 +- idr_remove(&ns->idr, (pid->numbers + i)->nr);
1790 ++ while (++i <= ns->level) {
1791 ++ upid = pid->numbers + i;
1792 ++ idr_remove(&upid->ns->idr, upid->nr);
1793 ++ }
1794 +
1795 + /* On failure to allocate the first pid, reset the state */
1796 + if (ns->pid_allocated == PIDNS_ADDING)
1797 +diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
1798 +index a8846ed7f352..a180abc8c925 100644
1799 +--- a/kernel/rcu/srcutree.c
1800 ++++ b/kernel/rcu/srcutree.c
1801 +@@ -451,10 +451,12 @@ static void srcu_gp_start(struct srcu_struct *sp)
1802 +
1803 + lockdep_assert_held(&ACCESS_PRIVATE(sp, lock));
1804 + WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
1805 ++ spin_lock_rcu_node(sdp); /* Interrupts already disabled. */
1806 + rcu_segcblist_advance(&sdp->srcu_cblist,
1807 + rcu_seq_current(&sp->srcu_gp_seq));
1808 + (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
1809 + rcu_seq_snap(&sp->srcu_gp_seq));
1810 ++ spin_unlock_rcu_node(sdp); /* Interrupts remain disabled. */
1811 + smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
1812 + rcu_seq_start(&sp->srcu_gp_seq);
1813 + state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
1814 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
1815 +index ac855b2f4774..e8f191ba3fe5 100644
1816 +--- a/kernel/sched/fair.c
1817 ++++ b/kernel/sched/fair.c
1818 +@@ -352,10 +352,9 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
1819 + }
1820 + }
1821 +
1822 +-/* Iterate thr' all leaf cfs_rq's on a runqueue */
1823 +-#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
1824 +- list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list, \
1825 +- leaf_cfs_rq_list)
1826 ++/* Iterate through all leaf cfs_rq's on a runqueue: */
1827 ++#define for_each_leaf_cfs_rq(rq, cfs_rq) \
1828 ++ list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
1829 +
1830 + /* Do the two (enqueued) entities belong to the same group ? */
1831 + static inline struct cfs_rq *
1832 +@@ -447,8 +446,8 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
1833 + {
1834 + }
1835 +
1836 +-#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
1837 +- for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
1838 ++#define for_each_leaf_cfs_rq(rq, cfs_rq) \
1839 ++ for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
1840 +
1841 + static inline struct sched_entity *parent_entity(struct sched_entity *se)
1842 + {
1843 +@@ -7387,27 +7386,10 @@ static inline bool others_have_blocked(struct rq *rq)
1844 +
1845 + #ifdef CONFIG_FAIR_GROUP_SCHED
1846 +
1847 +-static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
1848 +-{
1849 +- if (cfs_rq->load.weight)
1850 +- return false;
1851 +-
1852 +- if (cfs_rq->avg.load_sum)
1853 +- return false;
1854 +-
1855 +- if (cfs_rq->avg.util_sum)
1856 +- return false;
1857 +-
1858 +- if (cfs_rq->avg.runnable_load_sum)
1859 +- return false;
1860 +-
1861 +- return true;
1862 +-}
1863 +-
1864 + static void update_blocked_averages(int cpu)
1865 + {
1866 + struct rq *rq = cpu_rq(cpu);
1867 +- struct cfs_rq *cfs_rq, *pos;
1868 ++ struct cfs_rq *cfs_rq;
1869 + const struct sched_class *curr_class;
1870 + struct rq_flags rf;
1871 + bool done = true;
1872 +@@ -7419,7 +7401,7 @@ static void update_blocked_averages(int cpu)
1873 + * Iterates the task_group tree in a bottom up fashion, see
1874 + * list_add_leaf_cfs_rq() for details.
1875 + */
1876 +- for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
1877 ++ for_each_leaf_cfs_rq(rq, cfs_rq) {
1878 + struct sched_entity *se;
1879 +
1880 + /* throttled entities do not contribute to load */
1881 +@@ -7434,13 +7416,6 @@ static void update_blocked_averages(int cpu)
1882 + if (se && !skip_blocked_update(se))
1883 + update_load_avg(cfs_rq_of(se), se, 0);
1884 +
1885 +- /*
1886 +- * There can be a lot of idle CPU cgroups. Don't let fully
1887 +- * decayed cfs_rqs linger on the list.
1888 +- */
1889 +- if (cfs_rq_is_decayed(cfs_rq))
1890 +- list_del_leaf_cfs_rq(cfs_rq);
1891 +-
1892 + /* Don't need periodic decay once load/util_avg are null */
1893 + if (cfs_rq_has_blocked(cfs_rq))
1894 + done = false;
1895 +@@ -10289,10 +10264,10 @@ const struct sched_class fair_sched_class = {
1896 + #ifdef CONFIG_SCHED_DEBUG
1897 + void print_cfs_stats(struct seq_file *m, int cpu)
1898 + {
1899 +- struct cfs_rq *cfs_rq, *pos;
1900 ++ struct cfs_rq *cfs_rq;
1901 +
1902 + rcu_read_lock();
1903 +- for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
1904 ++ for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
1905 + print_cfs_rq(m, cpu, cfs_rq);
1906 + rcu_read_unlock();
1907 + }
1908 +diff --git a/lib/test_debug_virtual.c b/lib/test_debug_virtual.c
1909 +index d5a06addeb27..bf864c73e462 100644
1910 +--- a/lib/test_debug_virtual.c
1911 ++++ b/lib/test_debug_virtual.c
1912 +@@ -5,6 +5,7 @@
1913 + #include <linux/vmalloc.h>
1914 + #include <linux/slab.h>
1915 + #include <linux/sizes.h>
1916 ++#include <linux/io.h>
1917 +
1918 + #include <asm/page.h>
1919 + #ifdef CONFIG_MIPS
1920 +diff --git a/mm/hmm.c b/mm/hmm.c
1921 +index 90c34f3d1243..50fbaf80f95e 100644
1922 +--- a/mm/hmm.c
1923 ++++ b/mm/hmm.c
1924 +@@ -986,19 +986,16 @@ static void hmm_devmem_ref_exit(void *data)
1925 + struct hmm_devmem *devmem;
1926 +
1927 + devmem = container_of(ref, struct hmm_devmem, ref);
1928 ++ wait_for_completion(&devmem->completion);
1929 + percpu_ref_exit(ref);
1930 +- devm_remove_action(devmem->device, &hmm_devmem_ref_exit, data);
1931 + }
1932 +
1933 +-static void hmm_devmem_ref_kill(void *data)
1934 ++static void hmm_devmem_ref_kill(struct percpu_ref *ref)
1935 + {
1936 +- struct percpu_ref *ref = data;
1937 + struct hmm_devmem *devmem;
1938 +
1939 + devmem = container_of(ref, struct hmm_devmem, ref);
1940 + percpu_ref_kill(ref);
1941 +- wait_for_completion(&devmem->completion);
1942 +- devm_remove_action(devmem->device, &hmm_devmem_ref_kill, data);
1943 + }
1944 +
1945 + static int hmm_devmem_fault(struct vm_area_struct *vma,
1946 +@@ -1021,172 +1018,6 @@ static void hmm_devmem_free(struct page *page, void *data)
1947 + devmem->ops->free(devmem, page);
1948 + }
1949 +
1950 +-static DEFINE_MUTEX(hmm_devmem_lock);
1951 +-static RADIX_TREE(hmm_devmem_radix, GFP_KERNEL);
1952 +-
1953 +-static void hmm_devmem_radix_release(struct resource *resource)
1954 +-{
1955 +- resource_size_t key;
1956 +-
1957 +- mutex_lock(&hmm_devmem_lock);
1958 +- for (key = resource->start;
1959 +- key <= resource->end;
1960 +- key += PA_SECTION_SIZE)
1961 +- radix_tree_delete(&hmm_devmem_radix, key >> PA_SECTION_SHIFT);
1962 +- mutex_unlock(&hmm_devmem_lock);
1963 +-}
1964 +-
1965 +-static void hmm_devmem_release(struct device *dev, void *data)
1966 +-{
1967 +- struct hmm_devmem *devmem = data;
1968 +- struct resource *resource = devmem->resource;
1969 +- unsigned long start_pfn, npages;
1970 +- struct zone *zone;
1971 +- struct page *page;
1972 +-
1973 +- if (percpu_ref_tryget_live(&devmem->ref)) {
1974 +- dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
1975 +- percpu_ref_put(&devmem->ref);
1976 +- }
1977 +-
1978 +- /* pages are dead and unused, undo the arch mapping */
1979 +- start_pfn = (resource->start & ~(PA_SECTION_SIZE - 1)) >> PAGE_SHIFT;
1980 +- npages = ALIGN(resource_size(resource), PA_SECTION_SIZE) >> PAGE_SHIFT;
1981 +-
1982 +- page = pfn_to_page(start_pfn);
1983 +- zone = page_zone(page);
1984 +-
1985 +- mem_hotplug_begin();
1986 +- if (resource->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY)
1987 +- __remove_pages(zone, start_pfn, npages, NULL);
1988 +- else
1989 +- arch_remove_memory(start_pfn << PAGE_SHIFT,
1990 +- npages << PAGE_SHIFT, NULL);
1991 +- mem_hotplug_done();
1992 +-
1993 +- hmm_devmem_radix_release(resource);
1994 +-}
1995 +-
1996 +-static int hmm_devmem_pages_create(struct hmm_devmem *devmem)
1997 +-{
1998 +- resource_size_t key, align_start, align_size, align_end;
1999 +- struct device *device = devmem->device;
2000 +- int ret, nid, is_ram;
2001 +-
2002 +- align_start = devmem->resource->start & ~(PA_SECTION_SIZE - 1);
2003 +- align_size = ALIGN(devmem->resource->start +
2004 +- resource_size(devmem->resource),
2005 +- PA_SECTION_SIZE) - align_start;
2006 +-
2007 +- is_ram = region_intersects(align_start, align_size,
2008 +- IORESOURCE_SYSTEM_RAM,
2009 +- IORES_DESC_NONE);
2010 +- if (is_ram == REGION_MIXED) {
2011 +- WARN_ONCE(1, "%s attempted on mixed region %pr\n",
2012 +- __func__, devmem->resource);
2013 +- return -ENXIO;
2014 +- }
2015 +- if (is_ram == REGION_INTERSECTS)
2016 +- return -ENXIO;
2017 +-
2018 +- if (devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY)
2019 +- devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
2020 +- else
2021 +- devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
2022 +-
2023 +- devmem->pagemap.res = *devmem->resource;
2024 +- devmem->pagemap.page_fault = hmm_devmem_fault;
2025 +- devmem->pagemap.page_free = hmm_devmem_free;
2026 +- devmem->pagemap.dev = devmem->device;
2027 +- devmem->pagemap.ref = &devmem->ref;
2028 +- devmem->pagemap.data = devmem;
2029 +-
2030 +- mutex_lock(&hmm_devmem_lock);
2031 +- align_end = align_start + align_size - 1;
2032 +- for (key = align_start; key <= align_end; key += PA_SECTION_SIZE) {
2033 +- struct hmm_devmem *dup;
2034 +-
2035 +- dup = radix_tree_lookup(&hmm_devmem_radix,
2036 +- key >> PA_SECTION_SHIFT);
2037 +- if (dup) {
2038 +- dev_err(device, "%s: collides with mapping for %s\n",
2039 +- __func__, dev_name(dup->device));
2040 +- mutex_unlock(&hmm_devmem_lock);
2041 +- ret = -EBUSY;
2042 +- goto error;
2043 +- }
2044 +- ret = radix_tree_insert(&hmm_devmem_radix,
2045 +- key >> PA_SECTION_SHIFT,
2046 +- devmem);
2047 +- if (ret) {
2048 +- dev_err(device, "%s: failed: %d\n", __func__, ret);
2049 +- mutex_unlock(&hmm_devmem_lock);
2050 +- goto error_radix;
2051 +- }
2052 +- }
2053 +- mutex_unlock(&hmm_devmem_lock);
2054 +-
2055 +- nid = dev_to_node(device);
2056 +- if (nid < 0)
2057 +- nid = numa_mem_id();
2058 +-
2059 +- mem_hotplug_begin();
2060 +- /*
2061 +- * For device private memory we call add_pages() as we only need to
2062 +- * allocate and initialize struct page for the device memory. More-
2063 +- * over the device memory is un-accessible thus we do not want to
2064 +- * create a linear mapping for the memory like arch_add_memory()
2065 +- * would do.
2066 +- *
2067 +- * For device public memory, which is accesible by the CPU, we do
2068 +- * want the linear mapping and thus use arch_add_memory().
2069 +- */
2070 +- if (devmem->pagemap.type == MEMORY_DEVICE_PUBLIC)
2071 +- ret = arch_add_memory(nid, align_start, align_size, NULL,
2072 +- false);
2073 +- else
2074 +- ret = add_pages(nid, align_start >> PAGE_SHIFT,
2075 +- align_size >> PAGE_SHIFT, NULL, false);
2076 +- if (ret) {
2077 +- mem_hotplug_done();
2078 +- goto error_add_memory;
2079 +- }
2080 +- move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
2081 +- align_start >> PAGE_SHIFT,
2082 +- align_size >> PAGE_SHIFT, NULL);
2083 +- mem_hotplug_done();
2084 +-
2085 +- /*
2086 +- * Initialization of the pages has been deferred until now in order
2087 +- * to allow us to do the work while not holding the hotplug lock.
2088 +- */
2089 +- memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
2090 +- align_start >> PAGE_SHIFT,
2091 +- align_size >> PAGE_SHIFT, &devmem->pagemap);
2092 +-
2093 +- return 0;
2094 +-
2095 +-error_add_memory:
2096 +- untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
2097 +-error_radix:
2098 +- hmm_devmem_radix_release(devmem->resource);
2099 +-error:
2100 +- return ret;
2101 +-}
2102 +-
2103 +-static int hmm_devmem_match(struct device *dev, void *data, void *match_data)
2104 +-{
2105 +- struct hmm_devmem *devmem = data;
2106 +-
2107 +- return devmem->resource == match_data;
2108 +-}
2109 +-
2110 +-static void hmm_devmem_pages_remove(struct hmm_devmem *devmem)
2111 +-{
2112 +- devres_release(devmem->device, &hmm_devmem_release,
2113 +- &hmm_devmem_match, devmem->resource);
2114 +-}
2115 +-
2116 + /*
2117 + * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
2118 + *
2119 +@@ -1210,12 +1041,12 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
2120 + {
2121 + struct hmm_devmem *devmem;
2122 + resource_size_t addr;
2123 ++ void *result;
2124 + int ret;
2125 +
2126 + dev_pagemap_get_ops();
2127 +
2128 +- devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
2129 +- GFP_KERNEL, dev_to_node(device));
2130 ++ devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
2131 + if (!devmem)
2132 + return ERR_PTR(-ENOMEM);
2133 +
2134 +@@ -1229,11 +1060,11 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
2135 + ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
2136 + 0, GFP_KERNEL);
2137 + if (ret)
2138 +- goto error_percpu_ref;
2139 ++ return ERR_PTR(ret);
2140 +
2141 +- ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
2142 ++ ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref);
2143 + if (ret)
2144 +- goto error_devm_add_action;
2145 ++ return ERR_PTR(ret);
2146 +
2147 + size = ALIGN(size, PA_SECTION_SIZE);
2148 + addr = min((unsigned long)iomem_resource.end,
2149 +@@ -1253,54 +1084,40 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
2150 +
2151 + devmem->resource = devm_request_mem_region(device, addr, size,
2152 + dev_name(device));
2153 +- if (!devmem->resource) {
2154 +- ret = -ENOMEM;
2155 +- goto error_no_resource;
2156 +- }
2157 ++ if (!devmem->resource)
2158 ++ return ERR_PTR(-ENOMEM);
2159 + break;
2160 + }
2161 +- if (!devmem->resource) {
2162 +- ret = -ERANGE;
2163 +- goto error_no_resource;
2164 +- }
2165 ++ if (!devmem->resource)
2166 ++ return ERR_PTR(-ERANGE);
2167 +
2168 + devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
2169 + devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
2170 + devmem->pfn_last = devmem->pfn_first +
2171 + (resource_size(devmem->resource) >> PAGE_SHIFT);
2172 +
2173 +- ret = hmm_devmem_pages_create(devmem);
2174 +- if (ret)
2175 +- goto error_pages;
2176 +-
2177 +- devres_add(device, devmem);
2178 +-
2179 +- ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
2180 +- if (ret) {
2181 +- hmm_devmem_remove(devmem);
2182 +- return ERR_PTR(ret);
2183 +- }
2184 ++ devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
2185 ++ devmem->pagemap.res = *devmem->resource;
2186 ++ devmem->pagemap.page_fault = hmm_devmem_fault;
2187 ++ devmem->pagemap.page_free = hmm_devmem_free;
2188 ++ devmem->pagemap.altmap_valid = false;
2189 ++ devmem->pagemap.ref = &devmem->ref;
2190 ++ devmem->pagemap.data = devmem;
2191 ++ devmem->pagemap.kill = hmm_devmem_ref_kill;
2192 +
2193 ++ result = devm_memremap_pages(devmem->device, &devmem->pagemap);
2194 ++ if (IS_ERR(result))
2195 ++ return result;
2196 + return devmem;
2197 +-
2198 +-error_pages:
2199 +- devm_release_mem_region(device, devmem->resource->start,
2200 +- resource_size(devmem->resource));
2201 +-error_no_resource:
2202 +-error_devm_add_action:
2203 +- hmm_devmem_ref_kill(&devmem->ref);
2204 +- hmm_devmem_ref_exit(&devmem->ref);
2205 +-error_percpu_ref:
2206 +- devres_free(devmem);
2207 +- return ERR_PTR(ret);
2208 + }
2209 +-EXPORT_SYMBOL(hmm_devmem_add);
2210 ++EXPORT_SYMBOL_GPL(hmm_devmem_add);
2211 +
2212 + struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
2213 + struct device *device,
2214 + struct resource *res)
2215 + {
2216 + struct hmm_devmem *devmem;
2217 ++ void *result;
2218 + int ret;
2219 +
2220 + if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
2221 +@@ -1308,8 +1125,7 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
2222 +
2223 + dev_pagemap_get_ops();
2224 +
2225 +- devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
2226 +- GFP_KERNEL, dev_to_node(device));
2227 ++ devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
2228 + if (!devmem)
2229 + return ERR_PTR(-ENOMEM);
2230 +
2231 +@@ -1323,71 +1139,32 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
2232 + ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
2233 + 0, GFP_KERNEL);
2234 + if (ret)
2235 +- goto error_percpu_ref;
2236 ++ return ERR_PTR(ret);
2237 +
2238 +- ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
2239 ++ ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
2240 ++ &devmem->ref);
2241 + if (ret)
2242 +- goto error_devm_add_action;
2243 +-
2244 ++ return ERR_PTR(ret);
2245 +
2246 + devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
2247 + devmem->pfn_last = devmem->pfn_first +
2248 + (resource_size(devmem->resource) >> PAGE_SHIFT);
2249 +
2250 +- ret = hmm_devmem_pages_create(devmem);
2251 +- if (ret)
2252 +- goto error_devm_add_action;
2253 +-
2254 +- devres_add(device, devmem);
2255 +-
2256 +- ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
2257 +- if (ret) {
2258 +- hmm_devmem_remove(devmem);
2259 +- return ERR_PTR(ret);
2260 +- }
2261 ++ devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
2262 ++ devmem->pagemap.res = *devmem->resource;
2263 ++ devmem->pagemap.page_fault = hmm_devmem_fault;
2264 ++ devmem->pagemap.page_free = hmm_devmem_free;
2265 ++ devmem->pagemap.altmap_valid = false;
2266 ++ devmem->pagemap.ref = &devmem->ref;
2267 ++ devmem->pagemap.data = devmem;
2268 ++ devmem->pagemap.kill = hmm_devmem_ref_kill;
2269 +
2270 ++ result = devm_memremap_pages(devmem->device, &devmem->pagemap);
2271 ++ if (IS_ERR(result))
2272 ++ return result;
2273 + return devmem;
2274 +-
2275 +-error_devm_add_action:
2276 +- hmm_devmem_ref_kill(&devmem->ref);
2277 +- hmm_devmem_ref_exit(&devmem->ref);
2278 +-error_percpu_ref:
2279 +- devres_free(devmem);
2280 +- return ERR_PTR(ret);
2281 +-}
2282 +-EXPORT_SYMBOL(hmm_devmem_add_resource);
2283 +-
2284 +-/*
2285 +- * hmm_devmem_remove() - remove device memory (kill and free ZONE_DEVICE)
2286 +- *
2287 +- * @devmem: hmm_devmem struct use to track and manage the ZONE_DEVICE memory
2288 +- *
2289 +- * This will hot-unplug memory that was hotplugged by hmm_devmem_add on behalf
2290 +- * of the device driver. It will free struct page and remove the resource that
2291 +- * reserved the physical address range for this device memory.
2292 +- */
2293 +-void hmm_devmem_remove(struct hmm_devmem *devmem)
2294 +-{
2295 +- resource_size_t start, size;
2296 +- struct device *device;
2297 +- bool cdm = false;
2298 +-
2299 +- if (!devmem)
2300 +- return;
2301 +-
2302 +- device = devmem->device;
2303 +- start = devmem->resource->start;
2304 +- size = resource_size(devmem->resource);
2305 +-
2306 +- cdm = devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY;
2307 +- hmm_devmem_ref_kill(&devmem->ref);
2308 +- hmm_devmem_ref_exit(&devmem->ref);
2309 +- hmm_devmem_pages_remove(devmem);
2310 +-
2311 +- if (!cdm)
2312 +- devm_release_mem_region(device, start, size);
2313 + }
2314 +-EXPORT_SYMBOL(hmm_devmem_remove);
2315 ++EXPORT_SYMBOL_GPL(hmm_devmem_add_resource);
2316 +
2317 + /*
2318 + * A device driver that wants to handle multiple devices memory through a
2319 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
2320 +index 6e1469b80cb7..7e6bf74ddb1e 100644
2321 +--- a/mm/memcontrol.c
2322 ++++ b/mm/memcontrol.c
2323 +@@ -1666,6 +1666,9 @@ enum oom_status {
2324 +
2325 + static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
2326 + {
2327 ++ enum oom_status ret;
2328 ++ bool locked;
2329 ++
2330 + if (order > PAGE_ALLOC_COSTLY_ORDER)
2331 + return OOM_SKIPPED;
2332 +
2333 +@@ -1700,10 +1703,23 @@ static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int
2334 + return OOM_ASYNC;
2335 + }
2336 +
2337 ++ mem_cgroup_mark_under_oom(memcg);
2338 ++
2339 ++ locked = mem_cgroup_oom_trylock(memcg);
2340 ++
2341 ++ if (locked)
2342 ++ mem_cgroup_oom_notify(memcg);
2343 ++
2344 ++ mem_cgroup_unmark_under_oom(memcg);
2345 + if (mem_cgroup_out_of_memory(memcg, mask, order))
2346 +- return OOM_SUCCESS;
2347 ++ ret = OOM_SUCCESS;
2348 ++ else
2349 ++ ret = OOM_FAILED;
2350 +
2351 +- return OOM_FAILED;
2352 ++ if (locked)
2353 ++ mem_cgroup_oom_unlock(memcg);
2354 ++
2355 ++ return ret;
2356 + }
2357 +
2358 + /**
2359 +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
2360 +index 2b2b3ccbbfb5..cea0880eadfb 100644
2361 +--- a/mm/memory_hotplug.c
2362 ++++ b/mm/memory_hotplug.c
2363 +@@ -34,6 +34,7 @@
2364 + #include <linux/hugetlb.h>
2365 + #include <linux/memblock.h>
2366 + #include <linux/compaction.h>
2367 ++#include <linux/rmap.h>
2368 +
2369 + #include <asm/tlbflush.h>
2370 +
2371 +@@ -1369,6 +1370,21 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
2372 + pfn = page_to_pfn(compound_head(page))
2373 + + hpage_nr_pages(page) - 1;
2374 +
2375 ++ /*
2376 ++ * HWPoison pages have elevated reference counts so the migration would
2377 ++ * fail on them. It also doesn't make any sense to migrate them in the
2378 ++ * first place. Still try to unmap such a page in case it is still mapped
2379 ++ * (e.g. current hwpoison implementation doesn't unmap KSM pages but keep
2380 ++ * the unmap as the catch all safety net).
2381 ++ */
2382 ++ if (PageHWPoison(page)) {
2383 ++ if (WARN_ON(PageLRU(page)))
2384 ++ isolate_lru_page(page);
2385 ++ if (page_mapped(page))
2386 ++ try_to_unmap(page, TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS);
2387 ++ continue;
2388 ++ }
2389 ++
2390 + if (!get_page_unless_zero(page))
2391 + continue;
2392 + /*
2393 +diff --git a/mm/swapfile.c b/mm/swapfile.c
2394 +index 8688ae65ef58..20d3c0f47a5f 100644
2395 +--- a/mm/swapfile.c
2396 ++++ b/mm/swapfile.c
2397 +@@ -2197,7 +2197,8 @@ int try_to_unuse(unsigned int type, bool frontswap,
2398 + */
2399 + if (PageSwapCache(page) &&
2400 + likely(page_private(page) == entry.val) &&
2401 +- !page_swapped(page))
2402 ++ (!PageTransCompound(page) ||
2403 ++ !swap_page_trans_huge_swapped(si, entry)))
2404 + delete_from_swap_cache(compound_head(page));
2405 +
2406 + /*
2407 +diff --git a/net/9p/client.c b/net/9p/client.c
2408 +index 2c9a17b9b46b..357214a51f13 100644
2409 +--- a/net/9p/client.c
2410 ++++ b/net/9p/client.c
2411 +@@ -181,6 +181,12 @@ static int parse_opts(char *opts, struct p9_client *clnt)
2412 + ret = r;
2413 + continue;
2414 + }
2415 ++ if (option < 4096) {
2416 ++ p9_debug(P9_DEBUG_ERROR,
2417 ++ "msize should be at least 4k\n");
2418 ++ ret = -EINVAL;
2419 ++ continue;
2420 ++ }
2421 + clnt->msize = option;
2422 + break;
2423 + case Opt_trans:
2424 +@@ -983,10 +989,18 @@ static int p9_client_version(struct p9_client *c)
2425 + else if (!strncmp(version, "9P2000", 6))
2426 + c->proto_version = p9_proto_legacy;
2427 + else {
2428 ++ p9_debug(P9_DEBUG_ERROR,
2429 ++ "server returned an unknown version: %s\n", version);
2430 + err = -EREMOTEIO;
2431 + goto error;
2432 + }
2433 +
2434 ++ if (msize < 4096) {
2435 ++ p9_debug(P9_DEBUG_ERROR,
2436 ++ "server returned a msize < 4096: %d\n", msize);
2437 ++ err = -EREMOTEIO;
2438 ++ goto error;
2439 ++ }
2440 + if (msize < c->msize)
2441 + c->msize = msize;
2442 +
2443 +@@ -1043,6 +1057,13 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
2444 + if (clnt->msize > clnt->trans_mod->maxsize)
2445 + clnt->msize = clnt->trans_mod->maxsize;
2446 +
2447 ++ if (clnt->msize < 4096) {
2448 ++ p9_debug(P9_DEBUG_ERROR,
2449 ++ "Please specify a msize of at least 4k\n");
2450 ++ err = -EINVAL;
2451 ++ goto free_client;
2452 ++ }
2453 ++
2454 + err = p9_client_version(clnt);
2455 + if (err)
2456 + goto close_trans;
2457 +diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
2458 +index 1ece4bc3eb8d..152790ed309c 100644
2459 +--- a/net/sunrpc/auth_gss/svcauth_gss.c
2460 ++++ b/net/sunrpc/auth_gss/svcauth_gss.c
2461 +@@ -1142,7 +1142,7 @@ static int svcauth_gss_legacy_init(struct svc_rqst *rqstp,
2462 + struct kvec *resv = &rqstp->rq_res.head[0];
2463 + struct rsi *rsip, rsikey;
2464 + int ret;
2465 +- struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
2466 ++ struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
2467 +
2468 + memset(&rsikey, 0, sizeof(rsikey));
2469 + ret = gss_read_verf(gc, argv, authp,
2470 +@@ -1253,7 +1253,7 @@ static int svcauth_gss_proxy_init(struct svc_rqst *rqstp,
2471 + uint64_t handle;
2472 + int status;
2473 + int ret;
2474 +- struct net *net = rqstp->rq_xprt->xpt_net;
2475 ++ struct net *net = SVC_NET(rqstp);
2476 + struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
2477 +
2478 + memset(&ud, 0, sizeof(ud));
2479 +@@ -1444,7 +1444,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
2480 + __be32 *rpcstart;
2481 + __be32 *reject_stat = resv->iov_base + resv->iov_len;
2482 + int ret;
2483 +- struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
2484 ++ struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
2485 +
2486 + dprintk("RPC: svcauth_gss: argv->iov_len = %zd\n",
2487 + argv->iov_len);
2488 +@@ -1734,7 +1734,7 @@ svcauth_gss_release(struct svc_rqst *rqstp)
2489 + struct rpc_gss_wire_cred *gc = &gsd->clcred;
2490 + struct xdr_buf *resbuf = &rqstp->rq_res;
2491 + int stat = -EINVAL;
2492 +- struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
2493 ++ struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
2494 +
2495 + if (gc->gc_proc != RPC_GSS_PROC_DATA)
2496 + goto out;
2497 +diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
2498 +index f96345b1180e..12bb23b8e0c5 100644
2499 +--- a/net/sunrpc/cache.c
2500 ++++ b/net/sunrpc/cache.c
2501 +@@ -54,6 +54,11 @@ static void cache_init(struct cache_head *h, struct cache_detail *detail)
2502 + h->last_refresh = now;
2503 + }
2504 +
2505 ++static void cache_fresh_locked(struct cache_head *head, time_t expiry,
2506 ++ struct cache_detail *detail);
2507 ++static void cache_fresh_unlocked(struct cache_head *head,
2508 ++ struct cache_detail *detail);
2509 ++
2510 + static struct cache_head *sunrpc_cache_find_rcu(struct cache_detail *detail,
2511 + struct cache_head *key,
2512 + int hash)
2513 +@@ -100,6 +105,7 @@ static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
2514 + if (cache_is_expired(detail, tmp)) {
2515 + hlist_del_init_rcu(&tmp->cache_list);
2516 + detail->entries --;
2517 ++ cache_fresh_locked(tmp, 0, detail);
2518 + freeme = tmp;
2519 + break;
2520 + }
2521 +@@ -115,8 +121,10 @@ static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
2522 + cache_get(new);
2523 + spin_unlock(&detail->hash_lock);
2524 +
2525 +- if (freeme)
2526 ++ if (freeme) {
2527 ++ cache_fresh_unlocked(freeme, detail);
2528 + cache_put(freeme, detail);
2529 ++ }
2530 + return new;
2531 + }
2532 +
2533 +diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
2534 +index fc6378cc0c1c..20ced24cc61b 100644
2535 +--- a/net/sunrpc/xprtrdma/frwr_ops.c
2536 ++++ b/net/sunrpc/xprtrdma/frwr_ops.c
2537 +@@ -117,15 +117,15 @@ static void
2538 + frwr_mr_recycle_worker(struct work_struct *work)
2539 + {
2540 + struct rpcrdma_mr *mr = container_of(work, struct rpcrdma_mr, mr_recycle);
2541 +- enum rpcrdma_frwr_state state = mr->frwr.fr_state;
2542 + struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
2543 +
2544 + trace_xprtrdma_mr_recycle(mr);
2545 +
2546 +- if (state != FRWR_FLUSHED_LI) {
2547 ++ if (mr->mr_dir != DMA_NONE) {
2548 + trace_xprtrdma_mr_unmap(mr);
2549 + ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
2550 + mr->mr_sg, mr->mr_nents, mr->mr_dir);
2551 ++ mr->mr_dir = DMA_NONE;
2552 + }
2553 +
2554 + spin_lock(&r_xprt->rx_buf.rb_mrlock);
2555 +@@ -150,6 +150,8 @@ frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
2556 + if (!mr->mr_sg)
2557 + goto out_list_err;
2558 +
2559 ++ frwr->fr_state = FRWR_IS_INVALID;
2560 ++ mr->mr_dir = DMA_NONE;
2561 + INIT_LIST_HEAD(&mr->mr_list);
2562 + INIT_WORK(&mr->mr_recycle, frwr_mr_recycle_worker);
2563 + sg_init_table(mr->mr_sg, depth);
2564 +diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
2565 +index 3ddba94c939f..b9bc7f9f6bb9 100644
2566 +--- a/net/sunrpc/xprtrdma/verbs.c
2567 ++++ b/net/sunrpc/xprtrdma/verbs.c
2568 +@@ -1329,9 +1329,12 @@ rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr)
2569 + {
2570 + struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
2571 +
2572 +- trace_xprtrdma_mr_unmap(mr);
2573 +- ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
2574 +- mr->mr_sg, mr->mr_nents, mr->mr_dir);
2575 ++ if (mr->mr_dir != DMA_NONE) {
2576 ++ trace_xprtrdma_mr_unmap(mr);
2577 ++ ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
2578 ++ mr->mr_sg, mr->mr_nents, mr->mr_dir);
2579 ++ mr->mr_dir = DMA_NONE;
2580 ++ }
2581 + __rpcrdma_mr_put(&r_xprt->rx_buf, mr);
2582 + }
2583 +
2584 +diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
2585 +index f4eadd3f7350..b63ef865ce1e 100644
2586 +--- a/security/selinux/ss/policydb.c
2587 ++++ b/security/selinux/ss/policydb.c
2588 +@@ -2108,6 +2108,7 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
2589 + {
2590 + int i, j, rc;
2591 + u32 nel, len;
2592 ++ __be64 prefixbuf[1];
2593 + __le32 buf[3];
2594 + struct ocontext *l, *c;
2595 + u32 nodebuf[8];
2596 +@@ -2217,21 +2218,30 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
2597 + goto out;
2598 + break;
2599 + }
2600 +- case OCON_IBPKEY:
2601 +- rc = next_entry(nodebuf, fp, sizeof(u32) * 4);
2602 ++ case OCON_IBPKEY: {
2603 ++ u32 pkey_lo, pkey_hi;
2604 ++
2605 ++ rc = next_entry(prefixbuf, fp, sizeof(u64));
2606 ++ if (rc)
2607 ++ goto out;
2608 ++
2609 ++ /* we need to have subnet_prefix in CPU order */
2610 ++ c->u.ibpkey.subnet_prefix = be64_to_cpu(prefixbuf[0]);
2611 ++
2612 ++ rc = next_entry(buf, fp, sizeof(u32) * 2);
2613 + if (rc)
2614 + goto out;
2615 +
2616 +- c->u.ibpkey.subnet_prefix = be64_to_cpu(*((__be64 *)nodebuf));
2617 ++ pkey_lo = le32_to_cpu(buf[0]);
2618 ++ pkey_hi = le32_to_cpu(buf[1]);
2619 +
2620 +- if (nodebuf[2] > 0xffff ||
2621 +- nodebuf[3] > 0xffff) {
2622 ++ if (pkey_lo > U16_MAX || pkey_hi > U16_MAX) {
2623 + rc = -EINVAL;
2624 + goto out;
2625 + }
2626 +
2627 +- c->u.ibpkey.low_pkey = le32_to_cpu(nodebuf[2]);
2628 +- c->u.ibpkey.high_pkey = le32_to_cpu(nodebuf[3]);
2629 ++ c->u.ibpkey.low_pkey = pkey_lo;
2630 ++ c->u.ibpkey.high_pkey = pkey_hi;
2631 +
2632 + rc = context_read_and_validate(&c->context[0],
2633 + p,
2634 +@@ -2239,7 +2249,10 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
2635 + if (rc)
2636 + goto out;
2637 + break;
2638 +- case OCON_IBENDPORT:
2639 ++ }
2640 ++ case OCON_IBENDPORT: {
2641 ++ u32 port;
2642 ++
2643 + rc = next_entry(buf, fp, sizeof(u32) * 2);
2644 + if (rc)
2645 + goto out;
2646 +@@ -2249,12 +2262,13 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
2647 + if (rc)
2648 + goto out;
2649 +
2650 +- if (buf[1] > 0xff || buf[1] == 0) {
2651 ++ port = le32_to_cpu(buf[1]);
2652 ++ if (port > U8_MAX || port == 0) {
2653 + rc = -EINVAL;
2654 + goto out;
2655 + }
2656 +
2657 +- c->u.ibendport.port = le32_to_cpu(buf[1]);
2658 ++ c->u.ibendport.port = port;
2659 +
2660 + rc = context_read_and_validate(&c->context[0],
2661 + p,
2662 +@@ -2262,7 +2276,8 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
2663 + if (rc)
2664 + goto out;
2665 + break;
2666 +- }
2667 ++ } /* end case */
2668 ++ } /* end switch */
2669 + }
2670 + }
2671 + rc = 0;
2672 +@@ -3105,6 +3120,7 @@ static int ocontext_write(struct policydb *p, struct policydb_compat_info *info,
2673 + {
2674 + unsigned int i, j, rc;
2675 + size_t nel, len;
2676 ++ __be64 prefixbuf[1];
2677 + __le32 buf[3];
2678 + u32 nodebuf[8];
2679 + struct ocontext *c;
2680 +@@ -3192,12 +3208,17 @@ static int ocontext_write(struct policydb *p, struct policydb_compat_info *info,
2681 + return rc;
2682 + break;
2683 + case OCON_IBPKEY:
2684 +- *((__be64 *)nodebuf) = cpu_to_be64(c->u.ibpkey.subnet_prefix);
2685 ++ /* subnet_prefix is in CPU order */
2686 ++ prefixbuf[0] = cpu_to_be64(c->u.ibpkey.subnet_prefix);
2687 +
2688 +- nodebuf[2] = cpu_to_le32(c->u.ibpkey.low_pkey);
2689 +- nodebuf[3] = cpu_to_le32(c->u.ibpkey.high_pkey);
2690 ++ rc = put_entry(prefixbuf, sizeof(u64), 1, fp);
2691 ++ if (rc)
2692 ++ return rc;
2693 ++
2694 ++ buf[0] = cpu_to_le32(c->u.ibpkey.low_pkey);
2695 ++ buf[1] = cpu_to_le32(c->u.ibpkey.high_pkey);
2696 +
2697 +- rc = put_entry(nodebuf, sizeof(u32), 4, fp);
2698 ++ rc = put_entry(buf, sizeof(u32), 2, fp);
2699 + if (rc)
2700 + return rc;
2701 + rc = context_write(p, &c->context[0], fp);
2702 +diff --git a/sound/pci/cs46xx/dsp_spos.c b/sound/pci/cs46xx/dsp_spos.c
2703 +index 598d140bb7cb..5fc497c6d738 100644
2704 +--- a/sound/pci/cs46xx/dsp_spos.c
2705 ++++ b/sound/pci/cs46xx/dsp_spos.c
2706 +@@ -903,6 +903,9 @@ int cs46xx_dsp_proc_done (struct snd_cs46xx *chip)
2707 + struct dsp_spos_instance * ins = chip->dsp_spos_instance;
2708 + int i;
2709 +
2710 ++ if (!ins)
2711 ++ return 0;
2712 ++
2713 + snd_info_free_entry(ins->proc_sym_info_entry);
2714 + ins->proc_sym_info_entry = NULL;
2715 +
2716 +diff --git a/sound/usb/card.c b/sound/usb/card.c
2717 +index a105947eaf55..746a72e23cf9 100644
2718 +--- a/sound/usb/card.c
2719 ++++ b/sound/usb/card.c
2720 +@@ -246,7 +246,7 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
2721 + h1 = snd_usb_find_csint_desc(host_iface->extra,
2722 + host_iface->extralen,
2723 + NULL, UAC_HEADER);
2724 +- if (!h1) {
2725 ++ if (!h1 || h1->bLength < sizeof(*h1)) {
2726 + dev_err(&dev->dev, "cannot find UAC_HEADER\n");
2727 + return -EINVAL;
2728 + }
2729 +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
2730 +index c63c84b54969..e7d441d0e839 100644
2731 +--- a/sound/usb/mixer.c
2732 ++++ b/sound/usb/mixer.c
2733 +@@ -753,8 +753,9 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
2734 + struct uac_mixer_unit_descriptor *desc)
2735 + {
2736 + int mu_channels;
2737 ++ void *c;
2738 +
2739 +- if (desc->bLength < 11)
2740 ++ if (desc->bLength < sizeof(*desc))
2741 + return -EINVAL;
2742 + if (!desc->bNrInPins)
2743 + return -EINVAL;
2744 +@@ -763,6 +764,8 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
2745 + case UAC_VERSION_1:
2746 + case UAC_VERSION_2:
2747 + default:
2748 ++ if (desc->bLength < sizeof(*desc) + desc->bNrInPins + 1)
2749 ++ return 0; /* no bmControls -> skip */
2750 + mu_channels = uac_mixer_unit_bNrChannels(desc);
2751 + break;
2752 + case UAC_VERSION_3:
2753 +@@ -772,7 +775,11 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
2754 + }
2755 +
2756 + if (!mu_channels)
2757 +- return -EINVAL;
2758 ++ return 0;
2759 ++
2760 ++ c = uac_mixer_unit_bmControls(desc, state->mixer->protocol);
2761 ++ if (c - (void *)desc + (mu_channels - 1) / 8 >= desc->bLength)
2762 ++ return 0; /* no bmControls -> skip */
2763 +
2764 + return mu_channels;
2765 + }
2766 +@@ -944,7 +951,7 @@ static int check_input_term(struct mixer_build *state, int id,
2767 + struct uac_mixer_unit_descriptor *d = p1;
2768 +
2769 + err = uac_mixer_unit_get_channels(state, d);
2770 +- if (err < 0)
2771 ++ if (err <= 0)
2772 + return err;
2773 +
2774 + term->channels = err;
2775 +@@ -2068,11 +2075,15 @@ static int parse_audio_input_terminal(struct mixer_build *state, int unitid,
2776 +
2777 + if (state->mixer->protocol == UAC_VERSION_2) {
2778 + struct uac2_input_terminal_descriptor *d_v2 = raw_desc;
2779 ++ if (d_v2->bLength < sizeof(*d_v2))
2780 ++ return -EINVAL;
2781 + control = UAC2_TE_CONNECTOR;
2782 + term_id = d_v2->bTerminalID;
2783 + bmctls = le16_to_cpu(d_v2->bmControls);
2784 + } else if (state->mixer->protocol == UAC_VERSION_3) {
2785 + struct uac3_input_terminal_descriptor *d_v3 = raw_desc;
2786 ++ if (d_v3->bLength < sizeof(*d_v3))
2787 ++ return -EINVAL;
2788 + control = UAC3_TE_INSERTION;
2789 + term_id = d_v3->bTerminalID;
2790 + bmctls = le32_to_cpu(d_v3->bmControls);
2791 +@@ -2118,7 +2129,7 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
2792 + if (err < 0)
2793 + continue;
2794 + /* no bmControls field (e.g. Maya44) -> ignore */
2795 +- if (desc->bLength <= 10 + input_pins)
2796 ++ if (!num_outs)
2797 + continue;
2798 + err = check_input_term(state, desc->baSourceID[pin], &iterm);
2799 + if (err < 0)
2800 +@@ -2314,7 +2325,7 @@ static int build_audio_procunit(struct mixer_build *state, int unitid,
2801 + char *name)
2802 + {
2803 + struct uac_processing_unit_descriptor *desc = raw_desc;
2804 +- int num_ins = desc->bNrInPins;
2805 ++ int num_ins;
2806 + struct usb_mixer_elem_info *cval;
2807 + struct snd_kcontrol *kctl;
2808 + int i, err, nameid, type, len;
2809 +@@ -2329,7 +2340,13 @@ static int build_audio_procunit(struct mixer_build *state, int unitid,
2810 + 0, NULL, default_value_info
2811 + };
2812 +
2813 +- if (desc->bLength < 13 || desc->bLength < 13 + num_ins ||
2814 ++ if (desc->bLength < 13) {
2815 ++ usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid);
2816 ++ return -EINVAL;
2817 ++ }
2818 ++
2819 ++ num_ins = desc->bNrInPins;
2820 ++ if (desc->bLength < 13 + num_ins ||
2821 + desc->bLength < num_ins + uac_processing_unit_bControlSize(desc, state->mixer->protocol)) {
2822 + usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid);
2823 + return -EINVAL;
2824 +diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
2825 +index 37fc0447c071..b345beb447bd 100644
2826 +--- a/sound/usb/quirks-table.h
2827 ++++ b/sound/usb/quirks-table.h
2828 +@@ -3326,6 +3326,9 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
2829 + }
2830 + }
2831 + },
2832 ++ {
2833 ++ .ifnum = -1
2834 ++ },
2835 + }
2836 + }
2837 + },
2838 +@@ -3369,6 +3372,9 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
2839 + }
2840 + }
2841 + },
2842 ++ {
2843 ++ .ifnum = -1
2844 ++ },
2845 + }
2846 + }
2847 + },
2848 +diff --git a/sound/usb/stream.c b/sound/usb/stream.c
2849 +index 67cf849aa16b..d9e3de495c16 100644
2850 +--- a/sound/usb/stream.c
2851 ++++ b/sound/usb/stream.c
2852 +@@ -596,12 +596,8 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
2853 + csep = snd_usb_find_desc(alts->extra, alts->extralen, NULL, USB_DT_CS_ENDPOINT);
2854 +
2855 + if (!csep || csep->bLength < 7 ||
2856 +- csep->bDescriptorSubtype != UAC_EP_GENERAL) {
2857 +- usb_audio_warn(chip,
2858 +- "%u:%d : no or invalid class specific endpoint descriptor\n",
2859 +- iface_no, altsd->bAlternateSetting);
2860 +- return 0;
2861 +- }
2862 ++ csep->bDescriptorSubtype != UAC_EP_GENERAL)
2863 ++ goto error;
2864 +
2865 + if (protocol == UAC_VERSION_1) {
2866 + attributes = csep->bmAttributes;
2867 +@@ -609,6 +605,8 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
2868 + struct uac2_iso_endpoint_descriptor *csep2 =
2869 + (struct uac2_iso_endpoint_descriptor *) csep;
2870 +
2871 ++ if (csep2->bLength < sizeof(*csep2))
2872 ++ goto error;
2873 + attributes = csep->bmAttributes & UAC_EP_CS_ATTR_FILL_MAX;
2874 +
2875 + /* emulate the endpoint attributes of a v1 device */
2876 +@@ -618,12 +616,20 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
2877 + struct uac3_iso_endpoint_descriptor *csep3 =
2878 + (struct uac3_iso_endpoint_descriptor *) csep;
2879 +
2880 ++ if (csep3->bLength < sizeof(*csep3))
2881 ++ goto error;
2882 + /* emulate the endpoint attributes of a v1 device */
2883 + if (le32_to_cpu(csep3->bmControls) & UAC2_CONTROL_PITCH)
2884 + attributes |= UAC_EP_CS_ATTR_PITCH_CONTROL;
2885 + }
2886 +
2887 + return attributes;
2888 ++
2889 ++ error:
2890 ++ usb_audio_warn(chip,
2891 ++ "%u:%d : no or invalid class specific endpoint descriptor\n",
2892 ++ iface_no, altsd->bAlternateSetting);
2893 ++ return 0;
2894 + }
2895 +
2896 + /* find an input terminal descriptor (either UAC1 or UAC2) with the given
2897 +@@ -631,13 +637,17 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
2898 + */
2899 + static void *
2900 + snd_usb_find_input_terminal_descriptor(struct usb_host_interface *ctrl_iface,
2901 +- int terminal_id)
2902 ++ int terminal_id, bool uac23)
2903 + {
2904 + struct uac2_input_terminal_descriptor *term = NULL;
2905 ++ size_t minlen = uac23 ? sizeof(struct uac2_input_terminal_descriptor) :
2906 ++ sizeof(struct uac_input_terminal_descriptor);
2907 +
2908 + while ((term = snd_usb_find_csint_desc(ctrl_iface->extra,
2909 + ctrl_iface->extralen,
2910 + term, UAC_INPUT_TERMINAL))) {
2911 ++ if (term->bLength < minlen)
2912 ++ continue;
2913 + if (term->bTerminalID == terminal_id)
2914 + return term;
2915 + }
2916 +@@ -655,7 +665,8 @@ snd_usb_find_output_terminal_descriptor(struct usb_host_interface *ctrl_iface,
2917 + while ((term = snd_usb_find_csint_desc(ctrl_iface->extra,
2918 + ctrl_iface->extralen,
2919 + term, UAC_OUTPUT_TERMINAL))) {
2920 +- if (term->bTerminalID == terminal_id)
2921 ++ if (term->bLength >= sizeof(*term) &&
2922 ++ term->bTerminalID == terminal_id)
2923 + return term;
2924 + }
2925 +
2926 +@@ -729,7 +740,8 @@ snd_usb_get_audioformat_uac12(struct snd_usb_audio *chip,
2927 + format = le16_to_cpu(as->wFormatTag); /* remember the format value */
2928 +
2929 + iterm = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf,
2930 +- as->bTerminalLink);
2931 ++ as->bTerminalLink,
2932 ++ false);
2933 + if (iterm) {
2934 + num_channels = iterm->bNrChannels;
2935 + chconfig = le16_to_cpu(iterm->wChannelConfig);
2936 +@@ -764,7 +776,8 @@ snd_usb_get_audioformat_uac12(struct snd_usb_audio *chip,
2937 + * to extract the clock
2938 + */
2939 + input_term = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf,
2940 +- as->bTerminalLink);
2941 ++ as->bTerminalLink,
2942 ++ true);
2943 + if (input_term) {
2944 + clock = input_term->bCSourceID;
2945 + if (!chconfig && (num_channels == input_term->bNrChannels))
2946 +@@ -998,7 +1011,8 @@ snd_usb_get_audioformat_uac3(struct snd_usb_audio *chip,
2947 + * to extract the clock
2948 + */
2949 + input_term = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf,
2950 +- as->bTerminalLink);
2951 ++ as->bTerminalLink,
2952 ++ true);
2953 + if (input_term) {
2954 + clock = input_term->bCSourceID;
2955 + goto found_clock;
2956 +diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c
2957 +index ff9d3a5825e1..c6635fee27d8 100644
2958 +--- a/tools/testing/nvdimm/test/iomap.c
2959 ++++ b/tools/testing/nvdimm/test/iomap.c
2960 +@@ -104,16 +104,29 @@ void *__wrap_devm_memremap(struct device *dev, resource_size_t offset,
2961 + }
2962 + EXPORT_SYMBOL(__wrap_devm_memremap);
2963 +
2964 ++static void nfit_test_kill(void *_pgmap)
2965 ++{
2966 ++ struct dev_pagemap *pgmap = _pgmap;
2967 ++
2968 ++ pgmap->kill(pgmap->ref);
2969 ++}
2970 ++
2971 + void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
2972 + {
2973 + resource_size_t offset = pgmap->res.start;
2974 + struct nfit_test_resource *nfit_res = get_nfit_res(offset);
2975 +
2976 +- if (nfit_res)
2977 ++ if (nfit_res) {
2978 ++ int rc;
2979 ++
2980 ++ rc = devm_add_action_or_reset(dev, nfit_test_kill, pgmap);
2981 ++ if (rc)
2982 ++ return ERR_PTR(rc);
2983 + return nfit_res->buf + offset - nfit_res->res.start;
2984 ++ }
2985 + return devm_memremap_pages(dev, pgmap);
2986 + }
2987 +-EXPORT_SYMBOL(__wrap_devm_memremap_pages);
2988 ++EXPORT_SYMBOL_GPL(__wrap_devm_memremap_pages);
2989 +
2990 + pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
2991 + {
2992 +diff --git a/tools/testing/selftests/android/Makefile b/tools/testing/selftests/android/Makefile
2993 +index d9a725478375..72c25a3cb658 100644
2994 +--- a/tools/testing/selftests/android/Makefile
2995 ++++ b/tools/testing/selftests/android/Makefile
2996 +@@ -6,7 +6,7 @@ TEST_PROGS := run.sh
2997 +
2998 + include ../lib.mk
2999 +
3000 +-all: khdr
3001 ++all:
3002 + @for DIR in $(SUBDIRS); do \
3003 + BUILD_TARGET=$(OUTPUT)/$$DIR; \
3004 + mkdir $$BUILD_TARGET -p; \
3005 +diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile
3006 +index ad1eeb14fda7..30996306cabc 100644
3007 +--- a/tools/testing/selftests/futex/functional/Makefile
3008 ++++ b/tools/testing/selftests/futex/functional/Makefile
3009 +@@ -19,6 +19,7 @@ TEST_GEN_FILES := \
3010 + TEST_PROGS := run.sh
3011 +
3012 + top_srcdir = ../../../../..
3013 ++KSFT_KHDR_INSTALL := 1
3014 + include ../../lib.mk
3015 +
3016 + $(TEST_GEN_FILES): $(HEADERS)
3017 +diff --git a/tools/testing/selftests/gpio/Makefile b/tools/testing/selftests/gpio/Makefile
3018 +index 46648427d537..07f572a1bd3f 100644
3019 +--- a/tools/testing/selftests/gpio/Makefile
3020 ++++ b/tools/testing/selftests/gpio/Makefile
3021 +@@ -10,8 +10,6 @@ TEST_PROGS_EXTENDED := gpio-mockup-chardev
3022 + GPIODIR := $(realpath ../../../gpio)
3023 + GPIOOBJ := gpio-utils.o
3024 +
3025 +-include ../lib.mk
3026 +-
3027 + all: $(TEST_PROGS_EXTENDED)
3028 +
3029 + override define CLEAN
3030 +@@ -19,7 +17,9 @@ override define CLEAN
3031 + $(MAKE) -C $(GPIODIR) OUTPUT=$(GPIODIR)/ clean
3032 + endef
3033 +
3034 +-$(TEST_PROGS_EXTENDED):| khdr
3035 ++KSFT_KHDR_INSTALL := 1
3036 ++include ../lib.mk
3037 ++
3038 + $(TEST_PROGS_EXTENDED): $(GPIODIR)/$(GPIOOBJ)
3039 +
3040 + $(GPIODIR)/$(GPIOOBJ):
3041 +diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
3042 +index 01a219229238..52bfe5e76907 100644
3043 +--- a/tools/testing/selftests/kvm/Makefile
3044 ++++ b/tools/testing/selftests/kvm/Makefile
3045 +@@ -1,6 +1,7 @@
3046 + all:
3047 +
3048 + top_srcdir = ../../../..
3049 ++KSFT_KHDR_INSTALL := 1
3050 + UNAME_M := $(shell uname -m)
3051 +
3052 + LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/ucall.c lib/sparsebit.c
3053 +@@ -44,7 +45,6 @@ $(OUTPUT)/libkvm.a: $(LIBKVM_OBJ)
3054 +
3055 + all: $(STATIC_LIBS)
3056 + $(TEST_GEN_PROGS): $(STATIC_LIBS)
3057 +-$(STATIC_LIBS):| khdr
3058 +
3059 + cscope: include_paths = $(LINUX_TOOL_INCLUDE) $(LINUX_HDR_PATH) include lib ..
3060 + cscope:
3061 +diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
3062 +index 0a8e75886224..8b0f16409ed7 100644
3063 +--- a/tools/testing/selftests/lib.mk
3064 ++++ b/tools/testing/selftests/lib.mk
3065 +@@ -16,18 +16,18 @@ TEST_GEN_PROGS := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS))
3066 + TEST_GEN_PROGS_EXTENDED := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS_EXTENDED))
3067 + TEST_GEN_FILES := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_FILES))
3068 +
3069 ++ifdef KSFT_KHDR_INSTALL
3070 + top_srcdir ?= ../../../..
3071 + include $(top_srcdir)/scripts/subarch.include
3072 + ARCH ?= $(SUBARCH)
3073 +
3074 +-all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
3075 +-
3076 + .PHONY: khdr
3077 + khdr:
3078 + make ARCH=$(ARCH) -C $(top_srcdir) headers_install
3079 +
3080 +-ifdef KSFT_KHDR_INSTALL
3081 +-$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES):| khdr
3082 ++all: khdr $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
3083 ++else
3084 ++all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
3085 + endif
3086 +
3087 + .ONESHELL:
3088 +diff --git a/tools/testing/selftests/networking/timestamping/Makefile b/tools/testing/selftests/networking/timestamping/Makefile
3089 +index 14cfcf006936..c46c0eefab9e 100644
3090 +--- a/tools/testing/selftests/networking/timestamping/Makefile
3091 ++++ b/tools/testing/selftests/networking/timestamping/Makefile
3092 +@@ -6,6 +6,7 @@ TEST_PROGS := hwtstamp_config rxtimestamp timestamping txtimestamp
3093 + all: $(TEST_PROGS)
3094 +
3095 + top_srcdir = ../../../../..
3096 ++KSFT_KHDR_INSTALL := 1
3097 + include ../../lib.mk
3098 +
3099 + clean:
3100 +diff --git a/tools/testing/selftests/tc-testing/bpf/Makefile b/tools/testing/selftests/tc-testing/bpf/Makefile
3101 +index dc92eb271d9a..be5a5e542804 100644
3102 +--- a/tools/testing/selftests/tc-testing/bpf/Makefile
3103 ++++ b/tools/testing/selftests/tc-testing/bpf/Makefile
3104 +@@ -4,6 +4,7 @@ APIDIR := ../../../../include/uapi
3105 + TEST_GEN_FILES = action.o
3106 +
3107 + top_srcdir = ../../../../..
3108 ++KSFT_KHDR_INSTALL := 1
3109 + include ../../lib.mk
3110 +
3111 + CLANG ?= clang
3112 +diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
3113 +index 6e67e726e5a5..e13eb6cc8901 100644
3114 +--- a/tools/testing/selftests/vm/Makefile
3115 ++++ b/tools/testing/selftests/vm/Makefile
3116 +@@ -25,6 +25,7 @@ TEST_GEN_FILES += virtual_address_range
3117 +
3118 + TEST_PROGS := run_vmtests
3119 +
3120 ++KSFT_KHDR_INSTALL := 1
3121 + include ../lib.mk
3122 +
3123 + $(OUTPUT)/userfaultfd: LDLIBS += -lpthread