Gentoo Archives: gentoo-commits

From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Fri, 21 Aug 2020 13:25:41
Message-Id: 1598016320.8599420bcb7e835fb32a4d5ba5e524adbb2c0e59.alicef@gentoo
1 commit: 8599420bcb7e835fb32a4d5ba5e524adbb2c0e59
2 Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
3 AuthorDate: Fri Aug 21 13:25:13 2020 +0000
4 Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
5 CommitDate: Fri Aug 21 13:25:20 2020 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=8599420b
7
8 Linux patch 5.4.60
9
10 Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>
11
12 0000_README | 4 +
13 1059_linux-5.4.60.patch | 4875 +++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 4879 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 78d0290..5a3df13 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -279,6 +279,10 @@ Patch: 1058_linux-5.4.59.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.4.59
23
24 +Patch: 1059_linux-5.4.60.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.4.60
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1059_linux-5.4.60.patch b/1059_linux-5.4.60.patch
33 new file mode 100644
34 index 0000000..280c19f
35 --- /dev/null
36 +++ b/1059_linux-5.4.60.patch
37 @@ -0,0 +1,4875 @@
38 +diff --git a/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt b/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt
39 +index c82794002595f..89647d7143879 100644
40 +--- a/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt
41 ++++ b/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt
42 +@@ -21,7 +21,7 @@ controller state. The mux controller state is described in
43 +
44 + Example:
45 + mux: mux-controller {
46 +- compatible = "mux-gpio";
47 ++ compatible = "gpio-mux";
48 + #mux-control-cells = <0>;
49 +
50 + mux-gpios = <&pioA 0 GPIO_ACTIVE_HIGH>,
51 +diff --git a/Makefile b/Makefile
52 +index cc72b8472f24a..7c001e21e28e7 100644
53 +--- a/Makefile
54 ++++ b/Makefile
55 +@@ -1,7 +1,7 @@
56 + # SPDX-License-Identifier: GPL-2.0
57 + VERSION = 5
58 + PATCHLEVEL = 4
59 +-SUBLEVEL = 59
60 ++SUBLEVEL = 60
61 + EXTRAVERSION =
62 + NAME = Kleptomaniac Octopus
63 +
64 +diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
65 +index fbcf03f86c967..05dc58c13fa41 100644
66 +--- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
67 ++++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
68 +@@ -19,6 +19,12 @@
69 + model = "Globalscale Marvell ESPRESSOBin Board";
70 + compatible = "globalscale,espressobin", "marvell,armada3720", "marvell,armada3710";
71 +
72 ++ aliases {
73 ++ ethernet0 = &eth0;
74 ++ serial0 = &uart0;
75 ++ serial1 = &uart1;
76 ++ };
77 ++
78 + chosen {
79 + stdout-path = "serial0:115200n8";
80 + };
81 +diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
82 +index a0b4f1bca4917..19128d994ee97 100644
83 +--- a/arch/arm64/kernel/perf_event.c
84 ++++ b/arch/arm64/kernel/perf_event.c
85 +@@ -155,7 +155,7 @@ armv8pmu_events_sysfs_show(struct device *dev,
86 +
87 + pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
88 +
89 +- return sprintf(page, "event=0x%03llx\n", pmu_attr->id);
90 ++ return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
91 + }
92 +
93 + #define ARMV8_EVENT_ATTR(name, config) \
94 +@@ -303,10 +303,13 @@ armv8pmu_event_attr_is_visible(struct kobject *kobj,
95 + test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap))
96 + return attr->mode;
97 +
98 +- pmu_attr->id -= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE;
99 +- if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
100 +- test_bit(pmu_attr->id, cpu_pmu->pmceid_ext_bitmap))
101 +- return attr->mode;
102 ++ if (pmu_attr->id >= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE) {
103 ++ u64 id = pmu_attr->id - ARMV8_PMUV3_EXT_COMMON_EVENT_BASE;
104 ++
105 ++ if (id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
106 ++ test_bit(id, cpu_pmu->pmceid_ext_bitmap))
107 ++ return attr->mode;
108 ++ }
109 +
110 + return 0;
111 + }
112 +diff --git a/arch/mips/boot/dts/ingenic/qi_lb60.dts b/arch/mips/boot/dts/ingenic/qi_lb60.dts
113 +index 7a371d9c5a33f..eda37fb516f0e 100644
114 +--- a/arch/mips/boot/dts/ingenic/qi_lb60.dts
115 ++++ b/arch/mips/boot/dts/ingenic/qi_lb60.dts
116 +@@ -69,7 +69,7 @@
117 + "Speaker", "OUTL",
118 + "Speaker", "OUTR",
119 + "INL", "LOUT",
120 +- "INL", "ROUT";
121 ++ "INR", "ROUT";
122 +
123 + simple-audio-card,aux-devs = <&amp>;
124 +
125 +diff --git a/arch/mips/kernel/topology.c b/arch/mips/kernel/topology.c
126 +index cd3e1f82e1a5d..08ad6371fbe08 100644
127 +--- a/arch/mips/kernel/topology.c
128 ++++ b/arch/mips/kernel/topology.c
129 +@@ -20,7 +20,7 @@ static int __init topology_init(void)
130 + for_each_present_cpu(i) {
131 + struct cpu *c = &per_cpu(cpu_devices, i);
132 +
133 +- c->hotpluggable = 1;
134 ++ c->hotpluggable = !!i;
135 + ret = register_cpu(c, i);
136 + if (ret)
137 + printk(KERN_WARNING "topology_init: register_cpu %d "
138 +diff --git a/arch/openrisc/kernel/stacktrace.c b/arch/openrisc/kernel/stacktrace.c
139 +index 43f140a28bc72..54d38809e22cb 100644
140 +--- a/arch/openrisc/kernel/stacktrace.c
141 ++++ b/arch/openrisc/kernel/stacktrace.c
142 +@@ -13,6 +13,7 @@
143 + #include <linux/export.h>
144 + #include <linux/sched.h>
145 + #include <linux/sched/debug.h>
146 ++#include <linux/sched/task_stack.h>
147 + #include <linux/stacktrace.h>
148 +
149 + #include <asm/processor.h>
150 +@@ -68,12 +69,25 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
151 + {
152 + unsigned long *sp = NULL;
153 +
154 ++ if (!try_get_task_stack(tsk))
155 ++ return;
156 ++
157 + if (tsk == current)
158 + sp = (unsigned long *) &sp;
159 +- else
160 +- sp = (unsigned long *) KSTK_ESP(tsk);
161 ++ else {
162 ++ unsigned long ksp;
163 ++
164 ++ /* Locate stack from kernel context */
165 ++ ksp = task_thread_info(tsk)->ksp;
166 ++ ksp += STACK_FRAME_OVERHEAD; /* redzone */
167 ++ ksp += sizeof(struct pt_regs);
168 ++
169 ++ sp = (unsigned long *) ksp;
170 ++ }
171 +
172 + unwind_stack(trace, sp, save_stack_address_nosched);
173 ++
174 ++ put_task_stack(tsk);
175 + }
176 + EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
177 +
178 +diff --git a/arch/powerpc/include/asm/percpu.h b/arch/powerpc/include/asm/percpu.h
179 +index dce863a7635cd..8e5b7d0b851c6 100644
180 +--- a/arch/powerpc/include/asm/percpu.h
181 ++++ b/arch/powerpc/include/asm/percpu.h
182 +@@ -10,8 +10,6 @@
183 +
184 + #ifdef CONFIG_SMP
185 +
186 +-#include <asm/paca.h>
187 +-
188 + #define __my_cpu_offset local_paca->data_offset
189 +
190 + #endif /* CONFIG_SMP */
191 +@@ -19,4 +17,6 @@
192 +
193 + #include <asm-generic/percpu.h>
194 +
195 ++#include <asm/paca.h>
196 ++
197 + #endif /* _ASM_POWERPC_PERCPU_H_ */
198 +diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
199 +index 881a026a603a6..187047592d53c 100644
200 +--- a/arch/powerpc/mm/fault.c
201 ++++ b/arch/powerpc/mm/fault.c
202 +@@ -241,6 +241,9 @@ static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
203 + return false;
204 + }
205 +
206 ++// This comes from 64-bit struct rt_sigframe + __SIGNAL_FRAMESIZE
207 ++#define SIGFRAME_MAX_SIZE (4096 + 128)
208 ++
209 + static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
210 + struct vm_area_struct *vma, unsigned int flags,
211 + bool *must_retry)
212 +@@ -248,7 +251,7 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
213 + /*
214 + * N.B. The POWER/Open ABI allows programs to access up to
215 + * 288 bytes below the stack pointer.
216 +- * The kernel signal delivery code writes up to about 1.5kB
217 ++ * The kernel signal delivery code writes a bit over 4KB
218 + * below the stack pointer (r1) before decrementing it.
219 + * The exec code can write slightly over 640kB to the stack
220 + * before setting the user r1. Thus we allow the stack to
221 +@@ -273,7 +276,7 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
222 + * between the last mapped region and the stack will
223 + * expand the stack rather than segfaulting.
224 + */
225 +- if (address + 2048 >= uregs->gpr[1])
226 ++ if (address + SIGFRAME_MAX_SIZE >= uregs->gpr[1])
227 + return false;
228 +
229 + if ((flags & FAULT_FLAG_WRITE) && (flags & FAULT_FLAG_USER) &&
230 +diff --git a/arch/powerpc/mm/ptdump/hashpagetable.c b/arch/powerpc/mm/ptdump/hashpagetable.c
231 +index a07278027c6f4..a2e8c3b2cf351 100644
232 +--- a/arch/powerpc/mm/ptdump/hashpagetable.c
233 ++++ b/arch/powerpc/mm/ptdump/hashpagetable.c
234 +@@ -259,7 +259,7 @@ static int pseries_find(unsigned long ea, int psize, bool primary, u64 *v, u64 *
235 + for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {
236 + lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);
237 +
238 +- if (lpar_rc != H_SUCCESS)
239 ++ if (lpar_rc)
240 + continue;
241 + for (j = 0; j < 4; j++) {
242 + if (HPTE_V_COMPARE(ptes[j].v, want_v) &&
243 +diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
244 +index f1888352b4e0b..e7d23a933a0d3 100644
245 +--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
246 ++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
247 +@@ -27,7 +27,7 @@ static bool rtas_hp_event;
248 + unsigned long pseries_memory_block_size(void)
249 + {
250 + struct device_node *np;
251 +- unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE;
252 ++ u64 memblock_size = MIN_MEMORY_BLOCK_SIZE;
253 + struct resource r;
254 +
255 + np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
256 +diff --git a/arch/sh/boards/mach-landisk/setup.c b/arch/sh/boards/mach-landisk/setup.c
257 +index 16b4d8b0bb850..2c44b94f82fb2 100644
258 +--- a/arch/sh/boards/mach-landisk/setup.c
259 ++++ b/arch/sh/boards/mach-landisk/setup.c
260 +@@ -82,6 +82,9 @@ device_initcall(landisk_devices_setup);
261 +
262 + static void __init landisk_setup(char **cmdline_p)
263 + {
264 ++ /* I/O port identity mapping */
265 ++ __set_io_port_base(0);
266 ++
267 + /* LED ON */
268 + __raw_writeb(__raw_readb(PA_LED) | 0x03, PA_LED);
269 +
270 +diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
271 +index 3c222d6fdee3b..187c72a58e69c 100644
272 +--- a/arch/x86/events/rapl.c
273 ++++ b/arch/x86/events/rapl.c
274 +@@ -642,7 +642,7 @@ static const struct attribute_group *rapl_attr_update[] = {
275 + &rapl_events_pkg_group,
276 + &rapl_events_ram_group,
277 + &rapl_events_gpu_group,
278 +- &rapl_events_gpu_group,
279 ++ &rapl_events_psys_group,
280 + NULL,
281 + };
282 +
283 +diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
284 +index df4d5385e6ddd..c8203694d9ce4 100644
285 +--- a/arch/x86/kernel/apic/vector.c
286 ++++ b/arch/x86/kernel/apic/vector.c
287 +@@ -554,6 +554,10 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
288 + irqd->chip_data = apicd;
289 + irqd->hwirq = virq + i;
290 + irqd_set_single_target(irqd);
291 ++
292 ++ /* Don't invoke affinity setter on deactivated interrupts */
293 ++ irqd_set_affinity_on_activate(irqd);
294 ++
295 + /*
296 + * Legacy vectors are already assigned when the IOAPIC
297 + * takes them over. They stay on the same vector. This is
298 +diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
299 +index c65adaf813848..41200706e6da1 100644
300 +--- a/arch/x86/kernel/tsc_msr.c
301 ++++ b/arch/x86/kernel/tsc_msr.c
302 +@@ -133,10 +133,15 @@ static const struct freq_desc freq_desc_ann = {
303 + .mask = 0x0f,
304 + };
305 +
306 +-/* 24 MHz crystal? : 24 * 13 / 4 = 78 MHz */
307 ++/*
308 ++ * 24 MHz crystal? : 24 * 13 / 4 = 78 MHz
309 ++ * Frequency step for Lightning Mountain SoC is fixed to 78 MHz,
310 ++ * so all the frequency entries are 78000.
311 ++ */
312 + static const struct freq_desc freq_desc_lgm = {
313 + .use_msr_plat = true,
314 +- .freqs = { 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000 },
315 ++ .freqs = { 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000,
316 ++ 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000 },
317 + .mask = 0x0f,
318 + };
319 +
320 +diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h
321 +index f092cc3f4e66d..956d4d47c6cd1 100644
322 +--- a/arch/xtensa/include/asm/thread_info.h
323 ++++ b/arch/xtensa/include/asm/thread_info.h
324 +@@ -55,6 +55,10 @@ struct thread_info {
325 + mm_segment_t addr_limit; /* thread address space */
326 +
327 + unsigned long cpenable;
328 ++#if XCHAL_HAVE_EXCLUSIVE
329 ++ /* result of the most recent exclusive store */
330 ++ unsigned long atomctl8;
331 ++#endif
332 +
333 + /* Allocate storage for extra user states and coprocessor states. */
334 + #if XTENSA_HAVE_COPROCESSORS
335 +diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
336 +index 33a257b33723a..dc5c83cad9be8 100644
337 +--- a/arch/xtensa/kernel/asm-offsets.c
338 ++++ b/arch/xtensa/kernel/asm-offsets.c
339 +@@ -93,6 +93,9 @@ int main(void)
340 + DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra));
341 + DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
342 + DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable));
343 ++#if XCHAL_HAVE_EXCLUSIVE
344 ++ DEFINE(THREAD_ATOMCTL8, offsetof (struct thread_info, atomctl8));
345 ++#endif
346 + #if XTENSA_HAVE_COPROCESSORS
347 + DEFINE(THREAD_XTREGS_CP0, offsetof(struct thread_info, xtregs_cp.cp0));
348 + DEFINE(THREAD_XTREGS_CP1, offsetof(struct thread_info, xtregs_cp.cp1));
349 +diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
350 +index 9e3676879168a..59671603c9c62 100644
351 +--- a/arch/xtensa/kernel/entry.S
352 ++++ b/arch/xtensa/kernel/entry.S
353 +@@ -374,6 +374,11 @@ common_exception:
354 + s32i a2, a1, PT_LCOUNT
355 + #endif
356 +
357 ++#if XCHAL_HAVE_EXCLUSIVE
358 ++ /* Clear exclusive access monitor set by interrupted code */
359 ++ clrex
360 ++#endif
361 ++
362 + /* It is now save to restore the EXC_TABLE_FIXUP variable. */
363 +
364 + rsr a2, exccause
365 +@@ -2024,6 +2029,12 @@ ENTRY(_switch_to)
366 + s32i a3, a4, THREAD_CPENABLE
367 + #endif
368 +
369 ++#if XCHAL_HAVE_EXCLUSIVE
370 ++ l32i a3, a5, THREAD_ATOMCTL8
371 ++ getex a3
372 ++ s32i a3, a4, THREAD_ATOMCTL8
373 ++#endif
374 ++
375 + /* Flush register file. */
376 +
377 + spill_registers_kernel
378 +diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c
379 +index 9bae79f703013..86c9ba9631551 100644
380 +--- a/arch/xtensa/kernel/perf_event.c
381 ++++ b/arch/xtensa/kernel/perf_event.c
382 +@@ -401,7 +401,7 @@ static struct pmu xtensa_pmu = {
383 + .read = xtensa_pmu_read,
384 + };
385 +
386 +-static int xtensa_pmu_setup(int cpu)
387 ++static int xtensa_pmu_setup(unsigned int cpu)
388 + {
389 + unsigned i;
390 +
391 +diff --git a/crypto/af_alg.c b/crypto/af_alg.c
392 +index a3b9df99af6de..35e026ba2c7ed 100644
393 +--- a/crypto/af_alg.c
394 ++++ b/crypto/af_alg.c
395 +@@ -635,6 +635,7 @@ void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
396 +
397 + if (!ctx->used)
398 + ctx->merge = 0;
399 ++ ctx->init = ctx->more;
400 + }
401 + EXPORT_SYMBOL_GPL(af_alg_pull_tsgl);
402 +
403 +@@ -734,9 +735,10 @@ EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup);
404 + *
405 + * @sk socket of connection to user space
406 + * @flags If MSG_DONTWAIT is set, then only report if function would sleep
407 ++ * @min Set to minimum request size if partial requests are allowed.
408 + * @return 0 when writable memory is available, < 0 upon error
409 + */
410 +-int af_alg_wait_for_data(struct sock *sk, unsigned flags)
411 ++int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min)
412 + {
413 + DEFINE_WAIT_FUNC(wait, woken_wake_function);
414 + struct alg_sock *ask = alg_sk(sk);
415 +@@ -754,7 +756,9 @@ int af_alg_wait_for_data(struct sock *sk, unsigned flags)
416 + if (signal_pending(current))
417 + break;
418 + timeout = MAX_SCHEDULE_TIMEOUT;
419 +- if (sk_wait_event(sk, &timeout, (ctx->used || !ctx->more),
420 ++ if (sk_wait_event(sk, &timeout,
421 ++ ctx->init && (!ctx->more ||
422 ++ (min && ctx->used >= min)),
423 + &wait)) {
424 + err = 0;
425 + break;
426 +@@ -843,10 +847,11 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
427 + }
428 +
429 + lock_sock(sk);
430 +- if (!ctx->more && ctx->used) {
431 ++ if (ctx->init && (init || !ctx->more)) {
432 + err = -EINVAL;
433 + goto unlock;
434 + }
435 ++ ctx->init = true;
436 +
437 + if (init) {
438 + ctx->enc = enc;
439 +diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
440 +index 0ae000a61c7f5..43c6aa784858b 100644
441 +--- a/crypto/algif_aead.c
442 ++++ b/crypto/algif_aead.c
443 +@@ -106,8 +106,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
444 + size_t usedpages = 0; /* [in] RX bufs to be used from user */
445 + size_t processed = 0; /* [in] TX bufs to be consumed */
446 +
447 +- if (!ctx->used) {
448 +- err = af_alg_wait_for_data(sk, flags);
449 ++ if (!ctx->init || ctx->more) {
450 ++ err = af_alg_wait_for_data(sk, flags, 0);
451 + if (err)
452 + return err;
453 + }
454 +@@ -558,12 +558,6 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
455 +
456 + INIT_LIST_HEAD(&ctx->tsgl_list);
457 + ctx->len = len;
458 +- ctx->used = 0;
459 +- atomic_set(&ctx->rcvused, 0);
460 +- ctx->more = 0;
461 +- ctx->merge = 0;
462 +- ctx->enc = 0;
463 +- ctx->aead_assoclen = 0;
464 + crypto_init_wait(&ctx->wait);
465 +
466 + ask->private = ctx;
467 +diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
468 +index ec5567c87a6df..81c4022285a7c 100644
469 +--- a/crypto/algif_skcipher.c
470 ++++ b/crypto/algif_skcipher.c
471 +@@ -61,8 +61,8 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
472 + int err = 0;
473 + size_t len = 0;
474 +
475 +- if (!ctx->used) {
476 +- err = af_alg_wait_for_data(sk, flags);
477 ++ if (!ctx->init || (ctx->more && ctx->used < bs)) {
478 ++ err = af_alg_wait_for_data(sk, flags, bs);
479 + if (err)
480 + return err;
481 + }
482 +@@ -333,6 +333,7 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
483 + ctx = sock_kmalloc(sk, len, GFP_KERNEL);
484 + if (!ctx)
485 + return -ENOMEM;
486 ++ memset(ctx, 0, len);
487 +
488 + ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(tfm),
489 + GFP_KERNEL);
490 +@@ -340,16 +341,10 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
491 + sock_kfree_s(sk, ctx, len);
492 + return -ENOMEM;
493 + }
494 +-
495 + memset(ctx->iv, 0, crypto_skcipher_ivsize(tfm));
496 +
497 + INIT_LIST_HEAD(&ctx->tsgl_list);
498 + ctx->len = len;
499 +- ctx->used = 0;
500 +- atomic_set(&ctx->rcvused, 0);
501 +- ctx->more = 0;
502 +- ctx->merge = 0;
503 +- ctx->enc = 0;
504 + crypto_init_wait(&ctx->wait);
505 +
506 + ask->private = ctx;
507 +diff --git a/drivers/base/dd.c b/drivers/base/dd.c
508 +index b25bcab2a26bd..1d5dd37f3abe4 100644
509 +--- a/drivers/base/dd.c
510 ++++ b/drivers/base/dd.c
511 +@@ -872,7 +872,9 @@ static int __device_attach(struct device *dev, bool allow_async)
512 + int ret = 0;
513 +
514 + device_lock(dev);
515 +- if (dev->driver) {
516 ++ if (dev->p->dead) {
517 ++ goto out_unlock;
518 ++ } else if (dev->driver) {
519 + if (device_is_bound(dev)) {
520 + ret = 1;
521 + goto out_unlock;
522 +diff --git a/drivers/clk/actions/owl-s500.c b/drivers/clk/actions/owl-s500.c
523 +index e2007ac4d235d..0eb83a0b70bcc 100644
524 +--- a/drivers/clk/actions/owl-s500.c
525 ++++ b/drivers/clk/actions/owl-s500.c
526 +@@ -183,7 +183,7 @@ static OWL_GATE(timer_clk, "timer_clk", "hosc", CMU_DEVCLKEN1, 27, 0, 0);
527 + static OWL_GATE(hdmi_clk, "hdmi_clk", "hosc", CMU_DEVCLKEN1, 3, 0, 0);
528 +
529 + /* divider clocks */
530 +-static OWL_DIVIDER(h_clk, "h_clk", "ahbprevdiv_clk", CMU_BUSCLK1, 12, 2, NULL, 0, 0);
531 ++static OWL_DIVIDER(h_clk, "h_clk", "ahbprediv_clk", CMU_BUSCLK1, 12, 2, NULL, 0, 0);
532 + static OWL_DIVIDER(rmii_ref_clk, "rmii_ref_clk", "ethernet_pll_clk", CMU_ETHERNETPLL, 1, 1, rmii_ref_div_table, 0, 0);
533 +
534 + /* factor clocks */
535 +diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
536 +index 6e5d635f030f4..45420b514149f 100644
537 +--- a/drivers/clk/bcm/clk-bcm2835.c
538 ++++ b/drivers/clk/bcm/clk-bcm2835.c
539 +@@ -314,6 +314,7 @@ struct bcm2835_cprman {
540 + struct device *dev;
541 + void __iomem *regs;
542 + spinlock_t regs_lock; /* spinlock for all clocks */
543 ++ unsigned int soc;
544 +
545 + /*
546 + * Real names of cprman clock parents looked up through
547 +@@ -525,6 +526,20 @@ static int bcm2835_pll_is_on(struct clk_hw *hw)
548 + A2W_PLL_CTRL_PRST_DISABLE;
549 + }
550 +
551 ++static u32 bcm2835_pll_get_prediv_mask(struct bcm2835_cprman *cprman,
552 ++ const struct bcm2835_pll_data *data)
553 ++{
554 ++ /*
555 ++ * On BCM2711 there isn't a pre-divisor available in the PLL feedback
556 ++ * loop. Bits 13:14 of ANA1 (PLLA,PLLB,PLLC,PLLD) have been re-purposed
557 ++ * for to for VCO RANGE bits.
558 ++ */
559 ++ if (cprman->soc & SOC_BCM2711)
560 ++ return 0;
561 ++
562 ++ return data->ana->fb_prediv_mask;
563 ++}
564 ++
565 + static void bcm2835_pll_choose_ndiv_and_fdiv(unsigned long rate,
566 + unsigned long parent_rate,
567 + u32 *ndiv, u32 *fdiv)
568 +@@ -582,7 +597,7 @@ static unsigned long bcm2835_pll_get_rate(struct clk_hw *hw,
569 + ndiv = (a2wctrl & A2W_PLL_CTRL_NDIV_MASK) >> A2W_PLL_CTRL_NDIV_SHIFT;
570 + pdiv = (a2wctrl & A2W_PLL_CTRL_PDIV_MASK) >> A2W_PLL_CTRL_PDIV_SHIFT;
571 + using_prediv = cprman_read(cprman, data->ana_reg_base + 4) &
572 +- data->ana->fb_prediv_mask;
573 ++ bcm2835_pll_get_prediv_mask(cprman, data);
574 +
575 + if (using_prediv) {
576 + ndiv *= 2;
577 +@@ -665,6 +680,7 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw,
578 + struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw);
579 + struct bcm2835_cprman *cprman = pll->cprman;
580 + const struct bcm2835_pll_data *data = pll->data;
581 ++ u32 prediv_mask = bcm2835_pll_get_prediv_mask(cprman, data);
582 + bool was_using_prediv, use_fb_prediv, do_ana_setup_first;
583 + u32 ndiv, fdiv, a2w_ctl;
584 + u32 ana[4];
585 +@@ -682,7 +698,7 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw,
586 + for (i = 3; i >= 0; i--)
587 + ana[i] = cprman_read(cprman, data->ana_reg_base + i * 4);
588 +
589 +- was_using_prediv = ana[1] & data->ana->fb_prediv_mask;
590 ++ was_using_prediv = ana[1] & prediv_mask;
591 +
592 + ana[0] &= ~data->ana->mask0;
593 + ana[0] |= data->ana->set0;
594 +@@ -692,10 +708,10 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw,
595 + ana[3] |= data->ana->set3;
596 +
597 + if (was_using_prediv && !use_fb_prediv) {
598 +- ana[1] &= ~data->ana->fb_prediv_mask;
599 ++ ana[1] &= ~prediv_mask;
600 + do_ana_setup_first = true;
601 + } else if (!was_using_prediv && use_fb_prediv) {
602 +- ana[1] |= data->ana->fb_prediv_mask;
603 ++ ana[1] |= prediv_mask;
604 + do_ana_setup_first = false;
605 + } else {
606 + do_ana_setup_first = true;
607 +@@ -2234,6 +2250,7 @@ static int bcm2835_clk_probe(struct platform_device *pdev)
608 + platform_set_drvdata(pdev, cprman);
609 +
610 + cprman->onecell.num = asize;
611 ++ cprman->soc = pdata->soc;
612 + hws = cprman->onecell.hws;
613 +
614 + for (i = 0; i < asize; i++) {
615 +diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
616 +index 055318f979915..a69f53e435ed5 100644
617 +--- a/drivers/clk/qcom/clk-alpha-pll.c
618 ++++ b/drivers/clk/qcom/clk-alpha-pll.c
619 +@@ -55,7 +55,6 @@
620 + #define PLL_STATUS(p) ((p)->offset + (p)->regs[PLL_OFF_STATUS])
621 + #define PLL_OPMODE(p) ((p)->offset + (p)->regs[PLL_OFF_OPMODE])
622 + #define PLL_FRAC(p) ((p)->offset + (p)->regs[PLL_OFF_FRAC])
623 +-#define PLL_CAL_VAL(p) ((p)->offset + (p)->regs[PLL_OFF_CAL_VAL])
624 +
625 + const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
626 + [CLK_ALPHA_PLL_TYPE_DEFAULT] = {
627 +@@ -114,7 +113,6 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
628 + [PLL_OFF_STATUS] = 0x30,
629 + [PLL_OFF_OPMODE] = 0x38,
630 + [PLL_OFF_ALPHA_VAL] = 0x40,
631 +- [PLL_OFF_CAL_VAL] = 0x44,
632 + },
633 + };
634 + EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
635 +diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
636 +index bf5730832ef3d..c6fb57cd576f5 100644
637 +--- a/drivers/clk/qcom/gcc-sdm660.c
638 ++++ b/drivers/clk/qcom/gcc-sdm660.c
639 +@@ -1715,6 +1715,9 @@ static struct clk_branch gcc_mss_cfg_ahb_clk = {
640 +
641 + static struct clk_branch gcc_mss_mnoc_bimc_axi_clk = {
642 + .halt_reg = 0x8a004,
643 ++ .halt_check = BRANCH_HALT,
644 ++ .hwcg_reg = 0x8a004,
645 ++ .hwcg_bit = 1,
646 + .clkr = {
647 + .enable_reg = 0x8a004,
648 + .enable_mask = BIT(0),
649 +diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c
650 +index fad42897a7a7f..ee908fbfeab17 100644
651 +--- a/drivers/clk/qcom/gcc-sm8150.c
652 ++++ b/drivers/clk/qcom/gcc-sm8150.c
653 +@@ -1616,6 +1616,7 @@ static struct clk_branch gcc_gpu_cfg_ahb_clk = {
654 + };
655 +
656 + static struct clk_branch gcc_gpu_gpll0_clk_src = {
657 ++ .halt_check = BRANCH_HALT_SKIP,
658 + .clkr = {
659 + .enable_reg = 0x52004,
660 + .enable_mask = BIT(15),
661 +@@ -1631,13 +1632,14 @@ static struct clk_branch gcc_gpu_gpll0_clk_src = {
662 + };
663 +
664 + static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
665 ++ .halt_check = BRANCH_HALT_SKIP,
666 + .clkr = {
667 + .enable_reg = 0x52004,
668 + .enable_mask = BIT(16),
669 + .hw.init = &(struct clk_init_data){
670 + .name = "gcc_gpu_gpll0_div_clk_src",
671 + .parent_hws = (const struct clk_hw *[]){
672 +- &gcc_gpu_gpll0_clk_src.clkr.hw },
673 ++ &gpll0_out_even.clkr.hw },
674 + .num_parents = 1,
675 + .flags = CLK_SET_RATE_PARENT,
676 + .ops = &clk_branch2_ops,
677 +@@ -1728,6 +1730,7 @@ static struct clk_branch gcc_npu_cfg_ahb_clk = {
678 + };
679 +
680 + static struct clk_branch gcc_npu_gpll0_clk_src = {
681 ++ .halt_check = BRANCH_HALT_SKIP,
682 + .clkr = {
683 + .enable_reg = 0x52004,
684 + .enable_mask = BIT(18),
685 +@@ -1743,13 +1746,14 @@ static struct clk_branch gcc_npu_gpll0_clk_src = {
686 + };
687 +
688 + static struct clk_branch gcc_npu_gpll0_div_clk_src = {
689 ++ .halt_check = BRANCH_HALT_SKIP,
690 + .clkr = {
691 + .enable_reg = 0x52004,
692 + .enable_mask = BIT(19),
693 + .hw.init = &(struct clk_init_data){
694 + .name = "gcc_npu_gpll0_div_clk_src",
695 + .parent_hws = (const struct clk_hw *[]){
696 +- &gcc_npu_gpll0_clk_src.clkr.hw },
697 ++ &gpll0_out_even.clkr.hw },
698 + .num_parents = 1,
699 + .flags = CLK_SET_RATE_PARENT,
700 + .ops = &clk_branch2_ops,
701 +diff --git a/drivers/clk/sirf/clk-atlas6.c b/drivers/clk/sirf/clk-atlas6.c
702 +index c84d5bab7ac28..b95483bb6a5ec 100644
703 +--- a/drivers/clk/sirf/clk-atlas6.c
704 ++++ b/drivers/clk/sirf/clk-atlas6.c
705 +@@ -135,7 +135,7 @@ static void __init atlas6_clk_init(struct device_node *np)
706 +
707 + for (i = pll1; i < maxclk; i++) {
708 + atlas6_clks[i] = clk_register(NULL, atlas6_clk_hw_array[i]);
709 +- BUG_ON(!atlas6_clks[i]);
710 ++ BUG_ON(IS_ERR(atlas6_clks[i]));
711 + }
712 + clk_register_clkdev(atlas6_clks[cpu], NULL, "cpu");
713 + clk_register_clkdev(atlas6_clks[io], NULL, "io");
714 +diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
715 +index 4ce9c2b4544a2..fdd994ee55e22 100644
716 +--- a/drivers/crypto/caam/caamalg.c
717 ++++ b/drivers/crypto/caam/caamalg.c
718 +@@ -818,12 +818,6 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
719 + return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
720 + }
721 +
722 +-static int arc4_skcipher_setkey(struct crypto_skcipher *skcipher,
723 +- const u8 *key, unsigned int keylen)
724 +-{
725 +- return skcipher_setkey(skcipher, key, keylen, 0);
726 +-}
727 +-
728 + static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
729 + const u8 *key, unsigned int keylen)
730 + {
731 +@@ -2058,21 +2052,6 @@ static struct caam_skcipher_alg driver_algs[] = {
732 + },
733 + .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB,
734 + },
735 +- {
736 +- .skcipher = {
737 +- .base = {
738 +- .cra_name = "ecb(arc4)",
739 +- .cra_driver_name = "ecb-arc4-caam",
740 +- .cra_blocksize = ARC4_BLOCK_SIZE,
741 +- },
742 +- .setkey = arc4_skcipher_setkey,
743 +- .encrypt = skcipher_encrypt,
744 +- .decrypt = skcipher_decrypt,
745 +- .min_keysize = ARC4_MIN_KEY_SIZE,
746 +- .max_keysize = ARC4_MAX_KEY_SIZE,
747 +- },
748 +- .caam.class1_alg_type = OP_ALG_ALGSEL_ARC4 | OP_ALG_AAI_ECB,
749 +- },
750 + };
751 +
752 + static struct caam_aead_alg driver_aeads[] = {
753 +@@ -3533,7 +3512,6 @@ int caam_algapi_init(struct device *ctrldev)
754 + struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
755 + int i = 0, err = 0;
756 + u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
757 +- u32 arc4_inst;
758 + unsigned int md_limit = SHA512_DIGEST_SIZE;
759 + bool registered = false, gcm_support;
760 +
761 +@@ -3553,8 +3531,6 @@ int caam_algapi_init(struct device *ctrldev)
762 + CHA_ID_LS_DES_SHIFT;
763 + aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
764 + md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
765 +- arc4_inst = (cha_inst & CHA_ID_LS_ARC4_MASK) >>
766 +- CHA_ID_LS_ARC4_SHIFT;
767 + ccha_inst = 0;
768 + ptha_inst = 0;
769 +
770 +@@ -3575,7 +3551,6 @@ int caam_algapi_init(struct device *ctrldev)
771 + md_inst = mdha & CHA_VER_NUM_MASK;
772 + ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
773 + ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
774 +- arc4_inst = rd_reg32(&priv->ctrl->vreg.afha) & CHA_VER_NUM_MASK;
775 +
776 + gcm_support = aesa & CHA_VER_MISC_AES_GCM;
777 + }
778 +@@ -3598,10 +3573,6 @@ int caam_algapi_init(struct device *ctrldev)
779 + if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
780 + continue;
781 +
782 +- /* Skip ARC4 algorithms if not supported by device */
783 +- if (!arc4_inst && alg_sel == OP_ALG_ALGSEL_ARC4)
784 +- continue;
785 +-
786 + /*
787 + * Check support for AES modes not available
788 + * on LP devices.
789 +diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
790 +index 60e2a54c19f11..c3c22a8de4c00 100644
791 +--- a/drivers/crypto/caam/compat.h
792 ++++ b/drivers/crypto/caam/compat.h
793 +@@ -43,7 +43,6 @@
794 + #include <crypto/akcipher.h>
795 + #include <crypto/scatterwalk.h>
796 + #include <crypto/skcipher.h>
797 +-#include <crypto/arc4.h>
798 + #include <crypto/internal/skcipher.h>
799 + #include <crypto/internal/hash.h>
800 + #include <crypto/internal/rsa.h>
801 +diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
802 +index 47f529ce280ae..2718396083ee4 100644
803 +--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
804 ++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
805 +@@ -85,12 +85,77 @@ static int rv1_determine_dppclk_threshold(struct clk_mgr_internal *clk_mgr, stru
806 + return disp_clk_threshold;
807 + }
808 +
809 +-static void ramp_up_dispclk_with_dpp(struct clk_mgr_internal *clk_mgr, struct dc *dc, struct dc_clocks *new_clocks)
810 ++static void ramp_up_dispclk_with_dpp(
811 ++ struct clk_mgr_internal *clk_mgr,
812 ++ struct dc *dc,
813 ++ struct dc_clocks *new_clocks,
814 ++ bool safe_to_lower)
815 + {
816 + int i;
817 + int dispclk_to_dpp_threshold = rv1_determine_dppclk_threshold(clk_mgr, new_clocks);
818 + bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
819 +
820 ++ /* this function is to change dispclk, dppclk and dprefclk according to
821 ++ * bandwidth requirement. Its call stack is rv1_update_clocks -->
822 ++ * update_clocks --> dcn10_prepare_bandwidth / dcn10_optimize_bandwidth
823 ++ * --> prepare_bandwidth / optimize_bandwidth. before change dcn hw,
824 ++ * prepare_bandwidth will be called first to allow enough clock,
825 ++ * watermark for change, after end of dcn hw change, optimize_bandwidth
826 ++ * is executed to lower clock to save power for new dcn hw settings.
827 ++ *
828 ++ * below is sequence of commit_planes_for_stream:
829 ++ *
830 ++ * step 1: prepare_bandwidth - raise clock to have enough bandwidth
831 ++ * step 2: lock_doublebuffer_enable
832 ++ * step 3: pipe_control_lock(true) - make dchubp register change will
833 ++ * not take effect right way
834 ++ * step 4: apply_ctx_for_surface - program dchubp
835 ++ * step 5: pipe_control_lock(false) - dchubp register change take effect
836 ++ * step 6: optimize_bandwidth --> dc_post_update_surfaces_to_stream
837 ++ * for full_date, optimize clock to save power
838 ++ *
839 ++ * at end of step 1, dcn clocks (dprefclk, dispclk, dppclk) may be
840 ++ * changed for new dchubp configuration. but real dcn hub dchubps are
841 ++ * still running with old configuration until end of step 5. this need
842 ++ * clocks settings at step 1 should not less than that before step 1.
843 ++ * this is checked by two conditions: 1. if (should_set_clock(safe_to_lower
844 ++ * , new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) ||
845 ++ * new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz)
846 ++ * 2. request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz
847 ++ *
848 ++ * the second condition is based on new dchubp configuration. dppclk
849 ++ * for new dchubp may be different from dppclk before step 1.
850 ++ * for example, before step 1, dchubps are as below:
851 ++ * pipe 0: recout=(0,40,1920,980) viewport=(0,0,1920,979)
852 ++ * pipe 1: recout=(0,0,1920,1080) viewport=(0,0,1920,1080)
853 ++ * for dppclk for pipe0 need dppclk = dispclk
854 ++ *
855 ++ * new dchubp pipe split configuration:
856 ++ * pipe 0: recout=(0,0,960,1080) viewport=(0,0,960,1080)
857 ++ * pipe 1: recout=(960,0,960,1080) viewport=(960,0,960,1080)
858 ++ * dppclk only needs dppclk = dispclk /2.
859 ++ *
860 ++ * dispclk, dppclk are not lock by otg master lock. they take effect
861 ++ * after step 1. during this transition, dispclk are the same, but
862 ++ * dppclk is changed to half of previous clock for old dchubp
863 ++ * configuration between step 1 and step 6. This may cause p-state
864 ++ * warning intermittently.
865 ++ *
866 ++ * for new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz, we
867 ++ * need make sure dppclk are not changed to less between step 1 and 6.
868 ++ * for new_clocks->dispclk_khz > clk_mgr_base->clks.dispclk_khz,
869 ++ * new display clock is raised, but we do not know ratio of
870 ++ * new_clocks->dispclk_khz and clk_mgr_base->clks.dispclk_khz,
871 ++ * new_clocks->dispclk_khz /2 does not guarantee equal or higher than
872 ++ * old dppclk. we could ignore power saving different between
873 ++ * dppclk = displck and dppclk = dispclk / 2 between step 1 and step 6.
874 ++ * as long as safe_to_lower = false, set dpclk = dispclk to simplify
875 ++ * condition check.
876 ++ * todo: review this change for other asic.
877 ++ **/
878 ++ if (!safe_to_lower)
879 ++ request_dpp_div = false;
880 ++
881 + /* set disp clk to dpp clk threshold */
882 +
883 + clk_mgr->funcs->set_dispclk(clk_mgr, dispclk_to_dpp_threshold);
884 +@@ -206,7 +271,7 @@ static void rv1_update_clocks(struct clk_mgr *clk_mgr_base,
885 + /* program dispclk on = as a w/a for sleep resume clock ramping issues */
886 + if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)
887 + || new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz) {
888 +- ramp_up_dispclk_with_dpp(clk_mgr, dc, new_clocks);
889 ++ ramp_up_dispclk_with_dpp(clk_mgr, dc, new_clocks, safe_to_lower);
890 + clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
891 + send_request_to_lower = true;
892 + }
893 +diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
894 +index 2e71ca3e19f58..09a3d8ae44491 100644
895 +--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
896 ++++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
897 +@@ -2725,7 +2725,10 @@ static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
898 +
899 + static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
900 + {
901 +- return ci_is_smc_ram_running(hwmgr);
902 ++ return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
903 ++ CGS_IND_REG__SMC, FEATURE_STATUS,
904 ++ VOLTAGE_CONTROLLER_ON))
905 ++ ? true : false;
906 + }
907 +
908 + static int ci_smu_init(struct pp_hwmgr *hwmgr)
909 +diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
910 +index 006d6087700fb..2de1eebe591f9 100644
911 +--- a/drivers/gpu/drm/drm_dp_mst_topology.c
912 ++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
913 +@@ -3369,11 +3369,11 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
914 + {
915 + int ret;
916 +
917 +- port = drm_dp_mst_topology_get_port_validated(mgr, port);
918 +- if (!port)
919 ++ if (slots < 0)
920 + return false;
921 +
922 +- if (slots < 0)
923 ++ port = drm_dp_mst_topology_get_port_validated(mgr, port);
924 ++ if (!port)
925 + return false;
926 +
927 + if (port->vcpi.vcpi > 0) {
928 +@@ -3389,6 +3389,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
929 + if (ret) {
930 + DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
931 + DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
932 ++ drm_dp_mst_topology_put_port(port);
933 + goto out;
934 + }
935 + DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
936 +diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
937 +index d00ea384dcbfe..58f5dc2f6dd52 100644
938 +--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
939 ++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
940 +@@ -121,6 +121,12 @@ static const struct dmi_system_id orientation_data[] = {
941 + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T101HA"),
942 + },
943 + .driver_data = (void *)&lcd800x1280_rightside_up,
944 ++ }, { /* Asus T103HAF */
945 ++ .matches = {
946 ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
947 ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"),
948 ++ },
949 ++ .driver_data = (void *)&lcd800x1280_rightside_up,
950 + }, { /* GPD MicroPC (generic strings, also match on bios date) */
951 + .matches = {
952 + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
953 +diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
954 +index 9af5a08d5490f..d6629fc869f3f 100644
955 +--- a/drivers/gpu/drm/imx/imx-ldb.c
956 ++++ b/drivers/gpu/drm/imx/imx-ldb.c
957 +@@ -302,18 +302,19 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
958 + {
959 + struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
960 + struct imx_ldb *ldb = imx_ldb_ch->ldb;
961 ++ int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
962 + int mux, ret;
963 +
964 + drm_panel_disable(imx_ldb_ch->panel);
965 +
966 +- if (imx_ldb_ch == &ldb->channel[0])
967 ++ if (imx_ldb_ch == &ldb->channel[0] || dual)
968 + ldb->ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK;
969 +- else if (imx_ldb_ch == &ldb->channel[1])
970 ++ if (imx_ldb_ch == &ldb->channel[1] || dual)
971 + ldb->ldb_ctrl &= ~LDB_CH1_MODE_EN_MASK;
972 +
973 + regmap_write(ldb->regmap, IOMUXC_GPR2, ldb->ldb_ctrl);
974 +
975 +- if (ldb->ldb_ctrl & LDB_SPLIT_MODE_EN) {
976 ++ if (dual) {
977 + clk_disable_unprepare(ldb->clk[0]);
978 + clk_disable_unprepare(ldb->clk[1]);
979 + }
980 +diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
981 +index 77c3a3855c682..c05e013bb8e3d 100644
982 +--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
983 ++++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
984 +@@ -46,7 +46,7 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
985 + sg_free_table(&bo->sgts[i]);
986 + }
987 + }
988 +- kfree(bo->sgts);
989 ++ kvfree(bo->sgts);
990 + }
991 +
992 + drm_gem_shmem_free_object(obj);
993 +diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
994 +index 5d75f8cf64776..3dc9b30a64b01 100644
995 +--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
996 ++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
997 +@@ -486,7 +486,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
998 + pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
999 + sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
1000 + if (!pages) {
1001 +- kfree(bo->sgts);
1002 ++ kvfree(bo->sgts);
1003 + bo->sgts = NULL;
1004 + mutex_unlock(&bo->base.pages_lock);
1005 + ret = -ENOMEM;
1006 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
1007 +index f47d5710cc951..33b1519887474 100644
1008 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
1009 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
1010 +@@ -2666,7 +2666,7 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
1011 + ++i;
1012 + }
1013 +
1014 +- if (i != unit) {
1015 ++ if (&con->head == &dev_priv->dev->mode_config.connector_list) {
1016 + DRM_ERROR("Could not find initial display unit.\n");
1017 + ret = -EINVAL;
1018 + goto out_unlock;
1019 +@@ -2690,13 +2690,13 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
1020 + break;
1021 + }
1022 +
1023 +- if (mode->type & DRM_MODE_TYPE_PREFERRED)
1024 +- *p_mode = mode;
1025 +- else {
1026 ++ if (&mode->head == &con->modes) {
1027 + WARN_ONCE(true, "Could not find initial preferred mode.\n");
1028 + *p_mode = list_first_entry(&con->modes,
1029 + struct drm_display_mode,
1030 + head);
1031 ++ } else {
1032 ++ *p_mode = mode;
1033 + }
1034 +
1035 + out_unlock:
1036 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
1037 +index 5702219ec38f6..7b54c1f56208f 100644
1038 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
1039 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
1040 +@@ -81,7 +81,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
1041 + struct vmw_legacy_display_unit *entry;
1042 + struct drm_framebuffer *fb = NULL;
1043 + struct drm_crtc *crtc = NULL;
1044 +- int i = 0;
1045 ++ int i;
1046 +
1047 + /* If there is no display topology the host just assumes
1048 + * that the guest will set the same layout as the host.
1049 +@@ -92,12 +92,11 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
1050 + crtc = &entry->base.crtc;
1051 + w = max(w, crtc->x + crtc->mode.hdisplay);
1052 + h = max(h, crtc->y + crtc->mode.vdisplay);
1053 +- i++;
1054 + }
1055 +
1056 + if (crtc == NULL)
1057 + return 0;
1058 +- fb = entry->base.crtc.primary->state->fb;
1059 ++ fb = crtc->primary->state->fb;
1060 +
1061 + return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0],
1062 + fb->format->cpp[0] * 8,
1063 +diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c
1064 +index eeca50d9a1ee4..aa1d4b6d278f7 100644
1065 +--- a/drivers/gpu/ipu-v3/ipu-image-convert.c
1066 ++++ b/drivers/gpu/ipu-v3/ipu-image-convert.c
1067 +@@ -137,6 +137,17 @@ struct ipu_image_convert_ctx;
1068 + struct ipu_image_convert_chan;
1069 + struct ipu_image_convert_priv;
1070 +
1071 ++enum eof_irq_mask {
1072 ++ EOF_IRQ_IN = BIT(0),
1073 ++ EOF_IRQ_ROT_IN = BIT(1),
1074 ++ EOF_IRQ_OUT = BIT(2),
1075 ++ EOF_IRQ_ROT_OUT = BIT(3),
1076 ++};
1077 ++
1078 ++#define EOF_IRQ_COMPLETE (EOF_IRQ_IN | EOF_IRQ_OUT)
1079 ++#define EOF_IRQ_ROT_COMPLETE (EOF_IRQ_IN | EOF_IRQ_OUT | \
1080 ++ EOF_IRQ_ROT_IN | EOF_IRQ_ROT_OUT)
1081 ++
1082 + struct ipu_image_convert_ctx {
1083 + struct ipu_image_convert_chan *chan;
1084 +
1085 +@@ -173,6 +184,9 @@ struct ipu_image_convert_ctx {
1086 + /* where to place converted tile in dest image */
1087 + unsigned int out_tile_map[MAX_TILES];
1088 +
1089 ++ /* mask of completed EOF irqs at every tile conversion */
1090 ++ enum eof_irq_mask eof_mask;
1091 ++
1092 + struct list_head list;
1093 + };
1094 +
1095 +@@ -189,6 +203,8 @@ struct ipu_image_convert_chan {
1096 + struct ipuv3_channel *rotation_out_chan;
1097 +
1098 + /* the IPU end-of-frame irqs */
1099 ++ int in_eof_irq;
1100 ++ int rot_in_eof_irq;
1101 + int out_eof_irq;
1102 + int rot_out_eof_irq;
1103 +
1104 +@@ -1380,6 +1396,9 @@ static int convert_start(struct ipu_image_convert_run *run, unsigned int tile)
1105 + dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p tile %u -> %u\n",
1106 + __func__, chan->ic_task, ctx, run, tile, dst_tile);
1107 +
1108 ++ /* clear EOF irq mask */
1109 ++ ctx->eof_mask = 0;
1110 ++
1111 + if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
1112 + /* swap width/height for resizer */
1113 + dest_width = d_image->tile[dst_tile].height;
1114 +@@ -1615,7 +1634,7 @@ static bool ic_settings_changed(struct ipu_image_convert_ctx *ctx)
1115 + }
1116 +
1117 + /* hold irqlock when calling */
1118 +-static irqreturn_t do_irq(struct ipu_image_convert_run *run)
1119 ++static irqreturn_t do_tile_complete(struct ipu_image_convert_run *run)
1120 + {
1121 + struct ipu_image_convert_ctx *ctx = run->ctx;
1122 + struct ipu_image_convert_chan *chan = ctx->chan;
1123 +@@ -1700,6 +1719,7 @@ static irqreturn_t do_irq(struct ipu_image_convert_run *run)
1124 + ctx->cur_buf_num ^= 1;
1125 + }
1126 +
1127 ++ ctx->eof_mask = 0; /* clear EOF irq mask for next tile */
1128 + ctx->next_tile++;
1129 + return IRQ_HANDLED;
1130 + done:
1131 +@@ -1709,13 +1729,15 @@ done:
1132 + return IRQ_WAKE_THREAD;
1133 + }
1134 +
1135 +-static irqreturn_t norotate_irq(int irq, void *data)
1136 ++static irqreturn_t eof_irq(int irq, void *data)
1137 + {
1138 + struct ipu_image_convert_chan *chan = data;
1139 ++ struct ipu_image_convert_priv *priv = chan->priv;
1140 + struct ipu_image_convert_ctx *ctx;
1141 + struct ipu_image_convert_run *run;
1142 ++ irqreturn_t ret = IRQ_HANDLED;
1143 ++ bool tile_complete = false;
1144 + unsigned long flags;
1145 +- irqreturn_t ret;
1146 +
1147 + spin_lock_irqsave(&chan->irqlock, flags);
1148 +
1149 +@@ -1728,46 +1750,33 @@ static irqreturn_t norotate_irq(int irq, void *data)
1150 +
1151 + ctx = run->ctx;
1152 +
1153 +- if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
1154 +- /* this is a rotation operation, just ignore */
1155 +- spin_unlock_irqrestore(&chan->irqlock, flags);
1156 +- return IRQ_HANDLED;
1157 +- }
1158 +-
1159 +- ret = do_irq(run);
1160 +-out:
1161 +- spin_unlock_irqrestore(&chan->irqlock, flags);
1162 +- return ret;
1163 +-}
1164 +-
1165 +-static irqreturn_t rotate_irq(int irq, void *data)
1166 +-{
1167 +- struct ipu_image_convert_chan *chan = data;
1168 +- struct ipu_image_convert_priv *priv = chan->priv;
1169 +- struct ipu_image_convert_ctx *ctx;
1170 +- struct ipu_image_convert_run *run;
1171 +- unsigned long flags;
1172 +- irqreturn_t ret;
1173 +-
1174 +- spin_lock_irqsave(&chan->irqlock, flags);
1175 +-
1176 +- /* get current run and its context */
1177 +- run = chan->current_run;
1178 +- if (!run) {
1179 ++ if (irq == chan->in_eof_irq) {
1180 ++ ctx->eof_mask |= EOF_IRQ_IN;
1181 ++ } else if (irq == chan->out_eof_irq) {
1182 ++ ctx->eof_mask |= EOF_IRQ_OUT;
1183 ++ } else if (irq == chan->rot_in_eof_irq ||
1184 ++ irq == chan->rot_out_eof_irq) {
1185 ++ if (!ipu_rot_mode_is_irt(ctx->rot_mode)) {
1186 ++ /* this was NOT a rotation op, shouldn't happen */
1187 ++ dev_err(priv->ipu->dev,
1188 ++ "Unexpected rotation interrupt\n");
1189 ++ goto out;
1190 ++ }
1191 ++ ctx->eof_mask |= (irq == chan->rot_in_eof_irq) ?
1192 ++ EOF_IRQ_ROT_IN : EOF_IRQ_ROT_OUT;
1193 ++ } else {
1194 ++ dev_err(priv->ipu->dev, "Received unknown irq %d\n", irq);
1195 + ret = IRQ_NONE;
1196 + goto out;
1197 + }
1198 +
1199 +- ctx = run->ctx;
1200 +-
1201 +- if (!ipu_rot_mode_is_irt(ctx->rot_mode)) {
1202 +- /* this was NOT a rotation operation, shouldn't happen */
1203 +- dev_err(priv->ipu->dev, "Unexpected rotation interrupt\n");
1204 +- spin_unlock_irqrestore(&chan->irqlock, flags);
1205 +- return IRQ_HANDLED;
1206 +- }
1207 ++ if (ipu_rot_mode_is_irt(ctx->rot_mode))
1208 ++ tile_complete = (ctx->eof_mask == EOF_IRQ_ROT_COMPLETE);
1209 ++ else
1210 ++ tile_complete = (ctx->eof_mask == EOF_IRQ_COMPLETE);
1211 +
1212 +- ret = do_irq(run);
1213 ++ if (tile_complete)
1214 ++ ret = do_tile_complete(run);
1215 + out:
1216 + spin_unlock_irqrestore(&chan->irqlock, flags);
1217 + return ret;
1218 +@@ -1801,6 +1810,10 @@ static void force_abort(struct ipu_image_convert_ctx *ctx)
1219 +
1220 + static void release_ipu_resources(struct ipu_image_convert_chan *chan)
1221 + {
1222 ++ if (chan->in_eof_irq >= 0)
1223 ++ free_irq(chan->in_eof_irq, chan);
1224 ++ if (chan->rot_in_eof_irq >= 0)
1225 ++ free_irq(chan->rot_in_eof_irq, chan);
1226 + if (chan->out_eof_irq >= 0)
1227 + free_irq(chan->out_eof_irq, chan);
1228 + if (chan->rot_out_eof_irq >= 0)
1229 +@@ -1819,7 +1832,27 @@ static void release_ipu_resources(struct ipu_image_convert_chan *chan)
1230 +
1231 + chan->in_chan = chan->out_chan = chan->rotation_in_chan =
1232 + chan->rotation_out_chan = NULL;
1233 +- chan->out_eof_irq = chan->rot_out_eof_irq = -1;
1234 ++ chan->in_eof_irq = -1;
1235 ++ chan->rot_in_eof_irq = -1;
1236 ++ chan->out_eof_irq = -1;
1237 ++ chan->rot_out_eof_irq = -1;
1238 ++}
1239 ++
1240 ++static int get_eof_irq(struct ipu_image_convert_chan *chan,
1241 ++ struct ipuv3_channel *channel)
1242 ++{
1243 ++ struct ipu_image_convert_priv *priv = chan->priv;
1244 ++ int ret, irq;
1245 ++
1246 ++ irq = ipu_idmac_channel_irq(priv->ipu, channel, IPU_IRQ_EOF);
1247 ++
1248 ++ ret = request_threaded_irq(irq, eof_irq, do_bh, 0, "ipu-ic", chan);
1249 ++ if (ret < 0) {
1250 ++ dev_err(priv->ipu->dev, "could not acquire irq %d\n", irq);
1251 ++ return ret;
1252 ++ }
1253 ++
1254 ++ return irq;
1255 + }
1256 +
1257 + static int get_ipu_resources(struct ipu_image_convert_chan *chan)
1258 +@@ -1855,31 +1888,33 @@ static int get_ipu_resources(struct ipu_image_convert_chan *chan)
1259 + }
1260 +
1261 + /* acquire the EOF interrupts */
1262 +- chan->out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
1263 +- chan->out_chan,
1264 +- IPU_IRQ_EOF);
1265 ++ ret = get_eof_irq(chan, chan->in_chan);
1266 ++ if (ret < 0) {
1267 ++ chan->in_eof_irq = -1;
1268 ++ goto err;
1269 ++ }
1270 ++ chan->in_eof_irq = ret;
1271 +
1272 +- ret = request_threaded_irq(chan->out_eof_irq, norotate_irq, do_bh,
1273 +- 0, "ipu-ic", chan);
1274 ++ ret = get_eof_irq(chan, chan->rotation_in_chan);
1275 + if (ret < 0) {
1276 +- dev_err(priv->ipu->dev, "could not acquire irq %d\n",
1277 +- chan->out_eof_irq);
1278 +- chan->out_eof_irq = -1;
1279 ++ chan->rot_in_eof_irq = -1;
1280 + goto err;
1281 + }
1282 ++ chan->rot_in_eof_irq = ret;
1283 +
1284 +- chan->rot_out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
1285 +- chan->rotation_out_chan,
1286 +- IPU_IRQ_EOF);
1287 ++ ret = get_eof_irq(chan, chan->out_chan);
1288 ++ if (ret < 0) {
1289 ++ chan->out_eof_irq = -1;
1290 ++ goto err;
1291 ++ }
1292 ++ chan->out_eof_irq = ret;
1293 +
1294 +- ret = request_threaded_irq(chan->rot_out_eof_irq, rotate_irq, do_bh,
1295 +- 0, "ipu-ic", chan);
1296 ++ ret = get_eof_irq(chan, chan->rotation_out_chan);
1297 + if (ret < 0) {
1298 +- dev_err(priv->ipu->dev, "could not acquire irq %d\n",
1299 +- chan->rot_out_eof_irq);
1300 + chan->rot_out_eof_irq = -1;
1301 + goto err;
1302 + }
1303 ++ chan->rot_out_eof_irq = ret;
1304 +
1305 + return 0;
1306 + err:
1307 +@@ -2458,6 +2493,8 @@ int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev)
1308 + chan->ic_task = i;
1309 + chan->priv = priv;
1310 + chan->dma_ch = &image_convert_dma_chan[i];
1311 ++ chan->in_eof_irq = -1;
1312 ++ chan->rot_in_eof_irq = -1;
1313 + chan->out_eof_irq = -1;
1314 + chan->rot_out_eof_irq = -1;
1315 +
1316 +diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
1317 +index 03475f1799730..dd9661c11782a 100644
1318 +--- a/drivers/i2c/busses/i2c-bcm-iproc.c
1319 ++++ b/drivers/i2c/busses/i2c-bcm-iproc.c
1320 +@@ -1037,7 +1037,7 @@ static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave)
1321 + if (!iproc_i2c->slave)
1322 + return -EINVAL;
1323 +
1324 +- iproc_i2c->slave = NULL;
1325 ++ disable_irq(iproc_i2c->irq);
1326 +
1327 + /* disable all slave interrupts */
1328 + tmp = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
1329 +@@ -1050,6 +1050,17 @@ static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave)
1330 + tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT);
1331 + iproc_i2c_wr_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET, tmp);
1332 +
1333 ++ /* flush TX/RX FIFOs */
1334 ++ tmp = (BIT(S_FIFO_RX_FLUSH_SHIFT) | BIT(S_FIFO_TX_FLUSH_SHIFT));
1335 ++ iproc_i2c_wr_reg(iproc_i2c, S_FIFO_CTRL_OFFSET, tmp);
1336 ++
1337 ++ /* clear all pending slave interrupts */
1338 ++ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, ISR_MASK_SLAVE);
1339 ++
1340 ++ iproc_i2c->slave = NULL;
1341 ++
1342 ++ enable_irq(iproc_i2c->irq);
1343 ++
1344 + return 0;
1345 + }
1346 +
1347 +diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
1348 +index 36af8fdb66586..0b90aa0318df3 100644
1349 +--- a/drivers/i2c/busses/i2c-rcar.c
1350 ++++ b/drivers/i2c/busses/i2c-rcar.c
1351 +@@ -580,13 +580,14 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
1352 + rcar_i2c_write(priv, ICSIER, SDR | SSR | SAR);
1353 + }
1354 +
1355 +- rcar_i2c_write(priv, ICSSR, ~SAR & 0xff);
1356 ++ /* Clear SSR, too, because of old STOPs to other clients than us */
1357 ++ rcar_i2c_write(priv, ICSSR, ~(SAR | SSR) & 0xff);
1358 + }
1359 +
1360 + /* master sent stop */
1361 + if (ssr_filtered & SSR) {
1362 + i2c_slave_event(priv->slave, I2C_SLAVE_STOP, &value);
1363 +- rcar_i2c_write(priv, ICSIER, SAR | SSR);
1364 ++ rcar_i2c_write(priv, ICSIER, SAR);
1365 + rcar_i2c_write(priv, ICSSR, ~SSR & 0xff);
1366 + }
1367 +
1368 +@@ -850,7 +851,7 @@ static int rcar_reg_slave(struct i2c_client *slave)
1369 + priv->slave = slave;
1370 + rcar_i2c_write(priv, ICSAR, slave->addr);
1371 + rcar_i2c_write(priv, ICSSR, 0);
1372 +- rcar_i2c_write(priv, ICSIER, SAR | SSR);
1373 ++ rcar_i2c_write(priv, ICSIER, SAR);
1374 + rcar_i2c_write(priv, ICSCR, SIE | SDBS);
1375 +
1376 + return 0;
1377 +@@ -862,12 +863,14 @@ static int rcar_unreg_slave(struct i2c_client *slave)
1378 +
1379 + WARN_ON(!priv->slave);
1380 +
1381 +- /* disable irqs and ensure none is running before clearing ptr */
1382 ++ /* ensure no irq is running before clearing ptr */
1383 ++ disable_irq(priv->irq);
1384 + rcar_i2c_write(priv, ICSIER, 0);
1385 +- rcar_i2c_write(priv, ICSCR, 0);
1386 ++ rcar_i2c_write(priv, ICSSR, 0);
1387 ++ enable_irq(priv->irq);
1388 ++ rcar_i2c_write(priv, ICSCR, SDBS);
1389 + rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
1390 +
1391 +- synchronize_irq(priv->irq);
1392 + priv->slave = NULL;
1393 +
1394 + pm_runtime_put(rcar_i2c_priv_to_dev(priv));
1395 +diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
1396 +index 2d897e64c6a9e..424922cad1e39 100644
1397 +--- a/drivers/iio/dac/ad5592r-base.c
1398 ++++ b/drivers/iio/dac/ad5592r-base.c
1399 +@@ -416,7 +416,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
1400 + s64 tmp = *val * (3767897513LL / 25LL);
1401 + *val = div_s64_rem(tmp, 1000000000LL, val2);
1402 +
1403 +- ret = IIO_VAL_INT_PLUS_MICRO;
1404 ++ return IIO_VAL_INT_PLUS_MICRO;
1405 + } else {
1406 + int mult;
1407 +
1408 +@@ -447,7 +447,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
1409 + ret = IIO_VAL_INT;
1410 + break;
1411 + default:
1412 +- ret = -EINVAL;
1413 ++ return -EINVAL;
1414 + }
1415 +
1416 + unlock:
1417 +diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
1418 +index 11210bf7fd61b..f454d63008d69 100644
1419 +--- a/drivers/infiniband/core/counters.c
1420 ++++ b/drivers/infiniband/core/counters.c
1421 +@@ -284,7 +284,7 @@ int rdma_counter_bind_qp_auto(struct ib_qp *qp, u8 port)
1422 + struct rdma_counter *counter;
1423 + int ret;
1424 +
1425 +- if (!qp->res.valid)
1426 ++ if (!qp->res.valid || rdma_is_kernel_res(&qp->res))
1427 + return 0;
1428 +
1429 + if (!rdma_is_port_valid(dev, port))
1430 +@@ -487,7 +487,7 @@ int rdma_counter_bind_qpn(struct ib_device *dev, u8 port,
1431 + goto err;
1432 + }
1433 +
1434 +- if (counter->res.task != qp->res.task) {
1435 ++ if (rdma_is_kernel_res(&counter->res) != rdma_is_kernel_res(&qp->res)) {
1436 + ret = -EINVAL;
1437 + goto err_task;
1438 + }
1439 +diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
1440 +index e2ddcb0dc4ee3..c398d1a64614c 100644
1441 +--- a/drivers/infiniband/core/uverbs_cmd.c
1442 ++++ b/drivers/infiniband/core/uverbs_cmd.c
1443 +@@ -757,6 +757,7 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
1444 + mr->uobject = uobj;
1445 + atomic_inc(&pd->usecnt);
1446 + mr->res.type = RDMA_RESTRACK_MR;
1447 ++ mr->iova = cmd.hca_va;
1448 + rdma_restrack_uadd(&mr->res);
1449 +
1450 + uobj->object = mr;
1451 +@@ -847,6 +848,9 @@ static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs)
1452 + atomic_dec(&old_pd->usecnt);
1453 + }
1454 +
1455 ++ if (cmd.flags & IB_MR_REREG_TRANS)
1456 ++ mr->iova = cmd.hca_va;
1457 ++
1458 + memset(&resp, 0, sizeof(resp));
1459 + resp.lkey = mr->lkey;
1460 + resp.rkey = mr->rkey;
1461 +diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
1462 +index 35c284af574da..dcb58cef336d9 100644
1463 +--- a/drivers/infiniband/hw/cxgb4/mem.c
1464 ++++ b/drivers/infiniband/hw/cxgb4/mem.c
1465 +@@ -399,7 +399,6 @@ static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
1466 + mmid = stag >> 8;
1467 + mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
1468 + mhp->ibmr.length = mhp->attr.len;
1469 +- mhp->ibmr.iova = mhp->attr.va_fbo;
1470 + mhp->ibmr.page_size = 1U << (mhp->attr.page_size + 12);
1471 + pr_debug("mmid 0x%x mhp %p\n", mmid, mhp);
1472 + return xa_insert_irq(&mhp->rhp->mrs, mmid, mhp, GFP_KERNEL);
1473 +diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
1474 +index 6ae503cfc5264..9114cb7307692 100644
1475 +--- a/drivers/infiniband/hw/mlx4/mr.c
1476 ++++ b/drivers/infiniband/hw/mlx4/mr.c
1477 +@@ -439,7 +439,6 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1478 +
1479 + mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
1480 + mr->ibmr.length = length;
1481 +- mr->ibmr.iova = virt_addr;
1482 + mr->ibmr.page_size = 1U << shift;
1483 +
1484 + return &mr->ibmr;
1485 +diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
1486 +index 0e5f27caf2b2d..50a3557386090 100644
1487 +--- a/drivers/infiniband/ulp/ipoib/ipoib.h
1488 ++++ b/drivers/infiniband/ulp/ipoib/ipoib.h
1489 +@@ -515,7 +515,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev);
1490 +
1491 + int ipoib_ib_dev_open_default(struct net_device *dev);
1492 + int ipoib_ib_dev_open(struct net_device *dev);
1493 +-int ipoib_ib_dev_stop(struct net_device *dev);
1494 ++void ipoib_ib_dev_stop(struct net_device *dev);
1495 + void ipoib_ib_dev_up(struct net_device *dev);
1496 + void ipoib_ib_dev_down(struct net_device *dev);
1497 + int ipoib_ib_dev_stop_default(struct net_device *dev);
1498 +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
1499 +index da3c5315bbb51..494f413dc3c6c 100644
1500 +--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
1501 ++++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
1502 +@@ -670,13 +670,12 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
1503 + return rc;
1504 + }
1505 +
1506 +-static void __ipoib_reap_ah(struct net_device *dev)
1507 ++static void ipoib_reap_dead_ahs(struct ipoib_dev_priv *priv)
1508 + {
1509 +- struct ipoib_dev_priv *priv = ipoib_priv(dev);
1510 + struct ipoib_ah *ah, *tah;
1511 + unsigned long flags;
1512 +
1513 +- netif_tx_lock_bh(dev);
1514 ++ netif_tx_lock_bh(priv->dev);
1515 + spin_lock_irqsave(&priv->lock, flags);
1516 +
1517 + list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
1518 +@@ -687,37 +686,37 @@ static void __ipoib_reap_ah(struct net_device *dev)
1519 + }
1520 +
1521 + spin_unlock_irqrestore(&priv->lock, flags);
1522 +- netif_tx_unlock_bh(dev);
1523 ++ netif_tx_unlock_bh(priv->dev);
1524 + }
1525 +
1526 + void ipoib_reap_ah(struct work_struct *work)
1527 + {
1528 + struct ipoib_dev_priv *priv =
1529 + container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
1530 +- struct net_device *dev = priv->dev;
1531 +
1532 +- __ipoib_reap_ah(dev);
1533 ++ ipoib_reap_dead_ahs(priv);
1534 +
1535 + if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
1536 + queue_delayed_work(priv->wq, &priv->ah_reap_task,
1537 + round_jiffies_relative(HZ));
1538 + }
1539 +
1540 +-static void ipoib_flush_ah(struct net_device *dev)
1541 ++static void ipoib_start_ah_reaper(struct ipoib_dev_priv *priv)
1542 + {
1543 +- struct ipoib_dev_priv *priv = ipoib_priv(dev);
1544 +-
1545 +- cancel_delayed_work(&priv->ah_reap_task);
1546 +- flush_workqueue(priv->wq);
1547 +- ipoib_reap_ah(&priv->ah_reap_task.work);
1548 ++ clear_bit(IPOIB_STOP_REAPER, &priv->flags);
1549 ++ queue_delayed_work(priv->wq, &priv->ah_reap_task,
1550 ++ round_jiffies_relative(HZ));
1551 + }
1552 +
1553 +-static void ipoib_stop_ah(struct net_device *dev)
1554 ++static void ipoib_stop_ah_reaper(struct ipoib_dev_priv *priv)
1555 + {
1556 +- struct ipoib_dev_priv *priv = ipoib_priv(dev);
1557 +-
1558 + set_bit(IPOIB_STOP_REAPER, &priv->flags);
1559 +- ipoib_flush_ah(dev);
1560 ++ cancel_delayed_work(&priv->ah_reap_task);
1561 ++ /*
1562 ++ * After ipoib_stop_ah_reaper() we always go through
1563 ++ * ipoib_reap_dead_ahs(), which ensures the work is really stopped and
1564 ++ * does a final flush of the dead_ahs list
1565 ++ */
1566 + }
1567 +
1568 + static int recvs_pending(struct net_device *dev)
1569 +@@ -846,18 +845,6 @@ timeout:
1570 + return 0;
1571 + }
1572 +
1573 +-int ipoib_ib_dev_stop(struct net_device *dev)
1574 +-{
1575 +- struct ipoib_dev_priv *priv = ipoib_priv(dev);
1576 +-
1577 +- priv->rn_ops->ndo_stop(dev);
1578 +-
1579 +- clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
1580 +- ipoib_flush_ah(dev);
1581 +-
1582 +- return 0;
1583 +-}
1584 +-
1585 + int ipoib_ib_dev_open_default(struct net_device *dev)
1586 + {
1587 + struct ipoib_dev_priv *priv = ipoib_priv(dev);
1588 +@@ -901,10 +888,7 @@ int ipoib_ib_dev_open(struct net_device *dev)
1589 + return -1;
1590 + }
1591 +
1592 +- clear_bit(IPOIB_STOP_REAPER, &priv->flags);
1593 +- queue_delayed_work(priv->wq, &priv->ah_reap_task,
1594 +- round_jiffies_relative(HZ));
1595 +-
1596 ++ ipoib_start_ah_reaper(priv);
1597 + if (priv->rn_ops->ndo_open(dev)) {
1598 + pr_warn("%s: Failed to open dev\n", dev->name);
1599 + goto dev_stop;
1600 +@@ -915,13 +899,20 @@ int ipoib_ib_dev_open(struct net_device *dev)
1601 + return 0;
1602 +
1603 + dev_stop:
1604 +- set_bit(IPOIB_STOP_REAPER, &priv->flags);
1605 +- cancel_delayed_work(&priv->ah_reap_task);
1606 +- set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
1607 +- ipoib_ib_dev_stop(dev);
1608 ++ ipoib_stop_ah_reaper(priv);
1609 + return -1;
1610 + }
1611 +
1612 ++void ipoib_ib_dev_stop(struct net_device *dev)
1613 ++{
1614 ++ struct ipoib_dev_priv *priv = ipoib_priv(dev);
1615 ++
1616 ++ priv->rn_ops->ndo_stop(dev);
1617 ++
1618 ++ clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
1619 ++ ipoib_stop_ah_reaper(priv);
1620 ++}
1621 ++
1622 + void ipoib_pkey_dev_check_presence(struct net_device *dev)
1623 + {
1624 + struct ipoib_dev_priv *priv = ipoib_priv(dev);
1625 +@@ -1232,7 +1223,7 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
1626 + ipoib_mcast_dev_flush(dev);
1627 + if (oper_up)
1628 + set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
1629 +- ipoib_flush_ah(dev);
1630 ++ ipoib_reap_dead_ahs(priv);
1631 + }
1632 +
1633 + if (level >= IPOIB_FLUSH_NORMAL)
1634 +@@ -1307,7 +1298,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
1635 + * the neighbor garbage collection is stopped and reaped.
1636 + * That should all be done now, so make a final ah flush.
1637 + */
1638 +- ipoib_stop_ah(dev);
1639 ++ ipoib_reap_dead_ahs(priv);
1640 +
1641 + clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
1642 +
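The reaper refactor above splits one flush helper into explicit start/stop pairs. The key detail is that the stop side sets the STOP flag before cancelling the delayed work, so an instance that is already running will not requeue itself; a final reap call elsewhere drains anything left on the list. A condensed sketch of the pattern, with hypothetical example_* names:

static void reaper_work(struct work_struct *work)
{
	struct example_priv *priv =
		container_of(work, struct example_priv, reap_task.work);

	reap_dead_entries(priv);	/* hypothetical list cleanup */
	if (!test_bit(STOP_REAPER, &priv->flags))
		queue_delayed_work(priv->wq, &priv->reap_task,
				   round_jiffies_relative(HZ));
}

static void stop_reaper(struct example_priv *priv)
{
	set_bit(STOP_REAPER, &priv->flags);	/* forbid requeueing first */
	cancel_delayed_work(&priv->reap_task);
	/* callers follow up with reap_dead_entries() to drain leftovers */
}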
1643 +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
1644 +index 4fd095fd63b6f..044bcacad6e48 100644
1645 +--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
1646 ++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
1647 +@@ -1979,6 +1979,8 @@ static void ipoib_ndo_uninit(struct net_device *dev)
1648 +
1649 + /* no more works over the priv->wq */
1650 + if (priv->wq) {
1651 ++ /* See ipoib_mcast_carrier_on_task() */
1652 ++ WARN_ON(test_bit(IPOIB_FLAG_OPER_UP, &priv->flags));
1653 + flush_workqueue(priv->wq);
1654 + destroy_workqueue(priv->wq);
1655 + priv->wq = NULL;
1656 +diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c
1657 +index e99d9bf1a267d..e78c4c7eda34d 100644
1658 +--- a/drivers/input/mouse/sentelic.c
1659 ++++ b/drivers/input/mouse/sentelic.c
1660 +@@ -441,7 +441,7 @@ static ssize_t fsp_attr_set_setreg(struct psmouse *psmouse, void *data,
1661 +
1662 + fsp_reg_write_enable(psmouse, false);
1663 +
1664 +- return count;
1665 ++ return retval;
1666 + }
1667 +
1668 + PSMOUSE_DEFINE_WO_ATTR(setreg, S_IWUSR, NULL, fsp_attr_set_setreg);
1669 +diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
1670 +index 8e19bfa94121e..a99afb5d9011c 100644
1671 +--- a/drivers/iommu/omap-iommu-debug.c
1672 ++++ b/drivers/iommu/omap-iommu-debug.c
1673 +@@ -98,8 +98,11 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
1674 + mutex_lock(&iommu_debug_lock);
1675 +
1676 + bytes = omap_iommu_dump_ctx(obj, p, count);
1677 ++ if (bytes < 0)
1678 ++ goto err;
1679 + bytes = simple_read_from_buffer(userbuf, count, ppos, buf, bytes);
1680 +
1681 ++err:
1682 + mutex_unlock(&iommu_debug_lock);
1683 + kfree(buf);
1684 +
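The fix above guards against feeding a negative length into simple_read_from_buffer(), which takes a size and would misinterpret the error code as a huge count. A minimal sketch of the corrected shape, assuming a hypothetical dump_ctx() that returns bytes written or a negative errno:

static DEFINE_MUTEX(example_lock);

static ssize_t example_read(struct file *file, char __user *userbuf,
			    size_t count, loff_t *ppos)
{
	char *buf = kmalloc(count, GFP_KERNEL);
	ssize_t bytes;

	if (!buf)
		return -ENOMEM;

	mutex_lock(&example_lock);
	bytes = dump_ctx(buf, count);	/* may return -errno */
	if (bytes < 0)
		goto out;		/* propagate, skip the copy */
	bytes = simple_read_from_buffer(userbuf, count, ppos, buf, bytes);
out:
	mutex_unlock(&example_lock);
	kfree(buf);
	return bytes;
}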
1685 +diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
1686 +index 263cf9240b168..7966b19ceba79 100644
1687 +--- a/drivers/irqchip/irq-gic-v3-its.c
1688 ++++ b/drivers/irqchip/irq-gic-v3-its.c
1689 +@@ -2581,6 +2581,7 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
1690 + msi_alloc_info_t *info = args;
1691 + struct its_device *its_dev = info->scratchpad[0].ptr;
1692 + struct its_node *its = its_dev->its;
1693 ++ struct irq_data *irqd;
1694 + irq_hw_number_t hwirq;
1695 + int err;
1696 + int i;
1697 +@@ -2600,7 +2601,9 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
1698 +
1699 + irq_domain_set_hwirq_and_chip(domain, virq + i,
1700 + hwirq + i, &its_irq_chip, its_dev);
1701 +- irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
1702 ++ irqd = irq_get_irq_data(virq + i);
1703 ++ irqd_set_single_target(irqd);
1704 ++ irqd_set_affinity_on_activate(irqd);
1705 + pr_debug("ID:%d pID:%d vID:%d\n",
1706 + (int)(hwirq + i - its_dev->event_map.lpi_base),
1707 + (int)(hwirq + i), virq + i);
1708 +diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
1709 +index 3d2b63585da95..217c838a1b405 100644
1710 +--- a/drivers/md/bcache/bcache.h
1711 ++++ b/drivers/md/bcache/bcache.h
1712 +@@ -264,7 +264,7 @@ struct bcache_device {
1713 + #define BCACHE_DEV_UNLINK_DONE 2
1714 + #define BCACHE_DEV_WB_RUNNING 3
1715 + #define BCACHE_DEV_RATE_DW_RUNNING 4
1716 +- unsigned int nr_stripes;
1717 ++ int nr_stripes;
1718 + unsigned int stripe_size;
1719 + atomic_t *stripe_sectors_dirty;
1720 + unsigned long *full_dirty_stripes;
1721 +diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
1722 +index 08768796b5439..fda68c00ddd53 100644
1723 +--- a/drivers/md/bcache/bset.c
1724 ++++ b/drivers/md/bcache/bset.c
1725 +@@ -321,7 +321,7 @@ int bch_btree_keys_alloc(struct btree_keys *b,
1726 +
1727 + b->page_order = page_order;
1728 +
1729 +- t->data = (void *) __get_free_pages(gfp, b->page_order);
1730 ++ t->data = (void *) __get_free_pages(__GFP_COMP|gfp, b->page_order);
1731 + if (!t->data)
1732 + goto err;
1733 +
1734 +diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
1735 +index 3c1109fceb2fb..46556bde032e2 100644
1736 +--- a/drivers/md/bcache/btree.c
1737 ++++ b/drivers/md/bcache/btree.c
1738 +@@ -840,7 +840,7 @@ int bch_btree_cache_alloc(struct cache_set *c)
1739 + mutex_init(&c->verify_lock);
1740 +
1741 + c->verify_ondisk = (void *)
1742 +- __get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));
1743 ++ __get_free_pages(GFP_KERNEL|__GFP_COMP, ilog2(bucket_pages(c)));
1744 +
1745 + c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
1746 +
1747 +diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
1748 +index 6730820780b06..8250d2d1d780c 100644
1749 +--- a/drivers/md/bcache/journal.c
1750 ++++ b/drivers/md/bcache/journal.c
1751 +@@ -1002,8 +1002,8 @@ int bch_journal_alloc(struct cache_set *c)
1752 + j->w[1].c = c;
1753 +
1754 + if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
1755 +- !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
1756 +- !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
1757 ++ !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)) ||
1758 ++ !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)))
1759 + return -ENOMEM;
1760 +
1761 + return 0;
1762 +diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
1763 +index 168d647078591..25ad64a3919f6 100644
1764 +--- a/drivers/md/bcache/super.c
1765 ++++ b/drivers/md/bcache/super.c
1766 +@@ -1754,7 +1754,7 @@ void bch_cache_set_unregister(struct cache_set *c)
1767 + }
1768 +
1769 + #define alloc_bucket_pages(gfp, c) \
1770 +- ((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c))))
1771 ++ ((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(bucket_pages(c))))
1772 +
1773 + struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
1774 + {
1775 +diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
1776 +index d60268fe49e10..0b02210ab4355 100644
1777 +--- a/drivers/md/bcache/writeback.c
1778 ++++ b/drivers/md/bcache/writeback.c
1779 +@@ -519,15 +519,19 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
1780 + uint64_t offset, int nr_sectors)
1781 + {
1782 + struct bcache_device *d = c->devices[inode];
1783 +- unsigned int stripe_offset, stripe, sectors_dirty;
1784 ++ unsigned int stripe_offset, sectors_dirty;
1785 ++ int stripe;
1786 +
1787 + if (!d)
1788 + return;
1789 +
1790 ++ stripe = offset_to_stripe(d, offset);
1791 ++ if (stripe < 0)
1792 ++ return;
1793 ++
1794 + if (UUID_FLASH_ONLY(&c->uuids[inode]))
1795 + atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors);
1796 +
1797 +- stripe = offset_to_stripe(d, offset);
1798 + stripe_offset = offset & (d->stripe_size - 1);
1799 +
1800 + while (nr_sectors) {
1801 +@@ -567,12 +571,12 @@ static bool dirty_pred(struct keybuf *buf, struct bkey *k)
1802 + static void refill_full_stripes(struct cached_dev *dc)
1803 + {
1804 + struct keybuf *buf = &dc->writeback_keys;
1805 +- unsigned int start_stripe, stripe, next_stripe;
1806 ++ unsigned int start_stripe, next_stripe;
1807 ++ int stripe;
1808 + bool wrapped = false;
1809 +
1810 + stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));
1811 +-
1812 +- if (stripe >= dc->disk.nr_stripes)
1813 ++ if (stripe < 0)
1814 + stripe = 0;
1815 +
1816 + start_stripe = stripe;
1817 +diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
1818 +index 4e4c6810dc3c7..c4ff76037227b 100644
1819 +--- a/drivers/md/bcache/writeback.h
1820 ++++ b/drivers/md/bcache/writeback.h
1821 +@@ -33,10 +33,22 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
1822 + return ret;
1823 + }
1824 +
1825 +-static inline unsigned int offset_to_stripe(struct bcache_device *d,
1826 ++static inline int offset_to_stripe(struct bcache_device *d,
1827 + uint64_t offset)
1828 + {
1829 + do_div(offset, d->stripe_size);
1830 ++
1831 ++ /* d->nr_stripes is in range [1, INT_MAX] */
1832 ++ if (unlikely(offset >= d->nr_stripes)) {
1833 ++ pr_err("Invalid stripe %llu (>= nr_stripes %d).\n",
1834 ++ offset, d->nr_stripes);
1835 ++ return -EINVAL;
1836 ++ }
1837 ++
1838 ++ /*
1839 ++ * Here offset is definitely smaller than INT_MAX;
1840 ++ * returning it as an int will never overflow.
1841 ++ */
1842 + return offset;
1843 + }
1844 +
1845 +@@ -44,7 +56,10 @@ static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
1846 + uint64_t offset,
1847 + unsigned int nr_sectors)
1848 + {
1849 +- unsigned int stripe = offset_to_stripe(&dc->disk, offset);
1850 ++ int stripe = offset_to_stripe(&dc->disk, offset);
1851 ++
1852 ++ if (stripe < 0)
1853 ++ return false;
1854 +
1855 + while (1) {
1856 + if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
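As the new comment in offset_to_stripe() notes, nr_stripes is in [1, INT_MAX], so the helper can validate the 64-bit quotient before narrowing it, and every caller only has to test for a negative return. A generic sketch of that bounds-checked conversion, assuming a nonzero unit:

static int offset_to_index(u64 offset, unsigned int unit, int nr)
{
	do_div(offset, unit);	/* 64-bit divide helper: offset /= unit */
	if (offset >= nr)
		return -EINVAL;	/* out of range: caller must not index */
	/* offset < nr <= INT_MAX, so the narrowing return is safe */
	return offset;
}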
1857 +diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
1858 +index 3f8577e2c13be..2bd2444ad99c6 100644
1859 +--- a/drivers/md/dm-rq.c
1860 ++++ b/drivers/md/dm-rq.c
1861 +@@ -70,9 +70,6 @@ void dm_start_queue(struct request_queue *q)
1862 +
1863 + void dm_stop_queue(struct request_queue *q)
1864 + {
1865 +- if (blk_mq_queue_stopped(q))
1866 +- return;
1867 +-
1868 + blk_mq_quiesce_queue(q);
1869 + }
1870 +
1871 +diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
1872 +index 73fd50e779754..d50737ec40394 100644
1873 +--- a/drivers/md/md-cluster.c
1874 ++++ b/drivers/md/md-cluster.c
1875 +@@ -1139,6 +1139,7 @@ static int resize_bitmaps(struct mddev *mddev, sector_t newsize, sector_t oldsiz
1876 + bitmap = get_bitmap_from_slot(mddev, i);
1877 + if (IS_ERR(bitmap)) {
1878 + pr_err("can't get bitmap from slot %d\n", i);
1879 ++ bitmap = NULL;
1880 + goto out;
1881 + }
1882 + counts = &bitmap->counts;
1883 +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
1884 +index a3cbc9f4fec17..02acd5d5a8488 100644
1885 +--- a/drivers/md/raid5.c
1886 ++++ b/drivers/md/raid5.c
1887 +@@ -3604,6 +3604,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
1888 + * is missing/faulty, then we need to read everything we can.
1889 + */
1890 + if (sh->raid_conf->level != 6 &&
1891 ++ sh->raid_conf->rmw_level != PARITY_DISABLE_RMW &&
1892 + sh->sector < sh->raid_conf->mddev->recovery_cp)
1893 + /* reconstruct-write isn't being forced */
1894 + return 0;
1895 +@@ -4839,7 +4840,7 @@ static void handle_stripe(struct stripe_head *sh)
1896 + * or to load a block that is being partially written.
1897 + */
1898 + if (s.to_read || s.non_overwrite
1899 +- || (conf->level == 6 && s.to_write && s.failed)
1900 ++ || (s.to_write && s.failed)
1901 + || (s.syncing && (s.uptodate + s.compute < disks))
1902 + || s.replacing
1903 + || s.expanding)
1904 +diff --git a/drivers/media/platform/rockchip/rga/rga-hw.c b/drivers/media/platform/rockchip/rga/rga-hw.c
1905 +index 4be6dcf292fff..aaa96f256356b 100644
1906 +--- a/drivers/media/platform/rockchip/rga/rga-hw.c
1907 ++++ b/drivers/media/platform/rockchip/rga/rga-hw.c
1908 +@@ -200,22 +200,25 @@ static void rga_cmd_set_trans_info(struct rga_ctx *ctx)
1909 + dst_info.data.format = ctx->out.fmt->hw_format;
1910 + dst_info.data.swap = ctx->out.fmt->color_swap;
1911 +
1912 +- if (ctx->in.fmt->hw_format >= RGA_COLOR_FMT_YUV422SP) {
1913 +- if (ctx->out.fmt->hw_format < RGA_COLOR_FMT_YUV422SP) {
1914 +- switch (ctx->in.colorspace) {
1915 +- case V4L2_COLORSPACE_REC709:
1916 +- src_info.data.csc_mode =
1917 +- RGA_SRC_CSC_MODE_BT709_R0;
1918 +- break;
1919 +- default:
1920 +- src_info.data.csc_mode =
1921 +- RGA_SRC_CSC_MODE_BT601_R0;
1922 +- break;
1923 +- }
1924 ++ /*
1925 ++ * CSC mode must only be set when the colorspace families differ between
1926 ++ * input and output. It must remain unset (zeroed) if both are the same.
1927 ++ */
1928 ++
1929 ++ if (RGA_COLOR_FMT_IS_YUV(ctx->in.fmt->hw_format) &&
1930 ++ RGA_COLOR_FMT_IS_RGB(ctx->out.fmt->hw_format)) {
1931 ++ switch (ctx->in.colorspace) {
1932 ++ case V4L2_COLORSPACE_REC709:
1933 ++ src_info.data.csc_mode = RGA_SRC_CSC_MODE_BT709_R0;
1934 ++ break;
1935 ++ default:
1936 ++ src_info.data.csc_mode = RGA_SRC_CSC_MODE_BT601_R0;
1937 ++ break;
1938 + }
1939 + }
1940 +
1941 +- if (ctx->out.fmt->hw_format >= RGA_COLOR_FMT_YUV422SP) {
1942 ++ if (RGA_COLOR_FMT_IS_RGB(ctx->in.fmt->hw_format) &&
1943 ++ RGA_COLOR_FMT_IS_YUV(ctx->out.fmt->hw_format)) {
1944 + switch (ctx->out.colorspace) {
1945 + case V4L2_COLORSPACE_REC709:
1946 + dst_info.data.csc_mode = RGA_SRC_CSC_MODE_BT709_R0;
1947 +diff --git a/drivers/media/platform/rockchip/rga/rga-hw.h b/drivers/media/platform/rockchip/rga/rga-hw.h
1948 +index 96cb0314dfa70..e8917e5630a48 100644
1949 +--- a/drivers/media/platform/rockchip/rga/rga-hw.h
1950 ++++ b/drivers/media/platform/rockchip/rga/rga-hw.h
1951 +@@ -95,6 +95,11 @@
1952 + #define RGA_COLOR_FMT_CP_8BPP 15
1953 + #define RGA_COLOR_FMT_MASK 15
1954 +
1955 ++#define RGA_COLOR_FMT_IS_YUV(fmt) \
1956 ++ (((fmt) >= RGA_COLOR_FMT_YUV422SP) && ((fmt) < RGA_COLOR_FMT_CP_1BPP))
1957 ++#define RGA_COLOR_FMT_IS_RGB(fmt) \
1958 ++ ((fmt) < RGA_COLOR_FMT_YUV422SP)
1959 ++
1960 + #define RGA_COLOR_NONE_SWAP 0
1961 + #define RGA_COLOR_RB_SWAP 1
1962 + #define RGA_COLOR_ALPHA_SWAP 2
1963 +diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c
1964 +index d7b43037e500a..e07b135613eb5 100644
1965 +--- a/drivers/media/platform/vsp1/vsp1_dl.c
1966 ++++ b/drivers/media/platform/vsp1/vsp1_dl.c
1967 +@@ -431,6 +431,8 @@ vsp1_dl_cmd_pool_create(struct vsp1_device *vsp1, enum vsp1_extcmd_type type,
1968 + if (!pool)
1969 + return NULL;
1970 +
1971 ++ pool->vsp1 = vsp1;
1972 ++
1973 + spin_lock_init(&pool->lock);
1974 + INIT_LIST_HEAD(&pool->free);
1975 +
1976 +diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
1977 +index 4a31907a4525f..3ff872c205eeb 100644
1978 +--- a/drivers/mfd/arizona-core.c
1979 ++++ b/drivers/mfd/arizona-core.c
1980 +@@ -1430,6 +1430,15 @@ err_irq:
1981 + arizona_irq_exit(arizona);
1982 + err_pm:
1983 + pm_runtime_disable(arizona->dev);
1984 ++
1985 ++ switch (arizona->pdata.clk32k_src) {
1986 ++ case ARIZONA_32KZ_MCLK1:
1987 ++ case ARIZONA_32KZ_MCLK2:
1988 ++ arizona_clk32k_disable(arizona);
1989 ++ break;
1990 ++ default:
1991 ++ break;
1992 ++ }
1993 + err_reset:
1994 + arizona_enable_reset(arizona);
1995 + regulator_disable(arizona->dcvdd);
1996 +@@ -1452,6 +1461,15 @@ int arizona_dev_exit(struct arizona *arizona)
1997 + regulator_disable(arizona->dcvdd);
1998 + regulator_put(arizona->dcvdd);
1999 +
2000 ++ switch (arizona->pdata.clk32k_src) {
2001 ++ case ARIZONA_32KZ_MCLK1:
2002 ++ case ARIZONA_32KZ_MCLK2:
2003 ++ arizona_clk32k_disable(arizona);
2004 ++ break;
2005 ++ default:
2006 ++ break;
2007 ++ }
2008 ++
2009 + mfd_remove_devices(arizona->dev);
2010 + arizona_free_irq(arizona, ARIZONA_IRQ_UNDERCLOCKED, arizona);
2011 + arizona_free_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, arizona);
2012 +diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c
2013 +index 4faa8d2e5d045..707f4287ab4a0 100644
2014 +--- a/drivers/mfd/dln2.c
2015 ++++ b/drivers/mfd/dln2.c
2016 +@@ -287,7 +287,11 @@ static void dln2_rx(struct urb *urb)
2017 + len = urb->actual_length - sizeof(struct dln2_header);
2018 +
2019 + if (handle == DLN2_HANDLE_EVENT) {
2020 ++ unsigned long flags;
2021 ++
2022 ++ spin_lock_irqsave(&dln2->event_cb_lock, flags);
2023 + dln2_run_event_callbacks(dln2, id, echo, data, len);
2024 ++ spin_unlock_irqrestore(&dln2->event_cb_lock, flags);
2025 + } else {
2026 + /* URB will be re-submitted in _dln2_transfer (free_rx_slot) */
2027 + if (dln2_transfer_complete(dln2, urb, handle, echo))
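The dln2 change takes the event-callback lock around dispatch so that callback unregistration cannot free an entry while the RX completion path is walking the list; the irqsave variant is used because the URB completes in interrupt context. A sketch of dispatch under the lock, with a hypothetical callback list:

static void example_rx_event(struct example_dev *dev, u16 id,
			     const void *data, int len)
{
	struct example_event_cb *cb;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_cb_lock, flags);
	list_for_each_entry(cb, &dev->event_cbs, list) {
		if (cb->id == id)
			cb->callback(cb->ctx, data, len); /* must not sleep */
	}
	spin_unlock_irqrestore(&dev->event_cb_lock, flags);
}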
2028 +diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
2029 +index a66f8d6d61d1b..cb89f0578d425 100644
2030 +--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
2031 ++++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
2032 +@@ -229,15 +229,12 @@ static void renesas_sdhi_internal_dmac_issue_tasklet_fn(unsigned long arg)
2033 + DTRAN_CTRL_DM_START);
2034 + }
2035 +
2036 +-static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg)
2037 ++static bool renesas_sdhi_internal_dmac_complete(struct tmio_mmc_host *host)
2038 + {
2039 +- struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
2040 + enum dma_data_direction dir;
2041 +
2042 +- spin_lock_irq(&host->lock);
2043 +-
2044 + if (!host->data)
2045 +- goto out;
2046 ++ return false;
2047 +
2048 + if (host->data->flags & MMC_DATA_READ)
2049 + dir = DMA_FROM_DEVICE;
2050 +@@ -250,6 +247,17 @@ static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg)
2051 + if (dir == DMA_FROM_DEVICE)
2052 + clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);
2053 +
2054 ++ return true;
2055 ++}
2056 ++
2057 ++static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg)
2058 ++{
2059 ++ struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
2060 ++
2061 ++ spin_lock_irq(&host->lock);
2062 ++ if (!renesas_sdhi_internal_dmac_complete(host))
2063 ++ goto out;
2064 ++
2065 + tmio_mmc_do_data_irq(host);
2066 + out:
2067 + spin_unlock_irq(&host->lock);
2068 +diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c
2069 +index 1054cc070747e..20b0ee174dc61 100644
2070 +--- a/drivers/mtd/nand/raw/fsl_upm.c
2071 ++++ b/drivers/mtd/nand/raw/fsl_upm.c
2072 +@@ -62,7 +62,6 @@ static int fun_chip_ready(struct nand_chip *chip)
2073 + static void fun_wait_rnb(struct fsl_upm_nand *fun)
2074 + {
2075 + if (fun->rnb_gpio[fun->mchip_number] >= 0) {
2076 +- struct mtd_info *mtd = nand_to_mtd(&fun->chip);
2077 + int cnt = 1000000;
2078 +
2079 + while (--cnt && !fun_chip_ready(&fun->chip))
2080 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
2081 +index 413c3f254cf85..c881a573da662 100644
2082 +--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
2083 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
2084 +@@ -43,7 +43,7 @@ struct qmem {
2085 + void *base;
2086 + dma_addr_t iova;
2087 + int alloc_sz;
2088 +- u8 entry_sz;
2089 ++ u16 entry_sz;
2090 + u8 align;
2091 + u32 qsize;
2092 + };
2093 +diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
2094 +index c84ab052ef265..3eee8df359a12 100644
2095 +--- a/drivers/net/ethernet/qualcomm/emac/emac.c
2096 ++++ b/drivers/net/ethernet/qualcomm/emac/emac.c
2097 +@@ -485,13 +485,24 @@ static int emac_clks_phase1_init(struct platform_device *pdev,
2098 +
2099 + ret = clk_prepare_enable(adpt->clk[EMAC_CLK_CFG_AHB]);
2100 + if (ret)
2101 +- return ret;
2102 ++ goto disable_clk_axi;
2103 +
2104 + ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 19200000);
2105 + if (ret)
2106 +- return ret;
2107 ++ goto disable_clk_cfg_ahb;
2108 ++
2109 ++ ret = clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]);
2110 ++ if (ret)
2111 ++ goto disable_clk_cfg_ahb;
2112 +
2113 +- return clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]);
2114 ++ return 0;
2115 ++
2116 ++disable_clk_cfg_ahb:
2117 ++ clk_disable_unprepare(adpt->clk[EMAC_CLK_CFG_AHB]);
2118 ++disable_clk_axi:
2119 ++ clk_disable_unprepare(adpt->clk[EMAC_CLK_AXI]);
2120 ++
2121 ++ return ret;
2122 + }
2123 +
2124 + /* Enable clocks; needs emac_clks_phase1_init to be called before */
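The emac fix converts early returns into a goto-based unwind so every clock enabled before a failure is disabled again, in reverse order. The idiom in isolation, with placeholder clock fields:

static int example_clks_init(struct example_adapter *adpt)
{
	int ret;

	ret = clk_prepare_enable(adpt->clk_axi);
	if (ret)
		return ret;		/* nothing enabled yet */

	ret = clk_prepare_enable(adpt->clk_ahb);
	if (ret)
		goto disable_axi;

	ret = clk_prepare_enable(adpt->clk_high_speed);
	if (ret)
		goto disable_ahb;

	return 0;

disable_ahb:
	clk_disable_unprepare(adpt->clk_ahb);
disable_axi:
	clk_disable_unprepare(adpt->clk_axi);
	return ret;
}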
2125 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
2126 +index 4d75158c64b29..826626e870d5c 100644
2127 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
2128 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
2129 +@@ -350,6 +350,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
2130 + plat_dat->has_gmac = true;
2131 + plat_dat->bsp_priv = gmac;
2132 + plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
2133 ++ plat_dat->multicast_filter_bins = 0;
2134 +
2135 + err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
2136 + if (err)
2137 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
2138 +index bc9b01376e807..1d0b64bd1e1a9 100644
2139 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
2140 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
2141 +@@ -166,6 +166,9 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
2142 + value = GMAC_FRAME_FILTER_PR | GMAC_FRAME_FILTER_PCF;
2143 + } else if (dev->flags & IFF_ALLMULTI) {
2144 + value = GMAC_FRAME_FILTER_PM; /* pass all multi */
2145 ++ } else if (!netdev_mc_empty(dev) && (mcbitslog2 == 0)) {
2146 ++ /* Fall back to all multicast if we've no filter */
2147 ++ value = GMAC_FRAME_FILTER_PM;
2148 + } else if (!netdev_mc_empty(dev)) {
2149 + struct netdev_hw_addr *ha;
2150 +
2151 +diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c
2152 +index 89b85970912db..35d265014e1ec 100644
2153 +--- a/drivers/nvdimm/security.c
2154 ++++ b/drivers/nvdimm/security.c
2155 +@@ -450,14 +450,19 @@ void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm)
2156 + else
2157 + dev_dbg(&nvdimm->dev, "overwrite completed\n");
2158 +
2159 +- if (nvdimm->sec.overwrite_state)
2160 +- sysfs_notify_dirent(nvdimm->sec.overwrite_state);
2161 ++ /*
2162 ++ * Mark the overwrite work done and update dimm security flags,
2163 ++ * then send a sysfs event notification to wake up userspace
2164 ++ * poll threads to pick up the changed state.
2165 ++ */
2166 + nvdimm->sec.overwrite_tmo = 0;
2167 + clear_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
2168 + clear_bit(NDD_WORK_PENDING, &nvdimm->flags);
2169 +- put_device(&nvdimm->dev);
2170 + nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
2171 +- nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
2172 ++ nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
2173 ++ if (nvdimm->sec.overwrite_state)
2174 ++ sysfs_notify_dirent(nvdimm->sec.overwrite_state);
2175 ++ put_device(&nvdimm->dev);
2176 + }
2177 +
2178 + void nvdimm_security_overwrite_query(struct work_struct *work)
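The nvdimm reordering publishes the final security state before notifying sysfs pollers, and drops the device reference last, so a reader woken by the notification never sees stale flags and the device cannot disappear underneath the work item. The ordering as a sketch, with hypothetical field and helper names:

static void example_overwrite_done(struct example_dimm *dimm)
{
	dimm->overwrite_tmo = 0;
	clear_bit(EXAMPLE_WORK_PENDING, &dimm->flags);
	dimm->sec_flags = example_read_security_flags(dimm); /* refresh */
	if (dimm->sysfs_state)
		sysfs_notify_dirent(dimm->sysfs_state);	/* wake poll()ers */
	put_device(&dimm->dev);	/* last: this ref may pin the device */
}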
2179 +diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
2180 +index 8e40b3e6da77d..3cef835b375fd 100644
2181 +--- a/drivers/pci/bus.c
2182 ++++ b/drivers/pci/bus.c
2183 +@@ -322,12 +322,8 @@ void pci_bus_add_device(struct pci_dev *dev)
2184 +
2185 + dev->match_driver = true;
2186 + retval = device_attach(&dev->dev);
2187 +- if (retval < 0 && retval != -EPROBE_DEFER) {
2188 ++ if (retval < 0 && retval != -EPROBE_DEFER)
2189 + pci_warn(dev, "device attach failed (%d)\n", retval);
2190 +- pci_proc_detach_device(dev);
2191 +- pci_remove_sysfs_dev_files(dev);
2192 +- return;
2193 +- }
2194 +
2195 + pci_dev_assign_added(dev, true);
2196 + }
2197 +diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
2198 +index 70ded8900e285..270d502b8cd50 100644
2199 +--- a/drivers/pci/controller/dwc/pcie-qcom.c
2200 ++++ b/drivers/pci/controller/dwc/pcie-qcom.c
2201 +@@ -45,7 +45,13 @@
2202 + #define PCIE_CAP_CPL_TIMEOUT_DISABLE 0x10
2203 +
2204 + #define PCIE20_PARF_PHY_CTRL 0x40
2205 ++#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK GENMASK(20, 16)
2206 ++#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x) ((x) << 16)
2207 ++
2208 + #define PCIE20_PARF_PHY_REFCLK 0x4C
2209 ++#define PHY_REFCLK_SSP_EN BIT(16)
2210 ++#define PHY_REFCLK_USE_PAD BIT(12)
2211 ++
2212 + #define PCIE20_PARF_DBI_BASE_ADDR 0x168
2213 + #define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C
2214 + #define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
2215 +@@ -76,6 +82,18 @@
2216 + #define DBI_RO_WR_EN 1
2217 +
2218 + #define PERST_DELAY_US 1000
2219 ++/* PARF registers */
2220 ++#define PCIE20_PARF_PCS_DEEMPH 0x34
2221 ++#define PCS_DEEMPH_TX_DEEMPH_GEN1(x) ((x) << 16)
2222 ++#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x) ((x) << 8)
2223 ++#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x) ((x) << 0)
2224 ++
2225 ++#define PCIE20_PARF_PCS_SWING 0x38
2226 ++#define PCS_SWING_TX_SWING_FULL(x) ((x) << 8)
2227 ++#define PCS_SWING_TX_SWING_LOW(x) ((x) << 0)
2228 ++
2229 ++#define PCIE20_PARF_CONFIG_BITS 0x50
2230 ++#define PHY_RX0_EQ(x) ((x) << 24)
2231 +
2232 + #define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358
2233 + #define SLV_ADDR_SPACE_SZ 0x10000000
2234 +@@ -275,6 +293,7 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
2235 + struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
2236 + struct dw_pcie *pci = pcie->pci;
2237 + struct device *dev = pci->dev;
2238 ++ struct device_node *node = dev->of_node;
2239 + u32 val;
2240 + int ret;
2241 +
2242 +@@ -319,9 +338,29 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
2243 + val &= ~BIT(0);
2244 + writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
2245 +
2246 ++ if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
2247 ++ writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
2248 ++ PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
2249 ++ PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
2250 ++ pcie->parf + PCIE20_PARF_PCS_DEEMPH);
2251 ++ writel(PCS_SWING_TX_SWING_FULL(120) |
2252 ++ PCS_SWING_TX_SWING_LOW(120),
2253 ++ pcie->parf + PCIE20_PARF_PCS_SWING);
2254 ++ writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS);
2255 ++ }
2256 ++
2257 ++ if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
2258 ++ /* set TX termination offset */
2259 ++ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
2260 ++ val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
2261 ++ val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
2262 ++ writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
2263 ++ }
2264 ++
2265 + /* enable external reference clock */
2266 + val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
2267 +- val |= BIT(16);
2268 ++ val &= ~PHY_REFCLK_USE_PAD;
2269 ++ val |= PHY_REFCLK_SSP_EN;
2270 + writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
2271 +
2272 + ret = reset_control_deassert(res->phy_reset);
2273 +diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
2274 +index b3869951c0eb7..6e60b4b1bf53b 100644
2275 +--- a/drivers/pci/hotplug/acpiphp_glue.c
2276 ++++ b/drivers/pci/hotplug/acpiphp_glue.c
2277 +@@ -122,13 +122,21 @@ static struct acpiphp_context *acpiphp_grab_context(struct acpi_device *adev)
2278 + struct acpiphp_context *context;
2279 +
2280 + acpi_lock_hp_context();
2281 ++
2282 + context = acpiphp_get_context(adev);
2283 +- if (!context || context->func.parent->is_going_away) {
2284 +- acpi_unlock_hp_context();
2285 +- return NULL;
2286 ++ if (!context)
2287 ++ goto unlock;
2288 ++
2289 ++ if (context->func.parent->is_going_away) {
2290 ++ acpiphp_put_context(context);
2291 ++ context = NULL;
2292 ++ goto unlock;
2293 + }
2294 ++
2295 + get_bridge(context->func.parent);
2296 + acpiphp_put_context(context);
2297 ++
2298 ++unlock:
2299 + acpi_unlock_hp_context();
2300 + return context;
2301 + }
2302 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
2303 +index 9bc0f321aaf0e..c98067579e9f3 100644
2304 +--- a/drivers/pci/quirks.c
2305 ++++ b/drivers/pci/quirks.c
2306 +@@ -5208,7 +5208,8 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
2307 + */
2308 + static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
2309 + {
2310 +- if (pdev->device == 0x7340 && pdev->revision != 0xc5)
2311 ++ if ((pdev->device == 0x7312 && pdev->revision != 0x00) ||
2312 ++ (pdev->device == 0x7340 && pdev->revision != 0xc5))
2313 + return;
2314 +
2315 + pci_info(pdev, "disabling ATS\n");
2316 +@@ -5219,6 +5220,8 @@ static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
2317 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats);
2318 + /* AMD Iceland dGPU */
2319 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
2320 ++/* AMD Navi10 dGPU */
2321 ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats);
2322 + /* AMD Navi14 dGPU */
2323 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
2324 + #endif /* CONFIG_PCI_ATS */
2325 +diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
2326 +index 6e2683016c1f0..8bd0a078bfc47 100644
2327 +--- a/drivers/pinctrl/pinctrl-ingenic.c
2328 ++++ b/drivers/pinctrl/pinctrl-ingenic.c
2329 +@@ -1500,9 +1500,9 @@ static void ingenic_gpio_irq_ack(struct irq_data *irqd)
2330 + */
2331 + high = ingenic_gpio_get_value(jzgc, irq);
2332 + if (high)
2333 +- irq_set_type(jzgc, irq, IRQ_TYPE_EDGE_FALLING);
2334 ++ irq_set_type(jzgc, irq, IRQ_TYPE_LEVEL_LOW);
2335 + else
2336 +- irq_set_type(jzgc, irq, IRQ_TYPE_EDGE_RISING);
2337 ++ irq_set_type(jzgc, irq, IRQ_TYPE_LEVEL_HIGH);
2338 + }
2339 +
2340 + if (jzgc->jzpc->version >= ID_JZ4760)
2341 +@@ -1538,7 +1538,7 @@ static int ingenic_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
2342 + */
2343 + bool high = ingenic_gpio_get_value(jzgc, irqd->hwirq);
2344 +
2345 +- type = high ? IRQ_TYPE_EDGE_FALLING : IRQ_TYPE_EDGE_RISING;
2346 ++ type = high ? IRQ_TYPE_LEVEL_LOW : IRQ_TYPE_LEVEL_HIGH;
2347 + }
2348 +
2349 + irq_set_type(jzgc, irqd->hwirq, type);
2350 +@@ -1644,7 +1644,8 @@ static int ingenic_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
2351 + unsigned int pin = gc->base + offset;
2352 +
2353 + if (jzpc->version >= ID_JZ4760)
2354 +- return ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PAT1);
2355 ++ return ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_INT) ||
2356 ++ ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PAT1);
2357 +
2358 + if (ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_SELECT))
2359 + return true;
2360 +diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c
2361 +index 25ca2c894b4de..ab0662a33b41a 100644
2362 +--- a/drivers/platform/chrome/cros_ec_ishtp.c
2363 ++++ b/drivers/platform/chrome/cros_ec_ishtp.c
2364 +@@ -645,8 +645,10 @@ static int cros_ec_ishtp_probe(struct ishtp_cl_device *cl_device)
2365 +
2366 + 	/* Register cros_ec_dev mfd */
2367 + rv = cros_ec_dev_init(client_data);
2368 +- if (rv)
2369 ++ if (rv) {
2370 ++ down_write(&init_lock);
2371 + goto end_cros_ec_dev_init_error;
2372 ++ }
2373 +
2374 + return 0;
2375 +
2376 +diff --git a/drivers/pwm/pwm-bcm-iproc.c b/drivers/pwm/pwm-bcm-iproc.c
2377 +index 1f829edd8ee70..d392a828fc493 100644
2378 +--- a/drivers/pwm/pwm-bcm-iproc.c
2379 ++++ b/drivers/pwm/pwm-bcm-iproc.c
2380 +@@ -85,8 +85,6 @@ static void iproc_pwmc_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
2381 + u64 tmp, multi, rate;
2382 + u32 value, prescale;
2383 +
2384 +- rate = clk_get_rate(ip->clk);
2385 +-
2386 + value = readl(ip->base + IPROC_PWM_CTRL_OFFSET);
2387 +
2388 + if (value & BIT(IPROC_PWM_CTRL_EN_SHIFT(pwm->hwpwm)))
2389 +@@ -99,6 +97,13 @@ static void iproc_pwmc_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
2390 + else
2391 + state->polarity = PWM_POLARITY_INVERSED;
2392 +
2393 ++ rate = clk_get_rate(ip->clk);
2394 ++ if (rate == 0) {
2395 ++ state->period = 0;
2396 ++ state->duty_cycle = 0;
2397 ++ return;
2398 ++ }
2399 ++
2400 + value = readl(ip->base + IPROC_PWM_PRESCALE_OFFSET);
2401 + prescale = value >> IPROC_PWM_PRESCALE_SHIFT(pwm->hwpwm);
2402 + prescale &= IPROC_PWM_PRESCALE_MAX;
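Moving the clk_get_rate() call after the enable/polarity reads lets the driver bail out with a zeroed state when the rate is 0, instead of dividing by zero in the period math. A sketch of the guard, assuming cycle counts already read from hardware:

static void example_get_state(struct clk *clk, u32 period_cycles,
			      u32 duty_cycles, struct pwm_state *state)
{
	unsigned long rate = clk_get_rate(clk);

	if (rate == 0) {
		/* no usable clock: report zero instead of dividing by 0 */
		state->period = 0;
		state->duty_cycle = 0;
		return;
	}

	state->period = DIV_ROUND_UP_ULL((u64)period_cycles * NSEC_PER_SEC,
					 rate);
	state->duty_cycle = DIV_ROUND_UP_ULL((u64)duty_cycles * NSEC_PER_SEC,
					     rate);
}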
2403 +diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c
2404 +index cb0f4a0be0322..eaeb6aee6da5c 100644
2405 +--- a/drivers/remoteproc/qcom_q6v5.c
2406 ++++ b/drivers/remoteproc/qcom_q6v5.c
2407 +@@ -151,6 +151,8 @@ int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5)
2408 + {
2409 + int ret;
2410 +
2411 ++ q6v5->running = false;
2412 ++
2413 + qcom_smem_state_update_bits(q6v5->state,
2414 + BIT(q6v5->stop_bit), BIT(q6v5->stop_bit));
2415 +
2416 +diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
2417 +index d84e9f306086b..a67c55785b4de 100644
2418 +--- a/drivers/remoteproc/qcom_q6v5_mss.c
2419 ++++ b/drivers/remoteproc/qcom_q6v5_mss.c
2420 +@@ -381,6 +381,12 @@ static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
2421 + {
2422 + struct q6v5 *qproc = rproc->priv;
2423 +
2424 ++ /* MBA is restricted to a maximum size of 1M */
2425 ++ if (fw->size > qproc->mba_size || fw->size > SZ_1M) {
2426 ++ dev_err(qproc->dev, "MBA firmware load failed\n");
2427 ++ return -EINVAL;
2428 ++ }
2429 ++
2430 + memcpy(qproc->mba_region, fw->data, fw->size);
2431 +
2432 + return 0;
2433 +@@ -1028,15 +1034,14 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
2434 + } else if (phdr->p_filesz) {
2435 + /* Replace "xxx.xxx" with "xxx.bxx" */
2436 + sprintf(fw_name + fw_name_len - 3, "b%02d", i);
2437 +- ret = request_firmware(&seg_fw, fw_name, qproc->dev);
2438 ++ ret = request_firmware_into_buf(&seg_fw, fw_name, qproc->dev,
2439 ++ ptr, phdr->p_filesz);
2440 + if (ret) {
2441 + dev_err(qproc->dev, "failed to load %s\n", fw_name);
2442 + iounmap(ptr);
2443 + goto release_firmware;
2444 + }
2445 +
2446 +- memcpy(ptr, seg_fw->data, seg_fw->size);
2447 +-
2448 + release_firmware(seg_fw);
2449 + }
2450 +
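Both hunks above bound firmware copies: the MBA image is rejected if it exceeds its region, and per-segment loads switch to request_firmware_into_buf(), which lets the firmware core enforce the destination size instead of an unchecked memcpy(). The validation step in isolation, with hypothetical names:

static int example_load_image(const struct firmware *fw, void *region,
			      size_t region_size)
{
	if (fw->size > region_size)
		return -EINVAL;	/* reject oversized images up front */

	memcpy(region, fw->data, fw->size);
	return 0;
}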
2451 +diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
2452 +index 9884228800a50..f14394ab0e037 100644
2453 +--- a/drivers/scsi/lpfc/lpfc_nvmet.c
2454 ++++ b/drivers/scsi/lpfc/lpfc_nvmet.c
2455 +@@ -1923,7 +1923,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
2456 + }
2457 + tgtp->tport_unreg_cmp = &tport_unreg_cmp;
2458 + nvmet_fc_unregister_targetport(phba->targetport);
2459 +- if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp,
2460 ++ if (!wait_for_completion_timeout(&tport_unreg_cmp,
2461 + msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
2462 + lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
2463 + "6179 Unreg targetport x%px timeout "
2464 +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
2465 +index 9ad44a96dfe3a..33f1cca7eaa61 100644
2466 +--- a/drivers/usb/serial/ftdi_sio.c
2467 ++++ b/drivers/usb/serial/ftdi_sio.c
2468 +@@ -2480,12 +2480,11 @@ static int ftdi_prepare_write_buffer(struct usb_serial_port *port,
2469 + #define FTDI_RS_ERR_MASK (FTDI_RS_BI | FTDI_RS_PE | FTDI_RS_FE | FTDI_RS_OE)
2470 +
2471 + static int ftdi_process_packet(struct usb_serial_port *port,
2472 +- struct ftdi_private *priv, char *packet, int len)
2473 ++ struct ftdi_private *priv, unsigned char *buf, int len)
2474 + {
2475 ++ unsigned char status;
2476 + int i;
2477 +- char status;
2478 + char flag;
2479 +- char *ch;
2480 +
2481 + if (len < 2) {
2482 + dev_dbg(&port->dev, "malformed packet\n");
2483 +@@ -2495,7 +2494,7 @@ static int ftdi_process_packet(struct usb_serial_port *port,
2484 + /* Compare new line status to the old one, signal if different.
2485 + N.B. packet may be processed more than once, but differences
2486 + are only processed once. */
2487 +- status = packet[0] & FTDI_STATUS_B0_MASK;
2488 ++ status = buf[0] & FTDI_STATUS_B0_MASK;
2489 + if (status != priv->prev_status) {
2490 + char diff_status = status ^ priv->prev_status;
2491 +
2492 +@@ -2521,13 +2520,12 @@ static int ftdi_process_packet(struct usb_serial_port *port,
2493 + }
2494 +
2495 + /* save if the transmitter is empty or not */
2496 +- if (packet[1] & FTDI_RS_TEMT)
2497 ++ if (buf[1] & FTDI_RS_TEMT)
2498 + priv->transmit_empty = 1;
2499 + else
2500 + priv->transmit_empty = 0;
2501 +
2502 +- len -= 2;
2503 +- if (!len)
2504 ++ if (len == 2)
2505 + return 0; /* status only */
2506 +
2507 + /*
2508 +@@ -2535,40 +2533,41 @@ static int ftdi_process_packet(struct usb_serial_port *port,
2509 + * data payload to avoid over-reporting.
2510 + */
2511 + flag = TTY_NORMAL;
2512 +- if (packet[1] & FTDI_RS_ERR_MASK) {
2513 ++ if (buf[1] & FTDI_RS_ERR_MASK) {
2514 + /* Break takes precedence over parity, which takes precedence
2515 + * over framing errors */
2516 +- if (packet[1] & FTDI_RS_BI) {
2517 ++ if (buf[1] & FTDI_RS_BI) {
2518 + flag = TTY_BREAK;
2519 + port->icount.brk++;
2520 + usb_serial_handle_break(port);
2521 +- } else if (packet[1] & FTDI_RS_PE) {
2522 ++ } else if (buf[1] & FTDI_RS_PE) {
2523 + flag = TTY_PARITY;
2524 + port->icount.parity++;
2525 +- } else if (packet[1] & FTDI_RS_FE) {
2526 ++ } else if (buf[1] & FTDI_RS_FE) {
2527 + flag = TTY_FRAME;
2528 + port->icount.frame++;
2529 + }
2530 + /* Overrun is special, not associated with a char */
2531 +- if (packet[1] & FTDI_RS_OE) {
2532 ++ if (buf[1] & FTDI_RS_OE) {
2533 + port->icount.overrun++;
2534 + tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
2535 + }
2536 + }
2537 +
2538 +- port->icount.rx += len;
2539 +- ch = packet + 2;
2540 ++ port->icount.rx += len - 2;
2541 +
2542 + if (port->port.console && port->sysrq) {
2543 +- for (i = 0; i < len; i++, ch++) {
2544 +- if (!usb_serial_handle_sysrq_char(port, *ch))
2545 +- tty_insert_flip_char(&port->port, *ch, flag);
2546 ++ for (i = 2; i < len; i++) {
2547 ++ if (usb_serial_handle_sysrq_char(port, buf[i]))
2548 ++ continue;
2549 ++ tty_insert_flip_char(&port->port, buf[i], flag);
2550 + }
2551 + } else {
2552 +- tty_insert_flip_string_fixed_flag(&port->port, ch, flag, len);
2553 ++ tty_insert_flip_string_fixed_flag(&port->port, buf + 2, flag,
2554 ++ len - 2);
2555 + }
2556 +
2557 +- return len;
2558 ++ return len - 2;
2559 + }
2560 +
2561 + static void ftdi_process_read_urb(struct urb *urb)
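The ftdi_sio rework treats the first two bytes of every packet as a status header: payload accounting switches to len - 2, indexing starts at 2, and the buffer becomes unsigned char so the FTDI_RS_* bit tests are well defined. A stripped-down sketch of the payload path (status and error handling omitted), assuming TTY_NORMAL for every character:

static int example_process_packet(struct tty_port *tport,
				  const unsigned char *buf, int len)
{
	int i;

	if (len < 2)
		return 0;	/* malformed: the status header is missing */
	if (len == 2)
		return 0;	/* status-only packet, no payload */

	/* only len - 2 bytes are payload; push exactly those */
	for (i = 2; i < len; i++)
		tty_insert_flip_char(tport, buf[i], TTY_NORMAL);

	return len - 2;
}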
2562 +diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
2563 +index e46104c2fd94e..893cef70c1599 100644
2564 +--- a/drivers/watchdog/f71808e_wdt.c
2565 ++++ b/drivers/watchdog/f71808e_wdt.c
2566 +@@ -689,9 +689,9 @@ static int __init watchdog_init(int sioaddr)
2567 + * into the module have been registered yet.
2568 + */
2569 + watchdog.sioaddr = sioaddr;
2570 +- watchdog.ident.options = WDIOC_SETTIMEOUT
2571 +- | WDIOF_MAGICCLOSE
2572 +- | WDIOF_KEEPALIVEPING;
2573 ++ watchdog.ident.options = WDIOF_MAGICCLOSE
2574 ++ | WDIOF_KEEPALIVEPING
2575 ++ | WDIOF_CARDRESET;
2576 +
2577 + snprintf(watchdog.ident.identity,
2578 + sizeof(watchdog.ident.identity), "%s watchdog",
2579 +@@ -705,6 +705,13 @@ static int __init watchdog_init(int sioaddr)
2580 + wdt_conf = superio_inb(sioaddr, F71808FG_REG_WDT_CONF);
2581 + watchdog.caused_reboot = wdt_conf & BIT(F71808FG_FLAG_WDTMOUT_STS);
2582 +
2583 ++ /*
2584 ++ * We don't want WDTMOUT_STS to stick around till regular reboot.
2585 ++ * Write 1 to the bit to clear it to zero.
2586 ++ */
2587 ++ superio_outb(sioaddr, F71808FG_REG_WDT_CONF,
2588 ++ wdt_conf | BIT(F71808FG_FLAG_WDTMOUT_STS));
2589 ++
2590 + superio_exit(sioaddr);
2591 +
2592 + err = watchdog_set_timeout(timeout);
2593 +diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
2594 +index c4147e93aa7d4..3729f99fd8eca 100644
2595 +--- a/drivers/watchdog/watchdog_dev.c
2596 ++++ b/drivers/watchdog/watchdog_dev.c
2597 +@@ -974,6 +974,15 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
2598 + if (IS_ERR_OR_NULL(watchdog_kworker))
2599 + return -ENODEV;
2600 +
2601 ++ device_initialize(&wd_data->dev);
2602 ++ wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id);
2603 ++ wd_data->dev.class = &watchdog_class;
2604 ++ wd_data->dev.parent = wdd->parent;
2605 ++ wd_data->dev.groups = wdd->groups;
2606 ++ wd_data->dev.release = watchdog_core_data_release;
2607 ++ dev_set_drvdata(&wd_data->dev, wdd);
2608 ++ dev_set_name(&wd_data->dev, "watchdog%d", wdd->id);
2609 ++
2610 + kthread_init_work(&wd_data->work, watchdog_ping_work);
2611 + hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
2612 + wd_data->timer.function = watchdog_timer_expired;
2613 +@@ -994,15 +1003,6 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
2614 + }
2615 + }
2616 +
2617 +- device_initialize(&wd_data->dev);
2618 +- wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id);
2619 +- wd_data->dev.class = &watchdog_class;
2620 +- wd_data->dev.parent = wdd->parent;
2621 +- wd_data->dev.groups = wdd->groups;
2622 +- wd_data->dev.release = watchdog_core_data_release;
2623 +- dev_set_drvdata(&wd_data->dev, wdd);
2624 +- dev_set_name(&wd_data->dev, "watchdog%d", wdd->id);
2625 +-
2626 + /* Fill in the data structures */
2627 + cdev_init(&wd_data->cdev, &watchdog_fops);
2628 +
2629 +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
2630 +index 36cd210ee2ef7..2374f3f6f3b70 100644
2631 +--- a/fs/btrfs/ctree.h
2632 ++++ b/fs/btrfs/ctree.h
2633 +@@ -990,8 +990,10 @@ struct btrfs_root {
2634 + wait_queue_head_t log_writer_wait;
2635 + wait_queue_head_t log_commit_wait[2];
2636 + struct list_head log_ctxs[2];
2637 ++ /* Used only for log trees of subvolumes, not for the log root tree */
2638 + atomic_t log_writers;
2639 + atomic_t log_commit[2];
2640 ++ /* Used only for log trees of subvolumes, not for the log root tree */
2641 + atomic_t log_batch;
2642 + int log_transid;
2643 + /* No matter whether the commit succeeds or not */
2644 +@@ -3164,7 +3166,7 @@ do { \
2645 + /* Report first abort since mount */ \
2646 + if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED, \
2647 + &((trans)->fs_info->fs_state))) { \
2648 +- if ((errno) != -EIO) { \
2649 ++ if ((errno) != -EIO && (errno) != -EROFS) { \
2650 + WARN(1, KERN_DEBUG \
2651 + "BTRFS: Transaction aborted (error %d)\n", \
2652 + (errno)); \
2653 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
2654 +index 273d1ccdd45df..ad1c8e3b8133a 100644
2655 +--- a/fs/btrfs/disk-io.c
2656 ++++ b/fs/btrfs/disk-io.c
2657 +@@ -1475,9 +1475,16 @@ int btrfs_init_fs_root(struct btrfs_root *root)
2658 + spin_lock_init(&root->ino_cache_lock);
2659 + init_waitqueue_head(&root->ino_cache_wait);
2660 +
2661 +- ret = get_anon_bdev(&root->anon_dev);
2662 +- if (ret)
2663 +- goto fail;
2664 ++ /*
2665 ++ * Don't assign an anonymous block device to roots that are not exposed to
2666 ++ * userspace; the id pool is limited to 1M
2667 ++ */
2668 ++ if (is_fstree(root->root_key.objectid) &&
2669 ++ btrfs_root_refs(&root->root_item) > 0) {
2670 ++ ret = get_anon_bdev(&root->anon_dev);
2671 ++ if (ret)
2672 ++ goto fail;
2673 ++ }
2674 +
2675 + mutex_lock(&root->objectid_mutex);
2676 + ret = btrfs_find_highest_objectid(root,
2677 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
2678 +index 47ecf7216b3e5..739332b462059 100644
2679 +--- a/fs/btrfs/extent-tree.c
2680 ++++ b/fs/btrfs/extent-tree.c
2681 +@@ -5221,7 +5221,14 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
2682 + goto out;
2683 + }
2684 +
2685 +- trans = btrfs_start_transaction(tree_root, 0);
2686 ++ /*
2687 ++ * Use join to avoid potential EINTR from transaction start. See
2688 ++ * wait_reserve_ticket and the whole reservation callchain.
2689 ++ */
2690 ++ if (for_reloc)
2691 ++ trans = btrfs_join_transaction(tree_root);
2692 ++ else
2693 ++ trans = btrfs_start_transaction(tree_root, 0);
2694 + if (IS_ERR(trans)) {
2695 + err = PTR_ERR(trans);
2696 + goto out_free;
2697 +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
2698 +index 99dcb38976592..035ea5bc692ad 100644
2699 +--- a/fs/btrfs/extent_io.c
2700 ++++ b/fs/btrfs/extent_io.c
2701 +@@ -4467,15 +4467,25 @@ int try_release_extent_mapping(struct page *page, gfp_t mask)
2702 + free_extent_map(em);
2703 + break;
2704 + }
2705 +- if (!test_range_bit(tree, em->start,
2706 +- extent_map_end(em) - 1,
2707 +- EXTENT_LOCKED, 0, NULL)) {
2708 ++ if (test_range_bit(tree, em->start,
2709 ++ extent_map_end(em) - 1,
2710 ++ EXTENT_LOCKED, 0, NULL))
2711 ++ goto next;
2712 ++ /*
2713 ++ * If it's not in the list of modified extents, used
2714 ++ * by a fast fsync, we can remove it. If it's being
2715 ++ * logged we can safely remove it since fsync took an
2716 ++ * extra reference on the em.
2717 ++ */
2718 ++ if (list_empty(&em->list) ||
2719 ++ test_bit(EXTENT_FLAG_LOGGING, &em->flags)) {
2720 + set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2721 + &btrfs_inode->runtime_flags);
2722 + remove_extent_mapping(map, em);
2723 + /* once for the rb tree */
2724 + free_extent_map(em);
2725 + }
2726 ++next:
2727 + start = extent_map_end(em);
2728 + write_unlock(&map->lock);
2729 +
2730 +diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
2731 +index d86ada9c3c541..8bfc0f348ad55 100644
2732 +--- a/fs/btrfs/free-space-cache.c
2733 ++++ b/fs/btrfs/free-space-cache.c
2734 +@@ -2166,7 +2166,7 @@ out:
2735 + static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
2736 + struct btrfs_free_space *info, bool update_stat)
2737 + {
2738 +- struct btrfs_free_space *left_info;
2739 ++ struct btrfs_free_space *left_info = NULL;
2740 + struct btrfs_free_space *right_info;
2741 + bool merged = false;
2742 + u64 offset = info->offset;
2743 +@@ -2181,7 +2181,7 @@ static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
2744 + if (right_info && rb_prev(&right_info->offset_index))
2745 + left_info = rb_entry(rb_prev(&right_info->offset_index),
2746 + struct btrfs_free_space, offset_index);
2747 +- else
2748 ++ else if (!right_info)
2749 + left_info = tree_search_offset(ctl, offset - 1, 0, 0);
2750 +
2751 + if (right_info && !right_info->bitmap) {
2752 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
2753 +index e408181a5eba3..fa7f3a59813ea 100644
2754 +--- a/fs/btrfs/inode.c
2755 ++++ b/fs/btrfs/inode.c
2756 +@@ -641,12 +641,18 @@ cont:
2757 + page_error_op |
2758 + PAGE_END_WRITEBACK);
2759 +
2760 +- for (i = 0; i < nr_pages; i++) {
2761 +- WARN_ON(pages[i]->mapping);
2762 +- put_page(pages[i]);
2763 ++ /*
2764 ++ * Ensure we only free the compressed pages if we have
2765 ++ * them allocated, as we can still reach here with
2766 ++ * inode_need_compress() == false.
2767 ++ */
2768 ++ if (pages) {
2769 ++ for (i = 0; i < nr_pages; i++) {
2770 ++ WARN_ON(pages[i]->mapping);
2771 ++ put_page(pages[i]);
2772 ++ }
2773 ++ kfree(pages);
2774 + }
2775 +- kfree(pages);
2776 +-
2777 + return 0;
2778 + }
2779 + }
2780 +@@ -4681,6 +4687,8 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
2781 + }
2782 + }
2783 +
2784 ++ free_anon_bdev(dest->anon_dev);
2785 ++ dest->anon_dev = 0;
2786 + out_end_trans:
2787 + trans->block_rsv = NULL;
2788 + trans->bytes_reserved = 0;
2789 +@@ -7186,7 +7194,7 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
2790 + extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
2791 + /* Only regular file could have regular/prealloc extent */
2792 + if (!S_ISREG(inode->vfs_inode.i_mode)) {
2793 +- ret = -EUCLEAN;
2794 ++ err = -EUCLEAN;
2795 + btrfs_crit(fs_info,
2796 + "regular/prealloc extent found for non-regular inode %llu",
2797 + btrfs_ino(inode));
2798 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
2799 +index d88b8d8897cc5..88745b5182126 100644
2800 +--- a/fs/btrfs/ioctl.c
2801 ++++ b/fs/btrfs/ioctl.c
2802 +@@ -167,8 +167,11 @@ static int btrfs_ioctl_getflags(struct file *file, void __user *arg)
2803 + return 0;
2804 + }
2805 +
2806 +-/* Check if @flags are a supported and valid set of FS_*_FL flags */
2807 +-static int check_fsflags(unsigned int flags)
2808 ++/*
2809 ++ * Check if @flags are a supported and valid set of FS_*_FL flags and that
2810 ++ * the old and new flags are not conflicting
2811 ++ */
2812 ++static int check_fsflags(unsigned int old_flags, unsigned int flags)
2813 + {
2814 + if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
2815 + FS_NOATIME_FL | FS_NODUMP_FL | \
2816 +@@ -177,9 +180,19 @@ static int check_fsflags(unsigned int flags)
2817 + FS_NOCOW_FL))
2818 + return -EOPNOTSUPP;
2819 +
2820 ++ /* COMPR and NOCOMP on new/old are valid */
2821 + if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL))
2822 + return -EINVAL;
2823 +
2824 ++ if ((flags & FS_COMPR_FL) && (flags & FS_NOCOW_FL))
2825 ++ return -EINVAL;
2826 ++
2827 ++ /* NOCOW and compression options are mutually exclusive */
2828 ++ if ((old_flags & FS_NOCOW_FL) && (flags & (FS_COMPR_FL | FS_NOCOMP_FL)))
2829 ++ return -EINVAL;
2830 ++ if ((flags & FS_NOCOW_FL) && (old_flags & (FS_COMPR_FL | FS_NOCOMP_FL)))
2831 ++ return -EINVAL;
2832 ++
2833 + return 0;
2834 + }
2835 +
2836 +@@ -193,7 +206,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
2837 + unsigned int fsflags, old_fsflags;
2838 + int ret;
2839 + const char *comp = NULL;
2840 +- u32 binode_flags = binode->flags;
2841 ++ u32 binode_flags;
2842 +
2843 + if (!inode_owner_or_capable(inode))
2844 + return -EPERM;
2845 +@@ -204,22 +217,23 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
2846 + if (copy_from_user(&fsflags, arg, sizeof(fsflags)))
2847 + return -EFAULT;
2848 +
2849 +- ret = check_fsflags(fsflags);
2850 +- if (ret)
2851 +- return ret;
2852 +-
2853 + ret = mnt_want_write_file(file);
2854 + if (ret)
2855 + return ret;
2856 +
2857 + inode_lock(inode);
2858 +-
2859 + fsflags = btrfs_mask_fsflags_for_type(inode, fsflags);
2860 + old_fsflags = btrfs_inode_flags_to_fsflags(binode->flags);
2861 ++
2862 + ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
2863 + if (ret)
2864 + goto out_unlock;
2865 +
2866 ++ ret = check_fsflags(old_fsflags, fsflags);
2867 ++ if (ret)
2868 ++ goto out_unlock;
2869 ++
2870 ++ binode_flags = binode->flags;
2871 + if (fsflags & FS_SYNC_FL)
2872 + binode_flags |= BTRFS_INODE_SYNC;
2873 + else
2874 +diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
2875 +index 454a1015d026b..9a2f15f4c80e0 100644
2876 +--- a/fs/btrfs/ref-verify.c
2877 ++++ b/fs/btrfs/ref-verify.c
2878 +@@ -286,6 +286,8 @@ static struct block_entry *add_block_entry(struct btrfs_fs_info *fs_info,
2879 + exist_re = insert_root_entry(&exist->roots, re);
2880 + if (exist_re)
2881 + kfree(re);
2882 ++ } else {
2883 ++ kfree(re);
2884 + }
2885 + kfree(be);
2886 + return exist;
2887 +diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
2888 +index 1b087ee338ccb..af3605a0bf2e0 100644
2889 +--- a/fs/btrfs/relocation.c
2890 ++++ b/fs/btrfs/relocation.c
2891 +@@ -2312,12 +2312,20 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
2892 + btrfs_unlock_up_safe(path, 0);
2893 + }
2894 +
2895 +- min_reserved = fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
2896 ++ /*
2897 ++ * In merge_reloc_root(), we modify the upper level pointer to swap the
2898 ++ * tree blocks between reloc tree and subvolume tree. Thus for tree
2899 ++ * block COW, we COW at most from level 1 to root level for each tree.
2900 ++ *
2901 ++ * Thus the needed metadata size is at most root_level * nodesize,
2902 ++ * multiplied by 2 since we have two trees to COW.
2903 ++ */
2904 ++ min_reserved = fs_info->nodesize * btrfs_root_level(root_item) * 2;
2905 + memset(&next_key, 0, sizeof(next_key));
2906 +
2907 + while (1) {
2908 + ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved,
2909 +- BTRFS_RESERVE_FLUSH_ALL);
2910 ++ BTRFS_RESERVE_FLUSH_LIMIT);
2911 + if (ret) {
2912 + err = ret;
2913 + goto out;
2914 +diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
2915 +index aea24202cd355..4b0ee34aa65d5 100644
2916 +--- a/fs/btrfs/super.c
2917 ++++ b/fs/btrfs/super.c
2918 +@@ -435,6 +435,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
2919 + char *compress_type;
2920 + bool compress_force = false;
2921 + enum btrfs_compression_type saved_compress_type;
2922 ++ int saved_compress_level;
2923 + bool saved_compress_force;
2924 + int no_compress = 0;
2925 +
2926 +@@ -517,6 +518,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
2927 + info->compress_type : BTRFS_COMPRESS_NONE;
2928 + saved_compress_force =
2929 + btrfs_test_opt(info, FORCE_COMPRESS);
2930 ++ saved_compress_level = info->compress_level;
2931 + if (token == Opt_compress ||
2932 + token == Opt_compress_force ||
2933 + strncmp(args[0].from, "zlib", 4) == 0) {
2934 +@@ -561,6 +563,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
2935 + no_compress = 0;
2936 + } else if (strncmp(args[0].from, "no", 2) == 0) {
2937 + compress_type = "no";
2938 ++ info->compress_level = 0;
2939 ++ info->compress_type = 0;
2940 + btrfs_clear_opt(info->mount_opt, COMPRESS);
2941 + btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
2942 + compress_force = false;
2943 +@@ -581,11 +585,11 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
2944 + */
2945 + btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
2946 + }
2947 +- if ((btrfs_test_opt(info, COMPRESS) &&
2948 +- (info->compress_type != saved_compress_type ||
2949 +- compress_force != saved_compress_force)) ||
2950 +- (!btrfs_test_opt(info, COMPRESS) &&
2951 +- no_compress == 1)) {
2952 ++ if (no_compress == 1) {
2953 ++ btrfs_info(info, "use no compression");
2954 ++ } else if ((info->compress_type != saved_compress_type) ||
2955 ++ (compress_force != saved_compress_force) ||
2956 ++ (info->compress_level != saved_compress_level)) {
2957 + btrfs_info(info, "%s %s compression, level %d",
2958 + (compress_force) ? "force" : "use",
2959 + compress_type, info->compress_level);
2960 +@@ -1848,6 +1852,12 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
2961 + set_bit(BTRFS_FS_OPEN, &fs_info->flags);
2962 + }
2963 + out:
2964 ++ /*
2965 ++ * We need to set SB_I_VERSION here otherwise it'll get cleared by VFS,
2966 ++ * since the absence of the flag means it can be toggled off by remount.
2967 ++ */
2968 ++ *flags |= SB_I_VERSION;
2969 ++
2970 + wake_up_process(fs_info->transaction_kthread);
2971 + btrfs_remount_cleanup(fs_info, old_opts);
2972 + return 0;
2973 +@@ -2254,9 +2264,7 @@ static int btrfs_unfreeze(struct super_block *sb)
2974 + static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
2975 + {
2976 + struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb);
2977 +- struct btrfs_fs_devices *cur_devices;
2978 + struct btrfs_device *dev, *first_dev = NULL;
2979 +- struct list_head *head;
2980 +
2981 + /*
2982 + * Lightweight locking of the devices. We should not need
2983 +@@ -2266,18 +2274,13 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
2984 + * least until the rcu_read_unlock.
2985 + */
2986 + rcu_read_lock();
2987 +- cur_devices = fs_info->fs_devices;
2988 +- while (cur_devices) {
2989 +- head = &cur_devices->devices;
2990 +- list_for_each_entry_rcu(dev, head, dev_list) {
2991 +- if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
2992 +- continue;
2993 +- if (!dev->name)
2994 +- continue;
2995 +- if (!first_dev || dev->devid < first_dev->devid)
2996 +- first_dev = dev;
2997 +- }
2998 +- cur_devices = cur_devices->seed;
2999 ++ list_for_each_entry_rcu(dev, &fs_info->fs_devices->devices, dev_list) {
3000 ++ if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3001 ++ continue;
3002 ++ if (!dev->name)
3003 ++ continue;
3004 ++ if (!first_dev || dev->devid < first_dev->devid)
3005 ++ first_dev = dev;
3006 + }
3007 +
3008 + if (first_dev)
3009 +diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
3010 +index f6d3c80f2e289..5c299e1f2297e 100644
3011 +--- a/fs/btrfs/sysfs.c
3012 ++++ b/fs/btrfs/sysfs.c
3013 +@@ -975,7 +975,9 @@ int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices,
3014 + {
3015 + int error = 0;
3016 + struct btrfs_device *dev;
3017 ++ unsigned int nofs_flag;
3018 +
3019 ++ nofs_flag = memalloc_nofs_save();
3020 + list_for_each_entry(dev, &fs_devices->devices, dev_list) {
3021 + struct hd_struct *disk;
3022 + struct kobject *disk_kobj;
3023 +@@ -994,6 +996,7 @@ int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices,
3024 + if (error)
3025 + break;
3026 + }
3027 ++ memalloc_nofs_restore(nofs_flag);
3028 +
3029 + return error;
3030 + }
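
memalloc_nofs_save()/memalloc_nofs_restore() bracket the whole loop so that every allocation made underneath (the sysfs link creation here) implicitly drops __GFP_FS and cannot recurse into filesystem reclaim. The idiom nests safely because the old word is handed back to the caller. A userspace analogue of that scoped-context pattern; the kernel API named above is real, but everything below is a mock:

#include <stdio.h>

static __thread unsigned int alloc_ctx;  /* mock of per-task flag bits */
#define CTX_NOFS 0x1u

/* Set the NOFS bit, returning the previous word so nesting works. */
static unsigned int nofs_save(void)
{
	unsigned int old = alloc_ctx;
	alloc_ctx |= CTX_NOFS;
	return old;
}

static void nofs_restore(unsigned int old)
{
	alloc_ctx = old;  /* restore exactly what the caller saw */
}

/* A mock allocator that consults the ambient context instead of
 * requiring every call site to pass the right flags explicitly. */
static void mock_alloc(const char *what)
{
	printf("alloc %s%s\n", what,
	       (alloc_ctx & CTX_NOFS) ? " (GFP_NOFS implied)" : "");
}

int main(void)
{
	unsigned int flag = nofs_save();
	mock_alloc("sysfs link");   /* implicitly NOFS inside the section */
	nofs_restore(flag);
	mock_alloc("unrelated");    /* back to normal */
	return 0;
}
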
3031 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
3032 +index f46afbff668eb..3c090549ed07d 100644
3033 +--- a/fs/btrfs/tree-log.c
3034 ++++ b/fs/btrfs/tree-log.c
3035 +@@ -3140,29 +3140,17 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
3036 + btrfs_init_log_ctx(&root_log_ctx, NULL);
3037 +
3038 + mutex_lock(&log_root_tree->log_mutex);
3039 +- atomic_inc(&log_root_tree->log_batch);
3040 +- atomic_inc(&log_root_tree->log_writers);
3041 +
3042 + index2 = log_root_tree->log_transid % 2;
3043 + list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
3044 + root_log_ctx.log_transid = log_root_tree->log_transid;
3045 +
3046 +- mutex_unlock(&log_root_tree->log_mutex);
3047 +-
3048 +- mutex_lock(&log_root_tree->log_mutex);
3049 +-
3050 + /*
3051 + * Now we are safe to update the log_root_tree because we're under the
3052 + * log_mutex, and we're a current writer so we're holding the commit
3053 + * open until we drop the log_mutex.
3054 + */
3055 + ret = update_log_root(trans, log, &new_root_item);
3056 +-
3057 +- if (atomic_dec_and_test(&log_root_tree->log_writers)) {
3058 +- /* atomic_dec_and_test implies a barrier */
3059 +- cond_wake_up_nomb(&log_root_tree->log_writer_wait);
3060 +- }
3061 +-
3062 + if (ret) {
3063 + if (!list_empty(&root_log_ctx.list))
3064 + list_del_init(&root_log_ctx.list);
3065 +@@ -3208,8 +3196,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
3066 + root_log_ctx.log_transid - 1);
3067 + }
3068 +
3069 +- wait_for_writer(log_root_tree);
3070 +-
3071 + /*
3072 + * now that we've moved on to the tree of log tree roots,
3073 + * check the full commit flag again
3074 +@@ -4054,11 +4040,8 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
3075 + fs_info->csum_root,
3076 + ds + cs, ds + cs + cl - 1,
3077 + &ordered_sums, 0);
3078 +- if (ret) {
3079 +- btrfs_release_path(dst_path);
3080 +- kfree(ins_data);
3081 +- return ret;
3082 +- }
3083 ++ if (ret)
3084 ++ break;
3085 + }
3086 + }
3087 + }
3088 +@@ -4071,7 +4054,6 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
3089 + * we have to do this after the loop above to avoid changing the
3090 + * log tree while trying to change the log tree.
3091 + */
3092 +- ret = 0;
3093 + while (!list_empty(&ordered_sums)) {
3094 + struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
3095 + struct btrfs_ordered_sum,
3096 +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
3097 +index 1e6e3c1d97dfa..196ddbcd29360 100644
3098 +--- a/fs/btrfs/volumes.c
3099 ++++ b/fs/btrfs/volumes.c
3100 +@@ -219,7 +219,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
3101 + *
3102 + * global::fs_devs - add, remove, updates to the global list
3103 + *
3104 +- * does not protect: manipulation of the fs_devices::devices list!
3105 ++ * does not protect: manipulation of the fs_devices::devices list in general
3106 ++ * but in mount context it could be used to exclude list modifications by,
3107 ++ * e.g., the scan ioctl

3108 + *
3109 + * btrfs_device::name - renames (write side), read is RCU
3110 + *
3111 +@@ -232,6 +234,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
3112 + * may be used to exclude some operations from running concurrently without any
3113 + * modifications to the list (see write_all_supers)
3114 + *
3115 ++ * Is not required at mount and close times, because our device list is
3116 ++ * protected by the uuid_mutex at that point.
3117 ++ *
3118 + * balance_mutex
3119 + * -------------
3120 + * protects balance structures (status, state) and context accessed from
3121 +@@ -778,6 +783,11 @@ static int btrfs_free_stale_devices(const char *path,
3122 + return ret;
3123 + }
3124 +
3125 ++/*
3126 ++ * This is only used on mount, and we are protected from competing things
3127 ++ * messing with our fs_devices by the uuid_mutex, thus we do not need the
3128 ++ * fs_devices->device_list_mutex here.
3129 ++ */
3130 + static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
3131 + struct btrfs_device *device, fmode_t flags,
3132 + void *holder)
3133 +@@ -1418,8 +1428,14 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
3134 + int ret;
3135 +
3136 + lockdep_assert_held(&uuid_mutex);
3137 ++ /*
3138 ++ * The device_list_mutex cannot be taken here in case opening the
3139 ++ * underlying device takes further locks like bd_mutex.
3140 ++ *
3141 ++ * We also don't need the lock here as this is called during mount and
3142 ++ * exclusion is provided by uuid_mutex
3143 ++ */
3144 +
3145 +- mutex_lock(&fs_devices->device_list_mutex);
3146 + if (fs_devices->opened) {
3147 + fs_devices->opened++;
3148 + ret = 0;
3149 +@@ -1427,7 +1443,6 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
3150 + list_sort(NULL, &fs_devices->devices, devid_cmp);
3151 + ret = open_fs_devices(fs_devices, flags, holder);
3152 + }
3153 +- mutex_unlock(&fs_devices->device_list_mutex);
3154 +
3155 + return ret;
3156 + }
3157 +@@ -3283,7 +3298,7 @@ static int insert_balance_item(struct btrfs_fs_info *fs_info,
3158 + if (!path)
3159 + return -ENOMEM;
3160 +
3161 +- trans = btrfs_start_transaction(root, 0);
3162 ++ trans = btrfs_start_transaction_fallback_global_rsv(root, 0);
3163 + if (IS_ERR(trans)) {
3164 + btrfs_free_path(path);
3165 + return PTR_ERR(trans);
3166 +@@ -4246,7 +4261,22 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
3167 + mutex_lock(&fs_info->balance_mutex);
3168 + if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req))
3169 + btrfs_info(fs_info, "balance: paused");
3170 +- else if (ret == -ECANCELED && atomic_read(&fs_info->balance_cancel_req))
3171 ++ /*
3172 ++ * Balance can be canceled by:
3173 ++ *
3174 ++ * - Regular cancel request
3175 ++ * Then ret == -ECANCELED and balance_cancel_req > 0
3176 ++ *
3177 ++ * - Fatal signal to "btrfs" process
3178 ++ * Either the signal is caught by wait_reserve_ticket() and callers
3179 ++ * got -EINTR, or caught by btrfs_should_cancel_balance() and
3180 ++ * got -ECANCELED.
3181 ++ * Either way, in this case balance_cancel_req = 0, and
3182 ++ * ret == -EINTR or ret == -ECANCELED.
3183 ++ *
3184 ++ * So here we only check the return value to catch canceled balance.
3185 ++ */
3186 ++ else if (ret == -ECANCELED || ret == -EINTR)
3187 + btrfs_info(fs_info, "balance: canceled");
3188 + else
3189 + btrfs_info(fs_info, "balance: ended with status: %d", ret);
3190 +@@ -7267,7 +7297,6 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
3191 + * otherwise we don't need it.
3192 + */
3193 + mutex_lock(&uuid_mutex);
3194 +- mutex_lock(&fs_info->chunk_mutex);
3195 +
3196 + /*
3197 + * It is possible for mount and umount to race in such a way that
3198 +@@ -7312,7 +7341,9 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
3199 + } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
3200 + struct btrfs_chunk *chunk;
3201 + chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3202 ++ mutex_lock(&fs_info->chunk_mutex);
3203 + ret = read_one_chunk(&found_key, leaf, chunk);
3204 ++ mutex_unlock(&fs_info->chunk_mutex);
3205 + if (ret)
3206 + goto error;
3207 + }
3208 +@@ -7342,7 +7373,6 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
3209 + }
3210 + ret = 0;
3211 + error:
3212 +- mutex_unlock(&fs_info->chunk_mutex);
3213 + mutex_unlock(&uuid_mutex);
3214 +
3215 + btrfs_free_path(path);
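
The expanded comment above spells out why a canceled balance can surface as either -ECANCELED or -EINTR, so the status message now keys off the return value alone instead of requiring balance_cancel_req to be set. A small classifier in the same spirit, as a userspace sketch:

#include <errno.h>
#include <stdio.h>

/* Mirror the reporting logic: "paused" needs both the errno and a
 * pending pause request; "canceled" is recognized from the return
 * value alone, because a fatal signal can yield -EINTR without any
 * cancel request ever having been posted. */
static const char *balance_status(int ret, int pause_req)
{
	if (ret == -ECANCELED && pause_req > 0)
		return "paused";
	if (ret == -ECANCELED || ret == -EINTR)
		return "canceled";
	return ret ? "ended with error" : "ended";
}

int main(void)
{
	printf("%s\n", balance_status(-EINTR, 0));     /* canceled */
	printf("%s\n", balance_status(-ECANCELED, 1)); /* paused */
	printf("%s\n", balance_status(0, 0));          /* ended */
	return 0;
}
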
3216 +diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
3217 +index 2e4764fd18727..3367a8194f24b 100644
3218 +--- a/fs/ceph/dir.c
3219 ++++ b/fs/ceph/dir.c
3220 +@@ -920,6 +920,10 @@ static int ceph_symlink(struct inode *dir, struct dentry *dentry,
3221 + req->r_num_caps = 2;
3222 + req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
3223 + req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
3224 ++ if (as_ctx.pagelist) {
3225 ++ req->r_pagelist = as_ctx.pagelist;
3226 ++ as_ctx.pagelist = NULL;
3227 ++ }
3228 + err = ceph_mdsc_do_request(mdsc, dir, req);
3229 + if (!err && !req->r_reply_info.head->is_dentry)
3230 + err = ceph_handle_notrace_create(dir, dentry);
3231 +diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
3232 +index b79fe6549df6f..701bc3f4d4ba1 100644
3233 +--- a/fs/ceph/mds_client.c
3234 ++++ b/fs/ceph/mds_client.c
3235 +@@ -3091,8 +3091,10 @@ static void handle_session(struct ceph_mds_session *session,
3236 + goto bad;
3237 + /* version >= 3, feature bits */
3238 + ceph_decode_32_safe(&p, end, len, bad);
3239 +- ceph_decode_64_safe(&p, end, features, bad);
3240 +- p += len - sizeof(features);
3241 ++ if (len) {
3242 ++ ceph_decode_64_safe(&p, end, features, bad);
3243 ++ p += len - sizeof(features);
3244 ++ }
3245 + }
3246 +
3247 + mutex_lock(&mdsc->mutex);
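
The ceph fix only decodes the feature bits when the advertised length is nonzero, so a session message carrying an empty feature blob no longer consumes another field's bytes as features. A bounds-checked sketch of decoding an optional length-prefixed field; it assumes a little-endian host, and the wire layout is illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Decode: u32 len, then len bytes of payload whose first 8 bytes
 * (if present) are a u64 of feature bits.
 * Returns 0 on success, -1 on truncated input. */
static int decode_features(const uint8_t *p, size_t avail, uint64_t *features)
{
	uint32_t len;

	*features = 0;
	if (avail < sizeof(len))
		return -1;
	memcpy(&len, p, sizeof(len));
	p += sizeof(len);
	avail -= sizeof(len);

	if (len > avail)
		return -1;              /* advertised more than we have */
	if (len) {                      /* zero length: field absent, OK */
		if (len < sizeof(*features))
			return -1;
		memcpy(features, p, sizeof(*features));
	}
	return 0;
}

int main(void)
{
	uint8_t msg[12] = { 8, 0, 0, 0, 0x2a, 0, 0, 0, 0, 0, 0, 0 };
	uint64_t f;
	if (decode_features(msg, sizeof(msg), &f) == 0)
		printf("features=0x%llx\n", (unsigned long long)f);
	uint8_t empty[4] = { 0, 0, 0, 0 };   /* len == 0: skip decode */
	printf("empty ok=%d f=%llu\n",
	       decode_features(empty, sizeof(empty), &f),
	       (unsigned long long)f);
	return 0;
}
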
3248 +diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
3249 +index 14265b4bbcc00..2fc96f7923ee5 100644
3250 +--- a/fs/cifs/smb2misc.c
3251 ++++ b/fs/cifs/smb2misc.c
3252 +@@ -509,15 +509,31 @@ cifs_ses_oplock_break(struct work_struct *work)
3253 + kfree(lw);
3254 + }
3255 +
3256 ++static void
3257 ++smb2_queue_pending_open_break(struct tcon_link *tlink, __u8 *lease_key,
3258 ++ __le32 new_lease_state)
3259 ++{
3260 ++ struct smb2_lease_break_work *lw;
3261 ++
3262 ++ lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL);
3263 ++ if (!lw) {
3264 ++ cifs_put_tlink(tlink);
3265 ++ return;
3266 ++ }
3267 ++
3268 ++ INIT_WORK(&lw->lease_break, cifs_ses_oplock_break);
3269 ++ lw->tlink = tlink;
3270 ++ lw->lease_state = new_lease_state;
3271 ++ memcpy(lw->lease_key, lease_key, SMB2_LEASE_KEY_SIZE);
3272 ++ queue_work(cifsiod_wq, &lw->lease_break);
3273 ++}
3274 ++
3275 + static bool
3276 +-smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
3277 +- struct smb2_lease_break_work *lw)
3278 ++smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp)
3279 + {
3280 +- bool found;
3281 + __u8 lease_state;
3282 + struct list_head *tmp;
3283 + struct cifsFileInfo *cfile;
3284 +- struct cifs_pending_open *open;
3285 + struct cifsInodeInfo *cinode;
3286 + int ack_req = le32_to_cpu(rsp->Flags &
3287 + SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED);
3288 +@@ -556,22 +572,29 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
3289 + &cinode->flags);
3290 +
3291 + cifs_queue_oplock_break(cfile);
3292 +- kfree(lw);
3293 + return true;
3294 + }
3295 +
3296 +- found = false;
3297 ++ return false;
3298 ++}
3299 ++
3300 ++static struct cifs_pending_open *
3301 ++smb2_tcon_find_pending_open_lease(struct cifs_tcon *tcon,
3302 ++ struct smb2_lease_break *rsp)
3303 ++{
3304 ++ __u8 lease_state = le32_to_cpu(rsp->NewLeaseState);
3305 ++ int ack_req = le32_to_cpu(rsp->Flags &
3306 ++ SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED);
3307 ++ struct cifs_pending_open *open;
3308 ++ struct cifs_pending_open *found = NULL;
3309 ++
3310 + list_for_each_entry(open, &tcon->pending_opens, olist) {
3311 + if (memcmp(open->lease_key, rsp->LeaseKey,
3312 + SMB2_LEASE_KEY_SIZE))
3313 + continue;
3314 +
3315 + if (!found && ack_req) {
3316 +- found = true;
3317 +- memcpy(lw->lease_key, open->lease_key,
3318 +- SMB2_LEASE_KEY_SIZE);
3319 +- lw->tlink = cifs_get_tlink(open->tlink);
3320 +- queue_work(cifsiod_wq, &lw->lease_break);
3321 ++ found = open;
3322 + }
3323 +
3324 + cifs_dbg(FYI, "found in the pending open list\n");
3325 +@@ -592,14 +615,7 @@ smb2_is_valid_lease_break(char *buffer)
3326 + struct TCP_Server_Info *server;
3327 + struct cifs_ses *ses;
3328 + struct cifs_tcon *tcon;
3329 +- struct smb2_lease_break_work *lw;
3330 +-
3331 +- lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL);
3332 +- if (!lw)
3333 +- return false;
3334 +-
3335 +- INIT_WORK(&lw->lease_break, cifs_ses_oplock_break);
3336 +- lw->lease_state = rsp->NewLeaseState;
3337 ++ struct cifs_pending_open *open;
3338 +
3339 + cifs_dbg(FYI, "Checking for lease break\n");
3340 +
3341 +@@ -617,11 +633,27 @@ smb2_is_valid_lease_break(char *buffer)
3342 + spin_lock(&tcon->open_file_lock);
3343 + cifs_stats_inc(
3344 + &tcon->stats.cifs_stats.num_oplock_brks);
3345 +- if (smb2_tcon_has_lease(tcon, rsp, lw)) {
3346 ++ if (smb2_tcon_has_lease(tcon, rsp)) {
3347 + spin_unlock(&tcon->open_file_lock);
3348 + spin_unlock(&cifs_tcp_ses_lock);
3349 + return true;
3350 + }
3351 ++ open = smb2_tcon_find_pending_open_lease(tcon,
3352 ++ rsp);
3353 ++ if (open) {
3354 ++ __u8 lease_key[SMB2_LEASE_KEY_SIZE];
3355 ++ struct tcon_link *tlink;
3356 ++
3357 ++ tlink = cifs_get_tlink(open->tlink);
3358 ++ memcpy(lease_key, open->lease_key,
3359 ++ SMB2_LEASE_KEY_SIZE);
3360 ++ spin_unlock(&tcon->open_file_lock);
3361 ++ spin_unlock(&cifs_tcp_ses_lock);
3362 ++ smb2_queue_pending_open_break(tlink,
3363 ++ lease_key,
3364 ++ rsp->NewLeaseState);
3365 ++ return true;
3366 ++ }
3367 + spin_unlock(&tcon->open_file_lock);
3368 +
3369 + if (tcon->crfid.is_valid &&
3370 +@@ -639,7 +671,6 @@ smb2_is_valid_lease_break(char *buffer)
3371 + }
3372 + }
3373 + spin_unlock(&cifs_tcp_ses_lock);
3374 +- kfree(lw);
3375 + cifs_dbg(FYI, "Can not process lease break - no lease matched\n");
3376 + return false;
3377 + }
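
The restructured lease-break path no longer preallocates a work item for every incoming break. Instead it copies the lease key while still under the spinlocks, drops them, and only then allocates and queues the deferred work, so the sleeping allocation never happens under a spinlock. A pthread sketch of that copy-then-unlock-then-queue shape; the names are illustrative:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define KEY_SIZE 16

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char pending_key[KEY_SIZE] = "lease-key-000001";

/* Stand-in for queue_work(): runs after all locks are dropped,
 * so it is free to sleep or allocate. */
static void queue_break_work(const unsigned char *key)
{
	printf("queued break for %.16s\n", key);
}

static void handle_lease_break(void)
{
	unsigned char key[KEY_SIZE];

	pthread_mutex_lock(&list_lock);
	/* Copy what the deferred work needs while the entry is still
	 * protected; nothing after the unlock may touch the shared
	 * list again. */
	memcpy(key, pending_key, KEY_SIZE);
	pthread_mutex_unlock(&list_lock);

	queue_break_work(key);  /* a sleeping allocation is safe here */
}

int main(void)
{
	handle_lease_break();
	return 0;
}
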
3378 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
3379 +index 06b1a86d76b18..7ff05c06f2a4c 100644
3380 +--- a/fs/cifs/smb2pdu.c
3381 ++++ b/fs/cifs/smb2pdu.c
3382 +@@ -1323,6 +1323,8 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
3383 + spnego_key = cifs_get_spnego_key(ses);
3384 + if (IS_ERR(spnego_key)) {
3385 + rc = PTR_ERR(spnego_key);
3386 ++ if (rc == -ENOKEY)
3387 ++ cifs_dbg(VFS, "Verify user has a krb5 ticket and keyutils is installed\n");
3388 + spnego_key = NULL;
3389 + goto out;
3390 + }
3391 +diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
3392 +index fda7d3f5b4be5..432c3febea6df 100644
3393 +--- a/fs/ext2/ialloc.c
3394 ++++ b/fs/ext2/ialloc.c
3395 +@@ -80,6 +80,7 @@ static void ext2_release_inode(struct super_block *sb, int group, int dir)
3396 + if (dir)
3397 + le16_add_cpu(&desc->bg_used_dirs_count, -1);
3398 + spin_unlock(sb_bgl_lock(EXT2_SB(sb), group));
3399 ++ percpu_counter_inc(&EXT2_SB(sb)->s_freeinodes_counter);
3400 + if (dir)
3401 + percpu_counter_dec(&EXT2_SB(sb)->s_dirs_counter);
3402 + mark_buffer_dirty(bh);
3403 +@@ -528,7 +529,7 @@ got:
3404 + goto fail;
3405 + }
3406 +
3407 +- percpu_counter_add(&sbi->s_freeinodes_counter, -1);
3408 ++ percpu_counter_dec(&sbi->s_freeinodes_counter);
3409 + if (S_ISDIR(mode))
3410 + percpu_counter_inc(&sbi->s_dirs_counter);
3411 +
3412 +diff --git a/fs/minix/inode.c b/fs/minix/inode.c
3413 +index 0dd929346f3f3..7b09a9158e401 100644
3414 +--- a/fs/minix/inode.c
3415 ++++ b/fs/minix/inode.c
3416 +@@ -150,8 +150,10 @@ static int minix_remount (struct super_block * sb, int * flags, char * data)
3417 + return 0;
3418 + }
3419 +
3420 +-static bool minix_check_superblock(struct minix_sb_info *sbi)
3421 ++static bool minix_check_superblock(struct super_block *sb)
3422 + {
3423 ++ struct minix_sb_info *sbi = minix_sb(sb);
3424 ++
3425 + if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0)
3426 + return false;
3427 +
3428 +@@ -161,7 +163,7 @@ static bool minix_check_superblock(struct minix_sb_info *sbi)
3429 + * of indirect blocks which places the limit well above U32_MAX.
3430 + */
3431 + if (sbi->s_version == MINIX_V1 &&
3432 +- sbi->s_max_size > (7 + 512 + 512*512) * BLOCK_SIZE)
3433 ++ sb->s_maxbytes > (7 + 512 + 512*512) * BLOCK_SIZE)
3434 + return false;
3435 +
3436 + return true;
3437 +@@ -202,7 +204,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
3438 + sbi->s_zmap_blocks = ms->s_zmap_blocks;
3439 + sbi->s_firstdatazone = ms->s_firstdatazone;
3440 + sbi->s_log_zone_size = ms->s_log_zone_size;
3441 +- sbi->s_max_size = ms->s_max_size;
3442 ++ s->s_maxbytes = ms->s_max_size;
3443 + s->s_magic = ms->s_magic;
3444 + if (s->s_magic == MINIX_SUPER_MAGIC) {
3445 + sbi->s_version = MINIX_V1;
3446 +@@ -233,7 +235,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
3447 + sbi->s_zmap_blocks = m3s->s_zmap_blocks;
3448 + sbi->s_firstdatazone = m3s->s_firstdatazone;
3449 + sbi->s_log_zone_size = m3s->s_log_zone_size;
3450 +- sbi->s_max_size = m3s->s_max_size;
3451 ++ s->s_maxbytes = m3s->s_max_size;
3452 + sbi->s_ninodes = m3s->s_ninodes;
3453 + sbi->s_nzones = m3s->s_zones;
3454 + sbi->s_dirsize = 64;
3455 +@@ -245,7 +247,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
3456 + } else
3457 + goto out_no_fs;
3458 +
3459 +- if (!minix_check_superblock(sbi))
3460 ++ if (!minix_check_superblock(s))
3461 + goto out_illegal_sb;
3462 +
3463 + /*
3464 +diff --git a/fs/minix/itree_v1.c b/fs/minix/itree_v1.c
3465 +index 046cc96ee7adb..1fed906042aa8 100644
3466 +--- a/fs/minix/itree_v1.c
3467 ++++ b/fs/minix/itree_v1.c
3468 +@@ -29,12 +29,12 @@ static int block_to_path(struct inode * inode, long block, int offsets[DEPTH])
3469 + if (block < 0) {
3470 + printk("MINIX-fs: block_to_path: block %ld < 0 on dev %pg\n",
3471 + block, inode->i_sb->s_bdev);
3472 +- } else if (block >= (minix_sb(inode->i_sb)->s_max_size/BLOCK_SIZE)) {
3473 +- if (printk_ratelimit())
3474 +- printk("MINIX-fs: block_to_path: "
3475 +- "block %ld too big on dev %pg\n",
3476 +- block, inode->i_sb->s_bdev);
3477 +- } else if (block < 7) {
3478 ++ return 0;
3479 ++ }
3480 ++ if ((u64)block * BLOCK_SIZE >= inode->i_sb->s_maxbytes)
3481 ++ return 0;
3482 ++
3483 ++ if (block < 7) {
3484 + offsets[n++] = block;
3485 + } else if ((block -= 7) < 512) {
3486 + offsets[n++] = 7;
3487 +diff --git a/fs/minix/itree_v2.c b/fs/minix/itree_v2.c
3488 +index f7fc7eccccccd..9d00f31a2d9d1 100644
3489 +--- a/fs/minix/itree_v2.c
3490 ++++ b/fs/minix/itree_v2.c
3491 +@@ -32,13 +32,12 @@ static int block_to_path(struct inode * inode, long block, int offsets[DEPTH])
3492 + if (block < 0) {
3493 + printk("MINIX-fs: block_to_path: block %ld < 0 on dev %pg\n",
3494 + block, sb->s_bdev);
3495 +- } else if ((u64)block * (u64)sb->s_blocksize >=
3496 +- minix_sb(sb)->s_max_size) {
3497 +- if (printk_ratelimit())
3498 +- printk("MINIX-fs: block_to_path: "
3499 +- "block %ld too big on dev %pg\n",
3500 +- block, sb->s_bdev);
3501 +- } else if (block < DIRCOUNT) {
3502 ++ return 0;
3503 ++ }
3504 ++ if ((u64)block * (u64)sb->s_blocksize >= sb->s_maxbytes)
3505 ++ return 0;
3506 ++
3507 ++ if (block < DIRCOUNT) {
3508 + offsets[n++] = block;
3509 + } else if ((block -= DIRCOUNT) < INDIRCOUNT(sb)) {
3510 + offsets[n++] = DIRCOUNT;
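
Both minix hunks replace the per-superblock s_max_size with the VFS s_maxbytes and, crucially, widen block to u64 before the multiply, so a huge block number cannot wrap 32-bit arithmetic and sneak past the bound. A compact demonstration of the difference:

#include <stdint.h>
#include <stdio.h>

static int block_in_range(long block, uint32_t blocksize, uint64_t maxbytes)
{
	if (block < 0)
		return 0;
	/* Widen before multiplying: a 32-bit product could wrap and
	 * compare as "small". */
	return (uint64_t)block * blocksize < maxbytes;
}

int main(void)
{
	uint64_t maxbytes = 1ull << 32;      /* 4 GiB limit */
	long block = 0x400000;               /* 4M blocks of 1 KiB = 4 GiB */

	printf("safe:  %d\n", block_in_range(block, 1024, maxbytes)); /* 0 */
	/* The buggy 32-bit form: 0x400000 * 1024 wraps to 0, which
	 * wrongly passes the bound. */
	printf("buggy: %d\n",
	       (uint32_t)((uint32_t)block * 1024u) < maxbytes);       /* 1 */
	return 0;
}
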
3511 +diff --git a/fs/minix/minix.h b/fs/minix/minix.h
3512 +index df081e8afcc3c..168d45d3de73e 100644
3513 +--- a/fs/minix/minix.h
3514 ++++ b/fs/minix/minix.h
3515 +@@ -32,7 +32,6 @@ struct minix_sb_info {
3516 + unsigned long s_zmap_blocks;
3517 + unsigned long s_firstdatazone;
3518 + unsigned long s_log_zone_size;
3519 +- unsigned long s_max_size;
3520 + int s_dirsize;
3521 + int s_namelen;
3522 + struct buffer_head ** s_imap;
3523 +diff --git a/fs/nfs/file.c b/fs/nfs/file.c
3524 +index 95dc90570786c..387a2cfa7e172 100644
3525 +--- a/fs/nfs/file.c
3526 ++++ b/fs/nfs/file.c
3527 +@@ -140,6 +140,7 @@ static int
3528 + nfs_file_flush(struct file *file, fl_owner_t id)
3529 + {
3530 + struct inode *inode = file_inode(file);
3531 ++ errseq_t since;
3532 +
3533 + dprintk("NFS: flush(%pD2)\n", file);
3534 +
3535 +@@ -148,7 +149,9 @@ nfs_file_flush(struct file *file, fl_owner_t id)
3536 + return 0;
3537 +
3538 + /* Flush writes to the server and return any errors */
3539 +- return nfs_wb_all(inode);
3540 ++ since = filemap_sample_wb_err(file->f_mapping);
3541 ++ nfs_wb_all(inode);
3542 ++ return filemap_check_wb_err(file->f_mapping, since);
3543 + }
3544 +
3545 + ssize_t
3546 +@@ -580,12 +583,14 @@ static const struct vm_operations_struct nfs_file_vm_ops = {
3547 + .page_mkwrite = nfs_vm_page_mkwrite,
3548 + };
3549 +
3550 +-static int nfs_need_check_write(struct file *filp, struct inode *inode)
3551 ++static int nfs_need_check_write(struct file *filp, struct inode *inode,
3552 ++ int error)
3553 + {
3554 + struct nfs_open_context *ctx;
3555 +
3556 + ctx = nfs_file_open_context(filp);
3557 +- if (nfs_ctx_key_to_expire(ctx, inode))
3558 ++ if (nfs_error_is_fatal_on_server(error) ||
3559 ++ nfs_ctx_key_to_expire(ctx, inode))
3560 + return 1;
3561 + return 0;
3562 + }
3563 +@@ -596,6 +601,8 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
3564 + struct inode *inode = file_inode(file);
3565 + unsigned long written = 0;
3566 + ssize_t result;
3567 ++ errseq_t since;
3568 ++ int error;
3569 +
3570 + result = nfs_key_timeout_notify(file, inode);
3571 + if (result)
3572 +@@ -620,6 +627,7 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
3573 + if (iocb->ki_pos > i_size_read(inode))
3574 + nfs_revalidate_mapping(inode, file->f_mapping);
3575 +
3576 ++ since = filemap_sample_wb_err(file->f_mapping);
3577 + nfs_start_io_write(inode);
3578 + result = generic_write_checks(iocb, from);
3579 + if (result > 0) {
3580 +@@ -638,7 +646,8 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
3581 + goto out;
3582 +
3583 + /* Return error values */
3584 +- if (nfs_need_check_write(file, inode)) {
3585 ++ error = filemap_check_wb_err(file->f_mapping, since);
3586 ++ if (nfs_need_check_write(file, inode, error)) {
3587 + int err = nfs_wb_all(inode);
3588 + if (err < 0)
3589 + result = err;
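
The NFS flush and write paths now bracket writeback with filemap_sample_wb_err()/filemap_check_wb_err(), so each caller reports only errors recorded since it last looked rather than whatever nfs_wb_all() happened to return. A simplified single-threaded analogue of the errseq idea; the real errseq_t packs the error into the sequence word and tracks a "seen" bit, while this mock just pairs an error with a generation counter:

#include <errno.h>
#include <stdio.h>

/* Mock of per-mapping writeback error state. */
struct wb_err {
	int err;            /* last error recorded */
	unsigned int seq;   /* bumped every time an error is recorded */
};

static unsigned int sample_wb_err(const struct wb_err *w)
{
	return w->seq;      /* remember "where we were" */
}

static int check_wb_err(const struct wb_err *w, unsigned int since)
{
	return (w->seq != since) ? w->err : 0;  /* only new errors count */
}

static void record_wb_err(struct wb_err *w, int err)
{
	w->err = err;
	w->seq++;
}

int main(void)
{
	struct wb_err m = { 0, 0 };

	record_wb_err(&m, -EIO);           /* old error, someone else's */
	unsigned int since = sample_wb_err(&m);
	/* ... writeback runs here; no new failure ... */
	printf("no new error: %d\n", check_wb_err(&m, since));  /* 0 */
	record_wb_err(&m, -ENOSPC);        /* failure during our writeback */
	printf("new error:    %d\n", check_wb_err(&m, since));  /* -28 */
	return 0;
}
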
3590 +diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
3591 +index fb55c04cdc6bd..534b6fd70ffdb 100644
3592 +--- a/fs/nfs/nfs4file.c
3593 ++++ b/fs/nfs/nfs4file.c
3594 +@@ -109,6 +109,7 @@ static int
3595 + nfs4_file_flush(struct file *file, fl_owner_t id)
3596 + {
3597 + struct inode *inode = file_inode(file);
3598 ++ errseq_t since;
3599 +
3600 + dprintk("NFS: flush(%pD2)\n", file);
3601 +
3602 +@@ -124,7 +125,9 @@ nfs4_file_flush(struct file *file, fl_owner_t id)
3603 + return filemap_fdatawrite(file->f_mapping);
3604 +
3605 + /* Flush writes to the server and return any errors */
3606 +- return nfs_wb_all(inode);
3607 ++ since = filemap_sample_wb_err(file->f_mapping);
3608 ++ nfs_wb_all(inode);
3609 ++ return filemap_check_wb_err(file->f_mapping, since);
3610 + }
3611 +
3612 + #ifdef CONFIG_NFS_V4_2
3613 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
3614 +index 1a1bd2fe6e98d..d0cb827b72cfa 100644
3615 +--- a/fs/nfs/nfs4proc.c
3616 ++++ b/fs/nfs/nfs4proc.c
3617 +@@ -5811,8 +5811,6 @@ static int _nfs4_get_security_label(struct inode *inode, void *buf,
3618 + return ret;
3619 + if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
3620 + return -ENOENT;
3621 +- if (buflen < label.len)
3622 +- return -ERANGE;
3623 + return 0;
3624 + }
3625 +
3626 +diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
3627 +index 7c0ff1a3b5914..677751bc3a334 100644
3628 +--- a/fs/nfs/nfs4xdr.c
3629 ++++ b/fs/nfs/nfs4xdr.c
3630 +@@ -4169,7 +4169,11 @@ static int decode_attr_security_label(struct xdr_stream *xdr, uint32_t *bitmap,
3631 + return -EIO;
3632 + if (len < NFS4_MAXLABELLEN) {
3633 + if (label) {
3634 +- memcpy(label->label, p, len);
3635 ++ if (label->len) {
3636 ++ if (label->len < len)
3637 ++ return -ERANGE;
3638 ++ memcpy(label->label, p, len);
3639 ++ }
3640 + label->len = len;
3641 + label->pi = pi;
3642 + label->lfs = lfs;
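
The xdr change moves the -ERANGE check into the decoder and makes a zero-length caller buffer mean "just report the size": the label length is always returned, but bytes are copied only when the caller actually supplied room. That enables the classic two-call probe pattern, sketched in userspace with an invented label:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Server-side label, pretending to come off the wire. */
static const char wire_label[] = "system_u:object_r:nfs_t:s0";

/* If *buflen is 0, only report the needed size. Otherwise copy,
 * failing with -ERANGE when the supplied buffer is too small. */
static int get_label(char *buf, size_t *buflen)
{
	size_t len = sizeof(wire_label);

	if (*buflen) {
		if (*buflen < len)
			return -ERANGE;
		memcpy(buf, wire_label, len);
	}
	*buflen = len;          /* always tell the caller the real size */
	return 0;
}

int main(void)
{
	size_t need = 0;
	get_label(NULL, &need);          /* probe: learn the size */

	char *buf = malloc(need);
	size_t len = need;
	if (get_label(buf, &len) == 0)
		printf("label (%zu bytes): %s\n", len, buf);
	free(buf);
	return 0;
}
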
3643 +diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
3644 +index 9461bd3e1c0c8..0a8cd8e59a92c 100644
3645 +--- a/fs/ocfs2/ocfs2.h
3646 ++++ b/fs/ocfs2/ocfs2.h
3647 +@@ -326,8 +326,8 @@ struct ocfs2_super
3648 + spinlock_t osb_lock;
3649 + u32 s_next_generation;
3650 + unsigned long osb_flags;
3651 +- s16 s_inode_steal_slot;
3652 +- s16 s_meta_steal_slot;
3653 ++ u16 s_inode_steal_slot;
3654 ++ u16 s_meta_steal_slot;
3655 + atomic_t s_num_inodes_stolen;
3656 + atomic_t s_num_meta_stolen;
3657 +
3658 +diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
3659 +index 503e724d39f53..5e0eaea474055 100644
3660 +--- a/fs/ocfs2/suballoc.c
3661 ++++ b/fs/ocfs2/suballoc.c
3662 +@@ -879,9 +879,9 @@ static void __ocfs2_set_steal_slot(struct ocfs2_super *osb, int slot, int type)
3663 + {
3664 + spin_lock(&osb->osb_lock);
3665 + if (type == INODE_ALLOC_SYSTEM_INODE)
3666 +- osb->s_inode_steal_slot = slot;
3667 ++ osb->s_inode_steal_slot = (u16)slot;
3668 + else if (type == EXTENT_ALLOC_SYSTEM_INODE)
3669 +- osb->s_meta_steal_slot = slot;
3670 ++ osb->s_meta_steal_slot = (u16)slot;
3671 + spin_unlock(&osb->osb_lock);
3672 + }
3673 +
3674 +diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
3675 +index c81e86c623807..70d8857b161df 100644
3676 +--- a/fs/ocfs2/super.c
3677 ++++ b/fs/ocfs2/super.c
3678 +@@ -78,7 +78,7 @@ struct mount_options
3679 + unsigned long commit_interval;
3680 + unsigned long mount_opt;
3681 + unsigned int atime_quantum;
3682 +- signed short slot;
3683 ++ unsigned short slot;
3684 + int localalloc_opt;
3685 + unsigned int resv_level;
3686 + int dir_resv_level;
3687 +@@ -1334,7 +1334,7 @@ static int ocfs2_parse_options(struct super_block *sb,
3688 + goto bail;
3689 + }
3690 + if (option)
3691 +- mopt->slot = (s16)option;
3692 ++ mopt->slot = (u16)option;
3693 + break;
3694 + case Opt_commit:
3695 + if (match_int(&args[0], &option)) {
3696 +diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c
3697 +index a5612abc09363..bcd4fd5ad1751 100644
3698 +--- a/fs/orangefs/file.c
3699 ++++ b/fs/orangefs/file.c
3700 +@@ -311,23 +311,8 @@ static ssize_t orangefs_file_read_iter(struct kiocb *iocb,
3701 + struct iov_iter *iter)
3702 + {
3703 + int ret;
3704 +- struct orangefs_read_options *ro;
3705 +-
3706 + orangefs_stats.reads++;
3707 +
3708 +- /*
3709 +- * Remember how they set "count" in read(2) or pread(2) or whatever -
3710 +- * users can use count as a knob to control orangefs io size and later
3711 +- * we can try to help them fill as many pages as possible in readpage.
3712 +- */
3713 +- if (!iocb->ki_filp->private_data) {
3714 +- iocb->ki_filp->private_data = kmalloc(sizeof *ro, GFP_KERNEL);
3715 +- if (!iocb->ki_filp->private_data)
3716 +- return(ENOMEM);
3717 +- ro = iocb->ki_filp->private_data;
3718 +- ro->blksiz = iter->count;
3719 +- }
3720 +-
3721 + down_read(&file_inode(iocb->ki_filp)->i_rwsem);
3722 + ret = orangefs_revalidate_mapping(file_inode(iocb->ki_filp));
3723 + if (ret)
3724 +@@ -615,12 +600,6 @@ static int orangefs_lock(struct file *filp, int cmd, struct file_lock *fl)
3725 + return rc;
3726 + }
3727 +
3728 +-static int orangefs_file_open(struct inode * inode, struct file *file)
3729 +-{
3730 +- file->private_data = NULL;
3731 +- return generic_file_open(inode, file);
3732 +-}
3733 +-
3734 + static int orangefs_flush(struct file *file, fl_owner_t id)
3735 + {
3736 + /*
3737 +@@ -634,9 +613,6 @@ static int orangefs_flush(struct file *file, fl_owner_t id)
3738 + struct inode *inode = file->f_mapping->host;
3739 + int r;
3740 +
3741 +- kfree(file->private_data);
3742 +- file->private_data = NULL;
3743 +-
3744 + if (inode->i_state & I_DIRTY_TIME) {
3745 + spin_lock(&inode->i_lock);
3746 + inode->i_state &= ~I_DIRTY_TIME;
3747 +@@ -659,7 +635,7 @@ const struct file_operations orangefs_file_operations = {
3748 + .lock = orangefs_lock,
3749 + .unlocked_ioctl = orangefs_ioctl,
3750 + .mmap = orangefs_file_mmap,
3751 +- .open = orangefs_file_open,
3752 ++ .open = generic_file_open,
3753 + .flush = orangefs_flush,
3754 + .release = orangefs_file_release,
3755 + .fsync = orangefs_fsync,
3756 +diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
3757 +index efb12197da181..636892ffec0ba 100644
3758 +--- a/fs/orangefs/inode.c
3759 ++++ b/fs/orangefs/inode.c
3760 +@@ -259,46 +259,19 @@ static int orangefs_readpage(struct file *file, struct page *page)
3761 + pgoff_t index; /* which page */
3762 + struct page *next_page;
3763 + char *kaddr;
3764 +- struct orangefs_read_options *ro = file->private_data;
3765 + loff_t read_size;
3766 +- loff_t roundedup;
3767 + int buffer_index = -1; /* orangefs shared memory slot */
3768 + int slot_index; /* index into slot */
3769 + int remaining;
3770 +
3771 + /*
3772 +- * If they set some miniscule size for "count" in read(2)
3773 +- * (for example) then let's try to read a page, or the whole file
3774 +- * if it is smaller than a page. Once "count" goes over a page
3775 +- * then lets round up to the highest page size multiple that is
3776 +- * less than or equal to "count" and do that much orangefs IO and
3777 +- * try to fill as many pages as we can from it.
3778 +- *
3779 +- * "count" should be represented in ro->blksiz.
3780 +- *
3781 +- * inode->i_size = file size.
3782 ++ * Get up to this many bytes from Orangefs at a time and try
3783 ++ * to fill them into the page cache at once. Tests with dd made
3784 ++ * this seem like a reasonable static number; if there is
3785 ++ * interest, perhaps this number could be made settable through

3786 ++ * sysfs...
3787 + */
3788 +- if (ro) {
3789 +- if (ro->blksiz < PAGE_SIZE) {
3790 +- if (inode->i_size < PAGE_SIZE)
3791 +- read_size = inode->i_size;
3792 +- else
3793 +- read_size = PAGE_SIZE;
3794 +- } else {
3795 +- roundedup = ((PAGE_SIZE - 1) & ro->blksiz) ?
3796 +- ((ro->blksiz + PAGE_SIZE) & ~(PAGE_SIZE -1)) :
3797 +- ro->blksiz;
3798 +- if (roundedup > inode->i_size)
3799 +- read_size = inode->i_size;
3800 +- else
3801 +- read_size = roundedup;
3802 +-
3803 +- }
3804 +- } else {
3805 +- read_size = PAGE_SIZE;
3806 +- }
3807 +- if (!read_size)
3808 +- read_size = PAGE_SIZE;
3809 ++ read_size = 524288;
3810 +
3811 + if (PageDirty(page))
3812 + orangefs_launder_page(page);
3813 +diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h
3814 +index 34a6c99fa29bd..3003007681a05 100644
3815 +--- a/fs/orangefs/orangefs-kernel.h
3816 ++++ b/fs/orangefs/orangefs-kernel.h
3817 +@@ -239,10 +239,6 @@ struct orangefs_write_range {
3818 + kgid_t gid;
3819 + };
3820 +
3821 +-struct orangefs_read_options {
3822 +- ssize_t blksiz;
3823 +-};
3824 +-
3825 + extern struct orangefs_stats orangefs_stats;
3826 +
3827 + /*
3828 +diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
3829 +index 826dad0243dcc..a6ae2428e4c96 100644
3830 +--- a/fs/ubifs/journal.c
3831 ++++ b/fs/ubifs/journal.c
3832 +@@ -539,7 +539,7 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
3833 + const struct fscrypt_name *nm, const struct inode *inode,
3834 + int deletion, int xent)
3835 + {
3836 +- int err, dlen, ilen, len, lnum, ino_offs, dent_offs;
3837 ++ int err, dlen, ilen, len, lnum, ino_offs, dent_offs, orphan_added = 0;
3838 + int aligned_dlen, aligned_ilen, sync = IS_DIRSYNC(dir);
3839 + int last_reference = !!(deletion && inode->i_nlink == 0);
3840 + struct ubifs_inode *ui = ubifs_inode(inode);
3841 +@@ -630,6 +630,7 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
3842 + goto out_finish;
3843 + }
3844 + ui->del_cmtno = c->cmt_no;
3845 ++ orphan_added = 1;
3846 + }
3847 +
3848 + err = write_head(c, BASEHD, dent, len, &lnum, &dent_offs, sync);
3849 +@@ -702,7 +703,7 @@ out_release:
3850 + kfree(dent);
3851 + out_ro:
3852 + ubifs_ro_mode(c, err);
3853 +- if (last_reference)
3854 ++ if (orphan_added)
3855 + ubifs_delete_orphan(c, inode->i_ino);
3856 + finish_reservation(c);
3857 + return err;
3858 +@@ -1217,7 +1218,7 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
3859 + void *p;
3860 + union ubifs_key key;
3861 + struct ubifs_dent_node *dent, *dent2;
3862 +- int err, dlen1, dlen2, ilen, lnum, offs, len;
3863 ++ int err, dlen1, dlen2, ilen, lnum, offs, len, orphan_added = 0;
3864 + int aligned_dlen1, aligned_dlen2, plen = UBIFS_INO_NODE_SZ;
3865 + int last_reference = !!(new_inode && new_inode->i_nlink == 0);
3866 + int move = (old_dir != new_dir);
3867 +@@ -1333,6 +1334,7 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
3868 + goto out_finish;
3869 + }
3870 + new_ui->del_cmtno = c->cmt_no;
3871 ++ orphan_added = 1;
3872 + }
3873 +
3874 + err = write_head(c, BASEHD, dent, len, &lnum, &offs, sync);
3875 +@@ -1414,7 +1416,7 @@ out_release:
3876 + release_head(c, BASEHD);
3877 + out_ro:
3878 + ubifs_ro_mode(c, err);
3879 +- if (last_reference)
3880 ++ if (orphan_added)
3881 + ubifs_delete_orphan(c, new_inode->i_ino);
3882 + out_finish:
3883 + finish_reservation(c);
3884 +diff --git a/fs/ufs/super.c b/fs/ufs/super.c
3885 +index 1da0be667409b..e3b69fb280e8c 100644
3886 +--- a/fs/ufs/super.c
3887 ++++ b/fs/ufs/super.c
3888 +@@ -101,7 +101,7 @@ static struct inode *ufs_nfs_get_inode(struct super_block *sb, u64 ino, u32 gene
3889 + struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
3890 + struct inode *inode;
3891 +
3892 +- if (ino < UFS_ROOTINO || ino > uspi->s_ncg * uspi->s_ipg)
3893 ++ if (ino < UFS_ROOTINO || ino > (u64)uspi->s_ncg * uspi->s_ipg)
3894 + return ERR_PTR(-ESTALE);
3895 +
3896 + inode = ufs_iget(sb, ino);
3897 +diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
3898 +index 864849e942c45..c1a8d4a41bb16 100644
3899 +--- a/include/crypto/if_alg.h
3900 ++++ b/include/crypto/if_alg.h
3901 +@@ -135,6 +135,7 @@ struct af_alg_async_req {
3902 + * SG?
3903 + * @enc: Cryptographic operation to be performed when
3904 + * recvmsg is invoked.
3905 ++ * @init: True if metadata has been sent.
3906 + * @len: Length of memory allocated for this data structure.
3907 + */
3908 + struct af_alg_ctx {
3909 +@@ -151,6 +152,7 @@ struct af_alg_ctx {
3910 + bool more;
3911 + bool merge;
3912 + bool enc;
3913 ++ bool init;
3914 +
3915 + unsigned int len;
3916 + };
3917 +@@ -226,7 +228,7 @@ unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset);
3918 + void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
3919 + size_t dst_offset);
3920 + void af_alg_wmem_wakeup(struct sock *sk);
3921 +-int af_alg_wait_for_data(struct sock *sk, unsigned flags);
3922 ++int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min);
3923 + int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
3924 + unsigned int ivsize);
3925 + ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
3926 +diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
3927 +index 1e5dad8b8e59b..ed870da78326b 100644
3928 +--- a/include/linux/intel-iommu.h
3929 ++++ b/include/linux/intel-iommu.h
3930 +@@ -359,8 +359,8 @@ enum {
3931 +
3932 + #define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK)
3933 + #define QI_DEV_EIOTLB_SIZE (((u64)1) << 11)
3934 +-#define QI_DEV_EIOTLB_GLOB(g) ((u64)g)
3935 +-#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
3936 ++#define QI_DEV_EIOTLB_GLOB(g) ((u64)(g) & 0x1)
3937 ++#define QI_DEV_EIOTLB_PASID(p) ((u64)((p) & 0xfffff) << 32)
3938 + #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
3939 + #define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
3940 + #define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
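
The two macro fixes mask the operand to its architectural field width before shifting, so an oversized PASID or glob value cannot set bits belonging to neighbouring fields in the invalidation descriptor. A self-contained illustration of the difference:

#include <stdint.h>
#include <stdio.h>

/* Unmasked: any high bits of p leak into the rest of the qword. */
#define PASID_BAD(p)  (((uint64_t)(p)) << 32)
/* Masked to the 20-bit PASID field first, as in the fix. */
#define PASID_GOOD(p) ((uint64_t)((p) & 0xfffff) << 32)

int main(void)
{
	uint32_t pasid = 0x100123;  /* 21 bits: out of range for a PASID */

	printf("bad:  0x%016llx\n", (unsigned long long)PASID_BAD(pasid));
	printf("good: 0x%016llx\n", (unsigned long long)PASID_GOOD(pasid));
	/* bad sets bit 52, outside the field; good keeps only 0x123. */
	return 0;
}
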
3941 +diff --git a/include/linux/irq.h b/include/linux/irq.h
3942 +index f8755e5fcd742..e9e69c511ea92 100644
3943 +--- a/include/linux/irq.h
3944 ++++ b/include/linux/irq.h
3945 +@@ -211,6 +211,8 @@ struct irq_data {
3946 + * IRQD_CAN_RESERVE - Can use reservation mode
3947 + * IRQD_MSI_NOMASK_QUIRK - Non-maskable MSI quirk for affinity change
3948 + * required
3949 ++ * IRQD_AFFINITY_ON_ACTIVATE - Affinity is set on activation. Don't call
3950 ++ * irq_chip::irq_set_affinity() when deactivated.
3951 + */
3952 + enum {
3953 + IRQD_TRIGGER_MASK = 0xf,
3954 +@@ -234,6 +236,7 @@ enum {
3955 + IRQD_DEFAULT_TRIGGER_SET = (1 << 25),
3956 + IRQD_CAN_RESERVE = (1 << 26),
3957 + IRQD_MSI_NOMASK_QUIRK = (1 << 27),
3958 ++ IRQD_AFFINITY_ON_ACTIVATE = (1 << 29),
3959 + };
3960 +
3961 + #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
3962 +@@ -408,6 +411,16 @@ static inline bool irqd_msi_nomask_quirk(struct irq_data *d)
3963 + return __irqd_to_state(d) & IRQD_MSI_NOMASK_QUIRK;
3964 + }
3965 +
3966 ++static inline void irqd_set_affinity_on_activate(struct irq_data *d)
3967 ++{
3968 ++ __irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE;
3969 ++}
3970 ++
3971 ++static inline bool irqd_affinity_on_activate(struct irq_data *d)
3972 ++{
3973 ++ return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE;
3974 ++}
3975 ++
3976 + #undef __irqd_to_state
3977 +
3978 + static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
3979 +diff --git a/include/net/sock.h b/include/net/sock.h
3980 +index 8263bbf756a22..6d9c1131fe5c8 100644
3981 +--- a/include/net/sock.h
3982 ++++ b/include/net/sock.h
3983 +@@ -849,6 +849,8 @@ static inline int sk_memalloc_socks(void)
3984 + {
3985 + return static_branch_unlikely(&memalloc_socks_key);
3986 + }
3987 ++
3988 ++void __receive_sock(struct file *file);
3989 + #else
3990 +
3991 + static inline int sk_memalloc_socks(void)
3992 +@@ -856,6 +858,8 @@ static inline int sk_memalloc_socks(void)
3993 + return 0;
3994 + }
3995 +
3996 ++static inline void __receive_sock(struct file *file)
3997 ++{ }
3998 + #endif
3999 +
4000 + static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask)
4001 +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
4002 +index df73685de1144..3b1d0a4725a49 100644
4003 +--- a/kernel/irq/manage.c
4004 ++++ b/kernel/irq/manage.c
4005 +@@ -281,12 +281,16 @@ static bool irq_set_affinity_deactivated(struct irq_data *data,
4006 + struct irq_desc *desc = irq_data_to_desc(data);
4007 +
4008 + /*
4009 ++ * Handle irq chips which can handle affinity only in activated
4010 ++ * state correctly
4011 ++ *
4012 + * If the interrupt is not yet activated, just store the affinity
4013 + * mask and do not call the chip driver at all. On activation the
4014 + * driver has to make sure anyway that the interrupt is in a
4015 + * useable state so startup works.
4016 + */
4017 +- if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) || irqd_is_activated(data))
4018 ++ if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
4019 ++ irqd_is_activated(data) || !irqd_affinity_on_activate(data))
4020 + return false;
4021 +
4022 + cpumask_copy(desc->irq_common_data.affinity, mask);
4023 +diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
4024 +index 8f557fa1f4fe4..c6c7e187ae748 100644
4025 +--- a/kernel/irq/pm.c
4026 ++++ b/kernel/irq/pm.c
4027 +@@ -185,14 +185,18 @@ void rearm_wake_irq(unsigned int irq)
4028 + unsigned long flags;
4029 + struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
4030 +
4031 +- if (!desc || !(desc->istate & IRQS_SUSPENDED) ||
4032 +- !irqd_is_wakeup_set(&desc->irq_data))
4033 ++ if (!desc)
4034 + return;
4035 +
4036 ++ if (!(desc->istate & IRQS_SUSPENDED) ||
4037 ++ !irqd_is_wakeup_set(&desc->irq_data))
4038 ++ goto unlock;
4039 ++
4040 + desc->istate &= ~IRQS_SUSPENDED;
4041 + irqd_set(&desc->irq_data, IRQD_WAKEUP_ARMED);
4042 + __enable_irq(desc);
4043 +
4044 ++unlock:
4045 + irq_put_desc_busunlock(desc, flags);
4046 + }
4047 +
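
The rearm_wake_irq() fix is a lock-balance bug: irq_get_desc_buslock() takes a reference and bus lock even when the wake conditions later fail, so every exit after a successful lookup must funnel through the unlock, and only the failed-lookup path may return directly. The canonical shape, as a pthread sketch:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t desc_lock = PTHREAD_MUTEX_INITIALIZER;
static int suspended;    /* stand-in for IRQS_SUSPENDED */

static void rearm(void)
{
	pthread_mutex_lock(&desc_lock);

	if (!suspended)
		goto unlock;     /* early exit still releases the lock */

	suspended = 0;
	printf("rearmed\n");

unlock:
	pthread_mutex_unlock(&desc_lock);
}

int main(void)
{
	rearm();             /* not suspended: silently unlocks */
	suspended = 1;
	rearm();             /* prints "rearmed" */
	return 0;
}
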
4048 +diff --git a/kernel/kprobes.c b/kernel/kprobes.c
4049 +index 0a967db226d8a..bbff4bccb885d 100644
4050 +--- a/kernel/kprobes.c
4051 ++++ b/kernel/kprobes.c
4052 +@@ -2104,6 +2104,13 @@ static void kill_kprobe(struct kprobe *p)
4053 + * the original probed function (which will be freed soon) any more.
4054 + */
4055 + arch_remove_kprobe(p);
4056 ++
4057 ++ /*
4058 ++ * The module is going away. We should disarm the kprobe which
4059 ++ * is using ftrace.
4060 ++ */
4061 ++ if (kprobe_ftrace(p))
4062 ++ disarm_kprobe_ftrace(p);
4063 + }
4064 +
4065 + /* Disable one kprobe */
4066 +diff --git a/kernel/module.c b/kernel/module.c
4067 +index 6baa1080cdb76..819c5d3b4c295 100644
4068 +--- a/kernel/module.c
4069 ++++ b/kernel/module.c
4070 +@@ -1517,18 +1517,34 @@ struct module_sect_attrs {
4071 + struct module_sect_attr attrs[0];
4072 + };
4073 +
4074 ++#define MODULE_SECT_READ_SIZE (3 /* "0x", "\n" */ + (BITS_PER_LONG / 4))
4075 + static ssize_t module_sect_read(struct file *file, struct kobject *kobj,
4076 + struct bin_attribute *battr,
4077 + char *buf, loff_t pos, size_t count)
4078 + {
4079 + struct module_sect_attr *sattr =
4080 + container_of(battr, struct module_sect_attr, battr);
4081 ++ char bounce[MODULE_SECT_READ_SIZE + 1];
4082 ++ size_t wrote;
4083 +
4084 + if (pos != 0)
4085 + return -EINVAL;
4086 +
4087 +- return sprintf(buf, "0x%px\n",
4088 +- kallsyms_show_value(file->f_cred) ? (void *)sattr->address : NULL);
4089 ++ /*
4090 ++ * Since we're a binary read handler, we must account for the
4091 ++ * trailing NUL byte that sprintf will write: if "buf" is
4092 ++ * too small to hold the NUL, or the NUL is exactly the last
4093 ++ * byte, the read will look like it got truncated by one byte.
4094 ++ * Since there is no way to ask sprintf nicely to not write
4095 ++ * the NUL, we have to use a bounce buffer.
4096 ++ */
4097 ++ wrote = scnprintf(bounce, sizeof(bounce), "0x%px\n",
4098 ++ kallsyms_show_value(file->f_cred)
4099 ++ ? (void *)sattr->address : NULL);
4100 ++ count = min(count, wrote);
4101 ++ memcpy(buf, bounce, count);
4102 ++
4103 ++ return count;
4104 + }
4105 +
4106 + static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
4107 +@@ -1577,7 +1593,7 @@ static void add_sect_attrs(struct module *mod, const struct load_info *info)
4108 + goto out;
4109 + sect_attrs->nsections++;
4110 + sattr->battr.read = module_sect_read;
4111 +- sattr->battr.size = 3 /* "0x", "\n" */ + (BITS_PER_LONG / 4);
4112 ++ sattr->battr.size = MODULE_SECT_READ_SIZE;
4113 + sattr->battr.attr.mode = 0400;
4114 + *(gattr++) = &(sattr++)->battr;
4115 + }
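
module_sect_read() is a binary sysfs read, so the NUL that sprintf() appends must never be counted or copied out; the fix formats into a stack bounce buffer with scnprintf() and copies only min(count, wrote) payload bytes. The same shape in userspace, with snprintf() standing in for scnprintf() (the output here is small enough that their return values agree):

#include <stdio.h>
#include <string.h>

#define READ_SIZE (3 /* "0x", "\n" */ + (64 / 4))

/* Format into a bounce buffer, then hand out at most count bytes of
 * payload. The terminating NUL stays behind in the bounce buffer. */
static size_t sect_read(char *buf, size_t count, unsigned long long addr)
{
	char bounce[READ_SIZE + 1];
	size_t wrote;

	wrote = (size_t)snprintf(bounce, sizeof(bounce), "0x%llx\n", addr);
	if (wrote > count)
		wrote = count;
	memcpy(buf, bounce, wrote);
	return wrote;
}

int main(void)
{
	char out[32];
	size_t n = sect_read(out, sizeof(out), 0xffffffffc0000000ull);
	fwrite(out, 1, n, stdout);   /* exactly n bytes, no stray NUL */
	return 0;
}
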
4116 +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
4117 +index 15160d707da45..705852c1724aa 100644
4118 +--- a/kernel/trace/ftrace.c
4119 ++++ b/kernel/trace/ftrace.c
4120 +@@ -5699,8 +5699,11 @@ static int referenced_filters(struct dyn_ftrace *rec)
4121 + int cnt = 0;
4122 +
4123 + for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
4124 +- if (ops_references_rec(ops, rec))
4125 +- cnt++;
4126 ++ if (ops_references_rec(ops, rec)) {
4127 ++ cnt++;
4128 ++ if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
4129 ++ rec->flags |= FTRACE_FL_REGS;
4130 ++ }
4131 + }
4132 +
4133 + return cnt;
4134 +@@ -5877,8 +5880,8 @@ void ftrace_module_enable(struct module *mod)
4135 + if (ftrace_start_up)
4136 + cnt += referenced_filters(rec);
4137 +
4138 +- /* This clears FTRACE_FL_DISABLED */
4139 +- rec->flags = cnt;
4140 ++ rec->flags &= ~FTRACE_FL_DISABLED;
4141 ++ rec->flags += cnt;
4142 +
4143 + if (ftrace_start_up && cnt) {
4144 + int failed = __ftrace_replace_code(rec, 1);
4145 +@@ -6459,12 +6462,12 @@ void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
4146 + if (enable) {
4147 + register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
4148 + tr);
4149 +- register_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
4150 ++ register_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
4151 + tr);
4152 + } else {
4153 + unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
4154 + tr);
4155 +- unregister_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
4156 ++ unregister_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
4157 + tr);
4158 + }
4159 + }
4160 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
4161 +index 721947b9962db..f9c2bdbbd8936 100644
4162 +--- a/kernel/trace/trace.c
4163 ++++ b/kernel/trace/trace.c
4164 +@@ -5686,7 +5686,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4165 + }
4166 +
4167 + /* If trace pipe files are being read, we can't change the tracer */
4168 +- if (tr->current_trace->ref) {
4169 ++ if (tr->trace_ref) {
4170 + ret = -EBUSY;
4171 + goto out;
4172 + }
4173 +@@ -5902,7 +5902,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
4174 +
4175 + nonseekable_open(inode, filp);
4176 +
4177 +- tr->current_trace->ref++;
4178 ++ tr->trace_ref++;
4179 + out:
4180 + mutex_unlock(&trace_types_lock);
4181 + return ret;
4182 +@@ -5921,7 +5921,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
4183 +
4184 + mutex_lock(&trace_types_lock);
4185 +
4186 +- tr->current_trace->ref--;
4187 ++ tr->trace_ref--;
4188 +
4189 + if (iter->trace->pipe_close)
4190 + iter->trace->pipe_close(iter);
4191 +@@ -7230,7 +7230,7 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
4192 +
4193 + filp->private_data = info;
4194 +
4195 +- tr->current_trace->ref++;
4196 ++ tr->trace_ref++;
4197 +
4198 + mutex_unlock(&trace_types_lock);
4199 +
4200 +@@ -7331,7 +7331,7 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
4201 +
4202 + mutex_lock(&trace_types_lock);
4203 +
4204 +- iter->tr->current_trace->ref--;
4205 ++ iter->tr->trace_ref--;
4206 +
4207 + __trace_array_put(iter->tr);
4208 +
4209 +@@ -8470,7 +8470,7 @@ static int __remove_instance(struct trace_array *tr)
4210 + {
4211 + int i;
4212 +
4213 +- if (tr->ref || (tr->current_trace && tr->current_trace->ref))
4214 ++ if (tr->ref || (tr->current_trace && tr->trace_ref))
4215 + return -EBUSY;
4216 +
4217 + list_del(&tr->list);
4218 +diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
4219 +index a3c29d5fcc616..4055158c1dd25 100644
4220 +--- a/kernel/trace/trace.h
4221 ++++ b/kernel/trace/trace.h
4222 +@@ -309,6 +309,7 @@ struct trace_array {
4223 + struct trace_event_file *trace_marker_file;
4224 + cpumask_var_t tracing_cpumask; /* only trace on set CPUs */
4225 + int ref;
4226 ++ int trace_ref;
4227 + #ifdef CONFIG_FUNCTION_TRACER
4228 + struct ftrace_ops *ops;
4229 + struct trace_pid_list __rcu *function_pids;
4230 +@@ -498,7 +499,6 @@ struct tracer {
4231 + struct tracer *next;
4232 + struct tracer_flags *flags;
4233 + int enabled;
4234 +- int ref;
4235 + bool print_max;
4236 + bool allow_instances;
4237 + #ifdef CONFIG_TRACER_MAX_TRACE
4238 +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
4239 +index 995061bb2deca..ed9eb97b64b47 100644
4240 +--- a/kernel/trace/trace_events.c
4241 ++++ b/kernel/trace/trace_events.c
4242 +@@ -527,12 +527,12 @@ void trace_event_follow_fork(struct trace_array *tr, bool enable)
4243 + if (enable) {
4244 + register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork,
4245 + tr, INT_MIN);
4246 +- register_trace_prio_sched_process_exit(event_filter_pid_sched_process_exit,
4247 ++ register_trace_prio_sched_process_free(event_filter_pid_sched_process_exit,
4248 + tr, INT_MAX);
4249 + } else {
4250 + unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork,
4251 + tr);
4252 +- unregister_trace_sched_process_exit(event_filter_pid_sched_process_exit,
4253 ++ unregister_trace_sched_process_free(event_filter_pid_sched_process_exit,
4254 + tr);
4255 + }
4256 + }
4257 +diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
4258 +index 862f4b0139fcb..35512ed26d9ff 100644
4259 +--- a/kernel/trace/trace_hwlat.c
4260 ++++ b/kernel/trace/trace_hwlat.c
4261 +@@ -270,6 +270,7 @@ static bool disable_migrate;
4262 + static void move_to_next_cpu(void)
4263 + {
4264 + struct cpumask *current_mask = &save_cpumask;
4265 ++ struct trace_array *tr = hwlat_trace;
4266 + int next_cpu;
4267 +
4268 + if (disable_migrate)
4269 +@@ -283,7 +284,7 @@ static void move_to_next_cpu(void)
4270 + goto disable;
4271 +
4272 + get_online_cpus();
4273 +- cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
4274 ++ cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
4275 + next_cpu = cpumask_next(smp_processor_id(), current_mask);
4276 + put_online_cpus();
4277 +
4278 +@@ -360,7 +361,7 @@ static int start_kthread(struct trace_array *tr)
4279 + /* Just pick the first CPU on first iteration */
4280 + current_mask = &save_cpumask;
4281 + get_online_cpus();
4282 +- cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
4283 ++ cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
4284 + put_online_cpus();
4285 + next_cpu = cpumask_first(current_mask);
4286 +
4287 +diff --git a/lib/devres.c b/lib/devres.c
4288 +index 17624d35e82d4..77c80ca9e4856 100644
4289 +--- a/lib/devres.c
4290 ++++ b/lib/devres.c
4291 +@@ -155,6 +155,7 @@ void __iomem *devm_ioremap_resource(struct device *dev,
4292 + {
4293 + resource_size_t size;
4294 + void __iomem *dest_ptr;
4295 ++ char *pretty_name;
4296 +
4297 + BUG_ON(!dev);
4298 +
4299 +@@ -165,7 +166,15 @@ void __iomem *devm_ioremap_resource(struct device *dev,
4300 +
4301 + size = resource_size(res);
4302 +
4303 +- if (!devm_request_mem_region(dev, res->start, size, dev_name(dev))) {
4304 ++ if (res->name)
4305 ++ pretty_name = devm_kasprintf(dev, GFP_KERNEL, "%s %s",
4306 ++ dev_name(dev), res->name);
4307 ++ else
4308 ++ pretty_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
4309 ++ if (!pretty_name)
4310 ++ return IOMEM_ERR_PTR(-ENOMEM);
4311 ++
4312 ++ if (!devm_request_mem_region(dev, res->start, size, pretty_name)) {
4313 + dev_err(dev, "can't request region for resource %pR\n", res);
4314 + return IOMEM_ERR_PTR(-EBUSY);
4315 + }
4316 +diff --git a/lib/test_kmod.c b/lib/test_kmod.c
4317 +index 9cf77628fc913..87a0cc750ea23 100644
4318 +--- a/lib/test_kmod.c
4319 ++++ b/lib/test_kmod.c
4320 +@@ -745,7 +745,7 @@ static int trigger_config_run_type(struct kmod_test_device *test_dev,
4321 + break;
4322 + case TEST_KMOD_FS_TYPE:
4323 + kfree_const(config->test_fs);
4324 +- config->test_driver = NULL;
4325 ++ config->test_fs = NULL;
4326 + copied = config_copy_test_fs(config, test_str,
4327 + strlen(test_str));
4328 + break;
4329 +diff --git a/mm/khugepaged.c b/mm/khugepaged.c
4330 +index 5977f7824a9ac..719f49d1fba2f 100644
4331 +--- a/mm/khugepaged.c
4332 ++++ b/mm/khugepaged.c
4333 +@@ -1294,7 +1294,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
4334 + {
4335 + unsigned long haddr = addr & HPAGE_PMD_MASK;
4336 + struct vm_area_struct *vma = find_vma(mm, haddr);
4337 +- struct page *hpage = NULL;
4338 ++ struct page *hpage;
4339 + pte_t *start_pte, *pte;
4340 + pmd_t *pmd, _pmd;
4341 + spinlock_t *ptl;
4342 +@@ -1314,9 +1314,17 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
4343 + if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
4344 + return;
4345 +
4346 ++ hpage = find_lock_page(vma->vm_file->f_mapping,
4347 ++ linear_page_index(vma, haddr));
4348 ++ if (!hpage)
4349 ++ return;
4350 ++
4351 ++ if (!PageHead(hpage))
4352 ++ goto drop_hpage;
4353 ++
4354 + pmd = mm_find_pmd(mm, haddr);
4355 + if (!pmd)
4356 +- return;
4357 ++ goto drop_hpage;
4358 +
4359 + start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
4360 +
4361 +@@ -1335,30 +1343,11 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
4362 +
4363 + page = vm_normal_page(vma, addr, *pte);
4364 +
4365 +- if (!page || !PageCompound(page))
4366 +- goto abort;
4367 +-
4368 +- if (!hpage) {
4369 +- hpage = compound_head(page);
4370 +- /*
4371 +- * The mapping of the THP should not change.
4372 +- *
4373 +- * Note that uprobe, debugger, or MAP_PRIVATE may
4374 +- * change the page table, but the new page will
4375 +- * not pass PageCompound() check.
4376 +- */
4377 +- if (WARN_ON(hpage->mapping != vma->vm_file->f_mapping))
4378 +- goto abort;
4379 +- }
4380 +-
4381 + /*
4382 +- * Confirm the page maps to the correct subpage.
4383 +- *
4384 +- * Note that uprobe, debugger, or MAP_PRIVATE may change
4385 +- * the page table, but the new page will not pass
4386 +- * PageCompound() check.
4387 ++ * Note that uprobe, debugger, or MAP_PRIVATE may change the
4388 ++ * page table, but the new page will not be a subpage of hpage.
4389 + */
4390 +- if (WARN_ON(hpage + i != page))
4391 ++ if (hpage + i != page)
4392 + goto abort;
4393 + count++;
4394 + }
4395 +@@ -1377,21 +1366,26 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
4396 + pte_unmap_unlock(start_pte, ptl);
4397 +
4398 + /* step 3: set proper refcount and mm_counters. */
4399 +- if (hpage) {
4400 ++ if (count) {
4401 + page_ref_sub(hpage, count);
4402 + add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
4403 + }
4404 +
4405 + /* step 4: collapse pmd */
4406 + ptl = pmd_lock(vma->vm_mm, pmd);
4407 +- _pmd = pmdp_collapse_flush(vma, addr, pmd);
4408 ++ _pmd = pmdp_collapse_flush(vma, haddr, pmd);
4409 + spin_unlock(ptl);
4410 + mm_dec_nr_ptes(mm);
4411 + pte_free(mm, pmd_pgtable(_pmd));
4412 ++
4413 ++drop_hpage:
4414 ++ unlock_page(hpage);
4415 ++ put_page(hpage);
4416 + return;
4417 +
4418 + abort:
4419 + pte_unmap_unlock(start_pte, ptl);
4420 ++ goto drop_hpage;
4421 + }
4422 +
4423 + static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
4424 +@@ -1420,6 +1414,7 @@ out:
4425 + static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
4426 + {
4427 + struct vm_area_struct *vma;
4428 ++ struct mm_struct *mm;
4429 + unsigned long addr;
4430 + pmd_t *pmd, _pmd;
4431 +
4432 +@@ -1448,7 +1443,8 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
4433 + continue;
4434 + if (vma->vm_end < addr + HPAGE_PMD_SIZE)
4435 + continue;
4436 +- pmd = mm_find_pmd(vma->vm_mm, addr);
4437 ++ mm = vma->vm_mm;
4438 ++ pmd = mm_find_pmd(mm, addr);
4439 + if (!pmd)
4440 + continue;
4441 + /*
4442 +@@ -1458,17 +1454,19 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
4443 + * mmap_sem while holding page lock. Fault path does it in
4444 + * reverse order. Trylock is a way to avoid deadlock.
4445 + */
4446 +- if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
4447 +- spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
4448 +- /* assume page table is clear */
4449 +- _pmd = pmdp_collapse_flush(vma, addr, pmd);
4450 +- spin_unlock(ptl);
4451 +- up_write(&vma->vm_mm->mmap_sem);
4452 +- mm_dec_nr_ptes(vma->vm_mm);
4453 +- pte_free(vma->vm_mm, pmd_pgtable(_pmd));
4454 ++ if (down_write_trylock(&mm->mmap_sem)) {
4455 ++ if (!khugepaged_test_exit(mm)) {
4456 ++ spinlock_t *ptl = pmd_lock(mm, pmd);
4457 ++ /* assume page table is clear */
4458 ++ _pmd = pmdp_collapse_flush(vma, addr, pmd);
4459 ++ spin_unlock(ptl);
4460 ++ mm_dec_nr_ptes(mm);
4461 ++ pte_free(mm, pmd_pgtable(_pmd));
4462 ++ }
4463 ++ up_write(&mm->mmap_sem);
4464 + } else {
4465 + /* Try again later */
4466 +- khugepaged_add_pte_mapped_thp(vma->vm_mm, addr);
4467 ++ khugepaged_add_pte_mapped_thp(mm, addr);
4468 + }
4469 + }
4470 + i_mmap_unlock_write(mapping);
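
The collapse_pte_mapped_thp() rework above pins and locks the huge page with find_lock_page() before touching the page tables, and routes every exit, including the abort path, through a single drop_hpage label so the lock and reference are released exactly once; the retract_page_tables() hunk additionally re-checks khugepaged_test_exit() under mmap_sem before collapsing. A stand-alone sketch of that single-release goto idiom, with hypothetical grab()/drop() helpers in place of find_lock_page() and unlock_page()/put_page():

	#include <stdio.h>
	#include <stdlib.h>

	static void *grab(void)    { return malloc(16); }
	static void  drop(void *r) { free(r); }

	static int do_collapse(int fail_step)
	{
		void *res = grab();	/* take the reference up front */
		int ret = -1;

		if (!res)
			return -1;

		if (fail_step == 1)
			goto drop_res;	/* early bail-out */
		if (fail_step == 2)
			goto abort;	/* failure with extra teardown */

		ret = 0;		/* success */
	drop_res:
		drop(res);		/* released exactly once, all paths */
		return ret;

	abort:
		fprintf(stderr, "aborted\n");
		goto drop_res;		/* mirror the patch: abort falls
					 * through to the common release */
	}

	int main(void)
	{
		do_collapse(0);
		do_collapse(1);
		do_collapse(2);
		return 0;
	}
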
4471 +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
4472 +index c054945a9a742..3128d95847125 100644
4473 +--- a/mm/memory_hotplug.c
4474 ++++ b/mm/memory_hotplug.c
4475 +@@ -1751,7 +1751,7 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size)
4476 + */
4477 + rc = walk_memory_blocks(start, size, NULL, check_memblock_offlined_cb);
4478 + if (rc)
4479 +- goto done;
4480 ++ return rc;
4481 +
4482 + /* remove memmap entry */
4483 + firmware_map_remove(start, start + size, "System RAM");
4484 +@@ -1771,9 +1771,8 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size)
4485 +
4486 + try_offline_node(nid);
4487 +
4488 +-done:
4489 + mem_hotplug_done();
4490 +- return rc;
4491 ++ return 0;
4492 + }
4493 +
4494 + /**
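
The try_remove_memory() change above matters because the offlined check runs before mem_hotplug_begin() is taken; jumping to a done: label that called mem_hotplug_done() on that early failure released a lock the function never acquired. Returning directly keeps the error exit balanced with what has actually been taken, and the tail can then unconditionally return 0. A minimal pthread sketch of the rule (precheck() is a hypothetical stand-in for the memblock-offlined walk):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Hypothetical stand-in for the walk_memory_blocks() check. */
	static int precheck(int offlined)
	{
		return offlined ? 0 : -1;
	}

	static int try_remove(int offlined)
	{
		int rc = precheck(offlined);

		if (rc)
			return rc;	/* nothing held yet: plain return,
					 * not a goto to an unlock label */

		pthread_mutex_lock(&hotplug_lock);
		/* ... the actual removal work goes here ... */
		pthread_mutex_unlock(&hotplug_lock);
		return 0;
	}

	int main(void)
	{
		printf("ok=%d busy=%d\n", try_remove(1), try_remove(0));
		return 0;
	}
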
4495 +diff --git a/mm/page_counter.c b/mm/page_counter.c
4496 +index de31470655f66..147ff99187b81 100644
4497 +--- a/mm/page_counter.c
4498 ++++ b/mm/page_counter.c
4499 +@@ -77,7 +77,7 @@ void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)
4500 + long new;
4501 +
4502 + new = atomic_long_add_return(nr_pages, &c->usage);
4503 +- propagate_protected_usage(counter, new);
4504 ++ propagate_protected_usage(c, new);
4505 + /*
4506 + * This is indeed racy, but we can live with some
4507 + * inaccuracy in the watermark.
4508 +@@ -121,7 +121,7 @@ bool page_counter_try_charge(struct page_counter *counter,
4509 + new = atomic_long_add_return(nr_pages, &c->usage);
4510 + if (new > c->max) {
4511 + atomic_long_sub(nr_pages, &c->usage);
4512 +- propagate_protected_usage(counter, new);
4513 ++ propagate_protected_usage(c, new);
4514 + /*
4515 + * This is racy, but we can live with some
4516 + * inaccuracy in the failcnt.
4517 +@@ -130,7 +130,7 @@ bool page_counter_try_charge(struct page_counter *counter,
4518 + *fail = c;
4519 + goto failed;
4520 + }
4521 +- propagate_protected_usage(counter, new);
4522 ++ propagate_protected_usage(c, new);
4523 + /*
4524 + * Just like with failcnt, we can live with some
4525 + * inaccuracy in the watermark.
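
The three page_counter fixes above are the same one-character bug: inside the loop that walks from the charged counter up through its ancestors via the cursor c, propagate_protected_usage() was handed the leaf counter every time instead of the level actually being updated. A stand-alone sketch of the walk, with a propagate() stub marking where the per-level hook belongs:

	#include <stdio.h>

	struct counter {
		long usage;
		struct counter *parent;
	};

	/* Hypothetical per-level hook; the real code updates the
	 * protected-usage bookkeeping for that level. */
	static void propagate(struct counter *c, long new)
	{
		printf("propagate level usage=%ld\n", new);
	}

	static void charge(struct counter *counter, long nr)
	{
		struct counter *c;

		for (c = counter; c; c = c->parent) {
			c->usage += nr;
			propagate(c, c->usage);	/* the cursor, not the leaf */
		}
	}

	int main(void)
	{
		struct counter root = { 0, NULL };
		struct counter leaf = { 0, &root };

		charge(&leaf, 4);
		printf("leaf=%ld root=%ld\n", leaf.usage, root.usage);
		return 0;
	}
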
4526 +diff --git a/net/compat.c b/net/compat.c
4527 +index 0f7ded26059ec..c848bcb517f3e 100644
4528 +--- a/net/compat.c
4529 ++++ b/net/compat.c
4530 +@@ -291,6 +291,7 @@ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
4531 + break;
4532 + }
4533 + /* Bump the usage count and install the file. */
4534 ++ __receive_sock(fp[i]);
4535 + fd_install(new_fd, get_file(fp[i]));
4536 + }
4537 +
4538 +diff --git a/net/core/sock.c b/net/core/sock.c
4539 +index 991ab80234cec..919f1a1739e90 100644
4540 +--- a/net/core/sock.c
4541 ++++ b/net/core/sock.c
4542 +@@ -2736,6 +2736,27 @@ int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *
4543 + }
4544 + EXPORT_SYMBOL(sock_no_mmap);
4545 +
4546 ++/*
4547 ++ * When a file is received (via SCM_RIGHTS, etc), we must bump the
4548 ++ * various sock-based usage counts.
4549 ++ */
4550 ++void __receive_sock(struct file *file)
4551 ++{
4552 ++ struct socket *sock;
4553 ++ int error;
4554 ++
4555 ++ /*
4556 ++ * The resulting value of "error" is ignored here since we only
4557 ++ * need to take action when the file is a socket and testing
4558 ++ * "sock" for NULL is sufficient.
4559 ++ */
4560 ++ sock = sock_from_file(file, &error);
4561 ++ if (sock) {
4562 ++ sock_update_netprioidx(&sock->sk->sk_cgrp_data);
4563 ++ sock_update_classid(&sock->sk->sk_cgrp_data);
4564 ++ }
4565 ++}
4566 ++
4567 + ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
4568 + {
4569 + ssize_t res;
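
__receive_sock() above closes a gap where a socket handed across process (and cgroup) boundaries kept the sender's netprio/classid: the receive paths for passed file descriptors now refresh the cgroup data. For reference, the userspace side of that transfer is a plain SCM_RIGHTS control message; the sketch below is the standard sendmsg() pattern, not kernel code:

	#include <string.h>
	#include <sys/socket.h>
	#include <unistd.h>

	/* Send fd over the connected UNIX socket sock via SCM_RIGHTS. */
	static int send_fd(int sock, int fd)
	{
		char dummy = 'x';
		struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
		union {			/* keeps the cmsg buffer aligned */
			struct cmsghdr align;
			char buf[CMSG_SPACE(sizeof(int))];
		} u;
		struct msghdr msg = {
			.msg_iov = &iov,
			.msg_iovlen = 1,
			.msg_control = u.buf,
			.msg_controllen = sizeof(u.buf),
		};
		struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

		cmsg->cmsg_level = SOL_SOCKET;
		cmsg->cmsg_type = SCM_RIGHTS;
		cmsg->cmsg_len = CMSG_LEN(sizeof(int));
		memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));

		return sendmsg(sock, &msg, 0) == 1 ? 0 : -1;
	}

	int main(void)
	{
		int sv[2];

		if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv))
			return 1;
		return send_fd(sv[0], STDIN_FILENO) ? 1 : 0;
	}
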
4570 +diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
4571 +index b1669f0244706..f5d96107af6de 100644
4572 +--- a/net/mac80211/sta_info.c
4573 ++++ b/net/mac80211/sta_info.c
4574 +@@ -1033,7 +1033,7 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
4575 + might_sleep();
4576 + lockdep_assert_held(&local->sta_mtx);
4577 +
4578 +- while (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
4579 ++ if (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
4580 + ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
4581 + WARN_ON_ONCE(ret);
4582 + }
4583 +diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
4584 +index e59022b3f1254..b9c2ee7ab43fa 100644
4585 +--- a/scripts/recordmcount.c
4586 ++++ b/scripts/recordmcount.c
4587 +@@ -42,6 +42,8 @@
4588 + #define R_ARM_THM_CALL 10
4589 + #define R_ARM_CALL 28
4590 +
4591 ++#define R_AARCH64_CALL26 283
4592 ++
4593 + static int fd_map; /* File descriptor for file being modified. */
4594 + static int mmap_failed; /* Boolean flag. */
4595 + static char gpfx; /* prefix for global symbol name (sometimes '_') */
4596 +diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
4597 +index ca9125726be24..8596ae4c2bdef 100644
4598 +--- a/sound/pci/echoaudio/echoaudio.c
4599 ++++ b/sound/pci/echoaudio/echoaudio.c
4600 +@@ -2198,7 +2198,6 @@ static int snd_echo_resume(struct device *dev)
4601 + if (err < 0) {
4602 + kfree(commpage_bak);
4603 + dev_err(dev, "resume init_hw err=%d\n", err);
4604 +- snd_echo_free(chip);
4605 + return err;
4606 + }
4607 +
4608 +@@ -2225,7 +2224,6 @@ static int snd_echo_resume(struct device *dev)
4609 + if (request_irq(pci->irq, snd_echo_interrupt, IRQF_SHARED,
4610 + KBUILD_MODNAME, chip)) {
4611 + dev_err(chip->card->dev, "cannot grab irq\n");
4612 +- snd_echo_free(chip);
4613 + return -EBUSY;
4614 + }
4615 + chip->irq = pci->irq;
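
The two echoaudio hunks above drop snd_echo_free() from the resume error paths: the card's own teardown frees the chip later, so freeing it here produced a double free (and a use-after-free on the still-registered card). A userspace sketch of the ownership rule being enforced, where the callee reports failure but never frees what the caller owns (names hypothetical):

	#include <stdio.h>
	#include <stdlib.h>

	struct chip { int irq; };

	/* On failure, report the error but leave *chip alive: the
	 * caller owns the object and frees it exactly once on its
	 * own path. */
	static int chip_resume(struct chip *chip, int fail)
	{
		if (fail) {
			fprintf(stderr, "resume: init failed\n");
			return -1;	/* no free(chip) here */
		}
		chip->irq = 42;
		return 0;
	}

	int main(void)
	{
		struct chip *chip = calloc(1, sizeof(*chip));

		if (!chip)
			return 1;
		chip_resume(chip, 1);	/* error path taken */
		free(chip);		/* single point of release */
		return 0;
	}
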
4616 +diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
4617 +index 8a19753cc26aa..8c6e1ea67f213 100644
4618 +--- a/tools/build/Makefile.feature
4619 ++++ b/tools/build/Makefile.feature
4620 +@@ -8,7 +8,7 @@ endif
4621 +
4622 + feature_check = $(eval $(feature_check_code))
4623 + define feature_check_code
4624 +- feature-$(1) := $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CFLAGS="$(EXTRA_CFLAGS) $(FEATURE_CHECK_CFLAGS-$(1))" CXXFLAGS="$(EXTRA_CXXFLAGS) $(FEATURE_CHECK_CXXFLAGS-$(1))" LDFLAGS="$(LDFLAGS) $(FEATURE_CHECK_LDFLAGS-$(1))" -C $(feature_dir) $(OUTPUT_FEATURES)test-$1.bin >/dev/null 2>/dev/null && echo 1 || echo 0)
4625 ++ feature-$(1) := $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CC="$(CC)" CXX="$(CXX)" CFLAGS="$(EXTRA_CFLAGS) $(FEATURE_CHECK_CFLAGS-$(1))" CXXFLAGS="$(EXTRA_CXXFLAGS) $(FEATURE_CHECK_CXXFLAGS-$(1))" LDFLAGS="$(LDFLAGS) $(FEATURE_CHECK_LDFLAGS-$(1))" -C $(feature_dir) $(OUTPUT_FEATURES)test-$1.bin >/dev/null 2>/dev/null && echo 1 || echo 0)
4626 + endef
4627 +
4628 + feature_set = $(eval $(feature_set_code))
4629 +diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
4630 +index 8499385365c02..054e09ab4a9e4 100644
4631 +--- a/tools/build/feature/Makefile
4632 ++++ b/tools/build/feature/Makefile
4633 +@@ -70,8 +70,6 @@ FILES= \
4634 +
4635 + FILES := $(addprefix $(OUTPUT),$(FILES))
4636 +
4637 +-CC ?= $(CROSS_COMPILE)gcc
4638 +-CXX ?= $(CROSS_COMPILE)g++
4639 + PKG_CONFIG ?= $(CROSS_COMPILE)pkg-config
4640 + LLVM_CONFIG ?= llvm-config
4641 +
4642 +diff --git a/tools/perf/bench/mem-functions.c b/tools/perf/bench/mem-functions.c
4643 +index 9235b76501be8..19d45c377ac18 100644
4644 +--- a/tools/perf/bench/mem-functions.c
4645 ++++ b/tools/perf/bench/mem-functions.c
4646 +@@ -223,12 +223,8 @@ static int bench_mem_common(int argc, const char **argv, struct bench_mem_info *
4647 + return 0;
4648 + }
4649 +
4650 +-static u64 do_memcpy_cycles(const struct function *r, size_t size, void *src, void *dst)
4651 ++static void memcpy_prefault(memcpy_t fn, size_t size, void *src, void *dst)
4652 + {
4653 +- u64 cycle_start = 0ULL, cycle_end = 0ULL;
4654 +- memcpy_t fn = r->fn.memcpy;
4655 +- int i;
4656 +-
4657 + /* Make sure to always prefault zero pages even if MMAP_THRESH is crossed: */
4658 + memset(src, 0, size);
4659 +
4660 +@@ -237,6 +233,15 @@ static u64 do_memcpy_cycles(const struct function *r, size_t size, void *src, vo
4661 + * to not measure page fault overhead:
4662 + */
4663 + fn(dst, src, size);
4664 ++}
4665 ++
4666 ++static u64 do_memcpy_cycles(const struct function *r, size_t size, void *src, void *dst)
4667 ++{
4668 ++ u64 cycle_start = 0ULL, cycle_end = 0ULL;
4669 ++ memcpy_t fn = r->fn.memcpy;
4670 ++ int i;
4671 ++
4672 ++ memcpy_prefault(fn, size, src, dst);
4673 +
4674 + cycle_start = get_cycles();
4675 + for (i = 0; i < nr_loops; ++i)
4676 +@@ -252,11 +257,7 @@ static double do_memcpy_gettimeofday(const struct function *r, size_t size, void
4677 + memcpy_t fn = r->fn.memcpy;
4678 + int i;
4679 +
4680 +- /*
4681 +- * We prefault the freshly allocated memory range here,
4682 +- * to not measure page fault overhead:
4683 +- */
4684 +- fn(dst, src, size);
4685 ++ memcpy_prefault(fn, size, src, dst);
4686 +
4687 + BUG_ON(gettimeofday(&tv_start, NULL));
4688 + for (i = 0; i < nr_loops; ++i)
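
The bench refactor above hoists the prefault (a memset of the source plus one warm-up copy) into memcpy_prefault(), so the cycles- and gettimeofday-based paths warm the buffers identically before timing. A stand-alone sketch of the prefault-then-time pattern using clock_gettime(); the buffer size and loop count are arbitrary:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <time.h>

	typedef void *(*memcpy_t)(void *, const void *, size_t);

	/* Touch every page once (and zero the source) so the timed
	 * loop measures the copy itself, not first-touch faults. */
	static void prefault(memcpy_t fn, size_t size, void *src, void *dst)
	{
		memset(src, 0, size);
		fn(dst, src, size);
	}

	int main(void)
	{
		size_t size = 1 << 20;
		int nr_loops = 100;
		void *src = malloc(size), *dst = malloc(size);
		struct timespec t0, t1;

		if (!src || !dst)
			return 1;

		prefault(memcpy, size, src, dst);

		clock_gettime(CLOCK_MONOTONIC, &t0);
		for (int i = 0; i < nr_loops; i++)
			memcpy(dst, src, size);
		clock_gettime(CLOCK_MONOTONIC, &t1);

		printf("%.1f MB/s\n",
		       (double)size * nr_loops /
		       ((t1.tv_sec - t0.tv_sec) +
		        (t1.tv_nsec - t0.tv_nsec) / 1e9) / 1e6);

		free(src);
		free(dst);
		return 0;
	}
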
4689 +diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
4690 +index f8ccfd6be0eee..7ffcbd6fcd1ae 100644
4691 +--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
4692 ++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
4693 +@@ -1164,6 +1164,7 @@ static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
4694 + return 0;
4695 + if (err == -EAGAIN ||
4696 + intel_pt_fup_with_nlip(decoder, &intel_pt_insn, ip, err)) {
4697 ++ decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
4698 + if (intel_pt_fup_event(decoder))
4699 + return 0;
4700 + return -EAGAIN;
4701 +@@ -1942,17 +1943,13 @@ next:
4702 + }
4703 + if (decoder->set_fup_mwait)
4704 + no_tip = true;
4705 ++ if (no_tip)
4706 ++ decoder->pkt_state = INTEL_PT_STATE_FUP_NO_TIP;
4707 ++ else
4708 ++ decoder->pkt_state = INTEL_PT_STATE_FUP;
4709 + err = intel_pt_walk_fup(decoder);
4710 +- if (err != -EAGAIN) {
4711 +- if (err)
4712 +- return err;
4713 +- if (no_tip)
4714 +- decoder->pkt_state =
4715 +- INTEL_PT_STATE_FUP_NO_TIP;
4716 +- else
4717 +- decoder->pkt_state = INTEL_PT_STATE_FUP;
4718 +- return 0;
4719 +- }
4720 ++ if (err != -EAGAIN)
4721 ++ return err;
4722 + if (no_tip) {
4723 + no_tip = false;
4724 + break;
4725 +@@ -1980,8 +1977,10 @@ next:
4726 + * possibility of another CBR change that gets caught up
4727 + * in the PSB+.
4728 + */
4729 +- if (decoder->cbr != decoder->cbr_seen)
4730 ++ if (decoder->cbr != decoder->cbr_seen) {
4731 ++ decoder->state.type = 0;
4732 + return 0;
4733 ++ }
4734 + break;
4735 +
4736 + case INTEL_PT_PIP:
4737 +@@ -2022,8 +2021,10 @@ next:
4738 +
4739 + case INTEL_PT_CBR:
4740 + intel_pt_calc_cbr(decoder);
4741 +- if (decoder->cbr != decoder->cbr_seen)
4742 ++ if (decoder->cbr != decoder->cbr_seen) {
4743 ++ decoder->state.type = 0;
4744 + return 0;
4745 ++ }
4746 + break;
4747 +
4748 + case INTEL_PT_MODE_EXEC:
4749 +@@ -2599,15 +2600,11 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
4750 + err = intel_pt_walk_tip(decoder);
4751 + break;
4752 + case INTEL_PT_STATE_FUP:
4753 +- decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
4754 + err = intel_pt_walk_fup(decoder);
4755 + if (err == -EAGAIN)
4756 + err = intel_pt_walk_fup_tip(decoder);
4757 +- else if (!err)
4758 +- decoder->pkt_state = INTEL_PT_STATE_FUP;
4759 + break;
4760 + case INTEL_PT_STATE_FUP_NO_TIP:
4761 +- decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
4762 + err = intel_pt_walk_fup(decoder);
4763 + if (err == -EAGAIN)
4764 + err = intel_pt_walk_trace(decoder);
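
The decoder change above records pkt_state before calling intel_pt_walk_fup() instead of patching it up afterwards: when the walk returns -EAGAIN because an event must be delivered first, the already-stored state says exactly where to resume, and the walk itself restores INTEL_PT_STATE_IN_SYNC on the paths that complete. A toy sketch of that record-state-before-dispatch idea (all names hypothetical, not the real decoder):

	#include <stdio.h>

	enum state { ST_IN_SYNC, ST_FUP };

	struct decoder {
		enum state pkt_state;
		int pending_event;	/* stands in for a deferred event */
	};

	#define EAGAIN 11

	static int walk_fup(struct decoder *d)
	{
		if (d->pending_event) {
			d->pending_event = 0;
			return -EAGAIN;	/* pkt_state already marks the
					 * resume point */
		}
		d->pkt_state = ST_IN_SYNC;
		return 0;
	}

	static int decode_step(struct decoder *d)
	{
		d->pkt_state = ST_FUP;	/* record state before walking */
		return walk_fup(d);
	}

	int main(void)
	{
		struct decoder d = { ST_IN_SYNC, 1 };

		if (decode_step(&d) == -EAGAIN)
			printf("deliver event, resume in state %d\n",
			       d.pkt_state);
		printf("resumed walk -> %d\n", walk_fup(&d));
		return 0;
	}
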
4765 +diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
4766 +index 8cb3469dd11f2..48bbe8e0ce48d 100644
4767 +--- a/tools/testing/selftests/bpf/test_progs.c
4768 ++++ b/tools/testing/selftests/bpf/test_progs.c
4769 +@@ -7,6 +7,8 @@
4770 + #include <argp.h>
4771 + #include <string.h>
4772 +
4773 ++#define EXIT_NO_TEST 2
4774 ++
4775 + /* defined in test_progs.h */
4776 + struct test_env env;
4777 +
4778 +@@ -584,5 +586,8 @@ int main(int argc, char **argv)
4779 + free(env.test_selector.num_set);
4780 + free(env.subtest_selector.num_set);
4781 +
4782 ++ if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0)
4783 ++ return EXIT_NO_TEST;
4784 ++
4785 + return env.fail_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
4786 + }
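
The runner change above makes an empty run visible: if the test and subtest selectors matched nothing, exiting 0 would let CI mistake a filtered-out suite for a passing one, so the runner now returns a distinct code. A trivial sketch of the three-way exit convention (the counters here are hypothetical):

	#include <stdlib.h>

	#define EXIT_NO_TEST 2

	int main(int argc, char **argv)
	{
		int succ_cnt = argc - 1;	/* pretend each arg passed */
		int fail_cnt = 0, skip_cnt = 0;

		if (succ_cnt + fail_cnt + skip_cnt == 0)
			return EXIT_NO_TEST;	/* nothing ran: not a pass */

		return fail_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
	}
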
4787 +diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c b/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c
4788 +index bdbbbe8431e03..3694613f418f6 100644
4789 +--- a/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c
4790 ++++ b/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c
4791 +@@ -44,7 +44,7 @@ struct shared_info {
4792 + unsigned long amr2;
4793 +
4794 + /* AMR value that ptrace should refuse to write to the child. */
4795 +- unsigned long amr3;
4796 ++ unsigned long invalid_amr;
4797 +
4798 + /* IAMR value the parent expects to read from the child. */
4799 + unsigned long expected_iamr;
4800 +@@ -57,8 +57,8 @@ struct shared_info {
4801 + * (even though they're valid ones) because userspace doesn't have
4802 + * access to those registers.
4803 + */
4804 +- unsigned long new_iamr;
4805 +- unsigned long new_uamor;
4806 ++ unsigned long invalid_iamr;
4807 ++ unsigned long invalid_uamor;
4808 + };
4809 +
4810 + static int sys_pkey_alloc(unsigned long flags, unsigned long init_access_rights)
4811 +@@ -66,11 +66,6 @@ static int sys_pkey_alloc(unsigned long flags, unsigned long init_access_rights)
4812 + return syscall(__NR_pkey_alloc, flags, init_access_rights);
4813 + }
4814 +
4815 +-static int sys_pkey_free(int pkey)
4816 +-{
4817 +- return syscall(__NR_pkey_free, pkey);
4818 +-}
4819 +-
4820 + static int child(struct shared_info *info)
4821 + {
4822 + unsigned long reg;
4823 +@@ -100,28 +95,32 @@ static int child(struct shared_info *info)
4824 +
4825 + info->amr1 |= 3ul << pkeyshift(pkey1);
4826 + info->amr2 |= 3ul << pkeyshift(pkey2);
4827 +- info->amr3 |= info->amr2 | 3ul << pkeyshift(pkey3);
4828 ++ /*
4829 ++ * Invalid AMR value where we try to force-write
4830 ++ * bits which are denied by the UAMOR setting.
4831 ++ */
4832 ++ info->invalid_amr = info->amr2 | (~0x0UL & ~info->expected_uamor);
4833 +
4834 ++ /*
4835 ++ * If PKEY_DISABLE_EXECUTE succeeded, we should update the expected_iamr.
4836 ++ */
4837 + if (disable_execute)
4838 + info->expected_iamr |= 1ul << pkeyshift(pkey1);
4839 + else
4840 + info->expected_iamr &= ~(1ul << pkeyshift(pkey1));
4841 +
4842 +- info->expected_iamr &= ~(1ul << pkeyshift(pkey2) | 1ul << pkeyshift(pkey3));
4843 +-
4844 +- info->expected_uamor |= 3ul << pkeyshift(pkey1) |
4845 +- 3ul << pkeyshift(pkey2);
4846 +- info->new_iamr |= 1ul << pkeyshift(pkey1) | 1ul << pkeyshift(pkey2);
4847 +- info->new_uamor |= 3ul << pkeyshift(pkey1);
4848 ++ /*
4849 ++ * We allocated pkey2 and pkey3 above. Clear the IAMR bits.
4850 ++ */
4851 ++ info->expected_iamr &= ~(1ul << pkeyshift(pkey2));
4852 ++ info->expected_iamr &= ~(1ul << pkeyshift(pkey3));
4853 +
4854 + /*
4855 +- * We won't use pkey3. We just want a plausible but invalid key to test
4856 +- * whether ptrace will let us write to AMR bits we are not supposed to.
4857 +- *
4858 +- * This also tests whether the kernel restores the UAMOR permissions
4859 +- * after a key is freed.
4860 ++ * Create an IAMR value different from the expected value.
4861 ++ * The kernel will reject the IAMR and UAMOR change.
4862 + */
4863 +- sys_pkey_free(pkey3);
4864 ++ info->invalid_iamr = info->expected_iamr | (1ul << pkeyshift(pkey1) | 1ul << pkeyshift(pkey2));
4865 ++ info->invalid_uamor = info->expected_uamor & ~(0x3ul << pkeyshift(pkey1));
4866 +
4867 + printf("%-30s AMR: %016lx pkey1: %d pkey2: %d pkey3: %d\n",
4868 + user_write, info->amr1, pkey1, pkey2, pkey3);
4869 +@@ -196,9 +195,9 @@ static int parent(struct shared_info *info, pid_t pid)
4870 + PARENT_SKIP_IF_UNSUPPORTED(ret, &info->child_sync);
4871 + PARENT_FAIL_IF(ret, &info->child_sync);
4872 +
4873 +- info->amr1 = info->amr2 = info->amr3 = regs[0];
4874 +- info->expected_iamr = info->new_iamr = regs[1];
4875 +- info->expected_uamor = info->new_uamor = regs[2];
4876 ++ info->amr1 = info->amr2 = regs[0];
4877 ++ info->expected_iamr = regs[1];
4878 ++ info->expected_uamor = regs[2];
4879 +
4880 + /* Wake up child so that it can set itself up. */
4881 + ret = prod_child(&info->child_sync);
4882 +@@ -234,10 +233,10 @@ static int parent(struct shared_info *info, pid_t pid)
4883 + return ret;
4884 +
4885 + /* Write invalid AMR value in child. */
4886 +- ret = ptrace_write_regs(pid, NT_PPC_PKEY, &info->amr3, 1);
4887 ++ ret = ptrace_write_regs(pid, NT_PPC_PKEY, &info->invalid_amr, 1);
4888 + PARENT_FAIL_IF(ret, &info->child_sync);
4889 +
4890 +- printf("%-30s AMR: %016lx\n", ptrace_write_running, info->amr3);
4891 ++ printf("%-30s AMR: %016lx\n", ptrace_write_running, info->invalid_amr);
4892 +
4893 + /* Wake up child so that it can verify it didn't change. */
4894 + ret = prod_child(&info->child_sync);
4895 +@@ -249,7 +248,7 @@ static int parent(struct shared_info *info, pid_t pid)
4896 +
4897 + /* Try to write to IAMR. */
4898 + regs[0] = info->amr1;
4899 +- regs[1] = info->new_iamr;
4900 ++ regs[1] = info->invalid_iamr;
4901 + ret = ptrace_write_regs(pid, NT_PPC_PKEY, regs, 2);
4902 + PARENT_FAIL_IF(!ret, &info->child_sync);
4903 +
4904 +@@ -257,7 +256,7 @@ static int parent(struct shared_info *info, pid_t pid)
4905 + ptrace_write_running, regs[0], regs[1]);
4906 +
4907 + /* Try to write to IAMR and UAMOR. */
4908 +- regs[2] = info->new_uamor;
4909 ++ regs[2] = info->invalid_uamor;
4910 + ret = ptrace_write_regs(pid, NT_PPC_PKEY, regs, 3);
4911 + PARENT_FAIL_IF(!ret, &info->child_sync);
4912 +