From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Sat, 22 Jun 2019 19:04:13
Message-Id: 1561230229.c8a38ba49ad1bf446472d33d4796ff3083148a27.mpagano@gentoo
1 commit: c8a38ba49ad1bf446472d33d4796ff3083148a27
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Sat Jun 22 19:03:49 2019 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Sat Jun 22 19:03:49 2019 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c8a38ba4
7
8 Linux patch 4.9.183
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1182_linux-4.9.183.patch | 3044 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 3048 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index af9dbef..fd03898 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -771,6 +771,10 @@ Patch: 1181_linux-4.9.182.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.9.182
23
24 +Patch: 1182_linux-4.9.183.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.9.183
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1182_linux-4.9.183.patch b/1182_linux-4.9.183.patch
33 new file mode 100644
34 index 0000000..bc977be
35 --- /dev/null
36 +++ b/1182_linux-4.9.183.patch
37 @@ -0,0 +1,3044 @@
38 +diff --git a/Makefile b/Makefile
39 +index f34cb9225255..e63ace93b67b 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,6 +1,6 @@
43 + VERSION = 4
44 + PATCHLEVEL = 9
45 +-SUBLEVEL = 182
46 ++SUBLEVEL = 183
47 + EXTRAVERSION =
48 + NAME = Roaring Lionus
49 +
50 +diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
51 +index 9cc83c51c925..e664c33c3c64 100644
52 +--- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts
53 ++++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
54 +@@ -110,6 +110,7 @@
55 + regulator-name = "PVDD_APIO_1V8";
56 + regulator-min-microvolt = <1800000>;
57 + regulator-max-microvolt = <1800000>;
58 ++ regulator-always-on;
59 + };
60 +
61 + ldo3_reg: LDO3 {
62 +@@ -148,6 +149,7 @@
63 + regulator-name = "PVDD_ABB_1V8";
64 + regulator-min-microvolt = <1800000>;
65 + regulator-max-microvolt = <1800000>;
66 ++ regulator-always-on;
67 + };
68 +
69 + ldo9_reg: LDO9 {
70 +diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
71 +index b13b0b2db881..8ccafdfbe87c 100644
72 +--- a/arch/arm/boot/dts/imx6qdl.dtsi
73 ++++ b/arch/arm/boot/dts/imx6qdl.dtsi
74 +@@ -875,7 +875,7 @@
75 + compatible = "fsl,imx6q-sdma", "fsl,imx35-sdma";
76 + reg = <0x020ec000 0x4000>;
77 + interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>;
78 +- clocks = <&clks IMX6QDL_CLK_SDMA>,
79 ++ clocks = <&clks IMX6QDL_CLK_IPG>,
80 + <&clks IMX6QDL_CLK_SDMA>;
81 + clock-names = "ipg", "ahb";
82 + #dma-cells = <3>;
83 +diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi
84 +index 02378db3f5fc..a2c76797e871 100644
85 +--- a/arch/arm/boot/dts/imx6sl.dtsi
86 ++++ b/arch/arm/boot/dts/imx6sl.dtsi
87 +@@ -704,7 +704,7 @@
88 + reg = <0x020ec000 0x4000>;
89 + interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>;
90 + clocks = <&clks IMX6SL_CLK_SDMA>,
91 +- <&clks IMX6SL_CLK_SDMA>;
92 ++ <&clks IMX6SL_CLK_AHB>;
93 + clock-names = "ipg", "ahb";
94 + #dma-cells = <3>;
95 + /* imx6sl reuses imx6q sdma firmware */
96 +diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
97 +index a885052157f0..5834194b62e1 100644
98 +--- a/arch/arm/boot/dts/imx6sx.dtsi
99 ++++ b/arch/arm/boot/dts/imx6sx.dtsi
100 +@@ -751,7 +751,7 @@
101 + compatible = "fsl,imx6sx-sdma", "fsl,imx6q-sdma";
102 + reg = <0x020ec000 0x4000>;
103 + interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
104 +- clocks = <&clks IMX6SX_CLK_SDMA>,
105 ++ clocks = <&clks IMX6SX_CLK_IPG>,
106 + <&clks IMX6SX_CLK_SDMA>;
107 + clock-names = "ipg", "ahb";
108 + #dma-cells = <3>;
109 +diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi
110 +index c5c05fdccc78..7839300fe46b 100644
111 +--- a/arch/arm/boot/dts/imx6ul.dtsi
112 ++++ b/arch/arm/boot/dts/imx6ul.dtsi
113 +@@ -669,7 +669,7 @@
114 + "fsl,imx35-sdma";
115 + reg = <0x020ec000 0x4000>;
116 + interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
117 +- clocks = <&clks IMX6UL_CLK_SDMA>,
118 ++ clocks = <&clks IMX6UL_CLK_IPG>,
119 + <&clks IMX6UL_CLK_SDMA>;
120 + clock-names = "ipg", "ahb";
121 + #dma-cells = <3>;
122 +diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
123 +index 2b6cb05bc01a..edc5ddeb851a 100644
124 +--- a/arch/arm/boot/dts/imx7s.dtsi
125 ++++ b/arch/arm/boot/dts/imx7s.dtsi
126 +@@ -962,8 +962,8 @@
127 + compatible = "fsl,imx7d-sdma", "fsl,imx35-sdma";
128 + reg = <0x30bd0000 0x10000>;
129 + interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
130 +- clocks = <&clks IMX7D_SDMA_CORE_CLK>,
131 +- <&clks IMX7D_AHB_CHANNEL_ROOT_CLK>;
132 ++ clocks = <&clks IMX7D_IPG_ROOT_CLK>,
133 ++ <&clks IMX7D_SDMA_CORE_CLK>;
134 + clock-names = "ipg", "ahb";
135 + #dma-cells = <3>;
136 + fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin";
137 +diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
138 +index 3d7351c844aa..2fd0a2619b0b 100644
139 +--- a/arch/arm/include/asm/hardirq.h
140 ++++ b/arch/arm/include/asm/hardirq.h
141 +@@ -5,6 +5,7 @@
142 + #include <linux/threads.h>
143 + #include <asm/irq.h>
144 +
145 ++/* number of IPIS _not_ including IPI_CPU_BACKTRACE */
146 + #define NR_IPI 7
147 +
148 + typedef struct {
149 +diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
150 +index 7a5dc011c523..deea60f01d24 100644
151 +--- a/arch/arm/kernel/smp.c
152 ++++ b/arch/arm/kernel/smp.c
153 +@@ -75,6 +75,10 @@ enum ipi_msg_type {
154 + IPI_CPU_STOP,
155 + IPI_IRQ_WORK,
156 + IPI_COMPLETION,
157 ++ /*
158 ++ * CPU_BACKTRACE is special and not included in NR_IPI
159 ++ * or tracable with trace_ipi_*
160 ++ */
161 + IPI_CPU_BACKTRACE,
162 + /*
163 + * SGI8-15 can be reserved by secure firmware, and thus may
164 +@@ -801,7 +805,7 @@ core_initcall(register_cpufreq_notifier);
165 +
166 + static void raise_nmi(cpumask_t *mask)
167 + {
168 +- smp_cross_call(mask, IPI_CPU_BACKTRACE);
169 ++ __smp_cross_call(mask, IPI_CPU_BACKTRACE);
170 + }
171 +
172 + void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
173 +diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
174 +index 81c935ce089b..b406c12077b9 100644
175 +--- a/arch/arm/mach-exynos/suspend.c
176 ++++ b/arch/arm/mach-exynos/suspend.c
177 +@@ -500,8 +500,27 @@ early_wakeup:
178 +
179 + static void exynos5420_prepare_pm_resume(void)
180 + {
181 ++ unsigned int mpidr, cluster;
182 ++
183 ++ mpidr = read_cpuid_mpidr();
184 ++ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
185 ++
186 + if (IS_ENABLED(CONFIG_EXYNOS5420_MCPM))
187 + WARN_ON(mcpm_cpu_powered_up());
188 ++
189 ++ if (IS_ENABLED(CONFIG_HW_PERF_EVENTS) && cluster != 0) {
190 ++ /*
191 ++ * When system is resumed on the LITTLE/KFC core (cluster 1),
192 ++ * the DSCR is not properly updated until the power is turned
193 ++ * on also for the cluster 0. Enable it for a while to
194 ++ * propagate the SPNIDEN and SPIDEN signals from Secure JTAG
195 ++ * block and avoid undefined instruction issue on CP14 reset.
196 ++ */
197 ++ pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
198 ++ EXYNOS_COMMON_CONFIGURATION(0));
199 ++ pmu_raw_writel(0,
200 ++ EXYNOS_COMMON_CONFIGURATION(0));
201 ++ }
202 + }
203 +
204 + static void exynos5420_pm_resume(void)
205 +diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
206 +index 0a56898f8410..efd65fc85238 100644
207 +--- a/arch/arm64/mm/mmu.c
208 ++++ b/arch/arm64/mm/mmu.c
209 +@@ -765,13 +765,18 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
210 +
211 + int __init arch_ioremap_pud_supported(void)
212 + {
213 +- /* only 4k granule supports level 1 block mappings */
214 +- return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
215 ++ /*
216 ++ * Only 4k granule supports level 1 block mappings.
217 ++ * SW table walks can't handle removal of intermediate entries.
218 ++ */
219 ++ return IS_ENABLED(CONFIG_ARM64_4K_PAGES) &&
220 ++ !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
221 + }
222 +
223 + int __init arch_ioremap_pmd_supported(void)
224 + {
225 +- return 1;
226 ++ /* See arch_ioremap_pud_supported() */
227 ++ return !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
228 + }
229 +
230 + int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
231 +diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
232 +index aa19b7ac8222..476c7b4be378 100644
233 +--- a/arch/ia64/mm/numa.c
234 ++++ b/arch/ia64/mm/numa.c
235 +@@ -49,6 +49,7 @@ paddr_to_nid(unsigned long paddr)
236 +
237 + return (i < num_node_memblks) ? node_memblk[i].nid : (num_node_memblks ? -1 : 0);
238 + }
239 ++EXPORT_SYMBOL(paddr_to_nid);
240 +
241 + #if defined(CONFIG_SPARSEMEM) && defined(CONFIG_NUMA)
242 + /*
243 +diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
244 +index 5e12e19940e2..defa553fe823 100644
245 +--- a/arch/powerpc/include/asm/kvm_host.h
246 ++++ b/arch/powerpc/include/asm/kvm_host.h
247 +@@ -271,6 +271,7 @@ struct kvm_arch {
248 + #ifdef CONFIG_PPC_BOOK3S_64
249 + struct list_head spapr_tce_tables;
250 + struct list_head rtas_tokens;
251 ++ struct mutex rtas_token_lock;
252 + DECLARE_BITMAP(enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
253 + #endif
254 + #ifdef CONFIG_KVM_MPIC
255 +diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
256 +index b6952dd23152..73c3c127d858 100644
257 +--- a/arch/powerpc/kvm/book3s.c
258 ++++ b/arch/powerpc/kvm/book3s.c
259 +@@ -811,6 +811,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
260 + #ifdef CONFIG_PPC64
261 + INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
262 + INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
263 ++ mutex_init(&kvm->arch.rtas_token_lock);
264 + #endif
265 +
266 + return kvm->arch.kvm_ops->init_vm(kvm);
267 +diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
268 +index 0a2b247dbc6b..e840f943cd2c 100644
269 +--- a/arch/powerpc/kvm/book3s_hv.c
270 ++++ b/arch/powerpc/kvm/book3s_hv.c
271 +@@ -374,12 +374,7 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
272 +
273 + static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
274 + {
275 +- struct kvm_vcpu *ret;
276 +-
277 +- mutex_lock(&kvm->lock);
278 +- ret = kvm_get_vcpu_by_id(kvm, id);
279 +- mutex_unlock(&kvm->lock);
280 +- return ret;
281 ++ return kvm_get_vcpu_by_id(kvm, id);
282 + }
283 +
284 + static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
285 +@@ -1098,7 +1093,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
286 + struct kvmppc_vcore *vc = vcpu->arch.vcore;
287 + u64 mask;
288 +
289 +- mutex_lock(&kvm->lock);
290 + spin_lock(&vc->lock);
291 + /*
292 + * If ILE (interrupt little-endian) has changed, update the
293 +@@ -1132,7 +1126,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
294 + mask &= 0xFFFFFFFF;
295 + vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
296 + spin_unlock(&vc->lock);
297 +- mutex_unlock(&kvm->lock);
298 + }
299 +
300 + static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
301 +diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
302 +index ef27fbd5d9c5..b1b2273d1f6d 100644
303 +--- a/arch/powerpc/kvm/book3s_rtas.c
304 ++++ b/arch/powerpc/kvm/book3s_rtas.c
305 +@@ -133,7 +133,7 @@ static int rtas_token_undefine(struct kvm *kvm, char *name)
306 + {
307 + struct rtas_token_definition *d, *tmp;
308 +
309 +- lockdep_assert_held(&kvm->lock);
310 ++ lockdep_assert_held(&kvm->arch.rtas_token_lock);
311 +
312 + list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
313 + if (rtas_name_matches(d->handler->name, name)) {
314 +@@ -154,7 +154,7 @@ static int rtas_token_define(struct kvm *kvm, char *name, u64 token)
315 + bool found;
316 + int i;
317 +
318 +- lockdep_assert_held(&kvm->lock);
319 ++ lockdep_assert_held(&kvm->arch.rtas_token_lock);
320 +
321 + list_for_each_entry(d, &kvm->arch.rtas_tokens, list) {
322 + if (d->token == token)
323 +@@ -193,14 +193,14 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp)
324 + if (copy_from_user(&args, argp, sizeof(args)))
325 + return -EFAULT;
326 +
327 +- mutex_lock(&kvm->lock);
328 ++ mutex_lock(&kvm->arch.rtas_token_lock);
329 +
330 + if (args.token)
331 + rc = rtas_token_define(kvm, args.name, args.token);
332 + else
333 + rc = rtas_token_undefine(kvm, args.name);
334 +
335 +- mutex_unlock(&kvm->lock);
336 ++ mutex_unlock(&kvm->arch.rtas_token_lock);
337 +
338 + return rc;
339 + }
340 +@@ -232,7 +232,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
341 + orig_rets = args.rets;
342 + args.rets = &args.args[be32_to_cpu(args.nargs)];
343 +
344 +- mutex_lock(&vcpu->kvm->lock);
345 ++ mutex_lock(&vcpu->kvm->arch.rtas_token_lock);
346 +
347 + rc = -ENOENT;
348 + list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {
349 +@@ -243,7 +243,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
350 + }
351 + }
352 +
353 +- mutex_unlock(&vcpu->kvm->lock);
354 ++ mutex_unlock(&vcpu->kvm->arch.rtas_token_lock);
355 +
356 + if (rc == 0) {
357 + args.rets = orig_rets;
358 +@@ -269,8 +269,6 @@ void kvmppc_rtas_tokens_free(struct kvm *kvm)
359 + {
360 + struct rtas_token_definition *d, *tmp;
361 +
362 +- lockdep_assert_held(&kvm->lock);
363 +-
364 + list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
365 + list_del(&d->list);
366 + kfree(d);
367 +diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
368 +index 2032ab81b2d7..07f571900676 100644
369 +--- a/arch/s390/kvm/kvm-s390.c
370 ++++ b/arch/s390/kvm/kvm-s390.c
371 +@@ -3288,21 +3288,28 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
372 + const struct kvm_memory_slot *new,
373 + enum kvm_mr_change change)
374 + {
375 +- int rc;
376 +-
377 +- /* If the basics of the memslot do not change, we do not want
378 +- * to update the gmap. Every update causes several unnecessary
379 +- * segment translation exceptions. This is usually handled just
380 +- * fine by the normal fault handler + gmap, but it will also
381 +- * cause faults on the prefix page of running guest CPUs.
382 +- */
383 +- if (old->userspace_addr == mem->userspace_addr &&
384 +- old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
385 +- old->npages * PAGE_SIZE == mem->memory_size)
386 +- return;
387 ++ int rc = 0;
388 +
389 +- rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
390 +- mem->guest_phys_addr, mem->memory_size);
391 ++ switch (change) {
392 ++ case KVM_MR_DELETE:
393 ++ rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
394 ++ old->npages * PAGE_SIZE);
395 ++ break;
396 ++ case KVM_MR_MOVE:
397 ++ rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
398 ++ old->npages * PAGE_SIZE);
399 ++ if (rc)
400 ++ break;
401 ++ /* FALLTHROUGH */
402 ++ case KVM_MR_CREATE:
403 ++ rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
404 ++ mem->guest_phys_addr, mem->memory_size);
405 ++ break;
406 ++ case KVM_MR_FLAGS_ONLY:
407 ++ break;
408 ++ default:
409 ++ WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
410 ++ }
411 + if (rc)
412 + pr_warn("failed to commit memory region\n");
413 + return;
414 +diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
415 +index 25c23666d592..040e3efdc9a6 100644
416 +--- a/arch/um/kernel/time.c
417 ++++ b/arch/um/kernel/time.c
418 +@@ -56,7 +56,7 @@ static int itimer_one_shot(struct clock_event_device *evt)
419 + static struct clock_event_device timer_clockevent = {
420 + .name = "posix-timer",
421 + .rating = 250,
422 +- .cpumask = cpu_all_mask,
423 ++ .cpumask = cpu_possible_mask,
424 + .features = CLOCK_EVT_FEAT_PERIODIC |
425 + CLOCK_EVT_FEAT_ONESHOT,
426 + .set_state_shutdown = itimer_shutdown,
427 +diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
428 +index cb8178a2783a..e98e238d3775 100644
429 +--- a/arch/x86/events/intel/core.c
430 ++++ b/arch/x86/events/intel/core.c
431 +@@ -2867,7 +2867,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
432 + return ret;
433 +
434 + if (event->attr.precise_ip) {
435 +- if (!(event->attr.freq || event->attr.wakeup_events)) {
436 ++ if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
437 + event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
438 + if (!(event->attr.sample_type &
439 + ~intel_pmu_free_running_flags(event)))
440 +diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
441 +index f26e26e4d84f..ad31c01f810f 100644
442 +--- a/arch/x86/events/intel/ds.c
443 ++++ b/arch/x86/events/intel/ds.c
444 +@@ -655,7 +655,7 @@ struct event_constraint intel_core2_pebs_event_constraints[] = {
445 + INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */
446 + INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
447 + /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
448 +- INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
449 ++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
450 + EVENT_CONSTRAINT_END
451 + };
452 +
453 +@@ -664,7 +664,7 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
454 + INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
455 + INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
456 + /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
457 +- INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
458 ++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
459 + /* Allow all events as PEBS with no flags */
460 + INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
461 + EVENT_CONSTRAINT_END
462 +@@ -672,7 +672,7 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
463 +
464 + struct event_constraint intel_slm_pebs_event_constraints[] = {
465 + /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
466 +- INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x1),
467 ++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x1),
468 + /* Allow all events as PEBS with no flags */
469 + INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
470 + EVENT_CONSTRAINT_END
471 +@@ -697,7 +697,7 @@ struct event_constraint intel_nehalem_pebs_event_constraints[] = {
472 + INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
473 + INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
474 + /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
475 +- INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
476 ++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
477 + EVENT_CONSTRAINT_END
478 + };
479 +
480 +@@ -714,7 +714,7 @@ struct event_constraint intel_westmere_pebs_event_constraints[] = {
481 + INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
482 + INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
483 + /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
484 +- INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
485 ++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
486 + EVENT_CONSTRAINT_END
487 + };
488 +
489 +@@ -723,7 +723,7 @@ struct event_constraint intel_snb_pebs_event_constraints[] = {
490 + INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
491 + INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */
492 + /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
493 +- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
494 ++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
495 + INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
496 + INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
497 + INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
498 +@@ -738,9 +738,9 @@ struct event_constraint intel_ivb_pebs_event_constraints[] = {
499 + INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
500 + INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */
501 + /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
502 +- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
503 ++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
504 + /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
505 +- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
506 ++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
507 + INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
508 + INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
509 + INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
510 +@@ -754,9 +754,9 @@ struct event_constraint intel_hsw_pebs_event_constraints[] = {
511 + INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
512 + INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */
513 + /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
514 +- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
515 ++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
516 + /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
517 +- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
518 ++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
519 + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
520 + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
521 + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
522 +@@ -777,9 +777,9 @@ struct event_constraint intel_bdw_pebs_event_constraints[] = {
523 + INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
524 + INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */
525 + /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
526 +- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
527 ++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
528 + /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
529 +- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
530 ++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
531 + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
532 + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
533 + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
534 +@@ -800,9 +800,9 @@ struct event_constraint intel_bdw_pebs_event_constraints[] = {
535 + struct event_constraint intel_skl_pebs_event_constraints[] = {
536 + INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */
537 + /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
538 +- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
539 ++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
540 + /* INST_RETIRED.TOTAL_CYCLES_PS (inv=1, cmask=16) (cycles:p). */
541 +- INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
542 ++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
543 + INTEL_PLD_CONSTRAINT(0x1cd, 0xf), /* MEM_TRANS_RETIRED.* */
544 + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
545 + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
546 +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
547 +index be6d0543e626..52a65f14db06 100644
548 +--- a/arch/x86/kernel/cpu/amd.c
549 ++++ b/arch/x86/kernel/cpu/amd.c
550 +@@ -766,8 +766,11 @@ static void init_amd_zn(struct cpuinfo_x86 *c)
551 + {
552 + set_cpu_cap(c, X86_FEATURE_ZEN);
553 +
554 +- /* Fix erratum 1076: CPB feature bit not being set in CPUID. */
555 +- if (!cpu_has(c, X86_FEATURE_CPB))
556 ++ /*
557 ++ * Fix erratum 1076: CPB feature bit not being set in CPUID.
558 ++ * Always set it, except when running under a hypervisor.
559 ++ */
560 ++ if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB))
561 + set_cpu_cap(c, X86_FEATURE_CPB);
562 + }
563 +
564 +diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c
565 +index 5ab4a364348e..2729131fe9bf 100644
566 +--- a/arch/x86/kvm/pmu_intel.c
567 ++++ b/arch/x86/kvm/pmu_intel.c
568 +@@ -235,11 +235,14 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
569 + }
570 + break;
571 + default:
572 +- if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
573 +- (pmc = get_fixed_pmc(pmu, msr))) {
574 +- if (!msr_info->host_initiated)
575 +- data = (s64)(s32)data;
576 +- pmc->counter += data - pmc_read_counter(pmc);
577 ++ if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
578 ++ if (msr_info->host_initiated)
579 ++ pmc->counter = data;
580 ++ else
581 ++ pmc->counter = (s32)data;
582 ++ return 0;
583 ++ } else if ((pmc = get_fixed_pmc(pmu, msr))) {
584 ++ pmc->counter = data;
585 + return 0;
586 + } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
587 + if (data == pmc->eventsel)
588 +diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
589 +index 9bd115484745..5f0e596b0519 100644
590 +--- a/arch/x86/pci/irq.c
591 ++++ b/arch/x86/pci/irq.c
592 +@@ -1117,6 +1117,8 @@ static struct dmi_system_id __initdata pciirq_dmi_table[] = {
593 +
594 + void __init pcibios_irq_init(void)
595 + {
596 ++ struct irq_routing_table *rtable = NULL;
597 ++
598 + DBG(KERN_DEBUG "PCI: IRQ init\n");
599 +
600 + if (raw_pci_ops == NULL)
601 +@@ -1127,8 +1129,10 @@ void __init pcibios_irq_init(void)
602 + pirq_table = pirq_find_routing_table();
603 +
604 + #ifdef CONFIG_PCI_BIOS
605 +- if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN))
606 ++ if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN)) {
607 + pirq_table = pcibios_get_irq_routing_table();
608 ++ rtable = pirq_table;
609 ++ }
610 + #endif
611 + if (pirq_table) {
612 + pirq_peer_trick();
613 +@@ -1143,8 +1147,10 @@ void __init pcibios_irq_init(void)
614 + * If we're using the I/O APIC, avoid using the PCI IRQ
615 + * routing table
616 + */
617 +- if (io_apic_assign_pci_irqs)
618 ++ if (io_apic_assign_pci_irqs) {
619 ++ kfree(rtable);
620 + pirq_table = NULL;
621 ++ }
622 + }
623 +
624 + x86_init.pci.fixup_irqs();
625 +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
626 +index 35be49f5791d..da1a987c622a 100644
627 +--- a/drivers/ata/libata-core.c
628 ++++ b/drivers/ata/libata-core.c
629 +@@ -4355,9 +4355,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
630 + { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
631 + ATA_HORKAGE_FIRMWARE_WARN },
632 +
633 +- /* drives which fail FPDMA_AA activation (some may freeze afterwards) */
634 +- { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
635 +- { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
636 ++ /* drives which fail FPDMA_AA activation (some may freeze afterwards)
637 ++ the ST disks also have LPM issues */
638 ++ { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA |
639 ++ ATA_HORKAGE_NOLPM, },
640 ++ { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA |
641 ++ ATA_HORKAGE_NOLPM, },
642 + { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
643 +
644 + /* Blacklist entries taken from Silicon Image 3124/3132
645 +diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
646 +index 39af05a589b3..32b130c53ff9 100644
647 +--- a/drivers/clk/rockchip/clk-rk3288.c
648 ++++ b/drivers/clk/rockchip/clk-rk3288.c
649 +@@ -826,6 +826,9 @@ static const int rk3288_saved_cru_reg_ids[] = {
650 + RK3288_CLKSEL_CON(10),
651 + RK3288_CLKSEL_CON(33),
652 + RK3288_CLKSEL_CON(37),
653 ++
654 ++ /* We turn aclk_dmac1 on for suspend; this will restore it */
655 ++ RK3288_CLKGATE_CON(10),
656 + };
657 +
658 + static u32 rk3288_saved_cru_regs[ARRAY_SIZE(rk3288_saved_cru_reg_ids)];
659 +@@ -841,6 +844,14 @@ static int rk3288_clk_suspend(void)
660 + readl_relaxed(rk3288_cru_base + reg_id);
661 + }
662 +
663 ++ /*
664 ++ * Going into deep sleep (specifically setting PMU_CLR_DMA in
665 ++ * RK3288_PMU_PWRMODE_CON1) appears to fail unless
666 ++ * "aclk_dmac1" is on.
667 ++ */
668 ++ writel_relaxed(1 << (12 + 16),
669 ++ rk3288_cru_base + RK3288_CLKGATE_CON(10));
670 ++
671 + /*
672 + * Switch PLLs other than DPLL (for SDRAM) to slow mode to
673 + * avoid crashes on resume. The Mask ROM on the system will
674 +diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
675 +index 1953e57505f4..f17a4c7a1781 100644
676 +--- a/drivers/dma/idma64.c
677 ++++ b/drivers/dma/idma64.c
678 +@@ -589,7 +589,7 @@ static int idma64_probe(struct idma64_chip *chip)
679 + idma64->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
680 + idma64->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
681 +
682 +- idma64->dma.dev = chip->dev;
683 ++ idma64->dma.dev = chip->sysdev;
684 +
685 + dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
686 +
687 +@@ -629,6 +629,7 @@ static int idma64_platform_probe(struct platform_device *pdev)
688 + {
689 + struct idma64_chip *chip;
690 + struct device *dev = &pdev->dev;
691 ++ struct device *sysdev = dev->parent;
692 + struct resource *mem;
693 + int ret;
694 +
695 +@@ -645,11 +646,12 @@ static int idma64_platform_probe(struct platform_device *pdev)
696 + if (IS_ERR(chip->regs))
697 + return PTR_ERR(chip->regs);
698 +
699 +- ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
700 ++ ret = dma_coerce_mask_and_coherent(sysdev, DMA_BIT_MASK(64));
701 + if (ret)
702 + return ret;
703 +
704 + chip->dev = dev;
705 ++ chip->sysdev = sysdev;
706 +
707 + ret = idma64_probe(chip);
708 + if (ret)
709 +diff --git a/drivers/dma/idma64.h b/drivers/dma/idma64.h
710 +index 6b816878e5e7..baa32e1425de 100644
711 +--- a/drivers/dma/idma64.h
712 ++++ b/drivers/dma/idma64.h
713 +@@ -216,12 +216,14 @@ static inline void idma64_writel(struct idma64 *idma64, int offset, u32 value)
714 + /**
715 + * struct idma64_chip - representation of iDMA 64-bit controller hardware
716 + * @dev: struct device of the DMA controller
717 ++ * @sysdev: struct device of the physical device that does DMA
718 + * @irq: irq line
719 + * @regs: memory mapped I/O space
720 + * @idma64: struct idma64 that is filed by idma64_probe()
721 + */
722 + struct idma64_chip {
723 + struct device *dev;
724 ++ struct device *sysdev;
725 + int irq;
726 + void __iomem *regs;
727 + struct idma64 *idma64;
728 +diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
729 +index 12d417a4d4a8..b992badb99dd 100644
730 +--- a/drivers/gpio/Kconfig
731 ++++ b/drivers/gpio/Kconfig
732 +@@ -670,6 +670,7 @@ config GPIO_ADP5588
733 + config GPIO_ADP5588_IRQ
734 + bool "Interrupt controller support for ADP5588"
735 + depends on GPIO_ADP5588=y
736 ++ select GPIOLIB_IRQCHIP
737 + help
738 + Say yes here to enable the adp5588 to be used as an interrupt
739 + controller. It requires the driver to be built in the kernel.
740 +diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
741 +index 75f30a0c418a..038882183bdf 100644
742 +--- a/drivers/gpio/gpio-omap.c
743 ++++ b/drivers/gpio/gpio-omap.c
744 +@@ -296,6 +296,22 @@ static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned offset)
745 + }
746 + }
747 +
748 ++/*
749 ++ * Off mode wake-up capable GPIOs in bank(s) that are in the wakeup domain.
750 ++ * See TRM section for GPIO for "Wake-Up Generation" for the list of GPIOs
751 ++ * in wakeup domain. If bank->non_wakeup_gpios is not configured, assume none
752 ++ * are capable waking up the system from off mode.
753 ++ */
754 ++static bool omap_gpio_is_off_wakeup_capable(struct gpio_bank *bank, u32 gpio_mask)
755 ++{
756 ++ u32 no_wake = bank->non_wakeup_gpios;
757 ++
758 ++ if (no_wake)
759 ++ return !!(~no_wake & gpio_mask);
760 ++
761 ++ return false;
762 ++}
763 ++
764 + static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
765 + unsigned trigger)
766 + {
767 +@@ -327,13 +343,7 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
768 + }
769 +
770 + /* This part needs to be executed always for OMAP{34xx, 44xx} */
771 +- if (!bank->regs->irqctrl) {
772 +- /* On omap24xx proceed only when valid GPIO bit is set */
773 +- if (bank->non_wakeup_gpios) {
774 +- if (!(bank->non_wakeup_gpios & gpio_bit))
775 +- goto exit;
776 +- }
777 +-
778 ++ if (!bank->regs->irqctrl && !omap_gpio_is_off_wakeup_capable(bank, gpio)) {
779 + /*
780 + * Log the edge gpio and manually trigger the IRQ
781 + * after resume if the input level changes
782 +@@ -346,7 +356,6 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
783 + bank->enabled_non_wakeup_gpios &= ~gpio_bit;
784 + }
785 +
786 +-exit:
787 + bank->level_mask =
788 + readl_relaxed(bank->base + bank->regs->leveldetect0) |
789 + readl_relaxed(bank->base + bank->regs->leveldetect1);
790 +diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
791 +index 32ab5c32834b..1b2fae915ecc 100644
792 +--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
793 ++++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
794 +@@ -735,11 +735,11 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
795 + vsync_polarity = 1;
796 + }
797 +
798 +- if (mode->vrefresh <= 24000)
799 ++ if (drm_mode_vrefresh(mode) <= 24)
800 + low_refresh_rate = ADV7511_LOW_REFRESH_RATE_24HZ;
801 +- else if (mode->vrefresh <= 25000)
802 ++ else if (drm_mode_vrefresh(mode) <= 25)
803 + low_refresh_rate = ADV7511_LOW_REFRESH_RATE_25HZ;
804 +- else if (mode->vrefresh <= 30000)
805 ++ else if (drm_mode_vrefresh(mode) <= 30)
806 + low_refresh_rate = ADV7511_LOW_REFRESH_RATE_30HZ;
807 + else
808 + low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;
809 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
810 +index 9fe8eda7c859..40c1e89ed361 100644
811 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
812 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
813 +@@ -2493,7 +2493,8 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
814 +
815 + cmd = container_of(header, typeof(*cmd), header);
816 +
817 +- if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
818 ++ if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX ||
819 ++ cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
820 + DRM_ERROR("Illegal shader type %u.\n",
821 + (unsigned) cmd->body.type);
822 + return -EINVAL;
823 +@@ -2732,6 +2733,10 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
824 + if (view_type == vmw_view_max)
825 + return -EINVAL;
826 + cmd = container_of(header, typeof(*cmd), header);
827 ++ if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
828 ++ DRM_ERROR("Invalid surface id.\n");
829 ++ return -EINVAL;
830 ++ }
831 + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
832 + user_surface_converter,
833 + &cmd->sid, &srf_node);
834 +diff --git a/drivers/i2c/busses/i2c-acorn.c b/drivers/i2c/busses/i2c-acorn.c
835 +index 9d7be5af2bf2..6618db75fa25 100644
836 +--- a/drivers/i2c/busses/i2c-acorn.c
837 ++++ b/drivers/i2c/busses/i2c-acorn.c
838 +@@ -83,6 +83,7 @@ static struct i2c_algo_bit_data ioc_data = {
839 +
840 + static struct i2c_adapter ioc_ops = {
841 + .nr = 0,
842 ++ .name = "ioc",
843 + .algo_data = &ioc_data,
844 + };
845 +
846 +diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
847 +index 00e8e675cbeb..eaa312bc3a3c 100644
848 +--- a/drivers/i2c/i2c-dev.c
849 ++++ b/drivers/i2c/i2c-dev.c
850 +@@ -297,6 +297,7 @@ static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client,
851 + rdwr_pa[i].buf[0] < 1 ||
852 + rdwr_pa[i].len < rdwr_pa[i].buf[0] +
853 + I2C_SMBUS_BLOCK_MAX) {
854 ++ i++;
855 + res = -EINVAL;
856 + break;
857 + }
858 +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
859 +index 28feb1744710..25cc6ae87039 100644
860 +--- a/drivers/iommu/intel-iommu.c
861 ++++ b/drivers/iommu/intel-iommu.c
862 +@@ -4119,9 +4119,7 @@ static void __init init_no_remapping_devices(void)
863 +
864 + /* This IOMMU has *only* gfx devices. Either bypass it or
865 + set the gfx_mapped flag, as appropriate */
866 +- if (dmar_map_gfx) {
867 +- intel_iommu_gfx_mapped = 1;
868 +- } else {
869 ++ if (!dmar_map_gfx) {
870 + drhd->ignored = 1;
871 + for_each_active_dev_scope(drhd->devices,
872 + drhd->devices_cnt, i, dev)
873 +@@ -4870,6 +4868,9 @@ int __init intel_iommu_init(void)
874 + goto out_free_reserved_range;
875 + }
876 +
877 ++ if (dmar_map_gfx)
878 ++ intel_iommu_gfx_mapped = 1;
879 ++
880 + init_no_remapping_devices();
881 +
882 + ret = init_dmars();
883 +diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
884 +index f96b8f2bdf74..d7c986fb0b3b 100644
885 +--- a/drivers/isdn/mISDN/socket.c
886 ++++ b/drivers/isdn/mISDN/socket.c
887 +@@ -394,7 +394,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
888 + memcpy(di.channelmap, dev->channelmap,
889 + sizeof(di.channelmap));
890 + di.nrbchan = dev->nrbchan;
891 +- strcpy(di.name, dev_name(&dev->dev));
892 ++ strscpy(di.name, dev_name(&dev->dev), sizeof(di.name));
893 + if (copy_to_user((void __user *)arg, &di, sizeof(di)))
894 + err = -EFAULT;
895 + } else
896 +@@ -678,7 +678,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
897 + memcpy(di.channelmap, dev->channelmap,
898 + sizeof(di.channelmap));
899 + di.nrbchan = dev->nrbchan;
900 +- strcpy(di.name, dev_name(&dev->dev));
901 ++ strscpy(di.name, dev_name(&dev->dev), sizeof(di.name));
902 + if (copy_to_user((void __user *)arg, &di, sizeof(di)))
903 + err = -EFAULT;
904 + } else
905 +@@ -692,6 +692,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
906 + err = -EFAULT;
907 + break;
908 + }
909 ++ dn.name[sizeof(dn.name) - 1] = '\0';
910 + dev = get_mdevice(dn.id);
911 + if (dev)
912 + err = device_rename(&dev->dev, dn.name);
913 +diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
914 +index 646fe85261c1..158eae17031c 100644
915 +--- a/drivers/md/bcache/bset.c
916 ++++ b/drivers/md/bcache/bset.c
917 +@@ -823,12 +823,22 @@ unsigned bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
918 + struct bset *i = bset_tree_last(b)->data;
919 + struct bkey *m, *prev = NULL;
920 + struct btree_iter iter;
921 ++ struct bkey preceding_key_on_stack = ZERO_KEY;
922 ++ struct bkey *preceding_key_p = &preceding_key_on_stack;
923 +
924 + BUG_ON(b->ops->is_extents && !KEY_SIZE(k));
925 +
926 +- m = bch_btree_iter_init(b, &iter, b->ops->is_extents
927 +- ? PRECEDING_KEY(&START_KEY(k))
928 +- : PRECEDING_KEY(k));
929 ++ /*
930 ++ * If k has preceding key, preceding_key_p will be set to address
931 ++ * of k's preceding key; otherwise preceding_key_p will be set
932 ++ * to NULL inside preceding_key().
933 ++ */
934 ++ if (b->ops->is_extents)
935 ++ preceding_key(&START_KEY(k), &preceding_key_p);
936 ++ else
937 ++ preceding_key(k, &preceding_key_p);
938 ++
939 ++ m = bch_btree_iter_init(b, &iter, preceding_key_p);
940 +
941 + if (b->ops->insert_fixup(b, k, &iter, replace_key))
942 + return status;
943 +diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
944 +index ae964624efb2..b935839ab79c 100644
945 +--- a/drivers/md/bcache/bset.h
946 ++++ b/drivers/md/bcache/bset.h
947 +@@ -417,20 +417,26 @@ static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
948 + return __bch_cut_back(where, k);
949 + }
950 +
951 +-#define PRECEDING_KEY(_k) \
952 +-({ \
953 +- struct bkey *_ret = NULL; \
954 +- \
955 +- if (KEY_INODE(_k) || KEY_OFFSET(_k)) { \
956 +- _ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0); \
957 +- \
958 +- if (!_ret->low) \
959 +- _ret->high--; \
960 +- _ret->low--; \
961 +- } \
962 +- \
963 +- _ret; \
964 +-})
965 ++/*
966 ++ * Pointer '*preceding_key_p' points to a memory object to store preceding
967 ++ * key of k. If the preceding key does not exist, set '*preceding_key_p' to
968 ++ * NULL. So the caller of preceding_key() needs to take care of memory
969 ++ * which '*preceding_key_p' pointed to before calling preceding_key().
970 ++ * Currently the only caller of preceding_key() is bch_btree_insert_key(),
971 ++ * and it points to an on-stack variable, so the memory release is handled
972 ++ * by stackframe itself.
973 ++ */
974 ++static inline void preceding_key(struct bkey *k, struct bkey **preceding_key_p)
975 ++{
976 ++ if (KEY_INODE(k) || KEY_OFFSET(k)) {
977 ++ (**preceding_key_p) = KEY(KEY_INODE(k), KEY_OFFSET(k), 0);
978 ++ if (!(*preceding_key_p)->low)
979 ++ (*preceding_key_p)->high--;
980 ++ (*preceding_key_p)->low--;
981 ++ } else {
982 ++ (*preceding_key_p) = NULL;
983 ++ }
984 ++}
985 +
986 + static inline bool bch_ptr_invalid(struct btree_keys *b, const struct bkey *k)
987 + {
988 +diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
989 +index 4510e8a37244..699e5f8e0a71 100644
990 +--- a/drivers/media/v4l2-core/v4l2-ioctl.c
991 ++++ b/drivers/media/v4l2-core/v4l2-ioctl.c
992 +@@ -1959,7 +1959,22 @@ static int v4l_s_parm(const struct v4l2_ioctl_ops *ops,
993 + struct v4l2_streamparm *p = arg;
994 + int ret = check_fmt(file, p->type);
995 +
996 +- return ret ? ret : ops->vidioc_s_parm(file, fh, p);
997 ++ if (ret)
998 ++ return ret;
999 ++
1000 ++ /* Note: extendedmode is never used in drivers */
1001 ++ if (V4L2_TYPE_IS_OUTPUT(p->type)) {
1002 ++ memset(p->parm.output.reserved, 0,
1003 ++ sizeof(p->parm.output.reserved));
1004 ++ p->parm.output.extendedmode = 0;
1005 ++ p->parm.output.outputmode &= V4L2_MODE_HIGHQUALITY;
1006 ++ } else {
1007 ++ memset(p->parm.capture.reserved, 0,
1008 ++ sizeof(p->parm.capture.reserved));
1009 ++ p->parm.capture.extendedmode = 0;
1010 ++ p->parm.capture.capturemode &= V4L2_MODE_HIGHQUALITY;
1011 ++ }
1012 ++ return ops->vidioc_s_parm(file, fh, p);
1013 + }
1014 +
1015 + static int v4l_queryctrl(const struct v4l2_ioctl_ops *ops,
1016 +diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
1017 +index 19ac8bc8e7ea..22dd8c055048 100644
1018 +--- a/drivers/mfd/intel-lpss.c
1019 ++++ b/drivers/mfd/intel-lpss.c
1020 +@@ -273,6 +273,9 @@ static void intel_lpss_init_dev(const struct intel_lpss *lpss)
1021 + {
1022 + u32 value = LPSS_PRIV_SSP_REG_DIS_DMA_FIN;
1023 +
1024 ++ /* Set the device in reset state */
1025 ++ writel(0, lpss->priv + LPSS_PRIV_RESETS);
1026 ++
1027 + intel_lpss_deassert_reset(lpss);
1028 +
1029 + intel_lpss_set_remap_addr(lpss);
1030 +diff --git a/drivers/mfd/tps65912-spi.c b/drivers/mfd/tps65912-spi.c
1031 +index 4aeba9b6942a..ec37cfe32ca3 100644
1032 +--- a/drivers/mfd/tps65912-spi.c
1033 ++++ b/drivers/mfd/tps65912-spi.c
1034 +@@ -27,6 +27,7 @@ static const struct of_device_id tps65912_spi_of_match_table[] = {
1035 + { .compatible = "ti,tps65912", },
1036 + { /* sentinel */ }
1037 + };
1038 ++MODULE_DEVICE_TABLE(of, tps65912_spi_of_match_table);
1039 +
1040 + static int tps65912_spi_probe(struct spi_device *spi)
1041 + {
1042 +diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
1043 +index dd19f17a1b63..2b8c479dbfa6 100644
1044 +--- a/drivers/mfd/twl6040.c
1045 ++++ b/drivers/mfd/twl6040.c
1046 +@@ -322,8 +322,19 @@ int twl6040_power(struct twl6040 *twl6040, int on)
1047 + }
1048 + }
1049 +
1050 ++ /*
1051 ++ * Register access can produce errors after power-up unless we
1052 ++ * wait at least 8ms based on measurements on duovero.
1053 ++ */
1054 ++ usleep_range(10000, 12000);
1055 ++
1056 + /* Sync with the HW */
1057 +- regcache_sync(twl6040->regmap);
1058 ++ ret = regcache_sync(twl6040->regmap);
1059 ++ if (ret) {
1060 ++ dev_err(twl6040->dev, "Failed to sync with the HW: %i\n",
1061 ++ ret);
1062 ++ goto out;
1063 ++ }
1064 +
1065 + /* Default PLL configuration after power up */
1066 + twl6040->pll = TWL6040_SYSCLK_SEL_LPPLL;
1067 +diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
1068 +index 99635dd9dbac..bb3a76ad80da 100644
1069 +--- a/drivers/misc/kgdbts.c
1070 ++++ b/drivers/misc/kgdbts.c
1071 +@@ -1132,7 +1132,7 @@ static void kgdbts_put_char(u8 chr)
1072 +
1073 + static int param_set_kgdbts_var(const char *kmessage, struct kernel_param *kp)
1074 + {
1075 +- int len = strlen(kmessage);
1076 ++ size_t len = strlen(kmessage);
1077 +
1078 + if (len >= MAX_CONFIG_LEN) {
1079 + printk(KERN_ERR "kgdbts: config string too long\n");
1080 +@@ -1152,7 +1152,7 @@ static int param_set_kgdbts_var(const char *kmessage, struct kernel_param *kp)
1081 +
1082 + strcpy(config, kmessage);
1083 + /* Chop out \n char as a result of echo */
1084 +- if (config[len - 1] == '\n')
1085 ++ if (len && config[len - 1] == '\n')
1086 + config[len - 1] = '\0';
1087 +
1088 + /* Go and configure with the new params. */
1089 +diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
1090 +index 6620fc861c47..005c79b5b3f0 100644
1091 +--- a/drivers/net/ethernet/dec/tulip/de4x5.c
1092 ++++ b/drivers/net/ethernet/dec/tulip/de4x5.c
1093 +@@ -2109,7 +2109,6 @@ static struct eisa_driver de4x5_eisa_driver = {
1094 + .remove = de4x5_eisa_remove,
1095 + }
1096 + };
1097 +-MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids);
1098 + #endif
1099 +
1100 + #ifdef CONFIG_PCI
1101 +diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
1102 +index 0a48a31225e6..345818193de9 100644
1103 +--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
1104 ++++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
1105 +@@ -1108,7 +1108,7 @@ static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
1106 + cmd->data = be_get_rss_hash_opts(adapter, cmd->flow_type);
1107 + break;
1108 + case ETHTOOL_GRXRINGS:
1109 +- cmd->data = adapter->num_rx_qs - 1;
1110 ++ cmd->data = adapter->num_rx_qs;
1111 + break;
1112 + default:
1113 + return -EINVAL;
1114 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
1115 +index e3ed70a24029..585a40cc6470 100644
1116 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
1117 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
1118 +@@ -2044,6 +2044,10 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
1119 + mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
1120 +
1121 + autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
1122 ++ if (!autoneg && cmd->base.speed == SPEED_56000) {
1123 ++ netdev_err(dev, "56G not supported with autoneg off\n");
1124 ++ return -EINVAL;
1125 ++ }
1126 + eth_proto_new = autoneg ?
1127 + mlxsw_sp_to_ptys_advert_link(cmd) :
1128 + mlxsw_sp_to_ptys_speed(cmd->base.speed);
1129 +diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
1130 +index c59e8fe37069..49300194d3f9 100644
1131 +--- a/drivers/net/ethernet/renesas/sh_eth.c
1132 ++++ b/drivers/net/ethernet/renesas/sh_eth.c
1133 +@@ -1388,6 +1388,10 @@ static void sh_eth_dev_exit(struct net_device *ndev)
1134 + sh_eth_get_stats(ndev);
1135 + sh_eth_reset(ndev);
1136 +
1137 ++ /* Set the RMII mode again if required */
1138 ++ if (mdp->cd->rmiimode)
1139 ++ sh_eth_write(ndev, 0x1, RMIIMODE);
1140 ++
1141 + /* Set MAC address again */
1142 + update_mac_address(ndev);
1143 + }
1144 +diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
1145 +index 01f95d192d25..2b16a5fed9de 100644
1146 +--- a/drivers/net/usb/ipheth.c
1147 ++++ b/drivers/net/usb/ipheth.c
1148 +@@ -437,17 +437,18 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
1149 + dev);
1150 + dev->tx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
1151 +
1152 ++ netif_stop_queue(net);
1153 + retval = usb_submit_urb(dev->tx_urb, GFP_ATOMIC);
1154 + if (retval) {
1155 + dev_err(&dev->intf->dev, "%s: usb_submit_urb: %d\n",
1156 + __func__, retval);
1157 + dev->net->stats.tx_errors++;
1158 + dev_kfree_skb_any(skb);
1159 ++ netif_wake_queue(net);
1160 + } else {
1161 + dev->net->stats.tx_packets++;
1162 + dev->net->stats.tx_bytes += skb->len;
1163 + dev_consume_skb_any(skb);
1164 +- netif_stop_queue(net);
1165 + }
1166 +
1167 + return NETDEV_TX_OK;
1168 +diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
1169 +index 824e282cd80e..bb2f79933b17 100644
1170 +--- a/drivers/nvmem/core.c
1171 ++++ b/drivers/nvmem/core.c
1172 +@@ -934,7 +934,7 @@ static inline void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell,
1173 + void *buf)
1174 + {
1175 + u8 *p, *b;
1176 +- int i, bit_offset = cell->bit_offset;
1177 ++ int i, extra, bit_offset = cell->bit_offset;
1178 +
1179 + p = b = buf;
1180 + if (bit_offset) {
1181 +@@ -949,11 +949,16 @@ static inline void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell,
1182 + p = b;
1183 + *b++ >>= bit_offset;
1184 + }
1185 +-
1186 +- /* result fits in less bytes */
1187 +- if (cell->bytes != DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE))
1188 +- *p-- = 0;
1189 ++ } else {
1190 ++ /* point to the msb */
1191 ++ p += cell->bytes - 1;
1192 + }
1193 ++
1194 ++ /* result fits in less bytes */
1195 ++ extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
1196 ++ while (--extra >= 0)
1197 ++ *p-- = 0;
1198 ++
1199 + /* clear msb bits if any leftover in the last byte */
1200 + *p &= GENMASK((cell->nbits%BITS_PER_BYTE) - 1, 0);
1201 + }
1202 +diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
1203 +index d6196f7b1d58..7f6b454bca65 100644
1204 +--- a/drivers/pci/host/pcie-rcar.c
1205 ++++ b/drivers/pci/host/pcie-rcar.c
1206 +@@ -847,7 +847,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
1207 + {
1208 + struct device *dev = pcie->dev;
1209 + struct rcar_msi *msi = &pcie->msi;
1210 +- unsigned long base;
1211 ++ phys_addr_t base;
1212 + int err, i;
1213 +
1214 + mutex_init(&msi->lock);
1215 +@@ -886,10 +886,14 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
1216 +
1217 + /* setup MSI data target */
1218 + msi->pages = __get_free_pages(GFP_KERNEL, 0);
1219 ++ if (!msi->pages) {
1220 ++ err = -ENOMEM;
1221 ++ goto err;
1222 ++ }
1223 + base = virt_to_phys((void *)msi->pages);
1224 +
1225 +- rcar_pci_write_reg(pcie, base | MSIFE, PCIEMSIALR);
1226 +- rcar_pci_write_reg(pcie, 0, PCIEMSIAUR);
1227 ++ rcar_pci_write_reg(pcie, lower_32_bits(base) | MSIFE, PCIEMSIALR);
1228 ++ rcar_pci_write_reg(pcie, upper_32_bits(base), PCIEMSIAUR);
1229 +
1230 + /* enable all MSI interrupts */
1231 + rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER);
1232 +diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
1233 +index 61332f4d51c3..c3964fca57b0 100644
1234 +--- a/drivers/pci/host/pcie-xilinx.c
1235 ++++ b/drivers/pci/host/pcie-xilinx.c
1236 +@@ -337,14 +337,19 @@ static const struct irq_domain_ops msi_domain_ops = {
1237 + * xilinx_pcie_enable_msi - Enable MSI support
1238 + * @port: PCIe port information
1239 + */
1240 +-static void xilinx_pcie_enable_msi(struct xilinx_pcie_port *port)
1241 ++static int xilinx_pcie_enable_msi(struct xilinx_pcie_port *port)
1242 + {
1243 + phys_addr_t msg_addr;
1244 +
1245 + port->msi_pages = __get_free_pages(GFP_KERNEL, 0);
1246 ++ if (!port->msi_pages)
1247 ++ return -ENOMEM;
1248 ++
1249 + msg_addr = virt_to_phys((void *)port->msi_pages);
1250 + pcie_write(port, 0x0, XILINX_PCIE_REG_MSIBASE1);
1251 + pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2);
1252 ++
1253 ++ return 0;
1254 + }
1255 +
1256 + /* INTx Functions */
1257 +@@ -516,6 +521,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
1258 + struct device *dev = port->dev;
1259 + struct device_node *node = dev->of_node;
1260 + struct device_node *pcie_intc_node;
1261 ++ int ret;
1262 +
1263 + /* Setup INTx */
1264 + pcie_intc_node = of_get_next_child(node, NULL);
1265 +@@ -544,7 +550,9 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
1266 + return -ENODEV;
1267 + }
1268 +
1269 +- xilinx_pcie_enable_msi(port);
1270 ++ ret = xilinx_pcie_enable_msi(port);
1271 ++ if (ret)
1272 ++ return ret;
1273 + }
1274 +
1275 + return 0;
1276 +diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
1277 +index c614ff7c3bc3..d3562df64456 100644
1278 +--- a/drivers/pci/hotplug/rpadlpar_core.c
1279 ++++ b/drivers/pci/hotplug/rpadlpar_core.c
1280 +@@ -55,6 +55,7 @@ static struct device_node *find_vio_slot_node(char *drc_name)
1281 + if ((rc == 0) && (!strcmp(drc_name, name)))
1282 + break;
1283 + }
1284 ++ of_node_put(parent);
1285 +
1286 + return dn;
1287 + }
1288 +@@ -78,6 +79,7 @@ static struct device_node *find_php_slot_pci_node(char *drc_name,
1289 + return np;
1290 + }
1291 +
1292 ++/* Returns a device_node with its reference count incremented */
1293 + static struct device_node *find_dlpar_node(char *drc_name, int *node_type)
1294 + {
1295 + struct device_node *dn;
1296 +@@ -313,6 +315,7 @@ int dlpar_add_slot(char *drc_name)
1297 + rc = dlpar_add_phb(drc_name, dn);
1298 + break;
1299 + }
1300 ++ of_node_put(dn);
1301 +
1302 + printk(KERN_INFO "%s: slot %s added\n", DLPAR_MODULE_NAME, drc_name);
1303 + exit:
1304 +@@ -446,6 +449,7 @@ int dlpar_remove_slot(char *drc_name)
1305 + rc = dlpar_remove_pci_slot(drc_name, dn);
1306 + break;
1307 + }
1308 ++ of_node_put(dn);
1309 + vm_unmap_aliases();
1310 +
1311 + printk(KERN_INFO "%s: slot %s removed\n", DLPAR_MODULE_NAME, drc_name);
1312 +diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
1313 +index cfa3e850c49f..d225a835a64c 100644
1314 +--- a/drivers/platform/chrome/cros_ec_proto.c
1315 ++++ b/drivers/platform/chrome/cros_ec_proto.c
1316 +@@ -67,6 +67,17 @@ static int send_command(struct cros_ec_device *ec_dev,
1317 + else
1318 + xfer_fxn = ec_dev->cmd_xfer;
1319 +
1320 ++ if (!xfer_fxn) {
1321 ++ /*
1322 ++ * This error can happen if a communication error happened and
1323 ++ * the EC is trying to use protocol v2, on an underlying
1324 ++ * communication mechanism that does not support v2.
1325 ++ */
1326 ++ dev_err_once(ec_dev->dev,
1327 ++ "missing EC transfer API, cannot send command\n");
1328 ++ return -EIO;
1329 ++ }
1330 ++
1331 + ret = (*xfer_fxn)(ec_dev, msg);
1332 + if (msg->result == EC_RES_IN_PROGRESS) {
1333 + int i;
1334 +diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
1335 +index 0bf51d574fa9..f2b9dd82128f 100644
1336 +--- a/drivers/platform/x86/intel_pmc_ipc.c
1337 ++++ b/drivers/platform/x86/intel_pmc_ipc.c
1338 +@@ -620,13 +620,17 @@ static int ipc_create_pmc_devices(void)
1339 + if (ret) {
1340 + dev_err(ipcdev.dev, "Failed to add punit platform device\n");
1341 + platform_device_unregister(ipcdev.tco_dev);
1342 ++ return ret;
1343 + }
1344 +
1345 + if (!ipcdev.telem_res_inval) {
1346 + ret = ipc_create_telemetry_device();
1347 +- if (ret)
1348 ++ if (ret) {
1349 + dev_warn(ipcdev.dev,
1350 + "Failed to add telemetry platform device\n");
1351 ++ platform_device_unregister(ipcdev.punit_dev);
1352 ++ platform_device_unregister(ipcdev.tco_dev);
1353 ++ }
1354 + }
1355 +
1356 + return ret;
1357 +diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
1358 +index 172ef8245811..a19246455c13 100644
1359 +--- a/drivers/pwm/core.c
1360 ++++ b/drivers/pwm/core.c
1361 +@@ -302,10 +302,12 @@ int pwmchip_add_with_polarity(struct pwm_chip *chip,
1362 + if (IS_ENABLED(CONFIG_OF))
1363 + of_pwmchip_add(chip);
1364 +
1365 +- pwmchip_sysfs_export(chip);
1366 +-
1367 + out:
1368 + mutex_unlock(&pwm_lock);
1369 ++
1370 ++ if (!ret)
1371 ++ pwmchip_sysfs_export(chip);
1372 ++
1373 + return ret;
1374 + }
1375 + EXPORT_SYMBOL_GPL(pwmchip_add_with_polarity);
1376 +@@ -339,7 +341,7 @@ int pwmchip_remove(struct pwm_chip *chip)
1377 + unsigned int i;
1378 + int ret = 0;
1379 +
1380 +- pwmchip_sysfs_unexport_children(chip);
1381 ++ pwmchip_sysfs_unexport(chip);
1382 +
1383 + mutex_lock(&pwm_lock);
1384 +
1385 +@@ -359,8 +361,6 @@ int pwmchip_remove(struct pwm_chip *chip)
1386 +
1387 + free_pwms(chip);
1388 +
1389 +- pwmchip_sysfs_unexport(chip);
1390 +-
1391 + out:
1392 + mutex_unlock(&pwm_lock);
1393 + return ret;
1394 +diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
1395 +index 9d5bd7d5c610..f58a4867b519 100644
1396 +--- a/drivers/pwm/pwm-meson.c
1397 ++++ b/drivers/pwm/pwm-meson.c
1398 +@@ -110,6 +110,10 @@ struct meson_pwm {
1399 + const struct meson_pwm_data *data;
1400 + void __iomem *base;
1401 + u8 inverter_mask;
1402 ++ /*
1403 ++ * Protects register (write) access to the REG_MISC_AB register
1404 ++ * that is shared between the two PWMs.
1405 ++ */
1406 + spinlock_t lock;
1407 + };
1408 +
1409 +@@ -230,6 +234,7 @@ static void meson_pwm_enable(struct meson_pwm *meson,
1410 + {
1411 + u32 value, clk_shift, clk_enable, enable;
1412 + unsigned int offset;
1413 ++ unsigned long flags;
1414 +
1415 + switch (id) {
1416 + case 0:
1417 +@@ -250,6 +255,8 @@ static void meson_pwm_enable(struct meson_pwm *meson,
1418 + return;
1419 + }
1420 +
1421 ++ spin_lock_irqsave(&meson->lock, flags);
1422 ++
1423 + value = readl(meson->base + REG_MISC_AB);
1424 + value &= ~(MISC_CLK_DIV_MASK << clk_shift);
1425 + value |= channel->pre_div << clk_shift;
1426 +@@ -262,11 +269,14 @@ static void meson_pwm_enable(struct meson_pwm *meson,
1427 + value = readl(meson->base + REG_MISC_AB);
1428 + value |= enable;
1429 + writel(value, meson->base + REG_MISC_AB);
1430 ++
1431 ++ spin_unlock_irqrestore(&meson->lock, flags);
1432 + }
1433 +
1434 + static void meson_pwm_disable(struct meson_pwm *meson, unsigned int id)
1435 + {
1436 + u32 value, enable;
1437 ++ unsigned long flags;
1438 +
1439 + switch (id) {
1440 + case 0:
1441 +@@ -281,9 +291,13 @@ static void meson_pwm_disable(struct meson_pwm *meson, unsigned int id)
1442 + return;
1443 + }
1444 +
1445 ++ spin_lock_irqsave(&meson->lock, flags);
1446 ++
1447 + value = readl(meson->base + REG_MISC_AB);
1448 + value &= ~enable;
1449 + writel(value, meson->base + REG_MISC_AB);
1450 ++
1451 ++ spin_unlock_irqrestore(&meson->lock, flags);
1452 + }
1453 +
1454 + static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
1455 +@@ -291,19 +305,16 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
1456 + {
1457 + struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
1458 + struct meson_pwm *meson = to_meson_pwm(chip);
1459 +- unsigned long flags;
1460 + int err = 0;
1461 +
1462 + if (!state)
1463 + return -EINVAL;
1464 +
1465 +- spin_lock_irqsave(&meson->lock, flags);
1466 +-
1467 + if (!state->enabled) {
1468 + meson_pwm_disable(meson, pwm->hwpwm);
1469 + channel->state.enabled = false;
1470 +
1471 +- goto unlock;
1472 ++ return 0;
1473 + }
1474 +
1475 + if (state->period != channel->state.period ||
1476 +@@ -324,7 +335,7 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
1477 + err = meson_pwm_calc(meson, channel, pwm->hwpwm,
1478 + state->duty_cycle, state->period);
1479 + if (err < 0)
1480 +- goto unlock;
1481 ++ return err;
1482 +
1483 + channel->state.polarity = state->polarity;
1484 + channel->state.period = state->period;
1485 +@@ -336,9 +347,7 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
1486 + channel->state.enabled = true;
1487 + }
1488 +
1489 +-unlock:
1490 +- spin_unlock_irqrestore(&meson->lock, flags);
1491 +- return err;
1492 ++ return 0;
1493 + }
1494 +
1495 + static void meson_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
1496 +diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
1497 +index c0e06f0c19d1..9a232ebbbf96 100644
1498 +--- a/drivers/pwm/pwm-tiehrpwm.c
1499 ++++ b/drivers/pwm/pwm-tiehrpwm.c
1500 +@@ -383,6 +383,8 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
1501 + }
1502 +
1503 + /* Update shadow register first before modifying active register */
1504 ++ ehrpwm_modify(pc->mmio_base, AQSFRC, AQSFRC_RLDCSF_MASK,
1505 ++ AQSFRC_RLDCSF_ZRO);
1506 + ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val);
1507 + /*
1508 + * Changes to immediate action on Action Qualifier. This puts
1509 +diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
1510 +index a813239300c3..0850b11dfd83 100644
1511 +--- a/drivers/pwm/sysfs.c
1512 ++++ b/drivers/pwm/sysfs.c
1513 +@@ -397,19 +397,6 @@ void pwmchip_sysfs_export(struct pwm_chip *chip)
1514 + }
1515 +
1516 + void pwmchip_sysfs_unexport(struct pwm_chip *chip)
1517 +-{
1518 +- struct device *parent;
1519 +-
1520 +- parent = class_find_device(&pwm_class, NULL, chip,
1521 +- pwmchip_sysfs_match);
1522 +- if (parent) {
1523 +- /* for class_find_device() */
1524 +- put_device(parent);
1525 +- device_unregister(parent);
1526 +- }
1527 +-}
1528 +-
1529 +-void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
1530 + {
1531 + struct device *parent;
1532 + unsigned int i;
1533 +@@ -427,6 +414,7 @@ void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
1534 + }
1535 +
1536 + put_device(parent);
1537 ++ device_unregister(parent);
1538 + }
1539 +
1540 + static int __init pwm_sysfs_init(void)
1541 +diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c
1542 +index bad0e0ea4f30..ef989a15aefc 100644
1543 +--- a/drivers/rapidio/rio_cm.c
1544 ++++ b/drivers/rapidio/rio_cm.c
1545 +@@ -2145,6 +2145,14 @@ static int riocm_add_mport(struct device *dev,
1546 + mutex_init(&cm->rx_lock);
1547 + riocm_rx_fill(cm, RIOCM_RX_RING_SIZE);
1548 + cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
1549 ++ if (!cm->rx_wq) {
1550 ++ riocm_error("failed to allocate IBMBOX_%d on %s",
1551 ++ cmbox, mport->name);
1552 ++ rio_release_outb_mbox(mport, cmbox);
1553 ++ kfree(cm);
1554 ++ return -ENOMEM;
1555 ++ }
1556 ++
1557 + INIT_WORK(&cm->rx_work, rio_ibmsg_handler);
1558 +
1559 + cm->tx_slot = 0;
1560 +diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
1561 +index 28c48b3c1946..3c8c6f942e67 100644
1562 +--- a/drivers/rtc/rtc-pcf8523.c
1563 ++++ b/drivers/rtc/rtc-pcf8523.c
1564 +@@ -82,6 +82,18 @@ static int pcf8523_write(struct i2c_client *client, u8 reg, u8 value)
1565 + return 0;
1566 + }
1567 +
1568 ++static int pcf8523_voltage_low(struct i2c_client *client)
1569 ++{
1570 ++ u8 value;
1571 ++ int err;
1572 ++
1573 ++ err = pcf8523_read(client, REG_CONTROL3, &value);
1574 ++ if (err < 0)
1575 ++ return err;
1576 ++
1577 ++ return !!(value & REG_CONTROL3_BLF);
1578 ++}
1579 ++
1580 + static int pcf8523_select_capacitance(struct i2c_client *client, bool high)
1581 + {
1582 + u8 value;
1583 +@@ -164,6 +176,14 @@ static int pcf8523_rtc_read_time(struct device *dev, struct rtc_time *tm)
1584 + struct i2c_msg msgs[2];
1585 + int err;
1586 +
1587 ++ err = pcf8523_voltage_low(client);
1588 ++ if (err < 0) {
1589 ++ return err;
1590 ++ } else if (err > 0) {
1591 ++ dev_err(dev, "low voltage detected, time is unreliable\n");
1592 ++ return -EINVAL;
1593 ++ }
1594 ++
1595 + msgs[0].addr = client->addr;
1596 + msgs[0].flags = 0;
1597 + msgs[0].len = 1;
1598 +@@ -248,17 +268,13 @@ static int pcf8523_rtc_ioctl(struct device *dev, unsigned int cmd,
1599 + unsigned long arg)
1600 + {
1601 + struct i2c_client *client = to_i2c_client(dev);
1602 +- u8 value;
1603 +- int ret = 0, err;
1604 ++ int ret;
1605 +
1606 + switch (cmd) {
1607 + case RTC_VL_READ:
1608 +- err = pcf8523_read(client, REG_CONTROL3, &value);
1609 +- if (err < 0)
1610 +- return err;
1611 +-
1612 +- if (value & REG_CONTROL3_BLF)
1613 +- ret = 1;
1614 ++ ret = pcf8523_voltage_low(client);
1615 ++ if (ret < 0)
1616 ++ return ret;
1617 +
1618 + if (copy_to_user((void __user *)arg, &ret, sizeof(int)))
1619 + return -EFAULT;
1620 +diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
1621 +index 5ff9f89c17c7..39b2f60149d9 100644
1622 +--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
1623 ++++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
1624 +@@ -829,7 +829,7 @@ ret_err_rqe:
1625 + ((u64)err_entry->data.err_warn_bitmap_hi << 32) |
1626 + (u64)err_entry->data.err_warn_bitmap_lo;
1627 + for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
1628 +- if (err_warn_bit_map & (u64) (1 << i)) {
1629 ++ if (err_warn_bit_map & ((u64)1 << i)) {
1630 + err_warn = i;
1631 + break;
1632 + }
1633 +diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
1634 +index 2ffe029ff2b6..e974106f2bb5 100644
1635 +--- a/drivers/scsi/cxgbi/libcxgbi.c
1636 ++++ b/drivers/scsi/cxgbi/libcxgbi.c
1637 +@@ -637,6 +637,10 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
1638 +
1639 + if (ndev->flags & IFF_LOOPBACK) {
1640 + ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
1641 ++ if (!ndev) {
1642 ++ err = -ENETUNREACH;
1643 ++ goto rel_neigh;
1644 ++ }
1645 + mtu = ndev->mtu;
1646 + pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
1647 + n->dev->name, ndev->name, mtu);
1648 +diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
1649 +index ee1f9ee995e5..400eee9d7783 100644
1650 +--- a/drivers/scsi/libsas/sas_expander.c
1651 ++++ b/drivers/scsi/libsas/sas_expander.c
1652 +@@ -978,6 +978,8 @@ static struct domain_device *sas_ex_discover_expander(
1653 + list_del(&child->dev_list_node);
1654 + spin_unlock_irq(&parent->port->dev_list_lock);
1655 + sas_put_device(child);
1656 ++ sas_port_delete(phy->port);
1657 ++ phy->port = NULL;
1658 + return NULL;
1659 + }
1660 + list_add_tail(&child->siblings, &parent->ex_dev.children);
1661 +diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
1662 +index 4905455bbfc7..b5be4df05733 100644
1663 +--- a/drivers/scsi/lpfc/lpfc_els.c
1664 ++++ b/drivers/scsi/lpfc/lpfc_els.c
1665 +@@ -6789,7 +6789,10 @@ int
1666 + lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
1667 + {
1668 + struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport,
1669 +- rrq->nlp_DID);
1670 ++ rrq->nlp_DID);
1671 ++ if (!ndlp)
1672 ++ return 1;
1673 ++
1674 + if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag))
1675 + return lpfc_issue_els_rrq(rrq->vport, ndlp,
1676 + rrq->nlp_DID, rrq);
1677 +diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
1678 +index 06a062455404..b12f7f952b70 100644
1679 +--- a/drivers/scsi/smartpqi/smartpqi_init.c
1680 ++++ b/drivers/scsi/smartpqi/smartpqi_init.c
1681 +@@ -5478,7 +5478,7 @@ static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
1682 + else
1683 + mask = DMA_BIT_MASK(32);
1684 +
1685 +- rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
1686 ++ rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
1687 + if (rc) {
1688 + dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
1689 + goto disable_device;
1690 +diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
1691 +index e929f5142862..36226976773f 100644
1692 +--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
1693 ++++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
1694 +@@ -778,7 +778,7 @@ static bool pwrap_is_pmic_cipher_ready(struct pmic_wrapper *wrp)
1695 + static int pwrap_init_cipher(struct pmic_wrapper *wrp)
1696 + {
1697 + int ret;
1698 +- u32 rdata;
1699 ++ u32 rdata = 0;
1700 +
1701 + pwrap_writel(wrp, 0x1, PWRAP_CIPHER_SWRST);
1702 + pwrap_writel(wrp, 0x0, PWRAP_CIPHER_SWRST);
1703 +diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
1704 +index 8b618f0fa459..6dd195b94c57 100644
1705 +--- a/drivers/spi/spi-pxa2xx.c
1706 ++++ b/drivers/spi/spi-pxa2xx.c
1707 +@@ -1475,12 +1475,7 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
1708 +
1709 + static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param)
1710 + {
1711 +- struct device *dev = param;
1712 +-
1713 +- if (dev != chan->device->dev->parent)
1714 +- return false;
1715 +-
1716 +- return true;
1717 ++ return param == chan->device->dev;
1718 + }
1719 +
1720 + static struct pxa2xx_spi_master *
1721 +diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
1722 +index 94590ac5b3cf..f72eebc71dd8 100644
1723 +--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
1724 ++++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
1725 +@@ -381,18 +381,9 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
1726 + int run, addridx, actual_pages;
1727 + unsigned long *need_release;
1728 +
1729 +- if (count >= INT_MAX - PAGE_SIZE)
1730 +- return NULL;
1731 +-
1732 + offset = (unsigned int)buf & (PAGE_SIZE - 1);
1733 + num_pages = (count + offset + PAGE_SIZE - 1) / PAGE_SIZE;
1734 +
1735 +- if (num_pages > (SIZE_MAX - sizeof(PAGELIST_T) -
1736 +- sizeof(struct vchiq_pagelist_info)) /
1737 +- (sizeof(u32) + sizeof(pages[0]) +
1738 +- sizeof(struct scatterlist)))
1739 +- return NULL;
1740 +-
1741 + *ppagelist = NULL;
1742 +
1743 + /* Allocate enough storage to hold the page pointers and the page
1744 +diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
1745 +index 3f9fe6aa51cc..ebbe1ec7b9e8 100644
1746 +--- a/drivers/thermal/qcom/tsens.c
1747 ++++ b/drivers/thermal/qcom/tsens.c
1748 +@@ -162,7 +162,8 @@ static int tsens_probe(struct platform_device *pdev)
1749 + if (tmdev->ops->calibrate) {
1750 + ret = tmdev->ops->calibrate(tmdev);
1751 + if (ret < 0) {
1752 +- dev_err(dev, "tsens calibration failed\n");
1753 ++ if (ret != -EPROBE_DEFER)
1754 ++ dev_err(dev, "tsens calibration failed\n");
1755 + return ret;
1756 + }
1757 + }
1758 +diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
1759 +index 3177264a1166..22d65a33059e 100644
1760 +--- a/drivers/tty/serial/8250/8250_dw.c
1761 ++++ b/drivers/tty/serial/8250/8250_dw.c
1762 +@@ -269,7 +269,7 @@ static bool dw8250_fallback_dma_filter(struct dma_chan *chan, void *param)
1763 +
1764 + static bool dw8250_idma_filter(struct dma_chan *chan, void *param)
1765 + {
1766 +- return param == chan->device->dev->parent;
1767 ++ return param == chan->device->dev;
1768 + }
1769 +
1770 + static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
1771 +@@ -311,7 +311,7 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
1772 + p->set_termios = dw8250_set_termios;
1773 + }
1774 +
1775 +- /* Platforms with iDMA */
1776 ++ /* Platforms with iDMA 64-bit */
1777 + if (platform_get_resource_byname(to_platform_device(p->dev),
1778 + IORESOURCE_MEM, "lpss_priv")) {
1779 + p->set_termios = dw8250_set_termios;
1780 +diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
1781 +index 59828d819145..5ad978acd90c 100644
1782 +--- a/drivers/tty/serial/sunhv.c
1783 ++++ b/drivers/tty/serial/sunhv.c
1784 +@@ -392,7 +392,7 @@ static struct uart_ops sunhv_pops = {
1785 + static struct uart_driver sunhv_reg = {
1786 + .owner = THIS_MODULE,
1787 + .driver_name = "sunhv",
1788 +- .dev_name = "ttyS",
1789 ++ .dev_name = "ttyHV",
1790 + .major = TTY_MAJOR,
1791 + };
1792 +
1793 +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
1794 +index 38c7676e7a82..19e819aa2419 100644
1795 +--- a/drivers/usb/core/quirks.c
1796 ++++ b/drivers/usb/core/quirks.c
1797 +@@ -70,6 +70,9 @@ static const struct usb_device_id usb_quirk_list[] = {
1798 + /* Cherry Stream G230 2.0 (G85-231) and 3.0 (G85-232) */
1799 + { USB_DEVICE(0x046a, 0x0023), .driver_info = USB_QUIRK_RESET_RESUME },
1800 +
1801 ++ /* Logitech HD Webcam C270 */
1802 ++ { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME },
1803 ++
1804 + /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
1805 + { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
1806 + { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
1807 +diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
1808 +index 0e5435330c07..120c8f716acf 100644
1809 +--- a/drivers/usb/dwc2/hcd.c
1810 ++++ b/drivers/usb/dwc2/hcd.c
1811 +@@ -2552,8 +2552,10 @@ static void dwc2_free_dma_aligned_buffer(struct urb *urb)
1812 + return;
1813 +
1814 + /* Restore urb->transfer_buffer from the end of the allocated area */
1815 +- memcpy(&stored_xfer_buffer, urb->transfer_buffer +
1816 +- urb->transfer_buffer_length, sizeof(urb->transfer_buffer));
1817 ++ memcpy(&stored_xfer_buffer,
1818 ++ PTR_ALIGN(urb->transfer_buffer + urb->transfer_buffer_length,
1819 ++ dma_get_cache_alignment()),
1820 ++ sizeof(urb->transfer_buffer));
1821 +
1822 + if (usb_urb_dir_in(urb))
1823 + memcpy(stored_xfer_buffer, urb->transfer_buffer,
1824 +@@ -2580,6 +2582,7 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
1825 + * DMA
1826 + */
1827 + kmalloc_size = urb->transfer_buffer_length +
1828 ++ (dma_get_cache_alignment() - 1) +
1829 + sizeof(urb->transfer_buffer);
1830 +
1831 + kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
1832 +@@ -2590,7 +2593,8 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
1833 + * Position value of original urb->transfer_buffer pointer to the end
1834 + * of allocation for later referencing
1835 + */
1836 +- memcpy(kmalloc_ptr + urb->transfer_buffer_length,
1837 ++ memcpy(PTR_ALIGN(kmalloc_ptr + urb->transfer_buffer_length,
1838 ++ dma_get_cache_alignment()),
1839 + &urb->transfer_buffer, sizeof(urb->transfer_buffer));
1840 +
1841 + if (usb_urb_dir_out(urb))
1842 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1843 +index 9f96dd274370..1effe74ec638 100644
1844 +--- a/drivers/usb/serial/option.c
1845 ++++ b/drivers/usb/serial/option.c
1846 +@@ -1166,6 +1166,10 @@ static const struct usb_device_id option_ids[] = {
1847 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
1848 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
1849 + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
1850 ++ { USB_DEVICE(TELIT_VENDOR_ID, 0x1260),
1851 ++ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
1852 ++ { USB_DEVICE(TELIT_VENDOR_ID, 0x1261),
1853 ++ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
1854 + { USB_DEVICE(TELIT_VENDOR_ID, 0x1900), /* Telit LN940 (QMI) */
1855 + .driver_info = NCTRL(0) | RSVD(1) },
1856 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */
1857 +@@ -1767,6 +1771,8 @@ static const struct usb_device_id option_ids[] = {
1858 + { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
1859 + .driver_info = RSVD(5) | RSVD(6) },
1860 + { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) }, /* Simcom SIM7500/SIM7600 MBIM mode */
1861 ++ { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9011, 0xff), /* Simcom SIM7500/SIM7600 RNDIS mode */
1862 ++ .driver_info = RSVD(7) },
1863 + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
1864 + .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
1865 + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
1866 +diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
1867 +index 9706d214c409..8fd5e19846ef 100644
1868 +--- a/drivers/usb/serial/pl2303.c
1869 ++++ b/drivers/usb/serial/pl2303.c
1870 +@@ -101,6 +101,7 @@ static const struct usb_device_id id_table[] = {
1871 + { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
1872 + { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
1873 + { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) },
1874 ++ { USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) },
1875 + { } /* Terminating entry */
1876 + };
1877 +
1878 +diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
1879 +index d84c3b3d477b..496cbccbf26c 100644
1880 +--- a/drivers/usb/serial/pl2303.h
1881 ++++ b/drivers/usb/serial/pl2303.h
1882 +@@ -159,3 +159,6 @@
1883 + #define SMART_VENDOR_ID 0x0b8c
1884 + #define SMART_PRODUCT_ID 0x2303
1885 +
1886 ++/* Allied Telesis VT-Kit3 */
1887 ++#define AT_VENDOR_ID 0x0caa
1888 ++#define AT_VTKIT3_PRODUCT_ID 0x3001
1889 +diff --git a/drivers/usb/storage/unusual_realtek.h b/drivers/usb/storage/unusual_realtek.h
1890 +index 7ca779493671..dee100dd02e1 100644
1891 +--- a/drivers/usb/storage/unusual_realtek.h
1892 ++++ b/drivers/usb/storage/unusual_realtek.h
1893 +@@ -29,6 +29,11 @@ UNUSUAL_DEV(0x0bda, 0x0138, 0x0000, 0x9999,
1894 + "USB Card Reader",
1895 + USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
1896 +
1897 ++UNUSUAL_DEV(0x0bda, 0x0153, 0x0000, 0x9999,
1898 ++ "Realtek",
1899 ++ "USB Card Reader",
1900 ++ USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
1901 ++
1902 + UNUSUAL_DEV(0x0bda, 0x0158, 0x0000, 0x9999,
1903 + "Realtek",
1904 + "USB Card Reader",
1905 +diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c
1906 +index 463028543173..59e1cae57948 100644
1907 +--- a/drivers/video/fbdev/hgafb.c
1908 ++++ b/drivers/video/fbdev/hgafb.c
1909 +@@ -285,6 +285,8 @@ static int hga_card_detect(void)
1910 + hga_vram_len = 0x08000;
1911 +
1912 + hga_vram = ioremap(0xb0000, hga_vram_len);
1913 ++ if (!hga_vram)
1914 ++ goto error;
1915 +
1916 + if (request_region(0x3b0, 12, "hgafb"))
1917 + release_io_ports = 1;
1918 +diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
1919 +index 4363c64d74e8..4ef9dc94e813 100644
1920 +--- a/drivers/video/fbdev/imsttfb.c
1921 ++++ b/drivers/video/fbdev/imsttfb.c
1922 +@@ -1516,6 +1516,11 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1923 + info->fix.smem_start = addr;
1924 + info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ?
1925 + 0x400000 : 0x800000);
1926 ++ if (!info->screen_base) {
1927 ++ release_mem_region(addr, size);
1928 ++ framebuffer_release(info);
1929 ++ return -ENOMEM;
1930 ++ }
1931 + info->fix.mmio_start = addr + 0x800000;
1932 + par->dc_regs = ioremap(addr + 0x800000, 0x1000);
1933 + par->cmap_regs_phys = addr + 0x840000;
1934 +diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
1935 +index 8f8909a668d7..78fffca4c119 100644
1936 +--- a/drivers/watchdog/Kconfig
1937 ++++ b/drivers/watchdog/Kconfig
1938 +@@ -1850,6 +1850,7 @@ comment "Watchdog Pretimeout Governors"
1939 +
1940 + config WATCHDOG_PRETIMEOUT_GOV
1941 + bool "Enable watchdog pretimeout governors"
1942 ++ depends on WATCHDOG_CORE
1943 + help
1944 + The option allows to select watchdog pretimeout governors.
1945 +
1946 +diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
1947 +index 518dfa1047cb..5098982e1a58 100644
1948 +--- a/drivers/watchdog/imx2_wdt.c
1949 ++++ b/drivers/watchdog/imx2_wdt.c
1950 +@@ -181,8 +181,10 @@ static void __imx2_wdt_set_timeout(struct watchdog_device *wdog,
1951 + static int imx2_wdt_set_timeout(struct watchdog_device *wdog,
1952 + unsigned int new_timeout)
1953 + {
1954 +- __imx2_wdt_set_timeout(wdog, new_timeout);
1955 ++ unsigned int actual;
1956 +
1957 ++ actual = min(new_timeout, wdog->max_hw_heartbeat_ms * 1000);
1958 ++ __imx2_wdt_set_timeout(wdog, actual);
1959 + wdog->timeout = new_timeout;
1960 + return 0;
1961 + }
1962 +diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
1963 +index d2a1a79fa324..a1985a9ad2d6 100644
1964 +--- a/fs/configfs/dir.c
1965 ++++ b/fs/configfs/dir.c
1966 +@@ -58,15 +58,13 @@ static void configfs_d_iput(struct dentry * dentry,
1967 + if (sd) {
1968 + /* Coordinate with configfs_readdir */
1969 + spin_lock(&configfs_dirent_lock);
1970 +- /* Coordinate with configfs_attach_attr where will increase
1971 +- * sd->s_count and update sd->s_dentry to new allocated one.
1972 +- * Only set sd->dentry to null when this dentry is the only
1973 +- * sd owner.
1974 +- * If not do so, configfs_d_iput may run just after
1975 +- * configfs_attach_attr and set sd->s_dentry to null
1976 +- * even it's still in use.
1977 ++ /*
1978 ++ * Set sd->s_dentry to null only when this dentry is the one
1979 ++ * that is going to be killed. Otherwise configfs_d_iput may
1980 ++ * run just after configfs_attach_attr and set sd->s_dentry to
1981 ++ * NULL even if it's still in use.
1982 + */
1983 +- if (atomic_read(&sd->s_count) <= 2)
1984 ++ if (sd->s_dentry == dentry)
1985 + sd->s_dentry = NULL;
1986 +
1987 + spin_unlock(&configfs_dirent_lock);
1988 +@@ -1755,12 +1753,19 @@ int configfs_register_group(struct config_group *parent_group,
1989 +
1990 + inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
1991 + ret = create_default_group(parent_group, group);
1992 +- if (!ret) {
1993 +- spin_lock(&configfs_dirent_lock);
1994 +- configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata);
1995 +- spin_unlock(&configfs_dirent_lock);
1996 +- }
1997 ++ if (ret)
1998 ++ goto err_out;
1999 ++
2000 ++ spin_lock(&configfs_dirent_lock);
2001 ++ configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata);
2002 ++ spin_unlock(&configfs_dirent_lock);
2003 ++ inode_unlock(d_inode(parent));
2004 ++ return 0;
2005 ++err_out:
2006 + inode_unlock(d_inode(parent));
2007 ++ mutex_lock(&subsys->su_mutex);
2008 ++ unlink_group(group);
2009 ++ mutex_unlock(&subsys->su_mutex);
2010 + return ret;
2011 + }
2012 + EXPORT_SYMBOL(configfs_register_group);
2013 +diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
2014 +index 1de02c31756b..c56d04ec45dc 100644
2015 +--- a/fs/f2fs/inode.c
2016 ++++ b/fs/f2fs/inode.c
2017 +@@ -288,6 +288,7 @@ make_now:
2018 + return inode;
2019 +
2020 + bad_inode:
2021 ++ f2fs_inode_synced(inode);
2022 + iget_failed(inode);
2023 + trace_f2fs_iget_exit(inode, ret);
2024 + return ERR_PTR(ret);
2025 +diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
2026 +index e59eeaf02eaa..9de1480a86bd 100644
2027 +--- a/fs/f2fs/recovery.c
2028 ++++ b/fs/f2fs/recovery.c
2029 +@@ -407,7 +407,15 @@ retry_dn:
2030 +
2031 + get_node_info(sbi, dn.nid, &ni);
2032 + f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
2033 +- f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));
2034 ++
2035 ++ if (ofs_of_node(dn.node_page) != ofs_of_node(page)) {
2036 ++ f2fs_msg(sbi->sb, KERN_WARNING,
2037 ++ "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
2038 ++ inode->i_ino, ofs_of_node(dn.node_page),
2039 ++ ofs_of_node(page));
2040 ++ err = -EFAULT;
2041 ++ goto err;
2042 ++ }
2043 +
2044 + for (; start < end; start++, dn.ofs_in_node++) {
2045 + block_t src, dest;
2046 +diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
2047 +index 893723978f5e..faca7fdb54b0 100644
2048 +--- a/fs/f2fs/segment.h
2049 ++++ b/fs/f2fs/segment.h
2050 +@@ -613,7 +613,6 @@ static inline void verify_block_addr(struct f2fs_io_info *fio, block_t blk_addr)
2051 + static inline int check_block_count(struct f2fs_sb_info *sbi,
2052 + int segno, struct f2fs_sit_entry *raw_sit)
2053 + {
2054 +-#ifdef CONFIG_F2FS_CHECK_FS
2055 + bool is_valid = test_bit_le(0, raw_sit->valid_map) ? true : false;
2056 + int valid_blocks = 0;
2057 + int cur_pos = 0, next_pos;
2058 +@@ -640,7 +639,7 @@ static inline int check_block_count(struct f2fs_sb_info *sbi,
2059 + set_sbi_flag(sbi, SBI_NEED_FSCK);
2060 + return -EINVAL;
2061 + }
2062 +-#endif
2063 ++
2064 + /* check segment usage, and check boundary of a given segment number */
2065 + if (unlikely(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
2066 + || segno > TOTAL_SEGS(sbi) - 1)) {
2067 +diff --git a/fs/fat/file.c b/fs/fat/file.c
2068 +index 3d04b124bce0..392ec5641f38 100644
2069 +--- a/fs/fat/file.c
2070 ++++ b/fs/fat/file.c
2071 +@@ -160,12 +160,17 @@ static int fat_file_release(struct inode *inode, struct file *filp)
2072 + int fat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
2073 + {
2074 + struct inode *inode = filp->f_mapping->host;
2075 +- int res, err;
2076 ++ int err;
2077 ++
2078 ++ err = __generic_file_fsync(filp, start, end, datasync);
2079 ++ if (err)
2080 ++ return err;
2081 +
2082 +- res = generic_file_fsync(filp, start, end, datasync);
2083 + err = sync_mapping_buffers(MSDOS_SB(inode->i_sb)->fat_inode->i_mapping);
2084 ++ if (err)
2085 ++ return err;
2086 +
2087 +- return res ? res : err;
2088 ++ return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
2089 + }
2090 +
2091 +
2092 +diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
2093 +index eaedbc1a3e95..8016cd059db1 100644
2094 +--- a/fs/fuse/dev.c
2095 ++++ b/fs/fuse/dev.c
2096 +@@ -1668,7 +1668,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
2097 + offset = outarg->offset & ~PAGE_MASK;
2098 + file_size = i_size_read(inode);
2099 +
2100 +- num = outarg->size;
2101 ++ num = min(outarg->size, fc->max_write);
2102 + if (outarg->offset > file_size)
2103 + num = 0;
2104 + else if (outarg->offset + num > file_size)
2105 +diff --git a/fs/inode.c b/fs/inode.c
2106 +index 2071ff5343c5..30a97292e965 100644
2107 +--- a/fs/inode.c
2108 ++++ b/fs/inode.c
2109 +@@ -1804,8 +1804,13 @@ int file_remove_privs(struct file *file)
2110 + int kill;
2111 + int error = 0;
2112 +
2113 +- /* Fast path for nothing security related */
2114 +- if (IS_NOSEC(inode))
2115 ++ /*
2116 ++ * Fast path for nothing security related.
2117 ++ * As well for non-regular files, e.g. blkdev inodes.
2118 ++ * For example, blkdev_write_iter() might get here
2119 ++ * trying to remove privs which it is not allowed to.
2120 ++ */
2121 ++ if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode))
2122 + return 0;
2123 +
2124 + kill = dentry_needs_remove_privs(dentry);
2125 +diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
2126 +index 0bf9e7bf5800..9140b9cf3870 100644
2127 +--- a/fs/nfsd/vfs.h
2128 ++++ b/fs/nfsd/vfs.h
2129 +@@ -116,8 +116,11 @@ void nfsd_put_raparams(struct file *file, struct raparms *ra);
2130 +
2131 + static inline int fh_want_write(struct svc_fh *fh)
2132 + {
2133 +- int ret = mnt_want_write(fh->fh_export->ex_path.mnt);
2134 ++ int ret;
2135 +
2136 ++ if (fh->fh_want_write)
2137 ++ return 0;
2138 ++ ret = mnt_want_write(fh->fh_export->ex_path.mnt);
2139 + if (!ret)
2140 + fh->fh_want_write = true;
2141 + return ret;
2142 +diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
2143 +index 290373024d9d..e8ace3b54e9c 100644
2144 +--- a/fs/ocfs2/dcache.c
2145 ++++ b/fs/ocfs2/dcache.c
2146 +@@ -310,6 +310,18 @@ int ocfs2_dentry_attach_lock(struct dentry *dentry,
2147 +
2148 + out_attach:
2149 + spin_lock(&dentry_attach_lock);
2150 ++ if (unlikely(dentry->d_fsdata && !alias)) {
2151 ++ /* d_fsdata is set by a racing thread which is doing
2152 ++ * the same thing as this thread is doing. Let the racing
2153 ++ * thread go ahead and return here.
2154 ++ */
2155 ++ spin_unlock(&dentry_attach_lock);
2156 ++ iput(dl->dl_inode);
2157 ++ ocfs2_lock_res_free(&dl->dl_lockres);
2158 ++ kfree(dl);
2159 ++ return 0;
2160 ++ }
2161 ++
2162 + dentry->d_fsdata = dl;
2163 + dl->dl_count++;
2164 + spin_unlock(&dentry_attach_lock);
2165 +diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
2166 +index 7620a8bc0493..8be03520995c 100644
2167 +--- a/include/linux/cgroup.h
2168 ++++ b/include/linux/cgroup.h
2169 +@@ -462,7 +462,7 @@ static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
2170 + *
2171 + * Find the css for the (@task, @subsys_id) combination, increment a
2172 + * reference on and return it. This function is guaranteed to return a
2173 +- * valid css.
2174 ++ * valid css. The returned css may already have been offlined.
2175 + */
2176 + static inline struct cgroup_subsys_state *
2177 + task_get_css(struct task_struct *task, int subsys_id)
2178 +@@ -472,7 +472,13 @@ task_get_css(struct task_struct *task, int subsys_id)
2179 + rcu_read_lock();
2180 + while (true) {
2181 + css = task_css(task, subsys_id);
2182 +- if (likely(css_tryget_online(css)))
2183 ++ /*
2184 ++ * Can't use css_tryget_online() here. A task which has
2185 ++ * PF_EXITING set may stay associated with an offline css.
2186 ++ * If such task calls this function, css_tryget_online()
2187 ++ * will keep failing.
2188 ++ */
2189 ++ if (likely(css_tryget(css)))
2190 + break;
2191 + cpu_relax();
2192 + }
2193 +diff --git a/include/linux/pwm.h b/include/linux/pwm.h
2194 +index 2c6c5114c089..f1bbae014889 100644
2195 +--- a/include/linux/pwm.h
2196 ++++ b/include/linux/pwm.h
2197 +@@ -641,7 +641,6 @@ static inline void pwm_remove_table(struct pwm_lookup *table, size_t num)
2198 + #ifdef CONFIG_PWM_SYSFS
2199 + void pwmchip_sysfs_export(struct pwm_chip *chip);
2200 + void pwmchip_sysfs_unexport(struct pwm_chip *chip);
2201 +-void pwmchip_sysfs_unexport_children(struct pwm_chip *chip);
2202 + #else
2203 + static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
2204 + {
2205 +@@ -650,10 +649,6 @@ static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
2206 + static inline void pwmchip_sysfs_unexport(struct pwm_chip *chip)
2207 + {
2208 + }
2209 +-
2210 +-static inline void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
2211 +-{
2212 +-}
2213 + #endif /* CONFIG_PWM_SYSFS */
2214 +
2215 + #endif /* __LINUX_PWM_H */
2216 +diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
2217 +index 57a7dba49d29..4931787193c3 100644
2218 +--- a/include/net/bluetooth/hci_core.h
2219 ++++ b/include/net/bluetooth/hci_core.h
2220 +@@ -176,9 +176,6 @@ struct adv_info {
2221 +
2222 + #define HCI_MAX_SHORT_NAME_LENGTH 10
2223 +
2224 +-/* Min encryption key size to match with SMP */
2225 +-#define HCI_MIN_ENC_KEY_SIZE 7
2226 +-
2227 + /* Default LE RPA expiry time, 15 minutes */
2228 + #define HCI_DEFAULT_RPA_TIMEOUT (15 * 60)
2229 +
2230 +diff --git a/ipc/mqueue.c b/ipc/mqueue.c
2231 +index 28a142f1be36..d5491a880751 100644
2232 +--- a/ipc/mqueue.c
2233 ++++ b/ipc/mqueue.c
2234 +@@ -371,7 +371,8 @@ static void mqueue_evict_inode(struct inode *inode)
2235 + struct user_struct *user;
2236 + unsigned long mq_bytes, mq_treesize;
2237 + struct ipc_namespace *ipc_ns;
2238 +- struct msg_msg *msg;
2239 ++ struct msg_msg *msg, *nmsg;
2240 ++ LIST_HEAD(tmp_msg);
2241 +
2242 + clear_inode(inode);
2243 +
2244 +@@ -382,10 +383,15 @@ static void mqueue_evict_inode(struct inode *inode)
2245 + info = MQUEUE_I(inode);
2246 + spin_lock(&info->lock);
2247 + while ((msg = msg_get(info)) != NULL)
2248 +- free_msg(msg);
2249 ++ list_add_tail(&msg->m_list, &tmp_msg);
2250 + kfree(info->node_cache);
2251 + spin_unlock(&info->lock);
2252 +
2253 ++ list_for_each_entry_safe(msg, nmsg, &tmp_msg, m_list) {
2254 ++ list_del(&msg->m_list);
2255 ++ free_msg(msg);
2256 ++ }
2257 ++
2258 + /* Total amount of bytes accounted for the mqueue */
2259 + mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
2260 + min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
2261 +diff --git a/ipc/msgutil.c b/ipc/msgutil.c
2262 +index bf74eaa5c39f..6d90b191c638 100644
2263 +--- a/ipc/msgutil.c
2264 ++++ b/ipc/msgutil.c
2265 +@@ -18,6 +18,7 @@
2266 + #include <linux/utsname.h>
2267 + #include <linux/proc_ns.h>
2268 + #include <linux/uaccess.h>
2269 ++#include <linux/sched.h>
2270 +
2271 + #include "util.h"
2272 +
2273 +@@ -64,6 +65,9 @@ static struct msg_msg *alloc_msg(size_t len)
2274 + pseg = &msg->next;
2275 + while (len > 0) {
2276 + struct msg_msgseg *seg;
2277 ++
2278 ++ cond_resched();
2279 ++
2280 + alen = min(len, DATALEN_SEG);
2281 + seg = kmalloc(sizeof(*seg) + alen, GFP_KERNEL_ACCOUNT);
2282 + if (seg == NULL)
2283 +@@ -176,6 +180,8 @@ void free_msg(struct msg_msg *msg)
2284 + kfree(msg);
2285 + while (seg != NULL) {
2286 + struct msg_msgseg *tmp = seg->next;
2287 ++
2288 ++ cond_resched();
2289 + kfree(seg);
2290 + seg = tmp;
2291 + }
2292 +diff --git a/kernel/Makefile b/kernel/Makefile
2293 +index 314e7d62f5f0..184fa9aa5802 100644
2294 +--- a/kernel/Makefile
2295 ++++ b/kernel/Makefile
2296 +@@ -28,6 +28,7 @@ KCOV_INSTRUMENT_extable.o := n
2297 + # Don't self-instrument.
2298 + KCOV_INSTRUMENT_kcov.o := n
2299 + KASAN_SANITIZE_kcov.o := n
2300 ++CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
2301 +
2302 + # cond_syscall is currently not LTO compatible
2303 + CFLAGS_sys_ni.o = $(DISABLE_LTO)
2304 +diff --git a/kernel/cred.c b/kernel/cred.c
2305 +index 5f264fb5737d..7b925925be95 100644
2306 +--- a/kernel/cred.c
2307 ++++ b/kernel/cred.c
2308 +@@ -447,6 +447,15 @@ int commit_creds(struct cred *new)
2309 + if (task->mm)
2310 + set_dumpable(task->mm, suid_dumpable);
2311 + task->pdeath_signal = 0;
2312 ++ /*
2313 ++ * If a task drops privileges and becomes nondumpable,
2314 ++ * the dumpability change must become visible before
2315 ++ * the credential change; otherwise, a __ptrace_may_access()
2316 ++ * racing with this change may be able to attach to a task it
2317 ++ * shouldn't be able to attach to (as if the task had dropped
2318 ++ * privileges without becoming nondumpable).
2319 ++ * Pairs with a read barrier in __ptrace_may_access().
2320 ++ */
2321 + smp_wmb();
2322 + }
2323 +
2324 +diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
2325 +index 99becab2c1ce..8e8b903b7613 100644
2326 +--- a/kernel/events/ring_buffer.c
2327 ++++ b/kernel/events/ring_buffer.c
2328 +@@ -49,14 +49,30 @@ static void perf_output_put_handle(struct perf_output_handle *handle)
2329 + unsigned long head;
2330 +
2331 + again:
2332 ++ /*
2333 ++ * In order to avoid publishing a head value that goes backwards,
2334 ++ * we must ensure the load of @rb->head happens after we've
2335 ++ * incremented @rb->nest.
2336 ++ *
2337 ++ * Otherwise we can observe a @rb->head value before one published
2338 ++ * by an IRQ/NMI happening between the load and the increment.
2339 ++ */
2340 ++ barrier();
2341 + head = local_read(&rb->head);
2342 +
2343 + /*
2344 +- * IRQ/NMI can happen here, which means we can miss a head update.
2345 ++ * IRQ/NMI can happen here and advance @rb->head, causing our
2346 ++ * load above to be stale.
2347 + */
2348 +
2349 +- if (!local_dec_and_test(&rb->nest))
2350 ++ /*
2351 ++ * If this isn't the outermost nesting, we don't have to update
2352 ++ * @rb->user_page->data_head.
2353 ++ */
2354 ++ if (local_read(&rb->nest) > 1) {
2355 ++ local_dec(&rb->nest);
2356 + goto out;
2357 ++ }
2358 +
2359 + /*
2360 + * Since the mmap() consumer (userspace) can run on a different CPU:
2361 +@@ -88,9 +104,18 @@ again:
2362 + rb->user_page->data_head = head;
2363 +
2364 + /*
2365 +- * Now check if we missed an update -- rely on previous implied
2366 +- * compiler barriers to force a re-read.
2367 ++ * We must publish the head before decrementing the nest count,
2368 ++ * otherwise an IRQ/NMI can publish a more recent head value and our
2369 ++ * write will (temporarily) publish a stale value.
2370 ++ */
2371 ++ barrier();
2372 ++ local_set(&rb->nest, 0);
2373 ++
2374 ++ /*
2375 ++ * Ensure we decrement @rb->nest before we validate the @rb->head.
2376 ++ * Otherwise we cannot be sure we caught the 'last' nested update.
2377 + */
2378 ++ barrier();
2379 + if (unlikely(head != local_read(&rb->head))) {
2380 + local_inc(&rb->nest);
2381 + goto again;
2382 +diff --git a/kernel/ptrace.c b/kernel/ptrace.c
2383 +index efba851ee018..f447f1e36185 100644
2384 +--- a/kernel/ptrace.c
2385 ++++ b/kernel/ptrace.c
2386 +@@ -322,6 +322,16 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
2387 + return -EPERM;
2388 + ok:
2389 + rcu_read_unlock();
2390 ++ /*
2391 ++ * If a task drops privileges and becomes nondumpable (through a syscall
2392 ++ * like setresuid()) while we are trying to access it, we must ensure
2393 ++ * that the dumpability is read after the credentials; otherwise,
2394 ++ * we may be able to attach to a task that we shouldn't be able to
2395 ++ * attach to (as if the task had dropped privileges without becoming
2396 ++ * nondumpable).
2397 ++ * Pairs with a write barrier in commit_creds().
2398 ++ */
2399 ++ smp_rmb();
2400 + mm = task->mm;
2401 + if (mm &&
2402 + ((get_dumpable(mm) != SUID_DUMP_USER) &&
2403 +@@ -710,6 +720,10 @@ static int ptrace_peek_siginfo(struct task_struct *child,
2404 + if (arg.nr < 0)
2405 + return -EINVAL;
2406 +
2407 ++ /* Ensure arg.off fits in an unsigned long */
2408 ++ if (arg.off > ULONG_MAX)
2409 ++ return 0;
2410 ++
2411 + if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
2412 + pending = &child->signal->shared_pending;
2413 + else
2414 +@@ -717,18 +731,20 @@ static int ptrace_peek_siginfo(struct task_struct *child,
2415 +
2416 + for (i = 0; i < arg.nr; ) {
2417 + siginfo_t info;
2418 +- s32 off = arg.off + i;
2419 ++ unsigned long off = arg.off + i;
2420 ++ bool found = false;
2421 +
2422 + spin_lock_irq(&child->sighand->siglock);
2423 + list_for_each_entry(q, &pending->list, list) {
2424 + if (!off--) {
2425 ++ found = true;
2426 + copy_siginfo(&info, &q->info);
2427 + break;
2428 + }
2429 + }
2430 + spin_unlock_irq(&child->sighand->siglock);
2431 +
2432 +- if (off >= 0) /* beyond the end of the list */
2433 ++ if (!found) /* beyond the end of the list */
2434 + break;
2435 +
2436 + #ifdef CONFIG_COMPAT
2437 +diff --git a/kernel/sys.c b/kernel/sys.c
2438 +index 6c4e9b533258..157277cbf83a 100644
2439 +--- a/kernel/sys.c
2440 ++++ b/kernel/sys.c
2441 +@@ -1762,7 +1762,7 @@ static int validate_prctl_map(struct prctl_mm_map *prctl_map)
2442 + ((unsigned long)prctl_map->__m1 __op \
2443 + (unsigned long)prctl_map->__m2) ? 0 : -EINVAL
2444 + error = __prctl_check_order(start_code, <, end_code);
2445 +- error |= __prctl_check_order(start_data, <, end_data);
2446 ++ error |= __prctl_check_order(start_data,<=, end_data);
2447 + error |= __prctl_check_order(start_brk, <=, brk);
2448 + error |= __prctl_check_order(arg_start, <=, arg_end);
2449 + error |= __prctl_check_order(env_start, <=, env_end);
2450 +diff --git a/kernel/sysctl.c b/kernel/sysctl.c
2451 +index cf0aeaae567e..6af1ac551ea3 100644
2452 +--- a/kernel/sysctl.c
2453 ++++ b/kernel/sysctl.c
2454 +@@ -2527,8 +2527,10 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
2455 + if (neg)
2456 + continue;
2457 + val = convmul * val / convdiv;
2458 +- if ((min && val < *min) || (max && val > *max))
2459 +- continue;
2460 ++ if ((min && val < *min) || (max && val > *max)) {
2461 ++ err = -EINVAL;
2462 ++ break;
2463 ++ }
2464 + *i = val;
2465 + } else {
2466 + val = convdiv * (*i) / convmul;
2467 +diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
2468 +index 6df8927c58a5..0a16419006f3 100644
2469 +--- a/kernel/time/ntp.c
2470 ++++ b/kernel/time/ntp.c
2471 +@@ -639,7 +639,7 @@ static inline void process_adjtimex_modes(struct timex *txc,
2472 + time_constant = max(time_constant, 0l);
2473 + }
2474 +
2475 +- if (txc->modes & ADJ_TAI && txc->constant > 0)
2476 ++ if (txc->modes & ADJ_TAI && txc->constant >= 0)
2477 + *time_tai = txc->constant;
2478 +
2479 + if (txc->modes & ADJ_OFFSET)
2480 +diff --git a/mm/cma.c b/mm/cma.c
2481 +index b5d8847497a3..4ea0f32761c1 100644
2482 +--- a/mm/cma.c
2483 ++++ b/mm/cma.c
2484 +@@ -100,8 +100,10 @@ static int __init cma_activate_area(struct cma *cma)
2485 +
2486 + cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
2487 +
2488 +- if (!cma->bitmap)
2489 ++ if (!cma->bitmap) {
2490 ++ cma->count = 0;
2491 + return -ENOMEM;
2492 ++ }
2493 +
2494 + WARN_ON_ONCE(!pfn_valid(pfn));
2495 + zone = page_zone(pfn_to_page(pfn));
2496 +diff --git a/mm/cma_debug.c b/mm/cma_debug.c
2497 +index f8e4b60db167..da50dab56b70 100644
2498 +--- a/mm/cma_debug.c
2499 ++++ b/mm/cma_debug.c
2500 +@@ -57,7 +57,7 @@ static int cma_maxchunk_get(void *data, u64 *val)
2501 + mutex_lock(&cma->lock);
2502 + for (;;) {
2503 + start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);
2504 +- if (start >= cma->count)
2505 ++ if (start >= bitmap_maxno)
2506 + break;
2507 + end = find_next_bit(cma->bitmap, bitmap_maxno, start);
2508 + maxchunk = max(end - start, maxchunk);
2509 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
2510 +index 6b03cd9b6d37..9914da93069e 100644
2511 +--- a/mm/hugetlb.c
2512 ++++ b/mm/hugetlb.c
2513 +@@ -1247,12 +1247,23 @@ void free_huge_page(struct page *page)
2514 + ClearPagePrivate(page);
2515 +
2516 + /*
2517 +- * A return code of zero implies that the subpool will be under its
2518 +- * minimum size if the reservation is not restored after page is free.
2519 +- * Therefore, force restore_reserve operation.
2520 ++ * If PagePrivate() was set on page, page allocation consumed a
2521 ++ * reservation. If the page was associated with a subpool, there
2522 ++ * would have been a page reserved in the subpool before allocation
2523 ++ * via hugepage_subpool_get_pages(). Since we are 'restoring' the
1524 ++ * reservation, do not call hugepage_subpool_put_pages() as this will
2525 ++ * remove the reserved page from the subpool.
2526 + */
2527 +- if (hugepage_subpool_put_pages(spool, 1) == 0)
2528 +- restore_reserve = true;
2529 ++ if (!restore_reserve) {
2530 ++ /*
2531 ++ * A return code of zero implies that the subpool will be
2532 ++ * under its minimum size if the reservation is not restored
2533 ++ * after page is free. Therefore, force restore_reserve
2534 ++ * operation.
2535 ++ */
2536 ++ if (hugepage_subpool_put_pages(spool, 1) == 0)
2537 ++ restore_reserve = true;
2538 ++ }
2539 +
2540 + spin_lock(&hugetlb_lock);
2541 + clear_page_huge_active(page);
2542 +diff --git a/mm/list_lru.c b/mm/list_lru.c
2543 +index db3a77c60201..16361c989af9 100644
2544 +--- a/mm/list_lru.c
2545 ++++ b/mm/list_lru.c
2546 +@@ -313,7 +313,7 @@ static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
2547 + }
2548 + return 0;
2549 + fail:
2550 +- __memcg_destroy_list_lru_node(memcg_lrus, begin, i - 1);
2551 ++ __memcg_destroy_list_lru_node(memcg_lrus, begin, i);
2552 + return -ENOMEM;
2553 + }
2554 +
2555 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
2556 +index 05f141e39ac1..13a642192e12 100644
2557 +--- a/mm/page_alloc.c
2558 ++++ b/mm/page_alloc.c
2559 +@@ -5491,13 +5491,15 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
2560 + unsigned long *zone_end_pfn,
2561 + unsigned long *ignored)
2562 + {
2563 ++ unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
2564 ++ unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
2565 + /* When hotadd a new node from cpu_up(), the node should be empty */
2566 + if (!node_start_pfn && !node_end_pfn)
2567 + return 0;
2568 +
2569 + /* Get the start and end of the zone */
2570 +- *zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
2571 +- *zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
2572 ++ *zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
2573 ++ *zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
2574 + adjust_zone_range_for_zone_movable(nid, zone_type,
2575 + node_start_pfn, node_end_pfn,
2576 + zone_start_pfn, zone_end_pfn);
2577 +diff --git a/mm/slab.c b/mm/slab.c
2578 +index d2c0499c6b15..9547f02b4af9 100644
2579 +--- a/mm/slab.c
2580 ++++ b/mm/slab.c
2581 +@@ -4365,8 +4365,12 @@ static int leaks_show(struct seq_file *m, void *p)
2582 + * whole processing.
2583 + */
2584 + do {
2585 +- set_store_user_clean(cachep);
2586 + drain_cpu_caches(cachep);
2587 ++ /*
2588 ++ * drain_cpu_caches() could make kmemleak_object and
2589 ++ * debug_objects_cache dirty, so reset afterwards.
2590 ++ */
2591 ++ set_store_user_clean(cachep);
2592 +
2593 + x[1] = 0;
2594 +
2595 +diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
2596 +index 149f82bd83fd..6ba56f215229 100644
2597 +--- a/net/ax25/ax25_route.c
2598 ++++ b/net/ax25/ax25_route.c
2599 +@@ -443,9 +443,11 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
2600 + }
2601 +
2602 + if (ax25->sk != NULL) {
2603 ++ local_bh_disable();
2604 + bh_lock_sock(ax25->sk);
2605 + sock_reset_flag(ax25->sk, SOCK_ZAPPED);
2606 + bh_unlock_sock(ax25->sk);
2607 ++ local_bh_enable();
2608 + }
2609 +
2610 + put:
2611 +diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
2612 +index fe4fb0c1fa61..cc061495f653 100644
2613 +--- a/net/bluetooth/hci_conn.c
2614 ++++ b/net/bluetooth/hci_conn.c
2615 +@@ -1165,14 +1165,6 @@ int hci_conn_check_link_mode(struct hci_conn *conn)
2616 + !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2617 + return 0;
2618 +
2619 +- /* The minimum encryption key size needs to be enforced by the
2620 +- * host stack before establishing any L2CAP connections. The
2621 +- * specification in theory allows a minimum of 1, but to align
2622 +- * BR/EDR and LE transports, a minimum of 7 is chosen.
2623 +- */
2624 +- if (conn->enc_key_size < HCI_MIN_ENC_KEY_SIZE)
2625 +- return 0;
2626 +-
2627 + return 1;
2628 + }
2629 +
2630 +diff --git a/net/core/neighbour.c b/net/core/neighbour.c
2631 +index 428dd614a18a..01cdfe85bb09 100644
2632 +--- a/net/core/neighbour.c
2633 ++++ b/net/core/neighbour.c
2634 +@@ -2704,6 +2704,7 @@ static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2635 + }
2636 +
2637 + void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2638 ++ __acquires(tbl->lock)
2639 + __acquires(rcu_bh)
2640 + {
2641 + struct neigh_seq_state *state = seq->private;
2642 +@@ -2714,6 +2715,7 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl
2643 +
2644 + rcu_read_lock_bh();
2645 + state->nht = rcu_dereference_bh(tbl->nht);
2646 ++ read_lock(&tbl->lock);
2647 +
2648 + return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
2649 + }
2650 +@@ -2747,8 +2749,13 @@ out:
2651 + EXPORT_SYMBOL(neigh_seq_next);
2652 +
2653 + void neigh_seq_stop(struct seq_file *seq, void *v)
2654 ++ __releases(tbl->lock)
2655 + __releases(rcu_bh)
2656 + {
2657 ++ struct neigh_seq_state *state = seq->private;
2658 ++ struct neigh_table *tbl = state->tbl;
2659 ++
2660 ++ read_unlock(&tbl->lock);
2661 + rcu_read_unlock_bh();
2662 + }
2663 + EXPORT_SYMBOL(neigh_seq_stop);
2664 +diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
2665 +index 8c2f9aedc2af..6a6011160f18 100644
2666 +--- a/net/ipv6/ip6_flowlabel.c
2667 ++++ b/net/ipv6/ip6_flowlabel.c
2668 +@@ -254,9 +254,9 @@ struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label)
2669 + rcu_read_lock_bh();
2670 + for_each_sk_fl_rcu(np, sfl) {
2671 + struct ip6_flowlabel *fl = sfl->fl;
2672 +- if (fl->label == label) {
2673 ++
2674 ++ if (fl->label == label && atomic_inc_not_zero(&fl->users)) {
2675 + fl->lastuse = jiffies;
2676 +- atomic_inc(&fl->users);
2677 + rcu_read_unlock_bh();
2678 + return fl;
2679 + }
2680 +@@ -623,7 +623,8 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
2681 + goto done;
2682 + }
2683 + fl1 = sfl->fl;
2684 +- atomic_inc(&fl1->users);
2685 ++ if (!atomic_inc_not_zero(&fl1->users))
2686 ++ fl1 = NULL;
2687 + break;
2688 + }
2689 + }
2690 +diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
2691 +index fc60d9d738b5..cdb913e7627e 100644
2692 +--- a/net/lapb/lapb_iface.c
2693 ++++ b/net/lapb/lapb_iface.c
2694 +@@ -182,6 +182,7 @@ int lapb_unregister(struct net_device *dev)
2695 + lapb = __lapb_devtostruct(dev);
2696 + if (!lapb)
2697 + goto out;
2698 ++ lapb_put(lapb);
2699 +
2700 + lapb_stop_t1timer(lapb);
2701 + lapb_stop_t2timer(lapb);
2702 +diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
2703 +index 09491b27092e..bc6d371031fc 100644
2704 +--- a/sound/core/seq/seq_clientmgr.c
2705 ++++ b/sound/core/seq/seq_clientmgr.c
2706 +@@ -1905,20 +1905,14 @@ static int snd_seq_ioctl_get_subscription(struct snd_seq_client *client,
2707 + int result;
2708 + struct snd_seq_client *sender = NULL;
2709 + struct snd_seq_client_port *sport = NULL;
2710 +- struct snd_seq_subscribers *p;
2711 +
2712 + result = -EINVAL;
2713 + if ((sender = snd_seq_client_use_ptr(subs->sender.client)) == NULL)
2714 + goto __end;
2715 + if ((sport = snd_seq_port_use_ptr(sender, subs->sender.port)) == NULL)
2716 + goto __end;
2717 +- p = snd_seq_port_get_subscription(&sport->c_src, &subs->dest);
2718 +- if (p) {
2719 +- result = 0;
2720 +- *subs = p->info;
2721 +- } else
2722 +- result = -ENOENT;
2723 +-
2724 ++ result = snd_seq_port_get_subscription(&sport->c_src, &subs->dest,
2725 ++ subs);
2726 + __end:
2727 + if (sport)
2728 + snd_seq_port_unlock(sport);
2729 +diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
2730 +index f04714d70bf7..9cfe4fcee9a5 100644
2731 +--- a/sound/core/seq/seq_ports.c
2732 ++++ b/sound/core/seq/seq_ports.c
2733 +@@ -550,10 +550,10 @@ static void delete_and_unsubscribe_port(struct snd_seq_client *client,
2734 + list_del_init(list);
2735 + grp->exclusive = 0;
2736 + write_unlock_irq(&grp->list_lock);
2737 +- up_write(&grp->list_mutex);
2738 +
2739 + if (!empty)
2740 + unsubscribe_port(client, port, grp, &subs->info, ack);
2741 ++ up_write(&grp->list_mutex);
2742 + }
2743 +
2744 + /* connect two ports */
2745 +@@ -635,20 +635,23 @@ int snd_seq_port_disconnect(struct snd_seq_client *connector,
2746 +
2747 +
2748 + /* get matched subscriber */
2749 +-struct snd_seq_subscribers *snd_seq_port_get_subscription(struct snd_seq_port_subs_info *src_grp,
2750 +- struct snd_seq_addr *dest_addr)
2751 ++int snd_seq_port_get_subscription(struct snd_seq_port_subs_info *src_grp,
2752 ++ struct snd_seq_addr *dest_addr,
2753 ++ struct snd_seq_port_subscribe *subs)
2754 + {
2755 +- struct snd_seq_subscribers *s, *found = NULL;
2756 ++ struct snd_seq_subscribers *s;
2757 ++ int err = -ENOENT;
2758 +
2759 + down_read(&src_grp->list_mutex);
2760 + list_for_each_entry(s, &src_grp->list_head, src_list) {
2761 + if (addr_match(dest_addr, &s->info.dest)) {
2762 +- found = s;
2763 ++ *subs = s->info;
2764 ++ err = 0;
2765 + break;
2766 + }
2767 + }
2768 + up_read(&src_grp->list_mutex);
2769 +- return found;
2770 ++ return err;
2771 + }
2772 +
2773 + /*
2774 +diff --git a/sound/core/seq/seq_ports.h b/sound/core/seq/seq_ports.h
2775 +index 26bd71f36c41..06003b36652e 100644
2776 +--- a/sound/core/seq/seq_ports.h
2777 ++++ b/sound/core/seq/seq_ports.h
2778 +@@ -135,7 +135,8 @@ int snd_seq_port_subscribe(struct snd_seq_client_port *port,
2779 + struct snd_seq_port_subscribe *info);
2780 +
2781 + /* get matched subscriber */
2782 +-struct snd_seq_subscribers *snd_seq_port_get_subscription(struct snd_seq_port_subs_info *src_grp,
2783 +- struct snd_seq_addr *dest_addr);
2784 ++int snd_seq_port_get_subscription(struct snd_seq_port_subs_info *src_grp,
2785 ++ struct snd_seq_addr *dest_addr,
2786 ++ struct snd_seq_port_subscribe *subs);
2787 +
2788 + #endif
2789 +diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
2790 +index b0395c4209ab..a7ab34d5e7b0 100644
2791 +--- a/sound/firewire/oxfw/oxfw.c
2792 ++++ b/sound/firewire/oxfw/oxfw.c
2793 +@@ -175,9 +175,6 @@ static int detect_quirks(struct snd_oxfw *oxfw)
2794 + oxfw->midi_input_ports = 0;
2795 + oxfw->midi_output_ports = 0;
2796 +
2797 +- /* Output stream exists but no data channels are useful. */
2798 +- oxfw->has_output = false;
2799 +-
2800 + return snd_oxfw_scs1x_add(oxfw);
2801 + }
2802 +
2803 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
2804 +index 789eca17fc60..f2f1d9fd848c 100644
2805 +--- a/sound/pci/hda/hda_intel.c
2806 ++++ b/sound/pci/hda/hda_intel.c
2807 +@@ -1700,9 +1700,6 @@ static int azx_first_init(struct azx *chip)
2808 + chip->msi = 0;
2809 + }
2810 +
2811 +- if (azx_acquire_irq(chip, 0) < 0)
2812 +- return -EBUSY;
2813 +-
2814 + pci_set_master(pci);
2815 + synchronize_irq(bus->irq);
2816 +
2817 +@@ -1809,6 +1806,9 @@ static int azx_first_init(struct azx *chip)
2818 + return -ENODEV;
2819 + }
2820 +
2821 ++ if (azx_acquire_irq(chip, 0) < 0)
2822 ++ return -EBUSY;
2823 ++
2824 + strcpy(card->driver, "HDA-Intel");
2825 + strlcpy(card->shortname, driver_short_names[chip->driver_type],
2826 + sizeof(card->shortname));
2827 +diff --git a/sound/soc/codecs/cs42xx8.c b/sound/soc/codecs/cs42xx8.c
2828 +index b4d87379d2bc..462341fef5a9 100644
2829 +--- a/sound/soc/codecs/cs42xx8.c
2830 ++++ b/sound/soc/codecs/cs42xx8.c
2831 +@@ -569,6 +569,7 @@ static int cs42xx8_runtime_resume(struct device *dev)
2832 + msleep(5);
2833 +
2834 + regcache_cache_only(cs42xx8->regmap, false);
2835 ++ regcache_mark_dirty(cs42xx8->regmap);
2836 +
2837 + ret = regcache_sync(cs42xx8->regmap);
2838 + if (ret) {
2839 +diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c
2840 +index 1d82f68305c3..88a438f6c2de 100644
2841 +--- a/sound/soc/fsl/fsl_asrc.c
2842 ++++ b/sound/soc/fsl/fsl_asrc.c
2843 +@@ -286,8 +286,8 @@ static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair)
2844 + return -EINVAL;
2845 + }
2846 +
2847 +- if ((outrate > 8000 && outrate < 30000) &&
2848 +- (outrate/inrate > 24 || inrate/outrate > 8)) {
2849 ++ if ((outrate >= 8000 && outrate <= 30000) &&
2850 ++ (outrate > 24 * inrate || inrate > 8 * outrate)) {
2851 + pair_err("exceed supported ratio range [1/24, 8] for \
2852 + inrate/outrate: %d/%d\n", inrate, outrate);
2853 + return -EINVAL;
2854 +diff --git a/tools/objtool/check.c b/tools/objtool/check.c
2855 +index ae3446768181..95326c6a7a24 100644
2856 +--- a/tools/objtool/check.c
2857 ++++ b/tools/objtool/check.c
2858 +@@ -28,6 +28,8 @@
2859 + #include <linux/hashtable.h>
2860 + #include <linux/kernel.h>
2861 +
2862 ++#define FAKE_JUMP_OFFSET -1
2863 ++
2864 + struct alternative {
2865 + struct list_head list;
2866 + struct instruction *insn;
2867 +@@ -498,7 +500,7 @@ static int add_jump_destinations(struct objtool_file *file)
2868 + insn->type != INSN_JUMP_UNCONDITIONAL)
2869 + continue;
2870 +
2871 +- if (insn->ignore)
2872 ++ if (insn->ignore || insn->offset == FAKE_JUMP_OFFSET)
2873 + continue;
2874 +
2875 + rela = find_rela_by_dest_range(insn->sec, insn->offset,
2876 +@@ -645,10 +647,10 @@ static int handle_group_alt(struct objtool_file *file,
2877 + clear_insn_state(&fake_jump->state);
2878 +
2879 + fake_jump->sec = special_alt->new_sec;
2880 +- fake_jump->offset = -1;
2881 ++ fake_jump->offset = FAKE_JUMP_OFFSET;
2882 + fake_jump->type = INSN_JUMP_UNCONDITIONAL;
2883 + fake_jump->jump_dest = list_next_entry(last_orig_insn, list);
2884 +- fake_jump->ignore = true;
2885 ++ fake_jump->func = orig_insn->func;
2886 + }
2887 +
2888 + if (!special_alt->new_len) {
2889 +diff --git a/tools/perf/arch/s390/util/machine.c b/tools/perf/arch/s390/util/machine.c
2890 +index b9a95a1a8e69..d3d1452021d4 100644
2891 +--- a/tools/perf/arch/s390/util/machine.c
2892 ++++ b/tools/perf/arch/s390/util/machine.c
2893 +@@ -4,16 +4,19 @@
2894 + #include "util.h"
2895 + #include "machine.h"
2896 + #include "api/fs/fs.h"
2897 ++#include "debug.h"
2898 +
2899 + int arch__fix_module_text_start(u64 *start, const char *name)
2900 + {
2901 ++ u64 m_start = *start;
2902 + char path[PATH_MAX];
2903 +
2904 + snprintf(path, PATH_MAX, "module/%.*s/sections/.text",
2905 + (int)strlen(name) - 2, name + 1);
2906 +-
2907 +- if (sysfs__read_ull(path, (unsigned long long *)start) < 0)
2908 +- return -1;
2909 ++ if (sysfs__read_ull(path, (unsigned long long *)start) < 0) {
2910 ++ pr_debug2("Using module %s start:%#lx\n", path, m_start);
2911 ++ *start = m_start;
2912 ++ }
2913 +
2914 + return 0;
2915 + }
2916 +diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c
2917 +index 7123f4de32cc..226f4312b8f3 100644
2918 +--- a/tools/perf/util/data-convert-bt.c
2919 ++++ b/tools/perf/util/data-convert-bt.c
2920 +@@ -265,7 +265,7 @@ static int string_set_value(struct bt_ctf_field *field, const char *string)
2921 + if (i > 0)
2922 + strncpy(buffer, string, i);
2923 + }
2924 +- strncat(buffer + p, numstr, 4);
2925 ++ memcpy(buffer + p, numstr, 4);
2926 + p += 3;
2927 + }
2928 + }
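
A stand-alone sketch (not the perf code) of the difference the data-convert-bt hunk above relies on: strncat's size argument only limits how many bytes are read from the source, it appends at the destination's current NUL and always writes one extra terminating NUL, while memcpy places exactly n bytes at the caller-chosen offset. The "_3f_" literal is just a placeholder string.

#include <stdio.h>
#include <string.h>

int main(void)
{
        char with_strncat[8] = "";
        char with_memcpy[8]  = "XXXXXXX";       /* sentinel contents */

        strncat(with_strncat, "_3f_", 4);       /* writes "_3f_" plus a NUL: 5 bytes */
        memcpy(with_memcpy, "_3f_", 4);         /* writes exactly 4 bytes, no NUL */

        printf("strncat result: \"%s\" (terminated by strncat itself)\n", with_strncat);
        printf("memcpy  result: \"%.7s\" (byte 4 still '%c')\n", with_memcpy, with_memcpy[4]);
        return 0;
}
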
2929 +diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh
2930 +index 8ec76681605c..f25f72a75cf3 100755
2931 +--- a/tools/testing/selftests/netfilter/nft_nat.sh
2932 ++++ b/tools/testing/selftests/netfilter/nft_nat.sh
2933 +@@ -23,7 +23,11 @@ ip netns add ns0
2934 + ip netns add ns1
2935 + ip netns add ns2
2936 +
2937 +-ip link add veth0 netns ns0 type veth peer name eth0 netns ns1
2938 ++ip link add veth0 netns ns0 type veth peer name eth0 netns ns1 > /dev/null 2>&1
2939 ++if [ $? -ne 0 ];then
2940 ++ echo "SKIP: No virtual ethernet pair device support in kernel"
2941 ++ exit $ksft_skip
2942 ++fi
2943 + ip link add veth1 netns ns0 type veth peer name eth0 netns ns2
2944 +
2945 + ip -net ns0 link set lo up
2946 +diff --git a/tools/testing/selftests/timers/adjtick.c b/tools/testing/selftests/timers/adjtick.c
2947 +index 9887fd538fec..91316ab4b041 100644
2948 +--- a/tools/testing/selftests/timers/adjtick.c
2949 ++++ b/tools/testing/selftests/timers/adjtick.c
2950 +@@ -147,6 +147,7 @@ int check_tick_adj(long tickval)
2951 +
2952 + eppm = get_ppm_drift();
2953 + printf("%lld usec, %lld ppm", systick + (systick * eppm / MILLION), eppm);
2954 ++ fflush(stdout);
2955 +
2956 + tx1.modes = 0;
2957 + adjtimex(&tx1);
2958 +diff --git a/tools/testing/selftests/timers/leapcrash.c b/tools/testing/selftests/timers/leapcrash.c
2959 +index a1071bdbdeb7..a77c70b47495 100644
2960 +--- a/tools/testing/selftests/timers/leapcrash.c
2961 ++++ b/tools/testing/selftests/timers/leapcrash.c
2962 +@@ -114,6 +114,7 @@ int main(void)
2963 + }
2964 + clear_time_state();
2965 + printf(".");
2966 ++ fflush(stdout);
2967 + }
2968 + printf("[OK]\n");
2969 + return ksft_exit_pass();
2970 +diff --git a/tools/testing/selftests/timers/mqueue-lat.c b/tools/testing/selftests/timers/mqueue-lat.c
2971 +index a2a3924d0b41..efdb62470052 100644
2972 +--- a/tools/testing/selftests/timers/mqueue-lat.c
2973 ++++ b/tools/testing/selftests/timers/mqueue-lat.c
2974 +@@ -113,6 +113,7 @@ int main(int argc, char **argv)
2975 + int ret;
2976 +
2977 + printf("Mqueue latency : ");
2978 ++ fflush(stdout);
2979 +
2980 + ret = mqueue_lat_test();
2981 + if (ret < 0) {
2982 +diff --git a/tools/testing/selftests/timers/nanosleep.c b/tools/testing/selftests/timers/nanosleep.c
2983 +index ff942ff7c9b3..2e6e94c02a33 100644
2984 +--- a/tools/testing/selftests/timers/nanosleep.c
2985 ++++ b/tools/testing/selftests/timers/nanosleep.c
2986 +@@ -153,6 +153,7 @@ int main(int argc, char **argv)
2987 + continue;
2988 +
2989 + printf("Nanosleep %-31s ", clockstring(clockid));
2990 ++ fflush(stdout);
2991 +
2992 + length = 10;
2993 + while (length <= (NSEC_PER_SEC * 10)) {
2994 +diff --git a/tools/testing/selftests/timers/nsleep-lat.c b/tools/testing/selftests/timers/nsleep-lat.c
2995 +index 2d7898fda0f1..ac06cf10a5c2 100644
2996 +--- a/tools/testing/selftests/timers/nsleep-lat.c
2997 ++++ b/tools/testing/selftests/timers/nsleep-lat.c
2998 +@@ -166,6 +166,7 @@ int main(int argc, char **argv)
2999 + continue;
3000 +
3001 + printf("nsleep latency %-26s ", clockstring(clockid));
3002 ++ fflush(stdout);
3003 +
3004 + length = 10;
3005 + while (length <= (NSEC_PER_SEC * 10)) {
3006 +diff --git a/tools/testing/selftests/timers/raw_skew.c b/tools/testing/selftests/timers/raw_skew.c
3007 +index 0ab937a17ebb..4e631da7f956 100644
3008 +--- a/tools/testing/selftests/timers/raw_skew.c
3009 ++++ b/tools/testing/selftests/timers/raw_skew.c
3010 +@@ -124,6 +124,7 @@ int main(int argv, char **argc)
3011 + printf("WARNING: ADJ_OFFSET in progress, this will cause inaccurate results\n");
3012 +
3013 + printf("Estimating clock drift: ");
3014 ++ fflush(stdout);
3015 + sleep(120);
3016 +
3017 + get_monotonic_and_raw(&mon, &raw);
3018 +diff --git a/tools/testing/selftests/timers/set-tai.c b/tools/testing/selftests/timers/set-tai.c
3019 +index dc88dbc8831f..3ae76ab483de 100644
3020 +--- a/tools/testing/selftests/timers/set-tai.c
3021 ++++ b/tools/testing/selftests/timers/set-tai.c
3022 +@@ -66,6 +66,7 @@ int main(int argc, char **argv)
3023 + printf("tai offset started at %i\n", ret);
3024 +
3025 + printf("Checking tai offsets can be properly set: ");
3026 ++ fflush(stdout);
3027 + for (i = 1; i <= 60; i++) {
3028 + ret = set_tai(i);
3029 + ret = get_tai();
3030 +diff --git a/tools/testing/selftests/timers/set-tz.c b/tools/testing/selftests/timers/set-tz.c
3031 +index f4184928b16b..b038131c9682 100644
3032 +--- a/tools/testing/selftests/timers/set-tz.c
3033 ++++ b/tools/testing/selftests/timers/set-tz.c
3034 +@@ -76,6 +76,7 @@ int main(int argc, char **argv)
3035 + printf("tz_minuteswest started at %i, dst at %i\n", min, dst);
3036 +
3037 + printf("Checking tz_minuteswest can be properly set: ");
3038 ++ fflush(stdout);
3039 + for (i = -15*60; i < 15*60; i += 30) {
3040 + ret = set_tz(i, dst);
3041 + ret = get_tz_min();
3042 +@@ -87,6 +88,7 @@ int main(int argc, char **argv)
3043 + printf("[OK]\n");
3044 +
3045 + printf("Checking invalid tz_minuteswest values are caught: ");
3046 ++ fflush(stdout);
3047 +
3048 + if (!set_tz(-15*60-1, dst)) {
3049 + printf("[FAILED] %i didn't return failure!\n", -15*60-1);
3050 +diff --git a/tools/testing/selftests/timers/threadtest.c b/tools/testing/selftests/timers/threadtest.c
3051 +index e632e116f05e..a4bf736dd842 100644
3052 +--- a/tools/testing/selftests/timers/threadtest.c
3053 ++++ b/tools/testing/selftests/timers/threadtest.c
3054 +@@ -175,6 +175,7 @@ int main(int argc, char **argv)
3055 + strftime(buf, 255, "%a, %d %b %Y %T %z", localtime(&start));
3056 + printf("%s\n", buf);
3057 + printf("Testing consistency with %i threads for %ld seconds: ", thread_count, runtime);
3058 ++ fflush(stdout);
3059 +
3060 + /* spawn */
3061 + for (i = 0; i < thread_count; i++)
3062 +diff --git a/tools/testing/selftests/timers/valid-adjtimex.c b/tools/testing/selftests/timers/valid-adjtimex.c
3063 +index 60fe3c569bd9..a747645d79f4 100644
3064 +--- a/tools/testing/selftests/timers/valid-adjtimex.c
3065 ++++ b/tools/testing/selftests/timers/valid-adjtimex.c
3066 +@@ -134,6 +134,7 @@ int validate_freq(void)
3067 + /* Set the leap second insert flag */
3068 +
3069 + printf("Testing ADJ_FREQ... ");
3070 ++ fflush(stdout);
3071 + for (i = 0; i < NUM_FREQ_VALID; i++) {
3072 + tx.modes = ADJ_FREQUENCY;
3073 + tx.freq = valid_freq[i];
3074 +@@ -261,6 +262,7 @@ int set_bad_offset(long sec, long usec, int use_nano)
3075 + int validate_set_offset(void)
3076 + {
3077 + printf("Testing ADJ_SETOFFSET... ");
3078 ++ fflush(stdout);
3079 +
3080 + /* Test valid values */
3081 + if (set_offset(NSEC_PER_SEC - 1, 1))
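
A stand-alone sketch (not selftest code) of the recurring fflush(stdout) additions in the timers selftests above: when stdout is redirected to a pipe or file it is fully buffered, so a partial line written with printf() stays in the buffer until it is flushed, and the progress text would otherwise only appear after the long-running step completes. The message and the sleep are placeholders.

#include <stdio.h>
#include <unistd.h>

int main(void)
{
        printf("Estimating something: ");        /* no newline yet */
        fflush(stdout);                          /* push the partial line out now */

        sleep(2);                                /* stands in for the real work */

        printf("[OK]\n");                        /* normal exit flushes the rest */
        return 0;
}
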