Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.10 commit in: /
Date: Sun, 26 Sep 2021 14:12:28
Message-Id: 1632665531.304210fdee883a06e79223ce982fec8985222041.mpagano@gentoo
1 commit: 304210fdee883a06e79223ce982fec8985222041
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Sun Sep 26 14:12:11 2021 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Sun Sep 26 14:12:11 2021 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=304210fd
7
8 Linux patch 5.10.69
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1068_linux-5.10.69.patch | 2394 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 2398 insertions(+)
15
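The 0000_README hunk below records each patch in the genpatches series as a Patch:/From:/Desc: triplet, listed in the order the patches are applied. As a rough illustration only — this script is not part of the commit, and the default file name argument is just an assumption — a minimal Python sketch that lists those entries from such a README could look like:

#!/usr/bin/env python3
"""Rough sketch: list the patch entries declared in a genpatches 0000_README."""
import sys

def list_patches(readme_path):
    """Yield (patch_file, description) pairs in the order they appear."""
    current = None
    with open(readme_path, encoding="utf-8") as fh:
        for line in fh:
            if line.startswith("Patch:"):
                # e.g. "Patch:  1068_linux-5.10.69.patch"
                parts = line.split(None, 1)
                current = parts[1].strip() if len(parts) > 1 else None
            elif line.startswith("Desc:") and current:
                # e.g. "Desc:   Linux 5.10.69"
                parts = line.split(None, 1)
                desc = parts[1].strip() if len(parts) > 1 else ""
                yield current, desc
                current = None

if __name__ == "__main__":
    path = sys.argv[1] if len(sys.argv) > 1 else "0000_README"
    for name, desc in list_patches(path):
        print(name, "-", desc)

Run against the README as updated by this commit, such a script would print 1068_linux-5.10.69.patch alongside its "Linux 5.10.69" description, mirroring the entry added in the hunk that follows.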
16 diff --git a/0000_README b/0000_README
17 index 416061d..456fb50 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -315,6 +315,10 @@ Patch: 1067_linux-5.10.68.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.10.68
23
24 +Patch: 1068_linux-5.10.69.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.10.69
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1068_linux-5.10.69.patch b/1068_linux-5.10.69.patch
33 new file mode 100644
34 index 0000000..d9237d5
35 --- /dev/null
36 +++ b/1068_linux-5.10.69.patch
37 @@ -0,0 +1,2394 @@
38 +diff --git a/Makefile b/Makefile
39 +index e50581c9db50e..e14943205b832 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 5
45 + PATCHLEVEL = 10
46 +-SUBLEVEL = 68
47 ++SUBLEVEL = 69
48 + EXTRAVERSION =
49 + NAME = Dare mighty things
50 +
51 +diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
52 +index 48ec1d0337da7..a4dbac07e4ef0 100644
53 +--- a/arch/arm/include/asm/ftrace.h
54 ++++ b/arch/arm/include/asm/ftrace.h
55 +@@ -15,6 +15,9 @@ extern void __gnu_mcount_nc(void);
56 +
57 + #ifdef CONFIG_DYNAMIC_FTRACE
58 + struct dyn_arch_ftrace {
59 ++#ifdef CONFIG_ARM_MODULE_PLTS
60 ++ struct module *mod;
61 ++#endif
62 + };
63 +
64 + static inline unsigned long ftrace_call_adjust(unsigned long addr)
65 +diff --git a/arch/arm/include/asm/insn.h b/arch/arm/include/asm/insn.h
66 +index f20e08ac85aeb..5475cbf9fb6b4 100644
67 +--- a/arch/arm/include/asm/insn.h
68 ++++ b/arch/arm/include/asm/insn.h
69 +@@ -13,18 +13,18 @@ arm_gen_nop(void)
70 + }
71 +
72 + unsigned long
73 +-__arm_gen_branch(unsigned long pc, unsigned long addr, bool link);
74 ++__arm_gen_branch(unsigned long pc, unsigned long addr, bool link, bool warn);
75 +
76 + static inline unsigned long
77 + arm_gen_branch(unsigned long pc, unsigned long addr)
78 + {
79 +- return __arm_gen_branch(pc, addr, false);
80 ++ return __arm_gen_branch(pc, addr, false, true);
81 + }
82 +
83 + static inline unsigned long
84 +-arm_gen_branch_link(unsigned long pc, unsigned long addr)
85 ++arm_gen_branch_link(unsigned long pc, unsigned long addr, bool warn)
86 + {
87 +- return __arm_gen_branch(pc, addr, true);
88 ++ return __arm_gen_branch(pc, addr, true, warn);
89 + }
90 +
91 + #endif
92 +diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
93 +index 4b0df09cbe678..cfffae67c04ee 100644
94 +--- a/arch/arm/include/asm/module.h
95 ++++ b/arch/arm/include/asm/module.h
96 +@@ -19,8 +19,18 @@ enum {
97 + };
98 + #endif
99 +
100 ++#define PLT_ENT_STRIDE L1_CACHE_BYTES
101 ++#define PLT_ENT_COUNT (PLT_ENT_STRIDE / sizeof(u32))
102 ++#define PLT_ENT_SIZE (sizeof(struct plt_entries) / PLT_ENT_COUNT)
103 ++
104 ++struct plt_entries {
105 ++ u32 ldr[PLT_ENT_COUNT];
106 ++ u32 lit[PLT_ENT_COUNT];
107 ++};
108 ++
109 + struct mod_plt_sec {
110 + struct elf32_shdr *plt;
111 ++ struct plt_entries *plt_ent;
112 + int plt_count;
113 + };
114 +
115 +diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
116 +index 9a79ef6b1876c..3c83b5d296979 100644
117 +--- a/arch/arm/kernel/ftrace.c
118 ++++ b/arch/arm/kernel/ftrace.c
119 +@@ -68,9 +68,10 @@ int ftrace_arch_code_modify_post_process(void)
120 + return 0;
121 + }
122 +
123 +-static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
124 ++static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr,
125 ++ bool warn)
126 + {
127 +- return arm_gen_branch_link(pc, addr);
128 ++ return arm_gen_branch_link(pc, addr, warn);
129 + }
130 +
131 + static int ftrace_modify_code(unsigned long pc, unsigned long old,
132 +@@ -104,14 +105,14 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
133 + int ret;
134 +
135 + pc = (unsigned long)&ftrace_call;
136 +- new = ftrace_call_replace(pc, (unsigned long)func);
137 ++ new = ftrace_call_replace(pc, (unsigned long)func, true);
138 +
139 + ret = ftrace_modify_code(pc, 0, new, false);
140 +
141 + #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
142 + if (!ret) {
143 + pc = (unsigned long)&ftrace_regs_call;
144 +- new = ftrace_call_replace(pc, (unsigned long)func);
145 ++ new = ftrace_call_replace(pc, (unsigned long)func, true);
146 +
147 + ret = ftrace_modify_code(pc, 0, new, false);
148 + }
149 +@@ -124,10 +125,22 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
150 + {
151 + unsigned long new, old;
152 + unsigned long ip = rec->ip;
153 ++ unsigned long aaddr = adjust_address(rec, addr);
154 ++ struct module *mod = NULL;
155 ++
156 ++#ifdef CONFIG_ARM_MODULE_PLTS
157 ++ mod = rec->arch.mod;
158 ++#endif
159 +
160 + old = ftrace_nop_replace(rec);
161 +
162 +- new = ftrace_call_replace(ip, adjust_address(rec, addr));
163 ++ new = ftrace_call_replace(ip, aaddr, !mod);
164 ++#ifdef CONFIG_ARM_MODULE_PLTS
165 ++ if (!new && mod) {
166 ++ aaddr = get_module_plt(mod, ip, aaddr);
167 ++ new = ftrace_call_replace(ip, aaddr, true);
168 ++ }
169 ++#endif
170 +
171 + return ftrace_modify_code(rec->ip, old, new, true);
172 + }
173 +@@ -140,9 +153,9 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
174 + unsigned long new, old;
175 + unsigned long ip = rec->ip;
176 +
177 +- old = ftrace_call_replace(ip, adjust_address(rec, old_addr));
178 ++ old = ftrace_call_replace(ip, adjust_address(rec, old_addr), true);
179 +
180 +- new = ftrace_call_replace(ip, adjust_address(rec, addr));
181 ++ new = ftrace_call_replace(ip, adjust_address(rec, addr), true);
182 +
183 + return ftrace_modify_code(rec->ip, old, new, true);
184 + }
185 +@@ -152,12 +165,29 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
186 + int ftrace_make_nop(struct module *mod,
187 + struct dyn_ftrace *rec, unsigned long addr)
188 + {
189 ++ unsigned long aaddr = adjust_address(rec, addr);
190 + unsigned long ip = rec->ip;
191 + unsigned long old;
192 + unsigned long new;
193 + int ret;
194 +
195 +- old = ftrace_call_replace(ip, adjust_address(rec, addr));
196 ++#ifdef CONFIG_ARM_MODULE_PLTS
197 ++ /* mod is only supplied during module loading */
198 ++ if (!mod)
199 ++ mod = rec->arch.mod;
200 ++ else
201 ++ rec->arch.mod = mod;
202 ++#endif
203 ++
204 ++ old = ftrace_call_replace(ip, aaddr,
205 ++ !IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || !mod);
206 ++#ifdef CONFIG_ARM_MODULE_PLTS
207 ++ if (!old && mod) {
208 ++ aaddr = get_module_plt(mod, ip, aaddr);
209 ++ old = ftrace_call_replace(ip, aaddr, true);
210 ++ }
211 ++#endif
212 ++
213 + new = ftrace_nop_replace(rec);
214 + ret = ftrace_modify_code(ip, old, new, true);
215 +
216 +diff --git a/arch/arm/kernel/insn.c b/arch/arm/kernel/insn.c
217 +index 2e844b70386b3..db0acbb7d7a02 100644
218 +--- a/arch/arm/kernel/insn.c
219 ++++ b/arch/arm/kernel/insn.c
220 +@@ -3,8 +3,9 @@
221 + #include <linux/kernel.h>
222 + #include <asm/opcodes.h>
223 +
224 +-static unsigned long
225 +-__arm_gen_branch_thumb2(unsigned long pc, unsigned long addr, bool link)
226 ++static unsigned long __arm_gen_branch_thumb2(unsigned long pc,
227 ++ unsigned long addr, bool link,
228 ++ bool warn)
229 + {
230 + unsigned long s, j1, j2, i1, i2, imm10, imm11;
231 + unsigned long first, second;
232 +@@ -12,7 +13,7 @@ __arm_gen_branch_thumb2(unsigned long pc, unsigned long addr, bool link)
233 +
234 + offset = (long)addr - (long)(pc + 4);
235 + if (offset < -16777216 || offset > 16777214) {
236 +- WARN_ON_ONCE(1);
237 ++ WARN_ON_ONCE(warn);
238 + return 0;
239 + }
240 +
241 +@@ -33,8 +34,8 @@ __arm_gen_branch_thumb2(unsigned long pc, unsigned long addr, bool link)
242 + return __opcode_thumb32_compose(first, second);
243 + }
244 +
245 +-static unsigned long
246 +-__arm_gen_branch_arm(unsigned long pc, unsigned long addr, bool link)
247 ++static unsigned long __arm_gen_branch_arm(unsigned long pc, unsigned long addr,
248 ++ bool link, bool warn)
249 + {
250 + unsigned long opcode = 0xea000000;
251 + long offset;
252 +@@ -44,7 +45,7 @@ __arm_gen_branch_arm(unsigned long pc, unsigned long addr, bool link)
253 +
254 + offset = (long)addr - (long)(pc + 8);
255 + if (unlikely(offset < -33554432 || offset > 33554428)) {
256 +- WARN_ON_ONCE(1);
257 ++ WARN_ON_ONCE(warn);
258 + return 0;
259 + }
260 +
261 +@@ -54,10 +55,10 @@ __arm_gen_branch_arm(unsigned long pc, unsigned long addr, bool link)
262 + }
263 +
264 + unsigned long
265 +-__arm_gen_branch(unsigned long pc, unsigned long addr, bool link)
266 ++__arm_gen_branch(unsigned long pc, unsigned long addr, bool link, bool warn)
267 + {
268 + if (IS_ENABLED(CONFIG_THUMB2_KERNEL))
269 +- return __arm_gen_branch_thumb2(pc, addr, link);
270 ++ return __arm_gen_branch_thumb2(pc, addr, link, warn);
271 + else
272 +- return __arm_gen_branch_arm(pc, addr, link);
273 ++ return __arm_gen_branch_arm(pc, addr, link, warn);
274 + }
275 +diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c
276 +index 6e626abaefc54..1fc309b41f944 100644
277 +--- a/arch/arm/kernel/module-plts.c
278 ++++ b/arch/arm/kernel/module-plts.c
279 +@@ -4,6 +4,7 @@
280 + */
281 +
282 + #include <linux/elf.h>
283 ++#include <linux/ftrace.h>
284 + #include <linux/kernel.h>
285 + #include <linux/module.h>
286 + #include <linux/sort.h>
287 +@@ -12,10 +13,6 @@
288 + #include <asm/cache.h>
289 + #include <asm/opcodes.h>
290 +
291 +-#define PLT_ENT_STRIDE L1_CACHE_BYTES
292 +-#define PLT_ENT_COUNT (PLT_ENT_STRIDE / sizeof(u32))
293 +-#define PLT_ENT_SIZE (sizeof(struct plt_entries) / PLT_ENT_COUNT)
294 +-
295 + #ifdef CONFIG_THUMB2_KERNEL
296 + #define PLT_ENT_LDR __opcode_to_mem_thumb32(0xf8dff000 | \
297 + (PLT_ENT_STRIDE - 4))
298 +@@ -24,9 +21,11 @@
299 + (PLT_ENT_STRIDE - 8))
300 + #endif
301 +
302 +-struct plt_entries {
303 +- u32 ldr[PLT_ENT_COUNT];
304 +- u32 lit[PLT_ENT_COUNT];
305 ++static const u32 fixed_plts[] = {
306 ++#ifdef CONFIG_DYNAMIC_FTRACE
307 ++ FTRACE_ADDR,
308 ++ MCOUNT_ADDR,
309 ++#endif
310 + };
311 +
312 + static bool in_init(const struct module *mod, unsigned long loc)
313 +@@ -34,14 +33,40 @@ static bool in_init(const struct module *mod, unsigned long loc)
314 + return loc - (u32)mod->init_layout.base < mod->init_layout.size;
315 + }
316 +
317 ++static void prealloc_fixed(struct mod_plt_sec *pltsec, struct plt_entries *plt)
318 ++{
319 ++ int i;
320 ++
321 ++ if (!ARRAY_SIZE(fixed_plts) || pltsec->plt_count)
322 ++ return;
323 ++ pltsec->plt_count = ARRAY_SIZE(fixed_plts);
324 ++
325 ++ for (i = 0; i < ARRAY_SIZE(plt->ldr); ++i)
326 ++ plt->ldr[i] = PLT_ENT_LDR;
327 ++
328 ++ BUILD_BUG_ON(sizeof(fixed_plts) > sizeof(plt->lit));
329 ++ memcpy(plt->lit, fixed_plts, sizeof(fixed_plts));
330 ++}
331 ++
332 + u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
333 + {
334 + struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
335 + &mod->arch.init;
336 ++ struct plt_entries *plt;
337 ++ int idx;
338 ++
339 ++ /* cache the address, ELF header is available only during module load */
340 ++ if (!pltsec->plt_ent)
341 ++ pltsec->plt_ent = (struct plt_entries *)pltsec->plt->sh_addr;
342 ++ plt = pltsec->plt_ent;
343 +
344 +- struct plt_entries *plt = (struct plt_entries *)pltsec->plt->sh_addr;
345 +- int idx = 0;
346 ++ prealloc_fixed(pltsec, plt);
347 ++
348 ++ for (idx = 0; idx < ARRAY_SIZE(fixed_plts); ++idx)
349 ++ if (plt->lit[idx] == val)
350 ++ return (u32)&plt->ldr[idx];
351 +
352 ++ idx = 0;
353 + /*
354 + * Look for an existing entry pointing to 'val'. Given that the
355 + * relocations are sorted, this will be the last entry we allocated.
356 +@@ -189,8 +214,8 @@ static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
357 + int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
358 + char *secstrings, struct module *mod)
359 + {
360 +- unsigned long core_plts = 0;
361 +- unsigned long init_plts = 0;
362 ++ unsigned long core_plts = ARRAY_SIZE(fixed_plts);
363 ++ unsigned long init_plts = ARRAY_SIZE(fixed_plts);
364 + Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;
365 + Elf32_Sym *syms = NULL;
366 +
367 +@@ -245,6 +270,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
368 + mod->arch.core.plt->sh_size = round_up(core_plts * PLT_ENT_SIZE,
369 + sizeof(struct plt_entries));
370 + mod->arch.core.plt_count = 0;
371 ++ mod->arch.core.plt_ent = NULL;
372 +
373 + mod->arch.init.plt->sh_type = SHT_NOBITS;
374 + mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
375 +@@ -252,6 +278,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
376 + mod->arch.init.plt->sh_size = round_up(init_plts * PLT_ENT_SIZE,
377 + sizeof(struct plt_entries));
378 + mod->arch.init.plt_count = 0;
379 ++ mod->arch.init.plt_ent = NULL;
380 +
381 + pr_debug("%s: plt=%x, init.plt=%x\n", __func__,
382 + mod->arch.core.plt->sh_size, mod->arch.init.plt->sh_size);
383 +diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
384 +index d54d69cf17322..75f3ab531bdf4 100644
385 +--- a/arch/arm/mm/init.c
386 ++++ b/arch/arm/mm/init.c
387 +@@ -378,7 +378,11 @@ static void __init free_highpages(void)
388 + void __init mem_init(void)
389 + {
390 + #ifdef CONFIG_ARM_LPAE
391 +- swiotlb_init(1);
392 ++ if (swiotlb_force == SWIOTLB_FORCE ||
393 ++ max_pfn > arm_dma_pfn_limit)
394 ++ swiotlb_init(1);
395 ++ else
396 ++ swiotlb_force = SWIOTLB_NO_FORCE;
397 + #endif
398 +
399 + set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
400 +diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c
401 +index 7fa6828bb488a..587543c6c51cb 100644
402 +--- a/arch/arm64/kernel/cacheinfo.c
403 ++++ b/arch/arm64/kernel/cacheinfo.c
404 +@@ -43,7 +43,7 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
405 + this_leaf->type = type;
406 + }
407 +
408 +-static int __init_cache_level(unsigned int cpu)
409 ++int init_cache_level(unsigned int cpu)
410 + {
411 + unsigned int ctype, level, leaves, fw_level;
412 + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
413 +@@ -78,7 +78,7 @@ static int __init_cache_level(unsigned int cpu)
414 + return 0;
415 + }
416 +
417 +-static int __populate_cache_leaves(unsigned int cpu)
418 ++int populate_cache_leaves(unsigned int cpu)
419 + {
420 + unsigned int level, idx;
421 + enum cache_type type;
422 +@@ -97,6 +97,3 @@ static int __populate_cache_leaves(unsigned int cpu)
423 + }
424 + return 0;
425 + }
426 +-
427 +-DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
428 +-DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)
429 +diff --git a/arch/mips/kernel/cacheinfo.c b/arch/mips/kernel/cacheinfo.c
430 +index 47312c5294102..529dab855aac9 100644
431 +--- a/arch/mips/kernel/cacheinfo.c
432 ++++ b/arch/mips/kernel/cacheinfo.c
433 +@@ -17,7 +17,7 @@ do { \
434 + leaf++; \
435 + } while (0)
436 +
437 +-static int __init_cache_level(unsigned int cpu)
438 ++int init_cache_level(unsigned int cpu)
439 + {
440 + struct cpuinfo_mips *c = &current_cpu_data;
441 + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
442 +@@ -69,7 +69,7 @@ static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map)
443 + cpumask_set_cpu(cpu1, cpu_map);
444 + }
445 +
446 +-static int __populate_cache_leaves(unsigned int cpu)
447 ++int populate_cache_leaves(unsigned int cpu)
448 + {
449 + struct cpuinfo_mips *c = &current_cpu_data;
450 + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
451 +@@ -98,6 +98,3 @@ static int __populate_cache_leaves(unsigned int cpu)
452 +
453 + return 0;
454 + }
455 +-
456 +-DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
457 +-DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)
458 +diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c
459 +index d867813570442..90deabfe63eaa 100644
460 +--- a/arch/riscv/kernel/cacheinfo.c
461 ++++ b/arch/riscv/kernel/cacheinfo.c
462 +@@ -113,7 +113,7 @@ static void fill_cacheinfo(struct cacheinfo **this_leaf,
463 + }
464 + }
465 +
466 +-static int __init_cache_level(unsigned int cpu)
467 ++int init_cache_level(unsigned int cpu)
468 + {
469 + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
470 + struct device_node *np = of_cpu_device_node_get(cpu);
471 +@@ -155,7 +155,7 @@ static int __init_cache_level(unsigned int cpu)
472 + return 0;
473 + }
474 +
475 +-static int __populate_cache_leaves(unsigned int cpu)
476 ++int populate_cache_leaves(unsigned int cpu)
477 + {
478 + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
479 + struct cacheinfo *this_leaf = this_cpu_ci->info_list;
480 +@@ -187,6 +187,3 @@ static int __populate_cache_leaves(unsigned int cpu)
481 +
482 + return 0;
483 + }
484 +-
485 +-DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
486 +-DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)
487 +diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
488 +index 401cf670a2439..37b1bbd1a27cc 100644
489 +--- a/arch/s390/pci/pci_mmio.c
490 ++++ b/arch/s390/pci/pci_mmio.c
491 +@@ -128,7 +128,7 @@ static long get_pfn(unsigned long user_addr, unsigned long access,
492 + mmap_read_lock(current->mm);
493 + ret = -EINVAL;
494 + vma = find_vma(current->mm, user_addr);
495 +- if (!vma)
496 ++ if (!vma || user_addr < vma->vm_start)
497 + goto out;
498 + ret = -EACCES;
499 + if (!(vma->vm_flags & access))
500 +diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c
501 +index c17b8e5ec1869..d11b3d41c3785 100644
502 +--- a/arch/um/drivers/virtio_uml.c
503 ++++ b/arch/um/drivers/virtio_uml.c
504 +@@ -1113,7 +1113,7 @@ static int virtio_uml_probe(struct platform_device *pdev)
505 + rc = os_connect_socket(pdata->socket_path);
506 + } while (rc == -EINTR);
507 + if (rc < 0)
508 +- return rc;
509 ++ goto error_free;
510 + vu_dev->sock = rc;
511 +
512 + spin_lock_init(&vu_dev->sock_lock);
513 +@@ -1132,6 +1132,8 @@ static int virtio_uml_probe(struct platform_device *pdev)
514 +
515 + error_init:
516 + os_close_file(vu_dev->sock);
517 ++error_free:
518 ++ kfree(vu_dev);
519 + return rc;
520 + }
521 +
522 +diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
523 +index f9ac682e75e78..b458b0fd98bf6 100644
524 +--- a/arch/x86/kernel/cpu/cacheinfo.c
525 ++++ b/arch/x86/kernel/cpu/cacheinfo.c
526 +@@ -985,7 +985,7 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
527 + this_leaf->priv = base->nb;
528 + }
529 +
530 +-static int __init_cache_level(unsigned int cpu)
531 ++int init_cache_level(unsigned int cpu)
532 + {
533 + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
534 +
535 +@@ -1014,7 +1014,7 @@ static void get_cache_id(int cpu, struct _cpuid4_info_regs *id4_regs)
536 + id4_regs->id = c->apicid >> index_msb;
537 + }
538 +
539 +-static int __populate_cache_leaves(unsigned int cpu)
540 ++int populate_cache_leaves(unsigned int cpu)
541 + {
542 + unsigned int idx, ret;
543 + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
544 +@@ -1033,6 +1033,3 @@ static int __populate_cache_leaves(unsigned int cpu)
545 +
546 + return 0;
547 + }
548 +-
549 +-DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
550 +-DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)
551 +diff --git a/block/blk-mq.c b/block/blk-mq.c
552 +index 9e3fedbaa644b..6dcb86c1c985d 100644
553 +--- a/block/blk-mq.c
554 ++++ b/block/blk-mq.c
555 +@@ -2109,6 +2109,18 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
556 + }
557 + }
558 +
559 ++/*
560 ++ * Allow 4x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
561 ++ * queues. This is important for md arrays to benefit from merging
562 ++ * requests.
563 ++ */
564 ++static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
565 ++{
566 ++ if (plug->multiple_queues)
567 ++ return BLK_MAX_REQUEST_COUNT * 4;
568 ++ return BLK_MAX_REQUEST_COUNT;
569 ++}
570 ++
571 + /**
572 + * blk_mq_submit_bio - Create and send a request to block device.
573 + * @bio: Bio pointer.
574 +@@ -2202,7 +2214,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
575 + else
576 + last = list_entry_rq(plug->mq_list.prev);
577 +
578 +- if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
579 ++ if (request_count >= blk_plug_max_rq_count(plug) || (last &&
580 + blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
581 + blk_flush_plug_list(plug, false);
582 + trace_block_plug(q);
583 +diff --git a/block/blk-throttle.c b/block/blk-throttle.c
584 +index 63e9d00a08321..c53a254171a29 100644
585 +--- a/block/blk-throttle.c
586 ++++ b/block/blk-throttle.c
587 +@@ -2452,6 +2452,7 @@ int blk_throtl_init(struct request_queue *q)
588 + void blk_throtl_exit(struct request_queue *q)
589 + {
590 + BUG_ON(!q->td);
591 ++ del_timer_sync(&q->td->service_queue.pending_timer);
592 + throtl_shutdown_wq(q);
593 + blkcg_deactivate_policy(q, &blkcg_policy_throtl);
594 + free_percpu(q->td->latency_buckets[READ]);
595 +diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
596 +index c7ac49042cee6..192b1c7286b36 100644
597 +--- a/drivers/base/power/main.c
598 ++++ b/drivers/base/power/main.c
599 +@@ -1644,7 +1644,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
600 + }
601 +
602 + dev->power.may_skip_resume = true;
603 +- dev->power.must_resume = false;
604 ++ dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
605 +
606 + dpm_watchdog_set(&wd, dev);
607 + device_lock(dev);
608 +diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
609 +index 4f8224a6ac956..3ca7de37dd8f2 100644
610 +--- a/drivers/dma-buf/Kconfig
611 ++++ b/drivers/dma-buf/Kconfig
612 +@@ -42,6 +42,7 @@ config UDMABUF
613 + config DMABUF_MOVE_NOTIFY
614 + bool "Move notify between drivers (EXPERIMENTAL)"
615 + default n
616 ++ depends on DMA_SHARED_BUFFER
617 + help
618 + Don't pin buffers if the dynamic DMA-buf interface is available on
619 + both the exporter as well as the importer. This fixes a security
620 +diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
621 +index f28bb2334e747..08013345d1f24 100644
622 +--- a/drivers/dma/Kconfig
623 ++++ b/drivers/dma/Kconfig
624 +@@ -285,7 +285,7 @@ config INTEL_IDMA64
625 +
626 + config INTEL_IDXD
627 + tristate "Intel Data Accelerators support"
628 +- depends on PCI && X86_64
629 ++ depends on PCI && X86_64 && !UML
630 + depends on PCI_MSI
631 + depends on SBITMAP
632 + select DMA_ENGINE
633 +@@ -299,7 +299,7 @@ config INTEL_IDXD
634 +
635 + config INTEL_IOATDMA
636 + tristate "Intel I/OAT DMA support"
637 +- depends on PCI && X86_64
638 ++ depends on PCI && X86_64 && !UML
639 + select DMA_ENGINE
640 + select DMA_ENGINE_RAID
641 + select DCA
642 +diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
643 +index 235f1396f9686..52768dc8ce124 100644
644 +--- a/drivers/dma/acpi-dma.c
645 ++++ b/drivers/dma/acpi-dma.c
646 +@@ -70,10 +70,14 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
647 +
648 + si = (const struct acpi_csrt_shared_info *)&grp[1];
649 +
650 +- /* Match device by MMIO and IRQ */
651 ++ /* Match device by MMIO */
652 + if (si->mmio_base_low != lower_32_bits(mem) ||
653 +- si->mmio_base_high != upper_32_bits(mem) ||
654 +- si->gsi_interrupt != irq)
655 ++ si->mmio_base_high != upper_32_bits(mem))
656 ++ return 0;
657 ++
658 ++ /* Match device by Linux vIRQ */
659 ++ ret = acpi_register_gsi(NULL, si->gsi_interrupt, si->interrupt_mode, si->interrupt_polarity);
660 ++ if (ret != irq)
661 + return 0;
662 +
663 + dev_dbg(&adev->dev, "matches with %.4s%04X (rev %u)\n",
664 +diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
665 +index 417048e3c42aa..0368c5490788f 100644
666 +--- a/drivers/dma/idxd/submit.c
667 ++++ b/drivers/dma/idxd/submit.c
668 +@@ -45,7 +45,7 @@ struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype)
669 + if (signal_pending_state(TASK_INTERRUPTIBLE, current))
670 + break;
671 + idx = sbitmap_queue_get(sbq, &cpu);
672 +- if (idx > 0)
673 ++ if (idx >= 0)
674 + break;
675 + schedule();
676 + }
677 +diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
678 +index 0ef5ca81ba4d0..4357d2395e6b7 100644
679 +--- a/drivers/dma/sprd-dma.c
680 ++++ b/drivers/dma/sprd-dma.c
681 +@@ -1265,6 +1265,7 @@ static const struct of_device_id sprd_dma_match[] = {
682 + { .compatible = "sprd,sc9860-dma", },
683 + {},
684 + };
685 ++MODULE_DEVICE_TABLE(of, sprd_dma_match);
686 +
687 + static int __maybe_unused sprd_dma_runtime_suspend(struct device *dev)
688 + {
689 +diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
690 +index 9ffdbeec436bd..cab4719e4cf9c 100644
691 +--- a/drivers/dma/xilinx/xilinx_dma.c
692 ++++ b/drivers/dma/xilinx/xilinx_dma.c
693 +@@ -3070,7 +3070,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
694 + xdev->ext_addr = false;
695 +
696 + /* Set the dma mask bits */
697 +- dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));
698 ++ dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
699 +
700 + /* Initialize the DMA engine */
701 + xdev->common.dev = &pdev->dev;
702 +diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
703 +index b76425164e297..7931528bc864b 100644
704 +--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
705 ++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
706 +@@ -27,6 +27,9 @@
707 + #include <linux/pci.h>
708 + #include <linux/slab.h>
709 + #include <asm/div64.h>
710 ++#if IS_ENABLED(CONFIG_X86_64)
711 ++#include <asm/intel-family.h>
712 ++#endif
713 + #include <drm/amdgpu_drm.h>
714 + #include "ppatomctrl.h"
715 + #include "atombios.h"
716 +@@ -1606,6 +1609,17 @@ static int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
717 + return result;
718 + }
719 +
720 ++static bool intel_core_rkl_chk(void)
721 ++{
722 ++#if IS_ENABLED(CONFIG_X86_64)
723 ++ struct cpuinfo_x86 *c = &cpu_data(0);
724 ++
725 ++ return (c->x86 == 6 && c->x86_model == INTEL_FAM6_ROCKETLAKE);
726 ++#else
727 ++ return false;
728 ++#endif
729 ++}
730 ++
731 + static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
732 + {
733 + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
734 +@@ -1629,7 +1643,8 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
735 +
736 + data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
737 + data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
738 +- data->pcie_dpm_key_disabled = hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
739 ++ data->pcie_dpm_key_disabled =
740 ++ intel_core_rkl_chk() || !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
741 + /* need to set voltage control types before EVV patching */
742 + data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
743 + data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
744 +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
745 +index b0ece71aefdee..ce774579c89d1 100644
746 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
747 ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
748 +@@ -57,7 +57,7 @@ nvkm_control_mthd_pstate_info(struct nvkm_control *ctrl, void *data, u32 size)
749 + args->v0.count = 0;
750 + args->v0.ustate_ac = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE;
751 + args->v0.ustate_dc = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE;
752 +- args->v0.pwrsrc = -ENOSYS;
753 ++ args->v0.pwrsrc = -ENODEV;
754 + args->v0.pstate = NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_UNKNOWN;
755 + }
756 +
757 +diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
758 +index fa57986c2309c..28de889aa5164 100644
759 +--- a/drivers/iommu/amd/init.c
760 ++++ b/drivers/iommu/amd/init.c
761 +@@ -298,6 +298,22 @@ int amd_iommu_get_num_iommus(void)
762 + return amd_iommus_present;
763 + }
764 +
765 ++#ifdef CONFIG_IRQ_REMAP
766 ++static bool check_feature_on_all_iommus(u64 mask)
767 ++{
768 ++ bool ret = false;
769 ++ struct amd_iommu *iommu;
770 ++
771 ++ for_each_iommu(iommu) {
772 ++ ret = iommu_feature(iommu, mask);
773 ++ if (!ret)
774 ++ return false;
775 ++ }
776 ++
777 ++ return true;
778 ++}
779 ++#endif
780 ++
781 + /*
782 + * For IVHD type 0x11/0x40, EFR is also available via IVHD.
783 + * Default to IVHD EFR since it is available sooner
784 +@@ -854,13 +870,6 @@ static int iommu_init_ga(struct amd_iommu *iommu)
785 + int ret = 0;
786 +
787 + #ifdef CONFIG_IRQ_REMAP
788 +- /* Note: We have already checked GASup from IVRS table.
789 +- * Now, we need to make sure that GAMSup is set.
790 +- */
791 +- if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
792 +- !iommu_feature(iommu, FEATURE_GAM_VAPIC))
793 +- amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
794 +-
795 + ret = iommu_init_ga_log(iommu);
796 + #endif /* CONFIG_IRQ_REMAP */
797 +
798 +@@ -2396,6 +2405,14 @@ static void early_enable_iommus(void)
799 + }
800 +
801 + #ifdef CONFIG_IRQ_REMAP
802 ++ /*
803 ++ * Note: We have already checked GASup from IVRS table.
804 ++ * Now, we need to make sure that GAMSup is set.
805 ++ */
806 ++ if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
807 ++ !check_feature_on_all_iommus(FEATURE_GAM_VAPIC))
808 ++ amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
809 ++
810 + if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
811 + amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
812 + #endif
813 +diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
814 +index 37edd663603f6..ebac53a73bd10 100644
815 +--- a/drivers/misc/habanalabs/gaudi/gaudi.c
816 ++++ b/drivers/misc/habanalabs/gaudi/gaudi.c
817 +@@ -5723,6 +5723,12 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
818 + u8 cause;
819 + bool reset_required;
820 +
821 ++ if (event_type >= GAUDI_EVENT_SIZE) {
822 ++ dev_err(hdev->dev, "Event type %u exceeds maximum of %u",
823 ++ event_type, GAUDI_EVENT_SIZE - 1);
824 ++ return;
825 ++ }
826 ++
827 + gaudi->events_stat[event_type]++;
828 + gaudi->events_stat_aggregate[event_type]++;
829 +
830 +diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
831 +index 5b5d6275c2495..c8023b4428c5c 100644
832 +--- a/drivers/misc/habanalabs/goya/goya.c
833 ++++ b/drivers/misc/habanalabs/goya/goya.c
834 +@@ -4623,6 +4623,12 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
835 + >> EQ_CTL_EVENT_TYPE_SHIFT);
836 + struct goya_device *goya = hdev->asic_specific;
837 +
838 ++ if (event_type >= GOYA_ASYNC_EVENT_ID_SIZE) {
839 ++ dev_err(hdev->dev, "Event type %u exceeds maximum of %u",
840 ++ event_type, GOYA_ASYNC_EVENT_ID_SIZE - 1);
841 ++ return;
842 ++ }
843 ++
844 + goya->events_stat[event_type]++;
845 + goya->events_stat_aggregate[event_type]++;
846 +
847 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
848 +index 4cba110f6ef8c..0e699330ae77c 100644
849 +--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
850 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
851 +@@ -376,48 +376,6 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
852 + #endif
853 + }
854 +
855 +-#define MLX5_TRAP_DROP(_id, _group_id) \
856 +- DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
857 +- DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
858 +- DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT)
859 +-
860 +-static const struct devlink_trap mlx5_traps_arr[] = {
861 +- MLX5_TRAP_DROP(INGRESS_VLAN_FILTER, L2_DROPS),
862 +-};
863 +-
864 +-static const struct devlink_trap_group mlx5_trap_groups_arr[] = {
865 +- DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 0),
866 +-};
867 +-
868 +-static int mlx5_devlink_traps_register(struct devlink *devlink)
869 +-{
870 +- struct mlx5_core_dev *core_dev = devlink_priv(devlink);
871 +- int err;
872 +-
873 +- err = devlink_trap_groups_register(devlink, mlx5_trap_groups_arr,
874 +- ARRAY_SIZE(mlx5_trap_groups_arr));
875 +- if (err)
876 +- return err;
877 +-
878 +- err = devlink_traps_register(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr),
879 +- &core_dev->priv);
880 +- if (err)
881 +- goto err_trap_group;
882 +- return 0;
883 +-
884 +-err_trap_group:
885 +- devlink_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
886 +- ARRAY_SIZE(mlx5_trap_groups_arr));
887 +- return err;
888 +-}
889 +-
890 +-static void mlx5_devlink_traps_unregister(struct devlink *devlink)
891 +-{
892 +- devlink_traps_unregister(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr));
893 +- devlink_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
894 +- ARRAY_SIZE(mlx5_trap_groups_arr));
895 +-}
896 +-
897 + int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
898 + {
899 + int err;
900 +@@ -432,16 +390,8 @@ int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
901 + goto params_reg_err;
902 + mlx5_devlink_set_params_init_values(devlink);
903 + devlink_params_publish(devlink);
904 +-
905 +- err = mlx5_devlink_traps_register(devlink);
906 +- if (err)
907 +- goto traps_reg_err;
908 +-
909 + return 0;
910 +
911 +-traps_reg_err:
912 +- devlink_params_unregister(devlink, mlx5_devlink_params,
913 +- ARRAY_SIZE(mlx5_devlink_params));
914 + params_reg_err:
915 + devlink_unregister(devlink);
916 + return err;
917 +@@ -449,7 +399,6 @@ params_reg_err:
918 +
919 + void mlx5_devlink_unregister(struct devlink *devlink)
920 + {
921 +- mlx5_devlink_traps_unregister(devlink);
922 + devlink_params_unpublish(devlink);
923 + devlink_params_unregister(devlink, mlx5_devlink_params,
924 + ARRAY_SIZE(mlx5_devlink_params));
925 +diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
926 +index 889d7ce282ebb..952a92504df69 100644
927 +--- a/drivers/parisc/dino.c
928 ++++ b/drivers/parisc/dino.c
929 +@@ -156,15 +156,6 @@ static inline struct dino_device *DINO_DEV(struct pci_hba_data *hba)
930 + return container_of(hba, struct dino_device, hba);
931 + }
932 +
933 +-/* Check if PCI device is behind a Card-mode Dino. */
934 +-static int pci_dev_is_behind_card_dino(struct pci_dev *dev)
935 +-{
936 +- struct dino_device *dino_dev;
937 +-
938 +- dino_dev = DINO_DEV(parisc_walk_tree(dev->bus->bridge));
939 +- return is_card_dino(&dino_dev->hba.dev->id);
940 +-}
941 +-
942 + /*
943 + * Dino Configuration Space Accessor Functions
944 + */
945 +@@ -447,6 +438,15 @@ static void quirk_cirrus_cardbus(struct pci_dev *dev)
946 + DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_6832, quirk_cirrus_cardbus );
947 +
948 + #ifdef CONFIG_TULIP
949 ++/* Check if PCI device is behind a Card-mode Dino. */
950 ++static int pci_dev_is_behind_card_dino(struct pci_dev *dev)
951 ++{
952 ++ struct dino_device *dino_dev;
953 ++
954 ++ dino_dev = DINO_DEV(parisc_walk_tree(dev->bus->bridge));
955 ++ return is_card_dino(&dino_dev->hba.dev->id);
956 ++}
957 ++
958 + static void pci_fixup_tulip(struct pci_dev *dev)
959 + {
960 + if (!pci_dev_is_behind_card_dino(dev))
961 +diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
962 +index 88e19ad54f646..f175cff39b460 100644
963 +--- a/drivers/pci/controller/pci-aardvark.c
964 ++++ b/drivers/pci/controller/pci-aardvark.c
965 +@@ -225,6 +225,8 @@
966 +
967 + #define MSI_IRQ_NUM 32
968 +
969 ++#define CFG_RD_CRS_VAL 0xffff0001
970 ++
971 + struct advk_pcie {
972 + struct platform_device *pdev;
973 + void __iomem *base;
974 +@@ -587,7 +589,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
975 + advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);
976 + }
977 +
978 +-static int advk_pcie_check_pio_status(struct advk_pcie *pcie, u32 *val)
979 ++static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
980 + {
981 + struct device *dev = &pcie->pdev->dev;
982 + u32 reg;
983 +@@ -629,9 +631,30 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, u32 *val)
984 + strcomp_status = "UR";
985 + break;
986 + case PIO_COMPLETION_STATUS_CRS:
987 ++ if (allow_crs && val) {
988 ++ /* PCIe r4.0, sec 2.3.2, says:
989 ++ * If CRS Software Visibility is enabled:
990 ++ * For a Configuration Read Request that includes both
991 ++ * bytes of the Vendor ID field of a device Function's
992 ++ * Configuration Space Header, the Root Complex must
993 ++ * complete the Request to the host by returning a
994 ++ * read-data value of 0001h for the Vendor ID field and
995 ++ * all '1's for any additional bytes included in the
996 ++ * request.
997 ++ *
998 ++ * So CRS in this case is not an error status.
999 ++ */
1000 ++ *val = CFG_RD_CRS_VAL;
1001 ++ strcomp_status = NULL;
1002 ++ break;
1003 ++ }
1004 + /* PCIe r4.0, sec 2.3.2, says:
1005 + * If CRS Software Visibility is not enabled, the Root Complex
1006 + * must re-issue the Configuration Request as a new Request.
1007 ++ * If CRS Software Visibility is enabled: For a Configuration
1008 ++ * Write Request or for any other Configuration Read Request,
1009 ++ * the Root Complex must re-issue the Configuration Request as
1010 ++ * a new Request.
1011 + * A Root Complex implementation may choose to limit the number
1012 + * of Configuration Request/CRS Completion Status loops before
1013 + * determining that something is wrong with the target of the
1014 +@@ -700,6 +723,7 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
1015 + case PCI_EXP_RTCTL: {
1016 + u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
1017 + *value = (val & PCIE_MSG_PM_PME_MASK) ? 0 : PCI_EXP_RTCTL_PMEIE;
1018 ++ *value |= PCI_EXP_RTCAP_CRSVIS << 16;
1019 + return PCI_BRIDGE_EMUL_HANDLED;
1020 + }
1021 +
1022 +@@ -781,6 +805,7 @@ static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
1023 + static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
1024 + {
1025 + struct pci_bridge_emul *bridge = &pcie->bridge;
1026 ++ int ret;
1027 +
1028 + bridge->conf.vendor =
1029 + cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff);
1030 +@@ -804,7 +829,15 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
1031 + bridge->data = pcie;
1032 + bridge->ops = &advk_pci_bridge_emul_ops;
1033 +
1034 +- return pci_bridge_emul_init(bridge, 0);
1035 ++ /* PCIe config space can be initialized after pci_bridge_emul_init() */
1036 ++ ret = pci_bridge_emul_init(bridge, 0);
1037 ++ if (ret < 0)
1038 ++ return ret;
1039 ++
1040 ++ /* Indicates supports for Completion Retry Status */
1041 ++ bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
1042 ++
1043 ++ return 0;
1044 + }
1045 +
1046 + static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
1047 +@@ -856,6 +889,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
1048 + int where, int size, u32 *val)
1049 + {
1050 + struct advk_pcie *pcie = bus->sysdata;
1051 ++ bool allow_crs;
1052 + u32 reg;
1053 + int ret;
1054 +
1055 +@@ -868,7 +902,24 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
1056 + return pci_bridge_emul_conf_read(&pcie->bridge, where,
1057 + size, val);
1058 +
1059 ++ /*
1060 ++ * Completion Retry Status is possible to return only when reading all
1061 ++ * 4 bytes from PCI_VENDOR_ID and PCI_DEVICE_ID registers at once and
1062 ++ * CRSSVE flag on Root Bridge is enabled.
1063 ++ */
1064 ++ allow_crs = (where == PCI_VENDOR_ID) && (size == 4) &&
1065 ++ (le16_to_cpu(pcie->bridge.pcie_conf.rootctl) &
1066 ++ PCI_EXP_RTCTL_CRSSVE);
1067 ++
1068 + if (advk_pcie_pio_is_running(pcie)) {
1069 ++ /*
1070 ++ * If it is possible return Completion Retry Status so caller
1071 ++ * tries to issue the request again instead of failing.
1072 ++ */
1073 ++ if (allow_crs) {
1074 ++ *val = CFG_RD_CRS_VAL;
1075 ++ return PCIBIOS_SUCCESSFUL;
1076 ++ }
1077 + *val = 0xffffffff;
1078 + return PCIBIOS_SET_FAILED;
1079 + }
1080 +@@ -896,12 +947,20 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
1081 +
1082 + ret = advk_pcie_wait_pio(pcie);
1083 + if (ret < 0) {
1084 ++ /*
1085 ++ * If it is possible return Completion Retry Status so caller
1086 ++ * tries to issue the request again instead of failing.
1087 ++ */
1088 ++ if (allow_crs) {
1089 ++ *val = CFG_RD_CRS_VAL;
1090 ++ return PCIBIOS_SUCCESSFUL;
1091 ++ }
1092 + *val = 0xffffffff;
1093 + return PCIBIOS_SET_FAILED;
1094 + }
1095 +
1096 + /* Check PIO status and get the read result */
1097 +- ret = advk_pcie_check_pio_status(pcie, val);
1098 ++ ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
1099 + if (ret < 0) {
1100 + *val = 0xffffffff;
1101 + return PCIBIOS_SET_FAILED;
1102 +@@ -970,7 +1029,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
1103 + if (ret < 0)
1104 + return PCIBIOS_SET_FAILED;
1105 +
1106 +- ret = advk_pcie_check_pio_status(pcie, NULL);
1107 ++ ret = advk_pcie_check_pio_status(pcie, false, NULL);
1108 + if (ret < 0)
1109 + return PCIBIOS_SET_FAILED;
1110 +
1111 +diff --git a/drivers/pci/pci-bridge-emul.h b/drivers/pci/pci-bridge-emul.h
1112 +index b31883022a8e6..49bbd37ee318a 100644
1113 +--- a/drivers/pci/pci-bridge-emul.h
1114 ++++ b/drivers/pci/pci-bridge-emul.h
1115 +@@ -54,7 +54,7 @@ struct pci_bridge_emul_pcie_conf {
1116 + __le16 slotctl;
1117 + __le16 slotsta;
1118 + __le16 rootctl;
1119 +- __le16 rsvd;
1120 ++ __le16 rootcap;
1121 + __le32 rootsta;
1122 + __le32 devcap2;
1123 + __le16 devctl2;
1124 +diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
1125 +index 41baccba033f7..f901d2e43166c 100644
1126 +--- a/drivers/platform/chrome/Makefile
1127 ++++ b/drivers/platform/chrome/Makefile
1128 +@@ -20,7 +20,7 @@ obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_chardev.o
1129 + obj-$(CONFIG_CROS_EC_LIGHTBAR) += cros_ec_lightbar.o
1130 + obj-$(CONFIG_CROS_EC_VBC) += cros_ec_vbc.o
1131 + obj-$(CONFIG_CROS_EC_DEBUGFS) += cros_ec_debugfs.o
1132 +-cros-ec-sensorhub-objs := cros_ec_sensorhub.o cros_ec_sensorhub_ring.o
1133 ++cros-ec-sensorhub-objs := cros_ec_sensorhub.o cros_ec_sensorhub_ring.o cros_ec_trace.o
1134 + obj-$(CONFIG_CROS_EC_SENSORHUB) += cros-ec-sensorhub.o
1135 + obj-$(CONFIG_CROS_EC_SYSFS) += cros_ec_sysfs.o
1136 + obj-$(CONFIG_CROS_USBPD_LOGGER) += cros_usbpd_logger.o
1137 +diff --git a/drivers/platform/chrome/cros_ec_sensorhub_ring.c b/drivers/platform/chrome/cros_ec_sensorhub_ring.c
1138 +index 8921f24e83bac..98e37080f7609 100644
1139 +--- a/drivers/platform/chrome/cros_ec_sensorhub_ring.c
1140 ++++ b/drivers/platform/chrome/cros_ec_sensorhub_ring.c
1141 +@@ -17,6 +17,8 @@
1142 + #include <linux/sort.h>
1143 + #include <linux/slab.h>
1144 +
1145 ++#include "cros_ec_trace.h"
1146 ++
1147 + /* Precision of fixed point for the m values from the filter */
1148 + #define M_PRECISION BIT(23)
1149 +
1150 +@@ -291,6 +293,7 @@ cros_ec_sensor_ring_ts_filter_update(struct cros_ec_sensors_ts_filter_state
1151 + state->median_m = 0;
1152 + state->median_error = 0;
1153 + }
1154 ++ trace_cros_ec_sensorhub_filter(state, dx, dy);
1155 + }
1156 +
1157 + /**
1158 +@@ -427,6 +430,11 @@ cros_ec_sensor_ring_process_event(struct cros_ec_sensorhub *sensorhub,
1159 + if (new_timestamp - *current_timestamp > 0)
1160 + *current_timestamp = new_timestamp;
1161 + }
1162 ++ trace_cros_ec_sensorhub_timestamp(in->timestamp,
1163 ++ fifo_info->timestamp,
1164 ++ fifo_timestamp,
1165 ++ *current_timestamp,
1166 ++ now);
1167 + }
1168 +
1169 + if (in->flags & MOTIONSENSE_SENSOR_FLAG_ODR) {
1170 +@@ -460,6 +468,12 @@ cros_ec_sensor_ring_process_event(struct cros_ec_sensorhub *sensorhub,
1171 +
1172 + /* Regular sample */
1173 + out->sensor_id = in->sensor_num;
1174 ++ trace_cros_ec_sensorhub_data(in->sensor_num,
1175 ++ fifo_info->timestamp,
1176 ++ fifo_timestamp,
1177 ++ *current_timestamp,
1178 ++ now);
1179 ++
1180 + if (*current_timestamp - now > 0) {
1181 + /*
1182 + * This fix is needed to overcome the timestamp filter putting
1183 +diff --git a/drivers/platform/chrome/cros_ec_trace.h b/drivers/platform/chrome/cros_ec_trace.h
1184 +index f744b21bc655f..7e7cfc98657a4 100644
1185 +--- a/drivers/platform/chrome/cros_ec_trace.h
1186 ++++ b/drivers/platform/chrome/cros_ec_trace.h
1187 +@@ -15,6 +15,7 @@
1188 + #include <linux/types.h>
1189 + #include <linux/platform_data/cros_ec_commands.h>
1190 + #include <linux/platform_data/cros_ec_proto.h>
1191 ++#include <linux/platform_data/cros_ec_sensorhub.h>
1192 +
1193 + #include <linux/tracepoint.h>
1194 +
1195 +@@ -70,6 +71,99 @@ TRACE_EVENT(cros_ec_request_done,
1196 + __entry->retval)
1197 + );
1198 +
1199 ++TRACE_EVENT(cros_ec_sensorhub_timestamp,
1200 ++ TP_PROTO(u32 ec_sample_timestamp, u32 ec_fifo_timestamp, s64 fifo_timestamp,
1201 ++ s64 current_timestamp, s64 current_time),
1202 ++ TP_ARGS(ec_sample_timestamp, ec_fifo_timestamp, fifo_timestamp, current_timestamp,
1203 ++ current_time),
1204 ++ TP_STRUCT__entry(
1205 ++ __field(u32, ec_sample_timestamp)
1206 ++ __field(u32, ec_fifo_timestamp)
1207 ++ __field(s64, fifo_timestamp)
1208 ++ __field(s64, current_timestamp)
1209 ++ __field(s64, current_time)
1210 ++ __field(s64, delta)
1211 ++ ),
1212 ++ TP_fast_assign(
1213 ++ __entry->ec_sample_timestamp = ec_sample_timestamp;
1214 ++ __entry->ec_fifo_timestamp = ec_fifo_timestamp;
1215 ++ __entry->fifo_timestamp = fifo_timestamp;
1216 ++ __entry->current_timestamp = current_timestamp;
1217 ++ __entry->current_time = current_time;
1218 ++ __entry->delta = current_timestamp - current_time;
1219 ++ ),
1220 ++ TP_printk("ec_ts: %9u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld",
1221 ++ __entry->ec_sample_timestamp,
1222 ++ __entry->ec_fifo_timestamp,
1223 ++ __entry->fifo_timestamp,
1224 ++ __entry->current_timestamp,
1225 ++ __entry->current_time,
1226 ++ __entry->delta
1227 ++ )
1228 ++);
1229 ++
1230 ++TRACE_EVENT(cros_ec_sensorhub_data,
1231 ++ TP_PROTO(u32 ec_sensor_num, u32 ec_fifo_timestamp, s64 fifo_timestamp,
1232 ++ s64 current_timestamp, s64 current_time),
1233 ++ TP_ARGS(ec_sensor_num, ec_fifo_timestamp, fifo_timestamp, current_timestamp, current_time),
1234 ++ TP_STRUCT__entry(
1235 ++ __field(u32, ec_sensor_num)
1236 ++ __field(u32, ec_fifo_timestamp)
1237 ++ __field(s64, fifo_timestamp)
1238 ++ __field(s64, current_timestamp)
1239 ++ __field(s64, current_time)
1240 ++ __field(s64, delta)
1241 ++ ),
1242 ++ TP_fast_assign(
1243 ++ __entry->ec_sensor_num = ec_sensor_num;
1244 ++ __entry->ec_fifo_timestamp = ec_fifo_timestamp;
1245 ++ __entry->fifo_timestamp = fifo_timestamp;
1246 ++ __entry->current_timestamp = current_timestamp;
1247 ++ __entry->current_time = current_time;
1248 ++ __entry->delta = current_timestamp - current_time;
1249 ++ ),
1250 ++ TP_printk("ec_num: %4u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld",
1251 ++ __entry->ec_sensor_num,
1252 ++ __entry->ec_fifo_timestamp,
1253 ++ __entry->fifo_timestamp,
1254 ++ __entry->current_timestamp,
1255 ++ __entry->current_time,
1256 ++ __entry->delta
1257 ++ )
1258 ++);
1259 ++
1260 ++TRACE_EVENT(cros_ec_sensorhub_filter,
1261 ++ TP_PROTO(struct cros_ec_sensors_ts_filter_state *state, s64 dx, s64 dy),
1262 ++ TP_ARGS(state, dx, dy),
1263 ++ TP_STRUCT__entry(
1264 ++ __field(s64, dx)
1265 ++ __field(s64, dy)
1266 ++ __field(s64, median_m)
1267 ++ __field(s64, median_error)
1268 ++ __field(s64, history_len)
1269 ++ __field(s64, x)
1270 ++ __field(s64, y)
1271 ++ ),
1272 ++ TP_fast_assign(
1273 ++ __entry->dx = dx;
1274 ++ __entry->dy = dy;
1275 ++ __entry->median_m = state->median_m;
1276 ++ __entry->median_error = state->median_error;
1277 ++ __entry->history_len = state->history_len;
1278 ++ __entry->x = state->x_offset;
1279 ++ __entry->y = state->y_offset;
1280 ++ ),
1281 ++ TP_printk("dx: %12lld. dy: %12lld median_m: %12lld median_error: %12lld len: %lld x: %12lld y: %12lld",
1282 ++ __entry->dx,
1283 ++ __entry->dy,
1284 ++ __entry->median_m,
1285 ++ __entry->median_error,
1286 ++ __entry->history_len,
1287 ++ __entry->x,
1288 ++ __entry->y
1289 ++ )
1290 ++);
1291 ++
1292 +
1293 + #endif /* _CROS_EC_TRACE_H_ */
1294 +
1295 +diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
1296 +index 22c002e685b34..37f9b688661d4 100644
1297 +--- a/drivers/pwm/pwm-img.c
1298 ++++ b/drivers/pwm/pwm-img.c
1299 +@@ -329,23 +329,7 @@ err_pm_disable:
1300 + static int img_pwm_remove(struct platform_device *pdev)
1301 + {
1302 + struct img_pwm_chip *pwm_chip = platform_get_drvdata(pdev);
1303 +- u32 val;
1304 +- unsigned int i;
1305 +- int ret;
1306 +-
1307 +- ret = pm_runtime_get_sync(&pdev->dev);
1308 +- if (ret < 0) {
1309 +- pm_runtime_put(&pdev->dev);
1310 +- return ret;
1311 +- }
1312 +-
1313 +- for (i = 0; i < pwm_chip->chip.npwm; i++) {
1314 +- val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
1315 +- val &= ~BIT(i);
1316 +- img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val);
1317 +- }
1318 +
1319 +- pm_runtime_put(&pdev->dev);
1320 + pm_runtime_disable(&pdev->dev);
1321 + if (!pm_runtime_status_suspended(&pdev->dev))
1322 + img_pwm_runtime_suspend(&pdev->dev);
1323 +diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
1324 +index 710d9a207d2b0..522f862eca526 100644
1325 +--- a/drivers/pwm/pwm-lpc32xx.c
1326 ++++ b/drivers/pwm/pwm-lpc32xx.c
1327 +@@ -120,17 +120,17 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
1328 + lpc32xx->chip.npwm = 1;
1329 + lpc32xx->chip.base = -1;
1330 +
1331 ++ /* If PWM is disabled, configure the output to the default value */
1332 ++ val = readl(lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2));
1333 ++ val &= ~PWM_PIN_LEVEL;
1334 ++ writel(val, lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2));
1335 ++
1336 + ret = pwmchip_add(&lpc32xx->chip);
1337 + if (ret < 0) {
1338 + dev_err(&pdev->dev, "failed to add PWM chip, error %d\n", ret);
1339 + return ret;
1340 + }
1341 +
1342 +- /* When PWM is disable, configure the output to the default value */
1343 +- val = readl(lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2));
1344 +- val &= ~PWM_PIN_LEVEL;
1345 +- writel(val, lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2));
1346 +-
1347 + platform_set_drvdata(pdev, lpc32xx);
1348 +
1349 + return 0;
1350 +diff --git a/drivers/pwm/pwm-mxs.c b/drivers/pwm/pwm-mxs.c
1351 +index 7ce616923c52a..41bdbe71ae46b 100644
1352 +--- a/drivers/pwm/pwm-mxs.c
1353 ++++ b/drivers/pwm/pwm-mxs.c
1354 +@@ -148,6 +148,11 @@ static int mxs_pwm_probe(struct platform_device *pdev)
1355 + return ret;
1356 + }
1357 +
1358 ++ /* FIXME: Only do this if the PWM isn't already running */
1359 ++ ret = stmp_reset_block(mxs->base);
1360 ++ if (ret)
1361 ++ return dev_err_probe(&pdev->dev, ret, "failed to reset PWM\n");
1362 ++
1363 + ret = pwmchip_add(&mxs->chip);
1364 + if (ret < 0) {
1365 + dev_err(&pdev->dev, "failed to add pwm chip %d\n", ret);
1366 +@@ -156,15 +161,7 @@ static int mxs_pwm_probe(struct platform_device *pdev)
1367 +
1368 + platform_set_drvdata(pdev, mxs);
1369 +
1370 +- ret = stmp_reset_block(mxs->base);
1371 +- if (ret)
1372 +- goto pwm_remove;
1373 +-
1374 + return 0;
1375 +-
1376 +-pwm_remove:
1377 +- pwmchip_remove(&mxs->chip);
1378 +- return ret;
1379 + }
1380 +
1381 + static int mxs_pwm_remove(struct platform_device *pdev)
1382 +diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
1383 +index 3b8da7b0091b1..1f3079562b38d 100644
1384 +--- a/drivers/pwm/pwm-rockchip.c
1385 ++++ b/drivers/pwm/pwm-rockchip.c
1386 +@@ -382,20 +382,6 @@ static int rockchip_pwm_remove(struct platform_device *pdev)
1387 + {
1388 + struct rockchip_pwm_chip *pc = platform_get_drvdata(pdev);
1389 +
1390 +- /*
1391 +- * Disable the PWM clk before unpreparing it if the PWM device is still
1392 +- * running. This should only happen when the last PWM user left it
1393 +- * enabled, or when nobody requested a PWM that was previously enabled
1394 +- * by the bootloader.
1395 +- *
1396 +- * FIXME: Maybe the core should disable all PWM devices in
1397 +- * pwmchip_remove(). In this case we'd only have to call
1398 +- * clk_unprepare() after pwmchip_remove().
1399 +- *
1400 +- */
1401 +- if (pwm_is_enabled(pc->chip.pwms))
1402 +- clk_disable(pc->clk);
1403 +-
1404 + clk_unprepare(pc->pclk);
1405 + clk_unprepare(pc->clk);
1406 +
1407 +diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
1408 +index 134c14621ee01..945a8b2b85648 100644
1409 +--- a/drivers/pwm/pwm-stm32-lp.c
1410 ++++ b/drivers/pwm/pwm-stm32-lp.c
1411 +@@ -225,8 +225,6 @@ static int stm32_pwm_lp_remove(struct platform_device *pdev)
1412 + {
1413 + struct stm32_pwm_lp *priv = platform_get_drvdata(pdev);
1414 +
1415 +- pwm_disable(&priv->chip.pwms[0]);
1416 +-
1417 + return pwmchip_remove(&priv->chip);
1418 + }
1419 +
1420 +diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
1421 +index 33e4ecd6c6659..54cf5ec8f4019 100644
1422 +--- a/drivers/rtc/Kconfig
1423 ++++ b/drivers/rtc/Kconfig
1424 +@@ -624,6 +624,7 @@ config RTC_DRV_FM3130
1425 +
1426 + config RTC_DRV_RX8010
1427 + tristate "Epson RX8010SJ"
1428 ++ select REGMAP_I2C
1429 + help
1430 + If you say yes here you get support for the Epson RX8010SJ RTC
1431 + chip.
1432 +diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
1433 +index 03d31e52b3999..4523e825a61a8 100644
1434 +--- a/drivers/staging/rtl8192u/r8192U_core.c
1435 ++++ b/drivers/staging/rtl8192u/r8192U_core.c
1436 +@@ -4271,7 +4271,7 @@ static void TranslateRxSignalStuff819xUsb(struct sk_buff *skb,
1437 + bpacket_match_bssid = (type != IEEE80211_FTYPE_CTL) &&
1438 + (ether_addr_equal(priv->ieee80211->current_network.bssid, (fc & IEEE80211_FCTL_TODS) ? hdr->addr1 : (fc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 : hdr->addr3))
1439 + && (!pstats->bHwError) && (!pstats->bCRC) && (!pstats->bICV);
1440 +- bpacket_toself = bpacket_match_bssid &
1441 ++ bpacket_toself = bpacket_match_bssid &&
1442 + (ether_addr_equal(praddr, priv->ieee80211->dev->dev_addr));
1443 +
1444 + if (WLAN_FC_GET_FRAMETYPE(fc) == IEEE80211_STYPE_BEACON)
1445 +diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
1446 +index e9a90bc23b11d..f4ab4c5b4b626 100644
1447 +--- a/drivers/thermal/samsung/exynos_tmu.c
1448 ++++ b/drivers/thermal/samsung/exynos_tmu.c
1449 +@@ -1073,6 +1073,7 @@ static int exynos_tmu_probe(struct platform_device *pdev)
1450 + data->sclk = devm_clk_get(&pdev->dev, "tmu_sclk");
1451 + if (IS_ERR(data->sclk)) {
1452 + dev_err(&pdev->dev, "Failed to get sclk\n");
1453 ++ ret = PTR_ERR(data->sclk);
1454 + goto err_clk;
1455 + } else {
1456 + ret = clk_prepare_enable(data->sclk);
1457 +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
1458 +index 06757b1d4aecd..cea40ef090b77 100644
1459 +--- a/drivers/tty/vt/vt.c
1460 ++++ b/drivers/tty/vt/vt.c
1461 +@@ -2060,7 +2060,7 @@ static void restore_cur(struct vc_data *vc)
1462 +
1463 + enum { ESnormal, ESesc, ESsquare, ESgetpars, ESfunckey,
1464 + EShash, ESsetG0, ESsetG1, ESpercent, EScsiignore, ESnonstd,
1465 +- ESpalette, ESosc };
1466 ++ ESpalette, ESosc, ESapc, ESpm, ESdcs };
1467 +
1468 + /* console_lock is held (except via vc_init()) */
1469 + static void reset_terminal(struct vc_data *vc, int do_clear)
1470 +@@ -2134,20 +2134,28 @@ static void vc_setGx(struct vc_data *vc, unsigned int which, int c)
1471 + vc->vc_translate = set_translate(*charset, vc);
1472 + }
1473 +
1474 ++/* is this state an ANSI control string? */
1475 ++static bool ansi_control_string(unsigned int state)
1476 ++{
1477 ++ if (state == ESosc || state == ESapc || state == ESpm || state == ESdcs)
1478 ++ return true;
1479 ++ return false;
1480 ++}
1481 ++
1482 + /* console_lock is held */
1483 + static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
1484 + {
1485 + /*
1486 + * Control characters can be used in the _middle_
1487 +- * of an escape sequence.
1488 ++ * of an escape sequence, aside from ANSI control strings.
1489 + */
1490 +- if (vc->vc_state == ESosc && c>=8 && c<=13) /* ... except for OSC */
1491 ++ if (ansi_control_string(vc->vc_state) && c >= 8 && c <= 13)
1492 + return;
1493 + switch (c) {
1494 + case 0:
1495 + return;
1496 + case 7:
1497 +- if (vc->vc_state == ESosc)
1498 ++ if (ansi_control_string(vc->vc_state))
1499 + vc->vc_state = ESnormal;
1500 + else if (vc->vc_bell_duration)
1501 + kd_mksound(vc->vc_bell_pitch, vc->vc_bell_duration);
1502 +@@ -2208,6 +2216,12 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
1503 + case ']':
1504 + vc->vc_state = ESnonstd;
1505 + return;
1506 ++ case '_':
1507 ++ vc->vc_state = ESapc;
1508 ++ return;
1509 ++ case '^':
1510 ++ vc->vc_state = ESpm;
1511 ++ return;
1512 + case '%':
1513 + vc->vc_state = ESpercent;
1514 + return;
1515 +@@ -2225,6 +2239,9 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
1516 + if (vc->state.x < VC_TABSTOPS_COUNT)
1517 + set_bit(vc->state.x, vc->vc_tab_stop);
1518 + return;
1519 ++ case 'P':
1520 ++ vc->vc_state = ESdcs;
1521 ++ return;
1522 + case 'Z':
1523 + respond_ID(tty);
1524 + return;
1525 +@@ -2521,8 +2538,14 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
1526 + vc_setGx(vc, 1, c);
1527 + vc->vc_state = ESnormal;
1528 + return;
1529 ++ case ESapc:
1530 ++ return;
1531 + case ESosc:
1532 + return;
1533 ++ case ESpm:
1534 ++ return;
1535 ++ case ESdcs:
1536 ++ return;
1537 + default:
1538 + vc->vc_state = ESnormal;
1539 + }
1540 +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
1541 +index b4fcc48f255b3..509811aabb3fd 100644
1542 +--- a/fs/btrfs/volumes.c
1543 ++++ b/fs/btrfs/volumes.c
1544 +@@ -568,6 +568,8 @@ static int btrfs_free_stale_devices(const char *path,
1545 + struct btrfs_device *device, *tmp_device;
1546 + int ret = 0;
1547 +
1548 ++ lockdep_assert_held(&uuid_mutex);
1549 ++
1550 + if (path)
1551 + ret = -ENOENT;
1552 +
1553 +@@ -999,11 +1001,12 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
1554 + struct btrfs_device *orig_dev;
1555 + int ret = 0;
1556 +
1557 ++ lockdep_assert_held(&uuid_mutex);
1558 ++
1559 + fs_devices = alloc_fs_devices(orig->fsid, NULL);
1560 + if (IS_ERR(fs_devices))
1561 + return fs_devices;
1562 +
1563 +- mutex_lock(&orig->device_list_mutex);
1564 + fs_devices->total_devices = orig->total_devices;
1565 +
1566 + list_for_each_entry(orig_dev, &orig->devices, dev_list) {
1567 +@@ -1035,10 +1038,8 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
1568 + device->fs_devices = fs_devices;
1569 + fs_devices->num_devices++;
1570 + }
1571 +- mutex_unlock(&orig->device_list_mutex);
1572 + return fs_devices;
1573 + error:
1574 +- mutex_unlock(&orig->device_list_mutex);
1575 + free_fs_devices(fs_devices);
1576 + return ERR_PTR(ret);
1577 + }
1578 +@@ -1855,15 +1856,17 @@ out:
1579 + * Function to update ctime/mtime for a given device path.
1580 + * Mainly used for ctime/mtime based probe like libblkid.
1581 + */
1582 +-static void update_dev_time(const char *path_name)
1583 ++static void update_dev_time(struct block_device *bdev)
1584 + {
1585 +- struct file *filp;
1586 ++ struct inode *inode = bdev->bd_inode;
1587 ++ struct timespec64 now;
1588 +
1589 +- filp = filp_open(path_name, O_RDWR, 0);
1590 +- if (IS_ERR(filp))
1591 ++ /* Shouldn't happen but just in case. */
1592 ++ if (!inode)
1593 + return;
1594 +- file_update_time(filp);
1595 +- filp_close(filp, NULL);
1596 ++
1597 ++ now = current_time(inode);
1598 ++ generic_update_time(inode, &now, S_MTIME | S_CTIME);
1599 + }
1600 +
1601 + static int btrfs_rm_dev_item(struct btrfs_device *device)
1602 +@@ -2038,7 +2041,7 @@ void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
1603 + btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
1604 +
1605 + /* Update ctime/mtime for device path for libblkid */
1606 +- update_dev_time(device_path);
1607 ++ update_dev_time(bdev);
1608 + }
1609 +
1610 + int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
1611 +@@ -2681,7 +2684,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
1612 + btrfs_forget_devices(device_path);
1613 +
1614 + /* Update ctime/mtime for blkid or udev */
1615 +- update_dev_time(device_path);
1616 ++ update_dev_time(bdev);
1617 +
1618 + return ret;
1619 +
1620 +diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
1621 +index 678dac8365ed3..48ea95b81df84 100644
1622 +--- a/fs/ceph/caps.c
1623 ++++ b/fs/ceph/caps.c
1624 +@@ -1868,6 +1868,8 @@ static u64 __mark_caps_flushing(struct inode *inode,
1625 + * try to invalidate mapping pages without blocking.
1626 + */
1627 + static int try_nonblocking_invalidate(struct inode *inode)
1628 ++ __releases(ci->i_ceph_lock)
1629 ++ __acquires(ci->i_ceph_lock)
1630 + {
1631 + struct ceph_inode_info *ci = ceph_inode(inode);
1632 + u32 invalidating_gen = ci->i_rdcache_gen;
1633 +@@ -3169,7 +3171,16 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
1634 + break;
1635 + }
1636 + }
1637 +- BUG_ON(!found);
1638 ++
1639 ++ if (!found) {
1640 ++ /*
1641 ++ * The capsnap should already be removed when removing
1642 ++ * auth cap in the case of a forced unmount.
1643 ++ */
1644 ++ WARN_ON_ONCE(ci->i_auth_cap);
1645 ++ goto unlock;
1646 ++ }
1647 ++
1648 + capsnap->dirty_pages -= nr;
1649 + if (capsnap->dirty_pages == 0) {
1650 + complete_capsnap = true;
1651 +@@ -3191,6 +3202,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
1652 + complete_capsnap ? " (complete capsnap)" : "");
1653 + }
1654 +
1655 ++unlock:
1656 + spin_unlock(&ci->i_ceph_lock);
1657 +
1658 + if (last) {
1659 +@@ -3657,6 +3669,43 @@ out:
1660 + iput(inode);
1661 + }
1662 +
1663 ++void __ceph_remove_capsnap(struct inode *inode, struct ceph_cap_snap *capsnap,
1664 ++ bool *wake_ci, bool *wake_mdsc)
1665 ++{
1666 ++ struct ceph_inode_info *ci = ceph_inode(inode);
1667 ++ struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
1668 ++ bool ret;
1669 ++
1670 ++ lockdep_assert_held(&ci->i_ceph_lock);
1671 ++
1672 ++ dout("removing capsnap %p, inode %p ci %p\n", capsnap, inode, ci);
1673 ++
1674 ++ list_del_init(&capsnap->ci_item);
1675 ++ ret = __detach_cap_flush_from_ci(ci, &capsnap->cap_flush);
1676 ++ if (wake_ci)
1677 ++ *wake_ci = ret;
1678 ++
1679 ++ spin_lock(&mdsc->cap_dirty_lock);
1680 ++ if (list_empty(&ci->i_cap_flush_list))
1681 ++ list_del_init(&ci->i_flushing_item);
1682 ++
1683 ++ ret = __detach_cap_flush_from_mdsc(mdsc, &capsnap->cap_flush);
1684 ++ if (wake_mdsc)
1685 ++ *wake_mdsc = ret;
1686 ++ spin_unlock(&mdsc->cap_dirty_lock);
1687 ++}
1688 ++
1689 ++void ceph_remove_capsnap(struct inode *inode, struct ceph_cap_snap *capsnap,
1690 ++ bool *wake_ci, bool *wake_mdsc)
1691 ++{
1692 ++ struct ceph_inode_info *ci = ceph_inode(inode);
1693 ++
1694 ++ lockdep_assert_held(&ci->i_ceph_lock);
1695 ++
1696 ++ WARN_ON_ONCE(capsnap->dirty_pages || capsnap->writing);
1697 ++ __ceph_remove_capsnap(inode, capsnap, wake_ci, wake_mdsc);
1698 ++}
1699 ++
1700 + /*
1701 + * Handle FLUSHSNAP_ACK. MDS has flushed snap data to disk and we can
1702 + * throw away our cap_snap.
1703 +@@ -3694,23 +3743,10 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
1704 + capsnap, capsnap->follows);
1705 + }
1706 + }
1707 +- if (flushed) {
1708 +- WARN_ON(capsnap->dirty_pages || capsnap->writing);
1709 +- dout(" removing %p cap_snap %p follows %lld\n",
1710 +- inode, capsnap, follows);
1711 +- list_del(&capsnap->ci_item);
1712 +- wake_ci |= __detach_cap_flush_from_ci(ci, &capsnap->cap_flush);
1713 +-
1714 +- spin_lock(&mdsc->cap_dirty_lock);
1715 +-
1716 +- if (list_empty(&ci->i_cap_flush_list))
1717 +- list_del_init(&ci->i_flushing_item);
1718 +-
1719 +- wake_mdsc |= __detach_cap_flush_from_mdsc(mdsc,
1720 +- &capsnap->cap_flush);
1721 +- spin_unlock(&mdsc->cap_dirty_lock);
1722 +- }
1723 ++ if (flushed)
1724 ++ ceph_remove_capsnap(inode, capsnap, &wake_ci, &wake_mdsc);
1725 + spin_unlock(&ci->i_ceph_lock);
1726 ++
1727 + if (flushed) {
1728 + ceph_put_snap_context(capsnap->context);
1729 + ceph_put_cap_snap(capsnap);
1730 +diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
1731 +index a4d48370b2b32..f63c1a090139c 100644
1732 +--- a/fs/ceph/dir.c
1733 ++++ b/fs/ceph/dir.c
1734 +@@ -1797,8 +1797,7 @@ static void ceph_d_release(struct dentry *dentry)
1735 + dentry->d_fsdata = NULL;
1736 + spin_unlock(&dentry->d_lock);
1737 +
1738 +- if (di->lease_session)
1739 +- ceph_put_mds_session(di->lease_session);
1740 ++ ceph_put_mds_session(di->lease_session);
1741 + kmem_cache_free(ceph_dentry_cachep, di);
1742 + }
1743 +
1744 +diff --git a/fs/ceph/file.c b/fs/ceph/file.c
1745 +index 3d2e3dd4ee01d..f1895f78ab452 100644
1746 +--- a/fs/ceph/file.c
1747 ++++ b/fs/ceph/file.c
1748 +@@ -1723,32 +1723,26 @@ retry_snap:
1749 + goto out;
1750 + }
1751 +
1752 +- err = file_remove_privs(file);
1753 +- if (err)
1754 ++ down_read(&osdc->lock);
1755 ++ map_flags = osdc->osdmap->flags;
1756 ++ pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
1757 ++ up_read(&osdc->lock);
1758 ++ if ((map_flags & CEPH_OSDMAP_FULL) ||
1759 ++ (pool_flags & CEPH_POOL_FLAG_FULL)) {
1760 ++ err = -ENOSPC;
1761 + goto out;
1762 ++ }
1763 +
1764 +- err = file_update_time(file);
1765 ++ err = file_remove_privs(file);
1766 + if (err)
1767 + goto out;
1768 +
1769 +- inode_inc_iversion_raw(inode);
1770 +-
1771 + if (ci->i_inline_version != CEPH_INLINE_NONE) {
1772 + err = ceph_uninline_data(file, NULL);
1773 + if (err < 0)
1774 + goto out;
1775 + }
1776 +
1777 +- down_read(&osdc->lock);
1778 +- map_flags = osdc->osdmap->flags;
1779 +- pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
1780 +- up_read(&osdc->lock);
1781 +- if ((map_flags & CEPH_OSDMAP_FULL) ||
1782 +- (pool_flags & CEPH_POOL_FLAG_FULL)) {
1783 +- err = -ENOSPC;
1784 +- goto out;
1785 +- }
1786 +-
1787 + dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
1788 + inode, ceph_vinop(inode), pos, count, i_size_read(inode));
1789 + if (fi->fmode & CEPH_FILE_MODE_LAZY)
1790 +@@ -1761,6 +1755,12 @@ retry_snap:
1791 + if (err < 0)
1792 + goto out;
1793 +
1794 ++ err = file_update_time(file);
1795 ++ if (err)
1796 ++ goto out_caps;
1797 ++
1798 ++ inode_inc_iversion_raw(inode);
1799 ++
1800 + dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
1801 + inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
1802 +
1803 +@@ -1844,6 +1844,8 @@ retry_snap:
1804 + }
1805 +
1806 + goto out_unlocked;
1807 ++out_caps:
1808 ++ ceph_put_cap_refs(ci, got);
1809 + out:
1810 + if (direct_lock)
1811 + ceph_end_io_direct(inode);
1812 +diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
1813 +index 57cd78e942c08..63e781e4f7e44 100644
1814 +--- a/fs/ceph/inode.c
1815 ++++ b/fs/ceph/inode.c
1816 +@@ -1121,8 +1121,7 @@ static inline void update_dentry_lease(struct inode *dir, struct dentry *dentry,
1817 + __update_dentry_lease(dir, dentry, lease, session, from_time,
1818 + &old_lease_session);
1819 + spin_unlock(&dentry->d_lock);
1820 +- if (old_lease_session)
1821 +- ceph_put_mds_session(old_lease_session);
1822 ++ ceph_put_mds_session(old_lease_session);
1823 + }
1824 +
1825 + /*
1826 +@@ -1167,8 +1166,7 @@ static void update_dentry_lease_careful(struct dentry *dentry,
1827 + from_time, &old_lease_session);
1828 + out_unlock:
1829 + spin_unlock(&dentry->d_lock);
1830 +- if (old_lease_session)
1831 +- ceph_put_mds_session(old_lease_session);
1832 ++ ceph_put_mds_session(old_lease_session);
1833 + }
1834 +
1835 + /*
1836 +diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
1837 +index 816cea4975372..0f57b7d094578 100644
1838 +--- a/fs/ceph/mds_client.c
1839 ++++ b/fs/ceph/mds_client.c
1840 +@@ -661,6 +661,9 @@ struct ceph_mds_session *ceph_get_mds_session(struct ceph_mds_session *s)
1841 +
1842 + void ceph_put_mds_session(struct ceph_mds_session *s)
1843 + {
1844 ++ if (IS_ERR_OR_NULL(s))
1845 ++ return;
1846 ++
1847 + dout("mdsc put_session %p %d -> %d\n", s,
1848 + refcount_read(&s->s_ref), refcount_read(&s->s_ref)-1);
1849 + if (refcount_dec_and_test(&s->s_ref)) {
1850 +@@ -1435,8 +1438,7 @@ static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
1851 +
1852 + for (i = 0; i < mi->num_export_targets; i++) {
1853 + ts = __open_export_target_session(mdsc, mi->export_targets[i]);
1854 +- if (!IS_ERR(ts))
1855 +- ceph_put_mds_session(ts);
1856 ++ ceph_put_mds_session(ts);
1857 + }
1858 + }
1859 +
1860 +@@ -1585,14 +1587,39 @@ out:
1861 + return ret;
1862 + }
1863 +
1864 ++static int remove_capsnaps(struct ceph_mds_client *mdsc, struct inode *inode)
1865 ++{
1866 ++ struct ceph_inode_info *ci = ceph_inode(inode);
1867 ++ struct ceph_cap_snap *capsnap;
1868 ++ int capsnap_release = 0;
1869 ++
1870 ++ lockdep_assert_held(&ci->i_ceph_lock);
1871 ++
1872 ++ dout("removing capsnaps, ci is %p, inode is %p\n", ci, inode);
1873 ++
1874 ++ while (!list_empty(&ci->i_cap_snaps)) {
1875 ++ capsnap = list_first_entry(&ci->i_cap_snaps,
1876 ++ struct ceph_cap_snap, ci_item);
1877 ++ __ceph_remove_capsnap(inode, capsnap, NULL, NULL);
1878 ++ ceph_put_snap_context(capsnap->context);
1879 ++ ceph_put_cap_snap(capsnap);
1880 ++ capsnap_release++;
1881 ++ }
1882 ++ wake_up_all(&ci->i_cap_wq);
1883 ++ wake_up_all(&mdsc->cap_flushing_wq);
1884 ++ return capsnap_release;
1885 ++}
1886 ++
1887 + static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
1888 + void *arg)
1889 + {
1890 + struct ceph_fs_client *fsc = (struct ceph_fs_client *)arg;
1891 ++ struct ceph_mds_client *mdsc = fsc->mdsc;
1892 + struct ceph_inode_info *ci = ceph_inode(inode);
1893 + LIST_HEAD(to_remove);
1894 + bool dirty_dropped = false;
1895 + bool invalidate = false;
1896 ++ int capsnap_release = 0;
1897 +
1898 + dout("removing cap %p, ci is %p, inode is %p\n",
1899 + cap, ci, &ci->vfs_inode);
1900 +@@ -1600,7 +1627,6 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
1901 + __ceph_remove_cap(cap, false);
1902 + if (!ci->i_auth_cap) {
1903 + struct ceph_cap_flush *cf;
1904 +- struct ceph_mds_client *mdsc = fsc->mdsc;
1905 +
1906 + if (READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
1907 + if (inode->i_data.nrpages > 0)
1908 +@@ -1664,6 +1690,9 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
1909 + list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
1910 + ci->i_prealloc_cap_flush = NULL;
1911 + }
1912 ++
1913 ++ if (!list_empty(&ci->i_cap_snaps))
1914 ++ capsnap_release = remove_capsnaps(mdsc, inode);
1915 + }
1916 + spin_unlock(&ci->i_ceph_lock);
1917 + while (!list_empty(&to_remove)) {
1918 +@@ -1680,6 +1709,8 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
1919 + ceph_queue_invalidate(inode);
1920 + if (dirty_dropped)
1921 + iput(inode);
1922 ++ while (capsnap_release--)
1923 ++ iput(inode);
1924 + return 0;
1925 + }
1926 +
1927 +@@ -4857,7 +4888,6 @@ void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
1928 +
1929 + ceph_metric_destroy(&mdsc->metric);
1930 +
1931 +- flush_delayed_work(&mdsc->metric.delayed_work);
1932 + fsc->mdsc = NULL;
1933 + kfree(mdsc);
1934 + dout("mdsc_destroy %p done\n", mdsc);
1935 +diff --git a/fs/ceph/metric.c b/fs/ceph/metric.c
1936 +index fee4c47783132..9e0a0e26294ee 100644
1937 +--- a/fs/ceph/metric.c
1938 ++++ b/fs/ceph/metric.c
1939 +@@ -224,6 +224,8 @@ void ceph_metric_destroy(struct ceph_client_metric *m)
1940 + if (!m)
1941 + return;
1942 +
1943 ++ cancel_delayed_work_sync(&m->delayed_work);
1944 ++
1945 + percpu_counter_destroy(&m->total_inodes);
1946 + percpu_counter_destroy(&m->opened_inodes);
1947 + percpu_counter_destroy(&m->i_caps_mis);
1948 +@@ -231,10 +233,7 @@ void ceph_metric_destroy(struct ceph_client_metric *m)
1949 + percpu_counter_destroy(&m->d_lease_mis);
1950 + percpu_counter_destroy(&m->d_lease_hit);
1951 +
1952 +- cancel_delayed_work_sync(&m->delayed_work);
1953 +-
1954 +- if (m->session)
1955 +- ceph_put_mds_session(m->session);
1956 ++ ceph_put_mds_session(m->session);
1957 + }
1958 +
1959 + static inline void __update_latency(ktime_t *totalp, ktime_t *lsump,
1960 +diff --git a/fs/ceph/super.h b/fs/ceph/super.h
1961 +index a8c460393b01b..9362eeb5812d9 100644
1962 +--- a/fs/ceph/super.h
1963 ++++ b/fs/ceph/super.h
1964 +@@ -1134,6 +1134,12 @@ extern void ceph_put_cap_refs_no_check_caps(struct ceph_inode_info *ci,
1965 + int had);
1966 + extern void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
1967 + struct ceph_snap_context *snapc);
1968 ++extern void __ceph_remove_capsnap(struct inode *inode,
1969 ++ struct ceph_cap_snap *capsnap,
1970 ++ bool *wake_ci, bool *wake_mdsc);
1971 ++extern void ceph_remove_capsnap(struct inode *inode,
1972 ++ struct ceph_cap_snap *capsnap,
1973 ++ bool *wake_ci, bool *wake_mdsc);
1974 + extern void ceph_flush_snaps(struct ceph_inode_info *ci,
1975 + struct ceph_mds_session **psession);
1976 + extern bool __ceph_should_report_size(struct ceph_inode_info *ci);
1977 +diff --git a/fs/coredump.c b/fs/coredump.c
1978 +index c6acfc694f658..c56a3bdce7cd4 100644
1979 +--- a/fs/coredump.c
1980 ++++ b/fs/coredump.c
1981 +@@ -1111,8 +1111,10 @@ int dump_vma_snapshot(struct coredump_params *cprm, int *vma_count,
1982 +
1983 + mmap_write_unlock(mm);
1984 +
1985 +- if (WARN_ON(i != *vma_count))
1986 ++ if (WARN_ON(i != *vma_count)) {
1987 ++ kvfree(*vma_meta);
1988 + return -EFAULT;
1989 ++ }
1990 +
1991 + *vma_data_size_ptr = vma_data_size;
1992 + return 0;
1993 +diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c
1994 +index 9c6c0e2e5880a..57afd06db62de 100644
1995 +--- a/fs/nilfs2/sysfs.c
1996 ++++ b/fs/nilfs2/sysfs.c
1997 +@@ -64,11 +64,9 @@ static const struct sysfs_ops nilfs_##name##_attr_ops = { \
1998 + #define NILFS_DEV_INT_GROUP_TYPE(name, parent_name) \
1999 + static void nilfs_##name##_attr_release(struct kobject *kobj) \
2000 + { \
2001 +- struct nilfs_sysfs_##parent_name##_subgroups *subgroups; \
2002 +- struct the_nilfs *nilfs = container_of(kobj->parent, \
2003 +- struct the_nilfs, \
2004 +- ns_##parent_name##_kobj); \
2005 +- subgroups = nilfs->ns_##parent_name##_subgroups; \
2006 ++ struct nilfs_sysfs_##parent_name##_subgroups *subgroups = container_of(kobj, \
2007 ++ struct nilfs_sysfs_##parent_name##_subgroups, \
2008 ++ sg_##name##_kobj); \
2009 + complete(&subgroups->sg_##name##_kobj_unregister); \
2010 + } \
2011 + static struct kobj_type nilfs_##name##_ktype = { \
2012 +@@ -94,12 +92,12 @@ static int nilfs_sysfs_create_##name##_group(struct the_nilfs *nilfs) \
2013 + err = kobject_init_and_add(kobj, &nilfs_##name##_ktype, parent, \
2014 + #name); \
2015 + if (err) \
2016 +- return err; \
2017 +- return 0; \
2018 ++ kobject_put(kobj); \
2019 ++ return err; \
2020 + } \
2021 + static void nilfs_sysfs_delete_##name##_group(struct the_nilfs *nilfs) \
2022 + { \
2023 +- kobject_del(&nilfs->ns_##parent_name##_subgroups->sg_##name##_kobj); \
2024 ++ kobject_put(&nilfs->ns_##parent_name##_subgroups->sg_##name##_kobj); \
2025 + }
2026 +
2027 + /************************************************************************
2028 +@@ -210,14 +208,14 @@ int nilfs_sysfs_create_snapshot_group(struct nilfs_root *root)
2029 + }
2030 +
2031 + if (err)
2032 +- return err;
2033 ++ kobject_put(&root->snapshot_kobj);
2034 +
2035 +- return 0;
2036 ++ return err;
2037 + }
2038 +
2039 + void nilfs_sysfs_delete_snapshot_group(struct nilfs_root *root)
2040 + {
2041 +- kobject_del(&root->snapshot_kobj);
2042 ++ kobject_put(&root->snapshot_kobj);
2043 + }
2044 +
2045 + /************************************************************************
2046 +@@ -999,7 +997,7 @@ int nilfs_sysfs_create_device_group(struct super_block *sb)
2047 + err = kobject_init_and_add(&nilfs->ns_dev_kobj, &nilfs_dev_ktype, NULL,
2048 + "%s", sb->s_id);
2049 + if (err)
2050 +- goto free_dev_subgroups;
2051 ++ goto cleanup_dev_kobject;
2052 +
2053 + err = nilfs_sysfs_create_mounted_snapshots_group(nilfs);
2054 + if (err)
2055 +@@ -1036,9 +1034,7 @@ delete_mounted_snapshots_group:
2056 + nilfs_sysfs_delete_mounted_snapshots_group(nilfs);
2057 +
2058 + cleanup_dev_kobject:
2059 +- kobject_del(&nilfs->ns_dev_kobj);
2060 +-
2061 +-free_dev_subgroups:
2062 ++ kobject_put(&nilfs->ns_dev_kobj);
2063 + kfree(nilfs->ns_dev_subgroups);
2064 +
2065 + failed_create_device_group:
2066 +diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
2067 +index 221a1cc597f06..c20ebecd7bc24 100644
2068 +--- a/fs/nilfs2/the_nilfs.c
2069 ++++ b/fs/nilfs2/the_nilfs.c
2070 +@@ -792,14 +792,13 @@ nilfs_find_or_create_root(struct the_nilfs *nilfs, __u64 cno)
2071 +
2072 + void nilfs_put_root(struct nilfs_root *root)
2073 + {
2074 +- if (refcount_dec_and_test(&root->count)) {
2075 +- struct the_nilfs *nilfs = root->nilfs;
2076 ++ struct the_nilfs *nilfs = root->nilfs;
2077 +
2078 +- nilfs_sysfs_delete_snapshot_group(root);
2079 +-
2080 +- spin_lock(&nilfs->ns_cptree_lock);
2081 ++ if (refcount_dec_and_lock(&root->count, &nilfs->ns_cptree_lock)) {
2082 + rb_erase(&root->rb_node, &nilfs->ns_cptree);
2083 + spin_unlock(&nilfs->ns_cptree_lock);
2084 ++
2085 ++ nilfs_sysfs_delete_snapshot_group(root);
2086 + iput(root->ifile);
2087 +
2088 + kfree(root);
2089 +diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
2090 +index 4f72b47973c30..2f909ed084c63 100644
2091 +--- a/include/linux/cacheinfo.h
2092 ++++ b/include/linux/cacheinfo.h
2093 +@@ -79,24 +79,6 @@ struct cpu_cacheinfo {
2094 + bool cpu_map_populated;
2095 + };
2096 +
2097 +-/*
2098 +- * Helpers to make sure "func" is executed on the cpu whose cache
2099 +- * attributes are being detected
2100 +- */
2101 +-#define DEFINE_SMP_CALL_CACHE_FUNCTION(func) \
2102 +-static inline void _##func(void *ret) \
2103 +-{ \
2104 +- int cpu = smp_processor_id(); \
2105 +- *(int *)ret = __##func(cpu); \
2106 +-} \
2107 +- \
2108 +-int func(unsigned int cpu) \
2109 +-{ \
2110 +- int ret; \
2111 +- smp_call_function_single(cpu, _##func, &ret, true); \
2112 +- return ret; \
2113 +-}
2114 +-
2115 + struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu);
2116 + int init_cache_level(unsigned int cpu);
2117 + int populate_cache_leaves(unsigned int cpu);
2118 +diff --git a/include/linux/thermal.h b/include/linux/thermal.h
2119 +index d07ea27e72a94..176d9454e8f36 100644
2120 +--- a/include/linux/thermal.h
2121 ++++ b/include/linux/thermal.h
2122 +@@ -410,12 +410,13 @@ static inline void thermal_zone_device_unregister(
2123 + struct thermal_zone_device *tz)
2124 + { }
2125 + static inline struct thermal_cooling_device *
2126 +-thermal_cooling_device_register(char *type, void *devdata,
2127 ++thermal_cooling_device_register(const char *type, void *devdata,
2128 + const struct thermal_cooling_device_ops *ops)
2129 + { return ERR_PTR(-ENODEV); }
2130 + static inline struct thermal_cooling_device *
2131 + thermal_of_cooling_device_register(struct device_node *np,
2132 +- char *type, void *devdata, const struct thermal_cooling_device_ops *ops)
2133 ++ const char *type, void *devdata,
2134 ++ const struct thermal_cooling_device_ops *ops)
2135 + { return ERR_PTR(-ENODEV); }
2136 + static inline struct thermal_cooling_device *
2137 + devm_thermal_of_cooling_device_register(struct device *dev,
2138 +diff --git a/kernel/profile.c b/kernel/profile.c
2139 +index 6f69a4195d563..b47fe52f0ade4 100644
2140 +--- a/kernel/profile.c
2141 ++++ b/kernel/profile.c
2142 +@@ -41,7 +41,8 @@ struct profile_hit {
2143 + #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
2144 +
2145 + static atomic_t *prof_buffer;
2146 +-static unsigned long prof_len, prof_shift;
2147 ++static unsigned long prof_len;
2148 ++static unsigned short int prof_shift;
2149 +
2150 + int prof_on __read_mostly;
2151 + EXPORT_SYMBOL_GPL(prof_on);
2152 +@@ -67,8 +68,8 @@ int profile_setup(char *str)
2153 + if (str[strlen(sleepstr)] == ',')
2154 + str += strlen(sleepstr) + 1;
2155 + if (get_option(&str, &par))
2156 +- prof_shift = par;
2157 +- pr_info("kernel sleep profiling enabled (shift: %ld)\n",
2158 ++ prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
2159 ++ pr_info("kernel sleep profiling enabled (shift: %u)\n",
2160 + prof_shift);
2161 + #else
2162 + pr_warn("kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
2163 +@@ -78,21 +79,21 @@ int profile_setup(char *str)
2164 + if (str[strlen(schedstr)] == ',')
2165 + str += strlen(schedstr) + 1;
2166 + if (get_option(&str, &par))
2167 +- prof_shift = par;
2168 +- pr_info("kernel schedule profiling enabled (shift: %ld)\n",
2169 ++ prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
2170 ++ pr_info("kernel schedule profiling enabled (shift: %u)\n",
2171 + prof_shift);
2172 + } else if (!strncmp(str, kvmstr, strlen(kvmstr))) {
2173 + prof_on = KVM_PROFILING;
2174 + if (str[strlen(kvmstr)] == ',')
2175 + str += strlen(kvmstr) + 1;
2176 + if (get_option(&str, &par))
2177 +- prof_shift = par;
2178 +- pr_info("kernel KVM profiling enabled (shift: %ld)\n",
2179 ++ prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
2180 ++ pr_info("kernel KVM profiling enabled (shift: %u)\n",
2181 + prof_shift);
2182 + } else if (get_option(&str, &par)) {
2183 +- prof_shift = par;
2184 ++ prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
2185 + prof_on = CPU_PROFILING;
2186 +- pr_info("kernel profiling enabled (shift: %ld)\n",
2187 ++ pr_info("kernel profiling enabled (shift: %u)\n",
2188 + prof_shift);
2189 + }
2190 + return 1;
2191 +@@ -468,7 +469,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
2192 + unsigned long p = *ppos;
2193 + ssize_t read;
2194 + char *pnt;
2195 +- unsigned int sample_step = 1 << prof_shift;
2196 ++ unsigned long sample_step = 1UL << prof_shift;
2197 +
2198 + profile_flip_buffers();
2199 + if (p >= (prof_len+1)*sizeof(unsigned int))
2200 +diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
2201 +index 36b545f17206f..2593a733c0849 100644
2202 +--- a/kernel/sched/idle.c
2203 ++++ b/kernel/sched/idle.c
2204 +@@ -372,10 +372,10 @@ void play_idle_precise(u64 duration_ns, u64 latency_ns)
2205 + cpuidle_use_deepest_state(latency_ns);
2206 +
2207 + it.done = 0;
2208 +- hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2209 ++ hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
2210 + it.timer.function = idle_inject_timer_fn;
2211 + hrtimer_start(&it.timer, ns_to_ktime(duration_ns),
2212 +- HRTIMER_MODE_REL_PINNED);
2213 ++ HRTIMER_MODE_REL_PINNED_HARD);
2214 +
2215 + while (!READ_ONCE(it.done))
2216 + do_idle();
2217 +diff --git a/kernel/sys.c b/kernel/sys.c
2218 +index a730c03ee607c..24a3a28ae2284 100644
2219 +--- a/kernel/sys.c
2220 ++++ b/kernel/sys.c
2221 +@@ -1941,13 +1941,6 @@ static int validate_prctl_map_addr(struct prctl_mm_map *prctl_map)
2222 +
2223 + error = -EINVAL;
2224 +
2225 +- /*
2226 +- * @brk should be after @end_data in traditional maps.
2227 +- */
2228 +- if (prctl_map->start_brk <= prctl_map->end_data ||
2229 +- prctl_map->brk <= prctl_map->end_data)
2230 +- goto out;
2231 +-
2232 + /*
2233 + * Neither we should allow to override limits if they set.
2234 + */
2235 +diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
2236 +index bf174798afcb9..95f909540587c 100644
2237 +--- a/lib/Kconfig.debug
2238 ++++ b/lib/Kconfig.debug
2239 +@@ -981,7 +981,6 @@ config HARDLOCKUP_DETECTOR
2240 + depends on HAVE_HARDLOCKUP_DETECTOR_PERF || HAVE_HARDLOCKUP_DETECTOR_ARCH
2241 + select LOCKUP_DETECTOR
2242 + select HARDLOCKUP_DETECTOR_PERF if HAVE_HARDLOCKUP_DETECTOR_PERF
2243 +- select HARDLOCKUP_DETECTOR_ARCH if HAVE_HARDLOCKUP_DETECTOR_ARCH
2244 + help
2245 + Say Y here to enable the kernel to act as a watchdog to detect
2246 + hard lockups.
2247 +diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
2248 +index a3cd90a74012b..f582351d84ecb 100644
2249 +--- a/net/9p/trans_virtio.c
2250 ++++ b/net/9p/trans_virtio.c
2251 +@@ -605,7 +605,7 @@ static int p9_virtio_probe(struct virtio_device *vdev)
2252 + chan->vc_wq = kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
2253 + if (!chan->vc_wq) {
2254 + err = -ENOMEM;
2255 +- goto out_free_tag;
2256 ++ goto out_remove_file;
2257 + }
2258 + init_waitqueue_head(chan->vc_wq);
2259 + chan->ring_bufs_avail = 1;
2260 +@@ -623,6 +623,8 @@ static int p9_virtio_probe(struct virtio_device *vdev)
2261 +
2262 + return 0;
2263 +
2264 ++out_remove_file:
2265 ++ sysfs_remove_file(&vdev->dev.kobj, &dev_attr_mount_tag.attr);
2266 + out_free_tag:
2267 + kfree(tag);
2268 + out_free_vq:
2269 +diff --git a/net/sctp/input.c b/net/sctp/input.c
2270 +index ddb5b5c2550ef..49c49a4d203f0 100644
2271 +--- a/net/sctp/input.c
2272 ++++ b/net/sctp/input.c
2273 +@@ -1168,6 +1168,9 @@ static struct sctp_association *__sctp_rcv_asconf_lookup(
2274 + union sctp_addr_param *param;
2275 + union sctp_addr paddr;
2276 +
2277 ++ if (ntohs(ch->length) < sizeof(*asconf) + sizeof(struct sctp_paramhdr))
2278 ++ return NULL;
2279 ++
2280 + /* Skip over the ADDIP header and find the Address parameter */
2281 + param = (union sctp_addr_param *)(asconf + 1);
2282 +
2283 +diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
2284 +index 7411fa4428214..fa0d96320baae 100644
2285 +--- a/net/sctp/sm_make_chunk.c
2286 ++++ b/net/sctp/sm_make_chunk.c
2287 +@@ -2150,9 +2150,16 @@ static enum sctp_ierror sctp_verify_param(struct net *net,
2288 + break;
2289 +
2290 + case SCTP_PARAM_SET_PRIMARY:
2291 +- if (ep->asconf_enable)
2292 +- break;
2293 +- goto unhandled;
2294 ++ if (!ep->asconf_enable)
2295 ++ goto unhandled;
2296 ++
2297 ++ if (ntohs(param.p->length) < sizeof(struct sctp_addip_param) +
2298 ++ sizeof(struct sctp_paramhdr)) {
2299 ++ sctp_process_inv_paramlength(asoc, param.p,
2300 ++ chunk, err_chunk);
2301 ++ retval = SCTP_IERROR_ABORT;
2302 ++ }
2303 ++ break;
2304 +
2305 + case SCTP_PARAM_HOST_NAME_ADDRESS:
2306 + /* Tell the peer, we won't support this param. */
2307 +diff --git a/tools/bootconfig/scripts/ftrace2bconf.sh b/tools/bootconfig/scripts/ftrace2bconf.sh
2308 +index a0c3bcc6da4f3..fb201d5afe2c1 100755
2309 +--- a/tools/bootconfig/scripts/ftrace2bconf.sh
2310 ++++ b/tools/bootconfig/scripts/ftrace2bconf.sh
2311 +@@ -222,8 +222,8 @@ instance_options() { # [instance-name]
2312 + emit_kv $PREFIX.cpumask = $val
2313 + fi
2314 + val=`cat $INSTANCE/tracing_on`
2315 +- if [ `echo $val | sed -e s/f//g`x != x ]; then
2316 +- emit_kv $PREFIX.tracing_on = $val
2317 ++ if [ "$val" = "0" ]; then
2318 ++ emit_kv $PREFIX.tracing_on = 0
2319 + fi
2320 +
2321 + val=
2322 +diff --git a/tools/include/linux/string.h b/tools/include/linux/string.h
2323 +index 5e9e781905edc..db5c99318c799 100644
2324 +--- a/tools/include/linux/string.h
2325 ++++ b/tools/include/linux/string.h
2326 +@@ -46,4 +46,5 @@ extern char * __must_check skip_spaces(const char *);
2327 +
2328 + extern char *strim(char *);
2329 +
2330 ++extern void *memchr_inv(const void *start, int c, size_t bytes);
2331 + #endif /* _TOOLS_LINUX_STRING_H_ */
2332 +diff --git a/tools/lib/string.c b/tools/lib/string.c
2333 +index f645343815de6..8b6892f959abd 100644
2334 +--- a/tools/lib/string.c
2335 ++++ b/tools/lib/string.c
2336 +@@ -168,3 +168,61 @@ char *strreplace(char *s, char old, char new)
2337 + *s = new;
2338 + return s;
2339 + }
2340 ++
2341 ++static void *check_bytes8(const u8 *start, u8 value, unsigned int bytes)
2342 ++{
2343 ++ while (bytes) {
2344 ++ if (*start != value)
2345 ++ return (void *)start;
2346 ++ start++;
2347 ++ bytes--;
2348 ++ }
2349 ++ return NULL;
2350 ++}
2351 ++
2352 ++/**
2353 ++ * memchr_inv - Find an unmatching character in an area of memory.
2354 ++ * @start: The memory area
2355 ++ * @c: Find a character other than c
2356 ++ * @bytes: The size of the area.
2357 ++ *
2358 ++ * returns the address of the first character other than @c, or %NULL
2359 ++ * if the whole buffer contains just @c.
2360 ++ */
2361 ++void *memchr_inv(const void *start, int c, size_t bytes)
2362 ++{
2363 ++ u8 value = c;
2364 ++ u64 value64;
2365 ++ unsigned int words, prefix;
2366 ++
2367 ++ if (bytes <= 16)
2368 ++ return check_bytes8(start, value, bytes);
2369 ++
2370 ++ value64 = value;
2371 ++ value64 |= value64 << 8;
2372 ++ value64 |= value64 << 16;
2373 ++ value64 |= value64 << 32;
2374 ++
2375 ++ prefix = (unsigned long)start % 8;
2376 ++ if (prefix) {
2377 ++ u8 *r;
2378 ++
2379 ++ prefix = 8 - prefix;
2380 ++ r = check_bytes8(start, value, prefix);
2381 ++ if (r)
2382 ++ return r;
2383 ++ start += prefix;
2384 ++ bytes -= prefix;
2385 ++ }
2386 ++
2387 ++ words = bytes / 8;
2388 ++
2389 ++ while (words) {
2390 ++ if (*(u64 *)start != value64)
2391 ++ return check_bytes8(start, value, 8);
2392 ++ start += 8;
2393 ++ words--;
2394 ++ }
2395 ++
2396 ++ return check_bytes8(start, value, bytes % 8);
2397 ++}
2398 +diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c
2399 +index 8345ff4acedf2..e5832b74a845d 100644
2400 +--- a/tools/perf/tests/bpf.c
2401 ++++ b/tools/perf/tests/bpf.c
2402 +@@ -199,7 +199,7 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
2403 + }
2404 +
2405 + if (count != expect * evlist->core.nr_entries) {
2406 +- pr_debug("BPF filter result incorrect, expected %d, got %d samples\n", expect, count);
2407 ++ pr_debug("BPF filter result incorrect, expected %d, got %d samples\n", expect * evlist->core.nr_entries, count);
2408 + goto out_delete_evlist;
2409 + }
2410 +
2411 +diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
2412 +index b1ff0c9f32daf..5e9902fa1dc8a 100644
2413 +--- a/tools/perf/util/dso.c
2414 ++++ b/tools/perf/util/dso.c
2415 +@@ -1336,6 +1336,16 @@ void dso__set_build_id(struct dso *dso, struct build_id *bid)
2416 +
2417 + bool dso__build_id_equal(const struct dso *dso, struct build_id *bid)
2418 + {
2419 ++ if (dso->bid.size > bid->size && dso->bid.size == BUILD_ID_SIZE) {
2420 ++ /*
2421 ++ * For the backward compatibility, it allows a build-id has
2422 ++ * trailing zeros.
2423 ++ */
2424 ++ return !memcmp(dso->bid.data, bid->data, bid->size) &&
2425 ++ !memchr_inv(&dso->bid.data[bid->size], 0,
2426 ++ dso->bid.size - bid->size);
2427 ++ }
2428 ++
2429 + return dso->bid.size == bid->size &&
2430 + memcmp(dso->bid.data, bid->data, dso->bid.size) == 0;
2431 + }