From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Tue, 05 Jun 2018 11:22:46
Message-Id: 1528197750.33fd88fcdbb4c65ca1d6374047292523c3c5e1dc.mpagano@gentoo
1 commit: 33fd88fcdbb4c65ca1d6374047292523c3c5e1dc
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Tue Jun 5 11:22:30 2018 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Tue Jun 5 11:22:30 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=33fd88fc
7
8 Linux patch 4.14.48
9
10 0000_README | 4 +
11 1047_linux-4.14.48.patch | 2503 ++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 2507 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index 7a38d72..8190d0f 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -231,6 +231,10 @@ Patch: 1046_linux-4.14.47.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.14.47
21
22 +Patch: 1047_linux-4.14.48.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.14.48
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1047_linux-4.14.48.patch b/1047_linux-4.14.48.patch
31 new file mode 100644
32 index 0000000..891c066
33 --- /dev/null
34 +++ b/1047_linux-4.14.48.patch
35 @@ -0,0 +1,2503 @@
36 +diff --git a/Makefile b/Makefile
37 +index d6db01a02252..7a246f1ce44e 100644
38 +--- a/Makefile
39 ++++ b/Makefile
40 +@@ -1,7 +1,7 @@
41 + # SPDX-License-Identifier: GPL-2.0
42 + VERSION = 4
43 + PATCHLEVEL = 14
44 +-SUBLEVEL = 47
45 ++SUBLEVEL = 48
46 + EXTRAVERSION =
47 + NAME = Petit Gorille
48 +
49 +@@ -369,11 +369,6 @@ HOSTCXXFLAGS := -O2 $(HOST_LFS_CFLAGS)
50 + HOSTLDFLAGS := $(HOST_LFS_LDFLAGS)
51 + HOST_LOADLIBES := $(HOST_LFS_LIBS)
52 +
53 +-ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
54 +-HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
55 +- -Wno-missing-field-initializers -fno-delete-null-pointer-checks
56 +-endif
57 +-
58 + # Make variables (CC, etc...)
59 + AS = $(CROSS_COMPILE)as
60 + LD = $(CROSS_COMPILE)ld
61 +@@ -711,7 +706,6 @@ KBUILD_CFLAGS += $(stackp-flag)
62 +
63 + ifeq ($(cc-name),clang)
64 + KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
65 +-KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
66 + KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
67 + KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
68 + KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
69 +@@ -729,9 +723,9 @@ else
70 + # These warnings generated too much noise in a regular build.
71 + # Use make W=1 to enable them (see scripts/Makefile.extrawarn)
72 + KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
73 +-KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
74 + endif
75 +
76 ++KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
77 + ifdef CONFIG_FRAME_POINTER
78 + KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
79 + else
80 +diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
81 +index 2f2d176396aa..e1ddb94a6522 100644
82 +--- a/arch/mips/kernel/process.c
83 ++++ b/arch/mips/kernel/process.c
84 +@@ -721,6 +721,10 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
85 + if (value & ~known_bits)
86 + return -EOPNOTSUPP;
87 +
88 ++ /* Setting FRE without FR is not supported. */
89 ++ if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE)
90 ++ return -EOPNOTSUPP;
91 ++
92 + /* Avoid inadvertently triggering emulation */
93 + if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
94 + !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
95 +diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
96 +index 006105fb12fe..e058cd300713 100644
97 +--- a/arch/mips/kernel/ptrace.c
98 ++++ b/arch/mips/kernel/ptrace.c
99 +@@ -809,7 +809,7 @@ long arch_ptrace(struct task_struct *child, long request,
100 + break;
101 + }
102 + #endif
103 +- tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
104 ++ tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
105 + break;
106 + case PC:
107 + tmp = regs->cp0_epc;
108 +diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
109 +index 4a157d3249ac..89026d33a07b 100644
110 +--- a/arch/mips/kernel/ptrace32.c
111 ++++ b/arch/mips/kernel/ptrace32.c
112 +@@ -108,7 +108,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
113 + addr & 1);
114 + break;
115 + }
116 +- tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
117 ++ tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
118 + break;
119 + case PC:
120 + tmp = regs->cp0_epc;
121 +diff --git a/arch/powerpc/include/asm/book3s/64/slice.h b/arch/powerpc/include/asm/book3s/64/slice.h
122 +new file mode 100644
123 +index 000000000000..db0dedab65ee
124 +--- /dev/null
125 ++++ b/arch/powerpc/include/asm/book3s/64/slice.h
126 +@@ -0,0 +1,27 @@
127 ++/* SPDX-License-Identifier: GPL-2.0 */
128 ++#ifndef _ASM_POWERPC_BOOK3S_64_SLICE_H
129 ++#define _ASM_POWERPC_BOOK3S_64_SLICE_H
130 ++
131 ++#ifdef CONFIG_PPC_MM_SLICES
132 ++
133 ++#define SLICE_LOW_SHIFT 28
134 ++#define SLICE_LOW_TOP (0x100000000ul)
135 ++#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
136 ++#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT)
137 ++
138 ++#define SLICE_HIGH_SHIFT 40
139 ++#define SLICE_NUM_HIGH (H_PGTABLE_RANGE >> SLICE_HIGH_SHIFT)
140 ++#define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT)
141 ++
142 ++#else /* CONFIG_PPC_MM_SLICES */
143 ++
144 ++#define get_slice_psize(mm, addr) ((mm)->context.user_psize)
145 ++#define slice_set_user_psize(mm, psize) \
146 ++do { \
147 ++ (mm)->context.user_psize = (psize); \
148 ++ (mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \
149 ++} while (0)
150 ++
151 ++#endif /* CONFIG_PPC_MM_SLICES */
152 ++
153 ++#endif /* _ASM_POWERPC_BOOK3S_64_SLICE_H */
154 +diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h
155 +index 5bb3dbede41a..1325e5b5f680 100644
156 +--- a/arch/powerpc/include/asm/mmu-8xx.h
157 ++++ b/arch/powerpc/include/asm/mmu-8xx.h
158 +@@ -169,6 +169,12 @@ typedef struct {
159 + unsigned int id;
160 + unsigned int active;
161 + unsigned long vdso_base;
162 ++#ifdef CONFIG_PPC_MM_SLICES
163 ++ u16 user_psize; /* page size index */
164 ++ u64 low_slices_psize; /* page size encodings */
165 ++ unsigned char high_slices_psize[0];
166 ++ unsigned long addr_limit;
167 ++#endif
168 + } mm_context_t;
169 +
170 + #define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
171 +diff --git a/arch/powerpc/include/asm/nohash/32/slice.h b/arch/powerpc/include/asm/nohash/32/slice.h
172 +new file mode 100644
173 +index 000000000000..95d532e18092
174 +--- /dev/null
175 ++++ b/arch/powerpc/include/asm/nohash/32/slice.h
176 +@@ -0,0 +1,18 @@
177 ++/* SPDX-License-Identifier: GPL-2.0 */
178 ++#ifndef _ASM_POWERPC_NOHASH_32_SLICE_H
179 ++#define _ASM_POWERPC_NOHASH_32_SLICE_H
180 ++
181 ++#ifdef CONFIG_PPC_MM_SLICES
182 ++
183 ++#define SLICE_LOW_SHIFT 28
184 ++#define SLICE_LOW_TOP (0x100000000ull)
185 ++#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
186 ++#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT)
187 ++
188 ++#define SLICE_HIGH_SHIFT 0
189 ++#define SLICE_NUM_HIGH 0ul
190 ++#define GET_HIGH_SLICE_INDEX(addr) (addr & 0)
191 ++
192 ++#endif /* CONFIG_PPC_MM_SLICES */
193 ++
194 ++#endif /* _ASM_POWERPC_NOHASH_32_SLICE_H */
195 +diff --git a/arch/powerpc/include/asm/nohash/64/slice.h b/arch/powerpc/include/asm/nohash/64/slice.h
196 +new file mode 100644
197 +index 000000000000..ad0d6e3cc1c5
198 +--- /dev/null
199 ++++ b/arch/powerpc/include/asm/nohash/64/slice.h
200 +@@ -0,0 +1,12 @@
201 ++/* SPDX-License-Identifier: GPL-2.0 */
202 ++#ifndef _ASM_POWERPC_NOHASH_64_SLICE_H
203 ++#define _ASM_POWERPC_NOHASH_64_SLICE_H
204 ++
205 ++#ifdef CONFIG_PPC_64K_PAGES
206 ++#define get_slice_psize(mm, addr) MMU_PAGE_64K
207 ++#else /* CONFIG_PPC_64K_PAGES */
208 ++#define get_slice_psize(mm, addr) MMU_PAGE_4K
209 ++#endif /* !CONFIG_PPC_64K_PAGES */
210 ++#define slice_set_user_psize(mm, psize) do { BUG(); } while (0)
211 ++
212 ++#endif /* _ASM_POWERPC_NOHASH_64_SLICE_H */
213 +diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
214 +index 8da5d4c1cab2..d5f1c41b7dba 100644
215 +--- a/arch/powerpc/include/asm/page.h
216 ++++ b/arch/powerpc/include/asm/page.h
217 +@@ -344,5 +344,6 @@ typedef struct page *pgtable_t;
218 +
219 + #include <asm-generic/memory_model.h>
220 + #endif /* __ASSEMBLY__ */
221 ++#include <asm/slice.h>
222 +
223 + #endif /* _ASM_POWERPC_PAGE_H */
224 +diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
225 +index c4d9654bd637..af04acdb873f 100644
226 +--- a/arch/powerpc/include/asm/page_64.h
227 ++++ b/arch/powerpc/include/asm/page_64.h
228 +@@ -86,65 +86,6 @@ extern u64 ppc64_pft_size;
229 +
230 + #endif /* __ASSEMBLY__ */
231 +
232 +-#ifdef CONFIG_PPC_MM_SLICES
233 +-
234 +-#define SLICE_LOW_SHIFT 28
235 +-#define SLICE_HIGH_SHIFT 40
236 +-
237 +-#define SLICE_LOW_TOP (0x100000000ul)
238 +-#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
239 +-#define SLICE_NUM_HIGH (H_PGTABLE_RANGE >> SLICE_HIGH_SHIFT)
240 +-
241 +-#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT)
242 +-#define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT)
243 +-
244 +-#ifndef __ASSEMBLY__
245 +-struct mm_struct;
246 +-
247 +-extern unsigned long slice_get_unmapped_area(unsigned long addr,
248 +- unsigned long len,
249 +- unsigned long flags,
250 +- unsigned int psize,
251 +- int topdown);
252 +-
253 +-extern unsigned int get_slice_psize(struct mm_struct *mm,
254 +- unsigned long addr);
255 +-
256 +-extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize);
257 +-extern void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
258 +- unsigned long len, unsigned int psize);
259 +-
260 +-#endif /* __ASSEMBLY__ */
261 +-#else
262 +-#define slice_init()
263 +-#ifdef CONFIG_PPC_STD_MMU_64
264 +-#define get_slice_psize(mm, addr) ((mm)->context.user_psize)
265 +-#define slice_set_user_psize(mm, psize) \
266 +-do { \
267 +- (mm)->context.user_psize = (psize); \
268 +- (mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \
269 +-} while (0)
270 +-#else /* CONFIG_PPC_STD_MMU_64 */
271 +-#ifdef CONFIG_PPC_64K_PAGES
272 +-#define get_slice_psize(mm, addr) MMU_PAGE_64K
273 +-#else /* CONFIG_PPC_64K_PAGES */
274 +-#define get_slice_psize(mm, addr) MMU_PAGE_4K
275 +-#endif /* !CONFIG_PPC_64K_PAGES */
276 +-#define slice_set_user_psize(mm, psize) do { BUG(); } while(0)
277 +-#endif /* !CONFIG_PPC_STD_MMU_64 */
278 +-
279 +-#define slice_set_range_psize(mm, start, len, psize) \
280 +- slice_set_user_psize((mm), (psize))
281 +-#endif /* CONFIG_PPC_MM_SLICES */
282 +-
283 +-#ifdef CONFIG_HUGETLB_PAGE
284 +-
285 +-#ifdef CONFIG_PPC_MM_SLICES
286 +-#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
287 +-#endif
288 +-
289 +-#endif /* !CONFIG_HUGETLB_PAGE */
290 +-
291 + #define VM_DATA_DEFAULT_FLAGS \
292 + (is_32bit_task() ? \
293 + VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)
294 +diff --git a/arch/powerpc/include/asm/slice.h b/arch/powerpc/include/asm/slice.h
295 +new file mode 100644
296 +index 000000000000..172711fadb1c
297 +--- /dev/null
298 ++++ b/arch/powerpc/include/asm/slice.h
299 +@@ -0,0 +1,42 @@
300 ++/* SPDX-License-Identifier: GPL-2.0 */
301 ++#ifndef _ASM_POWERPC_SLICE_H
302 ++#define _ASM_POWERPC_SLICE_H
303 ++
304 ++#ifdef CONFIG_PPC_BOOK3S_64
305 ++#include <asm/book3s/64/slice.h>
306 ++#elif defined(CONFIG_PPC64)
307 ++#include <asm/nohash/64/slice.h>
308 ++#elif defined(CONFIG_PPC_MMU_NOHASH)
309 ++#include <asm/nohash/32/slice.h>
310 ++#endif
311 ++
312 ++#ifdef CONFIG_PPC_MM_SLICES
313 ++
314 ++#ifdef CONFIG_HUGETLB_PAGE
315 ++#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
316 ++#endif
317 ++#define HAVE_ARCH_UNMAPPED_AREA
318 ++#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
319 ++
320 ++#ifndef __ASSEMBLY__
321 ++
322 ++struct mm_struct;
323 ++
324 ++unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
325 ++ unsigned long flags, unsigned int psize,
326 ++ int topdown);
327 ++
328 ++unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr);
329 ++
330 ++void slice_set_user_psize(struct mm_struct *mm, unsigned int psize);
331 ++void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
332 ++ unsigned long len, unsigned int psize);
333 ++#endif /* __ASSEMBLY__ */
334 ++
335 ++#else /* CONFIG_PPC_MM_SLICES */
336 ++
337 ++#define slice_set_range_psize(mm, start, len, psize) \
338 ++ slice_set_user_psize((mm), (psize))
339 ++#endif /* CONFIG_PPC_MM_SLICES */
340 ++
341 ++#endif /* _ASM_POWERPC_SLICE_H */
342 +diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
343 +index b4fcb54b9686..008447664643 100644
344 +--- a/arch/powerpc/kernel/setup-common.c
345 ++++ b/arch/powerpc/kernel/setup-common.c
346 +@@ -915,6 +915,8 @@ void __init setup_arch(char **cmdline_p)
347 + #ifdef CONFIG_PPC_MM_SLICES
348 + #ifdef CONFIG_PPC64
349 + init_mm.context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
350 ++#elif defined(CONFIG_PPC_8xx)
351 ++ init_mm.context.addr_limit = DEFAULT_MAP_WINDOW;
352 + #else
353 + #error "context.addr_limit not initialized."
354 + #endif
355 +diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c
356 +index f29212e40f40..0be77709446c 100644
357 +--- a/arch/powerpc/mm/8xx_mmu.c
358 ++++ b/arch/powerpc/mm/8xx_mmu.c
359 +@@ -192,7 +192,7 @@ void set_context(unsigned long id, pgd_t *pgd)
360 + mtspr(SPRN_M_TW, __pa(pgd) - offset);
361 +
362 + /* Update context */
363 +- mtspr(SPRN_M_CASID, id);
364 ++ mtspr(SPRN_M_CASID, id - 1);
365 + /* sync */
366 + mb();
367 + }
368 +diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
369 +index 1571a498a33f..4c9e5f9c7a44 100644
370 +--- a/arch/powerpc/mm/hugetlbpage.c
371 ++++ b/arch/powerpc/mm/hugetlbpage.c
372 +@@ -552,9 +552,11 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
373 + struct hstate *hstate = hstate_file(file);
374 + int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
375 +
376 ++#ifdef CONFIG_PPC_RADIX_MMU
377 + if (radix_enabled())
378 + return radix__hugetlb_get_unmapped_area(file, addr, len,
379 + pgoff, flags);
380 ++#endif
381 + return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
382 + }
383 + #endif
384 +diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
385 +index 4554d6527682..e2b28b3a512e 100644
386 +--- a/arch/powerpc/mm/mmu_context_nohash.c
387 ++++ b/arch/powerpc/mm/mmu_context_nohash.c
388 +@@ -331,6 +331,20 @@ int init_new_context(struct task_struct *t, struct mm_struct *mm)
389 + {
390 + pr_hard("initing context for mm @%p\n", mm);
391 +
392 ++#ifdef CONFIG_PPC_MM_SLICES
393 ++ if (!mm->context.addr_limit)
394 ++ mm->context.addr_limit = DEFAULT_MAP_WINDOW;
395 ++
396 ++ /*
397 ++ * We have MMU_NO_CONTEXT set to be ~0. Hence check
398 ++ * explicitly against context.id == 0. This ensures that we properly
399 ++ * initialize context slice details for newly allocated mm's (which will
400 ++ * have id == 0) and don't alter context slice inherited via fork (which
401 ++ * will have id != 0).
402 ++ */
403 ++ if (mm->context.id == 0)
404 ++ slice_set_user_psize(mm, mmu_virtual_psize);
405 ++#endif
406 + mm->context.id = MMU_NO_CONTEXT;
407 + mm->context.active = 0;
408 + return 0;
409 +@@ -428,8 +442,8 @@ void __init mmu_context_init(void)
410 + * -- BenH
411 + */
412 + if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
413 +- first_context = 0;
414 +- last_context = 15;
415 ++ first_context = 1;
416 ++ last_context = 16;
417 + no_selective_tlbil = true;
418 + } else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
419 + first_context = 1;
420 +diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
421 +index a4f93699194b..8baaa6c6f21c 100644
422 +--- a/arch/powerpc/mm/slice.c
423 ++++ b/arch/powerpc/mm/slice.c
424 +@@ -73,10 +73,12 @@ static void slice_range_to_mask(unsigned long start, unsigned long len,
425 + unsigned long end = start + len - 1;
426 +
427 + ret->low_slices = 0;
428 +- bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
429 ++ if (SLICE_NUM_HIGH)
430 ++ bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
431 +
432 + if (start < SLICE_LOW_TOP) {
433 +- unsigned long mend = min(end, (SLICE_LOW_TOP - 1));
434 ++ unsigned long mend = min(end,
435 ++ (unsigned long)(SLICE_LOW_TOP - 1));
436 +
437 + ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
438 + - (1u << GET_LOW_SLICE_INDEX(start));
439 +@@ -113,11 +115,13 @@ static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
440 + unsigned long start = slice << SLICE_HIGH_SHIFT;
441 + unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);
442 +
443 ++#ifdef CONFIG_PPC64
444 + /* Hack, so that each addresses is controlled by exactly one
445 + * of the high or low area bitmaps, the first high area starts
446 + * at 4GB, not 0 */
447 + if (start == 0)
448 + start = SLICE_LOW_TOP;
449 ++#endif
450 +
451 + return !slice_area_is_free(mm, start, end - start);
452 + }
453 +@@ -127,7 +131,8 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
454 + unsigned long i;
455 +
456 + ret->low_slices = 0;
457 +- bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
458 ++ if (SLICE_NUM_HIGH)
459 ++ bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
460 +
461 + for (i = 0; i < SLICE_NUM_LOW; i++)
462 + if (!slice_low_has_vma(mm, i))
463 +@@ -149,7 +154,8 @@ static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_ma
464 + u64 lpsizes;
465 +
466 + ret->low_slices = 0;
467 +- bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
468 ++ if (SLICE_NUM_HIGH)
469 ++ bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
470 +
471 + lpsizes = mm->context.low_slices_psize;
472 + for (i = 0; i < SLICE_NUM_LOW; i++)
473 +@@ -171,6 +177,10 @@ static int slice_check_fit(struct mm_struct *mm,
474 + DECLARE_BITMAP(result, SLICE_NUM_HIGH);
475 + unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.addr_limit);
476 +
477 ++ if (!SLICE_NUM_HIGH)
478 ++ return (mask.low_slices & available.low_slices) ==
479 ++ mask.low_slices;
480 ++
481 + bitmap_and(result, mask.high_slices,
482 + available.high_slices, slice_count);
483 +
484 +@@ -180,6 +190,7 @@ static int slice_check_fit(struct mm_struct *mm,
485 +
486 + static void slice_flush_segments(void *parm)
487 + {
488 ++#ifdef CONFIG_PPC64
489 + struct mm_struct *mm = parm;
490 + unsigned long flags;
491 +
492 +@@ -191,6 +202,7 @@ static void slice_flush_segments(void *parm)
493 + local_irq_save(flags);
494 + slb_flush_and_rebolt();
495 + local_irq_restore(flags);
496 ++#endif
497 + }
498 +
499 + static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
500 +@@ -379,21 +391,21 @@ static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
501 +
502 + static inline void slice_or_mask(struct slice_mask *dst, struct slice_mask *src)
503 + {
504 +- DECLARE_BITMAP(result, SLICE_NUM_HIGH);
505 +-
506 + dst->low_slices |= src->low_slices;
507 +- bitmap_or(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
508 +- bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH);
509 ++ if (!SLICE_NUM_HIGH)
510 ++ return;
511 ++ bitmap_or(dst->high_slices, dst->high_slices, src->high_slices,
512 ++ SLICE_NUM_HIGH);
513 + }
514 +
515 + static inline void slice_andnot_mask(struct slice_mask *dst, struct slice_mask *src)
516 + {
517 +- DECLARE_BITMAP(result, SLICE_NUM_HIGH);
518 +-
519 + dst->low_slices &= ~src->low_slices;
520 +
521 +- bitmap_andnot(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
522 +- bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH);
523 ++ if (!SLICE_NUM_HIGH)
524 ++ return;
525 ++ bitmap_andnot(dst->high_slices, dst->high_slices, src->high_slices,
526 ++ SLICE_NUM_HIGH);
527 + }
528 +
529 + #ifdef CONFIG_PPC_64K_PAGES
530 +@@ -441,14 +453,17 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
531 + * init different masks
532 + */
533 + mask.low_slices = 0;
534 +- bitmap_zero(mask.high_slices, SLICE_NUM_HIGH);
535 +
536 + /* silence stupid warning */;
537 + potential_mask.low_slices = 0;
538 +- bitmap_zero(potential_mask.high_slices, SLICE_NUM_HIGH);
539 +
540 + compat_mask.low_slices = 0;
541 +- bitmap_zero(compat_mask.high_slices, SLICE_NUM_HIGH);
542 ++
543 ++ if (SLICE_NUM_HIGH) {
544 ++ bitmap_zero(mask.high_slices, SLICE_NUM_HIGH);
545 ++ bitmap_zero(potential_mask.high_slices, SLICE_NUM_HIGH);
546 ++ bitmap_zero(compat_mask.high_slices, SLICE_NUM_HIGH);
547 ++ }
548 +
549 + /* Sanity checks */
550 + BUG_ON(mm->task_size == 0);
551 +@@ -586,7 +601,9 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
552 + convert:
553 + slice_andnot_mask(&mask, &good_mask);
554 + slice_andnot_mask(&mask, &compat_mask);
555 +- if (mask.low_slices || !bitmap_empty(mask.high_slices, SLICE_NUM_HIGH)) {
556 ++ if (mask.low_slices ||
557 ++ (SLICE_NUM_HIGH &&
558 ++ !bitmap_empty(mask.high_slices, SLICE_NUM_HIGH))) {
559 + slice_convert(mm, mask, psize);
560 + if (psize > MMU_PAGE_BASE)
561 + on_each_cpu(slice_flush_segments, mm, 1);
562 +diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
563 +index a78f255111f2..3ce376b42330 100644
564 +--- a/arch/powerpc/platforms/Kconfig.cputype
565 ++++ b/arch/powerpc/platforms/Kconfig.cputype
566 +@@ -325,6 +325,7 @@ config PPC_BOOK3E_MMU
567 + config PPC_MM_SLICES
568 + bool
569 + default y if PPC_STD_MMU_64
570 ++ default y if PPC_8xx && HUGETLB_PAGE
571 + default n
572 +
573 + config PPC_HAVE_PMU_SUPPORT
574 +diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
575 +index 259c75d7a2a0..dbcb01006749 100644
576 +--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
577 ++++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
578 +@@ -94,6 +94,11 @@ static struct smca_bank_name smca_names[] = {
579 + [SMCA_SMU] = { "smu", "System Management Unit" },
580 + };
581 +
582 ++static u32 smca_bank_addrs[MAX_NR_BANKS][NR_BLOCKS] __ro_after_init =
583 ++{
584 ++ [0 ... MAX_NR_BANKS - 1] = { [0 ... NR_BLOCKS - 1] = -1 }
585 ++};
586 ++
587 + const char *smca_get_name(enum smca_bank_types t)
588 + {
589 + if (t >= N_SMCA_BANK_TYPES)
590 +@@ -429,52 +434,51 @@ static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
591 + wrmsr(MSR_CU_DEF_ERR, low, high);
592 + }
593 +
594 +-static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 high,
595 +- unsigned int bank, unsigned int block)
596 ++static u32 smca_get_block_address(unsigned int cpu, unsigned int bank,
597 ++ unsigned int block)
598 + {
599 +- u32 addr = 0, offset = 0;
600 ++ u32 low, high;
601 ++ u32 addr = 0;
602 +
603 +- if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
604 ++ if (smca_get_bank_type(bank) == SMCA_RESERVED)
605 + return addr;
606 +
607 +- /* Get address from already initialized block. */
608 +- if (per_cpu(threshold_banks, cpu)) {
609 +- struct threshold_bank *bankp = per_cpu(threshold_banks, cpu)[bank];
610 ++ if (!block)
611 ++ return MSR_AMD64_SMCA_MCx_MISC(bank);
612 +
613 +- if (bankp && bankp->blocks) {
614 +- struct threshold_block *blockp = &bankp->blocks[block];
615 ++ /* Check our cache first: */
616 ++ if (smca_bank_addrs[bank][block] != -1)
617 ++ return smca_bank_addrs[bank][block];
618 +
619 +- if (blockp)
620 +- return blockp->address;
621 +- }
622 +- }
623 ++ /*
624 ++ * For SMCA enabled processors, BLKPTR field of the first MISC register
625 ++ * (MCx_MISC0) indicates presence of additional MISC regs set (MISC1-4).
626 ++ */
627 ++ if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
628 ++ goto out;
629 +
630 +- if (mce_flags.smca) {
631 +- if (smca_get_bank_type(bank) == SMCA_RESERVED)
632 +- return addr;
633 ++ if (!(low & MCI_CONFIG_MCAX))
634 ++ goto out;
635 +
636 +- if (!block) {
637 +- addr = MSR_AMD64_SMCA_MCx_MISC(bank);
638 +- } else {
639 +- /*
640 +- * For SMCA enabled processors, BLKPTR field of the
641 +- * first MISC register (MCx_MISC0) indicates presence of
642 +- * additional MISC register set (MISC1-4).
643 +- */
644 +- u32 low, high;
645 ++ if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
646 ++ (low & MASK_BLKPTR_LO))
647 ++ addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
648 +
649 +- if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
650 +- return addr;
651 ++out:
652 ++ smca_bank_addrs[bank][block] = addr;
653 ++ return addr;
654 ++}
655 +
656 +- if (!(low & MCI_CONFIG_MCAX))
657 +- return addr;
658 ++static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 high,
659 ++ unsigned int bank, unsigned int block)
660 ++{
661 ++ u32 addr = 0, offset = 0;
662 +
663 +- if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
664 +- (low & MASK_BLKPTR_LO))
665 +- addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
666 +- }
667 ++ if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
668 + return addr;
669 +- }
670 ++
671 ++ if (mce_flags.smca)
672 ++ return smca_get_block_address(cpu, bank, block);
673 +
674 + /* Fall back to method we used for older processors: */
675 + switch (block) {
676 +diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
677 +index 4a038dcf5361..bc1cb284111c 100644
678 +--- a/drivers/dma-buf/dma-buf.c
679 ++++ b/drivers/dma-buf/dma-buf.c
680 +@@ -625,7 +625,7 @@ EXPORT_SYMBOL_GPL(dma_buf_detach);
681 + struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
682 + enum dma_data_direction direction)
683 + {
684 +- struct sg_table *sg_table = ERR_PTR(-EINVAL);
685 ++ struct sg_table *sg_table;
686 +
687 + might_sleep();
688 +
689 +diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
690 +index b33935fcf428..e6c6994e74ba 100644
691 +--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
692 ++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
693 +@@ -176,10 +176,10 @@ int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
694 + cz_dpm_powerup_uvd(hwmgr);
695 + cgs_set_clockgating_state(hwmgr->device,
696 + AMD_IP_BLOCK_TYPE_UVD,
697 +- AMD_PG_STATE_UNGATE);
698 ++ AMD_CG_STATE_UNGATE);
699 + cgs_set_powergating_state(hwmgr->device,
700 + AMD_IP_BLOCK_TYPE_UVD,
701 +- AMD_CG_STATE_UNGATE);
702 ++ AMD_PG_STATE_UNGATE);
703 + cz_dpm_update_uvd_dpm(hwmgr, false);
704 + }
705 +
706 +@@ -208,11 +208,11 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
707 + cgs_set_clockgating_state(
708 + hwmgr->device,
709 + AMD_IP_BLOCK_TYPE_VCE,
710 +- AMD_PG_STATE_UNGATE);
711 ++ AMD_CG_STATE_UNGATE);
712 + cgs_set_powergating_state(
713 + hwmgr->device,
714 + AMD_IP_BLOCK_TYPE_VCE,
715 +- AMD_CG_STATE_UNGATE);
716 ++ AMD_PG_STATE_UNGATE);
717 + cz_dpm_update_vce_dpm(hwmgr);
718 + cz_enable_disable_vce_dpm(hwmgr, true);
719 + return 0;
720 +diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
721 +index 261b828ad590..2f3509be226f 100644
722 +--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
723 ++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
724 +@@ -162,7 +162,7 @@ int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
725 + AMD_CG_STATE_UNGATE);
726 + cgs_set_powergating_state(hwmgr->device,
727 + AMD_IP_BLOCK_TYPE_UVD,
728 +- AMD_CG_STATE_UNGATE);
729 ++ AMD_PG_STATE_UNGATE);
730 + smu7_update_uvd_dpm(hwmgr, false);
731 + }
732 +
733 +diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
734 +index 08af8d6b844b..493d8f56d14e 100644
735 +--- a/drivers/gpu/drm/drm_dp_helper.c
736 ++++ b/drivers/gpu/drm/drm_dp_helper.c
737 +@@ -1139,6 +1139,7 @@ int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE])
738 + static const u16 psr_setup_time_us[] = {
739 + PSR_SETUP_TIME(330),
740 + PSR_SETUP_TIME(275),
741 ++ PSR_SETUP_TIME(220),
742 + PSR_SETUP_TIME(165),
743 + PSR_SETUP_TIME(110),
744 + PSR_SETUP_TIME(55),
745 +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
746 +index 3b2c0538e48d..90359c7954c8 100644
747 +--- a/drivers/gpu/drm/i915/i915_gem.c
748 ++++ b/drivers/gpu/drm/i915/i915_gem.c
749 +@@ -3378,24 +3378,12 @@ static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
750 + return 0;
751 + }
752 +
753 +-static int wait_for_engine(struct intel_engine_cs *engine, int timeout_ms)
754 +-{
755 +- return wait_for(intel_engine_is_idle(engine), timeout_ms);
756 +-}
757 +-
758 + static int wait_for_engines(struct drm_i915_private *i915)
759 + {
760 +- struct intel_engine_cs *engine;
761 +- enum intel_engine_id id;
762 +-
763 +- for_each_engine(engine, i915, id) {
764 +- if (GEM_WARN_ON(wait_for_engine(engine, 50))) {
765 +- i915_gem_set_wedged(i915);
766 +- return -EIO;
767 +- }
768 +-
769 +- GEM_BUG_ON(intel_engine_get_seqno(engine) !=
770 +- intel_engine_last_submit(engine));
771 ++ if (wait_for(intel_engines_are_idle(i915), 50)) {
772 ++ DRM_ERROR("Failed to idle engines, declaring wedged!\n");
773 ++ i915_gem_set_wedged(i915);
774 ++ return -EIO;
775 + }
776 +
777 + return 0;
778 +@@ -4575,7 +4563,7 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
779 + ret = i915_gem_wait_for_idle(dev_priv,
780 + I915_WAIT_INTERRUPTIBLE |
781 + I915_WAIT_LOCKED);
782 +- if (ret)
783 ++ if (ret && ret != -EIO)
784 + goto err_unlock;
785 +
786 + assert_kernel_context_is_current(dev_priv);
787 +@@ -4619,11 +4607,12 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
788 + * machine in an unusable condition.
789 + */
790 + i915_gem_sanitize(dev_priv);
791 +- goto out_rpm_put;
792 ++
793 ++ intel_runtime_pm_put(dev_priv);
794 ++ return 0;
795 +
796 + err_unlock:
797 + mutex_unlock(&dev->struct_mutex);
798 +-out_rpm_put:
799 + intel_runtime_pm_put(dev_priv);
800 + return ret;
801 + }
802 +diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
803 +index 240308f1b6dd..dae4e22a2c3f 100644
804 +--- a/drivers/gpu/drm/i915/intel_lvds.c
805 ++++ b/drivers/gpu/drm/i915/intel_lvds.c
806 +@@ -565,6 +565,36 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
807 + return NOTIFY_OK;
808 + }
809 +
810 ++static int
811 ++intel_lvds_connector_register(struct drm_connector *connector)
812 ++{
813 ++ struct intel_lvds_connector *lvds = to_lvds_connector(connector);
814 ++ int ret;
815 ++
816 ++ ret = intel_connector_register(connector);
817 ++ if (ret)
818 ++ return ret;
819 ++
820 ++ lvds->lid_notifier.notifier_call = intel_lid_notify;
821 ++ if (acpi_lid_notifier_register(&lvds->lid_notifier)) {
822 ++ DRM_DEBUG_KMS("lid notifier registration failed\n");
823 ++ lvds->lid_notifier.notifier_call = NULL;
824 ++ }
825 ++
826 ++ return 0;
827 ++}
828 ++
829 ++static void
830 ++intel_lvds_connector_unregister(struct drm_connector *connector)
831 ++{
832 ++ struct intel_lvds_connector *lvds = to_lvds_connector(connector);
833 ++
834 ++ if (lvds->lid_notifier.notifier_call)
835 ++ acpi_lid_notifier_unregister(&lvds->lid_notifier);
836 ++
837 ++ intel_connector_unregister(connector);
838 ++}
839 ++
840 + /**
841 + * intel_lvds_destroy - unregister and free LVDS structures
842 + * @connector: connector to free
843 +@@ -577,9 +607,6 @@ static void intel_lvds_destroy(struct drm_connector *connector)
844 + struct intel_lvds_connector *lvds_connector =
845 + to_lvds_connector(connector);
846 +
847 +- if (lvds_connector->lid_notifier.notifier_call)
848 +- acpi_lid_notifier_unregister(&lvds_connector->lid_notifier);
849 +-
850 + if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
851 + kfree(lvds_connector->base.edid);
852 +
853 +@@ -600,8 +627,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
854 + .fill_modes = drm_helper_probe_single_connector_modes,
855 + .atomic_get_property = intel_digital_connector_atomic_get_property,
856 + .atomic_set_property = intel_digital_connector_atomic_set_property,
857 +- .late_register = intel_connector_register,
858 +- .early_unregister = intel_connector_unregister,
859 ++ .late_register = intel_lvds_connector_register,
860 ++ .early_unregister = intel_lvds_connector_unregister,
861 + .destroy = intel_lvds_destroy,
862 + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
863 + .atomic_duplicate_state = intel_digital_connector_duplicate_state,
864 +@@ -818,6 +845,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
865 + DMI_EXACT_MATCH(DMI_BOARD_NAME, "D525MW"),
866 + },
867 + },
868 ++ {
869 ++ .callback = intel_no_lvds_dmi_callback,
870 ++ .ident = "Radiant P845",
871 ++ .matches = {
872 ++ DMI_MATCH(DMI_SYS_VENDOR, "Radiant Systems Inc"),
873 ++ DMI_MATCH(DMI_PRODUCT_NAME, "P845"),
874 ++ },
875 ++ },
876 +
877 + { } /* terminating entry */
878 + };
879 +@@ -1149,12 +1184,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
880 +
881 + lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK;
882 +
883 +- lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
884 +- if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
885 +- DRM_DEBUG_KMS("lid notifier registration failed\n");
886 +- lvds_connector->lid_notifier.notifier_call = NULL;
887 +- }
888 +-
889 + return;
890 +
891 + failed:
892 +diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
893 +index dfb57eaa9f22..58ac786634dc 100644
894 +--- a/drivers/hwtracing/intel_th/msu.c
895 ++++ b/drivers/hwtracing/intel_th/msu.c
896 +@@ -741,8 +741,8 @@ static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
897 + /* Reset the page to write-back before releasing */
898 + set_memory_wb((unsigned long)win->block[i].bdesc, 1);
899 + #endif
900 +- dma_free_coherent(msc_dev(msc), size, win->block[i].bdesc,
901 +- win->block[i].addr);
902 ++ dma_free_coherent(msc_dev(msc)->parent->parent, size,
903 ++ win->block[i].bdesc, win->block[i].addr);
904 + }
905 + kfree(win);
906 +
907 +@@ -777,7 +777,7 @@ static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
908 + /* Reset the page to write-back before releasing */
909 + set_memory_wb((unsigned long)win->block[i].bdesc, 1);
910 + #endif
911 +- dma_free_coherent(msc_dev(win->msc), PAGE_SIZE,
912 ++ dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
913 + win->block[i].bdesc, win->block[i].addr);
914 + }
915 +
916 +diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
917 +index f129869e05a9..736862967e32 100644
918 +--- a/drivers/hwtracing/stm/core.c
919 ++++ b/drivers/hwtracing/stm/core.c
920 +@@ -27,6 +27,7 @@
921 + #include <linux/stm.h>
922 + #include <linux/fs.h>
923 + #include <linux/mm.h>
924 ++#include <linux/vmalloc.h>
925 + #include "stm.h"
926 +
927 + #include <uapi/linux/stm.h>
928 +@@ -682,7 +683,7 @@ static void stm_device_release(struct device *dev)
929 + {
930 + struct stm_device *stm = to_stm_device(dev);
931 +
932 +- kfree(stm);
933 ++ vfree(stm);
934 + }
935 +
936 + int stm_register_device(struct device *parent, struct stm_data *stm_data,
937 +@@ -699,7 +700,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
938 + return -EINVAL;
939 +
940 + nmasters = stm_data->sw_end - stm_data->sw_start + 1;
941 +- stm = kzalloc(sizeof(*stm) + nmasters * sizeof(void *), GFP_KERNEL);
942 ++ stm = vzalloc(sizeof(*stm) + nmasters * sizeof(void *));
943 + if (!stm)
944 + return -ENOMEM;
945 +
946 +@@ -752,7 +753,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
947 + /* matches device_initialize() above */
948 + put_device(&stm->dev);
949 + err_free:
950 +- kfree(stm);
951 ++ vfree(stm);
952 +
953 + return err;
954 + }
955 +diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
956 +index 1d13bf03c758..369a2c632e46 100644
957 +--- a/drivers/iio/adc/Kconfig
958 ++++ b/drivers/iio/adc/Kconfig
959 +@@ -158,6 +158,7 @@ config AT91_SAMA5D2_ADC
960 + tristate "Atmel AT91 SAMA5D2 ADC"
961 + depends on ARCH_AT91 || COMPILE_TEST
962 + depends on HAS_IOMEM
963 ++ select IIO_BUFFER
964 + select IIO_TRIGGERED_BUFFER
965 + help
966 + Say yes here to build support for Atmel SAMA5D2 ADC which is
967 +diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
968 +index 47c3d7f32900..07246a6037e3 100644
969 +--- a/drivers/iio/adc/ad7793.c
970 ++++ b/drivers/iio/adc/ad7793.c
971 +@@ -348,55 +348,6 @@ static const u16 ad7793_sample_freq_avail[16] = {0, 470, 242, 123, 62, 50, 39,
972 + static const u16 ad7797_sample_freq_avail[16] = {0, 0, 0, 123, 62, 50, 0,
973 + 33, 0, 17, 16, 12, 10, 8, 6, 4};
974 +
975 +-static ssize_t ad7793_read_frequency(struct device *dev,
976 +- struct device_attribute *attr,
977 +- char *buf)
978 +-{
979 +- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
980 +- struct ad7793_state *st = iio_priv(indio_dev);
981 +-
982 +- return sprintf(buf, "%d\n",
983 +- st->chip_info->sample_freq_avail[AD7793_MODE_RATE(st->mode)]);
984 +-}
985 +-
986 +-static ssize_t ad7793_write_frequency(struct device *dev,
987 +- struct device_attribute *attr,
988 +- const char *buf,
989 +- size_t len)
990 +-{
991 +- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
992 +- struct ad7793_state *st = iio_priv(indio_dev);
993 +- long lval;
994 +- int i, ret;
995 +-
996 +- ret = kstrtol(buf, 10, &lval);
997 +- if (ret)
998 +- return ret;
999 +-
1000 +- if (lval == 0)
1001 +- return -EINVAL;
1002 +-
1003 +- for (i = 0; i < 16; i++)
1004 +- if (lval == st->chip_info->sample_freq_avail[i])
1005 +- break;
1006 +- if (i == 16)
1007 +- return -EINVAL;
1008 +-
1009 +- ret = iio_device_claim_direct_mode(indio_dev);
1010 +- if (ret)
1011 +- return ret;
1012 +- st->mode &= ~AD7793_MODE_RATE(-1);
1013 +- st->mode |= AD7793_MODE_RATE(i);
1014 +- ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode), st->mode);
1015 +- iio_device_release_direct_mode(indio_dev);
1016 +-
1017 +- return len;
1018 +-}
1019 +-
1020 +-static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
1021 +- ad7793_read_frequency,
1022 +- ad7793_write_frequency);
1023 +-
1024 + static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
1025 + "470 242 123 62 50 39 33 19 17 16 12 10 8 6 4");
1026 +
1027 +@@ -424,7 +375,6 @@ static IIO_DEVICE_ATTR_NAMED(in_m_in_scale_available,
1028 + ad7793_show_scale_available, NULL, 0);
1029 +
1030 + static struct attribute *ad7793_attributes[] = {
1031 +- &iio_dev_attr_sampling_frequency.dev_attr.attr,
1032 + &iio_const_attr_sampling_frequency_available.dev_attr.attr,
1033 + &iio_dev_attr_in_m_in_scale_available.dev_attr.attr,
1034 + NULL
1035 +@@ -435,7 +385,6 @@ static const struct attribute_group ad7793_attribute_group = {
1036 + };
1037 +
1038 + static struct attribute *ad7797_attributes[] = {
1039 +- &iio_dev_attr_sampling_frequency.dev_attr.attr,
1040 + &iio_const_attr_sampling_frequency_available_ad7797.dev_attr.attr,
1041 + NULL
1042 + };
1043 +@@ -505,6 +454,10 @@ static int ad7793_read_raw(struct iio_dev *indio_dev,
1044 + *val -= offset;
1045 + }
1046 + return IIO_VAL_INT;
1047 ++ case IIO_CHAN_INFO_SAMP_FREQ:
1048 ++ *val = st->chip_info
1049 ++ ->sample_freq_avail[AD7793_MODE_RATE(st->mode)];
1050 ++ return IIO_VAL_INT;
1051 + }
1052 + return -EINVAL;
1053 + }
1054 +@@ -542,6 +495,26 @@ static int ad7793_write_raw(struct iio_dev *indio_dev,
1055 + break;
1056 + }
1057 + break;
1058 ++ case IIO_CHAN_INFO_SAMP_FREQ:
1059 ++ if (!val) {
1060 ++ ret = -EINVAL;
1061 ++ break;
1062 ++ }
1063 ++
1064 ++ for (i = 0; i < 16; i++)
1065 ++ if (val == st->chip_info->sample_freq_avail[i])
1066 ++ break;
1067 ++
1068 ++ if (i == 16) {
1069 ++ ret = -EINVAL;
1070 ++ break;
1071 ++ }
1072 ++
1073 ++ st->mode &= ~AD7793_MODE_RATE(-1);
1074 ++ st->mode |= AD7793_MODE_RATE(i);
1075 ++ ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode),
1076 ++ st->mode);
1077 ++ break;
1078 + default:
1079 + ret = -EINVAL;
1080 + }
1081 +diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
1082 +index ff03324dee13..0a7289571b68 100644
1083 +--- a/drivers/iio/buffer/industrialio-buffer-dma.c
1084 ++++ b/drivers/iio/buffer/industrialio-buffer-dma.c
1085 +@@ -587,7 +587,7 @@ EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum);
1086 + * Should be used as the set_length callback for iio_buffer_access_ops
1087 + * struct for DMA buffers.
1088 + */
1089 +-int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length)
1090 ++int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length)
1091 + {
1092 + /* Avoid an invalid state */
1093 + if (length < 2)
1094 +diff --git a/drivers/iio/buffer/kfifo_buf.c b/drivers/iio/buffer/kfifo_buf.c
1095 +index 047fe757ab97..70c302a93d7f 100644
1096 +--- a/drivers/iio/buffer/kfifo_buf.c
1097 ++++ b/drivers/iio/buffer/kfifo_buf.c
1098 +@@ -22,11 +22,18 @@ struct iio_kfifo {
1099 + #define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer)
1100 +
1101 + static inline int __iio_allocate_kfifo(struct iio_kfifo *buf,
1102 +- int bytes_per_datum, int length)
1103 ++ size_t bytes_per_datum, unsigned int length)
1104 + {
1105 + if ((length == 0) || (bytes_per_datum == 0))
1106 + return -EINVAL;
1107 +
1108 ++ /*
1109 ++ * Make sure we don't overflow an unsigned int after kfifo rounds up to
1110 ++ * the next power of 2.
1111 ++ */
1112 ++ if (roundup_pow_of_two(length) > UINT_MAX / bytes_per_datum)
1113 ++ return -EINVAL;
1114 ++
1115 + return __kfifo_alloc((struct __kfifo *)&buf->kf, length,
1116 + bytes_per_datum, GFP_KERNEL);
1117 + }
1118 +@@ -67,7 +74,7 @@ static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd)
1119 + return 0;
1120 + }
1121 +
1122 +-static int iio_set_length_kfifo(struct iio_buffer *r, int length)
1123 ++static int iio_set_length_kfifo(struct iio_buffer *r, unsigned int length)
1124 + {
1125 + /* Avoid an invalid state */
1126 + if (length < 2)
1127 +diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
1128 +index 77515638c55c..896cfd9303b0 100644
1129 +--- a/drivers/infiniband/core/cache.c
1130 ++++ b/drivers/infiniband/core/cache.c
1131 +@@ -434,7 +434,7 @@ static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
1132 + return -EINVAL;
1133 +
1134 + if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
1135 +- return -EAGAIN;
1136 ++ return -EINVAL;
1137 +
1138 + memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
1139 + if (attr) {
1140 +diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c
1141 +index 29f99529b187..cfcb32559925 100644
1142 +--- a/drivers/input/mouse/elan_i2c_smbus.c
1143 ++++ b/drivers/input/mouse/elan_i2c_smbus.c
1144 +@@ -130,7 +130,7 @@ static int elan_smbus_get_baseline_data(struct i2c_client *client,
1145 + bool max_baseline, u8 *value)
1146 + {
1147 + int error;
1148 +- u8 val[3];
1149 ++ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
1150 +
1151 + error = i2c_smbus_read_block_data(client,
1152 + max_baseline ?
1153 +@@ -149,7 +149,7 @@ static int elan_smbus_get_version(struct i2c_client *client,
1154 + bool iap, u8 *version)
1155 + {
1156 + int error;
1157 +- u8 val[3];
1158 ++ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
1159 +
1160 + error = i2c_smbus_read_block_data(client,
1161 + iap ? ETP_SMBUS_IAP_VERSION_CMD :
1162 +@@ -170,7 +170,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client,
1163 + u8 *clickpad)
1164 + {
1165 + int error;
1166 +- u8 val[3];
1167 ++ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
1168 +
1169 + error = i2c_smbus_read_block_data(client,
1170 + ETP_SMBUS_SM_VERSION_CMD, val);
1171 +@@ -188,7 +188,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client,
1172 + static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id)
1173 + {
1174 + int error;
1175 +- u8 val[3];
1176 ++ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
1177 +
1178 + error = i2c_smbus_read_block_data(client,
1179 + ETP_SMBUS_UNIQUEID_CMD, val);
1180 +@@ -205,7 +205,7 @@ static int elan_smbus_get_checksum(struct i2c_client *client,
1181 + bool iap, u16 *csum)
1182 + {
1183 + int error;
1184 +- u8 val[3];
1185 ++ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
1186 +
1187 + error = i2c_smbus_read_block_data(client,
1188 + iap ? ETP_SMBUS_FW_CHECKSUM_CMD :
1189 +@@ -226,7 +226,7 @@ static int elan_smbus_get_max(struct i2c_client *client,
1190 + {
1191 + int ret;
1192 + int error;
1193 +- u8 val[3];
1194 ++ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
1195 +
1196 + ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RANGE_CMD, val);
1197 + if (ret != 3) {
1198 +@@ -246,7 +246,7 @@ static int elan_smbus_get_resolution(struct i2c_client *client,
1199 + {
1200 + int ret;
1201 + int error;
1202 +- u8 val[3];
1203 ++ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
1204 +
1205 + ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RESOLUTION_CMD, val);
1206 + if (ret != 3) {
1207 +@@ -267,7 +267,7 @@ static int elan_smbus_get_num_traces(struct i2c_client *client,
1208 + {
1209 + int ret;
1210 + int error;
1211 +- u8 val[3];
1212 ++ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
1213 +
1214 + ret = i2c_smbus_read_block_data(client, ETP_SMBUS_XY_TRACENUM_CMD, val);
1215 + if (ret != 3) {
1216 +@@ -294,7 +294,7 @@ static int elan_smbus_iap_get_mode(struct i2c_client *client,
1217 + {
1218 + int error;
1219 + u16 constant;
1220 +- u8 val[3];
1221 ++ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
1222 +
1223 + error = i2c_smbus_read_block_data(client, ETP_SMBUS_IAP_CTRL_CMD, val);
1224 + if (error < 0) {
1225 +@@ -345,7 +345,7 @@ static int elan_smbus_prepare_fw_update(struct i2c_client *client)
1226 + int len;
1227 + int error;
1228 + enum tp_mode mode;
1229 +- u8 val[3];
1230 ++ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
1231 + u8 cmd[4] = {0x0F, 0x78, 0x00, 0x06};
1232 + u16 password;
1233 +
1234 +@@ -419,7 +419,7 @@ static int elan_smbus_write_fw_block(struct i2c_client *client,
1235 + struct device *dev = &client->dev;
1236 + int error;
1237 + u16 result;
1238 +- u8 val[3];
1239 ++ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
1240 +
1241 + /*
1242 + * Due to the limitation of smbus protocol limiting
1243 +diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
1244 +index a246fc686bb7..6c4bbd38700e 100644
1245 +--- a/drivers/input/mouse/synaptics.c
1246 ++++ b/drivers/input/mouse/synaptics.c
1247 +@@ -172,6 +172,12 @@ static const char * const smbus_pnp_ids[] = {
1248 + "LEN0048", /* X1 Carbon 3 */
1249 + "LEN0046", /* X250 */
1250 + "LEN004a", /* W541 */
1251 ++ "LEN0071", /* T480 */
1252 ++ "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
1253 ++ "LEN0073", /* X1 Carbon G5 (Elantech) */
1254 ++ "LEN0092", /* X1 Carbon 6 */
1255 ++ "LEN0096", /* X280 */
1256 ++ "LEN0097", /* X280 -> ALPS trackpoint */
1257 + "LEN200f", /* T450s */
1258 + NULL
1259 + };
1260 +diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
1261 +index a2c1ca5c76d1..e1660b92b20c 100644
1262 +--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
1263 ++++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
1264 +@@ -372,16 +372,15 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
1265 +
1266 + /*
1267 + * Determine IFS values
1268 +- * - Use TXOP_BACKOFF for probe and management frames except beacons
1269 ++ * - Use TXOP_BACKOFF for management frames except beacons
1270 + * - Use TXOP_SIFS for fragment bursts
1271 + * - Use TXOP_HTTXOP for everything else
1272 + *
1273 + * Note: rt2800 devices won't use CTS protection (if used)
1274 + * for frames not transmitted with TXOP_HTTXOP
1275 + */
1276 +- if ((ieee80211_is_mgmt(hdr->frame_control) &&
1277 +- !ieee80211_is_beacon(hdr->frame_control)) ||
1278 +- (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
1279 ++ if (ieee80211_is_mgmt(hdr->frame_control) &&
1280 ++ !ieee80211_is_beacon(hdr->frame_control))
1281 + txdesc->u.ht.txop = TXOP_BACKOFF;
1282 + else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
1283 + txdesc->u.ht.txop = TXOP_SIFS;
1284 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
1285 +index 9cff6bc4049c..cf551785eb08 100644
1286 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
1287 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
1288 +@@ -299,9 +299,6 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
1289 + writeVal = 0x00000000;
1290 + if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1)
1291 + writeVal = writeVal - 0x06060606;
1292 +- else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
1293 +- TXHIGHPWRLEVEL_BT2)
1294 +- writeVal = writeVal;
1295 + *(p_outwriteval + rf) = writeVal;
1296 + }
1297 + }
1298 +diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
1299 +index 73b724143be0..c91662927de0 100644
1300 +--- a/drivers/pci/host/pci-hyperv.c
1301 ++++ b/drivers/pci/host/pci-hyperv.c
1302 +@@ -531,6 +531,8 @@ struct hv_pci_compl {
1303 + s32 completion_status;
1304 + };
1305 +
1306 ++static void hv_pci_onchannelcallback(void *context);
1307 ++
1308 + /**
1309 + * hv_pci_generic_compl() - Invoked for a completion packet
1310 + * @context: Set up by the sender of the packet.
1311 +@@ -675,6 +677,31 @@ static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
1312 + }
1313 + }
1314 +
1315 ++static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev)
1316 ++{
1317 ++ u16 ret;
1318 ++ unsigned long flags;
1319 ++ void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET +
1320 ++ PCI_VENDOR_ID;
1321 ++
1322 ++ spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
1323 ++
1324 ++ /* Choose the function to be read. (See comment above) */
1325 ++ writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
1326 ++ /* Make sure the function was chosen before we start reading. */
1327 ++ mb();
1328 ++ /* Read from that function's config space. */
1329 ++ ret = readw(addr);
1330 ++ /*
1331 ++ * mb() is not required here, because the spin_unlock_irqrestore()
1332 ++ * is a barrier.
1333 ++ */
1334 ++
1335 ++ spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
1336 ++
1337 ++ return ret;
1338 ++}
1339 ++
1340 + /**
1341 + * _hv_pcifront_write_config() - Internal PCI config write
1342 + * @hpdev: The PCI driver's representation of the device
1343 +@@ -1121,8 +1148,37 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
1344 + * Since this function is called with IRQ locks held, can't
1345 + * do normal wait for completion; instead poll.
1346 + */
1347 +- while (!try_wait_for_completion(&comp.comp_pkt.host_event))
1348 ++ while (!try_wait_for_completion(&comp.comp_pkt.host_event)) {
1349 ++ /* 0xFFFF means an invalid PCI VENDOR ID. */
1350 ++ if (hv_pcifront_get_vendor_id(hpdev) == 0xFFFF) {
1351 ++ dev_err_once(&hbus->hdev->device,
1352 ++ "the device has gone\n");
1353 ++ goto free_int_desc;
1354 ++ }
1355 ++
1356 ++ /*
1357 ++ * When the higher level interrupt code calls us with
1358 ++ * interrupt disabled, we must poll the channel by calling
1359 ++ * the channel callback directly when channel->target_cpu is
1360 ++ * the current CPU. When the higher level interrupt code
1361 ++ * calls us with interrupt enabled, let's add the
1362 ++ * local_bh_disable()/enable() to avoid race.
1363 ++ */
1364 ++ local_bh_disable();
1365 ++
1366 ++ if (hbus->hdev->channel->target_cpu == smp_processor_id())
1367 ++ hv_pci_onchannelcallback(hbus);
1368 ++
1369 ++ local_bh_enable();
1370 ++
1371 ++ if (hpdev->state == hv_pcichild_ejecting) {
1372 ++ dev_err_once(&hbus->hdev->device,
1373 ++ "the device is being ejected\n");
1374 ++ goto free_int_desc;
1375 ++ }
1376 ++
1377 + udelay(100);
1378 ++ }
1379 +
1380 + if (comp.comp_pkt.completion_status < 0) {
1381 + dev_err(&hbus->hdev->device,
1382 +diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
1383 +index 19cd357bb464..ff491da64dab 100644
1384 +--- a/drivers/pinctrl/qcom/pinctrl-msm.c
1385 ++++ b/drivers/pinctrl/qcom/pinctrl-msm.c
1386 +@@ -818,7 +818,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
1387 + return -EINVAL;
1388 +
1389 + chip = &pctrl->chip;
1390 +- chip->base = -1;
1391 ++ chip->base = 0;
1392 + chip->ngpio = ngpio;
1393 + chip->label = dev_name(pctrl->dev);
1394 + chip->parent = pctrl->dev;
1395 +diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
1396 +index 1baf720faf69..87e9747d229a 100644
1397 +--- a/drivers/platform/chrome/cros_ec_lpc.c
1398 ++++ b/drivers/platform/chrome/cros_ec_lpc.c
1399 +@@ -54,7 +54,6 @@ static int ec_response_timed_out(void)
1400 + static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec,
1401 + struct cros_ec_command *msg)
1402 + {
1403 +- struct ec_host_request *request;
1404 + struct ec_host_response response;
1405 + u8 sum;
1406 + int ret = 0;
1407 +@@ -65,8 +64,6 @@ static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec,
1408 + /* Write buffer */
1409 + cros_ec_lpc_write_bytes(EC_LPC_ADDR_HOST_PACKET, ret, ec->dout);
1410 +
1411 +- request = (struct ec_host_request *)ec->dout;
1412 +-
1413 + /* Here we go */
1414 + sum = EC_COMMAND_PROTOCOL_3;
1415 + cros_ec_lpc_write_bytes(EC_LPC_ADDR_HOST_CMD, 1, &sum);
1416 +diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
1417 +index 36f6190931bc..456ce9f19569 100644
1418 +--- a/drivers/scsi/scsi_transport_srp.c
1419 ++++ b/drivers/scsi/scsi_transport_srp.c
1420 +@@ -51,6 +51,8 @@ struct srp_internal {
1421 + struct transport_container rport_attr_cont;
1422 + };
1423 +
1424 ++static int scsi_is_srp_rport(const struct device *dev);
1425 ++
1426 + #define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t)
1427 +
1428 + #define dev_to_rport(d) container_of(d, struct srp_rport, dev)
1429 +@@ -60,9 +62,24 @@ static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r)
1430 + return dev_to_shost(r->dev.parent);
1431 + }
1432 +
1433 ++static int find_child_rport(struct device *dev, void *data)
1434 ++{
1435 ++ struct device **child = data;
1436 ++
1437 ++ if (scsi_is_srp_rport(dev)) {
1438 ++ WARN_ON_ONCE(*child);
1439 ++ *child = dev;
1440 ++ }
1441 ++ return 0;
1442 ++}
1443 ++
1444 + static inline struct srp_rport *shost_to_rport(struct Scsi_Host *shost)
1445 + {
1446 +- return transport_class_to_srp_rport(&shost->shost_gendev);
1447 ++ struct device *child = NULL;
1448 ++
1449 ++ WARN_ON_ONCE(device_for_each_child(&shost->shost_gendev, &child,
1450 ++ find_child_rport) < 0);
1451 ++ return child ? dev_to_rport(child) : NULL;
1452 + }
1453 +
1454 + /**
1455 +@@ -600,7 +617,8 @@ enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd)
1456 + struct srp_rport *rport = shost_to_rport(shost);
1457 +
1458 + pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev));
1459 +- return rport->fast_io_fail_tmo < 0 && rport->dev_loss_tmo < 0 &&
1460 ++ return rport && rport->fast_io_fail_tmo < 0 &&
1461 ++ rport->dev_loss_tmo < 0 &&
1462 + i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ?
1463 + BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
1464 + }
1465 +diff --git a/drivers/soc/lantiq/gphy.c b/drivers/soc/lantiq/gphy.c
1466 +index 8d8659463b3e..feeb17cebc25 100644
1467 +--- a/drivers/soc/lantiq/gphy.c
1468 ++++ b/drivers/soc/lantiq/gphy.c
1469 +@@ -30,7 +30,6 @@ struct xway_gphy_priv {
1470 + struct clk *gphy_clk_gate;
1471 + struct reset_control *gphy_reset;
1472 + struct reset_control *gphy_reset2;
1473 +- struct notifier_block gphy_reboot_nb;
1474 + void __iomem *membase;
1475 + char *fw_name;
1476 + };
1477 +@@ -64,24 +63,6 @@ static const struct of_device_id xway_gphy_match[] = {
1478 + };
1479 + MODULE_DEVICE_TABLE(of, xway_gphy_match);
1480 +
1481 +-static struct xway_gphy_priv *to_xway_gphy_priv(struct notifier_block *nb)
1482 +-{
1483 +- return container_of(nb, struct xway_gphy_priv, gphy_reboot_nb);
1484 +-}
1485 +-
1486 +-static int xway_gphy_reboot_notify(struct notifier_block *reboot_nb,
1487 +- unsigned long code, void *unused)
1488 +-{
1489 +- struct xway_gphy_priv *priv = to_xway_gphy_priv(reboot_nb);
1490 +-
1491 +- if (priv) {
1492 +- reset_control_assert(priv->gphy_reset);
1493 +- reset_control_assert(priv->gphy_reset2);
1494 +- }
1495 +-
1496 +- return NOTIFY_DONE;
1497 +-}
1498 +-
1499 + static int xway_gphy_load(struct device *dev, struct xway_gphy_priv *priv,
1500 + dma_addr_t *dev_addr)
1501 + {
1502 +@@ -205,14 +186,6 @@ static int xway_gphy_probe(struct platform_device *pdev)
1503 + reset_control_deassert(priv->gphy_reset);
1504 + reset_control_deassert(priv->gphy_reset2);
1505 +
1506 +- /* assert the gphy reset because it can hang after a reboot: */
1507 +- priv->gphy_reboot_nb.notifier_call = xway_gphy_reboot_notify;
1508 +- priv->gphy_reboot_nb.priority = -1;
1509 +-
1510 +- ret = register_reboot_notifier(&priv->gphy_reboot_nb);
1511 +- if (ret)
1512 +- dev_warn(dev, "Failed to register reboot notifier\n");
1513 +-
1514 + platform_set_drvdata(pdev, priv);
1515 +
1516 + return ret;
1517 +@@ -220,21 +193,12 @@ static int xway_gphy_probe(struct platform_device *pdev)
1518 +
1519 + static int xway_gphy_remove(struct platform_device *pdev)
1520 + {
1521 +- struct device *dev = &pdev->dev;
1522 + struct xway_gphy_priv *priv = platform_get_drvdata(pdev);
1523 +- int ret;
1524 +-
1525 +- reset_control_assert(priv->gphy_reset);
1526 +- reset_control_assert(priv->gphy_reset2);
1527 +
1528 + iowrite32be(0, priv->membase);
1529 +
1530 + clk_disable_unprepare(priv->gphy_clk_gate);
1531 +
1532 +- ret = unregister_reboot_notifier(&priv->gphy_reboot_nb);
1533 +- if (ret)
1534 +- dev_warn(dev, "Failed to unregister reboot notifier\n");
1535 +-
1536 + return 0;
1537 + }
1538 +
1539 +diff --git a/fs/aio.c b/fs/aio.c
1540 +index 4e23958c2509..3a749c3a92e3 100644
1541 +--- a/fs/aio.c
1542 ++++ b/fs/aio.c
1543 +@@ -643,9 +643,8 @@ static void free_ioctx_users(struct percpu_ref *ref)
1544 + while (!list_empty(&ctx->active_reqs)) {
1545 + req = list_first_entry(&ctx->active_reqs,
1546 + struct aio_kiocb, ki_list);
1547 +-
1548 +- list_del_init(&req->ki_list);
1549 + kiocb_cancel(req);
1550 ++ list_del_init(&req->ki_list);
1551 + }
1552 +
1553 + spin_unlock_irq(&ctx->ctx_lock);
1554 +diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
1555 +index f965ce832bc0..516e0c57cf9c 100644
1556 +--- a/fs/xfs/libxfs/xfs_alloc.c
1557 ++++ b/fs/xfs/libxfs/xfs_alloc.c
1558 +@@ -52,6 +52,23 @@ STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
1559 + STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
1560 + xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);
1561 +
1562 ++/*
1563 ++ * Size of the AGFL. For CRC-enabled filesystems we steal a couple of slots in
1564 ++ * the beginning of the block for a proper header with the location information
1565 ++ * and CRC.
1566 ++ */
1567 ++unsigned int
1568 ++xfs_agfl_size(
1569 ++ struct xfs_mount *mp)
1570 ++{
1571 ++ unsigned int size = mp->m_sb.sb_sectsize;
1572 ++
1573 ++ if (xfs_sb_version_hascrc(&mp->m_sb))
1574 ++ size -= sizeof(struct xfs_agfl);
1575 ++
1576 ++ return size / sizeof(xfs_agblock_t);
1577 ++}
1578 ++
1579 + unsigned int
1580 + xfs_refc_block(
1581 + struct xfs_mount *mp)
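
xfs_agfl_size() above replaces the XFS_AGFL_SIZE() macro that a later hunk removes from xfs_format.h. As a worked example of the arithmetic, assuming the packed v5 header layout shown in that xfs_format.h hunk (magic, seqno, uuid, lsn, crc: 36 bytes):

#include <stdio.h>
#include <stdint.h>

/* Mirror of the on-disk xfs_agfl header fields from this patch. */
struct agfl_hdr {
	uint32_t magicnum;
	uint32_t seqno;
	uint8_t  uuid[16];
	uint64_t lsn;
	uint32_t crc;
} __attribute__((packed));

static unsigned int agfl_size(unsigned int sectsize, int has_crc)
{
	unsigned int size = sectsize;

	if (has_crc)
		size -= sizeof(struct agfl_hdr);	/* 36 bytes */
	return size / sizeof(uint32_t);	/* xfs_agblock_t is 32 bits */
}

int main(void)
{
	printf("v4, 512-byte sector: %u slots\n", agfl_size(512, 0)); /* 128 */
	printf("v5, 512-byte sector: %u slots\n", agfl_size(512, 1)); /* 119 */
	return 0;
}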
1582 +@@ -540,7 +557,7 @@ xfs_agfl_verify(
1583 + if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
1584 + return false;
1585 +
1586 +- for (i = 0; i < XFS_AGFL_SIZE(mp); i++) {
1587 ++ for (i = 0; i < xfs_agfl_size(mp); i++) {
1588 + if (be32_to_cpu(agfl->agfl_bno[i]) != NULLAGBLOCK &&
1589 + be32_to_cpu(agfl->agfl_bno[i]) >= mp->m_sb.sb_agblocks)
1590 + return false;
1591 +@@ -2039,6 +2056,93 @@ xfs_alloc_space_available(
1592 + return true;
1593 + }
1594 +
1595 ++/*
1596 ++ * Check the agfl fields of the agf for inconsistency or corruption. The purpose
1597 ++ * is to detect an agfl header padding mismatch between current and early v5
1598 ++ * kernels. This problem manifests as a 1-slot size difference between the
1599 ++ * on-disk flcount and the active [first, last] range of a wrapped agfl. This
1600 ++ * may also catch variants of agfl count corruption unrelated to padding. Either
1601 ++ * way, we'll reset the agfl and warn the user.
1602 ++ *
1603 ++ * Return true if a reset is required before the agfl can be used, false
1604 ++ * otherwise.
1605 ++ */
1606 ++static bool
1607 ++xfs_agfl_needs_reset(
1608 ++ struct xfs_mount *mp,
1609 ++ struct xfs_agf *agf)
1610 ++{
1611 ++ uint32_t f = be32_to_cpu(agf->agf_flfirst);
1612 ++ uint32_t l = be32_to_cpu(agf->agf_fllast);
1613 ++ uint32_t c = be32_to_cpu(agf->agf_flcount);
1614 ++ int agfl_size = xfs_agfl_size(mp);
1615 ++ int active;
1616 ++
1617 ++ /* no agfl header on v4 supers */
1618 ++ if (!xfs_sb_version_hascrc(&mp->m_sb))
1619 ++ return false;
1620 ++
1621 ++ /*
1622 ++ * The agf read verifier catches severe corruption of these fields.
1623 ++ * Repeat some sanity checks to cover a packed -> unpacked mismatch if
1624 ++ * the verifier allows it.
1625 ++ */
1626 ++ if (f >= agfl_size || l >= agfl_size)
1627 ++ return true;
1628 ++ if (c > agfl_size)
1629 ++ return true;
1630 ++
1631 ++ /*
1632 ++ * Check consistency between the on-disk count and the active range. An
1633 ++ * agfl padding mismatch manifests as an inconsistent flcount.
1634 ++ */
1635 ++ if (c && l >= f)
1636 ++ active = l - f + 1;
1637 ++ else if (c)
1638 ++ active = agfl_size - f + l + 1;
1639 ++ else
1640 ++ active = 0;
1641 ++
1642 ++ return active != c;
1643 ++}
1644 ++
1645 ++/*
1646 ++ * Reset the agfl to an empty state. Ignore/drop any existing blocks since the
1647 ++ * agfl content cannot be trusted. Warn the user that a repair is required to
1648 ++ * recover leaked blocks.
1649 ++ *
1650 ++ * The purpose of this mechanism is to handle filesystems affected by the agfl
1651 ++ * header padding mismatch problem. A reset keeps the filesystem online with a
1652 ++ * relatively minor free space accounting inconsistency rather than suffer the
1653 ++ * inevitable crash from use of an invalid agfl block.
1654 ++ */
1655 ++static void
1656 ++xfs_agfl_reset(
1657 ++ struct xfs_trans *tp,
1658 ++ struct xfs_buf *agbp,
1659 ++ struct xfs_perag *pag)
1660 ++{
1661 ++ struct xfs_mount *mp = tp->t_mountp;
1662 ++ struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
1663 ++
1664 ++ ASSERT(pag->pagf_agflreset);
1665 ++ trace_xfs_agfl_reset(mp, agf, 0, _RET_IP_);
1666 ++
1667 ++ xfs_warn(mp,
1668 ++ "WARNING: Reset corrupted AGFL on AG %u. %d blocks leaked. "
1669 ++ "Please unmount and run xfs_repair.",
1670 ++ pag->pag_agno, pag->pagf_flcount);
1671 ++
1672 ++ agf->agf_flfirst = 0;
1673 ++ agf->agf_fllast = cpu_to_be32(xfs_agfl_size(mp) - 1);
1674 ++ agf->agf_flcount = 0;
1675 ++ xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLLAST |
1676 ++ XFS_AGF_FLCOUNT);
1677 ++
1678 ++ pag->pagf_flcount = 0;
1679 ++ pag->pagf_agflreset = false;
1680 ++}
1681 ++
1682 + /*
1683 + * Decide whether to use this allocation group for this allocation.
1684 + * If so, fix up the btree freelist's size.
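
The wrapped-range arithmetic in xfs_agfl_needs_reset() above is compact; here is a standalone transcription with example values (the slot numbers are illustrative only):

#include <stdio.h>
#include <stdint.h>

static int agfl_needs_reset(uint32_t f, uint32_t l, uint32_t c,
			    uint32_t agfl_size)
{
	uint32_t active;

	if (f >= agfl_size || l >= agfl_size || c > agfl_size)
		return 1;
	if (c && l >= f)
		active = l - f + 1;		/* linear [first, last] range */
	else if (c)
		active = agfl_size - f + l + 1;	/* range wraps past the end */
	else
		active = 0;
	return active != c;
}

int main(void)
{
	/* consistent: slots 10..14 hold 5 blocks */
	printf("%d\n", agfl_needs_reset(10, 14, 5, 119));	/* 0 */
	/* consistent wrap: slots 117, 118, 0, 1 hold 4 blocks */
	printf("%d\n", agfl_needs_reset(117, 1, 4, 119));	/* 0 */
	/* padding mismatch: on-disk count is off by one slot */
	printf("%d\n", agfl_needs_reset(117, 1, 5, 119));	/* 1 */
	return 0;
}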
1685 +@@ -2100,6 +2204,10 @@ xfs_alloc_fix_freelist(
1686 + }
1687 + }
1688 +
1689 ++ /* reset a padding mismatched agfl before final free space check */
1690 ++ if (pag->pagf_agflreset)
1691 ++ xfs_agfl_reset(tp, agbp, pag);
1692 ++
1693 + /* If there isn't enough total space or single-extent, reject it. */
1694 + need = xfs_alloc_min_freelist(mp, pag);
1695 + if (!xfs_alloc_space_available(args, need, flags))
1696 +@@ -2252,10 +2360,11 @@ xfs_alloc_get_freelist(
1697 + bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
1698 + be32_add_cpu(&agf->agf_flfirst, 1);
1699 + xfs_trans_brelse(tp, agflbp);
1700 +- if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp))
1701 ++ if (be32_to_cpu(agf->agf_flfirst) == xfs_agfl_size(mp))
1702 + agf->agf_flfirst = 0;
1703 +
1704 + pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
1705 ++ ASSERT(!pag->pagf_agflreset);
1706 + be32_add_cpu(&agf->agf_flcount, -1);
1707 + xfs_trans_agflist_delta(tp, -1);
1708 + pag->pagf_flcount--;
1709 +@@ -2363,10 +2472,11 @@ xfs_alloc_put_freelist(
1710 + be32_to_cpu(agf->agf_seqno), &agflbp)))
1711 + return error;
1712 + be32_add_cpu(&agf->agf_fllast, 1);
1713 +- if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp))
1714 ++ if (be32_to_cpu(agf->agf_fllast) == xfs_agfl_size(mp))
1715 + agf->agf_fllast = 0;
1716 +
1717 + pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
1718 ++ ASSERT(!pag->pagf_agflreset);
1719 + be32_add_cpu(&agf->agf_flcount, 1);
1720 + xfs_trans_agflist_delta(tp, 1);
1721 + pag->pagf_flcount++;
1722 +@@ -2381,7 +2491,7 @@ xfs_alloc_put_freelist(
1723 +
1724 + xfs_alloc_log_agf(tp, agbp, logflags);
1725 +
1726 +- ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp));
1727 ++ ASSERT(be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp));
1728 +
1729 + agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
1730 + blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
1731 +@@ -2414,9 +2524,9 @@ xfs_agf_verify(
1732 + if (!(agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) &&
1733 + XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
1734 + be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
1735 +- be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) &&
1736 +- be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) &&
1737 +- be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp)))
1738 ++ be32_to_cpu(agf->agf_flfirst) < xfs_agfl_size(mp) &&
1739 ++ be32_to_cpu(agf->agf_fllast) < xfs_agfl_size(mp) &&
1740 ++ be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp)))
1741 + return false;
1742 +
1743 + if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
1744 +@@ -2572,6 +2682,7 @@ xfs_alloc_read_agf(
1745 + pag->pagb_count = 0;
1746 + pag->pagb_tree = RB_ROOT;
1747 + pag->pagf_init = 1;
1748 ++ pag->pagf_agflreset = xfs_agfl_needs_reset(mp, agf);
1749 + }
1750 + #ifdef DEBUG
1751 + else if (!XFS_FORCED_SHUTDOWN(mp)) {
1752 +diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
1753 +index ef26edc2e938..346ba8ab68b5 100644
1754 +--- a/fs/xfs/libxfs/xfs_alloc.h
1755 ++++ b/fs/xfs/libxfs/xfs_alloc.h
1756 +@@ -26,6 +26,8 @@ struct xfs_trans;
1757 +
1758 + extern struct workqueue_struct *xfs_alloc_wq;
1759 +
1760 ++unsigned int xfs_agfl_size(struct xfs_mount *mp);
1761 ++
1762 + /*
1763 + * Freespace allocation types. Argument to xfs_alloc_[v]extent.
1764 + */
1765 +diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
1766 +index 23229f0c5b15..ed4481b2f113 100644
1767 +--- a/fs/xfs/libxfs/xfs_format.h
1768 ++++ b/fs/xfs/libxfs/xfs_format.h
1769 +@@ -798,24 +798,13 @@ typedef struct xfs_agi {
1770 + &(XFS_BUF_TO_AGFL(bp)->agfl_bno[0]) : \
1771 + (__be32 *)(bp)->b_addr)
1772 +
1773 +-/*
1774 +- * Size of the AGFL. For CRC-enabled filesystems we steal a couple of
1775 +- * slots in the beginning of the block for a proper header with the
1776 +- * location information and CRC.
1777 +- */
1778 +-#define XFS_AGFL_SIZE(mp) \
1779 +- (((mp)->m_sb.sb_sectsize - \
1780 +- (xfs_sb_version_hascrc(&((mp)->m_sb)) ? \
1781 +- sizeof(struct xfs_agfl) : 0)) / \
1782 +- sizeof(xfs_agblock_t))
1783 +-
1784 + typedef struct xfs_agfl {
1785 + __be32 agfl_magicnum;
1786 + __be32 agfl_seqno;
1787 + uuid_t agfl_uuid;
1788 + __be64 agfl_lsn;
1789 + __be32 agfl_crc;
1790 +- __be32 agfl_bno[]; /* actually XFS_AGFL_SIZE(mp) */
1791 ++ __be32 agfl_bno[]; /* actually xfs_agfl_size(mp) */
1792 + } __attribute__((packed)) xfs_agfl_t;
1793 +
1794 + #define XFS_AGFL_CRC_OFF offsetof(struct xfs_agfl, agfl_crc)
1795 +diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
1796 +index 8f22fc579dbb..40783a313df9 100644
1797 +--- a/fs/xfs/xfs_fsops.c
1798 ++++ b/fs/xfs/xfs_fsops.c
1799 +@@ -294,7 +294,7 @@ xfs_growfs_data_private(
1800 + }
1801 +
1802 + agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, bp);
1803 +- for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++)
1804 ++ for (bucket = 0; bucket < xfs_agfl_size(mp); bucket++)
1805 + agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
1806 +
1807 + error = xfs_bwrite(bp);
1808 +diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
1809 +index e0792d036be2..d359a88ea249 100644
1810 +--- a/fs/xfs/xfs_mount.h
1811 ++++ b/fs/xfs/xfs_mount.h
1812 +@@ -353,6 +353,7 @@ typedef struct xfs_perag {
1813 + char pagi_inodeok; /* The agi is ok for inodes */
1814 + uint8_t pagf_levels[XFS_BTNUM_AGF];
1815 + /* # of levels in bno & cnt btree */
1816 ++ bool pagf_agflreset; /* agfl requires reset before use */
1817 + uint32_t pagf_flcount; /* count of blocks in freelist */
1818 + xfs_extlen_t pagf_freeblks; /* total free blocks */
1819 + xfs_extlen_t pagf_longest; /* longest free space */
1820 +diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
1821 +index bb5514688d47..06bc87369632 100644
1822 +--- a/fs/xfs/xfs_trace.h
1823 ++++ b/fs/xfs/xfs_trace.h
1824 +@@ -1513,7 +1513,7 @@ TRACE_EVENT(xfs_extent_busy_trim,
1825 + __entry->tlen)
1826 + );
1827 +
1828 +-TRACE_EVENT(xfs_agf,
1829 ++DECLARE_EVENT_CLASS(xfs_agf_class,
1830 + TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags,
1831 + unsigned long caller_ip),
1832 + TP_ARGS(mp, agf, flags, caller_ip),
1833 +@@ -1569,6 +1569,13 @@ TRACE_EVENT(xfs_agf,
1834 + __entry->longest,
1835 + (void *)__entry->caller_ip)
1836 + );
1837 ++#define DEFINE_AGF_EVENT(name) \
1838 ++DEFINE_EVENT(xfs_agf_class, name, \
1839 ++ TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags, \
1840 ++ unsigned long caller_ip), \
1841 ++ TP_ARGS(mp, agf, flags, caller_ip))
1842 ++DEFINE_AGF_EVENT(xfs_agf);
1843 ++DEFINE_AGF_EVENT(xfs_agfl_reset);
1844 +
1845 + TRACE_EVENT(xfs_free_extent,
1846 + TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
1847 +diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h
1848 +index b9e22b7e2f28..d1171db23742 100644
1849 +--- a/include/linux/iio/buffer_impl.h
1850 ++++ b/include/linux/iio/buffer_impl.h
1851 +@@ -53,7 +53,7 @@ struct iio_buffer_access_funcs {
1852 + int (*request_update)(struct iio_buffer *buffer);
1853 +
1854 + int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd);
1855 +- int (*set_length)(struct iio_buffer *buffer, int length);
1856 ++ int (*set_length)(struct iio_buffer *buffer, unsigned int length);
1857 +
1858 + int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
1859 + int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
1860 +@@ -72,10 +72,10 @@ struct iio_buffer_access_funcs {
1861 + */
1862 + struct iio_buffer {
1863 + /** @length: Number of datums in buffer. */
1864 +- int length;
1865 ++ unsigned int length;
1866 +
1867 + /** @bytes_per_datum: Size of individual datum including timestamp. */
1868 +- int bytes_per_datum;
1869 ++ size_t bytes_per_datum;
1870 +
1871 + /**
1872 + * @access: Buffer access functions associated with the
1873 +diff --git a/include/linux/tcp.h b/include/linux/tcp.h
1874 +index e8418fc77a43..fe322fa611e6 100644
1875 +--- a/include/linux/tcp.h
1876 ++++ b/include/linux/tcp.h
1877 +@@ -334,7 +334,7 @@ struct tcp_sock {
1878 +
1879 + /* Receiver queue space */
1880 + struct {
1881 +- int space;
1882 ++ u32 space;
1883 + u32 seq;
1884 + u64 time;
1885 + } rcvq_space;
1886 +diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
1887 +index 3fab6c81917f..f41ea5af22ee 100644
1888 +--- a/include/uapi/linux/nl80211.h
1889 ++++ b/include/uapi/linux/nl80211.h
1890 +@@ -2604,7 +2604,7 @@ enum nl80211_attrs {
1891 + #define NL80211_ATTR_KEYS NL80211_ATTR_KEYS
1892 + #define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS
1893 +
1894 +-#define NL80211_WIPHY_NAME_MAXLEN 128
1895 ++#define NL80211_WIPHY_NAME_MAXLEN 64
1896 +
1897 + #define NL80211_MAX_SUPP_RATES 32
1898 + #define NL80211_MAX_SUPP_HT_RATES 77
1899 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
1900 +index 76bcc80b893e..520ecaf61dc4 100644
1901 +--- a/kernel/trace/trace.c
1902 ++++ b/kernel/trace/trace.c
1903 +@@ -894,7 +894,7 @@ int __trace_bputs(unsigned long ip, const char *str)
1904 + EXPORT_SYMBOL_GPL(__trace_bputs);
1905 +
1906 + #ifdef CONFIG_TRACER_SNAPSHOT
1907 +-static void tracing_snapshot_instance(struct trace_array *tr)
1908 ++void tracing_snapshot_instance(struct trace_array *tr)
1909 + {
1910 + struct tracer *tracer = tr->current_trace;
1911 + unsigned long flags;
1912 +@@ -950,7 +950,7 @@ static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
1913 + struct trace_buffer *size_buf, int cpu_id);
1914 + static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
1915 +
1916 +-static int alloc_snapshot(struct trace_array *tr)
1917 ++int tracing_alloc_snapshot_instance(struct trace_array *tr)
1918 + {
1919 + int ret;
1920 +
1921 +@@ -996,7 +996,7 @@ int tracing_alloc_snapshot(void)
1922 + struct trace_array *tr = &global_trace;
1923 + int ret;
1924 +
1925 +- ret = alloc_snapshot(tr);
1926 ++ ret = tracing_alloc_snapshot_instance(tr);
1927 + WARN_ON(ret < 0);
1928 +
1929 + return ret;
1930 +@@ -5400,7 +5400,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
1931 +
1932 + #ifdef CONFIG_TRACER_MAX_TRACE
1933 + if (t->use_max_tr && !had_max_tr) {
1934 +- ret = alloc_snapshot(tr);
1935 ++ ret = tracing_alloc_snapshot_instance(tr);
1936 + if (ret < 0)
1937 + goto out;
1938 + }
1939 +@@ -6378,7 +6378,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
1940 + }
1941 + #endif
1942 + if (!tr->allocated_snapshot) {
1943 +- ret = alloc_snapshot(tr);
1944 ++ ret = tracing_alloc_snapshot_instance(tr);
1945 + if (ret < 0)
1946 + break;
1947 + }
1948 +@@ -7099,7 +7099,7 @@ ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
1949 + return ret;
1950 +
1951 + out_reg:
1952 +- ret = alloc_snapshot(tr);
1953 ++ ret = tracing_alloc_snapshot_instance(tr);
1954 + if (ret < 0)
1955 + goto out;
1956 +
1957 +diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
1958 +index 401b0639116f..851cd1605085 100644
1959 +--- a/kernel/trace/trace.h
1960 ++++ b/kernel/trace/trace.h
1961 +@@ -1807,6 +1807,17 @@ static inline void __init trace_event_init(void) { }
1962 + static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
1963 + #endif
1964 +
1965 ++#ifdef CONFIG_TRACER_SNAPSHOT
1966 ++void tracing_snapshot_instance(struct trace_array *tr);
1967 ++int tracing_alloc_snapshot_instance(struct trace_array *tr);
1968 ++#else
1969 ++static inline void tracing_snapshot_instance(struct trace_array *tr) { }
1970 ++static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
1971 ++{
1972 ++ return 0;
1973 ++}
1974 ++#endif
1975 ++
1976 + extern struct trace_iterator *tracepoint_print_iter;
1977 +
1978 + #endif /* _LINUX_KERNEL_TRACE_H */
1979 +diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
1980 +index f2ac9d44f6c4..b413fab7d75b 100644
1981 +--- a/kernel/trace/trace_events_trigger.c
1982 ++++ b/kernel/trace/trace_events_trigger.c
1983 +@@ -482,9 +482,10 @@ clear_event_triggers(struct trace_array *tr)
1984 + struct trace_event_file *file;
1985 +
1986 + list_for_each_entry(file, &tr->events, list) {
1987 +- struct event_trigger_data *data;
1988 +- list_for_each_entry_rcu(data, &file->triggers, list) {
1989 ++ struct event_trigger_data *data, *n;
1990 ++ list_for_each_entry_safe(data, n, &file->triggers, list) {
1991 + trace_event_trigger_enable_disable(file, 0);
1992 ++ list_del_rcu(&data->list);
1993 + if (data->ops->free)
1994 + data->ops->free(data->ops, data);
1995 + }
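
clear_event_triggers() above switches to list_for_each_entry_safe() because the loop body now unlinks each entry (list_del_rcu()) before freeing it; iterating a list you are dismantling requires caching the successor first. The same idea on a plain singly linked list:

#include <stdio.h>
#include <stdlib.h>

struct trigger {
	int id;
	struct trigger *next;
};

int main(void)
{
	struct trigger *head = NULL, *t, *n;

	for (int i = 0; i < 3; i++) {
		t = malloc(sizeof(*t));
		t->id = i;
		t->next = head;
		head = t;
	}

	/* Cache the successor before freeing the cursor, as
	 * list_for_each_entry_safe() does; the unsafe variant would
	 * read t->next after t was unlinked and freed. */
	for (t = head; t; t = n) {
		n = t->next;
		printf("removing trigger %d\n", t->id);
		free(t);
	}
	return 0;
}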
1996 +@@ -641,6 +642,7 @@ event_trigger_callback(struct event_command *cmd_ops,
1997 + trigger_data->count = -1;
1998 + trigger_data->ops = trigger_ops;
1999 + trigger_data->cmd_ops = cmd_ops;
2000 ++ trigger_data->private_data = file;
2001 + INIT_LIST_HEAD(&trigger_data->list);
2002 + INIT_LIST_HEAD(&trigger_data->named_list);
2003 +
2004 +@@ -1041,7 +1043,12 @@ static struct event_command trigger_traceoff_cmd = {
2005 + static void
2006 + snapshot_trigger(struct event_trigger_data *data, void *rec)
2007 + {
2008 +- tracing_snapshot();
2009 ++ struct trace_event_file *file = data->private_data;
2010 ++
2011 ++ if (file)
2012 ++ tracing_snapshot_instance(file->tr);
2013 ++ else
2014 ++ tracing_snapshot();
2015 + }
2016 +
2017 + static void
2018 +@@ -1063,7 +1070,7 @@ register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
2019 + {
2020 + int ret = register_trigger(glob, ops, data, file);
2021 +
2022 +- if (ret > 0 && tracing_alloc_snapshot() != 0) {
2023 ++ if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) {
2024 + unregister_trigger(glob, ops, data, file);
2025 + ret = 0;
2026 + }
2027 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
2028 +index e774898c91d5..8af604f3b370 100644
2029 +--- a/mm/huge_memory.c
2030 ++++ b/mm/huge_memory.c
2031 +@@ -2388,7 +2388,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
2032 + __split_huge_page_tail(head, i, lruvec, list);
2033 + /* Some pages can be beyond i_size: drop them from page cache */
2034 + if (head[i].index >= end) {
2035 +- __ClearPageDirty(head + i);
2036 ++ ClearPageDirty(head + i);
2037 + __delete_from_page_cache(head + i, NULL);
2038 + if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
2039 + shmem_uncharge(head->mapping->host, 1);
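
The one-line huge_memory.c change swaps __ClearPageDirty() for ClearPageDirty(); in kernel page-flag naming the double-underscore helpers are the non-atomic variants, so the fix makes the dirty-bit clear an atomic read-modify-write. A minimal illustration of that distinction (these helpers are stand-ins, not the kernel implementations):

#include <stdio.h>

/* Non-atomic: plain read-modify-write, can lose a concurrent update. */
static void nonatomic_clear_bit(unsigned long *flags, int bit)
{
	*flags &= ~(1UL << bit);
}

/* Atomic: the whole read-modify-write happens as one indivisible step. */
static void atomic_clear_bit(unsigned long *flags, int bit)
{
	__atomic_fetch_and(flags, ~(1UL << bit), __ATOMIC_SEQ_CST);
}

int main(void)
{
	unsigned long flags = 0xff;

	nonatomic_clear_bit(&flags, 0);
	atomic_clear_bit(&flags, 1);
	printf("flags = %#lx\n", flags);	/* 0xfc */
	return 0;
}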
2040 +diff --git a/mm/vmscan.c b/mm/vmscan.c
2041 +index 1a581468a9cf..be56e2e1931e 100644
2042 +--- a/mm/vmscan.c
2043 ++++ b/mm/vmscan.c
2044 +@@ -1451,7 +1451,7 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
2045 + return ret;
2046 +
2047 + mapping = page_mapping(page);
2048 +- migrate_dirty = mapping && mapping->a_ops->migratepage;
2049 ++ migrate_dirty = !mapping || mapping->a_ops->migratepage;
2050 + unlock_page(page);
2051 + if (!migrate_dirty)
2052 + return ret;
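
The vmscan.c change flips the NULL-mapping case: before, a dirty page with no mapping could never be isolated for migration; after, only pages that do have a mapping but lack a migratepage callback are refused. The full truth table, computed directly from the two expressions:

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	for (int has_mapping = 0; has_mapping <= 1; has_mapping++) {
		for (int has_migratepage = 0; has_migratepage <= 1;
		     has_migratepage++) {
			bool old = has_mapping && has_migratepage;
			bool new = !has_mapping || has_migratepage;

			printf("mapping=%d migratepage=%d old=%d new=%d\n",
			       has_mapping, has_migratepage, old, new);
		}
	}
	/* The rows differ only when mapping is NULL: old=0, new=1. */
	return 0;
}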
2053 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
2054 +index ebbb54bcbcac..125b49c166a4 100644
2055 +--- a/net/ipv4/tcp_input.c
2056 ++++ b/net/ipv4/tcp_input.c
2057 +@@ -591,8 +591,8 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
2058 + void tcp_rcv_space_adjust(struct sock *sk)
2059 + {
2060 + struct tcp_sock *tp = tcp_sk(sk);
2061 ++ u32 copied;
2062 + int time;
2063 +- int copied;
2064 +
2065 + tcp_mstamp_refresh(tp);
2066 + time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
2067 +@@ -615,12 +615,13 @@ void tcp_rcv_space_adjust(struct sock *sk)
2068 +
2069 + if (sysctl_tcp_moderate_rcvbuf &&
2070 + !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
2071 +- int rcvwin, rcvmem, rcvbuf;
2072 ++ int rcvmem, rcvbuf;
2073 ++ u64 rcvwin;
2074 +
2075 + /* minimal window to cope with packet losses, assuming
2076 + * steady state. Add some cushion because of small variations.
2077 + */
2078 +- rcvwin = (copied << 1) + 16 * tp->advmss;
2079 ++ rcvwin = ((u64)copied << 1) + 16 * tp->advmss;
2080 +
2081 + /* If rate increased by 25%,
2082 + * assume slow start, rcvwin = 3 * copied
2083 +@@ -640,7 +641,8 @@ void tcp_rcv_space_adjust(struct sock *sk)
2084 + while (tcp_win_from_space(rcvmem) < tp->advmss)
2085 + rcvmem += 128;
2086 +
2087 +- rcvbuf = min(rcvwin / tp->advmss * rcvmem, sysctl_tcp_rmem[2]);
2088 ++ do_div(rcvwin, tp->advmss);
2089 ++ rcvbuf = min_t(u64, rcvwin * rcvmem, sysctl_tcp_rmem[2]);
2090 + if (rcvbuf > sk->sk_rcvbuf) {
2091 + sk->sk_rcvbuf = rcvbuf;
2092 +
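
The tcp_rcv_space_adjust() hunks widen copied to u32 and rcvwin to u64 because (copied << 1) + 16 * advmss can overflow 32-bit signed arithmetic for very large per-interval receive volumes, and rcvwin / advmss * rcvmem can overflow again even when rcvwin fits, hence the do_div() and min_t(u64, ...). A demonstration of the first overflow (the byte counts are illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t copied = 0x60000000u;	/* ~1.5 GiB received in one interval */
	uint32_t advmss = 1460;

	/* Old math: 32-bit int; the shifted value lands in the sign bit. */
	int32_t old_rcvwin = (int32_t)((copied << 1) + 16u * advmss);

	/* Fixed math: widen before shifting, as the patch does with u64. */
	uint64_t new_rcvwin = ((uint64_t)copied << 1) + 16ull * advmss;

	printf("old: %d\n", (int)old_rcvwin);	/* negative: -1073718464 */
	printf("new: %llu\n", (unsigned long long)new_rcvwin);	/* 3221248832 */
	return 0;
}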
2093 +diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
2094 +index c9c031e3d1ae..b275743e23cc 100644
2095 +--- a/security/selinux/ss/services.c
2096 ++++ b/security/selinux/ss/services.c
2097 +@@ -1448,7 +1448,7 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
2098 + scontext_len, &context, def_sid);
2099 + if (rc == -EINVAL && force) {
2100 + context.str = str;
2101 +- context.len = scontext_len;
2102 ++ context.len = strlen(str) + 1;
2103 + str = NULL;
2104 + } else if (rc)
2105 + goto out_unlock;
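
The services.c fix stores strlen(str) + 1 rather than the caller-supplied scontext_len: for a force-loaded invalid context, the kept string is the NUL-terminated copy, and the caller's byte count may cover trailing bytes past the NUL. A small illustration (the context string is made up):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* The caller's length covers the whole buffer, including bytes
	 * past the terminating NUL of the kept string. */
	char scontext[32] = "user_u:object_r:unknown_t";
	size_t scontext_len = sizeof(scontext);		/* 32 */
	size_t stored_len = strlen(scontext) + 1;	/* 26 */

	printf("caller length: %zu, stored length: %zu\n",
	       scontext_len, stored_len);
	return 0;
}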
2106 +diff --git a/sound/soc/intel/common/sst-firmware.c b/sound/soc/intel/common/sst-firmware.c
2107 +index a086c35f91bb..79a9fdf94d38 100644
2108 +--- a/sound/soc/intel/common/sst-firmware.c
2109 ++++ b/sound/soc/intel/common/sst-firmware.c
2110 +@@ -274,7 +274,6 @@ int sst_dma_new(struct sst_dsp *sst)
2111 + struct sst_pdata *sst_pdata = sst->pdata;
2112 + struct sst_dma *dma;
2113 + struct resource mem;
2114 +- const char *dma_dev_name;
2115 + int ret = 0;
2116 +
2117 + if (sst->pdata->resindex_dma_base == -1)
2118 +@@ -285,7 +284,6 @@ int sst_dma_new(struct sst_dsp *sst)
2119 + * is attached to the ADSP IP. */
2120 + switch (sst->pdata->dma_engine) {
2121 + case SST_DMA_TYPE_DW:
2122 +- dma_dev_name = "dw_dmac";
2123 + break;
2124 + default:
2125 + dev_err(sst->dev, "error: invalid DMA engine %d\n",
2126 +diff --git a/tools/objtool/check.c b/tools/objtool/check.c
2127 +index c8b8b7101c6f..e128d1c71c30 100644
2128 +--- a/tools/objtool/check.c
2129 ++++ b/tools/objtool/check.c
2130 +@@ -59,6 +59,31 @@ static struct instruction *next_insn_same_sec(struct objtool_file *file,
2131 + return next;
2132 + }
2133 +
2134 ++static struct instruction *next_insn_same_func(struct objtool_file *file,
2135 ++ struct instruction *insn)
2136 ++{
2137 ++ struct instruction *next = list_next_entry(insn, list);
2138 ++ struct symbol *func = insn->func;
2139 ++
2140 ++ if (!func)
2141 ++ return NULL;
2142 ++
2143 ++ if (&next->list != &file->insn_list && next->func == func)
2144 ++ return next;
2145 ++
2146 ++ /* Check if we're already in the subfunction: */
2147 ++ if (func == func->cfunc)
2148 ++ return NULL;
2149 ++
2150 ++ /* Move to the subfunction: */
2151 ++ return find_insn(file, func->cfunc->sec, func->cfunc->offset);
2152 ++}
2153 ++
2154 ++#define func_for_each_insn_all(file, func, insn) \
2155 ++ for (insn = find_insn(file, func->sec, func->offset); \
2156 ++ insn; \
2157 ++ insn = next_insn_same_func(file, insn))
2158 ++
2159 + #define func_for_each_insn(file, func, insn) \
2160 + for (insn = find_insn(file, func->sec, func->offset); \
2161 + insn && &insn->list != &file->insn_list && \
2162 +@@ -148,10 +173,14 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
2163 + if (!strcmp(func->name, global_noreturns[i]))
2164 + return 1;
2165 +
2166 +- if (!func->sec)
2167 ++ if (!func->len)
2168 + return 0;
2169 +
2170 +- func_for_each_insn(file, func, insn) {
2171 ++ insn = find_insn(file, func->sec, func->offset);
2172 ++ if (!insn->func)
2173 ++ return 0;
2174 ++
2175 ++ func_for_each_insn_all(file, func, insn) {
2176 + empty = false;
2177 +
2178 + if (insn->type == INSN_RETURN)
2179 +@@ -166,35 +195,28 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
2180 + * case, the function's dead-end status depends on whether the target
2181 + * of the sibling call returns.
2182 + */
2183 +- func_for_each_insn(file, func, insn) {
2184 +- if (insn->sec != func->sec ||
2185 +- insn->offset >= func->offset + func->len)
2186 +- break;
2187 +-
2188 ++ func_for_each_insn_all(file, func, insn) {
2189 + if (insn->type == INSN_JUMP_UNCONDITIONAL) {
2190 + struct instruction *dest = insn->jump_dest;
2191 +- struct symbol *dest_func;
2192 +
2193 + if (!dest)
2194 + /* sibling call to another file */
2195 + return 0;
2196 +
2197 +- if (dest->sec != func->sec ||
2198 +- dest->offset < func->offset ||
2199 +- dest->offset >= func->offset + func->len) {
2200 +- /* local sibling call */
2201 +- dest_func = find_symbol_by_offset(dest->sec,
2202 +- dest->offset);
2203 +- if (!dest_func)
2204 +- continue;
2205 ++ if (dest->func && dest->func->pfunc != insn->func->pfunc) {
2206 +
2207 ++ /* local sibling call */
2208 + if (recursion == 5) {
2209 +- WARN_FUNC("infinite recursion (objtool bug!)",
2210 +- dest->sec, dest->offset);
2211 +- return -1;
2212 ++ /*
2213 ++ * Infinite recursion: two functions
2214 ++ * have sibling calls to each other.
2215 ++ * This is a very rare case. It means
2216 ++ * they aren't dead ends.
2217 ++ */
2218 ++ return 0;
2219 + }
2220 +
2221 +- return __dead_end_function(file, dest_func,
2222 ++ return __dead_end_function(file, dest->func,
2223 + recursion + 1);
2224 + }
2225 + }
2226 +@@ -421,7 +443,7 @@ static void add_ignores(struct objtool_file *file)
2227 + if (!ignore_func(file, func))
2228 + continue;
2229 +
2230 +- func_for_each_insn(file, func, insn)
2231 ++ func_for_each_insn_all(file, func, insn)
2232 + insn->ignore = true;
2233 + }
2234 + }
2235 +@@ -781,30 +803,35 @@ static int add_special_section_alts(struct objtool_file *file)
2236 + return ret;
2237 + }
2238 +
2239 +-static int add_switch_table(struct objtool_file *file, struct symbol *func,
2240 +- struct instruction *insn, struct rela *table,
2241 +- struct rela *next_table)
2242 ++static int add_switch_table(struct objtool_file *file, struct instruction *insn,
2243 ++ struct rela *table, struct rela *next_table)
2244 + {
2245 + struct rela *rela = table;
2246 + struct instruction *alt_insn;
2247 + struct alternative *alt;
2248 ++ struct symbol *pfunc = insn->func->pfunc;
2249 ++ unsigned int prev_offset = 0;
2250 +
2251 + list_for_each_entry_from(rela, &file->rodata->rela->rela_list, list) {
2252 + if (rela == next_table)
2253 + break;
2254 +
2255 +- if (rela->sym->sec != insn->sec ||
2256 +- rela->addend <= func->offset ||
2257 +- rela->addend >= func->offset + func->len)
2258 ++ /* Make sure the switch table entries are consecutive: */
2259 ++ if (prev_offset && rela->offset != prev_offset + 8)
2260 + break;
2261 +
2262 +- alt_insn = find_insn(file, insn->sec, rela->addend);
2263 +- if (!alt_insn) {
2264 +- WARN("%s: can't find instruction at %s+0x%x",
2265 +- file->rodata->rela->name, insn->sec->name,
2266 +- rela->addend);
2267 +- return -1;
2268 +- }
2269 ++ /* Detect function pointers from contiguous objects: */
2270 ++ if (rela->sym->sec == pfunc->sec &&
2271 ++ rela->addend == pfunc->offset)
2272 ++ break;
2273 ++
2274 ++ alt_insn = find_insn(file, rela->sym->sec, rela->addend);
2275 ++ if (!alt_insn)
2276 ++ break;
2277 ++
2278 ++ /* Make sure the jmp dest is in the function or subfunction: */
2279 ++ if (alt_insn->func->pfunc != pfunc)
2280 ++ break;
2281 +
2282 + alt = malloc(sizeof(*alt));
2283 + if (!alt) {
2284 +@@ -814,6 +841,13 @@ static int add_switch_table(struct objtool_file *file, struct symbol *func,
2285 +
2286 + alt->insn = alt_insn;
2287 + list_add_tail(&alt->list, &insn->alts);
2288 ++ prev_offset = rela->offset;
2289 ++ }
2290 ++
2291 ++ if (!prev_offset) {
2292 ++ WARN_FUNC("can't find switch jump table",
2293 ++ insn->sec, insn->offset);
2294 ++ return -1;
2295 + }
2296 +
2297 + return 0;
2298 +@@ -868,40 +902,21 @@ static struct rela *find_switch_table(struct objtool_file *file,
2299 + {
2300 + struct rela *text_rela, *rodata_rela;
2301 + struct instruction *orig_insn = insn;
2302 ++ unsigned long table_offset;
2303 +
2304 +- text_rela = find_rela_by_dest_range(insn->sec, insn->offset, insn->len);
2305 +- if (text_rela && text_rela->sym == file->rodata->sym) {
2306 +- /* case 1 */
2307 +- rodata_rela = find_rela_by_dest(file->rodata,
2308 +- text_rela->addend);
2309 +- if (rodata_rela)
2310 +- return rodata_rela;
2311 +-
2312 +- /* case 2 */
2313 +- rodata_rela = find_rela_by_dest(file->rodata,
2314 +- text_rela->addend + 4);
2315 +- if (!rodata_rela)
2316 +- return NULL;
2317 +-
2318 +- file->ignore_unreachables = true;
2319 +- return rodata_rela;
2320 +- }
2321 +-
2322 +- /* case 3 */
2323 + /*
2324 + * Backward search using the @first_jump_src links; these help avoid
2325 + * much of the 'in between' code and keep us from getting confused
2326 + * by it.
2327 + */
2328 +- for (insn = list_prev_entry(insn, list);
2329 +-
2330 ++ for (;
2331 + &insn->list != &file->insn_list &&
2332 + insn->sec == func->sec &&
2333 + insn->offset >= func->offset;
2334 +
2335 + insn = insn->first_jump_src ?: list_prev_entry(insn, list)) {
2336 +
2337 +- if (insn->type == INSN_JUMP_DYNAMIC)
2338 ++ if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
2339 + break;
2340 +
2341 + /* allow small jumps within the range */
2342 +@@ -917,18 +932,29 @@ static struct rela *find_switch_table(struct objtool_file *file,
2343 + if (!text_rela || text_rela->sym != file->rodata->sym)
2344 + continue;
2345 +
2346 ++ table_offset = text_rela->addend;
2347 ++ if (text_rela->type == R_X86_64_PC32)
2348 ++ table_offset += 4;
2349 ++
2350 + /*
2351 + * Make sure the .rodata address isn't associated with a
2352 + * symbol. gcc jump tables are anonymous data.
2353 + */
2354 +- if (find_symbol_containing(file->rodata, text_rela->addend))
2355 ++ if (find_symbol_containing(file->rodata, table_offset))
2356 + continue;
2357 +
2358 +- rodata_rela = find_rela_by_dest(file->rodata, text_rela->addend);
2359 +- if (!rodata_rela)
2360 +- continue;
2361 ++ rodata_rela = find_rela_by_dest(file->rodata, table_offset);
2362 ++ if (rodata_rela) {
2363 ++ /*
2364 ++ * Use of RIP-relative switch jumps is quite rare, and
2365 ++ * indicates a rare GCC quirk/bug which can leave dead
2366 ++ * code behind.
2367 ++ */
2368 ++ if (text_rela->type == R_X86_64_PC32)
2369 ++ file->ignore_unreachables = true;
2370 +
2371 +- return rodata_rela;
2372 ++ return rodata_rela;
2373 ++ }
2374 + }
2375 +
2376 + return NULL;
2377 +@@ -942,7 +968,7 @@ static int add_func_switch_tables(struct objtool_file *file,
2378 + struct rela *rela, *prev_rela = NULL;
2379 + int ret;
2380 +
2381 +- func_for_each_insn(file, func, insn) {
2382 ++ func_for_each_insn_all(file, func, insn) {
2383 + if (!last)
2384 + last = insn;
2385 +
2386 +@@ -973,8 +999,7 @@ static int add_func_switch_tables(struct objtool_file *file,
2387 + * the beginning of another switch table in the same function.
2388 + */
2389 + if (prev_jump) {
2390 +- ret = add_switch_table(file, func, prev_jump, prev_rela,
2391 +- rela);
2392 ++ ret = add_switch_table(file, prev_jump, prev_rela, rela);
2393 + if (ret)
2394 + return ret;
2395 + }
2396 +@@ -984,7 +1009,7 @@ static int add_func_switch_tables(struct objtool_file *file,
2397 + }
2398 +
2399 + if (prev_jump) {
2400 +- ret = add_switch_table(file, func, prev_jump, prev_rela, NULL);
2401 ++ ret = add_switch_table(file, prev_jump, prev_rela, NULL);
2402 + if (ret)
2403 + return ret;
2404 + }
2405 +@@ -1748,15 +1773,13 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
2406 + while (1) {
2407 + next_insn = next_insn_same_sec(file, insn);
2408 +
2409 +-
2410 +- if (file->c_file && func && insn->func && func != insn->func) {
2411 ++ if (file->c_file && func && insn->func && func != insn->func->pfunc) {
2412 + WARN("%s() falls through to next function %s()",
2413 + func->name, insn->func->name);
2414 + return 1;
2415 + }
2416 +
2417 +- if (insn->func)
2418 +- func = insn->func;
2419 ++ func = insn->func ? insn->func->pfunc : NULL;
2420 +
2421 + if (func && insn->ignore) {
2422 + WARN_FUNC("BUG: why am I validating an ignored function?",
2423 +@@ -1777,7 +1800,7 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
2424 +
2425 + i = insn;
2426 + save_insn = NULL;
2427 +- func_for_each_insn_continue_reverse(file, func, i) {
2428 ++ func_for_each_insn_continue_reverse(file, insn->func, i) {
2429 + if (i->save) {
2430 + save_insn = i;
2431 + break;
2432 +@@ -1864,7 +1887,7 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
2433 + case INSN_JUMP_UNCONDITIONAL:
2434 + if (insn->jump_dest &&
2435 + (!func || !insn->jump_dest->func ||
2436 +- func == insn->jump_dest->func)) {
2437 ++ insn->jump_dest->func->pfunc == func)) {
2438 + ret = validate_branch(file, insn->jump_dest,
2439 + state);
2440 + if (ret)
2441 +@@ -2059,7 +2082,7 @@ static int validate_functions(struct objtool_file *file)
2442 +
2443 + for_each_sec(file, sec) {
2444 + list_for_each_entry(func, &sec->symbol_list, list) {
2445 +- if (func->type != STT_FUNC)
2446 ++ if (func->type != STT_FUNC || func->pfunc != func)
2447 + continue;
2448 +
2449 + insn = find_insn(file, sec, func->offset);
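
add_switch_table() now walks the .rodata relocations and stops at the first gap: entries of a GCC jump table are consecutive 8-byte relocations, which is what the prev_offset + 8 test above encodes. A standalone sketch of that scan (struct rela here is reduced to the one field the check needs):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct rela {
	uint64_t offset;
};

/* Count leading consecutive 8-byte entries; the first gap ends the
 * candidate jump table, mirroring the prev_offset + 8 check. */
static size_t table_entries(const struct rela *r, size_t n)
{
	uint64_t prev_offset = 0;
	size_t count = 0;

	for (size_t i = 0; i < n; i++) {
		if (prev_offset && r[i].offset != prev_offset + 8)
			break;
		prev_offset = r[i].offset;
		count++;
	}
	return count;
}

int main(void)
{
	const struct rela relas[] = {
		{ 0x100 }, { 0x108 }, { 0x110 },
		{ 0x140 }	/* gap: not part of the table */
	};

	printf("%zu entries\n", table_entries(relas, 4));	/* 3 */
	return 0;
}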
2450 +diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
2451 +index c1c338661699..4e60e105583e 100644
2452 +--- a/tools/objtool/elf.c
2453 ++++ b/tools/objtool/elf.c
2454 +@@ -79,6 +79,19 @@ struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset)
2455 + return NULL;
2456 + }
2457 +
2458 ++struct symbol *find_symbol_by_name(struct elf *elf, const char *name)
2459 ++{
2460 ++ struct section *sec;
2461 ++ struct symbol *sym;
2462 ++
2463 ++ list_for_each_entry(sec, &elf->sections, list)
2464 ++ list_for_each_entry(sym, &sec->symbol_list, list)
2465 ++ if (!strcmp(sym->name, name))
2466 ++ return sym;
2467 ++
2468 ++ return NULL;
2469 ++}
2470 ++
2471 + struct symbol *find_symbol_containing(struct section *sec, unsigned long offset)
2472 + {
2473 + struct symbol *sym;
2474 +@@ -203,10 +216,11 @@ static int read_sections(struct elf *elf)
2475 +
2476 + static int read_symbols(struct elf *elf)
2477 + {
2478 +- struct section *symtab;
2479 +- struct symbol *sym;
2480 ++ struct section *symtab, *sec;
2481 ++ struct symbol *sym, *pfunc;
2482 + struct list_head *entry, *tmp;
2483 + int symbols_nr, i;
2484 ++ char *coldstr;
2485 +
2486 + symtab = find_section_by_name(elf, ".symtab");
2487 + if (!symtab) {
2488 +@@ -281,6 +295,30 @@ static int read_symbols(struct elf *elf)
2489 + hash_add(sym->sec->symbol_hash, &sym->hash, sym->idx);
2490 + }
2491 +
2492 ++ /* Create parent/child links for any cold subfunctions */
2493 ++ list_for_each_entry(sec, &elf->sections, list) {
2494 ++ list_for_each_entry(sym, &sec->symbol_list, list) {
2495 ++ if (sym->type != STT_FUNC)
2496 ++ continue;
2497 ++ sym->pfunc = sym->cfunc = sym;
2498 ++ coldstr = strstr(sym->name, ".cold.");
2499 ++ if (coldstr) {
2500 ++ coldstr[0] = '\0';
2501 ++ pfunc = find_symbol_by_name(elf, sym->name);
2502 ++ coldstr[0] = '.';
2503 ++
2504 ++ if (!pfunc) {
2505 ++ WARN("%s(): can't find parent function",
2506 ++ sym->name);
2507 ++ goto err;
2508 ++ }
2509 ++
2510 ++ sym->pfunc = pfunc;
2511 ++ pfunc->cfunc = sym;
2512 ++ }
2513 ++ }
2514 ++ }
2515 ++
2516 + return 0;
2517 +
2518 + err:
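
read_symbols() above derives parent/child links for GCC's split-out cold paths: a symbol named "foo.cold.<n>" has its name temporarily cut at ".cold." so the parent "foo" can be looked up, then the dot is restored. The string handling in isolation (the symbol name is an example):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char name[] = "queue_work.cold.12";	/* example cold subfunction */
	char *coldstr = strstr(name, ".cold.");

	if (coldstr) {
		coldstr[0] = '\0';		/* truncate: "queue_work" */
		printf("parent function: %s\n", name);
		coldstr[0] = '.';		/* restore the full name */
	}
	printf("cold subfunction: %s\n", name);
	return 0;
}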
2519 +diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h
2520 +index d86e2ff14466..de5cd2ddded9 100644
2521 +--- a/tools/objtool/elf.h
2522 ++++ b/tools/objtool/elf.h
2523 +@@ -61,6 +61,7 @@ struct symbol {
2524 + unsigned char bind, type;
2525 + unsigned long offset;
2526 + unsigned int len;
2527 ++ struct symbol *pfunc, *cfunc;
2528 + };
2529 +
2530 + struct rela {
2531 +@@ -86,6 +87,7 @@ struct elf {
2532 + struct elf *elf_open(const char *name, int flags);
2533 + struct section *find_section_by_name(struct elf *elf, const char *name);
2534 + struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset);
2535 ++struct symbol *find_symbol_by_name(struct elf *elf, const char *name);
2536 + struct symbol *find_symbol_containing(struct section *sec, unsigned long offset);
2537 + struct rela *find_rela_by_dest(struct section *sec, unsigned long offset);
2538 + struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset,