From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.20 commit in: /
Date: Sat, 26 Jan 2019 15:13:38
Message-Id: 1548515583.c7d4e009e62d5b1adb52025357b644b14b04aeef.mpagano@gentoo
commit: c7d4e009e62d5b1adb52025357b644b14b04aeef
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Jan 26 15:13:03 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Jan 26 15:13:03 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c7d4e009

proj/linux-patches: Linux patch 4.20.5

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1004_linux-4.20.5.patch | 5548 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5552 insertions(+)

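For readers unfamiliar with the genpatches convention documented in 0000_README below: the 1000-series patches are incremental kernel.org updates applied in numeric order on top of a vanilla kernel tree. A minimal sketch of that workflow follows; the directory layout and the use of patch -p1 are illustrative assumptions by the editor, not part of this commit.

  # Apply the numbered genpatches in order on a vanilla 4.20 source tree.
  # Assumes the patches listed in 0000_README (ending with the
  # 1004_linux-4.20.5.patch added in this commit) sit in the parent
  # directory of the kernel sources.
  cd linux-4.20
  for p in ../10??_linux-4.20.*.patch; do
          patch -p1 < "$p"
  done
  # The Makefile should now report VERSION 4, PATCHLEVEL 20, SUBLEVEL 5.
  grep -E '^(VERSION|PATCHLEVEL|SUBLEVEL) =' Makefile
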
16 diff --git a/0000_README b/0000_README
17 index a9b0f09..32a3dd6 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -59,6 +59,10 @@ Patch: 1003_linux-4.20.4.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.20.4
23
24 +Patch: 1004_linux-4.20.5.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.20.5
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1004_linux-4.20.5.patch b/1004_linux-4.20.5.patch
33 new file mode 100644
34 index 0000000..380f45d
35 --- /dev/null
36 +++ b/1004_linux-4.20.5.patch
37 @@ -0,0 +1,5548 @@
38 +diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
39 +index 12a5e6e693b6..2a4e63f5122c 100644
40 +--- a/Documentation/filesystems/proc.txt
41 ++++ b/Documentation/filesystems/proc.txt
42 +@@ -496,7 +496,9 @@ manner. The codes are the following:
43 +
44 + Note that there is no guarantee that every flag and associated mnemonic will
45 + be present in all further kernel releases. Things get changed, the flags may
46 +-be vanished or the reverse -- new added.
47 ++be vanished or the reverse -- new added. Interpretation of their meaning
48 ++might change in future as well. So each consumer of these flags has to
49 ++follow each specific kernel version for the exact semantic.
50 +
51 + This file is only present if the CONFIG_MMU kernel configuration option is
52 + enabled.
53 +diff --git a/Makefile b/Makefile
54 +index a056dba5ede0..690f6a9d9f1b 100644
55 +--- a/Makefile
56 ++++ b/Makefile
57 +@@ -1,7 +1,7 @@
58 + # SPDX-License-Identifier: GPL-2.0
59 + VERSION = 4
60 + PATCHLEVEL = 20
61 +-SUBLEVEL = 4
62 ++SUBLEVEL = 5
63 + EXTRAVERSION =
64 + NAME = Shy Crocodile
65 +
66 +@@ -1035,6 +1035,8 @@ ifdef CONFIG_GDB_SCRIPTS
67 + endif
68 + +$(call if_changed,link-vmlinux)
69 +
70 ++targets := vmlinux
71 ++
72 + # Build samples along the rest of the kernel. This needs headers_install.
73 + ifdef CONFIG_SAMPLES
74 + vmlinux-dirs += samples
75 +@@ -1753,13 +1755,12 @@ quiet_cmd_depmod = DEPMOD $(KERNELRELEASE)
76 + cmd_crmodverdir = $(Q)mkdir -p $(MODVERDIR) \
77 + $(if $(KBUILD_MODULES),; rm -f $(MODVERDIR)/*)
78 +
79 +-# read all saved command lines
80 +-cmd_files := $(wildcard .*.cmd)
81 ++# read saved command lines for existing targets
82 ++existing-targets := $(wildcard $(sort $(targets)))
83 +
84 +-ifneq ($(cmd_files),)
85 +- $(cmd_files): ; # Do not try to update included dependency files
86 +- include $(cmd_files)
87 +-endif
88 ++cmd_files := $(foreach f,$(existing-targets),$(dir $(f)).$(notdir $(f)).cmd)
89 ++$(cmd_files): ; # Do not try to update included dependency files
90 ++-include $(cmd_files)
91 +
92 + endif # ifeq ($(config-targets),1)
93 + endif # ifeq ($(mixed-targets),1)
94 +diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
95 +index 6cb9fc7e9382..8978f60779c4 100644
96 +--- a/arch/arm64/Makefile
97 ++++ b/arch/arm64/Makefile
98 +@@ -18,7 +18,7 @@ ifeq ($(CONFIG_RELOCATABLE), y)
99 + # Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
100 + # for relative relocs, since this leads to better Image compression
101 + # with the relocation offsets always being zero.
102 +-LDFLAGS_vmlinux += -pie -shared -Bsymbolic \
103 ++LDFLAGS_vmlinux += -shared -Bsymbolic -z notext -z norelro \
104 + $(call ld-option, --no-apply-dynamic-relocs)
105 + endif
106 +
107 +diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
108 +index 6142402c2eb4..08b216c200c9 100644
109 +--- a/arch/arm64/include/asm/assembler.h
110 ++++ b/arch/arm64/include/asm/assembler.h
111 +@@ -377,27 +377,33 @@ alternative_endif
112 + * size: size of the region
113 + * Corrupts: kaddr, size, tmp1, tmp2
114 + */
115 ++ .macro __dcache_op_workaround_clean_cache, op, kaddr
116 ++alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
117 ++ dc \op, \kaddr
118 ++alternative_else
119 ++ dc civac, \kaddr
120 ++alternative_endif
121 ++ .endm
122 ++
123 + .macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
124 + dcache_line_size \tmp1, \tmp2
125 + add \size, \kaddr, \size
126 + sub \tmp2, \tmp1, #1
127 + bic \kaddr, \kaddr, \tmp2
128 + 9998:
129 +- .if (\op == cvau || \op == cvac)
130 +-alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
131 +- dc \op, \kaddr
132 +-alternative_else
133 +- dc civac, \kaddr
134 +-alternative_endif
135 +- .elseif (\op == cvap)
136 +-alternative_if ARM64_HAS_DCPOP
137 +- sys 3, c7, c12, 1, \kaddr // dc cvap
138 +-alternative_else
139 +- dc cvac, \kaddr
140 +-alternative_endif
141 ++ .ifc \op, cvau
142 ++ __dcache_op_workaround_clean_cache \op, \kaddr
143 ++ .else
144 ++ .ifc \op, cvac
145 ++ __dcache_op_workaround_clean_cache \op, \kaddr
146 ++ .else
147 ++ .ifc \op, cvap
148 ++ sys 3, c7, c12, 1, \kaddr // dc cvap
149 + .else
150 + dc \op, \kaddr
151 + .endif
152 ++ .endif
153 ++ .endif
154 + add \kaddr, \kaddr, \tmp1
155 + cmp \kaddr, \size
156 + b.lo 9998b
157 +diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
158 +index f0a5c9531e8b..778af0b7f7fd 100644
159 +--- a/arch/arm64/include/asm/memory.h
160 ++++ b/arch/arm64/include/asm/memory.h
161 +@@ -67,12 +67,17 @@
162 + /*
163 + * KASAN requires 1/8th of the kernel virtual address space for the shadow
164 + * region. KASAN can bloat the stack significantly, so double the (minimum)
165 +- * stack size when KASAN is in use.
166 ++ * stack size when KASAN is in use, and then double it again if KASAN_EXTRA is
167 ++ * on.
168 + */
169 + #ifdef CONFIG_KASAN
170 + #define KASAN_SHADOW_SCALE_SHIFT 3
171 + #define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
172 ++#ifdef CONFIG_KASAN_EXTRA
173 ++#define KASAN_THREAD_SHIFT 2
174 ++#else
175 + #define KASAN_THREAD_SHIFT 1
176 ++#endif /* CONFIG_KASAN_EXTRA */
177 + #else
178 + #define KASAN_SHADOW_SIZE (0)
179 + #define KASAN_THREAD_SHIFT 0
180 +diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
181 +index 6ad715d67df8..99622e5ad21b 100644
182 +--- a/arch/arm64/kernel/cpu_errata.c
183 ++++ b/arch/arm64/kernel/cpu_errata.c
184 +@@ -135,7 +135,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
185 + const char *hyp_vecs_start,
186 + const char *hyp_vecs_end)
187 + {
188 +- static DEFINE_SPINLOCK(bp_lock);
189 ++ static DEFINE_RAW_SPINLOCK(bp_lock);
190 + int cpu, slot = -1;
191 +
192 + /*
193 +@@ -147,7 +147,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
194 + return;
195 + }
196 +
197 +- spin_lock(&bp_lock);
198 ++ raw_spin_lock(&bp_lock);
199 + for_each_possible_cpu(cpu) {
200 + if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
201 + slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
202 +@@ -163,7 +163,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
203 +
204 + __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
205 + __this_cpu_write(bp_hardening_data.fn, fn);
206 +- spin_unlock(&bp_lock);
207 ++ raw_spin_unlock(&bp_lock);
208 + }
209 + #else
210 + #define __smccc_workaround_1_smc_start NULL
211 +diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
212 +index e213f8e867f6..8a91ac067d44 100644
213 +--- a/arch/arm64/kernel/perf_event.c
214 ++++ b/arch/arm64/kernel/perf_event.c
215 +@@ -1274,6 +1274,7 @@ static struct platform_driver armv8_pmu_driver = {
216 + .driver = {
217 + .name = ARMV8_PMU_PDEV_NAME,
218 + .of_match_table = armv8_pmu_of_device_ids,
219 ++ .suppress_bind_attrs = true,
220 + },
221 + .probe = armv8_pmu_device_probe,
222 + };
223 +diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
224 +index 03b00007553d..7fa008374907 100644
225 +--- a/arch/arm64/kernel/vmlinux.lds.S
226 ++++ b/arch/arm64/kernel/vmlinux.lds.S
227 +@@ -99,7 +99,8 @@ SECTIONS
228 + *(.discard)
229 + *(.discard.*)
230 + *(.interp .dynamic)
231 +- *(.dynsym .dynstr .hash)
232 ++ *(.dynsym .dynstr .hash .gnu.hash)
233 ++ *(.eh_frame)
234 + }
235 +
236 + . = KIMAGE_VADDR + TEXT_OFFSET;
237 +@@ -192,12 +193,12 @@ SECTIONS
238 +
239 + PERCPU_SECTION(L1_CACHE_BYTES)
240 +
241 +- .rela : ALIGN(8) {
242 ++ .rela.dyn : ALIGN(8) {
243 + *(.rela .rela*)
244 + }
245 +
246 +- __rela_offset = ABSOLUTE(ADDR(.rela) - KIMAGE_VADDR);
247 +- __rela_size = SIZEOF(.rela);
248 ++ __rela_offset = ABSOLUTE(ADDR(.rela.dyn) - KIMAGE_VADDR);
249 ++ __rela_size = SIZEOF(.rela.dyn);
250 +
251 + . = ALIGN(SEGMENT_ALIGN);
252 + __initdata_end = .;
253 +diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
254 +index 0c22ede52f90..a194fd0e837f 100644
255 +--- a/arch/arm64/mm/cache.S
256 ++++ b/arch/arm64/mm/cache.S
257 +@@ -212,6 +212,9 @@ ENDPROC(__dma_clean_area)
258 + * - size - size in question
259 + */
260 + ENTRY(__clean_dcache_area_pop)
261 ++ alternative_if_not ARM64_HAS_DCPOP
262 ++ b __clean_dcache_area_poc
263 ++ alternative_else_nop_endif
264 + dcache_by_line_op cvap, sy, x0, x1, x2, x3
265 + ret
266 + ENDPIPROC(__clean_dcache_area_pop)
267 +diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
268 +index 63527e585aac..fcb2ca30b6f1 100644
269 +--- a/arch/arm64/mm/kasan_init.c
270 ++++ b/arch/arm64/mm/kasan_init.c
271 +@@ -39,7 +39,7 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node)
272 + {
273 + void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
274 + __pa(MAX_DMA_ADDRESS),
275 +- MEMBLOCK_ALLOC_ACCESSIBLE, node);
276 ++ MEMBLOCK_ALLOC_KASAN, node);
277 + return __pa(p);
278 + }
279 +
280 +diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
281 +index 6207b41473a0..bfb3d8451c0a 100644
282 +--- a/arch/mips/Kconfig
283 ++++ b/arch/mips/Kconfig
284 +@@ -794,6 +794,7 @@ config SIBYTE_SWARM
285 + select SYS_SUPPORTS_HIGHMEM
286 + select SYS_SUPPORTS_LITTLE_ENDIAN
287 + select ZONE_DMA32 if 64BIT
288 ++ select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
289 +
290 + config SIBYTE_LITTLESUR
291 + bool "Sibyte BCM91250C2-LittleSur"
292 +@@ -814,6 +815,7 @@ config SIBYTE_SENTOSA
293 + select SYS_HAS_CPU_SB1
294 + select SYS_SUPPORTS_BIG_ENDIAN
295 + select SYS_SUPPORTS_LITTLE_ENDIAN
296 ++ select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
297 +
298 + config SIBYTE_BIGSUR
299 + bool "Sibyte BCM91480B-BigSur"
300 +@@ -826,6 +828,7 @@ config SIBYTE_BIGSUR
301 + select SYS_SUPPORTS_HIGHMEM
302 + select SYS_SUPPORTS_LITTLE_ENDIAN
303 + select ZONE_DMA32 if 64BIT
304 ++ select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
305 +
306 + config SNI_RM
307 + bool "SNI RM200/300/400"
308 +diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
309 +index dacbdb84516a..532b49b1dbb3 100644
310 +--- a/arch/mips/include/asm/cpu.h
311 ++++ b/arch/mips/include/asm/cpu.h
312 +@@ -248,8 +248,9 @@
313 + #define PRID_REV_LOONGSON3A_R1 0x0005
314 + #define PRID_REV_LOONGSON3B_R1 0x0006
315 + #define PRID_REV_LOONGSON3B_R2 0x0007
316 +-#define PRID_REV_LOONGSON3A_R2 0x0008
317 ++#define PRID_REV_LOONGSON3A_R2_0 0x0008
318 + #define PRID_REV_LOONGSON3A_R3_0 0x0009
319 ++#define PRID_REV_LOONGSON3A_R2_1 0x000c
320 + #define PRID_REV_LOONGSON3A_R3_1 0x000d
321 +
322 + /*
323 +diff --git a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
324 +index cbac603ced19..b5e288a12dfe 100644
325 +--- a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
326 ++++ b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
327 +@@ -31,7 +31,7 @@
328 + /* Enable STFill Buffer */
329 + mfc0 t0, CP0_PRID
330 + andi t0, (PRID_IMP_MASK | PRID_REV_MASK)
331 +- slti t0, (PRID_IMP_LOONGSON_64 | PRID_REV_LOONGSON3A_R2)
332 ++ slti t0, (PRID_IMP_LOONGSON_64 | PRID_REV_LOONGSON3A_R2_0)
333 + bnez t0, 1f
334 + mfc0 t0, CP0_CONFIG6
335 + or t0, 0x100
336 +@@ -60,7 +60,7 @@
337 + /* Enable STFill Buffer */
338 + mfc0 t0, CP0_PRID
339 + andi t0, (PRID_IMP_MASK | PRID_REV_MASK)
340 +- slti t0, (PRID_IMP_LOONGSON_64 | PRID_REV_LOONGSON3A_R2)
341 ++ slti t0, (PRID_IMP_LOONGSON_64 | PRID_REV_LOONGSON3A_R2_0)
342 + bnez t0, 1f
343 + mfc0 t0, CP0_CONFIG6
344 + or t0, 0x100
345 +diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
346 +index d535fc706a8b..f70cf6447cfb 100644
347 +--- a/arch/mips/kernel/cpu-probe.c
348 ++++ b/arch/mips/kernel/cpu-probe.c
349 +@@ -1843,7 +1843,8 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
350 + switch (c->processor_id & PRID_IMP_MASK) {
351 + case PRID_IMP_LOONGSON_64: /* Loongson-2/3 */
352 + switch (c->processor_id & PRID_REV_MASK) {
353 +- case PRID_REV_LOONGSON3A_R2:
354 ++ case PRID_REV_LOONGSON3A_R2_0:
355 ++ case PRID_REV_LOONGSON3A_R2_1:
356 + c->cputype = CPU_LOONGSON3;
357 + __cpu_name[cpu] = "ICT Loongson-3";
358 + set_elf_platform(cpu, "loongson3a");
359 +diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
360 +index 046846999efd..909b7a87c89c 100644
361 +--- a/arch/mips/kernel/idle.c
362 ++++ b/arch/mips/kernel/idle.c
363 +@@ -183,7 +183,7 @@ void __init check_wait(void)
364 + cpu_wait = r4k_wait;
365 + break;
366 + case CPU_LOONGSON3:
367 +- if ((c->processor_id & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2)
368 ++ if ((c->processor_id & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2_0)
369 + cpu_wait = r4k_wait;
370 + break;
371 +
372 +diff --git a/arch/mips/loongson64/common/env.c b/arch/mips/loongson64/common/env.c
373 +index 8f68ee02a8c2..72e5f8fb2b35 100644
374 +--- a/arch/mips/loongson64/common/env.c
375 ++++ b/arch/mips/loongson64/common/env.c
376 +@@ -197,7 +197,8 @@ void __init prom_init_env(void)
377 + cpu_clock_freq = 797000000;
378 + break;
379 + case PRID_REV_LOONGSON3A_R1:
380 +- case PRID_REV_LOONGSON3A_R2:
381 ++ case PRID_REV_LOONGSON3A_R2_0:
382 ++ case PRID_REV_LOONGSON3A_R2_1:
383 + case PRID_REV_LOONGSON3A_R3_0:
384 + case PRID_REV_LOONGSON3A_R3_1:
385 + cpu_clock_freq = 900000000;
386 +diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c
387 +index b5c1e0aa955e..8fba0aa48bf4 100644
388 +--- a/arch/mips/loongson64/loongson-3/smp.c
389 ++++ b/arch/mips/loongson64/loongson-3/smp.c
390 +@@ -682,7 +682,8 @@ void play_dead(void)
391 + play_dead_at_ckseg1 =
392 + (void *)CKSEG1ADDR((unsigned long)loongson3a_r1_play_dead);
393 + break;
394 +- case PRID_REV_LOONGSON3A_R2:
395 ++ case PRID_REV_LOONGSON3A_R2_0:
396 ++ case PRID_REV_LOONGSON3A_R2_1:
397 + case PRID_REV_LOONGSON3A_R3_0:
398 + case PRID_REV_LOONGSON3A_R3_1:
399 + play_dead_at_ckseg1 =
400 +diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
401 +index 2a6ad461286f..96d666a0f4a0 100644
402 +--- a/arch/mips/mm/c-r4k.c
403 ++++ b/arch/mips/mm/c-r4k.c
404 +@@ -1381,7 +1381,7 @@ static void probe_pcache(void)
405 + c->dcache.ways *
406 + c->dcache.linesz;
407 + c->dcache.waybit = 0;
408 +- if ((prid & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2)
409 ++ if ((prid & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2_0)
410 + c->options |= MIPS_CPU_PREFETCH;
411 + break;
412 +
413 +diff --git a/arch/mips/sibyte/common/Makefile b/arch/mips/sibyte/common/Makefile
414 +index b3d6bf23a662..3ef3fb658136 100644
415 +--- a/arch/mips/sibyte/common/Makefile
416 ++++ b/arch/mips/sibyte/common/Makefile
417 +@@ -1,4 +1,5 @@
418 + obj-y := cfe.o
419 ++obj-$(CONFIG_SWIOTLB) += dma.o
420 + obj-$(CONFIG_SIBYTE_BUS_WATCHER) += bus_watcher.o
421 + obj-$(CONFIG_SIBYTE_CFE_CONSOLE) += cfe_console.o
422 + obj-$(CONFIG_SIBYTE_TBPROF) += sb_tbprof.o
423 +diff --git a/arch/mips/sibyte/common/dma.c b/arch/mips/sibyte/common/dma.c
424 +new file mode 100644
425 +index 000000000000..eb47a94f3583
426 +--- /dev/null
427 ++++ b/arch/mips/sibyte/common/dma.c
428 +@@ -0,0 +1,14 @@
429 ++// SPDX-License-Identifier: GPL-2.0+
430 ++/*
431 ++ * DMA support for Broadcom SiByte platforms.
432 ++ *
433 ++ * Copyright (c) 2018 Maciej W. Rozycki
434 ++ */
435 ++
436 ++#include <linux/swiotlb.h>
437 ++#include <asm/bootinfo.h>
438 ++
439 ++void __init plat_swiotlb_setup(void)
440 ++{
441 ++ swiotlb_init(1);
442 ++}
443 +diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
444 +index 401d2ecbebc5..f8176ae3a5a7 100644
445 +--- a/arch/powerpc/kvm/book3s_hv_nested.c
446 ++++ b/arch/powerpc/kvm/book3s_hv_nested.c
447 +@@ -1220,6 +1220,8 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
448 + return ret;
449 + shift = kvmppc_radix_level_to_shift(level);
450 + }
451 ++ /* Align gfn to the start of the page */
452 ++ gfn = (gpa & ~((1UL << shift) - 1)) >> PAGE_SHIFT;
453 +
454 + /* 3. Compute the pte we need to insert for nest_gpa -> host r_addr */
455 +
456 +@@ -1227,6 +1229,9 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
457 + perm |= gpte.may_read ? 0UL : _PAGE_READ;
458 + perm |= gpte.may_write ? 0UL : _PAGE_WRITE;
459 + perm |= gpte.may_execute ? 0UL : _PAGE_EXEC;
460 ++ /* Only set accessed/dirty (rc) bits if set in host and l1 guest ptes */
461 ++ perm |= (gpte.rc & _PAGE_ACCESSED) ? 0UL : _PAGE_ACCESSED;
462 ++ perm |= ((gpte.rc & _PAGE_DIRTY) && writing) ? 0UL : _PAGE_DIRTY;
463 + pte = __pte(pte_val(pte) & ~perm);
464 +
465 + /* What size pte can we insert? */
466 +diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
467 +index 36b8dc47a3c3..b566203d09c5 100644
468 +--- a/arch/powerpc/xmon/xmon.c
469 ++++ b/arch/powerpc/xmon/xmon.c
470 +@@ -75,6 +75,9 @@ static int xmon_gate;
471 + #define xmon_owner 0
472 + #endif /* CONFIG_SMP */
473 +
474 ++#ifdef CONFIG_PPC_PSERIES
475 ++static int set_indicator_token = RTAS_UNKNOWN_SERVICE;
476 ++#endif
477 + static unsigned long in_xmon __read_mostly = 0;
478 + static int xmon_on = IS_ENABLED(CONFIG_XMON_DEFAULT);
479 +
480 +@@ -358,7 +361,6 @@ static inline void disable_surveillance(void)
481 + #ifdef CONFIG_PPC_PSERIES
482 + /* Since this can't be a module, args should end up below 4GB. */
483 + static struct rtas_args args;
484 +- int token;
485 +
486 + /*
487 + * At this point we have got all the cpus we can into
488 +@@ -367,11 +369,11 @@ static inline void disable_surveillance(void)
489 + * If we did try to take rtas.lock there would be a
490 + * real possibility of deadlock.
491 + */
492 +- token = rtas_token("set-indicator");
493 +- if (token == RTAS_UNKNOWN_SERVICE)
494 ++ if (set_indicator_token == RTAS_UNKNOWN_SERVICE)
495 + return;
496 +
497 +- rtas_call_unlocked(&args, token, 3, 1, NULL, SURVEILLANCE_TOKEN, 0, 0);
498 ++ rtas_call_unlocked(&args, set_indicator_token, 3, 1, NULL,
499 ++ SURVEILLANCE_TOKEN, 0, 0);
500 +
501 + #endif /* CONFIG_PPC_PSERIES */
502 + }
503 +@@ -3688,6 +3690,14 @@ static void xmon_init(int enable)
504 + __debugger_iabr_match = xmon_iabr_match;
505 + __debugger_break_match = xmon_break_match;
506 + __debugger_fault_handler = xmon_fault_handler;
507 ++
508 ++#ifdef CONFIG_PPC_PSERIES
509 ++ /*
510 ++ * Get the token here to avoid trying to get a lock
511 ++ * during the crash, causing a deadlock.
512 ++ */
513 ++ set_indicator_token = rtas_token("set-indicator");
514 ++#endif
515 + } else {
516 + __debugger = NULL;
517 + __debugger_ipi = NULL;
518 +diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
519 +index 3de69330e6c5..afbc87206886 100644
520 +--- a/arch/x86/include/asm/traps.h
521 ++++ b/arch/x86/include/asm/traps.h
522 +@@ -104,9 +104,9 @@ extern int panic_on_unrecovered_nmi;
523 +
524 + void math_emulate(struct math_emu_info *);
525 + #ifndef CONFIG_X86_32
526 +-asmlinkage void smp_thermal_interrupt(void);
527 +-asmlinkage void smp_threshold_interrupt(void);
528 +-asmlinkage void smp_deferred_error_interrupt(void);
529 ++asmlinkage void smp_thermal_interrupt(struct pt_regs *regs);
530 ++asmlinkage void smp_threshold_interrupt(struct pt_regs *regs);
531 ++asmlinkage void smp_deferred_error_interrupt(struct pt_regs *regs);
532 + #endif
533 +
534 + extern void ist_enter(struct pt_regs *regs);
535 +diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
536 +index 44272b7107ad..2d0a565fd0bb 100644
537 +--- a/arch/x86/kernel/cpu/intel_rdt.c
538 ++++ b/arch/x86/kernel/cpu/intel_rdt.c
539 +@@ -421,7 +421,7 @@ struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
540 + struct list_head *l;
541 +
542 + if (id < 0)
543 +- return ERR_PTR(id);
544 ++ return ERR_PTR(-ENODEV);
545 +
546 + list_for_each(l, &r->domains) {
547 + d = list_entry(l, struct rdt_domain, list);
548 +diff --git a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
549 +index efa4a519f5e5..c8b72aff55e0 100644
550 +--- a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
551 ++++ b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
552 +@@ -467,7 +467,7 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg)
553 +
554 + r = &rdt_resources_all[resid];
555 + d = rdt_find_domain(r, domid, NULL);
556 +- if (!d) {
557 ++ if (IS_ERR_OR_NULL(d)) {
558 + ret = -ENOENT;
559 + goto out;
560 + }
561 +diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
562 +index f27b8115ffa2..951c61367688 100644
563 +--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
564 ++++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
565 +@@ -1029,7 +1029,7 @@ static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d,
566 + * peer RDT CDP resource. Hence the WARN.
567 + */
568 + _d_cdp = rdt_find_domain(_r_cdp, d->id, NULL);
569 +- if (WARN_ON(!_d_cdp)) {
570 ++ if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) {
571 + _r_cdp = NULL;
572 + ret = -EINVAL;
573 + }
574 +diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
575 +index e12454e21b8a..9f915a8791cc 100644
576 +--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
577 ++++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
578 +@@ -23,6 +23,7 @@
579 + #include <linux/string.h>
580 +
581 + #include <asm/amd_nb.h>
582 ++#include <asm/traps.h>
583 + #include <asm/apic.h>
584 + #include <asm/mce.h>
585 + #include <asm/msr.h>
586 +@@ -99,7 +100,7 @@ static u32 smca_bank_addrs[MAX_NR_BANKS][NR_BLOCKS] __ro_after_init =
587 + [0 ... MAX_NR_BANKS - 1] = { [0 ... NR_BLOCKS - 1] = -1 }
588 + };
589 +
590 +-const char *smca_get_name(enum smca_bank_types t)
591 ++static const char *smca_get_name(enum smca_bank_types t)
592 + {
593 + if (t >= N_SMCA_BANK_TYPES)
594 + return NULL;
595 +@@ -824,7 +825,7 @@ static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc)
596 + mce_log(&m);
597 + }
598 +
599 +-asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(void)
600 ++asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(struct pt_regs *regs)
601 + {
602 + entering_irq();
603 + trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
604 +diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
605 +index 2da67b70ba98..ee229ceee745 100644
606 +--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
607 ++++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
608 +@@ -25,6 +25,7 @@
609 + #include <linux/cpu.h>
610 +
611 + #include <asm/processor.h>
612 ++#include <asm/traps.h>
613 + #include <asm/apic.h>
614 + #include <asm/mce.h>
615 + #include <asm/msr.h>
616 +@@ -390,7 +391,7 @@ static void unexpected_thermal_interrupt(void)
617 +
618 + static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;
619 +
620 +-asmlinkage __visible void __irq_entry smp_thermal_interrupt(struct pt_regs *r)
621 ++asmlinkage __visible void __irq_entry smp_thermal_interrupt(struct pt_regs *regs)
622 + {
623 + entering_irq();
624 + trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
625 +diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mcheck/threshold.c
626 +index 2b584b319eff..c21e0a1efd0f 100644
627 +--- a/arch/x86/kernel/cpu/mcheck/threshold.c
628 ++++ b/arch/x86/kernel/cpu/mcheck/threshold.c
629 +@@ -6,6 +6,7 @@
630 + #include <linux/kernel.h>
631 +
632 + #include <asm/irq_vectors.h>
633 ++#include <asm/traps.h>
634 + #include <asm/apic.h>
635 + #include <asm/mce.h>
636 + #include <asm/trace/irq_vectors.h>
637 +@@ -18,7 +19,7 @@ static void default_threshold_interrupt(void)
638 +
639 + void (*mce_threshold_vector)(void) = default_threshold_interrupt;
640 +
641 +-asmlinkage __visible void __irq_entry smp_threshold_interrupt(void)
642 ++asmlinkage __visible void __irq_entry smp_threshold_interrupt(struct pt_regs *regs)
643 + {
644 + entering_irq();
645 + trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR);
646 +diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
647 +index a9134d1910b9..ccd1f2a8e557 100644
648 +--- a/arch/x86/kernel/smpboot.c
649 ++++ b/arch/x86/kernel/smpboot.c
650 +@@ -1347,7 +1347,7 @@ void __init calculate_max_logical_packages(void)
651 + * extrapolate the boot cpu's data to all packages.
652 + */
653 + ncpus = cpu_data(0).booted_cores * topology_max_smt_threads();
654 +- __max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus);
655 ++ __max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
656 + pr_info("Max logical packages: %u\n", __max_logical_packages);
657 + }
658 +
659 +diff --git a/crypto/ecc.c b/crypto/ecc.c
660 +index 8facafd67802..adcce310f646 100644
661 +--- a/crypto/ecc.c
662 ++++ b/crypto/ecc.c
663 +@@ -842,15 +842,23 @@ static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime,
664 +
665 + static void ecc_point_mult(struct ecc_point *result,
666 + const struct ecc_point *point, const u64 *scalar,
667 +- u64 *initial_z, u64 *curve_prime,
668 ++ u64 *initial_z, const struct ecc_curve *curve,
669 + unsigned int ndigits)
670 + {
671 + /* R0 and R1 */
672 + u64 rx[2][ECC_MAX_DIGITS];
673 + u64 ry[2][ECC_MAX_DIGITS];
674 + u64 z[ECC_MAX_DIGITS];
675 ++ u64 sk[2][ECC_MAX_DIGITS];
676 ++ u64 *curve_prime = curve->p;
677 + int i, nb;
678 +- int num_bits = vli_num_bits(scalar, ndigits);
679 ++ int num_bits;
680 ++ int carry;
681 ++
682 ++ carry = vli_add(sk[0], scalar, curve->n, ndigits);
683 ++ vli_add(sk[1], sk[0], curve->n, ndigits);
684 ++ scalar = sk[!carry];
685 ++ num_bits = sizeof(u64) * ndigits * 8 + 1;
686 +
687 + vli_set(rx[1], point->x, ndigits);
688 + vli_set(ry[1], point->y, ndigits);
689 +@@ -1004,7 +1012,7 @@ int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits,
690 + goto out;
691 + }
692 +
693 +- ecc_point_mult(pk, &curve->g, priv, NULL, curve->p, ndigits);
694 ++ ecc_point_mult(pk, &curve->g, priv, NULL, curve, ndigits);
695 + if (ecc_point_is_zero(pk)) {
696 + ret = -EAGAIN;
697 + goto err_free_point;
698 +@@ -1090,7 +1098,7 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
699 + goto err_alloc_product;
700 + }
701 +
702 +- ecc_point_mult(product, pk, priv, rand_z, curve->p, ndigits);
703 ++ ecc_point_mult(product, pk, priv, rand_z, curve, ndigits);
704 +
705 + ecc_swap_digits(product->x, secret, ndigits);
706 +
707 +diff --git a/drivers/base/bus.c b/drivers/base/bus.c
708 +index 8bfd27ec73d6..585e2e1c9c8f 100644
709 +--- a/drivers/base/bus.c
710 ++++ b/drivers/base/bus.c
711 +@@ -31,6 +31,9 @@ static struct kset *system_kset;
712 +
713 + #define to_drv_attr(_attr) container_of(_attr, struct driver_attribute, attr)
714 +
715 ++#define DRIVER_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
716 ++ struct driver_attribute driver_attr_##_name = \
717 ++ __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
718 +
719 + static int __must_check bus_rescan_devices_helper(struct device *dev,
720 + void *data);
721 +@@ -195,7 +198,7 @@ static ssize_t unbind_store(struct device_driver *drv, const char *buf,
722 + bus_put(bus);
723 + return err;
724 + }
725 +-static DRIVER_ATTR_WO(unbind);
726 ++static DRIVER_ATTR_IGNORE_LOCKDEP(unbind, S_IWUSR, NULL, unbind_store);
727 +
728 + /*
729 + * Manually attach a device to a driver.
730 +@@ -231,7 +234,7 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf,
731 + bus_put(bus);
732 + return err;
733 + }
734 +-static DRIVER_ATTR_WO(bind);
735 ++static DRIVER_ATTR_IGNORE_LOCKDEP(bind, S_IWUSR, NULL, bind_store);
736 +
737 + static ssize_t show_drivers_autoprobe(struct bus_type *bus, char *buf)
738 + {
739 +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
740 +index 7439a7eb50ac..05c8a7ed859c 100644
741 +--- a/drivers/bluetooth/btusb.c
742 ++++ b/drivers/bluetooth/btusb.c
743 +@@ -344,6 +344,7 @@ static const struct usb_device_id blacklist_table[] = {
744 + /* Intel Bluetooth devices */
745 + { USB_DEVICE(0x8087, 0x0025), .driver_info = BTUSB_INTEL_NEW },
746 + { USB_DEVICE(0x8087, 0x0026), .driver_info = BTUSB_INTEL_NEW },
747 ++ { USB_DEVICE(0x8087, 0x0029), .driver_info = BTUSB_INTEL_NEW },
748 + { USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
749 + { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
750 + { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
751 +@@ -2055,6 +2056,35 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
752 + return -EILSEQ;
753 + }
754 +
755 ++static bool btusb_setup_intel_new_get_fw_name(struct intel_version *ver,
756 ++ struct intel_boot_params *params,
757 ++ char *fw_name, size_t len,
758 ++ const char *suffix)
759 ++{
760 ++ switch (ver->hw_variant) {
761 ++ case 0x0b: /* SfP */
762 ++ case 0x0c: /* WsP */
763 ++ snprintf(fw_name, len, "intel/ibt-%u-%u.%s",
764 ++ le16_to_cpu(ver->hw_variant),
765 ++ le16_to_cpu(params->dev_revid),
766 ++ suffix);
767 ++ break;
768 ++ case 0x11: /* JfP */
769 ++ case 0x12: /* ThP */
770 ++ case 0x13: /* HrP */
771 ++ case 0x14: /* CcP */
772 ++ snprintf(fw_name, len, "intel/ibt-%u-%u-%u.%s",
773 ++ le16_to_cpu(ver->hw_variant),
774 ++ le16_to_cpu(ver->hw_revision),
775 ++ le16_to_cpu(ver->fw_revision),
776 ++ suffix);
777 ++ break;
778 ++ default:
779 ++ return false;
780 ++ }
781 ++ return true;
782 ++}
783 ++
784 + static int btusb_setup_intel_new(struct hci_dev *hdev)
785 + {
786 + struct btusb_data *data = hci_get_drvdata(hdev);
787 +@@ -2106,7 +2136,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
788 + case 0x11: /* JfP */
789 + case 0x12: /* ThP */
790 + case 0x13: /* HrP */
791 +- case 0x14: /* QnJ, IcP */
792 ++ case 0x14: /* CcP */
793 + break;
794 + default:
795 + bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
796 +@@ -2190,23 +2220,9 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
797 + * ibt-<hw_variant>-<hw_revision>-<fw_revision>.sfi.
798 + *
799 + */
800 +- switch (ver.hw_variant) {
801 +- case 0x0b: /* SfP */
802 +- case 0x0c: /* WsP */
803 +- snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi",
804 +- le16_to_cpu(ver.hw_variant),
805 +- le16_to_cpu(params.dev_revid));
806 +- break;
807 +- case 0x11: /* JfP */
808 +- case 0x12: /* ThP */
809 +- case 0x13: /* HrP */
810 +- case 0x14: /* QnJ, IcP */
811 +- snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.sfi",
812 +- le16_to_cpu(ver.hw_variant),
813 +- le16_to_cpu(ver.hw_revision),
814 +- le16_to_cpu(ver.fw_revision));
815 +- break;
816 +- default:
817 ++ err = btusb_setup_intel_new_get_fw_name(&ver, &params, fwname,
818 ++ sizeof(fwname), "sfi");
819 ++ if (!err) {
820 + bt_dev_err(hdev, "Unsupported Intel firmware naming");
821 + return -EINVAL;
822 + }
823 +@@ -2222,23 +2238,9 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
824 + /* Save the DDC file name for later use to apply once the firmware
825 + * downloading is done.
826 + */
827 +- switch (ver.hw_variant) {
828 +- case 0x0b: /* SfP */
829 +- case 0x0c: /* WsP */
830 +- snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.ddc",
831 +- le16_to_cpu(ver.hw_variant),
832 +- le16_to_cpu(params.dev_revid));
833 +- break;
834 +- case 0x11: /* JfP */
835 +- case 0x12: /* ThP */
836 +- case 0x13: /* HrP */
837 +- case 0x14: /* QnJ, IcP */
838 +- snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.ddc",
839 +- le16_to_cpu(ver.hw_variant),
840 +- le16_to_cpu(ver.hw_revision),
841 +- le16_to_cpu(ver.fw_revision));
842 +- break;
843 +- default:
844 ++ err = btusb_setup_intel_new_get_fw_name(&ver, &params, fwname,
845 ++ sizeof(fwname), "ddc");
846 ++ if (!err) {
847 + bt_dev_err(hdev, "Unsupported Intel firmware naming");
848 + return -EINVAL;
849 + }
850 +diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
851 +index a74ce885b541..c518659b4d9f 100644
852 +--- a/drivers/char/ipmi/ipmi_msghandler.c
853 ++++ b/drivers/char/ipmi/ipmi_msghandler.c
854 +@@ -32,6 +32,7 @@
855 + #include <linux/moduleparam.h>
856 + #include <linux/workqueue.h>
857 + #include <linux/uuid.h>
858 ++#include <linux/nospec.h>
859 +
860 + #define IPMI_DRIVER_VERSION "39.2"
861 +
862 +@@ -62,7 +63,8 @@ static void ipmi_debug_msg(const char *title, unsigned char *data,
863 + { }
864 + #endif
865 +
866 +-static int initialized;
867 ++static bool initialized;
868 ++static bool drvregistered;
869 +
870 + enum ipmi_panic_event_op {
871 + IPMI_SEND_PANIC_EVENT_NONE,
872 +@@ -612,7 +614,7 @@ static DEFINE_MUTEX(ipmidriver_mutex);
873 +
874 + static LIST_HEAD(ipmi_interfaces);
875 + static DEFINE_MUTEX(ipmi_interfaces_mutex);
876 +-DEFINE_STATIC_SRCU(ipmi_interfaces_srcu);
877 ++struct srcu_struct ipmi_interfaces_srcu;
878 +
879 + /*
880 + * List of watchers that want to know when smi's are added and deleted.
881 +@@ -720,7 +722,15 @@ struct watcher_entry {
882 + int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
883 + {
884 + struct ipmi_smi *intf;
885 +- int index;
886 ++ int index, rv;
887 ++
888 ++ /*
889 ++ * Make sure the driver is actually initialized, this handles
890 ++ * problems with initialization order.
891 ++ */
892 ++ rv = ipmi_init_msghandler();
893 ++ if (rv)
894 ++ return rv;
895 +
896 + mutex_lock(&smi_watchers_mutex);
897 +
898 +@@ -884,7 +894,7 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
899 +
900 + if (user) {
901 + user->handler->ipmi_recv_hndl(msg, user->handler_data);
902 +- release_ipmi_user(msg->user, index);
903 ++ release_ipmi_user(user, index);
904 + } else {
905 + /* User went away, give up. */
906 + ipmi_free_recv_msg(msg);
907 +@@ -1076,7 +1086,7 @@ int ipmi_create_user(unsigned int if_num,
908 + {
909 + unsigned long flags;
910 + struct ipmi_user *new_user;
911 +- int rv = 0, index;
912 ++ int rv, index;
913 + struct ipmi_smi *intf;
914 +
915 + /*
916 +@@ -1094,18 +1104,9 @@ int ipmi_create_user(unsigned int if_num,
917 + * Make sure the driver is actually initialized, this handles
918 + * problems with initialization order.
919 + */
920 +- if (!initialized) {
921 +- rv = ipmi_init_msghandler();
922 +- if (rv)
923 +- return rv;
924 +-
925 +- /*
926 +- * The init code doesn't return an error if it was turned
927 +- * off, but it won't initialize. Check that.
928 +- */
929 +- if (!initialized)
930 +- return -ENODEV;
931 +- }
932 ++ rv = ipmi_init_msghandler();
933 ++ if (rv)
934 ++ return rv;
935 +
936 + new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
937 + if (!new_user)
938 +@@ -1183,6 +1184,7 @@ EXPORT_SYMBOL(ipmi_get_smi_info);
939 + static void free_user(struct kref *ref)
940 + {
941 + struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
942 ++ cleanup_srcu_struct(&user->release_barrier);
943 + kfree(user);
944 + }
945 +
946 +@@ -1259,7 +1261,6 @@ int ipmi_destroy_user(struct ipmi_user *user)
947 + {
948 + _ipmi_destroy_user(user);
949 +
950 +- cleanup_srcu_struct(&user->release_barrier);
951 + kref_put(&user->refcount, free_user);
952 +
953 + return 0;
954 +@@ -1298,10 +1299,12 @@ int ipmi_set_my_address(struct ipmi_user *user,
955 + if (!user)
956 + return -ENODEV;
957 +
958 +- if (channel >= IPMI_MAX_CHANNELS)
959 ++ if (channel >= IPMI_MAX_CHANNELS) {
960 + rv = -EINVAL;
961 +- else
962 ++ } else {
963 ++ channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
964 + user->intf->addrinfo[channel].address = address;
965 ++ }
966 + release_ipmi_user(user, index);
967 +
968 + return rv;
969 +@@ -1318,10 +1321,12 @@ int ipmi_get_my_address(struct ipmi_user *user,
970 + if (!user)
971 + return -ENODEV;
972 +
973 +- if (channel >= IPMI_MAX_CHANNELS)
974 ++ if (channel >= IPMI_MAX_CHANNELS) {
975 + rv = -EINVAL;
976 +- else
977 ++ } else {
978 ++ channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
979 + *address = user->intf->addrinfo[channel].address;
980 ++ }
981 + release_ipmi_user(user, index);
982 +
983 + return rv;
984 +@@ -1338,10 +1343,12 @@ int ipmi_set_my_LUN(struct ipmi_user *user,
985 + if (!user)
986 + return -ENODEV;
987 +
988 +- if (channel >= IPMI_MAX_CHANNELS)
989 ++ if (channel >= IPMI_MAX_CHANNELS) {
990 + rv = -EINVAL;
991 +- else
992 ++ } else {
993 ++ channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
994 + user->intf->addrinfo[channel].lun = LUN & 0x3;
995 ++ }
996 + release_ipmi_user(user, index);
997 +
998 + return rv;
999 +@@ -1358,10 +1365,12 @@ int ipmi_get_my_LUN(struct ipmi_user *user,
1000 + if (!user)
1001 + return -ENODEV;
1002 +
1003 +- if (channel >= IPMI_MAX_CHANNELS)
1004 ++ if (channel >= IPMI_MAX_CHANNELS) {
1005 + rv = -EINVAL;
1006 +- else
1007 ++ } else {
1008 ++ channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1009 + *address = user->intf->addrinfo[channel].lun;
1010 ++ }
1011 + release_ipmi_user(user, index);
1012 +
1013 + return rv;
1014 +@@ -2184,6 +2193,7 @@ static int check_addr(struct ipmi_smi *intf,
1015 + {
1016 + if (addr->channel >= IPMI_MAX_CHANNELS)
1017 + return -EINVAL;
1018 ++ addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS);
1019 + *lun = intf->addrinfo[addr->channel].lun;
1020 + *saddr = intf->addrinfo[addr->channel].address;
1021 + return 0;
1022 +@@ -3291,17 +3301,9 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
1023 + * Make sure the driver is actually initialized, this handles
1024 + * problems with initialization order.
1025 + */
1026 +- if (!initialized) {
1027 +- rv = ipmi_init_msghandler();
1028 +- if (rv)
1029 +- return rv;
1030 +- /*
1031 +- * The init code doesn't return an error if it was turned
1032 +- * off, but it won't initialize. Check that.
1033 +- */
1034 +- if (!initialized)
1035 +- return -ENODEV;
1036 +- }
1037 ++ rv = ipmi_init_msghandler();
1038 ++ if (rv)
1039 ++ return rv;
1040 +
1041 + intf = kzalloc(sizeof(*intf), GFP_KERNEL);
1042 + if (!intf)
1043 +@@ -5017,6 +5019,22 @@ static int panic_event(struct notifier_block *this,
1044 + return NOTIFY_DONE;
1045 + }
1046 +
1047 ++/* Must be called with ipmi_interfaces_mutex held. */
1048 ++static int ipmi_register_driver(void)
1049 ++{
1050 ++ int rv;
1051 ++
1052 ++ if (drvregistered)
1053 ++ return 0;
1054 ++
1055 ++ rv = driver_register(&ipmidriver.driver);
1056 ++ if (rv)
1057 ++ pr_err("Could not register IPMI driver\n");
1058 ++ else
1059 ++ drvregistered = true;
1060 ++ return rv;
1061 ++}
1062 ++
1063 + static struct notifier_block panic_block = {
1064 + .notifier_call = panic_event,
1065 + .next = NULL,
1066 +@@ -5027,66 +5045,75 @@ static int ipmi_init_msghandler(void)
1067 + {
1068 + int rv;
1069 +
1070 ++ mutex_lock(&ipmi_interfaces_mutex);
1071 ++ rv = ipmi_register_driver();
1072 ++ if (rv)
1073 ++ goto out;
1074 + if (initialized)
1075 +- return 0;
1076 +-
1077 +- rv = driver_register(&ipmidriver.driver);
1078 +- if (rv) {
1079 +- pr_err("Could not register IPMI driver\n");
1080 +- return rv;
1081 +- }
1082 ++ goto out;
1083 +
1084 +- pr_info("version " IPMI_DRIVER_VERSION "\n");
1085 ++ init_srcu_struct(&ipmi_interfaces_srcu);
1086 +
1087 + timer_setup(&ipmi_timer, ipmi_timeout, 0);
1088 + mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
1089 +
1090 + atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
1091 +
1092 +- initialized = 1;
1093 ++ initialized = true;
1094 +
1095 +- return 0;
1096 ++out:
1097 ++ mutex_unlock(&ipmi_interfaces_mutex);
1098 ++ return rv;
1099 + }
1100 +
1101 + static int __init ipmi_init_msghandler_mod(void)
1102 + {
1103 +- ipmi_init_msghandler();
1104 +- return 0;
1105 ++ int rv;
1106 ++
1107 ++ pr_info("version " IPMI_DRIVER_VERSION "\n");
1108 ++
1109 ++ mutex_lock(&ipmi_interfaces_mutex);
1110 ++ rv = ipmi_register_driver();
1111 ++ mutex_unlock(&ipmi_interfaces_mutex);
1112 ++
1113 ++ return rv;
1114 + }
1115 +
1116 + static void __exit cleanup_ipmi(void)
1117 + {
1118 + int count;
1119 +
1120 +- if (!initialized)
1121 +- return;
1122 +-
1123 +- atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
1124 ++ if (initialized) {
1125 ++ atomic_notifier_chain_unregister(&panic_notifier_list,
1126 ++ &panic_block);
1127 +
1128 +- /*
1129 +- * This can't be called if any interfaces exist, so no worry
1130 +- * about shutting down the interfaces.
1131 +- */
1132 ++ /*
1133 ++ * This can't be called if any interfaces exist, so no worry
1134 ++ * about shutting down the interfaces.
1135 ++ */
1136 +
1137 +- /*
1138 +- * Tell the timer to stop, then wait for it to stop. This
1139 +- * avoids problems with race conditions removing the timer
1140 +- * here.
1141 +- */
1142 +- atomic_inc(&stop_operation);
1143 +- del_timer_sync(&ipmi_timer);
1144 ++ /*
1145 ++ * Tell the timer to stop, then wait for it to stop. This
1146 ++ * avoids problems with race conditions removing the timer
1147 ++ * here.
1148 ++ */
1149 ++ atomic_inc(&stop_operation);
1150 ++ del_timer_sync(&ipmi_timer);
1151 +
1152 +- driver_unregister(&ipmidriver.driver);
1153 ++ initialized = false;
1154 +
1155 +- initialized = 0;
1156 ++ /* Check for buffer leaks. */
1157 ++ count = atomic_read(&smi_msg_inuse_count);
1158 ++ if (count != 0)
1159 ++ pr_warn("SMI message count %d at exit\n", count);
1160 ++ count = atomic_read(&recv_msg_inuse_count);
1161 ++ if (count != 0)
1162 ++ pr_warn("recv message count %d at exit\n", count);
1163 +
1164 +- /* Check for buffer leaks. */
1165 +- count = atomic_read(&smi_msg_inuse_count);
1166 +- if (count != 0)
1167 +- pr_warn("SMI message count %d at exit\n", count);
1168 +- count = atomic_read(&recv_msg_inuse_count);
1169 +- if (count != 0)
1170 +- pr_warn("recv message count %d at exit\n", count);
1171 ++ cleanup_srcu_struct(&ipmi_interfaces_srcu);
1172 ++ }
1173 ++ if (drvregistered)
1174 ++ driver_unregister(&ipmidriver.driver);
1175 + }
1176 + module_exit(cleanup_ipmi);
1177 +
1178 +diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
1179 +index ca9528c4f183..b7a1ae2afaea 100644
1180 +--- a/drivers/char/ipmi/ipmi_ssif.c
1181 ++++ b/drivers/char/ipmi/ipmi_ssif.c
1182 +@@ -632,8 +632,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
1183 +
1184 + /* Remove the multi-part read marker. */
1185 + len -= 2;
1186 ++ data += 2;
1187 + for (i = 0; i < len; i++)
1188 +- ssif_info->data[i] = data[i+2];
1189 ++ ssif_info->data[i] = data[i];
1190 + ssif_info->multi_len = len;
1191 + ssif_info->multi_pos = 1;
1192 +
1193 +@@ -661,8 +662,19 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
1194 + }
1195 +
1196 + blocknum = data[0];
1197 ++ len--;
1198 ++ data++;
1199 ++
1200 ++ if (blocknum != 0xff && len != 31) {
1201 ++ /* All blocks but the last must have 31 data bytes. */
1202 ++ result = -EIO;
1203 ++ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
1204 ++ pr_info("Received middle message <31\n");
1205 +
1206 +- if (ssif_info->multi_len + len - 1 > IPMI_MAX_MSG_LENGTH) {
1207 ++ goto continue_op;
1208 ++ }
1209 ++
1210 ++ if (ssif_info->multi_len + len > IPMI_MAX_MSG_LENGTH) {
1211 + /* Received message too big, abort the operation. */
1212 + result = -E2BIG;
1213 + if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
1214 +@@ -671,16 +683,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
1215 + goto continue_op;
1216 + }
1217 +
1218 +- /* Remove the blocknum from the data. */
1219 +- len--;
1220 + for (i = 0; i < len; i++)
1221 +- ssif_info->data[i + ssif_info->multi_len] = data[i + 1];
1222 ++ ssif_info->data[i + ssif_info->multi_len] = data[i];
1223 + ssif_info->multi_len += len;
1224 + if (blocknum == 0xff) {
1225 + /* End of read */
1226 + len = ssif_info->multi_len;
1227 + data = ssif_info->data;
1228 +- } else if (blocknum + 1 != ssif_info->multi_pos) {
1229 ++ } else if (blocknum != ssif_info->multi_pos) {
1230 + /*
1231 + * Out of sequence block, just abort. Block
1232 + * numbers start at zero for the second block,
1233 +@@ -707,6 +717,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
1234 + }
1235 + }
1236 +
1237 ++ continue_op:
1238 + if (result < 0) {
1239 + ssif_inc_stat(ssif_info, receive_errors);
1240 + } else {
1241 +@@ -714,8 +725,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
1242 + ssif_inc_stat(ssif_info, received_message_parts);
1243 + }
1244 +
1245 +-
1246 +- continue_op:
1247 + if (ssif_info->ssif_debug & SSIF_DEBUG_STATE)
1248 + pr_info("DONE 1: state = %d, result=%d\n",
1249 + ssif_info->ssif_state, result);
1250 +diff --git a/drivers/clk/imx/clk-busy.c b/drivers/clk/imx/clk-busy.c
1251 +index 99036527eb0d..e695622c5aa5 100644
1252 +--- a/drivers/clk/imx/clk-busy.c
1253 ++++ b/drivers/clk/imx/clk-busy.c
1254 +@@ -154,7 +154,7 @@ static const struct clk_ops clk_busy_mux_ops = {
1255 +
1256 + struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift,
1257 + u8 width, void __iomem *busy_reg, u8 busy_shift,
1258 +- const char **parent_names, int num_parents)
1259 ++ const char * const *parent_names, int num_parents)
1260 + {
1261 + struct clk_busy_mux *busy;
1262 + struct clk *clk;
1263 +diff --git a/drivers/clk/imx/clk-fixup-mux.c b/drivers/clk/imx/clk-fixup-mux.c
1264 +index c9b327e0a8dd..44817c1b0b88 100644
1265 +--- a/drivers/clk/imx/clk-fixup-mux.c
1266 ++++ b/drivers/clk/imx/clk-fixup-mux.c
1267 +@@ -70,7 +70,7 @@ static const struct clk_ops clk_fixup_mux_ops = {
1268 + };
1269 +
1270 + struct clk *imx_clk_fixup_mux(const char *name, void __iomem *reg,
1271 +- u8 shift, u8 width, const char **parents,
1272 ++ u8 shift, u8 width, const char * const *parents,
1273 + int num_parents, void (*fixup)(u32 *val))
1274 + {
1275 + struct clk_fixup_mux *fixup_mux;
1276 +diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
1277 +index bbe0c60f4d09..59f6a3e087db 100644
1278 +--- a/drivers/clk/imx/clk-imx6q.c
1279 ++++ b/drivers/clk/imx/clk-imx6q.c
1280 +@@ -508,8 +508,12 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
1281 + * lvds1_gate and lvds2_gate are pseudo-gates. Both can be
1282 + * independently configured as clock inputs or outputs. We treat
1283 + * the "output_enable" bit as a gate, even though it's really just
1284 +- * enabling clock output.
1285 ++ * enabling clock output. Initially the gate bits are cleared, as
1286 ++ * otherwise the exclusive configuration gets locked in the setup done
1287 ++ * by software running before the clock driver, with no way to change
1288 ++ * it.
1289 + */
1290 ++ writel(readl(base + 0x160) & ~0x3c00, base + 0x160);
1291 + clk[IMX6QDL_CLK_LVDS1_GATE] = imx_clk_gate_exclusive("lvds1_gate", "lvds1_sel", base + 0x160, 10, BIT(12));
1292 + clk[IMX6QDL_CLK_LVDS2_GATE] = imx_clk_gate_exclusive("lvds2_gate", "lvds2_sel", base + 0x160, 11, BIT(13));
1293 +
1294 +diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
1295 +index 5895e2237b6c..2c377e188281 100644
1296 +--- a/drivers/clk/imx/clk.h
1297 ++++ b/drivers/clk/imx/clk.h
1298 +@@ -63,14 +63,14 @@ struct clk *imx_clk_busy_divider(const char *name, const char *parent_name,
1299 +
1300 + struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift,
1301 + u8 width, void __iomem *busy_reg, u8 busy_shift,
1302 +- const char **parent_names, int num_parents);
1303 ++ const char * const *parent_names, int num_parents);
1304 +
1305 + struct clk *imx_clk_fixup_divider(const char *name, const char *parent,
1306 + void __iomem *reg, u8 shift, u8 width,
1307 + void (*fixup)(u32 *val));
1308 +
1309 + struct clk *imx_clk_fixup_mux(const char *name, void __iomem *reg,
1310 +- u8 shift, u8 width, const char **parents,
1311 ++ u8 shift, u8 width, const char * const *parents,
1312 + int num_parents, void (*fixup)(u32 *val));
1313 +
1314 + static inline struct clk *imx_clk_fixed(const char *name, int rate)
1315 +@@ -79,7 +79,8 @@ static inline struct clk *imx_clk_fixed(const char *name, int rate)
1316 + }
1317 +
1318 + static inline struct clk *imx_clk_mux_ldb(const char *name, void __iomem *reg,
1319 +- u8 shift, u8 width, const char **parents, int num_parents)
1320 ++ u8 shift, u8 width, const char * const *parents,
1321 ++ int num_parents)
1322 + {
1323 + return clk_register_mux(NULL, name, parents, num_parents,
1324 + CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT, reg,
1325 +@@ -199,7 +200,8 @@ static inline struct clk *imx_clk_gate4(const char *name, const char *parent,
1326 + }
1327 +
1328 + static inline struct clk *imx_clk_mux(const char *name, void __iomem *reg,
1329 +- u8 shift, u8 width, const char **parents, int num_parents)
1330 ++ u8 shift, u8 width, const char * const *parents,
1331 ++ int num_parents)
1332 + {
1333 + return clk_register_mux(NULL, name, parents, num_parents,
1334 + CLK_SET_RATE_NO_REPARENT, reg, shift,
1335 +@@ -207,7 +209,8 @@ static inline struct clk *imx_clk_mux(const char *name, void __iomem *reg,
1336 + }
1337 +
1338 + static inline struct clk *imx_clk_mux2(const char *name, void __iomem *reg,
1339 +- u8 shift, u8 width, const char **parents, int num_parents)
1340 ++ u8 shift, u8 width, const char * const *parents,
1341 ++ int num_parents)
1342 + {
1343 + return clk_register_mux(NULL, name, parents, num_parents,
1344 + CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE,
1345 +@@ -215,8 +218,9 @@ static inline struct clk *imx_clk_mux2(const char *name, void __iomem *reg,
1346 + }
1347 +
1348 + static inline struct clk *imx_clk_mux_flags(const char *name,
1349 +- void __iomem *reg, u8 shift, u8 width, const char **parents,
1350 +- int num_parents, unsigned long flags)
1351 ++ void __iomem *reg, u8 shift, u8 width,
1352 ++ const char * const *parents, int num_parents,
1353 ++ unsigned long flags)
1354 + {
1355 + return clk_register_mux(NULL, name, parents, num_parents,
1356 + flags | CLK_SET_RATE_NO_REPARENT, reg, shift, width, 0,
1357 +diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
1358 +index 346b9e165b7a..1d39273d7a04 100644
1359 +--- a/drivers/clk/meson/meson8b.c
1360 ++++ b/drivers/clk/meson/meson8b.c
1361 +@@ -42,6 +42,11 @@ static const struct pll_params_table sys_pll_params_table[] = {
1362 + PLL_PARAMS(62, 1),
1363 + PLL_PARAMS(63, 1),
1364 + PLL_PARAMS(64, 1),
1365 ++ PLL_PARAMS(65, 1),
1366 ++ PLL_PARAMS(66, 1),
1367 ++ PLL_PARAMS(67, 1),
1368 ++ PLL_PARAMS(68, 1),
1369 ++ PLL_PARAMS(84, 1),
1370 + { /* sentinel */ },
1371 + };
1372 +
1373 +@@ -579,13 +584,14 @@ static struct clk_fixed_factor meson8b_cpu_div3 = {
1374 + };
1375 +
1376 + static const struct clk_div_table cpu_scale_table[] = {
1377 +- { .val = 2, .div = 4 },
1378 +- { .val = 3, .div = 6 },
1379 +- { .val = 4, .div = 8 },
1380 +- { .val = 5, .div = 10 },
1381 +- { .val = 6, .div = 12 },
1382 +- { .val = 7, .div = 14 },
1383 +- { .val = 8, .div = 16 },
1384 ++ { .val = 1, .div = 4 },
1385 ++ { .val = 2, .div = 6 },
1386 ++ { .val = 3, .div = 8 },
1387 ++ { .val = 4, .div = 10 },
1388 ++ { .val = 5, .div = 12 },
1389 ++ { .val = 6, .div = 14 },
1390 ++ { .val = 7, .div = 16 },
1391 ++ { .val = 8, .div = 18 },
1392 + { /* sentinel */ },
1393 + };
1394 +
1395 +diff --git a/drivers/clocksource/timer-integrator-ap.c b/drivers/clocksource/timer-integrator-ap.c
1396 +index 76e526f58620..19fb7de4b928 100644
1397 +--- a/drivers/clocksource/timer-integrator-ap.c
1398 ++++ b/drivers/clocksource/timer-integrator-ap.c
1399 +@@ -181,8 +181,7 @@ static int __init integrator_ap_timer_init_of(struct device_node *node)
1400 + int irq;
1401 + struct clk *clk;
1402 + unsigned long rate;
1403 +- struct device_node *pri_node;
1404 +- struct device_node *sec_node;
1405 ++ struct device_node *alias_node;
1406 +
1407 + base = of_io_request_and_map(node, 0, "integrator-timer");
1408 + if (IS_ERR(base))
1409 +@@ -204,7 +203,18 @@ static int __init integrator_ap_timer_init_of(struct device_node *node)
1410 + return err;
1411 + }
1412 +
1413 +- pri_node = of_find_node_by_path(path);
1414 ++ alias_node = of_find_node_by_path(path);
1415 ++
1416 ++ /*
1417 ++ * The pointer is used as an identifier not as a pointer, we
1418 ++ * can drop the refcount on the of__node immediately after
1419 ++ * getting it.
1420 ++ */
1421 ++ of_node_put(alias_node);
1422 ++
1423 ++ if (node == alias_node)
1424 ++ /* The primary timer lacks IRQ, use as clocksource */
1425 ++ return integrator_clocksource_init(rate, base);
1426 +
1427 + err = of_property_read_string(of_aliases,
1428 + "arm,timer-secondary", &path);
1429 +@@ -213,14 +223,11 @@ static int __init integrator_ap_timer_init_of(struct device_node *node)
1430 + return err;
1431 + }
1432 +
1433 ++ alias_node = of_find_node_by_path(path);
1434 +
1435 +- sec_node = of_find_node_by_path(path);
1436 +-
1437 +- if (node == pri_node)
1438 +- /* The primary timer lacks IRQ, use as clocksource */
1439 +- return integrator_clocksource_init(rate, base);
1440 ++ of_node_put(alias_node);
1441 +
1442 +- if (node == sec_node) {
1443 ++ if (node == alias_node) {
1444 + /* The secondary timer will drive the clock event */
1445 + irq = irq_of_parse_and_map(node, 0);
1446 + return integrator_clockevent_init(rate, base, irq);
1447 +diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
1448 +index 9e56bc411061..74c247972bb3 100644
1449 +--- a/drivers/cpuidle/cpuidle-pseries.c
1450 ++++ b/drivers/cpuidle/cpuidle-pseries.c
1451 +@@ -247,7 +247,13 @@ static int pseries_idle_probe(void)
1452 + return -ENODEV;
1453 +
1454 + if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
1455 +- if (lppaca_shared_proc(get_lppaca())) {
1456 ++ /*
1457 ++ * Use local_paca instead of get_lppaca() since
1458 ++ * preemption is not disabled, and it is not required in
1459 ++ * fact, since lppaca_ptr does not need to be the value
1460 ++ * associated to the current CPU, it can be from any CPU.
1461 ++ */
1462 ++ if (lppaca_shared_proc(local_paca->lppaca_ptr)) {
1463 + cpuidle_state_table = shared_states;
1464 + max_idle_state = ARRAY_SIZE(shared_states);
1465 + } else {
1466 +diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
1467 +index c51627660dbb..d9845099635e 100644
1468 +--- a/drivers/firmware/efi/libstub/Makefile
1469 ++++ b/drivers/firmware/efi/libstub/Makefile
1470 +@@ -9,7 +9,10 @@ cflags-$(CONFIG_X86_32) := -march=i386
1471 + cflags-$(CONFIG_X86_64) := -mcmodel=small
1472 + cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -O2 \
1473 + -fPIC -fno-strict-aliasing -mno-red-zone \
1474 +- -mno-mmx -mno-sse -fshort-wchar
1475 ++ -mno-mmx -mno-sse -fshort-wchar \
1476 ++ -Wno-pointer-sign \
1477 ++ $(call cc-disable-warning, address-of-packed-member) \
1478 ++ $(call cc-disable-warning, gnu)
1479 +
1480 + # arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
1481 + # disable the stackleak plugin
1482 +diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
1483 +index 610a1558e0ed..d9fa7d4bf11f 100644
1484 +--- a/drivers/fpga/altera-cvp.c
1485 ++++ b/drivers/fpga/altera-cvp.c
1486 +@@ -466,14 +466,6 @@ static int altera_cvp_probe(struct pci_dev *pdev,
1487 + if (ret)
1488 + goto err_unmap;
1489 +
1490 +- ret = driver_create_file(&altera_cvp_driver.driver,
1491 +- &driver_attr_chkcfg);
1492 +- if (ret) {
1493 +- dev_err(&pdev->dev, "Can't create sysfs chkcfg file\n");
1494 +- fpga_mgr_unregister(mgr);
1495 +- goto err_unmap;
1496 +- }
1497 +-
1498 + return 0;
1499 +
1500 + err_unmap:
1501 +@@ -491,7 +483,6 @@ static void altera_cvp_remove(struct pci_dev *pdev)
1502 + struct altera_cvp_conf *conf = mgr->priv;
1503 + u16 cmd;
1504 +
1505 +- driver_remove_file(&altera_cvp_driver.driver, &driver_attr_chkcfg);
1506 + fpga_mgr_unregister(mgr);
1507 + pci_iounmap(pdev, conf->map);
1508 + pci_release_region(pdev, CVP_BAR);
1509 +@@ -500,7 +491,30 @@ static void altera_cvp_remove(struct pci_dev *pdev)
1510 + pci_write_config_word(pdev, PCI_COMMAND, cmd);
1511 + }
1512 +
1513 +-module_pci_driver(altera_cvp_driver);
1514 ++static int __init altera_cvp_init(void)
1515 ++{
1516 ++ int ret;
1517 ++
1518 ++ ret = pci_register_driver(&altera_cvp_driver);
1519 ++ if (ret)
1520 ++ return ret;
1521 ++
1522 ++ ret = driver_create_file(&altera_cvp_driver.driver,
1523 ++ &driver_attr_chkcfg);
1524 ++ if (ret)
1525 ++ pr_warn("Can't create sysfs chkcfg file\n");
1526 ++
1527 ++ return 0;
1528 ++}
1529 ++
1530 ++static void __exit altera_cvp_exit(void)
1531 ++{
1532 ++ driver_remove_file(&altera_cvp_driver.driver, &driver_attr_chkcfg);
1533 ++ pci_unregister_driver(&altera_cvp_driver);
1534 ++}
1535 ++
1536 ++module_init(altera_cvp_init);
1537 ++module_exit(altera_cvp_exit);
1538 +
1539 + MODULE_LICENSE("GPL v2");
1540 + MODULE_AUTHOR("Anatolij Gustschin <agust@××××.de>");
1541 +diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
1542 +index 2afd9de84a0d..dc42571e6fdc 100644
1543 +--- a/drivers/gpio/gpio-pl061.c
1544 ++++ b/drivers/gpio/gpio-pl061.c
1545 +@@ -54,6 +54,7 @@ struct pl061 {
1546 +
1547 + void __iomem *base;
1548 + struct gpio_chip gc;
1549 ++ struct irq_chip irq_chip;
1550 + int parent_irq;
1551 +
1552 + #ifdef CONFIG_PM
1553 +@@ -281,15 +282,6 @@ static int pl061_irq_set_wake(struct irq_data *d, unsigned int state)
1554 + return irq_set_irq_wake(pl061->parent_irq, state);
1555 + }
1556 +
1557 +-static struct irq_chip pl061_irqchip = {
1558 +- .name = "pl061",
1559 +- .irq_ack = pl061_irq_ack,
1560 +- .irq_mask = pl061_irq_mask,
1561 +- .irq_unmask = pl061_irq_unmask,
1562 +- .irq_set_type = pl061_irq_type,
1563 +- .irq_set_wake = pl061_irq_set_wake,
1564 +-};
1565 +-
1566 + static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
1567 + {
1568 + struct device *dev = &adev->dev;
1569 +@@ -328,6 +320,13 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
1570 + /*
1571 + * irq_chip support
1572 + */
1573 ++ pl061->irq_chip.name = dev_name(dev);
1574 ++ pl061->irq_chip.irq_ack = pl061_irq_ack;
1575 ++ pl061->irq_chip.irq_mask = pl061_irq_mask;
1576 ++ pl061->irq_chip.irq_unmask = pl061_irq_unmask;
1577 ++ pl061->irq_chip.irq_set_type = pl061_irq_type;
1578 ++ pl061->irq_chip.irq_set_wake = pl061_irq_set_wake;
1579 ++
1580 + writeb(0, pl061->base + GPIOIE); /* disable irqs */
1581 + irq = adev->irq[0];
1582 + if (irq < 0) {
1583 +@@ -336,14 +335,14 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
1584 + }
1585 + pl061->parent_irq = irq;
1586 +
1587 +- ret = gpiochip_irqchip_add(&pl061->gc, &pl061_irqchip,
1588 ++ ret = gpiochip_irqchip_add(&pl061->gc, &pl061->irq_chip,
1589 + 0, handle_bad_irq,
1590 + IRQ_TYPE_NONE);
1591 + if (ret) {
1592 + dev_info(&adev->dev, "could not add irqchip\n");
1593 + return ret;
1594 + }
1595 +- gpiochip_set_chained_irqchip(&pl061->gc, &pl061_irqchip,
1596 ++ gpiochip_set_chained_irqchip(&pl061->gc, &pl061->irq_chip,
1597 + irq, pl061_irq_handler);
1598 +
1599 + amba_set_drvdata(adev, pl061);
1600 +diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
1601 +index 1fc17bf39fed..44ca41837187 100644
1602 +--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
1603 ++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
1604 +@@ -116,16 +116,16 @@ static int uvd_v4_2_sw_init(void *handle)
1605 + if (r)
1606 + return r;
1607 +
1608 +- r = amdgpu_uvd_resume(adev);
1609 +- if (r)
1610 +- return r;
1611 +-
1612 + ring = &adev->uvd.inst->ring;
1613 + sprintf(ring->name, "uvd");
1614 + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
1615 + if (r)
1616 + return r;
1617 +
1618 ++ r = amdgpu_uvd_resume(adev);
1619 ++ if (r)
1620 ++ return r;
1621 ++
1622 + r = amdgpu_uvd_entity_init(adev);
1623 +
1624 + return r;
1625 +diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
1626 +index fde6ad5ac9ab..6bb05ae232b2 100644
1627 +--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
1628 ++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
1629 +@@ -113,16 +113,16 @@ static int uvd_v5_0_sw_init(void *handle)
1630 + if (r)
1631 + return r;
1632 +
1633 +- r = amdgpu_uvd_resume(adev);
1634 +- if (r)
1635 +- return r;
1636 +-
1637 + ring = &adev->uvd.inst->ring;
1638 + sprintf(ring->name, "uvd");
1639 + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
1640 + if (r)
1641 + return r;
1642 +
1643 ++ r = amdgpu_uvd_resume(adev);
1644 ++ if (r)
1645 ++ return r;
1646 ++
1647 + r = amdgpu_uvd_entity_init(adev);
1648 +
1649 + return r;
1650 +diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
1651 +index 7a5b40275e8e..07fd96df4321 100644
1652 +--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
1653 ++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
1654 +@@ -416,16 +416,16 @@ static int uvd_v6_0_sw_init(void *handle)
1655 + DRM_INFO("UVD ENC is disabled\n");
1656 + }
1657 +
1658 +- r = amdgpu_uvd_resume(adev);
1659 +- if (r)
1660 +- return r;
1661 +-
1662 + ring = &adev->uvd.inst->ring;
1663 + sprintf(ring->name, "uvd");
1664 + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
1665 + if (r)
1666 + return r;
1667 +
1668 ++ r = amdgpu_uvd_resume(adev);
1669 ++ if (r)
1670 ++ return r;
1671 ++
1672 + if (uvd_v6_0_enc_support(adev)) {
1673 + for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
1674 + ring = &adev->uvd.inst->ring_enc[i];
1675 +diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
1676 +index 58b39afcfb86..1ef023a7b8ec 100644
1677 +--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
1678 ++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
1679 +@@ -447,10 +447,6 @@ static int uvd_v7_0_sw_init(void *handle)
1680 + DRM_INFO("PSP loading UVD firmware\n");
1681 + }
1682 +
1683 +- r = amdgpu_uvd_resume(adev);
1684 +- if (r)
1685 +- return r;
1686 +-
1687 + for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
1688 + if (adev->uvd.harvest_config & (1 << j))
1689 + continue;
1690 +@@ -482,6 +478,10 @@ static int uvd_v7_0_sw_init(void *handle)
1691 + }
1692 + }
1693 +
1694 ++ r = amdgpu_uvd_resume(adev);
1695 ++ if (r)
1696 ++ return r;
1697 ++
1698 + r = amdgpu_uvd_entity_init(adev);
1699 + if (r)
1700 + return r;
1701 +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
1702 +index e4ded890b1cb..6edaf11d69aa 100644
1703 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
1704 ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
1705 +@@ -688,6 +688,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
1706 + {
1707 + uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
1708 + bool is_patched = false;
1709 ++ unsigned long flags;
1710 +
1711 + if (!kfd->init_complete)
1712 + return;
1713 +@@ -697,7 +698,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
1714 + return;
1715 + }
1716 +
1717 +- spin_lock(&kfd->interrupt_lock);
1718 ++ spin_lock_irqsave(&kfd->interrupt_lock, flags);
1719 +
1720 + if (kfd->interrupts_active
1721 + && interrupt_is_wanted(kfd, ih_ring_entry,
1722 +@@ -706,7 +707,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
1723 + is_patched ? patched_ihre : ih_ring_entry))
1724 + queue_work(kfd->ih_wq, &kfd->interrupt_work);
1725 +
1726 +- spin_unlock(&kfd->interrupt_lock);
1727 ++ spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
1728 + }
1729 +
1730 + int kgd2kfd_quiesce_mm(struct mm_struct *mm)
1731 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
1732 +index 01fc5717b657..f088ac585978 100644
1733 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
1734 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
1735 +@@ -75,6 +75,11 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
1736 + return -EINVAL;
1737 + }
1738 +
1739 ++ if (!stream_state) {
1740 ++ DRM_ERROR("No stream state for CRTC%d\n", crtc->index);
1741 ++ return -EINVAL;
1742 ++ }
1743 ++
1744 + /* When enabling CRC, we should also disable dithering. */
1745 + if (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO) {
1746 + if (dc_stream_configure_crc(stream_state->ctx->dc,
1747 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
1748 +index dcb3c5530236..cd1ebe57ed59 100644
1749 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
1750 ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
1751 +@@ -463,7 +463,7 @@ void dpp1_set_cursor_position(
1752 + if (src_y_offset >= (int)param->viewport.height)
1753 + cur_en = 0; /* not visible beyond bottom edge*/
1754 +
1755 +- if (src_y_offset < 0)
1756 ++ if (src_y_offset + (int)height <= 0)
1757 + cur_en = 0; /* not visible beyond top edge*/
1758 +
1759 + REG_UPDATE(CURSOR0_CONTROL,
1760 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
1761 +index 74132a1f3046..a34f0fdf7be2 100644
1762 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
1763 ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
1764 +@@ -1134,7 +1134,7 @@ void hubp1_cursor_set_position(
1765 + if (src_y_offset >= (int)param->viewport.height)
1766 + cur_en = 0; /* not visible beyond bottom edge*/
1767 +
1768 +- if (src_y_offset < 0) //+ (int)hubp->curs_attr.height
1769 ++ if (src_y_offset + (int)hubp->curs_attr.height <= 0)
1770 + cur_en = 0; /* not visible beyond top edge*/
1771 +
1772 + if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
1773 +diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
1774 +index d8b526b7932c..b4e292a56046 100644
1775 +--- a/drivers/gpu/drm/drm_atomic_helper.c
1776 ++++ b/drivers/gpu/drm/drm_atomic_helper.c
1777 +@@ -1445,6 +1445,9 @@ void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
1778 + DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
1779 + crtc->base.id, crtc->name);
1780 + }
1781 ++
1782 ++ if (old_state->fake_commit)
1783 ++ complete_all(&old_state->fake_commit->flip_done);
1784 + }
1785 + EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);
1786 +
1787 +diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
1788 +index 44fe587aaef9..c5bbbd7cb2de 100644
1789 +--- a/drivers/gpu/drm/scheduler/sched_main.c
1790 ++++ b/drivers/gpu/drm/scheduler/sched_main.c
1791 +@@ -60,6 +60,8 @@
1792 +
1793 + static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
1794 +
1795 ++static void drm_sched_expel_job_unlocked(struct drm_sched_job *s_job);
1796 ++
1797 + /**
1798 + * drm_sched_rq_init - initialize a given run queue struct
1799 + *
1800 +@@ -215,7 +217,7 @@ static void drm_sched_job_finish(struct work_struct *work)
1801 +
1802 + spin_lock(&sched->job_list_lock);
1803 + /* remove job from ring_mirror_list */
1804 +- list_del(&s_job->node);
1805 ++ list_del_init(&s_job->node);
1806 + /* queue TDR for next job */
1807 + drm_sched_start_timeout(sched);
1808 + spin_unlock(&sched->job_list_lock);
1809 +@@ -378,6 +380,8 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
1810 + r);
1811 + dma_fence_put(fence);
1812 + } else {
1813 ++ if (s_fence->finished.error < 0)
1814 ++ drm_sched_expel_job_unlocked(s_job);
1815 + drm_sched_process_job(NULL, &s_fence->cb);
1816 + }
1817 + spin_lock(&sched->job_list_lock);
1818 +@@ -567,6 +571,8 @@ static int drm_sched_main(void *param)
1819 + r);
1820 + dma_fence_put(fence);
1821 + } else {
1822 ++ if (s_fence->finished.error < 0)
1823 ++ drm_sched_expel_job_unlocked(sched_job);
1824 + drm_sched_process_job(NULL, &s_fence->cb);
1825 + }
1826 +
1827 +@@ -575,6 +581,15 @@ static int drm_sched_main(void *param)
1828 + return 0;
1829 + }
1830 +
1831 ++static void drm_sched_expel_job_unlocked(struct drm_sched_job *s_job)
1832 ++{
1833 ++ struct drm_gpu_scheduler *sched = s_job->sched;
1834 ++
1835 ++ spin_lock(&sched->job_list_lock);
1836 ++ list_del_init(&s_job->node);
1837 ++ spin_unlock(&sched->job_list_lock);
1838 ++}
1839 ++
1840 + /**
1841 + * drm_sched_init - Init a gpu scheduler instance
1842 + *
1843 +diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
1844 +index 53fc83b72a49..5864ac55e275 100644
1845 +--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
1846 ++++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
1847 +@@ -86,7 +86,7 @@ static void __tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
1848 +
1849 + static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
1850 + {
1851 +- coresight_disclaim_device(drvdata);
1852 ++ coresight_disclaim_device(drvdata->base);
1853 + __tmc_etb_disable_hw(drvdata);
1854 + }
1855 +
1856 +diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
1857 +index 0b91ff36768a..598e23cf01fc 100644
1858 +--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
1859 ++++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
1860 +@@ -336,13 +336,16 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
1861 +
1862 + usnic_dbg("\n");
1863 +
1864 +- mutex_lock(&us_ibdev->usdev_lock);
1865 + if (ib_get_eth_speed(ibdev, port, &props->active_speed,
1866 +- &props->active_width)) {
1867 +- mutex_unlock(&us_ibdev->usdev_lock);
1868 ++ &props->active_width))
1869 + return -EINVAL;
1870 +- }
1871 +
1872 ++ /*
1873 ++ * usdev_lock is acquired after (and not before) ib_get_eth_speed call
1874 ++ * because acquiring rtnl_lock in ib_get_eth_speed, while holding
1875 ++ * usdev_lock could lead to a deadlock.
1876 ++ */
1877 ++ mutex_lock(&us_ibdev->usdev_lock);
1878 + /* props being zeroed by the caller, avoid zeroing it here */
1879 +
1880 + props->lid = 0;
1881 +diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
1882 +index 6c361d70d7cd..46f62f71cd28 100644
1883 +--- a/drivers/infiniband/sw/rxe/rxe_req.c
1884 ++++ b/drivers/infiniband/sw/rxe/rxe_req.c
1885 +@@ -643,6 +643,7 @@ next_wqe:
1886 + rmr->access = wqe->wr.wr.reg.access;
1887 + rmr->lkey = wqe->wr.wr.reg.key;
1888 + rmr->rkey = wqe->wr.wr.reg.key;
1889 ++ rmr->iova = wqe->wr.wr.reg.mr->iova;
1890 + wqe->state = wqe_state_done;
1891 + wqe->status = IB_WC_SUCCESS;
1892 + } else {
1893 +diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
1894 +index b8eec515a003..fc7d8b8a654f 100644
1895 +--- a/drivers/md/dm-crypt.c
1896 ++++ b/drivers/md/dm-crypt.c
1897 +@@ -49,7 +49,7 @@ struct convert_context {
1898 + struct bio *bio_out;
1899 + struct bvec_iter iter_in;
1900 + struct bvec_iter iter_out;
1901 +- sector_t cc_sector;
1902 ++ u64 cc_sector;
1903 + atomic_t cc_pending;
1904 + union {
1905 + struct skcipher_request *req;
1906 +@@ -81,7 +81,7 @@ struct dm_crypt_request {
1907 + struct convert_context *ctx;
1908 + struct scatterlist sg_in[4];
1909 + struct scatterlist sg_out[4];
1910 +- sector_t iv_sector;
1911 ++ u64 iv_sector;
1912 + };
1913 +
1914 + struct crypt_config;
1915 +@@ -160,7 +160,7 @@ struct crypt_config {
1916 + struct iv_lmk_private lmk;
1917 + struct iv_tcw_private tcw;
1918 + } iv_gen_private;
1919 +- sector_t iv_offset;
1920 ++ u64 iv_offset;
1921 + unsigned int iv_size;
1922 + unsigned short int sector_size;
1923 + unsigned char sector_shift;
1924 +@@ -2781,7 +2781,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1925 + }
1926 +
1927 + ret = -EINVAL;
1928 +- if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
1929 ++ if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
1930 + ti->error = "Invalid device sector";
1931 + goto bad;
1932 + }
1933 +diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
1934 +index 2fb7bb4304ad..fddffe251bf6 100644
1935 +--- a/drivers/md/dm-delay.c
1936 ++++ b/drivers/md/dm-delay.c
1937 +@@ -141,7 +141,7 @@ static int delay_class_ctr(struct dm_target *ti, struct delay_class *c, char **a
1938 + unsigned long long tmpll;
1939 + char dummy;
1940 +
1941 +- if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1) {
1942 ++ if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
1943 + ti->error = "Invalid device sector";
1944 + return -EINVAL;
1945 + }
1946 +diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
1947 +index 3cb97fa4c11d..8261aa8c7fe1 100644
1948 +--- a/drivers/md/dm-flakey.c
1949 ++++ b/drivers/md/dm-flakey.c
1950 +@@ -213,7 +213,7 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1951 + devname = dm_shift_arg(&as);
1952 +
1953 + r = -EINVAL;
1954 +- if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1) {
1955 ++ if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
1956 + ti->error = "Invalid device sector";
1957 + goto bad;
1958 + }
1959 +diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
1960 +index 2fc4213e02b5..671c24332802 100644
1961 +--- a/drivers/md/dm-kcopyd.c
1962 ++++ b/drivers/md/dm-kcopyd.c
1963 +@@ -56,15 +56,17 @@ struct dm_kcopyd_client {
1964 + atomic_t nr_jobs;
1965 +
1966 + /*
1967 +- * We maintain three lists of jobs:
1968 ++ * We maintain four lists of jobs:
1969 + *
1970 + * i) jobs waiting for pages
1971 + * ii) jobs that have pages, and are waiting for the io to be issued.
1972 +- * iii) jobs that have completed.
1973 ++ * iii) jobs that don't need to do any IO and just run a callback
1974 ++ * iv) jobs that have completed.
1975 + *
1976 +- * All three of these are protected by job_lock.
1977 ++ * All four of these are protected by job_lock.
1978 + */
1979 + spinlock_t job_lock;
1980 ++ struct list_head callback_jobs;
1981 + struct list_head complete_jobs;
1982 + struct list_head io_jobs;
1983 + struct list_head pages_jobs;
1984 +@@ -625,6 +627,7 @@ static void do_work(struct work_struct *work)
1985 + struct dm_kcopyd_client *kc = container_of(work,
1986 + struct dm_kcopyd_client, kcopyd_work);
1987 + struct blk_plug plug;
1988 ++ unsigned long flags;
1989 +
1990 + /*
1991 + * The order that these are called is *very* important.
1992 +@@ -633,6 +636,10 @@ static void do_work(struct work_struct *work)
1993 + * list. io jobs call wake when they complete and it all
1994 + * starts again.
1995 + */
1996 ++ spin_lock_irqsave(&kc->job_lock, flags);
1997 ++ list_splice_tail_init(&kc->callback_jobs, &kc->complete_jobs);
1998 ++ spin_unlock_irqrestore(&kc->job_lock, flags);
1999 ++
2000 + blk_start_plug(&plug);
2001 + process_jobs(&kc->complete_jobs, kc, run_complete_job);
2002 + process_jobs(&kc->pages_jobs, kc, run_pages_job);
2003 +@@ -650,7 +657,7 @@ static void dispatch_job(struct kcopyd_job *job)
2004 + struct dm_kcopyd_client *kc = job->kc;
2005 + atomic_inc(&kc->nr_jobs);
2006 + if (unlikely(!job->source.count))
2007 +- push(&kc->complete_jobs, job);
2008 ++ push(&kc->callback_jobs, job);
2009 + else if (job->pages == &zero_page_list)
2010 + push(&kc->io_jobs, job);
2011 + else
2012 +@@ -858,7 +865,7 @@ void dm_kcopyd_do_callback(void *j, int read_err, unsigned long write_err)
2013 + job->read_err = read_err;
2014 + job->write_err = write_err;
2015 +
2016 +- push(&kc->complete_jobs, job);
2017 ++ push(&kc->callback_jobs, job);
2018 + wake(kc);
2019 + }
2020 + EXPORT_SYMBOL(dm_kcopyd_do_callback);
2021 +@@ -888,6 +895,7 @@ struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *thro
2022 + return ERR_PTR(-ENOMEM);
2023 +
2024 + spin_lock_init(&kc->job_lock);
2025 ++ INIT_LIST_HEAD(&kc->callback_jobs);
2026 + INIT_LIST_HEAD(&kc->complete_jobs);
2027 + INIT_LIST_HEAD(&kc->io_jobs);
2028 + INIT_LIST_HEAD(&kc->pages_jobs);
2029 +@@ -939,6 +947,7 @@ void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
2030 + /* Wait for completion of all jobs submitted by this client. */
2031 + wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));
2032 +
2033 ++ BUG_ON(!list_empty(&kc->callback_jobs));
2034 + BUG_ON(!list_empty(&kc->complete_jobs));
2035 + BUG_ON(!list_empty(&kc->io_jobs));
2036 + BUG_ON(!list_empty(&kc->pages_jobs));
2037 +diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
2038 +index 8d7ddee6ac4d..ad980a38fb1e 100644
2039 +--- a/drivers/md/dm-linear.c
2040 ++++ b/drivers/md/dm-linear.c
2041 +@@ -45,7 +45,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2042 + }
2043 +
2044 + ret = -EINVAL;
2045 +- if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1) {
2046 ++ if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 || tmp != (sector_t)tmp) {
2047 + ti->error = "Invalid device sector";
2048 + goto bad;
2049 + }
2050 +diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
2051 +index 79eab1071ec2..5a51151f680d 100644
2052 +--- a/drivers/md/dm-raid1.c
2053 ++++ b/drivers/md/dm-raid1.c
2054 +@@ -943,7 +943,8 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
2055 + char dummy;
2056 + int ret;
2057 +
2058 +- if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1) {
2059 ++ if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1 ||
2060 ++ offset != (sector_t)offset) {
2061 + ti->error = "Invalid offset";
2062 + return -EINVAL;
2063 + }
2064 +diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
2065 +index ae4b33d10924..36805b12661e 100644
2066 +--- a/drivers/md/dm-snap.c
2067 ++++ b/drivers/md/dm-snap.c
2068 +@@ -19,6 +19,7 @@
2069 + #include <linux/vmalloc.h>
2070 + #include <linux/log2.h>
2071 + #include <linux/dm-kcopyd.h>
2072 ++#include <linux/semaphore.h>
2073 +
2074 + #include "dm.h"
2075 +
2076 +@@ -105,6 +106,9 @@ struct dm_snapshot {
2077 + /* The on disk metadata handler */
2078 + struct dm_exception_store *store;
2079 +
2080 ++ /* Maximum number of in-flight COW jobs. */
2081 ++ struct semaphore cow_count;
2082 ++
2083 + struct dm_kcopyd_client *kcopyd_client;
2084 +
2085 + /* Wait for events based on state_bits */
2086 +@@ -145,6 +149,19 @@ struct dm_snapshot {
2087 + #define RUNNING_MERGE 0
2088 + #define SHUTDOWN_MERGE 1
2089 +
2090 ++/*
2091 ++ * Maximum number of chunks being copied on write.
2092 ++ *
2093 ++ * The value was decided experimentally as a trade-off between memory
2094 ++ * consumption, stalling the kernel's workqueues and maintaining a high enough
2095 ++ * throughput.
2096 ++ */
2097 ++#define DEFAULT_COW_THRESHOLD 2048
2098 ++
2099 ++static int cow_threshold = DEFAULT_COW_THRESHOLD;
2100 ++module_param_named(snapshot_cow_threshold, cow_threshold, int, 0644);
2101 ++MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");
2102 ++
2103 + DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
2104 + "A percentage of time allocated for copy on write");
2105 +
2106 +@@ -1190,6 +1207,8 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2107 + goto bad_hash_tables;
2108 + }
2109 +
2110 ++ sema_init(&s->cow_count, (cow_threshold > 0) ? cow_threshold : INT_MAX);
2111 ++
2112 + s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
2113 + if (IS_ERR(s->kcopyd_client)) {
2114 + r = PTR_ERR(s->kcopyd_client);
2115 +@@ -1575,6 +1594,7 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
2116 + rb_link_node(&pe->out_of_order_node, parent, p);
2117 + rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree);
2118 + }
2119 ++ up(&s->cow_count);
2120 + }
2121 +
2122 + /*
2123 +@@ -1598,6 +1618,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
2124 + dest.count = src.count;
2125 +
2126 + /* Hand over to kcopyd */
2127 ++ down(&s->cow_count);
2128 + dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
2129 + }
2130 +
2131 +@@ -1617,6 +1638,7 @@ static void start_full_bio(struct dm_snap_pending_exception *pe,
2132 + pe->full_bio = bio;
2133 + pe->full_bio_end_io = bio->bi_end_io;
2134 +
2135 ++ down(&s->cow_count);
2136 + callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
2137 + copy_callback, pe);
2138 +
2139 +diff --git a/drivers/md/dm-unstripe.c b/drivers/md/dm-unstripe.c
2140 +index 954b7ab4e684..e673dacf6418 100644
2141 +--- a/drivers/md/dm-unstripe.c
2142 ++++ b/drivers/md/dm-unstripe.c
2143 +@@ -78,7 +78,7 @@ static int unstripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2144 + goto err;
2145 + }
2146 +
2147 +- if (sscanf(argv[4], "%llu%c", &start, &dummy) != 1) {
2148 ++ if (sscanf(argv[4], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
2149 + ti->error = "Invalid striped device offset";
2150 + goto err;
2151 + }
2152 +diff --git a/drivers/media/firewire/firedtv-avc.c b/drivers/media/firewire/firedtv-avc.c
2153 +index 1c933b2cf760..3ef5df1648d7 100644
2154 +--- a/drivers/media/firewire/firedtv-avc.c
2155 ++++ b/drivers/media/firewire/firedtv-avc.c
2156 +@@ -968,7 +968,8 @@ static int get_ca_object_length(struct avc_response_frame *r)
2157 + return r->operand[7];
2158 + }
2159 +
2160 +-int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
2161 ++int avc_ca_app_info(struct firedtv *fdtv, unsigned char *app_info,
2162 ++ unsigned int *len)
2163 + {
2164 + struct avc_command_frame *c = (void *)fdtv->avc_data;
2165 + struct avc_response_frame *r = (void *)fdtv->avc_data;
2166 +@@ -1009,7 +1010,8 @@ out:
2167 + return ret;
2168 + }
2169 +
2170 +-int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
2171 ++int avc_ca_info(struct firedtv *fdtv, unsigned char *app_info,
2172 ++ unsigned int *len)
2173 + {
2174 + struct avc_command_frame *c = (void *)fdtv->avc_data;
2175 + struct avc_response_frame *r = (void *)fdtv->avc_data;
2176 +diff --git a/drivers/media/firewire/firedtv.h b/drivers/media/firewire/firedtv.h
2177 +index 876cdec8329b..009905a19947 100644
2178 +--- a/drivers/media/firewire/firedtv.h
2179 ++++ b/drivers/media/firewire/firedtv.h
2180 +@@ -124,8 +124,10 @@ int avc_lnb_control(struct firedtv *fdtv, char voltage, char burst,
2181 + struct dvb_diseqc_master_cmd *diseqcmd);
2182 + void avc_remote_ctrl_work(struct work_struct *work);
2183 + int avc_register_remote_control(struct firedtv *fdtv);
2184 +-int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len);
2185 +-int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len);
2186 ++int avc_ca_app_info(struct firedtv *fdtv, unsigned char *app_info,
2187 ++ unsigned int *len);
2188 ++int avc_ca_info(struct firedtv *fdtv, unsigned char *app_info,
2189 ++ unsigned int *len);
2190 + int avc_ca_reset(struct firedtv *fdtv);
2191 + int avc_ca_pmt(struct firedtv *fdtv, char *app_info, int length);
2192 + int avc_ca_get_time_date(struct firedtv *fdtv, int *interval);
2193 +diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
2194 +index bb6add9d340e..5b8350e87e75 100644
2195 +--- a/drivers/media/platform/qcom/venus/core.c
2196 ++++ b/drivers/media/platform/qcom/venus/core.c
2197 +@@ -264,6 +264,14 @@ static int venus_probe(struct platform_device *pdev)
2198 + if (ret)
2199 + return ret;
2200 +
2201 ++ if (!dev->dma_parms) {
2202 ++ dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
2203 ++ GFP_KERNEL);
2204 ++ if (!dev->dma_parms)
2205 ++ return -ENOMEM;
2206 ++ }
2207 ++ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
2208 ++
2209 + INIT_LIST_HEAD(&core->instances);
2210 + mutex_init(&core->lock);
2211 + INIT_DELAYED_WORK(&core->work, venus_sys_error_handler);
2212 +diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
2213 +index bc369a0934a3..76dc3ee8ca21 100644
2214 +--- a/drivers/media/usb/uvc/uvc_driver.c
2215 ++++ b/drivers/media/usb/uvc/uvc_driver.c
2216 +@@ -1824,11 +1824,7 @@ static void uvc_delete(struct kref *kref)
2217 + usb_put_intf(dev->intf);
2218 + usb_put_dev(dev->udev);
2219 +
2220 +- if (dev->vdev.dev)
2221 +- v4l2_device_unregister(&dev->vdev);
2222 + #ifdef CONFIG_MEDIA_CONTROLLER
2223 +- if (media_devnode_is_registered(dev->mdev.devnode))
2224 +- media_device_unregister(&dev->mdev);
2225 + media_device_cleanup(&dev->mdev);
2226 + #endif
2227 +
2228 +@@ -1885,6 +1881,15 @@ static void uvc_unregister_video(struct uvc_device *dev)
2229 +
2230 + uvc_debugfs_cleanup_stream(stream);
2231 + }
2232 ++
2233 ++ uvc_status_unregister(dev);
2234 ++
2235 ++ if (dev->vdev.dev)
2236 ++ v4l2_device_unregister(&dev->vdev);
2237 ++#ifdef CONFIG_MEDIA_CONTROLLER
2238 ++ if (media_devnode_is_registered(dev->mdev.devnode))
2239 ++ media_device_unregister(&dev->mdev);
2240 ++#endif
2241 + }
2242 +
2243 + int uvc_register_video_device(struct uvc_device *dev,
2244 +diff --git a/drivers/media/usb/uvc/uvc_status.c b/drivers/media/usb/uvc/uvc_status.c
2245 +index 0722dc684378..883e4cab45e7 100644
2246 +--- a/drivers/media/usb/uvc/uvc_status.c
2247 ++++ b/drivers/media/usb/uvc/uvc_status.c
2248 +@@ -54,7 +54,7 @@ error:
2249 + return ret;
2250 + }
2251 +
2252 +-static void uvc_input_cleanup(struct uvc_device *dev)
2253 ++static void uvc_input_unregister(struct uvc_device *dev)
2254 + {
2255 + if (dev->input)
2256 + input_unregister_device(dev->input);
2257 +@@ -71,7 +71,7 @@ static void uvc_input_report_key(struct uvc_device *dev, unsigned int code,
2258 +
2259 + #else
2260 + #define uvc_input_init(dev)
2261 +-#define uvc_input_cleanup(dev)
2262 ++#define uvc_input_unregister(dev)
2263 + #define uvc_input_report_key(dev, code, value)
2264 + #endif /* CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV */
2265 +
2266 +@@ -292,12 +292,16 @@ int uvc_status_init(struct uvc_device *dev)
2267 + return 0;
2268 + }
2269 +
2270 +-void uvc_status_cleanup(struct uvc_device *dev)
2271 ++void uvc_status_unregister(struct uvc_device *dev)
2272 + {
2273 + usb_kill_urb(dev->int_urb);
2274 ++ uvc_input_unregister(dev);
2275 ++}
2276 ++
2277 ++void uvc_status_cleanup(struct uvc_device *dev)
2278 ++{
2279 + usb_free_urb(dev->int_urb);
2280 + kfree(dev->status);
2281 +- uvc_input_cleanup(dev);
2282 + }
2283 +
2284 + int uvc_status_start(struct uvc_device *dev, gfp_t flags)
2285 +diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
2286 +index c0cbd833d0a4..1db6634b2455 100644
2287 +--- a/drivers/media/usb/uvc/uvcvideo.h
2288 ++++ b/drivers/media/usb/uvc/uvcvideo.h
2289 +@@ -757,6 +757,7 @@ int uvc_register_video_device(struct uvc_device *dev,
2290 +
2291 + /* Status */
2292 + int uvc_status_init(struct uvc_device *dev);
2293 ++void uvc_status_unregister(struct uvc_device *dev);
2294 + void uvc_status_cleanup(struct uvc_device *dev);
2295 + int uvc_status_start(struct uvc_device *dev, gfp_t flags);
2296 + void uvc_status_stop(struct uvc_device *dev);
2297 +diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
2298 +index be53044086c7..fbc56ee99682 100644
2299 +--- a/drivers/mmc/host/atmel-mci.c
2300 ++++ b/drivers/mmc/host/atmel-mci.c
2301 +@@ -1954,13 +1954,14 @@ static void atmci_tasklet_func(unsigned long priv)
2302 + }
2303 +
2304 + atmci_request_end(host, host->mrq);
2305 +- state = STATE_IDLE;
2306 ++ goto unlock; /* atmci_request_end() sets host->state */
2307 + break;
2308 + }
2309 + } while (state != prev_state);
2310 +
2311 + host->state = state;
2312 +
2313 ++unlock:
2314 + spin_unlock(&host->lock);
2315 + }
2316 +
2317 +diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
2318 +index 24fb6a685039..b2a0e59b6252 100644
2319 +--- a/drivers/net/dsa/mv88e6xxx/chip.c
2320 ++++ b/drivers/net/dsa/mv88e6xxx/chip.c
2321 +@@ -2403,6 +2403,107 @@ static int mv88e6xxx_stats_setup(struct mv88e6xxx_chip *chip)
2322 + return mv88e6xxx_g1_stats_clear(chip);
2323 + }
2324 +
2325 ++/* The mv88e6390 has some hidden registers used for debug and
2326 ++ * development. The errata also makes use of them.
2327 ++ */
2328 ++static int mv88e6390_hidden_write(struct mv88e6xxx_chip *chip, int port,
2329 ++ int reg, u16 val)
2330 ++{
2331 ++ u16 ctrl;
2332 ++ int err;
2333 ++
2334 ++ err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_DATA_PORT,
2335 ++ PORT_RESERVED_1A, val);
2336 ++ if (err)
2337 ++ return err;
2338 ++
2339 ++ ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_WRITE |
2340 ++ PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT |
2341 ++ reg;
2342 ++
2343 ++ return mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT,
2344 ++ PORT_RESERVED_1A, ctrl);
2345 ++}
2346 ++
2347 ++static int mv88e6390_hidden_wait(struct mv88e6xxx_chip *chip)
2348 ++{
2349 ++ return mv88e6xxx_wait(chip, PORT_RESERVED_1A_CTRL_PORT,
2350 ++ PORT_RESERVED_1A, PORT_RESERVED_1A_BUSY);
2351 ++}
2352 ++
2353 ++
2354 ++static int mv88e6390_hidden_read(struct mv88e6xxx_chip *chip, int port,
2355 ++ int reg, u16 *val)
2356 ++{
2357 ++ u16 ctrl;
2358 ++ int err;
2359 ++
2360 ++ ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_READ |
2361 ++ PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT |
2362 ++ reg;
2363 ++
2364 ++ err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT,
2365 ++ PORT_RESERVED_1A, ctrl);
2366 ++ if (err)
2367 ++ return err;
2368 ++
2369 ++ err = mv88e6390_hidden_wait(chip);
2370 ++ if (err)
2371 ++ return err;
2372 ++
2373 ++ return mv88e6xxx_port_read(chip, PORT_RESERVED_1A_DATA_PORT,
2374 ++ PORT_RESERVED_1A, val);
2375 ++}
2376 ++
2377 ++/* Check if the errata has already been applied. */
2378 ++static bool mv88e6390_setup_errata_applied(struct mv88e6xxx_chip *chip)
2379 ++{
2380 ++ int port;
2381 ++ int err;
2382 ++ u16 val;
2383 ++
2384 ++ for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
2385 ++ err = mv88e6390_hidden_read(chip, port, 0, &val);
2386 ++ if (err) {
2387 ++ dev_err(chip->dev,
2388 ++ "Error reading hidden register: %d\n", err);
2389 ++ return false;
2390 ++ }
2391 ++ if (val != 0x01c0)
2392 ++ return false;
2393 ++ }
2394 ++
2395 ++ return true;
2396 ++}
2397 ++
2398 ++/* The 6390 copper ports have an errata which require poking magic
2399 ++ * values into undocumented hidden registers and then performing a
2400 ++ * software reset.
2401 ++ */
2402 ++static int mv88e6390_setup_errata(struct mv88e6xxx_chip *chip)
2403 ++{
2404 ++ int port;
2405 ++ int err;
2406 ++
2407 ++ if (mv88e6390_setup_errata_applied(chip))
2408 ++ return 0;
2409 ++
2410 ++ /* Set the ports into blocking mode */
2411 ++ for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
2412 ++ err = mv88e6xxx_port_set_state(chip, port, BR_STATE_DISABLED);
2413 ++ if (err)
2414 ++ return err;
2415 ++ }
2416 ++
2417 ++ for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
2418 ++ err = mv88e6390_hidden_write(chip, port, 0, 0x01c0);
2419 ++ if (err)
2420 ++ return err;
2421 ++ }
2422 ++
2423 ++ return mv88e6xxx_software_reset(chip);
2424 ++}
2425 ++
2426 + static int mv88e6xxx_setup(struct dsa_switch *ds)
2427 + {
2428 + struct mv88e6xxx_chip *chip = ds->priv;
2429 +@@ -2415,6 +2516,12 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
2430 +
2431 + mutex_lock(&chip->reg_lock);
2432 +
2433 ++ if (chip->info->ops->setup_errata) {
2434 ++ err = chip->info->ops->setup_errata(chip);
2435 ++ if (err)
2436 ++ goto unlock;
2437 ++ }
2438 ++
2439 + /* Cache the cmode of each port. */
2440 + for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
2441 + if (chip->info->ops->port_get_cmode) {
2442 +@@ -3215,6 +3322,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
2443 +
2444 + static const struct mv88e6xxx_ops mv88e6190_ops = {
2445 + /* MV88E6XXX_FAMILY_6390 */
2446 ++ .setup_errata = mv88e6390_setup_errata,
2447 + .irl_init_all = mv88e6390_g2_irl_init_all,
2448 + .get_eeprom = mv88e6xxx_g2_get_eeprom8,
2449 + .set_eeprom = mv88e6xxx_g2_set_eeprom8,
2450 +@@ -3257,6 +3365,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
2451 +
2452 + static const struct mv88e6xxx_ops mv88e6190x_ops = {
2453 + /* MV88E6XXX_FAMILY_6390 */
2454 ++ .setup_errata = mv88e6390_setup_errata,
2455 + .irl_init_all = mv88e6390_g2_irl_init_all,
2456 + .get_eeprom = mv88e6xxx_g2_get_eeprom8,
2457 + .set_eeprom = mv88e6xxx_g2_set_eeprom8,
2458 +@@ -3299,6 +3408,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
2459 +
2460 + static const struct mv88e6xxx_ops mv88e6191_ops = {
2461 + /* MV88E6XXX_FAMILY_6390 */
2462 ++ .setup_errata = mv88e6390_setup_errata,
2463 + .irl_init_all = mv88e6390_g2_irl_init_all,
2464 + .get_eeprom = mv88e6xxx_g2_get_eeprom8,
2465 + .set_eeprom = mv88e6xxx_g2_set_eeprom8,
2466 +@@ -3390,6 +3500,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
2467 +
2468 + static const struct mv88e6xxx_ops mv88e6290_ops = {
2469 + /* MV88E6XXX_FAMILY_6390 */
2470 ++ .setup_errata = mv88e6390_setup_errata,
2471 + .irl_init_all = mv88e6390_g2_irl_init_all,
2472 + .get_eeprom = mv88e6xxx_g2_get_eeprom8,
2473 + .set_eeprom = mv88e6xxx_g2_set_eeprom8,
2474 +@@ -3693,6 +3804,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
2475 +
2476 + static const struct mv88e6xxx_ops mv88e6390_ops = {
2477 + /* MV88E6XXX_FAMILY_6390 */
2478 ++ .setup_errata = mv88e6390_setup_errata,
2479 + .irl_init_all = mv88e6390_g2_irl_init_all,
2480 + .get_eeprom = mv88e6xxx_g2_get_eeprom8,
2481 + .set_eeprom = mv88e6xxx_g2_set_eeprom8,
2482 +@@ -3740,6 +3852,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
2483 +
2484 + static const struct mv88e6xxx_ops mv88e6390x_ops = {
2485 + /* MV88E6XXX_FAMILY_6390 */
2486 ++ .setup_errata = mv88e6390_setup_errata,
2487 + .irl_init_all = mv88e6390_g2_irl_init_all,
2488 + .get_eeprom = mv88e6xxx_g2_get_eeprom8,
2489 + .set_eeprom = mv88e6xxx_g2_set_eeprom8,
2490 +diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
2491 +index f9ecb7872d32..546651d8c3e1 100644
2492 +--- a/drivers/net/dsa/mv88e6xxx/chip.h
2493 ++++ b/drivers/net/dsa/mv88e6xxx/chip.h
2494 +@@ -300,6 +300,11 @@ struct mv88e6xxx_mdio_bus {
2495 + };
2496 +
2497 + struct mv88e6xxx_ops {
2498 ++ /* Switch Setup Errata, called early in the switch setup to
2499 ++ * allow any errata actions to be performed
2500 ++ */
2501 ++ int (*setup_errata)(struct mv88e6xxx_chip *chip);
2502 ++
2503 + int (*ieee_pri_map)(struct mv88e6xxx_chip *chip);
2504 + int (*ip_pri_map)(struct mv88e6xxx_chip *chip);
2505 +
2506 +diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
2507 +index 36904c9bf955..091aa0057f1f 100644
2508 +--- a/drivers/net/dsa/mv88e6xxx/port.h
2509 ++++ b/drivers/net/dsa/mv88e6xxx/port.h
2510 +@@ -251,6 +251,16 @@
2511 + /* Offset 0x19: Port IEEE Priority Remapping Registers (4-7) */
2512 + #define MV88E6095_PORT_IEEE_PRIO_REMAP_4567 0x19
2513 +
2514 ++/* Offset 0x1a: Magic undocumented errata register */
2515 ++#define PORT_RESERVED_1A 0x1a
2516 ++#define PORT_RESERVED_1A_BUSY BIT(15)
2517 ++#define PORT_RESERVED_1A_WRITE BIT(14)
2518 ++#define PORT_RESERVED_1A_READ 0
2519 ++#define PORT_RESERVED_1A_PORT_SHIFT 5
2520 ++#define PORT_RESERVED_1A_BLOCK (0xf << 10)
2521 ++#define PORT_RESERVED_1A_CTRL_PORT 4
2522 ++#define PORT_RESERVED_1A_DATA_PORT 5
2523 ++
2524 + int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg,
2525 + u16 *val);
2526 + int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg,
2527 +diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
2528 +index 37c76945ad9b..e1f821edbc21 100644
2529 +--- a/drivers/net/ethernet/intel/e1000e/ptp.c
2530 ++++ b/drivers/net/ethernet/intel/e1000e/ptp.c
2531 +@@ -173,10 +173,14 @@ static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
2532 + struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
2533 + ptp_clock_info);
2534 + unsigned long flags;
2535 +- u64 ns;
2536 ++ u64 cycles, ns;
2537 +
2538 + spin_lock_irqsave(&adapter->systim_lock, flags);
2539 +- ns = timecounter_read(&adapter->tc);
2540 ++
2541 ++ /* Use timecounter_cyc2time() to allow non-monotonic SYSTIM readings */
2542 ++ cycles = adapter->cc.read(&adapter->cc);
2543 ++ ns = timecounter_cyc2time(&adapter->tc, cycles);
2544 ++
2545 + spin_unlock_irqrestore(&adapter->systim_lock, flags);
2546 +
2547 + *ts = ns_to_timespec64(ns);
2548 +@@ -232,9 +236,12 @@ static void e1000e_systim_overflow_work(struct work_struct *work)
2549 + systim_overflow_work.work);
2550 + struct e1000_hw *hw = &adapter->hw;
2551 + struct timespec64 ts;
2552 ++ u64 ns;
2553 +
2554 +- adapter->ptp_clock_info.gettime64(&adapter->ptp_clock_info, &ts);
2555 ++ /* Update the timecounter */
2556 ++ ns = timecounter_read(&adapter->tc);
2557 +
2558 ++ ts = ns_to_timespec64(ns);
2559 + e_dbg("SYSTIM overflow check at %lld.%09lu\n",
2560 + (long long) ts.tv_sec, ts.tv_nsec);
2561 +
2562 +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
2563 +index fd1b0546fd67..4d77f42e035c 100644
2564 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
2565 ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
2566 +@@ -4,6 +4,7 @@
2567 + #include "ixgbe.h"
2568 + #include <net/xfrm.h>
2569 + #include <crypto/aead.h>
2570 ++#include <linux/if_bridge.h>
2571 +
2572 + #define IXGBE_IPSEC_KEY_BITS 160
2573 + static const char aes_gcm_name[] = "rfc4106(gcm(aes))";
2574 +@@ -693,7 +694,8 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
2575 + } else {
2576 + struct tx_sa tsa;
2577 +
2578 +- if (adapter->num_vfs)
2579 ++ if (adapter->num_vfs &&
2580 ++ adapter->bridge_mode != BRIDGE_MODE_VEPA)
2581 + return -EOPNOTSUPP;
2582 +
2583 + /* find the first unused index */
2584 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
2585 +index 12db256c8c9f..ee67d1c4281d 100644
2586 +--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
2587 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
2588 +@@ -668,7 +668,7 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2589 + if (!cgx->reg_base) {
2590 + dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n");
2591 + err = -ENOMEM;
2592 +- goto err_release_regions;
2593 ++ goto err_free_irq_vectors;
2594 + }
2595 +
2596 + nvec = CGX_NVEC;
2597 +@@ -693,6 +693,8 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2598 + err_release_lmac:
2599 + cgx_lmac_exit(cgx);
2600 + list_del(&cgx->cgx_list);
2601 ++err_free_irq_vectors:
2602 ++ pci_free_irq_vectors(pdev);
2603 + err_release_regions:
2604 + pci_release_regions(pdev);
2605 + err_disable_device:
2606 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2607 +index f84b9c02fcc5..124aee09e953 100644
2608 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2609 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2610 +@@ -4738,12 +4738,15 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
2611 + lower_dev,
2612 + upper_dev);
2613 + } else if (netif_is_lag_master(upper_dev)) {
2614 +- if (info->linking)
2615 ++ if (info->linking) {
2616 + err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
2617 + upper_dev);
2618 +- else
2619 ++ } else {
2620 ++ mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port,
2621 ++ false);
2622 + mlxsw_sp_port_lag_leave(mlxsw_sp_port,
2623 + upper_dev);
2624 ++ }
2625 + } else if (netif_is_ovs_master(upper_dev)) {
2626 + if (info->linking)
2627 + err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
2628 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
2629 +index 50080c60a279..69f556ddb934 100644
2630 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
2631 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
2632 +@@ -1816,7 +1816,7 @@ static void
2633 + mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
2634 + struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
2635 + {
2636 +- u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid;
2637 ++ u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid;
2638 + struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2639 +
2640 + mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
2641 +diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
2642 +index 78ea9639b622..88e60d6d93dc 100644
2643 +--- a/drivers/net/ethernet/realtek/r8169.c
2644 ++++ b/drivers/net/ethernet/realtek/r8169.c
2645 +@@ -212,6 +212,8 @@ enum cfg_version {
2646 + };
2647 +
2648 + static const struct pci_device_id rtl8169_pci_tbl[] = {
2649 ++ { PCI_VDEVICE(REALTEK, 0x2502), RTL_CFG_1 },
2650 ++ { PCI_VDEVICE(REALTEK, 0x2600), RTL_CFG_1 },
2651 + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
2652 + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
2653 + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 },
2654 +diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
2655 +index 7c7cd9d94bcc..3d0114ba2bfe 100644
2656 +--- a/drivers/net/ethernet/socionext/sni_ave.c
2657 ++++ b/drivers/net/ethernet/socionext/sni_ave.c
2658 +@@ -1210,9 +1210,13 @@ static int ave_init(struct net_device *ndev)
2659 +
2660 + priv->phydev = phydev;
2661 +
2662 +- phy_ethtool_get_wol(phydev, &wol);
2663 ++ ave_ethtool_get_wol(ndev, &wol);
2664 + device_set_wakeup_capable(&ndev->dev, !!wol.supported);
2665 +
2666 ++ /* set wol initial state disabled */
2667 ++ wol.wolopts = 0;
2668 ++ ave_ethtool_set_wol(ndev, &wol);
2669 ++
2670 + if (!phy_interface_is_rgmii(phydev))
2671 + phy_set_max_speed(phydev, SPEED_100);
2672 +
2673 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
2674 +index 774e1ff01c9a..735ad838e2ba 100644
2675 +--- a/drivers/net/usb/qmi_wwan.c
2676 ++++ b/drivers/net/usb/qmi_wwan.c
2677 +@@ -123,6 +123,7 @@ static void qmimux_setup(struct net_device *dev)
2678 + dev->addr_len = 0;
2679 + dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
2680 + dev->netdev_ops = &qmimux_netdev_ops;
2681 ++ dev->mtu = 1500;
2682 + dev->needs_free_netdev = true;
2683 + }
2684 +
2685 +diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
2686 +index b09cdc699c69..38afbbd9fb44 100644
2687 +--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
2688 ++++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
2689 +@@ -71,7 +71,7 @@ void ath10k_sta_update_rx_tid_stats_ampdu(struct ath10k *ar, u16 peer_id, u8 tid
2690 + spin_lock_bh(&ar->data_lock);
2691 +
2692 + peer = ath10k_peer_find_by_id(ar, peer_id);
2693 +- if (!peer)
2694 ++ if (!peer || !peer->sta)
2695 + goto out;
2696 +
2697 + arsta = (struct ath10k_sta *)peer->sta->drv_priv;
2698 +diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
2699 +index ffec98f7be50..2c2761d04d01 100644
2700 +--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
2701 ++++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
2702 +@@ -2832,7 +2832,7 @@ static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
2703 + rcu_read_lock();
2704 + spin_lock_bh(&ar->data_lock);
2705 + peer = ath10k_peer_find_by_id(ar, peer_id);
2706 +- if (!peer) {
2707 ++ if (!peer || !peer->sta) {
2708 + ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n",
2709 + peer_id);
2710 + goto out;
2711 +@@ -2885,7 +2885,7 @@ static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
2712 + rcu_read_lock();
2713 + spin_lock_bh(&ar->data_lock);
2714 + peer = ath10k_peer_find_by_id(ar, peer_id);
2715 +- if (!peer) {
2716 ++ if (!peer || !peer->sta) {
2717 + ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
2718 + peer_id);
2719 + goto out;
2720 +diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
2721 +index bfdc1ad30c13..659e7649fe22 100644
2722 +--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
2723 ++++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
2724 +@@ -84,7 +84,7 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus,
2725 + size_t *var_resp_size)
2726 + {
2727 + struct qlink_cmd *cmd;
2728 +- const struct qlink_resp *resp;
2729 ++ struct qlink_resp *resp = NULL;
2730 + struct sk_buff *resp_skb = NULL;
2731 + u16 cmd_id;
2732 + u8 mac_id;
2733 +@@ -113,7 +113,12 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus,
2734 + if (ret)
2735 + goto out;
2736 +
2737 +- resp = (const struct qlink_resp *)resp_skb->data;
2738 ++ if (WARN_ON(!resp_skb || !resp_skb->data)) {
2739 ++ ret = -EFAULT;
2740 ++ goto out;
2741 ++ }
2742 ++
2743 ++ resp = (struct qlink_resp *)resp_skb->data;
2744 + ret = qtnf_cmd_check_reply_header(resp, cmd_id, mac_id, vif_id,
2745 + const_resp_size);
2746 + if (ret)
2747 +@@ -686,7 +691,7 @@ int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac,
2748 + struct sk_buff *cmd_skb, *resp_skb = NULL;
2749 + struct qlink_cmd_get_sta_info *cmd;
2750 + const struct qlink_resp_get_sta_info *resp;
2751 +- size_t var_resp_len;
2752 ++ size_t var_resp_len = 0;
2753 + int ret = 0;
2754 +
2755 + cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
2756 +@@ -1650,7 +1655,7 @@ int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac)
2757 + {
2758 + struct sk_buff *cmd_skb, *resp_skb = NULL;
2759 + const struct qlink_resp_get_mac_info *resp;
2760 +- size_t var_data_len;
2761 ++ size_t var_data_len = 0;
2762 + int ret = 0;
2763 +
2764 + cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD,
2765 +@@ -1680,8 +1685,8 @@ int qtnf_cmd_get_hw_info(struct qtnf_bus *bus)
2766 + {
2767 + struct sk_buff *cmd_skb, *resp_skb = NULL;
2768 + const struct qlink_resp_get_hw_info *resp;
2769 ++ size_t info_len = 0;
2770 + int ret = 0;
2771 +- size_t info_len;
2772 +
2773 + cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD,
2774 + QLINK_CMD_GET_HW_INFO,
2775 +@@ -1709,9 +1714,9 @@ int qtnf_cmd_band_info_get(struct qtnf_wmac *mac,
2776 + struct ieee80211_supported_band *band)
2777 + {
2778 + struct sk_buff *cmd_skb, *resp_skb = NULL;
2779 +- size_t info_len;
2780 + struct qlink_cmd_band_info_get *cmd;
2781 + struct qlink_resp_band_info_get *resp;
2782 ++ size_t info_len = 0;
2783 + int ret = 0;
2784 + u8 qband;
2785 +
2786 +@@ -1764,8 +1769,8 @@ out:
2787 + int qtnf_cmd_send_get_phy_params(struct qtnf_wmac *mac)
2788 + {
2789 + struct sk_buff *cmd_skb, *resp_skb = NULL;
2790 +- size_t response_size;
2791 + struct qlink_resp_phy_params *resp;
2792 ++ size_t response_size = 0;
2793 + int ret = 0;
2794 +
2795 + cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, 0,
2796 +@@ -2431,7 +2436,7 @@ int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel,
2797 + struct sk_buff *cmd_skb, *resp_skb = NULL;
2798 + struct qlink_cmd_get_chan_stats *cmd;
2799 + struct qlink_resp_get_chan_stats *resp;
2800 +- size_t var_data_len;
2801 ++ size_t var_data_len = 0;
2802 + int ret = 0;
2803 +
2804 + cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD,
2805 +diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
2806 +index 42b1f73ac5f6..1e058196f23f 100644
2807 +--- a/drivers/of/overlay.c
2808 ++++ b/drivers/of/overlay.c
2809 +@@ -378,7 +378,9 @@ static int add_changeset_node(struct overlay_changeset *ovcs,
2810 + if (ret)
2811 + return ret;
2812 +
2813 +- return build_changeset_next_level(ovcs, tchild, node);
2814 ++ ret = build_changeset_next_level(ovcs, tchild, node);
2815 ++ of_node_put(tchild);
2816 ++ return ret;
2817 + }
2818 +
2819 + if (node->phandle && tchild->phandle)
2820 +diff --git a/drivers/platform/mips/cpu_hwmon.c b/drivers/platform/mips/cpu_hwmon.c
2821 +index f66521c7f846..42efcb850722 100644
2822 +--- a/drivers/platform/mips/cpu_hwmon.c
2823 ++++ b/drivers/platform/mips/cpu_hwmon.c
2824 +@@ -25,9 +25,10 @@ int loongson3_cpu_temp(int cpu)
2825 + case PRID_REV_LOONGSON3A_R1:
2826 + reg = (reg >> 8) & 0xff;
2827 + break;
2828 +- case PRID_REV_LOONGSON3A_R2:
2829 + case PRID_REV_LOONGSON3B_R1:
2830 + case PRID_REV_LOONGSON3B_R2:
2831 ++ case PRID_REV_LOONGSON3A_R2_0:
2832 ++ case PRID_REV_LOONGSON3A_R2_1:
2833 + reg = ((reg >> 8) & 0xff) - 100;
2834 + break;
2835 + case PRID_REV_LOONGSON3A_R3_0:
2836 +diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
2837 +index c285a16675ee..37b5de541270 100644
2838 +--- a/drivers/platform/x86/asus-wmi.c
2839 ++++ b/drivers/platform/x86/asus-wmi.c
2840 +@@ -2131,7 +2131,8 @@ static int asus_wmi_add(struct platform_device *pdev)
2841 + err = asus_wmi_backlight_init(asus);
2842 + if (err && err != -ENODEV)
2843 + goto fail_backlight;
2844 +- }
2845 ++ } else
2846 ++ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, 2, NULL);
2847 +
2848 + status = wmi_install_notify_handler(asus->driver->event_guid,
2849 + asus_wmi_notify, asus);
2850 +diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
2851 +index 59ecbb3b53b5..a33628550425 100644
2852 +--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
2853 ++++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
2854 +@@ -1266,7 +1266,7 @@ void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
2855 +
2856 + for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
2857 + ld = MR_TargetIdToLdGet(ldCount, drv_map);
2858 +- if (ld >= MAX_LOGICAL_DRIVES_EXT) {
2859 ++ if (ld >= MAX_LOGICAL_DRIVES_EXT - 1) {
2860 + lbInfo[ldCount].loadBalanceFlag = 0;
2861 + continue;
2862 + }
2863 +diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
2864 +index f74b5ea24f0f..49eaa87608f6 100644
2865 +--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
2866 ++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
2867 +@@ -2832,7 +2832,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
2868 + device_id < instance->fw_supported_vd_count)) {
2869 +
2870 + ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
2871 +- if (ld >= instance->fw_supported_vd_count)
2872 ++ if (ld >= instance->fw_supported_vd_count - 1)
2873 + fp_possible = 0;
2874 + else {
2875 + raid = MR_LdRaidGet(ld, local_map_ptr);
2876 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
2877 +index 2500377d0723..bfd826deabbe 100644
2878 +--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
2879 ++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
2880 +@@ -3319,8 +3319,9 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
2881 + static inline void
2882 + _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
2883 + {
2884 ++ wmb();
2885 + __raw_writeq(b, addr);
2886 +- mmiowb();
2887 ++ barrier();
2888 + }
2889 + #else
2890 + static inline void
2891 +diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
2892 +index 105b0e4d7818..5d7d018dad6e 100644
2893 +--- a/drivers/scsi/qedi/qedi_main.c
2894 ++++ b/drivers/scsi/qedi/qedi_main.c
2895 +@@ -952,6 +952,9 @@ static int qedi_find_boot_info(struct qedi_ctx *qedi,
2896 + cls_sess = iscsi_conn_to_session(cls_conn);
2897 + sess = cls_sess->dd_data;
2898 +
2899 ++ if (!iscsi_is_session_online(cls_sess))
2900 ++ continue;
2901 ++
2902 + if (pri_ctrl_flags) {
2903 + if (!strcmp(pri_tgt->iscsi_name, sess->targetname) &&
2904 + !strcmp(pri_tgt->ip_addr, ep_ip_addr)) {
2905 +diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
2906 +index a25a07a0b7f0..6f4cb3be97aa 100644
2907 +--- a/drivers/scsi/smartpqi/smartpqi_init.c
2908 ++++ b/drivers/scsi/smartpqi/smartpqi_init.c
2909 +@@ -2704,6 +2704,9 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
2910 + switch (response->header.iu_type) {
2911 + case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
2912 + case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
2913 ++ if (io_request->scmd)
2914 ++ io_request->scmd->result = 0;
2915 ++ /* fall through */
2916 + case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
2917 + break;
2918 + case PQI_RESPONSE_IU_TASK_MANAGEMENT:
2919 +@@ -6670,6 +6673,7 @@ static void pqi_shutdown(struct pci_dev *pci_dev)
2920 + * storage.
2921 + */
2922 + rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
2923 ++ pqi_free_interrupts(ctrl_info);
2924 + pqi_reset(ctrl_info);
2925 + if (rc == 0)
2926 + return;
2927 +diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
2928 +index 79d3ba62b298..45e88bada907 100644
2929 +--- a/drivers/staging/erofs/unzip_vle.c
2930 ++++ b/drivers/staging/erofs/unzip_vle.c
2931 +@@ -717,13 +717,18 @@ static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
2932 + struct z_erofs_vle_unzip_io *io = tagptr_unfold_ptr(t);
2933 + bool background = tagptr_unfold_tags(t);
2934 +
2935 +- if (atomic_add_return(bios, &io->pending_bios))
2936 ++ if (!background) {
2937 ++ unsigned long flags;
2938 ++
2939 ++ spin_lock_irqsave(&io->u.wait.lock, flags);
2940 ++ if (!atomic_add_return(bios, &io->pending_bios))
2941 ++ wake_up_locked(&io->u.wait);
2942 ++ spin_unlock_irqrestore(&io->u.wait.lock, flags);
2943 + return;
2944 ++ }
2945 +
2946 +- if (background)
2947 ++ if (!atomic_add_return(bios, &io->pending_bios))
2948 + queue_work(z_erofs_workqueue, &io->u.work);
2949 +- else
2950 +- wake_up(&io->u.wait);
2951 + }
2952 +
2953 + static inline void z_erofs_vle_read_endio(struct bio *bio)
2954 +diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
2955 +index f459118bc11b..c37dd36ec77d 100644
2956 +--- a/drivers/target/target_core_spc.c
2957 ++++ b/drivers/target/target_core_spc.c
2958 +@@ -108,12 +108,17 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
2959 +
2960 + buf[7] = 0x2; /* CmdQue=1 */
2961 +
2962 +- memcpy(&buf[8], "LIO-ORG ", 8);
2963 +- memset(&buf[16], 0x20, 16);
2964 ++ /*
2965 ++ * ASCII data fields described as being left-aligned shall have any
2966 ++ * unused bytes at the end of the field (i.e., highest offset) and the
2967 ++ * unused bytes shall be filled with ASCII space characters (20h).
2968 ++ */
2969 ++ memset(&buf[8], 0x20, 8 + 16 + 4);
2970 ++ memcpy(&buf[8], "LIO-ORG", sizeof("LIO-ORG") - 1);
2971 + memcpy(&buf[16], dev->t10_wwn.model,
2972 +- min_t(size_t, strlen(dev->t10_wwn.model), 16));
2973 ++ strnlen(dev->t10_wwn.model, 16));
2974 + memcpy(&buf[32], dev->t10_wwn.revision,
2975 +- min_t(size_t, strlen(dev->t10_wwn.revision), 4));
2976 ++ strnlen(dev->t10_wwn.revision, 4));
2977 + buf[4] = 31; /* Set additional length to 31 */
2978 +
2979 + return 0;
2980 +@@ -251,7 +256,9 @@ check_t10_vend_desc:
2981 + buf[off] = 0x2; /* ASCII */
2982 + buf[off+1] = 0x1; /* T10 Vendor ID */
2983 + buf[off+2] = 0x0;
2984 +- memcpy(&buf[off+4], "LIO-ORG", 8);
2985 ++ /* left align Vendor ID and pad with spaces */
2986 ++ memset(&buf[off+4], 0x20, 8);
2987 ++ memcpy(&buf[off+4], "LIO-ORG", sizeof("LIO-ORG") - 1);
2988 + /* Extra Byte for NULL Terminator */
2989 + id_len++;
2990 + /* Identifier Length */
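Both spc hunks above apply the same SPC rule: the ASCII field is first filled with spaces, then the vendor string is copied in without its NUL terminator. A standalone sketch of that fill pattern using plain libc calls; fill_ascii_field() is an invented helper name:

#include <string.h>

/* Left-align an ASCII string in a fixed-width field and pad the rest
 * with ASCII space characters (0x20), as SPC requires for INQUIRY data. */
static void fill_ascii_field(unsigned char *dst, size_t field_len,
			     const char *src)
{
	size_t len = strnlen(src, field_len);

	memset(dst, 0x20, field_len);
	memcpy(dst, src, len);		/* no NUL terminator is copied */
}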
2991 +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
2992 +index 2cfd61d62e97..ffa5b9f771b5 100644
2993 +--- a/drivers/target/target_core_transport.c
2994 ++++ b/drivers/target/target_core_transport.c
2995 +@@ -224,19 +224,28 @@ void transport_subsystem_check_init(void)
2996 + sub_api_initialized = 1;
2997 + }
2998 +
2999 ++static void target_release_sess_cmd_refcnt(struct percpu_ref *ref)
3000 ++{
3001 ++ struct se_session *sess = container_of(ref, typeof(*sess), cmd_count);
3002 ++
3003 ++ wake_up(&sess->cmd_list_wq);
3004 ++}
3005 ++
3006 + /**
3007 + * transport_init_session - initialize a session object
3008 + * @se_sess: Session object pointer.
3009 + *
3010 + * The caller must have zero-initialized @se_sess before calling this function.
3011 + */
3012 +-void transport_init_session(struct se_session *se_sess)
3013 ++int transport_init_session(struct se_session *se_sess)
3014 + {
3015 + INIT_LIST_HEAD(&se_sess->sess_list);
3016 + INIT_LIST_HEAD(&se_sess->sess_acl_list);
3017 + INIT_LIST_HEAD(&se_sess->sess_cmd_list);
3018 + spin_lock_init(&se_sess->sess_cmd_lock);
3019 + init_waitqueue_head(&se_sess->cmd_list_wq);
3020 ++ return percpu_ref_init(&se_sess->cmd_count,
3021 ++ target_release_sess_cmd_refcnt, 0, GFP_KERNEL);
3022 + }
3023 + EXPORT_SYMBOL(transport_init_session);
3024 +
3025 +@@ -247,6 +256,7 @@ EXPORT_SYMBOL(transport_init_session);
3026 + struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
3027 + {
3028 + struct se_session *se_sess;
3029 ++ int ret;
3030 +
3031 + se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
3032 + if (!se_sess) {
3033 +@@ -254,7 +264,11 @@ struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
3034 + " se_sess_cache\n");
3035 + return ERR_PTR(-ENOMEM);
3036 + }
3037 +- transport_init_session(se_sess);
3038 ++ ret = transport_init_session(se_sess);
3039 ++ if (ret < 0) {
3040 ++ kfree(se_sess);
3041 ++ return ERR_PTR(ret);
3042 ++ }
3043 + se_sess->sup_prot_ops = sup_prot_ops;
3044 +
3045 + return se_sess;
3046 +@@ -581,6 +595,7 @@ void transport_free_session(struct se_session *se_sess)
3047 + sbitmap_queue_free(&se_sess->sess_tag_pool);
3048 + kvfree(se_sess->sess_cmd_map);
3049 + }
3050 ++ percpu_ref_exit(&se_sess->cmd_count);
3051 + kmem_cache_free(se_sess_cache, se_sess);
3052 + }
3053 + EXPORT_SYMBOL(transport_free_session);
3054 +@@ -2719,6 +2734,7 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
3055 + }
3056 + se_cmd->transport_state |= CMD_T_PRE_EXECUTE;
3057 + list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
3058 ++ percpu_ref_get(&se_sess->cmd_count);
3059 + out:
3060 + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3061 +
3062 +@@ -2749,8 +2765,6 @@ static void target_release_cmd_kref(struct kref *kref)
3063 + if (se_sess) {
3064 + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
3065 + list_del_init(&se_cmd->se_cmd_list);
3066 +- if (se_sess->sess_tearing_down && list_empty(&se_sess->sess_cmd_list))
3067 +- wake_up(&se_sess->cmd_list_wq);
3068 + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3069 + }
3070 +
3071 +@@ -2758,6 +2772,8 @@ static void target_release_cmd_kref(struct kref *kref)
3072 + se_cmd->se_tfo->release_cmd(se_cmd);
3073 + if (compl)
3074 + complete(compl);
3075 ++
3076 ++ percpu_ref_put(&se_sess->cmd_count);
3077 + }
3078 +
3079 + /**
3080 +@@ -2886,6 +2902,8 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
3081 + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
3082 + se_sess->sess_tearing_down = 1;
3083 + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3084 ++
3085 ++ percpu_ref_kill(&se_sess->cmd_count);
3086 + }
3087 + EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
3088 +
3089 +@@ -2900,17 +2918,14 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
3090 +
3091 + WARN_ON_ONCE(!se_sess->sess_tearing_down);
3092 +
3093 +- spin_lock_irq(&se_sess->sess_cmd_lock);
3094 + do {
3095 +- ret = wait_event_lock_irq_timeout(
3096 +- se_sess->cmd_list_wq,
3097 +- list_empty(&se_sess->sess_cmd_list),
3098 +- se_sess->sess_cmd_lock, 180 * HZ);
3099 ++ ret = wait_event_timeout(se_sess->cmd_list_wq,
3100 ++ percpu_ref_is_zero(&se_sess->cmd_count),
3101 ++ 180 * HZ);
3102 + list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list)
3103 + target_show_cmd("session shutdown: still waiting for ",
3104 + cmd);
3105 + } while (ret <= 0);
3106 +- spin_unlock_irq(&se_sess->sess_cmd_lock);
3107 + }
3108 + EXPORT_SYMBOL(target_wait_for_sess_cmds);
3109 +
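The transport hunks above replace the per-session list-emptiness check with a percpu_ref that counts outstanding commands: one get per queued command, one put on release, a kill at teardown, then a wait for the count to hit zero. A condensed sketch of that lifecycle, assuming the standard percpu_ref API; the demo_* names are invented:

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/percpu-refcount.h>
#include <linux/wait.h>

struct demo_sess {
	struct percpu_ref cmd_count;
	wait_queue_head_t cmd_wq;
};

static void demo_cmd_count_release(struct percpu_ref *ref)
{
	struct demo_sess *s = container_of(ref, struct demo_sess, cmd_count);

	wake_up(&s->cmd_wq);		/* last outstanding command is gone */
}

static int demo_sess_init(struct demo_sess *s)
{
	init_waitqueue_head(&s->cmd_wq);
	return percpu_ref_init(&s->cmd_count, demo_cmd_count_release,
			       0, GFP_KERNEL);
}

/* Per command: percpu_ref_get() when it is queued, percpu_ref_put()
 * when it is released.  Session teardown then becomes: */
static void demo_sess_shutdown(struct demo_sess *s)
{
	percpu_ref_kill(&s->cmd_count);
	wait_event(s->cmd_wq, percpu_ref_is_zero(&s->cmd_count));
	percpu_ref_exit(&s->cmd_count);
}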
3110 +diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
3111 +index 70adcfdca8d1..124495f953fa 100644
3112 +--- a/drivers/target/target_core_xcopy.c
3113 ++++ b/drivers/target/target_core_xcopy.c
3114 +@@ -479,6 +479,8 @@ static const struct target_core_fabric_ops xcopy_pt_tfo = {
3115 +
3116 + int target_xcopy_setup_pt(void)
3117 + {
3118 ++ int ret;
3119 ++
3120 + xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
3121 + if (!xcopy_wq) {
3122 + pr_err("Unable to allocate xcopy_wq\n");
3123 +@@ -496,7 +498,9 @@ int target_xcopy_setup_pt(void)
3124 + INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
3125 + INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
3126 + memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
3127 +- transport_init_session(&xcopy_pt_sess);
3128 ++ ret = transport_init_session(&xcopy_pt_sess);
3129 ++ if (ret < 0)
3130 ++ return ret;
3131 +
3132 + xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
3133 + xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
3134 +diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
3135 +index ebd33c0232e6..89ade213a1a9 100644
3136 +--- a/drivers/tty/serial/amba-pl011.c
3137 ++++ b/drivers/tty/serial/amba-pl011.c
3138 +@@ -2780,6 +2780,7 @@ static struct platform_driver arm_sbsa_uart_platform_driver = {
3139 + .name = "sbsa-uart",
3140 + .of_match_table = of_match_ptr(sbsa_uart_of_match),
3141 + .acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match),
3142 ++ .suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
3143 + },
3144 + };
3145 +
3146 +@@ -2808,6 +2809,7 @@ static struct amba_driver pl011_driver = {
3147 + .drv = {
3148 + .name = "uart-pl011",
3149 + .pm = &pl011_dev_pm_ops,
3150 ++ .suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
3151 + },
3152 + .id_table = pl011_ids,
3153 + .probe = pl011_probe,
3154 +diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c
3155 +index fd80d999308d..0bdf1687983f 100644
3156 +--- a/drivers/tty/serial/pic32_uart.c
3157 ++++ b/drivers/tty/serial/pic32_uart.c
3158 +@@ -919,6 +919,7 @@ static struct platform_driver pic32_uart_platform_driver = {
3159 + .driver = {
3160 + .name = PIC32_DEV_NAME,
3161 + .of_match_table = of_match_ptr(pic32_serial_dt_ids),
3162 ++ .suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_PIC32),
3163 + },
3164 + };
3165 +
3166 +diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
3167 +index c439a5a1e6c0..d4cca5bdaf1c 100644
3168 +--- a/drivers/tty/serial/serial_core.c
3169 ++++ b/drivers/tty/serial/serial_core.c
3170 +@@ -205,10 +205,15 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
3171 + if (!state->xmit.buf) {
3172 + state->xmit.buf = (unsigned char *) page;
3173 + uart_circ_clear(&state->xmit);
3174 ++ uart_port_unlock(uport, flags);
3175 + } else {
3176 ++ uart_port_unlock(uport, flags);
3177 ++ /*
3178 ++ * Do not free() the page under the port lock, see
3179 ++ * uart_shutdown().
3180 ++ */
3181 + free_page(page);
3182 + }
3183 +- uart_port_unlock(uport, flags);
3184 +
3185 + retval = uport->ops->startup(uport);
3186 + if (retval == 0) {
3187 +@@ -268,6 +273,7 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
3188 + struct uart_port *uport = uart_port_check(state);
3189 + struct tty_port *port = &state->port;
3190 + unsigned long flags = 0;
3191 ++ char *xmit_buf = NULL;
3192 +
3193 + /*
3194 + * Set the TTY IO error marker
3195 +@@ -298,14 +304,18 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
3196 + tty_port_set_suspended(port, 0);
3197 +
3198 + /*
3199 +- * Free the transmit buffer page.
3200 ++ * Do not free() the transmit buffer page under the port lock since
3201 ++ * this can create various circular locking scenarios. For instance,
3202 ++	 * the console driver may need to allocate/free a debug object, which
3203 ++	 * can end up in printk() recursion.
3204 + */
3205 + uart_port_lock(state, flags);
3206 +- if (state->xmit.buf) {
3207 +- free_page((unsigned long)state->xmit.buf);
3208 +- state->xmit.buf = NULL;
3209 +- }
3210 ++ xmit_buf = state->xmit.buf;
3211 ++ state->xmit.buf = NULL;
3212 + uart_port_unlock(uport, flags);
3213 ++
3214 ++ if (xmit_buf)
3215 ++ free_page((unsigned long)xmit_buf);
3216 + }
3217 +
3218 + /**
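Both serial_core hunks above follow one rule: detach the buffer pointer while holding the port lock, then call free_page() only after the lock is dropped, so the allocator is never entered under the uart lock. A minimal sketch of that shape; demo_release_buf() is an invented helper:

#include <linux/gfp.h>
#include <linux/spinlock.h>

static void demo_release_buf(spinlock_t *lock, unsigned char **bufp)
{
	unsigned long flags;
	unsigned char *buf;

	spin_lock_irqsave(lock, flags);
	buf = *bufp;			/* detach under the lock */
	*bufp = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (buf)			/* free with no spinlock held */
		free_page((unsigned long)buf);
}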
3219 +diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
3220 +index 5413a04023f9..6df252648e40 100644
3221 +--- a/drivers/tty/serial/xilinx_uartps.c
3222 ++++ b/drivers/tty/serial/xilinx_uartps.c
3223 +@@ -1719,6 +1719,7 @@ static struct platform_driver cdns_uart_platform_driver = {
3224 + .name = CDNS_UART_NAME,
3225 + .of_match_table = cdns_uart_of_match,
3226 + .pm = &cdns_uart_dev_pm_ops,
3227 ++ .suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_XILINX_PS_UART),
3228 + },
3229 + };
3230 +
3231 +diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
3232 +index 2d6d2c8244de..a00a56b4ae79 100644
3233 +--- a/drivers/usb/dwc2/gadget.c
3234 ++++ b/drivers/usb/dwc2/gadget.c
3235 +@@ -3165,8 +3165,6 @@ static void kill_all_requests(struct dwc2_hsotg *hsotg,
3236 + dwc2_hsotg_txfifo_flush(hsotg, ep->fifo_index);
3237 + }
3238 +
3239 +-static int dwc2_hsotg_ep_disable(struct usb_ep *ep);
3240 +-
3241 + /**
3242 + * dwc2_hsotg_disconnect - disconnect service
3243 + * @hsotg: The device state.
3244 +@@ -3188,9 +3186,11 @@ void dwc2_hsotg_disconnect(struct dwc2_hsotg *hsotg)
3245 + /* all endpoints should be shutdown */
3246 + for (ep = 0; ep < hsotg->num_of_eps; ep++) {
3247 + if (hsotg->eps_in[ep])
3248 +- dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
3249 ++ kill_all_requests(hsotg, hsotg->eps_in[ep],
3250 ++ -ESHUTDOWN);
3251 + if (hsotg->eps_out[ep])
3252 +- dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
3253 ++ kill_all_requests(hsotg, hsotg->eps_out[ep],
3254 ++ -ESHUTDOWN);
3255 + }
3256 +
3257 + call_gadget(hsotg, disconnect);
3258 +@@ -3234,6 +3234,7 @@ static void dwc2_hsotg_irq_fifoempty(struct dwc2_hsotg *hsotg, bool periodic)
3259 + GINTSTS_PTXFEMP | \
3260 + GINTSTS_RXFLVL)
3261 +
3262 ++static int dwc2_hsotg_ep_disable(struct usb_ep *ep);
3263 + /**
3264 + * dwc2_hsotg_core_init - issue softreset to the core
3265 + * @hsotg: The device state
3266 +@@ -4069,10 +4070,8 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
3267 + struct dwc2_hsotg *hsotg = hs_ep->parent;
3268 + int dir_in = hs_ep->dir_in;
3269 + int index = hs_ep->index;
3270 +- unsigned long flags;
3271 + u32 epctrl_reg;
3272 + u32 ctrl;
3273 +- int locked;
3274 +
3275 + dev_dbg(hsotg->dev, "%s(ep %p)\n", __func__, ep);
3276 +
3277 +@@ -4088,10 +4087,6 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
3278 +
3279 + epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
3280 +
3281 +- locked = spin_is_locked(&hsotg->lock);
3282 +- if (!locked)
3283 +- spin_lock_irqsave(&hsotg->lock, flags);
3284 +-
3285 + ctrl = dwc2_readl(hsotg, epctrl_reg);
3286 +
3287 + if (ctrl & DXEPCTL_EPENA)
3288 +@@ -4114,12 +4109,22 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
3289 + hs_ep->fifo_index = 0;
3290 + hs_ep->fifo_size = 0;
3291 +
3292 +- if (!locked)
3293 +- spin_unlock_irqrestore(&hsotg->lock, flags);
3294 +-
3295 + return 0;
3296 + }
3297 +
3298 ++static int dwc2_hsotg_ep_disable_lock(struct usb_ep *ep)
3299 ++{
3300 ++ struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
3301 ++ struct dwc2_hsotg *hsotg = hs_ep->parent;
3302 ++ unsigned long flags;
3303 ++ int ret;
3304 ++
3305 ++ spin_lock_irqsave(&hsotg->lock, flags);
3306 ++ ret = dwc2_hsotg_ep_disable(ep);
3307 ++ spin_unlock_irqrestore(&hsotg->lock, flags);
3308 ++ return ret;
3309 ++}
3310 ++
3311 + /**
3312 + * on_list - check request is on the given endpoint
3313 + * @ep: The endpoint to check.
3314 +@@ -4267,7 +4272,7 @@ static int dwc2_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value)
3315 +
3316 + static const struct usb_ep_ops dwc2_hsotg_ep_ops = {
3317 + .enable = dwc2_hsotg_ep_enable,
3318 +- .disable = dwc2_hsotg_ep_disable,
3319 ++ .disable = dwc2_hsotg_ep_disable_lock,
3320 + .alloc_request = dwc2_hsotg_ep_alloc_request,
3321 + .free_request = dwc2_hsotg_ep_free_request,
3322 + .queue = dwc2_hsotg_ep_queue_lock,
3323 +@@ -4407,9 +4412,9 @@ static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget)
3324 + /* all endpoints should be shutdown */
3325 + for (ep = 1; ep < hsotg->num_of_eps; ep++) {
3326 + if (hsotg->eps_in[ep])
3327 +- dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
3328 ++ dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep);
3329 + if (hsotg->eps_out[ep])
3330 +- dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
3331 ++ dwc2_hsotg_ep_disable_lock(&hsotg->eps_out[ep]->ep);
3332 + }
3333 +
3334 + spin_lock_irqsave(&hsotg->lock, flags);
3335 +@@ -4857,9 +4862,9 @@ int dwc2_hsotg_suspend(struct dwc2_hsotg *hsotg)
3336 +
3337 + for (ep = 0; ep < hsotg->num_of_eps; ep++) {
3338 + if (hsotg->eps_in[ep])
3339 +- dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
3340 ++ dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep);
3341 + if (hsotg->eps_out[ep])
3342 +- dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
3343 ++ dwc2_hsotg_ep_disable_lock(&hsotg->eps_out[ep]->ep);
3344 + }
3345 + }
3346 +
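The dwc2 hunks above drop the spin_is_locked() guesswork and split endpoint disable into an unlocked core used by callers that already hold hsotg->lock, plus a _lock wrapper exported through the usb_ep_ops table. A sketch of that split, with invented demo_* names:

#include <linux/spinlock.h>

struct demo_dev {
	spinlock_t lock;
	/* hardware state protected by lock */
};

/* Core helper: caller must already hold demo_dev.lock. */
static int demo_ep_disable(struct demo_dev *d)
{
	/* program registers, flush FIFOs, etc. */
	return 0;
}

/* External entry point: take the lock, then delegate. */
static int demo_ep_disable_lock(struct demo_dev *d)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&d->lock, flags);
	ret = demo_ep_disable(d);
	spin_unlock_irqrestore(&d->lock, flags);
	return ret;
}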
3347 +diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
3348 +index cdffbd1e0316..6e34f9594159 100644
3349 +--- a/drivers/usb/gadget/udc/renesas_usb3.c
3350 ++++ b/drivers/usb/gadget/udc/renesas_usb3.c
3351 +@@ -358,6 +358,7 @@ struct renesas_usb3 {
3352 + bool extcon_host; /* check id and set EXTCON_USB_HOST */
3353 + bool extcon_usb; /* check vbus and set EXTCON_USB */
3354 + bool forced_b_device;
3355 ++ bool start_to_connect;
3356 + };
3357 +
3358 + #define gadget_to_renesas_usb3(_gadget) \
3359 +@@ -476,7 +477,8 @@ static void usb3_init_axi_bridge(struct renesas_usb3 *usb3)
3360 + static void usb3_init_epc_registers(struct renesas_usb3 *usb3)
3361 + {
3362 + usb3_write(usb3, ~0, USB3_USB_INT_STA_1);
3363 +- usb3_enable_irq_1(usb3, USB_INT_1_VBUS_CNG);
3364 ++ if (!usb3->workaround_for_vbus)
3365 ++ usb3_enable_irq_1(usb3, USB_INT_1_VBUS_CNG);
3366 + }
3367 +
3368 + static bool usb3_wakeup_usb2_phy(struct renesas_usb3 *usb3)
3369 +@@ -700,8 +702,7 @@ static void usb3_mode_config(struct renesas_usb3 *usb3, bool host, bool a_dev)
3370 + usb3_set_mode_by_role_sw(usb3, host);
3371 + usb3_vbus_out(usb3, a_dev);
3372 + /* for A-Peripheral or forced B-device mode */
3373 +- if ((!host && a_dev) ||
3374 +- (usb3->workaround_for_vbus && usb3->forced_b_device))
3375 ++ if ((!host && a_dev) || usb3->start_to_connect)
3376 + usb3_connect(usb3);
3377 + spin_unlock_irqrestore(&usb3->lock, flags);
3378 + }
3379 +@@ -2432,7 +2433,11 @@ static ssize_t renesas_usb3_b_device_write(struct file *file,
3380 + if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
3381 + return -EFAULT;
3382 +
3383 +- if (!strncmp(buf, "1", 1))
3384 ++ usb3->start_to_connect = false;
3385 ++ if (usb3->workaround_for_vbus && usb3->forced_b_device &&
3386 ++ !strncmp(buf, "2", 1))
3387 ++ usb3->start_to_connect = true;
3388 ++ else if (!strncmp(buf, "1", 1))
3389 + usb3->forced_b_device = true;
3390 + else
3391 + usb3->forced_b_device = false;
3392 +@@ -2440,7 +2445,7 @@ static ssize_t renesas_usb3_b_device_write(struct file *file,
3393 + if (usb3->workaround_for_vbus)
3394 + usb3_disconnect(usb3);
3395 +
3396 +- /* Let this driver call usb3_connect() anyway */
3397 ++ /* Let this driver call usb3_connect() if needed */
3398 + usb3_check_id(usb3);
3399 +
3400 + return count;
3401 +diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
3402 +index dbbd71f754d0..ba6e5cdaed2c 100644
3403 +--- a/drivers/usb/typec/tcpm/tcpm.c
3404 ++++ b/drivers/usb/typec/tcpm/tcpm.c
3405 +@@ -317,6 +317,9 @@ struct tcpm_port {
3406 + /* Deadline in jiffies to exit src_try_wait state */
3407 + unsigned long max_wait;
3408 +
3409 ++ /* port belongs to a self powered device */
3410 ++ bool self_powered;
3411 ++
3412 + #ifdef CONFIG_DEBUG_FS
3413 + struct dentry *dentry;
3414 + struct mutex logbuffer_lock; /* log buffer access lock */
3415 +@@ -3254,7 +3257,8 @@ static void run_state_machine(struct tcpm_port *port)
3416 + case SRC_HARD_RESET_VBUS_OFF:
3417 + tcpm_set_vconn(port, true);
3418 + tcpm_set_vbus(port, false);
3419 +- tcpm_set_roles(port, false, TYPEC_SOURCE, TYPEC_HOST);
3420 ++ tcpm_set_roles(port, port->self_powered, TYPEC_SOURCE,
3421 ++ TYPEC_HOST);
3422 + tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER);
3423 + break;
3424 + case SRC_HARD_RESET_VBUS_ON:
3425 +@@ -3267,7 +3271,8 @@ static void run_state_machine(struct tcpm_port *port)
3426 + memset(&port->pps_data, 0, sizeof(port->pps_data));
3427 + tcpm_set_vconn(port, false);
3428 + tcpm_set_charge(port, false);
3429 +- tcpm_set_roles(port, false, TYPEC_SINK, TYPEC_DEVICE);
3430 ++ tcpm_set_roles(port, port->self_powered, TYPEC_SINK,
3431 ++ TYPEC_DEVICE);
3432 + /*
3433 + * VBUS may or may not toggle, depending on the adapter.
3434 + * If it doesn't toggle, transition to SNK_HARD_RESET_SINK_ON
3435 +@@ -4412,6 +4417,8 @@ sink:
3436 + return -EINVAL;
3437 + port->operating_snk_mw = mw / 1000;
3438 +
3439 ++ port->self_powered = fwnode_property_read_bool(fwnode, "self-powered");
3440 ++
3441 + return 0;
3442 + }
3443 +
3444 +@@ -4720,6 +4727,7 @@ static int tcpm_copy_caps(struct tcpm_port *port,
3445 + port->typec_caps.prefer_role = tcfg->default_role;
3446 + port->typec_caps.type = tcfg->type;
3447 + port->typec_caps.data = tcfg->data;
3448 ++ port->self_powered = port->tcpc->config->self_powered;
3449 +
3450 + return 0;
3451 + }
3452 +diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
3453 +index 329d3afcf304..65e4b8637638 100644
3454 +--- a/fs/btrfs/dev-replace.c
3455 ++++ b/fs/btrfs/dev-replace.c
3456 +@@ -797,39 +797,58 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
3457 + case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
3458 + result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NOT_STARTED;
3459 + btrfs_dev_replace_write_unlock(dev_replace);
3460 +- goto leave;
3461 ++ break;
3462 + case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
3463 ++ result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
3464 ++ tgt_device = dev_replace->tgtdev;
3465 ++ src_device = dev_replace->srcdev;
3466 ++ btrfs_dev_replace_write_unlock(dev_replace);
3467 ++ btrfs_scrub_cancel(fs_info);
3468 ++ /* btrfs_dev_replace_finishing() will handle the cleanup part */
3469 ++ btrfs_info_in_rcu(fs_info,
3470 ++ "dev_replace from %s (devid %llu) to %s canceled",
3471 ++ btrfs_dev_name(src_device), src_device->devid,
3472 ++ btrfs_dev_name(tgt_device));
3473 ++ break;
3474 + case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
3475 ++ /*
3476 ++ * Scrub doing the replace isn't running so we need to do the
3477 ++ * cleanup step of btrfs_dev_replace_finishing() here
3478 ++ */
3479 + result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
3480 + tgt_device = dev_replace->tgtdev;
3481 + src_device = dev_replace->srcdev;
3482 + dev_replace->tgtdev = NULL;
3483 + dev_replace->srcdev = NULL;
3484 +- break;
3485 +- }
3486 +- dev_replace->replace_state = BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED;
3487 +- dev_replace->time_stopped = ktime_get_real_seconds();
3488 +- dev_replace->item_needs_writeback = 1;
3489 +- btrfs_dev_replace_write_unlock(dev_replace);
3490 +- btrfs_scrub_cancel(fs_info);
3491 ++ dev_replace->replace_state =
3492 ++ BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED;
3493 ++ dev_replace->time_stopped = ktime_get_real_seconds();
3494 ++ dev_replace->item_needs_writeback = 1;
3495 +
3496 +- trans = btrfs_start_transaction(root, 0);
3497 +- if (IS_ERR(trans)) {
3498 +- mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
3499 +- return PTR_ERR(trans);
3500 +- }
3501 +- ret = btrfs_commit_transaction(trans);
3502 +- WARN_ON(ret);
3503 ++ btrfs_dev_replace_write_unlock(dev_replace);
3504 +
3505 +- btrfs_info_in_rcu(fs_info,
3506 +- "dev_replace from %s (devid %llu) to %s canceled",
3507 +- btrfs_dev_name(src_device), src_device->devid,
3508 +- btrfs_dev_name(tgt_device));
3509 ++ btrfs_scrub_cancel(fs_info);
3510 ++
3511 ++ trans = btrfs_start_transaction(root, 0);
3512 ++ if (IS_ERR(trans)) {
3513 ++ mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
3514 ++ return PTR_ERR(trans);
3515 ++ }
3516 ++ ret = btrfs_commit_transaction(trans);
3517 ++ WARN_ON(ret);
3518 +
3519 +- if (tgt_device)
3520 +- btrfs_destroy_dev_replace_tgtdev(tgt_device);
3521 ++ btrfs_info_in_rcu(fs_info,
3522 ++ "suspended dev_replace from %s (devid %llu) to %s canceled",
3523 ++ btrfs_dev_name(src_device), src_device->devid,
3524 ++ btrfs_dev_name(tgt_device));
3525 ++
3526 ++ if (tgt_device)
3527 ++ btrfs_destroy_dev_replace_tgtdev(tgt_device);
3528 ++ break;
3529 ++ default:
3530 ++ result = -EINVAL;
3531 ++ }
3532 +
3533 +-leave:
3534 + mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
3535 + return result;
3536 + }
3537 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
3538 +index 02772f8823cf..561bffcb56a0 100644
3539 +--- a/fs/btrfs/inode.c
3540 ++++ b/fs/btrfs/inode.c
3541 +@@ -6419,14 +6419,19 @@ fail_dir_item:
3542 + err = btrfs_del_root_ref(trans, key.objectid,
3543 + root->root_key.objectid, parent_ino,
3544 + &local_index, name, name_len);
3545 +-
3546 ++ if (err)
3547 ++ btrfs_abort_transaction(trans, err);
3548 + } else if (add_backref) {
3549 + u64 local_index;
3550 + int err;
3551 +
3552 + err = btrfs_del_inode_ref(trans, root, name, name_len,
3553 + ino, parent_ino, &local_index);
3554 ++ if (err)
3555 ++ btrfs_abort_transaction(trans, err);
3556 + }
3557 ++
3558 ++ /* Return the original error code */
3559 + return ret;
3560 + }
3561 +
3562 +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
3563 +index c872adfc939e..ea5fa9df9405 100644
3564 +--- a/fs/btrfs/volumes.c
3565 ++++ b/fs/btrfs/volumes.c
3566 +@@ -4775,19 +4775,17 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3567 + /*
3568 + * Use the number of data stripes to figure out how big this chunk
3569 + * is really going to be in terms of logical address space,
3570 +- * and compare that answer with the max chunk size
3571 ++ * and compare that answer with the max chunk size. If it's higher,
3572 ++ * we try to reduce stripe_size.
3573 + */
3574 + if (stripe_size * data_stripes > max_chunk_size) {
3575 +- stripe_size = div_u64(max_chunk_size, data_stripes);
3576 +-
3577 +- /* bump the answer up to a 16MB boundary */
3578 +- stripe_size = round_up(stripe_size, SZ_16M);
3579 +-
3580 + /*
3581 +- * But don't go higher than the limits we found while searching
3582 +- * for free extents
3583 ++ * Reduce stripe_size, round it up to a 16MB boundary again and
3584 ++ * then use it, unless it ends up being even bigger than the
3585 ++ * previous value we had already.
3586 + */
3587 +- stripe_size = min(devices_info[ndevs - 1].max_avail,
3588 ++ stripe_size = min(round_up(div_u64(max_chunk_size,
3589 ++ data_stripes), SZ_16M),
3590 + stripe_size);
3591 + }
3592 +
3593 +@@ -7485,6 +7483,8 @@ int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
3594 + struct btrfs_path *path;
3595 + struct btrfs_root *root = fs_info->dev_root;
3596 + struct btrfs_key key;
3597 ++ u64 prev_devid = 0;
3598 ++ u64 prev_dev_ext_end = 0;
3599 + int ret = 0;
3600 +
3601 + key.objectid = 1;
3602 +@@ -7529,10 +7529,22 @@ int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
3603 + chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
3604 + physical_len = btrfs_dev_extent_length(leaf, dext);
3605 +
3606 ++ /* Check if this dev extent overlaps with the previous one */
3607 ++ if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
3608 ++ btrfs_err(fs_info,
3609 ++"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
3610 ++ devid, physical_offset, prev_dev_ext_end);
3611 ++ ret = -EUCLEAN;
3612 ++ goto out;
3613 ++ }
3614 ++
3615 + ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
3616 + physical_offset, physical_len);
3617 + if (ret < 0)
3618 + goto out;
3619 ++ prev_devid = devid;
3620 ++ prev_dev_ext_end = physical_offset + physical_len;
3621 ++
3622 + ret = btrfs_next_item(root, path);
3623 + if (ret < 0)
3624 + goto out;
3625 +diff --git a/fs/iomap.c b/fs/iomap.c
3626 +index d6bc98ae8d35..ce837d962d47 100644
3627 +--- a/fs/iomap.c
3628 ++++ b/fs/iomap.c
3629 +@@ -492,16 +492,29 @@ done:
3630 + }
3631 + EXPORT_SYMBOL_GPL(iomap_readpages);
3632 +
3633 ++/*
3634 ++ * iomap_is_partially_uptodate checks whether blocks within a page are
3635 ++ * uptodate or not.
3636 ++ *
3637 ++ * Returns true if all blocks which correspond to a file portion
3638 ++ * we want to read within the page are uptodate.
3639 ++ */
3640 + int
3641 + iomap_is_partially_uptodate(struct page *page, unsigned long from,
3642 + unsigned long count)
3643 + {
3644 + struct iomap_page *iop = to_iomap_page(page);
3645 + struct inode *inode = page->mapping->host;
3646 +- unsigned first = from >> inode->i_blkbits;
3647 +- unsigned last = (from + count - 1) >> inode->i_blkbits;
3648 ++ unsigned len, first, last;
3649 + unsigned i;
3650 +
3651 ++ /* Limit range to one page */
3652 ++ len = min_t(unsigned, PAGE_SIZE - from, count);
3653 ++
3654 ++ /* First and last blocks in range within page */
3655 ++ first = from >> inode->i_blkbits;
3656 ++ last = (from + len - 1) >> inode->i_blkbits;
3657 ++
3658 + if (iop) {
3659 + for (i = first; i <= last; i++)
3660 + if (!test_bit(i, iop->uptodate))
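The iomap hunk above clamps the requested (from, count) range to the current page before converting it to first/last block indices, so a length spanning several pages can no longer index past the per-page uptodate bitmap. The same arithmetic as a standalone C helper; demo_page_block_range(), the fixed 4 KiB page size, and the assumption that count is non-zero and from is within the page are all part of the sketch, not the patch:

enum { DEMO_PAGE_SIZE = 4096 };

static void demo_page_block_range(unsigned int from, unsigned int count,
				  unsigned int blkbits,
				  unsigned int *first, unsigned int *last)
{
	unsigned int len = count;

	/* Limit the range to what fits in this page. */
	if (len > DEMO_PAGE_SIZE - from)
		len = DEMO_PAGE_SIZE - from;

	*first = from >> blkbits;
	*last = (from + len - 1) >> blkbits;
}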
3661 +diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
3662 +index 902a7dd10e5c..bb6ae387469f 100644
3663 +--- a/fs/jffs2/super.c
3664 ++++ b/fs/jffs2/super.c
3665 +@@ -101,7 +101,8 @@ static int jffs2_sync_fs(struct super_block *sb, int wait)
3666 + struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
3667 +
3668 + #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
3669 +- cancel_delayed_work_sync(&c->wbuf_dwork);
3670 ++ if (jffs2_is_writebuffered(c))
3671 ++ cancel_delayed_work_sync(&c->wbuf_dwork);
3672 + #endif
3673 +
3674 + mutex_lock(&c->alloc_sem);
3675 +diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
3676 +index 7642b6712c39..30208233f65b 100644
3677 +--- a/fs/ocfs2/localalloc.c
3678 ++++ b/fs/ocfs2/localalloc.c
3679 +@@ -345,13 +345,18 @@ int ocfs2_load_local_alloc(struct ocfs2_super *osb)
3680 + if (num_used
3681 + || alloc->id1.bitmap1.i_used
3682 + || alloc->id1.bitmap1.i_total
3683 +- || la->la_bm_off)
3684 +- mlog(ML_ERROR, "Local alloc hasn't been recovered!\n"
3685 ++ || la->la_bm_off) {
3686 ++ mlog(ML_ERROR, "inconsistency detected, clean journal with"
3687 ++ " unrecovered local alloc, please run fsck.ocfs2!\n"
3688 + "found = %u, set = %u, taken = %u, off = %u\n",
3689 + num_used, le32_to_cpu(alloc->id1.bitmap1.i_used),
3690 + le32_to_cpu(alloc->id1.bitmap1.i_total),
3691 + OCFS2_LOCAL_ALLOC(alloc)->la_bm_off);
3692 +
3693 ++ status = -EINVAL;
3694 ++ goto bail;
3695 ++ }
3696 ++
3697 + osb->local_alloc_bh = alloc_bh;
3698 + osb->local_alloc_state = OCFS2_LA_ENABLED;
3699 +
3700 +diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
3701 +index 12e21f789194..79f0e183f135 100644
3702 +--- a/fs/pstore/ram_core.c
3703 ++++ b/fs/pstore/ram_core.c
3704 +@@ -497,6 +497,11 @@ static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
3705 + sig ^= PERSISTENT_RAM_SIG;
3706 +
3707 + if (prz->buffer->sig == sig) {
3708 ++ if (buffer_size(prz) == 0) {
3709 ++ pr_debug("found existing empty buffer\n");
3710 ++ return 0;
3711 ++ }
3712 ++
3713 + if (buffer_size(prz) > prz->buffer_size ||
3714 + buffer_start(prz) > buffer_size(prz))
3715 + pr_info("found existing invalid buffer, size %zu, start %zu\n",
3716 +diff --git a/fs/quota/quota.c b/fs/quota/quota.c
3717 +index f0cbf58ad4da..fd5dd806f1b9 100644
3718 +--- a/fs/quota/quota.c
3719 ++++ b/fs/quota/quota.c
3720 +@@ -791,7 +791,8 @@ static int quotactl_cmd_write(int cmd)
3721 + /* Return true if quotactl command is manipulating quota on/off state */
3722 + static bool quotactl_cmd_onoff(int cmd)
3723 + {
3724 +- return (cmd == Q_QUOTAON) || (cmd == Q_QUOTAOFF);
3725 ++ return (cmd == Q_QUOTAON) || (cmd == Q_QUOTAOFF) ||
3726 ++ (cmd == Q_XQUOTAON) || (cmd == Q_XQUOTAOFF);
3727 + }
3728 +
3729 + /*
3730 +diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
3731 +index 7a85e609fc27..d8b8323e80f4 100644
3732 +--- a/fs/userfaultfd.c
3733 ++++ b/fs/userfaultfd.c
3734 +@@ -736,10 +736,18 @@ void mremap_userfaultfd_prep(struct vm_area_struct *vma,
3735 + struct userfaultfd_ctx *ctx;
3736 +
3737 + ctx = vma->vm_userfaultfd_ctx.ctx;
3738 +- if (ctx && (ctx->features & UFFD_FEATURE_EVENT_REMAP)) {
3739 ++
3740 ++ if (!ctx)
3741 ++ return;
3742 ++
3743 ++ if (ctx->features & UFFD_FEATURE_EVENT_REMAP) {
3744 + vm_ctx->ctx = ctx;
3745 + userfaultfd_ctx_get(ctx);
3746 + WRITE_ONCE(ctx->mmap_changing, true);
3747 ++ } else {
3748 ++ /* Drop uffd context if remap feature not enabled */
3749 ++ vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
3750 ++ vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
3751 + }
3752 + }
3753 +
3754 +diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
3755 +index 9a6bc0951cfa..c31157135598 100644
3756 +--- a/include/linux/backing-dev-defs.h
3757 ++++ b/include/linux/backing-dev-defs.h
3758 +@@ -258,6 +258,14 @@ static inline void wb_get(struct bdi_writeback *wb)
3759 + */
3760 + static inline void wb_put(struct bdi_writeback *wb)
3761 + {
3762 ++ if (WARN_ON_ONCE(!wb->bdi)) {
3763 ++ /*
3764 ++ * A driver bug might cause a file to be removed before bdi was
3765 ++ * initialized.
3766 ++ */
3767 ++ return;
3768 ++ }
3769 ++
3770 + if (wb != &wb->bdi->wb)
3771 + percpu_ref_put(&wb->refcnt);
3772 + }
3773 +diff --git a/include/linux/filter.h b/include/linux/filter.h
3774 +index a8b9d90a8042..25a556589ae8 100644
3775 +--- a/include/linux/filter.h
3776 ++++ b/include/linux/filter.h
3777 +@@ -675,24 +675,10 @@ static inline u32 bpf_ctx_off_adjust_machine(u32 size)
3778 + return size;
3779 + }
3780 +
3781 +-static inline bool bpf_ctx_narrow_align_ok(u32 off, u32 size_access,
3782 +- u32 size_default)
3783 +-{
3784 +- size_default = bpf_ctx_off_adjust_machine(size_default);
3785 +- size_access = bpf_ctx_off_adjust_machine(size_access);
3786 +-
3787 +-#ifdef __LITTLE_ENDIAN
3788 +- return (off & (size_default - 1)) == 0;
3789 +-#else
3790 +- return (off & (size_default - 1)) + size_access == size_default;
3791 +-#endif
3792 +-}
3793 +-
3794 + static inline bool
3795 + bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
3796 + {
3797 +- return bpf_ctx_narrow_align_ok(off, size, size_default) &&
3798 +- size <= size_default && (size & (size - 1)) == 0;
3799 ++ return size <= size_default && (size & (size - 1)) == 0;
3800 + }
3801 +
3802 + #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
3803 +diff --git a/include/linux/memblock.h b/include/linux/memblock.h
3804 +index aee299a6aa76..3ef3086ed52f 100644
3805 +--- a/include/linux/memblock.h
3806 ++++ b/include/linux/memblock.h
3807 +@@ -320,6 +320,7 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
3808 + /* Flags for memblock allocation APIs */
3809 + #define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
3810 + #define MEMBLOCK_ALLOC_ACCESSIBLE 0
3811 ++#define MEMBLOCK_ALLOC_KASAN 1
3812 +
3813 + /* We are using top down, so it is safe to use 0 here */
3814 + #define MEMBLOCK_LOW_LIMIT 0
3815 +diff --git a/include/linux/swap.h b/include/linux/swap.h
3816 +index d8a07a4f171d..3d3630b3f63d 100644
3817 +--- a/include/linux/swap.h
3818 ++++ b/include/linux/swap.h
3819 +@@ -233,7 +233,6 @@ struct swap_info_struct {
3820 + unsigned long flags; /* SWP_USED etc: see above */
3821 + signed short prio; /* swap priority of this type */
3822 + struct plist_node list; /* entry in swap_active_head */
3823 +- struct plist_node avail_lists[MAX_NUMNODES];/* entry in swap_avail_heads */
3824 + signed char type; /* strange name for an index */
3825 + unsigned int max; /* extent of the swap_map */
3826 + unsigned char *swap_map; /* vmalloc'ed array of usage counts */
3827 +@@ -274,6 +273,16 @@ struct swap_info_struct {
3828 + */
3829 + struct work_struct discard_work; /* discard worker */
3830 + struct swap_cluster_list discard_clusters; /* discard clusters list */
3831 ++ struct plist_node avail_lists[0]; /*
3832 ++ * entries in swap_avail_heads, one
3833 ++ * entry per node.
3834 ++ * Must be last as the number of the
3835 ++ * array is nr_node_ids, which is not
3836 ++ * a fixed value so have to allocate
3837 ++ * dynamically.
3838 ++ * And it has to be an array so that
3839 ++ * plist_for_each_* can work.
3840 ++ */
3841 + };
3842 +
3843 + #ifdef CONFIG_64BIT
3844 +diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h
3845 +index 7e7fbfb84e8e..50c74a77db55 100644
3846 +--- a/include/linux/usb/tcpm.h
3847 ++++ b/include/linux/usb/tcpm.h
3848 +@@ -89,6 +89,7 @@ struct tcpc_config {
3849 + enum typec_port_data data;
3850 + enum typec_role default_role;
3851 + bool try_role_hw; /* try.{src,snk} implemented in hardware */
3852 ++ bool self_powered; /* port belongs to a self powered device */
3853 +
3854 + const struct typec_altmode_desc *alt_modes;
3855 + };
3856 +diff --git a/include/sound/soc.h b/include/sound/soc.h
3857 +index 70c10a8f3e90..3e0ac310a3df 100644
3858 +--- a/include/sound/soc.h
3859 ++++ b/include/sound/soc.h
3860 +@@ -553,12 +553,12 @@ static inline void snd_soc_jack_free_gpios(struct snd_soc_jack *jack, int count,
3861 + }
3862 + #endif
3863 +
3864 +-#ifdef CONFIG_SND_SOC_AC97_BUS
3865 + struct snd_ac97 *snd_soc_alloc_ac97_component(struct snd_soc_component *component);
3866 + struct snd_ac97 *snd_soc_new_ac97_component(struct snd_soc_component *component,
3867 + unsigned int id, unsigned int id_mask);
3868 + void snd_soc_free_ac97_component(struct snd_ac97 *ac97);
3869 +
3870 ++#ifdef CONFIG_SND_SOC_AC97_BUS
3871 + int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops);
3872 + int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops,
3873 + struct platform_device *pdev);
3874 +diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
3875 +index e3bdb0550a59..d9fd4eac58c2 100644
3876 +--- a/include/target/target_core_base.h
3877 ++++ b/include/target/target_core_base.h
3878 +@@ -601,6 +601,7 @@ struct se_session {
3879 + struct se_node_acl *se_node_acl;
3880 + struct se_portal_group *se_tpg;
3881 + void *fabric_sess_ptr;
3882 ++ struct percpu_ref cmd_count;
3883 + struct list_head sess_list;
3884 + struct list_head sess_acl_list;
3885 + struct list_head sess_cmd_list;
3886 +diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
3887 +index f4147b398431..eb9d0923c55c 100644
3888 +--- a/include/target/target_core_fabric.h
3889 ++++ b/include/target/target_core_fabric.h
3890 +@@ -116,7 +116,7 @@ struct se_session *target_setup_session(struct se_portal_group *,
3891 + struct se_session *, void *));
3892 + void target_remove_session(struct se_session *);
3893 +
3894 +-void transport_init_session(struct se_session *);
3895 ++int transport_init_session(struct se_session *se_sess);
3896 + struct se_session *transport_alloc_session(enum target_prot_op);
3897 + int transport_alloc_session_tags(struct se_session *, unsigned int,
3898 + unsigned int);
3899 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
3900 +index 51ba84d4d34a..eedc7bd4185d 100644
3901 +--- a/kernel/bpf/verifier.c
3902 ++++ b/kernel/bpf/verifier.c
3903 +@@ -3571,12 +3571,15 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
3904 + return err;
3905 +
3906 + if (BPF_SRC(insn->code) == BPF_X) {
3907 ++ struct bpf_reg_state *src_reg = regs + insn->src_reg;
3908 ++ struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
3909 ++
3910 + if (BPF_CLASS(insn->code) == BPF_ALU64) {
3911 + /* case: R1 = R2
3912 + * copy register state to dest reg
3913 + */
3914 +- regs[insn->dst_reg] = regs[insn->src_reg];
3915 +- regs[insn->dst_reg].live |= REG_LIVE_WRITTEN;
3916 ++ *dst_reg = *src_reg;
3917 ++ dst_reg->live |= REG_LIVE_WRITTEN;
3918 + } else {
3919 + /* R1 = (u32) R2 */
3920 + if (is_pointer_value(env, insn->src_reg)) {
3921 +@@ -3584,9 +3587,14 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
3922 + "R%d partial copy of pointer\n",
3923 + insn->src_reg);
3924 + return -EACCES;
3925 ++ } else if (src_reg->type == SCALAR_VALUE) {
3926 ++ *dst_reg = *src_reg;
3927 ++ dst_reg->live |= REG_LIVE_WRITTEN;
3928 ++ } else {
3929 ++ mark_reg_unknown(env, regs,
3930 ++ insn->dst_reg);
3931 + }
3932 +- mark_reg_unknown(env, regs, insn->dst_reg);
3933 +- coerce_reg_to_size(&regs[insn->dst_reg], 4);
3934 ++ coerce_reg_to_size(dst_reg, 4);
3935 + }
3936 + } else {
3937 + /* case: R = imm
3938 +@@ -5789,10 +5797,10 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
3939 + int i, cnt, size, ctx_field_size, delta = 0;
3940 + const int insn_cnt = env->prog->len;
3941 + struct bpf_insn insn_buf[16], *insn;
3942 ++ u32 target_size, size_default, off;
3943 + struct bpf_prog *new_prog;
3944 + enum bpf_access_type type;
3945 + bool is_narrower_load;
3946 +- u32 target_size;
3947 +
3948 + if (ops->gen_prologue || env->seen_direct_write) {
3949 + if (!ops->gen_prologue) {
3950 +@@ -5885,9 +5893,9 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
3951 + * we will apply proper mask to the result.
3952 + */
3953 + is_narrower_load = size < ctx_field_size;
3954 ++ size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
3955 ++ off = insn->off;
3956 + if (is_narrower_load) {
3957 +- u32 size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
3958 +- u32 off = insn->off;
3959 + u8 size_code;
3960 +
3961 + if (type == BPF_WRITE) {
3962 +@@ -5915,12 +5923,23 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
3963 + }
3964 +
3965 + if (is_narrower_load && size < target_size) {
3966 +- if (ctx_field_size <= 4)
3967 ++ u8 shift = (off & (size_default - 1)) * 8;
3968 ++
3969 ++ if (ctx_field_size <= 4) {
3970 ++ if (shift)
3971 ++ insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
3972 ++ insn->dst_reg,
3973 ++ shift);
3974 + insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
3975 + (1 << size * 8) - 1);
3976 +- else
3977 ++ } else {
3978 ++ if (shift)
3979 ++ insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
3980 ++ insn->dst_reg,
3981 ++ shift);
3982 + insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
3983 + (1 << size * 8) - 1);
3984 ++ }
3985 + }
3986 +
3987 + new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
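The verifier hunks above fix narrow context loads by emitting a right shift by the access's byte offset within the full-sized field before the existing mask, instead of rejecting unaligned narrow accesses up front. The arithmetic performed by the emitted BPF_RSH/BPF_AND pair, shown as a plain C helper over a little-endian layout; demo_narrow_load() is an invented name:

#include <stdint.h>

static uint64_t demo_narrow_load(uint64_t full, uint32_t off,
				 uint32_t size, uint32_t size_default)
{
	/* Byte offset of the narrow access inside the full field,
	 * converted to a bit shift (little-endian layout assumed). */
	uint32_t shift = (off & (size_default - 1)) * 8;
	uint64_t mask = size < 8 ? (1ULL << (size * 8)) - 1 : ~0ULL;

	return (full >> shift) & mask;
}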
3988 +diff --git a/mm/memblock.c b/mm/memblock.c
3989 +index 81ae63ca78d0..f45a049532fe 100644
3990 +--- a/mm/memblock.c
3991 ++++ b/mm/memblock.c
3992 +@@ -262,7 +262,8 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
3993 + phys_addr_t kernel_end, ret;
3994 +
3995 + /* pump up @end */
3996 +- if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
3997 ++ if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
3998 ++ end == MEMBLOCK_ALLOC_KASAN)
3999 + end = memblock.current_limit;
4000 +
4001 + /* avoid allocating the first page */
4002 +@@ -1412,13 +1413,15 @@ again:
4003 + done:
4004 + ptr = phys_to_virt(alloc);
4005 +
4006 +- /*
4007 +- * The min_count is set to 0 so that bootmem allocated blocks
4008 +- * are never reported as leaks. This is because many of these blocks
4009 +- * are only referred via the physical address which is not
4010 +- * looked up by kmemleak.
4011 +- */
4012 +- kmemleak_alloc(ptr, size, 0, 0);
4013 ++ /* Skip kmemleak for kasan_init() due to high volume. */
4014 ++ if (max_addr != MEMBLOCK_ALLOC_KASAN)
4015 ++ /*
4016 ++ * The min_count is set to 0 so that bootmem allocated
4017 ++ * blocks are never reported as leaks. This is because many
4018 ++ * of these blocks are only referred via the physical
4019 ++ * address which is not looked up by kmemleak.
4020 ++ */
4021 ++ kmemleak_alloc(ptr, size, 0, 0);
4022 +
4023 + return ptr;
4024 + }
4025 +diff --git a/mm/page-writeback.c b/mm/page-writeback.c
4026 +index 3f690bae6b78..7d1010453fb9 100644
4027 +--- a/mm/page-writeback.c
4028 ++++ b/mm/page-writeback.c
4029 +@@ -2154,6 +2154,7 @@ int write_cache_pages(struct address_space *mapping,
4030 + {
4031 + int ret = 0;
4032 + int done = 0;
4033 ++ int error;
4034 + struct pagevec pvec;
4035 + int nr_pages;
4036 + pgoff_t uninitialized_var(writeback_index);
4037 +@@ -2227,25 +2228,31 @@ continue_unlock:
4038 + goto continue_unlock;
4039 +
4040 + trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
4041 +- ret = (*writepage)(page, wbc, data);
4042 +- if (unlikely(ret)) {
4043 +- if (ret == AOP_WRITEPAGE_ACTIVATE) {
4044 ++ error = (*writepage)(page, wbc, data);
4045 ++ if (unlikely(error)) {
4046 ++ /*
4047 ++ * Handle errors according to the type of
4048 ++ * writeback. There's no need to continue for
4049 ++ * background writeback. Just push done_index
4050 ++ * past this page so media errors won't choke
4051 ++ * writeout for the entire file. For integrity
4052 ++ * writeback, we must process the entire dirty
4053 ++ * set regardless of errors because the fs may
4054 ++ * still have state to clear for each page. In
4055 ++ * that case we continue processing and return
4056 ++ * the first error.
4057 ++ */
4058 ++ if (error == AOP_WRITEPAGE_ACTIVATE) {
4059 + unlock_page(page);
4060 +- ret = 0;
4061 +- } else {
4062 +- /*
4063 +- * done_index is set past this page,
4064 +- * so media errors will not choke
4065 +- * background writeout for the entire
4066 +- * file. This has consequences for
4067 +- * range_cyclic semantics (ie. it may
4068 +- * not be suitable for data integrity
4069 +- * writeout).
4070 +- */
4071 ++ error = 0;
4072 ++ } else if (wbc->sync_mode != WB_SYNC_ALL) {
4073 ++ ret = error;
4074 + done_index = page->index + 1;
4075 + done = 1;
4076 + break;
4077 + }
4078 ++ if (!ret)
4079 ++ ret = error;
4080 + }
4081 +
4082 + /*
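The write_cache_pages() hunk above separates the two writeback modes: background (WB_SYNC_NONE) writeback stops at the first hard error, while integrity (WB_SYNC_ALL) writeback keeps processing every dirty page and only reports the first error at the end. The policy reduced to a small standalone loop, leaving out the AOP_WRITEPAGE_ACTIVATE special case; demo_writeback_loop() and its inputs are invented for illustration:

static int demo_writeback_loop(int sync_all, const int *page_err, int npages)
{
	int ret = 0;
	int i;

	for (i = 0; i < npages; i++) {
		int error = page_err[i];	/* stand-in for ->writepage() */

		if (!error)
			continue;
		if (!sync_all)
			return error;		/* background: stop early */
		if (!ret)
			ret = error;		/* integrity: keep first error, continue */
	}
	return ret;
}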
4083 +diff --git a/mm/swapfile.c b/mm/swapfile.c
4084 +index 20d3c0f47a5f..dbac1d49469d 100644
4085 +--- a/mm/swapfile.c
4086 ++++ b/mm/swapfile.c
4087 +@@ -2813,8 +2813,9 @@ static struct swap_info_struct *alloc_swap_info(void)
4088 + struct swap_info_struct *p;
4089 + unsigned int type;
4090 + int i;
4091 ++ int size = sizeof(*p) + nr_node_ids * sizeof(struct plist_node);
4092 +
4093 +- p = kvzalloc(sizeof(*p), GFP_KERNEL);
4094 ++ p = kvzalloc(size, GFP_KERNEL);
4095 + if (!p)
4096 + return ERR_PTR(-ENOMEM);
4097 +
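Together with the swap.h hunk earlier, the swapfile.c change above turns avail_lists into a trailing array whose length is only known at boot (nr_node_ids), so the allocation size must account for it explicitly. A userspace sketch of sizing such a flexible-array allocation; the demo_* names are invented:

#include <stdlib.h>

struct demo_node_entry {
	int prio;
};

struct demo_info {
	int type;
	struct demo_node_entry avail[];	/* one entry per NUMA node */
};

static struct demo_info *demo_alloc_info(size_t nr_nodes)
{
	size_t size = sizeof(struct demo_info) +
		      nr_nodes * sizeof(struct demo_node_entry);

	/* zeroed allocation, like kvzalloc() in the patch */
	return calloc(1, size);
}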
4098 +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
4099 +index ef9928d7b4fb..ac2826ce162b 100644
4100 +--- a/net/bluetooth/hci_event.c
4101 ++++ b/net/bluetooth/hci_event.c
4102 +@@ -5711,6 +5711,12 @@ static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
4103 + return true;
4104 + }
4105 +
4106 ++	/* Check if request ended in Command Status - no way to retrieve
4107 ++ * any extra parameters in this case.
4108 ++ */
4109 ++ if (hdr->evt == HCI_EV_CMD_STATUS)
4110 ++ return false;
4111 ++
4112 + if (hdr->evt != HCI_EV_CMD_COMPLETE) {
4113 + bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
4114 + hdr->evt);
4115 +diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
4116 +index 5372e2042adf..2cb8da465b98 100644
4117 +--- a/net/bridge/br_forward.c
4118 ++++ b/net/bridge/br_forward.c
4119 +@@ -65,6 +65,7 @@ EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);
4120 +
4121 + int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
4122 + {
4123 ++ skb->tstamp = 0;
4124 + return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING,
4125 + net, sk, skb, NULL, skb->dev,
4126 + br_dev_queue_push_xmit);
4127 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
4128 +index a8217e221e19..eebc3106d30e 100644
4129 +--- a/net/core/skbuff.c
4130 ++++ b/net/core/skbuff.c
4131 +@@ -5202,7 +5202,6 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
4132 + unsigned long chunk;
4133 + struct sk_buff *skb;
4134 + struct page *page;
4135 +- gfp_t gfp_head;
4136 + int i;
4137 +
4138 + *errcode = -EMSGSIZE;
4139 +@@ -5212,12 +5211,8 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
4140 + if (npages > MAX_SKB_FRAGS)
4141 + return NULL;
4142 +
4143 +- gfp_head = gfp_mask;
4144 +- if (gfp_head & __GFP_DIRECT_RECLAIM)
4145 +- gfp_head |= __GFP_RETRY_MAYFAIL;
4146 +-
4147 + *errcode = -ENOBUFS;
4148 +- skb = alloc_skb(header_len, gfp_head);
4149 ++ skb = alloc_skb(header_len, gfp_mask);
4150 + if (!skb)
4151 + return NULL;
4152 +
4153 +diff --git a/net/core/sock.c b/net/core/sock.c
4154 +index 98659fb6e9fb..530583ae92bf 100644
4155 +--- a/net/core/sock.c
4156 ++++ b/net/core/sock.c
4157 +@@ -698,6 +698,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
4158 + break;
4159 + case SO_DONTROUTE:
4160 + sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
4161 ++ sk_dst_reset(sk);
4162 + break;
4163 + case SO_BROADCAST:
4164 + sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
4165 +diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
4166 +index 608a6f4223fb..fecd0e7672b5 100644
4167 +--- a/net/ipv4/devinet.c
4168 ++++ b/net/ipv4/devinet.c
4169 +@@ -1826,7 +1826,7 @@ put_tgt_net:
4170 + if (fillargs.netnsid >= 0)
4171 + put_net(tgt_net);
4172 +
4173 +- return err < 0 ? err : skb->len;
4174 ++ return skb->len ? : err;
4175 + }
4176 +
4177 + static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
4178 +diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
4179 +index 2c8d313ae216..fb1e7f237f53 100644
4180 +--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
4181 ++++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
4182 +@@ -57,17 +57,14 @@ struct clusterip_config {
4183 + enum clusterip_hashmode hash_mode; /* which hashing mode */
4184 + u_int32_t hash_initval; /* hash initialization */
4185 + struct rcu_head rcu;
4186 +-
4187 ++ struct net *net; /* netns for pernet list */
4188 + char ifname[IFNAMSIZ]; /* device ifname */
4189 +- struct notifier_block notifier; /* refresh c->ifindex in it */
4190 + };
4191 +
4192 + #ifdef CONFIG_PROC_FS
4193 + static const struct file_operations clusterip_proc_fops;
4194 + #endif
4195 +
4196 +-static unsigned int clusterip_net_id __read_mostly;
4197 +-
4198 + struct clusterip_net {
4199 + struct list_head configs;
4200 + /* lock protects the configs list */
4201 +@@ -78,16 +75,30 @@ struct clusterip_net {
4202 + #endif
4203 + };
4204 +
4205 ++static unsigned int clusterip_net_id __read_mostly;
4206 ++static inline struct clusterip_net *clusterip_pernet(struct net *net)
4207 ++{
4208 ++ return net_generic(net, clusterip_net_id);
4209 ++}
4210 ++
4211 + static inline void
4212 + clusterip_config_get(struct clusterip_config *c)
4213 + {
4214 + refcount_inc(&c->refcount);
4215 + }
4216 +
4217 +-
4218 + static void clusterip_config_rcu_free(struct rcu_head *head)
4219 + {
4220 +- kfree(container_of(head, struct clusterip_config, rcu));
4221 ++ struct clusterip_config *config;
4222 ++ struct net_device *dev;
4223 ++
4224 ++ config = container_of(head, struct clusterip_config, rcu);
4225 ++ dev = dev_get_by_name(config->net, config->ifname);
4226 ++ if (dev) {
4227 ++ dev_mc_del(dev, config->clustermac);
4228 ++ dev_put(dev);
4229 ++ }
4230 ++ kfree(config);
4231 + }
4232 +
4233 + static inline void
4234 +@@ -101,9 +112,9 @@ clusterip_config_put(struct clusterip_config *c)
4235 + * entry(rule) is removed, remove the config from lists, but don't free it
4236 + * yet, since proc-files could still be holding references */
4237 + static inline void
4238 +-clusterip_config_entry_put(struct net *net, struct clusterip_config *c)
4239 ++clusterip_config_entry_put(struct clusterip_config *c)
4240 + {
4241 +- struct clusterip_net *cn = net_generic(net, clusterip_net_id);
4242 ++ struct clusterip_net *cn = clusterip_pernet(c->net);
4243 +
4244 + local_bh_disable();
4245 + if (refcount_dec_and_lock(&c->entries, &cn->lock)) {
4246 +@@ -118,8 +129,6 @@ clusterip_config_entry_put(struct net *net, struct clusterip_config *c)
4247 + spin_unlock(&cn->lock);
4248 + local_bh_enable();
4249 +
4250 +- unregister_netdevice_notifier(&c->notifier);
4251 +-
4252 + return;
4253 + }
4254 + local_bh_enable();
4255 +@@ -129,7 +138,7 @@ static struct clusterip_config *
4256 + __clusterip_config_find(struct net *net, __be32 clusterip)
4257 + {
4258 + struct clusterip_config *c;
4259 +- struct clusterip_net *cn = net_generic(net, clusterip_net_id);
4260 ++ struct clusterip_net *cn = clusterip_pernet(net);
4261 +
4262 + list_for_each_entry_rcu(c, &cn->configs, list) {
4263 + if (c->clusterip == clusterip)
4264 +@@ -181,32 +190,37 @@ clusterip_netdev_event(struct notifier_block *this, unsigned long event,
4265 + void *ptr)
4266 + {
4267 + struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4268 ++ struct net *net = dev_net(dev);
4269 ++ struct clusterip_net *cn = clusterip_pernet(net);
4270 + struct clusterip_config *c;
4271 +
4272 +- c = container_of(this, struct clusterip_config, notifier);
4273 +- switch (event) {
4274 +- case NETDEV_REGISTER:
4275 +- if (!strcmp(dev->name, c->ifname)) {
4276 +- c->ifindex = dev->ifindex;
4277 +- dev_mc_add(dev, c->clustermac);
4278 +- }
4279 +- break;
4280 +- case NETDEV_UNREGISTER:
4281 +- if (dev->ifindex == c->ifindex) {
4282 +- dev_mc_del(dev, c->clustermac);
4283 +- c->ifindex = -1;
4284 +- }
4285 +- break;
4286 +- case NETDEV_CHANGENAME:
4287 +- if (!strcmp(dev->name, c->ifname)) {
4288 +- c->ifindex = dev->ifindex;
4289 +- dev_mc_add(dev, c->clustermac);
4290 +- } else if (dev->ifindex == c->ifindex) {
4291 +- dev_mc_del(dev, c->clustermac);
4292 +- c->ifindex = -1;
4293 ++ spin_lock_bh(&cn->lock);
4294 ++ list_for_each_entry_rcu(c, &cn->configs, list) {
4295 ++ switch (event) {
4296 ++ case NETDEV_REGISTER:
4297 ++ if (!strcmp(dev->name, c->ifname)) {
4298 ++ c->ifindex = dev->ifindex;
4299 ++ dev_mc_add(dev, c->clustermac);
4300 ++ }
4301 ++ break;
4302 ++ case NETDEV_UNREGISTER:
4303 ++ if (dev->ifindex == c->ifindex) {
4304 ++ dev_mc_del(dev, c->clustermac);
4305 ++ c->ifindex = -1;
4306 ++ }
4307 ++ break;
4308 ++ case NETDEV_CHANGENAME:
4309 ++ if (!strcmp(dev->name, c->ifname)) {
4310 ++ c->ifindex = dev->ifindex;
4311 ++ dev_mc_add(dev, c->clustermac);
4312 ++ } else if (dev->ifindex == c->ifindex) {
4313 ++ dev_mc_del(dev, c->clustermac);
4314 ++ c->ifindex = -1;
4315 ++ }
4316 ++ break;
4317 + }
4318 +- break;
4319 + }
4320 ++ spin_unlock_bh(&cn->lock);
4321 +
4322 + return NOTIFY_DONE;
4323 + }
4324 +@@ -215,30 +229,44 @@ static struct clusterip_config *
4325 + clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
4326 + __be32 ip, const char *iniface)
4327 + {
4328 +- struct clusterip_net *cn = net_generic(net, clusterip_net_id);
4329 ++ struct clusterip_net *cn = clusterip_pernet(net);
4330 + struct clusterip_config *c;
4331 ++ struct net_device *dev;
4332 + int err;
4333 +
4334 ++ if (iniface[0] == '\0') {
4335 ++ pr_info("Please specify an interface name\n");
4336 ++ return ERR_PTR(-EINVAL);
4337 ++ }
4338 ++
4339 + c = kzalloc(sizeof(*c), GFP_ATOMIC);
4340 + if (!c)
4341 + return ERR_PTR(-ENOMEM);
4342 +
4343 +- strcpy(c->ifname, iniface);
4344 +- c->ifindex = -1;
4345 +- c->clusterip = ip;
4346 ++ dev = dev_get_by_name(net, iniface);
4347 ++ if (!dev) {
4348 ++ pr_info("no such interface %s\n", iniface);
4349 ++ kfree(c);
4350 ++ return ERR_PTR(-ENOENT);
4351 ++ }
4352 ++ c->ifindex = dev->ifindex;
4353 ++ strcpy(c->ifname, dev->name);
4354 + memcpy(&c->clustermac, &i->clustermac, ETH_ALEN);
4355 ++ dev_mc_add(dev, c->clustermac);
4356 ++ dev_put(dev);
4357 ++
4358 ++ c->clusterip = ip;
4359 + c->num_total_nodes = i->num_total_nodes;
4360 + clusterip_config_init_nodelist(c, i);
4361 + c->hash_mode = i->hash_mode;
4362 + c->hash_initval = i->hash_initval;
4363 ++ c->net = net;
4364 + refcount_set(&c->refcount, 1);
4365 +
4366 + spin_lock_bh(&cn->lock);
4367 + if (__clusterip_config_find(net, ip)) {
4368 +- spin_unlock_bh(&cn->lock);
4369 +- kfree(c);
4370 +-
4371 +- return ERR_PTR(-EBUSY);
4372 ++ err = -EBUSY;
4373 ++ goto out_config_put;
4374 + }
4375 +
4376 + list_add_rcu(&c->list, &cn->configs);
4377 +@@ -260,22 +288,17 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
4378 + }
4379 + #endif
4380 +
4381 +- c->notifier.notifier_call = clusterip_netdev_event;
4382 +- err = register_netdevice_notifier(&c->notifier);
4383 +- if (!err) {
4384 +- refcount_set(&c->entries, 1);
4385 +- return c;
4386 +- }
4387 ++ refcount_set(&c->entries, 1);
4388 ++ return c;
4389 +
4390 + #ifdef CONFIG_PROC_FS
4391 +- proc_remove(c->pde);
4392 + err:
4393 + #endif
4394 + spin_lock_bh(&cn->lock);
4395 + list_del_rcu(&c->list);
4396 ++out_config_put:
4397 + spin_unlock_bh(&cn->lock);
4398 + clusterip_config_put(c);
4399 +-
4400 + return ERR_PTR(err);
4401 + }
4402 +
4403 +@@ -475,34 +498,20 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
4404 + &e->ip.dst.s_addr);
4405 + return -EINVAL;
4406 + } else {
4407 +- struct net_device *dev;
4408 +-
4409 +- if (e->ip.iniface[0] == '\0') {
4410 +- pr_info("Please specify an interface name\n");
4411 +- return -EINVAL;
4412 +- }
4413 +-
4414 +- dev = dev_get_by_name(par->net, e->ip.iniface);
4415 +- if (!dev) {
4416 +- pr_info("no such interface %s\n",
4417 +- e->ip.iniface);
4418 +- return -ENOENT;
4419 +- }
4420 +- dev_put(dev);
4421 +-
4422 + config = clusterip_config_init(par->net, cipinfo,
4423 + e->ip.dst.s_addr,
4424 + e->ip.iniface);
4425 + if (IS_ERR(config))
4426 + return PTR_ERR(config);
4427 + }
4428 +- }
4429 ++ } else if (memcmp(&config->clustermac, &cipinfo->clustermac, ETH_ALEN))
4430 ++ return -EINVAL;
4431 +
4432 + ret = nf_ct_netns_get(par->net, par->family);
4433 + if (ret < 0) {
4434 + pr_info("cannot load conntrack support for proto=%u\n",
4435 + par->family);
4436 +- clusterip_config_entry_put(par->net, config);
4437 ++ clusterip_config_entry_put(config);
4438 + clusterip_config_put(config);
4439 + return ret;
4440 + }
4441 +@@ -524,7 +533,7 @@ static void clusterip_tg_destroy(const struct xt_tgdtor_param *par)
4442 +
4443 + /* if no more entries are referencing the config, remove it
4444 + * from the list and destroy the proc entry */
4445 +- clusterip_config_entry_put(par->net, cipinfo->config);
4446 ++ clusterip_config_entry_put(cipinfo->config);
4447 +
4448 + clusterip_config_put(cipinfo->config);
4449 +
4450 +@@ -806,7 +815,7 @@ static const struct file_operations clusterip_proc_fops = {
4451 +
4452 + static int clusterip_net_init(struct net *net)
4453 + {
4454 +- struct clusterip_net *cn = net_generic(net, clusterip_net_id);
4455 ++ struct clusterip_net *cn = clusterip_pernet(net);
4456 + int ret;
4457 +
4458 + INIT_LIST_HEAD(&cn->configs);
4459 +@@ -831,13 +840,12 @@ static int clusterip_net_init(struct net *net)
4460 +
4461 + static void clusterip_net_exit(struct net *net)
4462 + {
4463 +- struct clusterip_net *cn = net_generic(net, clusterip_net_id);
4464 ++ struct clusterip_net *cn = clusterip_pernet(net);
4465 + #ifdef CONFIG_PROC_FS
4466 + proc_remove(cn->procdir);
4467 + cn->procdir = NULL;
4468 + #endif
4469 + nf_unregister_net_hook(net, &cip_arp_ops);
4470 +- WARN_ON_ONCE(!list_empty(&cn->configs));
4471 + }
4472 +
4473 + static struct pernet_operations clusterip_net_ops = {
4474 +@@ -847,6 +855,10 @@ static struct pernet_operations clusterip_net_ops = {
4475 + .size = sizeof(struct clusterip_net),
4476 + };
4477 +
4478 ++struct notifier_block cip_netdev_notifier = {
4479 ++ .notifier_call = clusterip_netdev_event
4480 ++};
4481 ++
4482 + static int __init clusterip_tg_init(void)
4483 + {
4484 + int ret;
4485 +@@ -859,11 +871,17 @@ static int __init clusterip_tg_init(void)
4486 + if (ret < 0)
4487 + goto cleanup_subsys;
4488 +
4489 ++ ret = register_netdevice_notifier(&cip_netdev_notifier);
4490 ++ if (ret < 0)
4491 ++ goto unregister_target;
4492 ++
4493 + pr_info("ClusterIP Version %s loaded successfully\n",
4494 + CLUSTERIP_VERSION);
4495 +
4496 + return 0;
4497 +
4498 ++unregister_target:
4499 ++ xt_unregister_target(&clusterip_tg_reg);
4500 + cleanup_subsys:
4501 + unregister_pernet_subsys(&clusterip_net_ops);
4502 + return ret;
4503 +@@ -873,6 +891,7 @@ static void __exit clusterip_tg_exit(void)
4504 + {
4505 + pr_info("ClusterIP Version %s unloading\n", CLUSTERIP_VERSION);
4506 +
4507 ++ unregister_netdevice_notifier(&cip_netdev_notifier);
4508 + xt_unregister_target(&clusterip_tg_reg);
4509 + unregister_pernet_subsys(&clusterip_net_ops);
4510 +
4511 +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
4512 +index 045597b9a7c0..e3cb53b0ef67 100644
4513 +--- a/net/ipv6/addrconf.c
4514 ++++ b/net/ipv6/addrconf.c
4515 +@@ -5154,7 +5154,7 @@ put_tgt_net:
4516 + if (fillargs.netnsid >= 0)
4517 + put_net(tgt_net);
4518 +
4519 +- return err < 0 ? err : skb->len;
4520 ++ return skb->len ? : err;
4521 + }
4522 +
4523 + static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
4524 +diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
4525 +index f0cd291034f0..4dc935838184 100644
4526 +--- a/net/ipv6/af_inet6.c
4527 ++++ b/net/ipv6/af_inet6.c
4528 +@@ -310,6 +310,7 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
4529 +
4530 + /* Check if the address belongs to the host. */
4531 + if (addr_type == IPV6_ADDR_MAPPED) {
4532 ++ struct net_device *dev = NULL;
4533 + int chk_addr_ret;
4534 +
4535 + /* Binding to v4-mapped address on a v6-only socket
4536 +@@ -320,9 +321,20 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
4537 + goto out;
4538 + }
4539 +
4540 ++ rcu_read_lock();
4541 ++ if (sk->sk_bound_dev_if) {
4542 ++ dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
4543 ++ if (!dev) {
4544 ++ err = -ENODEV;
4545 ++ goto out_unlock;
4546 ++ }
4547 ++ }
4548 ++
4549 + /* Reproduce AF_INET checks to make the bindings consistent */
4550 + v4addr = addr->sin6_addr.s6_addr32[3];
4551 +- chk_addr_ret = inet_addr_type(net, v4addr);
4552 ++ chk_addr_ret = inet_addr_type_dev_table(net, dev, v4addr);
4553 ++ rcu_read_unlock();
4554 ++
4555 + if (!inet_can_nonlocal_bind(net, inet) &&
4556 + v4addr != htonl(INADDR_ANY) &&
4557 + chk_addr_ret != RTN_LOCAL &&
4558 +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
4559 +index ae3786132c23..6613d8dbb0e5 100644
4560 +--- a/net/ipv6/ip6_fib.c
4561 ++++ b/net/ipv6/ip6_fib.c
4562 +@@ -627,7 +627,11 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
4563 + return -ENOENT;
4564 + }
4565 +
4566 +- res = fib6_dump_table(tb, skb, cb);
4567 ++ if (!cb->args[0]) {
4568 ++ res = fib6_dump_table(tb, skb, cb);
4569 ++ if (!res)
4570 ++ cb->args[0] = 1;
4571 ++ }
4572 + goto out;
4573 + }
4574 +
4575 +diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
4576 +index c00b6a2e8e3c..13ade5782847 100644
4577 +--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
4578 ++++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
4579 +@@ -219,10 +219,6 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
4580 + struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
4581 + u32 ip;
4582 +
4583 +- /* MAC can be src only */
4584 +- if (!(opt->flags & IPSET_DIM_TWO_SRC))
4585 +- return 0;
4586 +-
4587 + ip = ntohl(ip4addr(skb, opt->flags & IPSET_DIM_ONE_SRC));
4588 + if (ip < map->first_ip || ip > map->last_ip)
4589 + return -IPSET_ERR_BITMAP_RANGE;
4590 +@@ -233,7 +229,11 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
4591 + return -EINVAL;
4592 +
4593 + e.id = ip_to_id(map, ip);
4594 +- memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN);
4595 ++
4596 ++ if (opt->flags & IPSET_DIM_ONE_SRC)
4597 ++ ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
4598 ++ else
4599 ++ ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
4600 +
4601 + return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
4602 + }
4603 +diff --git a/net/netfilter/ipset/ip_set_hash_ipmac.c b/net/netfilter/ipset/ip_set_hash_ipmac.c
4604 +index 1ab5ed2f6839..fd87de3ed55b 100644
4605 +--- a/net/netfilter/ipset/ip_set_hash_ipmac.c
4606 ++++ b/net/netfilter/ipset/ip_set_hash_ipmac.c
4607 +@@ -103,7 +103,11 @@ hash_ipmac4_kadt(struct ip_set *set, const struct sk_buff *skb,
4608 + (skb_mac_header(skb) + ETH_HLEN) > skb->data)
4609 + return -EINVAL;
4610 +
4611 +- memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN);
4612 ++ if (opt->flags & IPSET_DIM_ONE_SRC)
4613 ++ ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
4614 ++ else
4615 ++ ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
4616 ++
4617 + if (ether_addr_equal(e.ether, invalid_ether))
4618 + return -EINVAL;
4619 +
4620 +@@ -211,15 +215,15 @@ hash_ipmac6_kadt(struct ip_set *set, const struct sk_buff *skb,
4621 + };
4622 + struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
4623 +
4624 +- /* MAC can be src only */
4625 +- if (!(opt->flags & IPSET_DIM_TWO_SRC))
4626 +- return 0;
4627 +-
4628 + if (skb_mac_header(skb) < skb->head ||
4629 + (skb_mac_header(skb) + ETH_HLEN) > skb->data)
4630 + return -EINVAL;
4631 +
4632 +- memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN);
4633 ++ if (opt->flags & IPSET_DIM_ONE_SRC)
4634 ++ ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
4635 ++ else
4636 ++ ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
4637 ++
4638 + if (ether_addr_equal(e.ether, invalid_ether))
4639 + return -EINVAL;
4640 +
4641 +diff --git a/net/netfilter/ipset/ip_set_hash_mac.c b/net/netfilter/ipset/ip_set_hash_mac.c
4642 +index f9d5a2a1e3d0..4fe5f243d0a3 100644
4643 +--- a/net/netfilter/ipset/ip_set_hash_mac.c
4644 ++++ b/net/netfilter/ipset/ip_set_hash_mac.c
4645 +@@ -81,15 +81,15 @@ hash_mac4_kadt(struct ip_set *set, const struct sk_buff *skb,
4646 + struct hash_mac4_elem e = { { .foo[0] = 0, .foo[1] = 0 } };
4647 + struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
4648 +
4649 +- /* MAC can be src only */
4650 +- if (!(opt->flags & IPSET_DIM_ONE_SRC))
4651 +- return 0;
4652 +-
4653 + if (skb_mac_header(skb) < skb->head ||
4654 + (skb_mac_header(skb) + ETH_HLEN) > skb->data)
4655 + return -EINVAL;
4656 +
4657 +- ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
4658 ++ if (opt->flags & IPSET_DIM_ONE_SRC)
4659 ++ ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
4660 ++ else
4661 ++ ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
4662 ++
4663 + if (is_zero_ether_addr(e.ether))
4664 + return -EINVAL;
4665 + return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
4666 +diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
4667 +index 35966da84769..f920a347ee1c 100644
4668 +--- a/net/openvswitch/flow.c
4669 ++++ b/net/openvswitch/flow.c
4670 +@@ -276,10 +276,12 @@ static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
4671 +
4672 + nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
4673 + if (flags & IP6_FH_F_FRAG) {
4674 +- if (frag_off)
4675 ++ if (frag_off) {
4676 + key->ip.frag = OVS_FRAG_TYPE_LATER;
4677 +- else
4678 +- key->ip.frag = OVS_FRAG_TYPE_FIRST;
4679 ++ key->ip.proto = nexthdr;
4680 ++ return 0;
4681 ++ }
4682 ++ key->ip.frag = OVS_FRAG_TYPE_FIRST;
4683 + } else {
4684 + key->ip.frag = OVS_FRAG_TYPE_NONE;
4685 + }
4686 +diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c
4687 +index e6d7e0fe155b..96783207de4a 100644
4688 +--- a/samples/bpf/bpf_load.c
4689 ++++ b/samples/bpf/bpf_load.c
4690 +@@ -54,6 +54,23 @@ static int populate_prog_array(const char *event, int prog_fd)
4691 + return 0;
4692 + }
4693 +
4694 ++static int write_kprobe_events(const char *val)
4695 ++{
4696 ++ int fd, ret, flags;
4697 ++
4698 ++ if ((val != NULL) && (val[0] == '\0'))
4699 ++ flags = O_WRONLY | O_TRUNC;
4700 ++ else
4701 ++ flags = O_WRONLY | O_APPEND;
4702 ++
4703 ++ fd = open("/sys/kernel/debug/tracing/kprobe_events", flags);
4704 ++
4705 ++ ret = write(fd, val, strlen(val));
4706 ++ close(fd);
4707 ++
4708 ++ return ret;
4709 ++}
4710 ++
4711 + static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
4712 + {
4713 + bool is_socket = strncmp(event, "socket", 6) == 0;
4714 +@@ -165,10 +182,9 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
4715 +
4716 + #ifdef __x86_64__
4717 + if (strncmp(event, "sys_", 4) == 0) {
4718 +- snprintf(buf, sizeof(buf),
4719 +- "echo '%c:__x64_%s __x64_%s' >> /sys/kernel/debug/tracing/kprobe_events",
4720 +- is_kprobe ? 'p' : 'r', event, event);
4721 +- err = system(buf);
4722 ++ snprintf(buf, sizeof(buf), "%c:__x64_%s __x64_%s",
4723 ++ is_kprobe ? 'p' : 'r', event, event);
4724 ++ err = write_kprobe_events(buf);
4725 + if (err >= 0) {
4726 + need_normal_check = false;
4727 + event_prefix = "__x64_";
4728 +@@ -176,10 +192,9 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
4729 + }
4730 + #endif
4731 + if (need_normal_check) {
4732 +- snprintf(buf, sizeof(buf),
4733 +- "echo '%c:%s %s' >> /sys/kernel/debug/tracing/kprobe_events",
4734 +- is_kprobe ? 'p' : 'r', event, event);
4735 +- err = system(buf);
4736 ++ snprintf(buf, sizeof(buf), "%c:%s %s",
4737 ++ is_kprobe ? 'p' : 'r', event, event);
4738 ++ err = write_kprobe_events(buf);
4739 + if (err < 0) {
4740 + printf("failed to create kprobe '%s' error '%s'\n",
4741 + event, strerror(errno));
4742 +@@ -519,7 +534,7 @@ static int do_load_bpf_file(const char *path, fixup_map_cb fixup_map)
4743 + return 1;
4744 +
4745 + /* clear all kprobes */
4746 +- i = system("echo \"\" > /sys/kernel/debug/tracing/kprobe_events");
4747 ++ i = write_kprobe_events("");
4748 +
4749 + /* scan over all elf sections to get license and map info */
4750 + for (i = 1; i < ehdr.e_shnum; i++) {
4751 +diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
4752 +index 3d09844405c9..b8c866193ae6 100644
4753 +--- a/scripts/Kbuild.include
4754 ++++ b/scripts/Kbuild.include
4755 +@@ -262,9 +262,8 @@ ifndef CONFIG_TRIM_UNUSED_KSYMS
4756 +
4757 + cmd_and_fixdep = \
4758 + $(echo-cmd) $(cmd_$(1)); \
4759 +- scripts/basic/fixdep $(depfile) $@ '$(make-cmd)' > $(dot-target).tmp;\
4760 +- rm -f $(depfile); \
4761 +- mv -f $(dot-target).tmp $(dot-target).cmd;
4762 ++ scripts/basic/fixdep $(depfile) $@ '$(make-cmd)' > $(dot-target).cmd;\
4763 ++ rm -f $(depfile);
4764 +
4765 + else
4766 +
4767 +@@ -287,9 +286,8 @@ cmd_and_fixdep = \
4768 + $(echo-cmd) $(cmd_$(1)); \
4769 + $(ksym_dep_filter) | \
4770 + scripts/basic/fixdep -e $(depfile) $@ '$(make-cmd)' \
4771 +- > $(dot-target).tmp; \
4772 +- rm -f $(depfile); \
4773 +- mv -f $(dot-target).tmp $(dot-target).cmd;
4774 ++ > $(dot-target).cmd; \
4775 ++ rm -f $(depfile);
4776 +
4777 + endif
4778 +
4779 +diff --git a/scripts/Makefile.build b/scripts/Makefile.build
4780 +index 6a6be9f440cf..1d56f181b917 100644
4781 +--- a/scripts/Makefile.build
4782 ++++ b/scripts/Makefile.build
4783 +@@ -527,18 +527,16 @@ FORCE:
4784 + # optimization, we don't need to read them if the target does not
4785 + # exist, we will rebuild anyway in that case.
4786 +
4787 +-cmd_files := $(wildcard $(foreach f,$(sort $(targets)),$(dir $(f)).$(notdir $(f)).cmd))
4788 ++existing-targets := $(wildcard $(sort $(targets)))
4789 +
4790 +-ifneq ($(cmd_files),)
4791 +- include $(cmd_files)
4792 +-endif
4793 ++-include $(foreach f,$(existing-targets),$(dir $(f)).$(notdir $(f)).cmd)
4794 +
4795 + ifneq ($(KBUILD_SRC),)
4796 + # Create directories for object files if they do not exist
4797 + obj-dirs := $(sort $(obj) $(patsubst %/,%, $(dir $(targets))))
4798 +-# If cmd_files exist, their directories apparently exist. Skip mkdir.
4799 +-exist-dirs := $(sort $(patsubst %/,%, $(dir $(cmd_files))))
4800 +-obj-dirs := $(strip $(filter-out $(exist-dirs), $(obj-dirs)))
4801 ++# If targets exist, their directories apparently exist. Skip mkdir.
4802 ++existing-dirs := $(sort $(patsubst %/,%, $(dir $(existing-targets))))
4803 ++obj-dirs := $(strip $(filter-out $(existing-dirs), $(obj-dirs)))
4804 + ifneq ($(obj-dirs),)
4805 + $(shell mkdir -p $(obj-dirs))
4806 + endif
4807 +diff --git a/scripts/kconfig/zconf.l b/scripts/kconfig/zconf.l
4808 +index 25bd2b89fe3f..c2f577d71964 100644
4809 +--- a/scripts/kconfig/zconf.l
4810 ++++ b/scripts/kconfig/zconf.l
4811 +@@ -73,7 +73,7 @@ static void warn_ignored_character(char chr)
4812 + {
4813 + fprintf(stderr,
4814 + "%s:%d:warning: ignoring unsupported character '%c'\n",
4815 +- zconf_curname(), zconf_lineno(), chr);
4816 ++ current_file->name, yylineno, chr);
4817 + }
4818 + %}
4819 +
4820 +@@ -221,6 +221,8 @@ n [A-Za-z0-9_-]
4821 + }
4822 + <<EOF>> {
4823 + BEGIN(INITIAL);
4824 ++ yylval.string = text;
4825 ++ return T_WORD_QUOTE;
4826 + }
4827 + }
4828 +
4829 +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
4830 +index a67459eb62d5..0f27db6d94a9 100644
4831 +--- a/security/selinux/hooks.c
4832 ++++ b/security/selinux/hooks.c
4833 +@@ -2934,7 +2934,7 @@ static int selinux_sb_kern_mount(struct super_block *sb, int flags, void *data)
4834 + return rc;
4835 +
4836 + /* Allow all mounts performed by the kernel */
4837 +- if (flags & MS_KERNMOUNT)
4838 ++ if (flags & (MS_KERNMOUNT | MS_SUBMOUNT))
4839 + return 0;
4840 +
4841 + ad.type = LSM_AUDIT_DATA_DENTRY;
4842 +diff --git a/sound/firewire/Kconfig b/sound/firewire/Kconfig
4843 +index 8a146b039276..44cedb65bb88 100644
4844 +--- a/sound/firewire/Kconfig
4845 ++++ b/sound/firewire/Kconfig
4846 +@@ -41,6 +41,7 @@ config SND_OXFW
4847 + * Mackie(Loud) U.420/U.420d
4848 + * TASCAM FireOne
4849 + * Stanton Controllers & Systems 1 Deck/Mixer
4850 ++ * APOGEE duet FireWire
4851 +
4852 + To compile this driver as a module, choose M here: the module
4853 + will be called snd-oxfw.
4854 +diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
4855 +index 672d13488454..d91874275d2c 100644
4856 +--- a/sound/firewire/bebob/bebob.c
4857 ++++ b/sound/firewire/bebob/bebob.c
4858 +@@ -408,7 +408,7 @@ static const struct ieee1394_device_id bebob_id_table[] = {
4859 + /* Apogee Electronics, DA/AD/DD-16X (X-FireWire card) */
4860 + SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x00010048, &spec_normal),
4861 + /* Apogee Electronics, Ensemble */
4862 +- SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x00001eee, &spec_normal),
4863 ++ SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x01eeee, &spec_normal),
4864 + /* ESI, Quatafire610 */
4865 + SND_BEBOB_DEV_ENTRY(VEN_ESI, 0x00010064, &spec_normal),
4866 + /* AcousticReality, eARMasterOne */
4867 +diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
4868 +index afb78d90384b..3d27f3378d5d 100644
4869 +--- a/sound/firewire/oxfw/oxfw.c
4870 ++++ b/sound/firewire/oxfw/oxfw.c
4871 +@@ -20,6 +20,7 @@
4872 + #define VENDOR_LACIE 0x00d04b
4873 + #define VENDOR_TASCAM 0x00022e
4874 + #define OUI_STANTON 0x001260
4875 ++#define OUI_APOGEE 0x0003db
4876 +
4877 + #define MODEL_SATELLITE 0x00200f
4878 +
4879 +@@ -397,6 +398,13 @@ static const struct ieee1394_device_id oxfw_id_table[] = {
4880 + .vendor_id = OUI_STANTON,
4881 + .model_id = 0x002000,
4882 + },
4883 ++ // APOGEE, duet FireWire
4884 ++ {
4885 ++ .match_flags = IEEE1394_MATCH_VENDOR_ID |
4886 ++ IEEE1394_MATCH_MODEL_ID,
4887 ++ .vendor_id = OUI_APOGEE,
4888 ++ .model_id = 0x01dddd,
4889 ++ },
4890 + { }
4891 + };
4892 + MODULE_DEVICE_TABLE(ieee1394, oxfw_id_table);
4893 +diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c
4894 +index cdebab2f8ce5..7ada2c1f4964 100644
4895 +--- a/sound/soc/amd/acp-pcm-dma.c
4896 ++++ b/sound/soc/amd/acp-pcm-dma.c
4897 +@@ -1151,18 +1151,21 @@ static int acp_dma_new(struct snd_soc_pcm_runtime *rtd)
4898 + struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd,
4899 + DRV_NAME);
4900 + struct audio_drv_data *adata = dev_get_drvdata(component->dev);
4901 ++ struct device *parent = component->dev->parent;
4902 +
4903 + switch (adata->asic_type) {
4904 + case CHIP_STONEY:
4905 + ret = snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
4906 + SNDRV_DMA_TYPE_DEV,
4907 +- NULL, ST_MIN_BUFFER,
4908 ++ parent,
4909 ++ ST_MIN_BUFFER,
4910 + ST_MAX_BUFFER);
4911 + break;
4912 + default:
4913 + ret = snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
4914 + SNDRV_DMA_TYPE_DEV,
4915 +- NULL, MIN_BUFFER,
4916 ++ parent,
4917 ++ MIN_BUFFER,
4918 + MAX_BUFFER);
4919 + break;
4920 + }
4921 +diff --git a/sound/soc/codecs/pcm3168a.c b/sound/soc/codecs/pcm3168a.c
4922 +index 52cc950c9fd1..445d025e1409 100644
4923 +--- a/sound/soc/codecs/pcm3168a.c
4924 ++++ b/sound/soc/codecs/pcm3168a.c
4925 +@@ -770,15 +770,22 @@ err_clk:
4926 + }
4927 + EXPORT_SYMBOL_GPL(pcm3168a_probe);
4928 +
4929 +-void pcm3168a_remove(struct device *dev)
4930 ++static void pcm3168a_disable(struct device *dev)
4931 + {
4932 + struct pcm3168a_priv *pcm3168a = dev_get_drvdata(dev);
4933 +
4934 +- pm_runtime_disable(dev);
4935 + regulator_bulk_disable(ARRAY_SIZE(pcm3168a->supplies),
4936 +- pcm3168a->supplies);
4937 ++ pcm3168a->supplies);
4938 + clk_disable_unprepare(pcm3168a->scki);
4939 + }
4940 ++
4941 ++void pcm3168a_remove(struct device *dev)
4942 ++{
4943 ++ pm_runtime_disable(dev);
4944 ++#ifndef CONFIG_PM
4945 ++ pcm3168a_disable(dev);
4946 ++#endif
4947 ++}
4948 + EXPORT_SYMBOL_GPL(pcm3168a_remove);
4949 +
4950 + #ifdef CONFIG_PM
4951 +@@ -833,10 +840,7 @@ static int pcm3168a_rt_suspend(struct device *dev)
4952 +
4953 + regcache_cache_only(pcm3168a->regmap, true);
4954 +
4955 +- regulator_bulk_disable(ARRAY_SIZE(pcm3168a->supplies),
4956 +- pcm3168a->supplies);
4957 +-
4958 +- clk_disable_unprepare(pcm3168a->scki);
4959 ++ pcm3168a_disable(dev);
4960 +
4961 + return 0;
4962 + }
4963 +diff --git a/sound/soc/codecs/wm9705.c b/sound/soc/codecs/wm9705.c
4964 +index ccdf088461b7..54c306707c02 100644
4965 +--- a/sound/soc/codecs/wm9705.c
4966 ++++ b/sound/soc/codecs/wm9705.c
4967 +@@ -325,8 +325,7 @@ static int wm9705_soc_probe(struct snd_soc_component *component)
4968 + if (wm9705->mfd_pdata) {
4969 + wm9705->ac97 = wm9705->mfd_pdata->ac97;
4970 + regmap = wm9705->mfd_pdata->regmap;
4971 +- } else {
4972 +-#ifdef CONFIG_SND_SOC_AC97_BUS
4973 ++ } else if (IS_ENABLED(CONFIG_SND_SOC_AC97_BUS)) {
4974 + wm9705->ac97 = snd_soc_new_ac97_component(component, WM9705_VENDOR_ID,
4975 + WM9705_VENDOR_ID_MASK);
4976 + if (IS_ERR(wm9705->ac97)) {
4977 +@@ -339,7 +338,8 @@ static int wm9705_soc_probe(struct snd_soc_component *component)
4978 + snd_soc_free_ac97_component(wm9705->ac97);
4979 + return PTR_ERR(regmap);
4980 + }
4981 +-#endif
4982 ++ } else {
4983 ++ return -ENXIO;
4984 + }
4985 +
4986 + snd_soc_component_set_drvdata(component, wm9705->ac97);
4987 +@@ -350,14 +350,12 @@ static int wm9705_soc_probe(struct snd_soc_component *component)
4988 +
4989 + static void wm9705_soc_remove(struct snd_soc_component *component)
4990 + {
4991 +-#ifdef CONFIG_SND_SOC_AC97_BUS
4992 + struct wm9705_priv *wm9705 = snd_soc_component_get_drvdata(component);
4993 +
4994 +- if (!wm9705->mfd_pdata) {
4995 ++ if (IS_ENABLED(CONFIG_SND_SOC_AC97_BUS) && !wm9705->mfd_pdata) {
4996 + snd_soc_component_exit_regmap(component);
4997 + snd_soc_free_ac97_component(wm9705->ac97);
4998 + }
4999 +-#endif
5000 + }
5001 +
5002 + static const struct snd_soc_component_driver soc_component_dev_wm9705 = {
5003 +diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
5004 +index e873baa9e778..01949eaba4fd 100644
5005 +--- a/sound/soc/codecs/wm9712.c
5006 ++++ b/sound/soc/codecs/wm9712.c
5007 +@@ -642,8 +642,7 @@ static int wm9712_soc_probe(struct snd_soc_component *component)
5008 + if (wm9712->mfd_pdata) {
5009 + wm9712->ac97 = wm9712->mfd_pdata->ac97;
5010 + regmap = wm9712->mfd_pdata->regmap;
5011 +- } else {
5012 +-#ifdef CONFIG_SND_SOC_AC97_BUS
5013 ++ } else if (IS_ENABLED(CONFIG_SND_SOC_AC97_BUS)) {
5014 + int ret;
5015 +
5016 + wm9712->ac97 = snd_soc_new_ac97_component(component, WM9712_VENDOR_ID,
5017 +@@ -660,7 +659,8 @@ static int wm9712_soc_probe(struct snd_soc_component *component)
5018 + snd_soc_free_ac97_component(wm9712->ac97);
5019 + return PTR_ERR(regmap);
5020 + }
5021 +-#endif
5022 ++ } else {
5023 ++ return -ENXIO;
5024 + }
5025 +
5026 + snd_soc_component_init_regmap(component, regmap);
5027 +@@ -673,14 +673,12 @@ static int wm9712_soc_probe(struct snd_soc_component *component)
5028 +
5029 + static void wm9712_soc_remove(struct snd_soc_component *component)
5030 + {
5031 +-#ifdef CONFIG_SND_SOC_AC97_BUS
5032 + struct wm9712_priv *wm9712 = snd_soc_component_get_drvdata(component);
5033 +
5034 +- if (!wm9712->mfd_pdata) {
5035 ++ if (IS_ENABLED(CONFIG_SND_SOC_AC97_BUS) && !wm9712->mfd_pdata) {
5036 + snd_soc_component_exit_regmap(component);
5037 + snd_soc_free_ac97_component(wm9712->ac97);
5038 + }
5039 +-#endif
5040 + }
5041 +
5042 + static const struct snd_soc_component_driver soc_component_dev_wm9712 = {
5043 +diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
5044 +index 643863bb32e0..5a2fdf4f69bf 100644
5045 +--- a/sound/soc/codecs/wm9713.c
5046 ++++ b/sound/soc/codecs/wm9713.c
5047 +@@ -1214,8 +1214,7 @@ static int wm9713_soc_probe(struct snd_soc_component *component)
5048 + if (wm9713->mfd_pdata) {
5049 + wm9713->ac97 = wm9713->mfd_pdata->ac97;
5050 + regmap = wm9713->mfd_pdata->regmap;
5051 +- } else {
5052 +-#ifdef CONFIG_SND_SOC_AC97_BUS
5053 ++ } else if (IS_ENABLED(CONFIG_SND_SOC_AC97_BUS)) {
5054 + wm9713->ac97 = snd_soc_new_ac97_component(component, WM9713_VENDOR_ID,
5055 + WM9713_VENDOR_ID_MASK);
5056 + if (IS_ERR(wm9713->ac97))
5057 +@@ -1225,7 +1224,8 @@ static int wm9713_soc_probe(struct snd_soc_component *component)
5058 + snd_soc_free_ac97_component(wm9713->ac97);
5059 + return PTR_ERR(regmap);
5060 + }
5061 +-#endif
5062 ++ } else {
5063 ++ return -ENXIO;
5064 + }
5065 +
5066 + snd_soc_component_init_regmap(component, regmap);
5067 +@@ -1238,14 +1238,12 @@ static int wm9713_soc_probe(struct snd_soc_component *component)
5068 +
5069 + static void wm9713_soc_remove(struct snd_soc_component *component)
5070 + {
5071 +-#ifdef CONFIG_SND_SOC_AC97_BUS
5072 + struct wm9713_priv *wm9713 = snd_soc_component_get_drvdata(component);
5073 +
5074 +- if (!wm9713->mfd_pdata) {
5075 ++ if (IS_ENABLED(CONFIG_SND_SOC_AC97_BUS) && !wm9713->mfd_pdata) {
5076 + snd_soc_component_exit_regmap(component);
5077 + snd_soc_free_ac97_component(wm9713->ac97);
5078 + }
5079 +-#endif
5080 + }
5081 +
5082 + static const struct snd_soc_component_driver soc_component_dev_wm9713 = {
5083 +diff --git a/tools/lib/subcmd/Makefile b/tools/lib/subcmd/Makefile
5084 +index 95563b8e1ad7..ed61fb3a46c0 100644
5085 +--- a/tools/lib/subcmd/Makefile
5086 ++++ b/tools/lib/subcmd/Makefile
5087 +@@ -36,8 +36,6 @@ endif
5088 + CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
5089 +
5090 + CFLAGS += -I$(srctree)/tools/include/
5091 +-CFLAGS += -I$(srctree)/include/uapi
5092 +-CFLAGS += -I$(srctree)/include
5093 +
5094 + SUBCMD_IN := $(OUTPUT)libsubcmd-in.o
5095 +
5096 +diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
5097 +index 70144b98141c..8ea1a02812b0 100644
5098 +--- a/tools/lib/traceevent/event-parse.c
5099 ++++ b/tools/lib/traceevent/event-parse.c
5100 +@@ -3498,7 +3498,7 @@ struct tep_event_format *
5101 + tep_find_event_by_name(struct tep_handle *pevent,
5102 + const char *sys, const char *name)
5103 + {
5104 +- struct tep_event_format *event;
5105 ++ struct tep_event_format *event = NULL;
5106 + int i;
5107 +
5108 + if (pevent->last_event &&
5109 +@@ -4221,7 +4221,7 @@ static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, s
5110 + unsigned long long ip, val;
5111 + char *ptr;
5112 + void *bptr;
5113 +- int vsize;
5114 ++ int vsize = 0;
5115 +
5116 + field = pevent->bprint_buf_field;
5117 + ip_field = pevent->bprint_ip_field;
5118 +@@ -4881,7 +4881,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_e
5119 + char format[32];
5120 + int show_func;
5121 + int len_as_arg;
5122 +- int len_arg;
5123 ++ int len_arg = 0;
5124 + int len;
5125 + int ls;
5126 +
5127 +@@ -5147,8 +5147,8 @@ void tep_data_lat_fmt(struct tep_handle *pevent,
5128 + static int migrate_disable_exists;
5129 + unsigned int lat_flags;
5130 + unsigned int pc;
5131 +- int lock_depth;
5132 +- int migrate_disable;
5133 ++ int lock_depth = 0;
5134 ++ int migrate_disable = 0;
5135 + int hardirq;
5136 + int softirq;
5137 + void *data = record->data;
5138 +diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
5139 +index a0e8c23f9125..acbb657f7ce2 100644
5140 +--- a/tools/perf/Makefile.config
5141 ++++ b/tools/perf/Makefile.config
5142 +@@ -294,6 +294,8 @@ ifndef NO_BIONIC
5143 + $(call feature_check,bionic)
5144 + ifeq ($(feature-bionic), 1)
5145 + BIONIC := 1
5146 ++ CFLAGS += -DLACKS_SIGQUEUE_PROTOTYPE
5147 ++ CFLAGS += -DLACKS_OPEN_MEMSTREAM_PROTOTYPE
5148 + EXTLIBS := $(filter-out -lrt,$(EXTLIBS))
5149 + EXTLIBS := $(filter-out -lpthread,$(EXTLIBS))
5150 + endif
5151 +diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
5152 +index db0ba8caf5a2..ba8ecaf52200 100644
5153 +--- a/tools/perf/arch/x86/util/intel-pt.c
5154 ++++ b/tools/perf/arch/x86/util/intel-pt.c
5155 +@@ -524,10 +524,21 @@ static int intel_pt_validate_config(struct perf_pmu *intel_pt_pmu,
5156 + struct perf_evsel *evsel)
5157 + {
5158 + int err;
5159 ++ char c;
5160 +
5161 + if (!evsel)
5162 + return 0;
5163 +
5164 ++ /*
5165 ++ * If supported, force pass-through config term (pt=1) even if user
5166 ++ * sets pt=0, which avoids senseless kernel errors.
5167 ++ */
5168 ++ if (perf_pmu__scan_file(intel_pt_pmu, "format/pt", "%c", &c) == 1 &&
5169 ++ !(evsel->attr.config & 1)) {
5170 ++ pr_warning("pt=0 doesn't make sense, forcing pt=1\n");
5171 ++ evsel->attr.config |= 1;
5172 ++ }
5173 ++
5174 + err = intel_pt_val_config_term(intel_pt_pmu, "caps/cycle_thresholds",
5175 + "cyc_thresh", "caps/psb_cyc",
5176 + evsel->attr.config);
5177 +diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
5178 +index a635abfa77b6..1410d66192f7 100644
5179 +--- a/tools/perf/builtin-stat.c
5180 ++++ b/tools/perf/builtin-stat.c
5181 +@@ -709,7 +709,7 @@ static int parse_metric_groups(const struct option *opt,
5182 + return metricgroup__parse_groups(opt, str, &stat_config.metric_events);
5183 + }
5184 +
5185 +-static const struct option stat_options[] = {
5186 ++static struct option stat_options[] = {
5187 + OPT_BOOLEAN('T', "transaction", &transaction_run,
5188 + "hardware transaction statistics"),
5189 + OPT_CALLBACK('e', "event", &evsel_list, "event",
5190 +@@ -1599,6 +1599,12 @@ int cmd_stat(int argc, const char **argv)
5191 + return -ENOMEM;
5192 +
5193 + parse_events__shrink_config_terms();
5194 ++
5195 ++ /* String-parsing callback-based options would segfault when negated */
5196 ++ set_option_flag(stat_options, 'e', "event", PARSE_OPT_NONEG);
5197 ++ set_option_flag(stat_options, 'M', "metrics", PARSE_OPT_NONEG);
5198 ++ set_option_flag(stat_options, 'G', "cgroup", PARSE_OPT_NONEG);
5199 ++
5200 + argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands,
5201 + (const char **) stat_usage,
5202 + PARSE_OPT_STOP_AT_NON_OPTION);
5203 +diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
5204 +index a827919c6263..775b99833e51 100644
5205 +--- a/tools/perf/builtin-timechart.c
5206 ++++ b/tools/perf/builtin-timechart.c
5207 +@@ -43,6 +43,10 @@
5208 + #include "util/data.h"
5209 + #include "util/debug.h"
5210 +
5211 ++#ifdef LACKS_OPEN_MEMSTREAM_PROTOTYPE
5212 ++FILE *open_memstream(char **ptr, size_t *sizeloc);
5213 ++#endif
5214 ++
5215 + #define SUPPORT_OLD_POWER_EVENTS 1
5216 + #define PWR_EVENT_EXIT -1
5217 +
5218 +diff --git a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
5219 +index 36c903faed0b..71e9737f4614 100644
5220 +--- a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
5221 ++++ b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
5222 +@@ -73,7 +73,7 @@
5223 + },
5224 + {
5225 + "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
5226 +- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS_PS + MEM_LOAD_RETIRED.FB_HIT_PS )",
5227 ++ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
5228 + "MetricGroup": "Memory_Bound;Memory_Lat",
5229 + "MetricName": "Load_Miss_Real_Latency"
5230 + },
5231 +diff --git a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
5232 +index 36c903faed0b..71e9737f4614 100644
5233 +--- a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
5234 ++++ b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
5235 +@@ -73,7 +73,7 @@
5236 + },
5237 + {
5238 + "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
5239 +- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS_PS + MEM_LOAD_RETIRED.FB_HIT_PS )",
5240 ++ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
5241 + "MetricGroup": "Memory_Bound;Memory_Lat",
5242 + "MetricName": "Load_Miss_Real_Latency"
5243 + },
5244 +diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c
5245 +index a467615c5a0e..910e25e64188 100644
5246 +--- a/tools/perf/tests/bp_signal.c
5247 ++++ b/tools/perf/tests/bp_signal.c
5248 +@@ -291,12 +291,20 @@ int test__bp_signal(struct test *test __maybe_unused, int subtest __maybe_unused
5249 +
5250 + bool test__bp_signal_is_supported(void)
5251 + {
5252 +-/*
5253 +- * The powerpc so far does not have support to even create
5254 +- * instruction breakpoint using the perf event interface.
5255 +- * Once it's there we can release this.
5256 +- */
5257 +-#if defined(__powerpc__) || defined(__s390x__)
5258 ++ /*
5259 ++ * PowerPC and S390 do not support creation of instruction
5260 ++ * breakpoints using the perf_event interface.
5261 ++ *
5262 ++ * ARM requires explicit rounding down of the instruction
5263 ++ * pointer in Thumb mode, and then requires the single-step
5264 ++ * to be handled explicitly in the overflow handler to avoid
5265 ++ * stepping into the SIGIO handler and getting stuck on the
5266 ++ * breakpointed instruction.
5267 ++ *
5268 ++ * Just disable the test for these architectures until these
5269 ++ * issues are resolved.
5270 ++ */
5271 ++#if defined(__powerpc__) || defined(__s390x__) || defined(__arm__)
5272 + return false;
5273 + #else
5274 + return true;
5275 +diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
5276 +index 73430b73570d..c2f0c92623f0 100644
5277 +--- a/tools/perf/util/cs-etm.c
5278 ++++ b/tools/perf/util/cs-etm.c
5279 +@@ -1005,7 +1005,7 @@ static int cs_etm__flush(struct cs_etm_queue *etmq)
5280 + }
5281 +
5282 + swap_packet:
5283 +- if (etmq->etm->synth_opts.last_branch) {
5284 ++ if (etm->sample_branches || etm->synth_opts.last_branch) {
5285 + /*
5286 + * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
5287 + * the next incoming packet.
5288 +diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
5289 +index 668d2a9ef0f4..8a806b0758b0 100644
5290 +--- a/tools/perf/util/evlist.c
5291 ++++ b/tools/perf/util/evlist.c
5292 +@@ -34,6 +34,10 @@
5293 + #include <linux/log2.h>
5294 + #include <linux/err.h>
5295 +
5296 ++#ifdef LACKS_SIGQUEUE_PROTOTYPE
5297 ++int sigqueue(pid_t pid, int sig, const union sigval value);
5298 ++#endif
5299 ++
5300 + #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
5301 + #define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
5302 +
5303 +diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
5304 +index 59be3466d64d..920e1e6551dd 100644
5305 +--- a/tools/perf/util/parse-events.c
5306 ++++ b/tools/perf/util/parse-events.c
5307 +@@ -2462,7 +2462,7 @@ restart:
5308 + if (!name_only && strlen(syms->alias))
5309 + snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
5310 + else
5311 +- strncpy(name, syms->symbol, MAX_NAME_LEN);
5312 ++ strlcpy(name, syms->symbol, MAX_NAME_LEN);
5313 +
5314 + evt_list[evt_i] = strdup(name);
5315 + if (evt_list[evt_i] == NULL)
5316 +diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
5317 +index 1cbada2dc6be..f735ee038713 100644
5318 +--- a/tools/perf/util/svghelper.c
5319 ++++ b/tools/perf/util/svghelper.c
5320 +@@ -334,7 +334,7 @@ static char *cpu_model(void)
5321 + if (file) {
5322 + while (fgets(buf, 255, file)) {
5323 + if (strstr(buf, "model name")) {
5324 +- strncpy(cpu_m, &buf[13], 255);
5325 ++ strlcpy(cpu_m, &buf[13], 255);
5326 + break;
5327 + }
5328 + }
5329 +diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
5330 +index e39dfb4e7970..ecd79b7fb107 100644
5331 +--- a/tools/testing/selftests/bpf/Makefile
5332 ++++ b/tools/testing/selftests/bpf/Makefile
5333 +@@ -135,6 +135,16 @@ endif
5334 + endif
5335 + endif
5336 +
5337 ++# Have one program compiled without "-target bpf" to test whether libbpf loads
5338 ++# it successfully
5339 ++$(OUTPUT)/test_xdp.o: test_xdp.c
5340 ++ $(CLANG) $(CLANG_FLAGS) \
5341 ++ -O2 -emit-llvm -c $< -o - | \
5342 ++ $(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@
5343 ++ifeq ($(DWARF2BTF),y)
5344 ++ $(BTF_PAHOLE) -J $@
5345 ++endif
5346 ++
5347 + $(OUTPUT)/%.o: %.c
5348 + $(CLANG) $(CLANG_FLAGS) \
5349 + -O2 -target bpf -emit-llvm -c $< -o - | \
5350 +diff --git a/tools/testing/selftests/bpf/test_libbpf.sh b/tools/testing/selftests/bpf/test_libbpf.sh
5351 +index 156d89f1edcc..2989b2e2d856 100755
5352 +--- a/tools/testing/selftests/bpf/test_libbpf.sh
5353 ++++ b/tools/testing/selftests/bpf/test_libbpf.sh
5354 +@@ -33,17 +33,11 @@ trap exit_handler 0 2 3 6 9
5355 +
5356 + libbpf_open_file test_l4lb.o
5357 +
5358 +-# TODO: fix libbpf to load noinline functions
5359 +-# [warning] libbpf: incorrect bpf_call opcode
5360 +-#libbpf_open_file test_l4lb_noinline.o
5361 ++# Load a program with BPF-to-BPF calls
5362 ++libbpf_open_file test_l4lb_noinline.o
5363 +
5364 +-# TODO: fix test_xdp_meta.c to load with libbpf
5365 +-# [warning] libbpf: test_xdp_meta.o doesn't provide kernel version
5366 +-#libbpf_open_file test_xdp_meta.o
5367 +-
5368 +-# TODO: fix libbpf to handle .eh_frame
5369 +-# [warning] libbpf: relocation failed: no section(10)
5370 +-#libbpf_open_file ../../../../samples/bpf/tracex3_kern.o
5371 ++# Load a program compiled without the "-target bpf" flag
5372 ++libbpf_open_file test_xdp.o
5373 +
5374 + # Success
5375 + exit 0
5376 +diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
5377 +index f8eac4a544f4..444f49176a2d 100644
5378 +--- a/tools/testing/selftests/bpf/test_verifier.c
5379 ++++ b/tools/testing/selftests/bpf/test_verifier.c
5380 +@@ -2903,6 +2903,19 @@ static struct bpf_test tests[] = {
5381 + .result_unpriv = REJECT,
5382 + .result = ACCEPT,
5383 + },
5384 ++ {
5385 ++ "alu32: mov u32 const",
5386 ++ .insns = {
5387 ++ BPF_MOV32_IMM(BPF_REG_7, 0),
5388 ++ BPF_ALU32_IMM(BPF_AND, BPF_REG_7, 1),
5389 ++ BPF_MOV32_REG(BPF_REG_0, BPF_REG_7),
5390 ++ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5391 ++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
5392 ++ BPF_EXIT_INSN(),
5393 ++ },
5394 ++ .result = ACCEPT,
5395 ++ .retval = 0,
5396 ++ },
5397 + {
5398 + "unpriv: partial copy of pointer",
5399 + .insns = {
5400 +diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
5401 +index 6ae3730c4ee3..76d654ef3234 100644
5402 +--- a/tools/testing/selftests/kselftest_harness.h
5403 ++++ b/tools/testing/selftests/kselftest_harness.h
5404 +@@ -354,7 +354,7 @@
5405 + * ASSERT_EQ(expected, measured): expected == measured
5406 + */
5407 + #define ASSERT_EQ(expected, seen) \
5408 +- __EXPECT(expected, seen, ==, 1)
5409 ++ __EXPECT(expected, #expected, seen, #seen, ==, 1)
5410 +
5411 + /**
5412 + * ASSERT_NE(expected, seen)
5413 +@@ -365,7 +365,7 @@
5414 + * ASSERT_NE(expected, measured): expected != measured
5415 + */
5416 + #define ASSERT_NE(expected, seen) \
5417 +- __EXPECT(expected, seen, !=, 1)
5418 ++ __EXPECT(expected, #expected, seen, #seen, !=, 1)
5419 +
5420 + /**
5421 + * ASSERT_LT(expected, seen)
5422 +@@ -376,7 +376,7 @@
5423 + * ASSERT_LT(expected, measured): expected < measured
5424 + */
5425 + #define ASSERT_LT(expected, seen) \
5426 +- __EXPECT(expected, seen, <, 1)
5427 ++ __EXPECT(expected, #expected, seen, #seen, <, 1)
5428 +
5429 + /**
5430 + * ASSERT_LE(expected, seen)
5431 +@@ -387,7 +387,7 @@
5432 + * ASSERT_LE(expected, measured): expected <= measured
5433 + */
5434 + #define ASSERT_LE(expected, seen) \
5435 +- __EXPECT(expected, seen, <=, 1)
5436 ++ __EXPECT(expected, #expected, seen, #seen, <=, 1)
5437 +
5438 + /**
5439 + * ASSERT_GT(expected, seen)
5440 +@@ -398,7 +398,7 @@
5441 + * ASSERT_GT(expected, measured): expected > measured
5442 + */
5443 + #define ASSERT_GT(expected, seen) \
5444 +- __EXPECT(expected, seen, >, 1)
5445 ++ __EXPECT(expected, #expected, seen, #seen, >, 1)
5446 +
5447 + /**
5448 + * ASSERT_GE(expected, seen)
5449 +@@ -409,7 +409,7 @@
5450 + * ASSERT_GE(expected, measured): expected >= measured
5451 + */
5452 + #define ASSERT_GE(expected, seen) \
5453 +- __EXPECT(expected, seen, >=, 1)
5454 ++ __EXPECT(expected, #expected, seen, #seen, >=, 1)
5455 +
5456 + /**
5457 + * ASSERT_NULL(seen)
5458 +@@ -419,7 +419,7 @@
5459 + * ASSERT_NULL(measured): NULL == measured
5460 + */
5461 + #define ASSERT_NULL(seen) \
5462 +- __EXPECT(NULL, seen, ==, 1)
5463 ++ __EXPECT(NULL, "NULL", seen, #seen, ==, 1)
5464 +
5465 + /**
5466 + * ASSERT_TRUE(seen)
5467 +@@ -429,7 +429,7 @@
5468 + * ASSERT_TRUE(measured): measured != 0
5469 + */
5470 + #define ASSERT_TRUE(seen) \
5471 +- ASSERT_NE(0, seen)
5472 ++ __EXPECT(0, "0", seen, #seen, !=, 1)
5473 +
5474 + /**
5475 + * ASSERT_FALSE(seen)
5476 +@@ -439,7 +439,7 @@
5477 + * ASSERT_FALSE(measured): measured == 0
5478 + */
5479 + #define ASSERT_FALSE(seen) \
5480 +- ASSERT_EQ(0, seen)
5481 ++ __EXPECT(0, "0", seen, #seen, ==, 1)
5482 +
5483 + /**
5484 + * ASSERT_STREQ(expected, seen)
5485 +@@ -472,7 +472,7 @@
5486 + * EXPECT_EQ(expected, measured): expected == measured
5487 + */
5488 + #define EXPECT_EQ(expected, seen) \
5489 +- __EXPECT(expected, seen, ==, 0)
5490 ++ __EXPECT(expected, #expected, seen, #seen, ==, 0)
5491 +
5492 + /**
5493 + * EXPECT_NE(expected, seen)
5494 +@@ -483,7 +483,7 @@
5495 + * EXPECT_NE(expected, measured): expected != measured
5496 + */
5497 + #define EXPECT_NE(expected, seen) \
5498 +- __EXPECT(expected, seen, !=, 0)
5499 ++ __EXPECT(expected, #expected, seen, #seen, !=, 0)
5500 +
5501 + /**
5502 + * EXPECT_LT(expected, seen)
5503 +@@ -494,7 +494,7 @@
5504 + * EXPECT_LT(expected, measured): expected < measured
5505 + */
5506 + #define EXPECT_LT(expected, seen) \
5507 +- __EXPECT(expected, seen, <, 0)
5508 ++ __EXPECT(expected, #expected, seen, #seen, <, 0)
5509 +
5510 + /**
5511 + * EXPECT_LE(expected, seen)
5512 +@@ -505,7 +505,7 @@
5513 + * EXPECT_LE(expected, measured): expected <= measured
5514 + */
5515 + #define EXPECT_LE(expected, seen) \
5516 +- __EXPECT(expected, seen, <=, 0)
5517 ++ __EXPECT(expected, #expected, seen, #seen, <=, 0)
5518 +
5519 + /**
5520 + * EXPECT_GT(expected, seen)
5521 +@@ -516,7 +516,7 @@
5522 + * EXPECT_GT(expected, measured): expected > measured
5523 + */
5524 + #define EXPECT_GT(expected, seen) \
5525 +- __EXPECT(expected, seen, >, 0)
5526 ++ __EXPECT(expected, #expected, seen, #seen, >, 0)
5527 +
5528 + /**
5529 + * EXPECT_GE(expected, seen)
5530 +@@ -527,7 +527,7 @@
5531 + * EXPECT_GE(expected, measured): expected >= measured
5532 + */
5533 + #define EXPECT_GE(expected, seen) \
5534 +- __EXPECT(expected, seen, >=, 0)
5535 ++ __EXPECT(expected, #expected, seen, #seen, >=, 0)
5536 +
5537 + /**
5538 + * EXPECT_NULL(seen)
5539 +@@ -537,7 +537,7 @@
5540 + * EXPECT_NULL(measured): NULL == measured
5541 + */
5542 + #define EXPECT_NULL(seen) \
5543 +- __EXPECT(NULL, seen, ==, 0)
5544 ++ __EXPECT(NULL, "NULL", seen, #seen, ==, 0)
5545 +
5546 + /**
5547 + * EXPECT_TRUE(seen)
5548 +@@ -547,7 +547,7 @@
5549 + * EXPECT_TRUE(measured): 0 != measured
5550 + */
5551 + #define EXPECT_TRUE(seen) \
5552 +- EXPECT_NE(0, seen)
5553 ++ __EXPECT(0, "0", seen, #seen, !=, 0)
5554 +
5555 + /**
5556 + * EXPECT_FALSE(seen)
5557 +@@ -557,7 +557,7 @@
5558 + * EXPECT_FALSE(measured): 0 == measured
5559 + */
5560 + #define EXPECT_FALSE(seen) \
5561 +- EXPECT_EQ(0, seen)
5562 ++ __EXPECT(0, "0", seen, #seen, ==, 0)
5563 +
5564 + /**
5565 + * EXPECT_STREQ(expected, seen)
5566 +@@ -597,7 +597,7 @@
5567 + if (_metadata->passed && _metadata->step < 255) \
5568 + _metadata->step++;
5569 +
5570 +-#define __EXPECT(_expected, _seen, _t, _assert) do { \
5571 ++#define __EXPECT(_expected, _expected_str, _seen, _seen_str, _t, _assert) do { \
5572 + /* Avoid multiple evaluation of the cases */ \
5573 + __typeof__(_expected) __exp = (_expected); \
5574 + __typeof__(_seen) __seen = (_seen); \
5575 +@@ -606,8 +606,8 @@
5576 + unsigned long long __exp_print = (uintptr_t)__exp; \
5577 + unsigned long long __seen_print = (uintptr_t)__seen; \
5578 + __TH_LOG("Expected %s (%llu) %s %s (%llu)", \
5579 +- #_expected, __exp_print, #_t, \
5580 +- #_seen, __seen_print); \
5581 ++ _expected_str, __exp_print, #_t, \
5582 ++ _seen_str, __seen_print); \
5583 + _metadata->passed = 0; \
5584 + /* Ensure the optional handler is triggered */ \
5585 + _metadata->trigger = 1; \