From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.8 commit in: /
Date: Thu, 01 Oct 2020 19:00:38
Message-Id: 1601578819.763eb4b84c25bff950fbac603a2248bb551b4f23.mpagano@gentoo
commit: 763eb4b84c25bff950fbac603a2248bb551b4f23
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Oct 1 19:00:19 2020 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Oct 1 19:00:19 2020 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=763eb4b8

Linux patch 5.8.13

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1012_linux-5.8.13.patch | 3615 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 3619 insertions(+)

diff --git a/0000_README b/0000_README
index 51cee27..0944db1 100644
--- a/0000_README
+++ b/0000_README
@@ -91,6 +91,10 @@ Patch: 1011_linux-5.8.12.patch
From: http://www.kernel.org
Desc: Linux 5.8.12

+Patch: 1012_linux-5.8.13.patch
+From: http://www.kernel.org
+Desc: Linux 5.8.13
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1012_linux-5.8.13.patch b/1012_linux-5.8.13.patch
new file mode 100644
index 0000000..10424ba
--- /dev/null
+++ b/1012_linux-5.8.13.patch
@@ -0,0 +1,3615 @@
+diff --git a/Makefile b/Makefile
+index d0d40c628dc34..0d81d8cba48b6 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 8
+-SUBLEVEL = 12
++SUBLEVEL = 13
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
+index 4d0f8ea600ba4..e1254e55835bb 100644
+--- a/arch/arm64/include/asm/kvm_emulate.h
++++ b/arch/arm64/include/asm/kvm_emulate.h
+@@ -319,7 +319,7 @@ static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
+ return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
+ }
+
+-static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
++static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
+ {
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
+ }
+@@ -327,7 +327,7 @@ static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
+ static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
+ {
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
+- kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
++ kvm_vcpu_abt_iss1tw(vcpu); /* AF/DBM update */
+ }
+
+ static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
+@@ -356,6 +356,11 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
+ return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
+ }
+
++static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
++{
++ return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
++}
++
+ static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
+ {
+ return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
+@@ -393,6 +398,9 @@ static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
+
+ static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
+ {
++ if (kvm_vcpu_abt_iss1tw(vcpu))
++ return true;
++
+ if (kvm_vcpu_trap_is_iabt(vcpu))
+ return false;
+
+diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
+index ba225e09aaf15..8564742948d31 100644
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -599,7 +599,7 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
+ kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
+ kvm_vcpu_dabt_isvalid(vcpu) &&
+ !kvm_vcpu_dabt_isextabt(vcpu) &&
+- !kvm_vcpu_dabt_iss1tw(vcpu);
++ !kvm_vcpu_abt_iss1tw(vcpu);
+
+ if (valid) {
+ int ret = __vgic_v2_perform_cpuif_access(vcpu);
+diff --git a/arch/arm64/kvm/mmio.c b/arch/arm64/kvm/mmio.c
+index 4e0366759726d..07e9b6eab59e4 100644
+--- a/arch/arm64/kvm/mmio.c
++++ b/arch/arm64/kvm/mmio.c
+@@ -146,7 +146,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ }
+
+ /* Page table accesses IO mem: tell guest to fix its TTBR */
+- if (kvm_vcpu_dabt_iss1tw(vcpu)) {
++ if (kvm_vcpu_abt_iss1tw(vcpu)) {
+ kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
+ return 1;
+ }
+diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
+index d906350d543dd..1677107b74de2 100644
+--- a/arch/arm64/kvm/mmu.c
++++ b/arch/arm64/kvm/mmu.c
+@@ -1845,7 +1845,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ unsigned long vma_pagesize, flags = 0;
+
+ write_fault = kvm_is_write_fault(vcpu);
+- exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
++ exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
+ VM_BUG_ON(write_fault && exec_fault);
+
+ if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
+diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
+index 0b3fb4c7af292..8e7b8c6c576ee 100644
+--- a/arch/ia64/mm/init.c
++++ b/arch/ia64/mm/init.c
+@@ -538,7 +538,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg)
+ if (map_start < map_end)
+ memmap_init_zone((unsigned long)(map_end - map_start),
+ args->nid, args->zone, page_to_pfn(map_start),
+- MEMMAP_EARLY, NULL);
++ MEMINIT_EARLY, NULL);
+ return 0;
+ }
+
+@@ -547,8 +547,8 @@ memmap_init (unsigned long size, int nid, unsigned long zone,
+ unsigned long start_pfn)
+ {
+ if (!vmem_map) {
+- memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY,
+- NULL);
++ memmap_init_zone(size, nid, zone, start_pfn,
++ MEMINIT_EARLY, NULL);
+ } else {
+ struct page *start;
+ struct memmap_init_callback_data args;
+diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
+index 75a7a382da099..3288cef4b168c 100644
+--- a/arch/mips/include/asm/cpu-type.h
++++ b/arch/mips/include/asm/cpu-type.h
+@@ -47,6 +47,7 @@ static inline int __pure __get_cpu_type(const int cpu_type)
+ case CPU_34K:
+ case CPU_1004K:
+ case CPU_74K:
++ case CPU_1074K:
+ case CPU_M14KC:
+ case CPU_M14KEC:
+ case CPU_INTERAPTIV:
+diff --git a/arch/mips/loongson2ef/Platform b/arch/mips/loongson2ef/Platform
+index cdad3c1a9a18f..7db0935bda3d1 100644
+--- a/arch/mips/loongson2ef/Platform
++++ b/arch/mips/loongson2ef/Platform
+@@ -22,6 +22,10 @@ ifdef CONFIG_CPU_LOONGSON2F_WORKAROUNDS
+ endif
+ endif
+
++# Some -march= flags enable MMI instructions, and GCC complains about that
++# support being enabled alongside -msoft-float. Thus explicitly disable MMI.
++cflags-y += $(call cc-option,-mno-loongson-mmi)
++
+ #
+ # Loongson Machines' Support
+ #
+diff --git a/arch/mips/loongson64/cop2-ex.c b/arch/mips/loongson64/cop2-ex.c
+index f130f62129b86..00055d4b6042f 100644
+--- a/arch/mips/loongson64/cop2-ex.c
++++ b/arch/mips/loongson64/cop2-ex.c
+@@ -95,10 +95,8 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
+ if (res)
+ goto fault;
+
+- set_fpr64(current->thread.fpu.fpr,
+- insn.loongson3_lswc2_format.rt, value);
+- set_fpr64(current->thread.fpu.fpr,
+- insn.loongson3_lswc2_format.rq, value_next);
++ set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rt], 0, value);
++ set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rq], 0, value_next);
+ compute_return_epc(regs);
+ own_fpu(1);
+ }
+@@ -130,15 +128,13 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
+ goto sigbus;
+
+ lose_fpu(1);
+- value_next = get_fpr64(current->thread.fpu.fpr,
+- insn.loongson3_lswc2_format.rq);
++ value_next = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rq], 0);
+
+ StoreDW(addr + 8, value_next, res);
+ if (res)
+ goto fault;
+
+- value = get_fpr64(current->thread.fpu.fpr,
+- insn.loongson3_lswc2_format.rt);
++ value = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rt], 0);
+
+ StoreDW(addr, value, res);
+ if (res)
+@@ -204,8 +200,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
+ if (res)
+ goto fault;
+
+- set_fpr64(current->thread.fpu.fpr,
+- insn.loongson3_lsdc2_format.rt, value);
++ set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0, value);
+ compute_return_epc(regs);
+ own_fpu(1);
+
+@@ -221,8 +216,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
+ if (res)
+ goto fault;
+
+- set_fpr64(current->thread.fpu.fpr,
+- insn.loongson3_lsdc2_format.rt, value);
++ set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0, value);
+ compute_return_epc(regs);
+ own_fpu(1);
+ break;
+@@ -286,8 +280,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
+ goto sigbus;
+
+ lose_fpu(1);
+- value = get_fpr64(current->thread.fpu.fpr,
+- insn.loongson3_lsdc2_format.rt);
++ value = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0);
+
+ StoreW(addr, value, res);
+ if (res)
+@@ -305,8 +298,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
+ if (res)
+ goto fault;
+
+ lose_fpu(1);
+- value = get_fpr64(current->thread.fpu.fpr,
+- insn.loongson3_lsdc2_format.rt);
++ value = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0);
+
+ StoreDW(addr, value, res);
+ if (res)
+diff --git a/arch/riscv/boot/dts/kendryte/k210.dtsi b/arch/riscv/boot/dts/kendryte/k210.dtsi
+index c1df56ccb8d55..d2d0ff6456325 100644
+--- a/arch/riscv/boot/dts/kendryte/k210.dtsi
++++ b/arch/riscv/boot/dts/kendryte/k210.dtsi
+@@ -95,10 +95,12 @@
+ #clock-cells = <1>;
+ };
+
+- clint0: interrupt-controller@2000000 {
++ clint0: clint@2000000 {
++ #interrupt-cells = <1>;
+ compatible = "riscv,clint0";
+ reg = <0x2000000 0xC000>;
+- interrupts-extended = <&cpu0_intc 3>, <&cpu1_intc 3>;
++ interrupts-extended = <&cpu0_intc 3 &cpu0_intc 7
++ &cpu1_intc 3 &cpu1_intc 7>;
+ clocks = <&sysctl K210_CLK_ACLK>;
+ };
+
+diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
+index ace8a6e2d11d3..845002cc2e571 100644
+--- a/arch/riscv/include/asm/ftrace.h
++++ b/arch/riscv/include/asm/ftrace.h
+@@ -66,6 +66,13 @@ do { \
+ * Let auipc+jalr be the basic *mcount unit*, so we make it 8 bytes here.
+ */
+ #define MCOUNT_INSN_SIZE 8
++
++#ifndef __ASSEMBLY__
++struct dyn_ftrace;
++int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
++#define ftrace_init_nop ftrace_init_nop
++#endif
++
+ #endif
+
+ #endif /* _ASM_RISCV_FTRACE_H */
+diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
+index 2ff63d0cbb500..99e12faa54986 100644
+--- a/arch/riscv/kernel/ftrace.c
++++ b/arch/riscv/kernel/ftrace.c
+@@ -97,6 +97,25 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
+ return __ftrace_modify_call(rec->ip, addr, false);
+ }
+
++
++/*
++ * This is called early on, and isn't wrapped by
++ * ftrace_arch_code_modify_{prepare,post_process}() and therefor doesn't hold
++ * text_mutex, which triggers a lockdep failure. SMP isn't running so we could
++ * just directly poke the text, but it's simpler to just take the lock
++ * ourselves.
++ */
++int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
++{
++ int out;
++
++ ftrace_arch_code_modify_prepare();
++ out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
++ ftrace_arch_code_modify_post_process();
++
++ return out;
++}
++
+ int ftrace_update_ftrace_func(ftrace_func_t func)
+ {
+ int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
+diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
+index 19d603bd1f36e..a60ab538747c8 100644
+--- a/arch/s390/include/asm/pgtable.h
++++ b/arch/s390/include/asm/pgtable.h
+@@ -1260,26 +1260,44 @@ static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
+
+ #define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
+
+-static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
++static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
+ {
+- if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
+- return (p4d_t *) pgd_deref(*pgd) + p4d_index(address);
+- return (p4d_t *) pgd;
++ if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
++ return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
++ return (p4d_t *) pgdp;
+ }
++#define p4d_offset_lockless p4d_offset_lockless
+
+-static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
++static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
+ {
+- if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
+- return (pud_t *) p4d_deref(*p4d) + pud_index(address);
+- return (pud_t *) p4d;
++ return p4d_offset_lockless(pgdp, *pgdp, address);
++}
++
++static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
++{
++ if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
++ return (pud_t *) p4d_deref(p4d) + pud_index(address);
++ return (pud_t *) p4dp;
++}
++#define pud_offset_lockless pud_offset_lockless
++
++static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
++{
++ return pud_offset_lockless(p4dp, *p4dp, address);
+ }
+ #define pud_offset pud_offset
+
+-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
++static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
++{
++ if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
++ return (pmd_t *) pud_deref(pud) + pmd_index(address);
++ return (pmd_t *) pudp;
++}
++#define pmd_offset_lockless pmd_offset_lockless
++
++static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
+ {
+- if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
+- return (pmd_t *) pud_deref(*pud) + pmd_index(address);
+- return (pmd_t *) pud;
++ return pmd_offset_lockless(pudp, *pudp, address);
+ }
+ #define pmd_offset pmd_offset
+
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index 07aa15ba43b3e..faf30f37c6361 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -619,7 +619,7 @@ static struct notifier_block kdump_mem_nb = {
+ /*
+ * Make sure that the area behind memory_end is protected
+ */
+-static void reserve_memory_end(void)
++static void __init reserve_memory_end(void)
+ {
+ if (memory_end_set)
+ memblock_reserve(memory_end, ULONG_MAX);
+@@ -628,7 +628,7 @@ static void reserve_memory_end(void)
+ /*
+ * Make sure that oldmem, where the dump is stored, is protected
+ */
+-static void reserve_oldmem(void)
++static void __init reserve_oldmem(void)
+ {
+ #ifdef CONFIG_CRASH_DUMP
+ if (OLDMEM_BASE)
+@@ -640,7 +640,7 @@ static void reserve_oldmem(void)
+ /*
+ * Make sure that oldmem, where the dump is stored, is protected
+ */
+-static void remove_oldmem(void)
++static void __init remove_oldmem(void)
+ {
+ #ifdef CONFIG_CRASH_DUMP
+ if (OLDMEM_BASE)
+diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
+index 606c4e25ee934..e290164df5ada 100644
+--- a/arch/x86/entry/common.c
++++ b/arch/x86/entry/common.c
+@@ -814,7 +814,7 @@ __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
+ old_regs = set_irq_regs(regs);
+
+ instrumentation_begin();
+- run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, NULL, regs);
++ run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
+ instrumentation_begin();
+
+ set_irq_regs(old_regs);
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index d2a00c97e53f6..20f62398477e5 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -687,6 +687,8 @@ SYM_CODE_END(.Lbad_gs)
+ * rdx: Function argument (can be NULL if none)
+ */
+ SYM_FUNC_START(asm_call_on_stack)
++SYM_INNER_LABEL(asm_call_sysvec_on_stack, SYM_L_GLOBAL)
++SYM_INNER_LABEL(asm_call_irq_on_stack, SYM_L_GLOBAL)
+ /*
+ * Save the frame pointer unconditionally. This allows the ORC
+ * unwinder to handle the stack switch.
+diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
+index 80d3b30d3ee3e..4abe2e5b3fa76 100644
+--- a/arch/x86/include/asm/idtentry.h
++++ b/arch/x86/include/asm/idtentry.h
+@@ -246,7 +246,7 @@ __visible noinstr void func(struct pt_regs *regs) \
+ instrumentation_begin(); \
+ irq_enter_rcu(); \
+ kvm_set_cpu_l1tf_flush_l1d(); \
+- run_on_irqstack_cond(__##func, regs, regs); \
++ run_sysvec_on_irqstack_cond(__##func, regs); \
+ irq_exit_rcu(); \
+ instrumentation_end(); \
+ idtentry_exit_cond_rcu(regs, rcu_exit); \
+diff --git a/arch/x86/include/asm/irq_stack.h b/arch/x86/include/asm/irq_stack.h
+index 4ae66f097101d..d95616c7e7d40 100644
+--- a/arch/x86/include/asm/irq_stack.h
++++ b/arch/x86/include/asm/irq_stack.h
+@@ -3,6 +3,7 @@
+ #define _ASM_X86_IRQ_STACK_H
+
+ #include <linux/ptrace.h>
++#include <linux/irq.h>
+
+ #include <asm/processor.h>
+
+@@ -12,20 +13,50 @@ static __always_inline bool irqstack_active(void)
+ return __this_cpu_read(irq_count) != -1;
+ }
+
+-void asm_call_on_stack(void *sp, void *func, void *arg);
++void asm_call_on_stack(void *sp, void (*func)(void), void *arg);
++void asm_call_sysvec_on_stack(void *sp, void (*func)(struct pt_regs *regs),
++ struct pt_regs *regs);
++void asm_call_irq_on_stack(void *sp, void (*func)(struct irq_desc *desc),
++ struct irq_desc *desc);
+
+-static __always_inline void __run_on_irqstack(void *func, void *arg)
++static __always_inline void __run_on_irqstack(void (*func)(void))
+ {
+ void *tos = __this_cpu_read(hardirq_stack_ptr);
+
+ __this_cpu_add(irq_count, 1);
+- asm_call_on_stack(tos - 8, func, arg);
++ asm_call_on_stack(tos - 8, func, NULL);
++ __this_cpu_sub(irq_count, 1);
++}
++
++static __always_inline void
++__run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs),
++ struct pt_regs *regs)
++{
++ void *tos = __this_cpu_read(hardirq_stack_ptr);
++
++ __this_cpu_add(irq_count, 1);
++ asm_call_sysvec_on_stack(tos - 8, func, regs);
++ __this_cpu_sub(irq_count, 1);
++}
++
++static __always_inline void
++__run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
++ struct irq_desc *desc)
++{
++ void *tos = __this_cpu_read(hardirq_stack_ptr);
++
++ __this_cpu_add(irq_count, 1);
++ asm_call_irq_on_stack(tos - 8, func, desc);
+ __this_cpu_sub(irq_count, 1);
+ }
+
+ #else /* CONFIG_X86_64 */
+ static inline bool irqstack_active(void) { return false; }
+-static inline void __run_on_irqstack(void *func, void *arg) { }
++static inline void __run_on_irqstack(void (*func)(void)) { }
++static inline void __run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs),
++ struct pt_regs *regs) { }
++static inline void __run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
++ struct irq_desc *desc) { }
+ #endif /* !CONFIG_X86_64 */
+
+ static __always_inline bool irq_needs_irq_stack(struct pt_regs *regs)
+@@ -37,17 +68,40 @@ static __always_inline bool irq_needs_irq_stack(struct pt_regs *regs)
+ return !user_mode(regs) && !irqstack_active();
+ }
+
+-static __always_inline void run_on_irqstack_cond(void *func, void *arg,
++
++static __always_inline void run_on_irqstack_cond(void (*func)(void),
+ struct pt_regs *regs)
+ {
+- void (*__func)(void *arg) = func;
++ lockdep_assert_irqs_disabled();
++
++ if (irq_needs_irq_stack(regs))
++ __run_on_irqstack(func);
++ else
++ func();
++}
++
++static __always_inline void
++run_sysvec_on_irqstack_cond(void (*func)(struct pt_regs *regs),
++ struct pt_regs *regs)
++{
++ lockdep_assert_irqs_disabled();
+
++ if (irq_needs_irq_stack(regs))
++ __run_sysvec_on_irqstack(func, regs);
++ else
++ func(regs);
++}
++
++static __always_inline void
++run_irq_on_irqstack_cond(void (*func)(struct irq_desc *desc), struct irq_desc *desc,
++ struct pt_regs *regs)
++{
+ lockdep_assert_irqs_disabled();
+
+ if (irq_needs_irq_stack(regs))
+- __run_on_irqstack(__func, arg);
++ __run_irq_on_irqstack(func, desc);
+ else
+- __func(arg);
++ func(desc);
+ }
+
+ #endif
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index 21325a4a78b92..ad4e841b4a00d 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -2243,6 +2243,7 @@ static inline void __init check_timer(void)
+ legacy_pic->init(0);
+ legacy_pic->make_irq(0);
+ apic_write(APIC_LVT0, APIC_DM_EXTINT);
++ legacy_pic->unmask(0);
+
+ unlock_ExtINT_logic();
+
+diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
+index 181060247e3cb..c5dd50369e2f3 100644
+--- a/arch/x86/kernel/irq.c
++++ b/arch/x86/kernel/irq.c
+@@ -227,7 +227,7 @@ static __always_inline void handle_irq(struct irq_desc *desc,
+ struct pt_regs *regs)
+ {
+ if (IS_ENABLED(CONFIG_X86_64))
+- run_on_irqstack_cond(desc->handle_irq, desc, regs);
++ run_irq_on_irqstack_cond(desc->handle_irq, desc, regs);
+ else
+ __handle_irq(desc, regs);
+ }
+diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
+index 1b4fe93a86c5c..440eed558558d 100644
+--- a/arch/x86/kernel/irq_64.c
++++ b/arch/x86/kernel/irq_64.c
+@@ -74,5 +74,5 @@ int irq_init_percpu_irqstack(unsigned int cpu)
+
+ void do_softirq_own_stack(void)
+ {
+- run_on_irqstack_cond(__do_softirq, NULL, NULL);
++ run_on_irqstack_cond(__do_softirq, NULL);
+ }
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index f8ead44c3265e..10aba4b6df6ed 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -2169,6 +2169,12 @@ static int iret_interception(struct vcpu_svm *svm)
+ return 1;
+ }
+
++static int invd_interception(struct vcpu_svm *svm)
++{
++ /* Treat an INVD instruction as a NOP and just skip it. */
++ return kvm_skip_emulated_instruction(&svm->vcpu);
++}
++
+ static int invlpg_interception(struct vcpu_svm *svm)
+ {
+ if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
+@@ -2758,7 +2764,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
+ [SVM_EXIT_RDPMC] = rdpmc_interception,
+ [SVM_EXIT_CPUID] = cpuid_interception,
+ [SVM_EXIT_IRET] = iret_interception,
+- [SVM_EXIT_INVD] = emulate_on_interception,
++ [SVM_EXIT_INVD] = invd_interception,
+ [SVM_EXIT_PAUSE] = pause_interception,
+ [SVM_EXIT_HLT] = halt_interception,
+ [SVM_EXIT_INVLPG] = invlpg_interception,
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index f5481ae588aff..a04f8abd0ead9 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -968,6 +968,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+ unsigned long old_cr4 = kvm_read_cr4(vcpu);
+ unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
+ X86_CR4_SMEP;
++ unsigned long mmu_role_bits = pdptr_bits | X86_CR4_SMAP | X86_CR4_PKE;
+
+ if (kvm_valid_cr4(vcpu, cr4))
+ return 1;
+@@ -995,7 +996,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+ if (kvm_x86_ops.set_cr4(vcpu, cr4))
+ return 1;
+
+- if (((cr4 ^ old_cr4) & pdptr_bits) ||
++ if (((cr4 ^ old_cr4) & mmu_role_bits) ||
+ (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
+ kvm_mmu_reset_context(vcpu);
+
+diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
+index b0dfac3d3df71..1847e993ac63a 100644
+--- a/arch/x86/lib/usercopy_64.c
++++ b/arch/x86/lib/usercopy_64.c
+@@ -120,7 +120,7 @@ long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
+ */
+ if (size < 8) {
+ if (!IS_ALIGNED(dest, 4) || size != 4)
+- clean_cache_range(dst, 1);
++ clean_cache_range(dst, size);
+ } else {
+ if (!IS_ALIGNED(dest, 8)) {
+ dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
+diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
+index 17d47ad03ab79..de50fb0541a20 100644
+--- a/drivers/atm/eni.c
++++ b/drivers/atm/eni.c
+@@ -2239,7 +2239,7 @@ static int eni_init_one(struct pci_dev *pci_dev,
+
+ rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
+ if (rc < 0)
+- goto out;
++ goto err_disable;
+
+ rc = -ENOMEM;
+ eni_dev = kmalloc(sizeof(struct eni_dev), GFP_KERNEL);
+diff --git a/drivers/base/node.c b/drivers/base/node.c
+index 5b02f69769e86..11ffb50fa875b 100644
+--- a/drivers/base/node.c
++++ b/drivers/base/node.c
+@@ -761,14 +761,36 @@ static int __ref get_nid_for_pfn(unsigned long pfn)
+ return pfn_to_nid(pfn);
+ }
+
++static int do_register_memory_block_under_node(int nid,
++ struct memory_block *mem_blk)
++{
++ int ret;
++
++ /*
++ * If this memory block spans multiple nodes, we only indicate
++ * the last processed node.
++ */
++ mem_blk->nid = nid;
++
++ ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
++ &mem_blk->dev.kobj,
++ kobject_name(&mem_blk->dev.kobj));
++ if (ret)
++ return ret;
++
++ return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
++ &node_devices[nid]->dev.kobj,
++ kobject_name(&node_devices[nid]->dev.kobj));
++}
++
+ /* register memory section under specified node if it spans that node */
+-static int register_mem_sect_under_node(struct memory_block *mem_blk,
+- void *arg)
++static int register_mem_block_under_node_early(struct memory_block *mem_blk,
++ void *arg)
+ {
+ unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE;
+ unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
+ unsigned long end_pfn = start_pfn + memory_block_pfns - 1;
+- int ret, nid = *(int *)arg;
++ int nid = *(int *)arg;
+ unsigned long pfn;
+
+ for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
+@@ -785,38 +807,33 @@ static int register_mem_sect_under_node(struct memory_block *mem_blk,
+ }
+
+ /*
+- * We need to check if page belongs to nid only for the boot
+- * case, during hotplug we know that all pages in the memory
+- * block belong to the same node.
+- */
+- if (system_state == SYSTEM_BOOTING) {
+- page_nid = get_nid_for_pfn(pfn);
+- if (page_nid < 0)
+- continue;
+- if (page_nid != nid)
+- continue;
+- }
+-
+- /*
+- * If this memory block spans multiple nodes, we only indicate
+- * the last processed node.
++ * We need to check if page belongs to nid only at the boot
++ * case because node's ranges can be interleaved.
+ */
+- mem_blk->nid = nid;
+-
+- ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
+- &mem_blk->dev.kobj,
+- kobject_name(&mem_blk->dev.kobj));
+- if (ret)
+- return ret;
++ page_nid = get_nid_for_pfn(pfn);
++ if (page_nid < 0)
++ continue;
++ if (page_nid != nid)
++ continue;
+
+- return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
+- &node_devices[nid]->dev.kobj,
+- kobject_name(&node_devices[nid]->dev.kobj));
++ return do_register_memory_block_under_node(nid, mem_blk);
+ }
+ /* mem section does not span the specified node */
+ return 0;
+ }
+
++/*
++ * During hotplug we know that all pages in the memory block belong to the same
++ * node.
++ */
++static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
++ void *arg)
++{
++ int nid = *(int *)arg;
++
++ return do_register_memory_block_under_node(nid, mem_blk);
++}
++
+ /*
+ * Unregister a memory block device under the node it spans. Memory blocks
+ * with multiple nodes cannot be offlined and therefore also never be removed.
+@@ -832,11 +849,19 @@ void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
+ kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
+ }
+
+-int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn)
++int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn,
++ enum meminit_context context)
+ {
++ walk_memory_blocks_func_t func;
++
++ if (context == MEMINIT_HOTPLUG)
++ func = register_mem_block_under_node_hotplug;
++ else
++ func = register_mem_block_under_node_early;
++
+ return walk_memory_blocks(PFN_PHYS(start_pfn),
+ PFN_PHYS(end_pfn - start_pfn), (void *)&nid,
+- register_mem_sect_under_node);
++ func);
+ }
+
+ #ifdef CONFIG_HUGETLBFS
+diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
+index 3d80c4b43f720..d7c01b70e43db 100644
+--- a/drivers/base/regmap/internal.h
++++ b/drivers/base/regmap/internal.h
+@@ -259,7 +259,7 @@ bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
+ int regcache_lookup_reg(struct regmap *map, unsigned int reg);
+
+ int _regmap_raw_write(struct regmap *map, unsigned int reg,
+- const void *val, size_t val_len);
++ const void *val, size_t val_len, bool noinc);
+
+ void regmap_async_complete_cb(struct regmap_async *async, int ret);
+
+diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
+index a93cafd7be4f2..7f4b3b62492ca 100644
+--- a/drivers/base/regmap/regcache.c
++++ b/drivers/base/regmap/regcache.c
+@@ -717,7 +717,7 @@ static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
+
+ map->cache_bypass = true;
+
+- ret = _regmap_raw_write(map, base, *data, count * val_bytes);
++ ret = _regmap_raw_write(map, base, *data, count * val_bytes, false);
+ if (ret)
+ dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n",
+ base, cur - map->reg_stride, ret);
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index 795a62a040220..9751304c5c158 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -1469,7 +1469,7 @@ static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
+ }
+
+ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
+- const void *val, size_t val_len)
++ const void *val, size_t val_len, bool noinc)
+ {
+ struct regmap_range_node *range;
+ unsigned long flags;
+@@ -1528,7 +1528,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
+ win_residue, val_len / map->format.val_bytes);
+ ret = _regmap_raw_write_impl(map, reg, val,
+ win_residue *
+- map->format.val_bytes);
++ map->format.val_bytes, noinc);
+ if (ret != 0)
+ return ret;
+
+@@ -1542,7 +1542,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
+ win_residue = range->window_len - win_offset;
+ }
+
+- ret = _regmap_select_page(map, &reg, range, val_num);
++ ret = _regmap_select_page(map, &reg, range, noinc ? 1 : val_num);
+ if (ret != 0)
+ return ret;
+ }
+@@ -1750,7 +1750,8 @@ static int _regmap_bus_raw_write(void *context, unsigned int reg,
+ map->work_buf +
+ map->format.reg_bytes +
+ map->format.pad_bytes,
+- map->format.val_bytes);
++ map->format.val_bytes,
++ false);
+ }
+
+ static inline void *_regmap_map_get_context(struct regmap *map)
+@@ -1844,7 +1845,7 @@ int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
+ EXPORT_SYMBOL_GPL(regmap_write_async);
+
+ int _regmap_raw_write(struct regmap *map, unsigned int reg,
+- const void *val, size_t val_len)
++ const void *val, size_t val_len, bool noinc)
+ {
+ size_t val_bytes = map->format.val_bytes;
+ size_t val_count = val_len / val_bytes;
+@@ -1865,7 +1866,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
+
+ /* Write as many bytes as possible with chunk_size */
+ for (i = 0; i < chunk_count; i++) {
+- ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes);
++ ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes, noinc);
+ if (ret)
+ return ret;
+
+@@ -1876,7 +1877,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
+
+ /* Write remaining bytes */
+ if (val_len)
+- ret = _regmap_raw_write_impl(map, reg, val, val_len);
++ ret = _regmap_raw_write_impl(map, reg, val, val_len, noinc);
+
+ return ret;
+ }
+@@ -1909,7 +1910,7 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
+
+ map->lock(map->lock_arg);
+
+- ret = _regmap_raw_write(map, reg, val, val_len);
++ ret = _regmap_raw_write(map, reg, val, val_len, false);
+
+ map->unlock(map->lock_arg);
+
+@@ -1967,7 +1968,7 @@ int regmap_noinc_write(struct regmap *map, unsigned int reg,
+ write_len = map->max_raw_write;
+ else
+ write_len = val_len;
+- ret = _regmap_raw_write(map, reg, val, write_len);
++ ret = _regmap_raw_write(map, reg, val, write_len, true);
+ if (ret)
+ goto out_unlock;
+ val = ((u8 *)val) + write_len;
+@@ -2444,7 +2445,7 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg,
+
+ map->async = true;
+
+- ret = _regmap_raw_write(map, reg, val, val_len);
++ ret = _regmap_raw_write(map, reg, val, val_len, false);
+
+ map->async = false;
+
+@@ -2455,7 +2456,7 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg,
+ EXPORT_SYMBOL_GPL(regmap_raw_write_async);
+
+ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
+- unsigned int val_len)
++ unsigned int val_len, bool noinc)
+ {
+ struct regmap_range_node *range;
+ int ret;
+@@ -2468,7 +2469,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
+ range = _regmap_range_lookup(map, reg);
+ if (range) {
+ ret = _regmap_select_page(map, &reg, range,
+- val_len / map->format.val_bytes);
++ noinc ? 1 : val_len / map->format.val_bytes);
+ if (ret != 0)
+ return ret;
+ }
+@@ -2506,7 +2507,7 @@ static int _regmap_bus_read(void *context, unsigned int reg,
+ if (!map->format.parse_val)
+ return -EINVAL;
+
+- ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes);
++ ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false);
+ if (ret == 0)
+ *val = map->format.parse_val(work_val);
+
+@@ -2622,7 +2623,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
+
+ /* Read bytes that fit into whole chunks */
+ for (i = 0; i < chunk_count; i++) {
+- ret = _regmap_raw_read(map, reg, val, chunk_bytes);
++ ret = _regmap_raw_read(map, reg, val, chunk_bytes, false);
+ if (ret != 0)
+ goto out;
+
+@@ -2633,7 +2634,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
+
+ /* Read remaining bytes */
+ if (val_len) {
+- ret = _regmap_raw_read(map, reg, val, val_len);
++ ret = _regmap_raw_read(map, reg, val, val_len, false);
+ if (ret != 0)
+ goto out;
+ }
+@@ -2708,7 +2709,7 @@ int regmap_noinc_read(struct regmap *map, unsigned int reg,
+ read_len = map->max_raw_read;
+ else
+ read_len = val_len;
+- ret = _regmap_raw_read(map, reg, val, read_len);
++ ret = _regmap_raw_read(map, reg, val, read_len, true);
+ if (ret)
+ goto out_unlock;
+ val = ((u8 *)val) + read_len;
+diff --git a/drivers/clk/versatile/clk-impd1.c b/drivers/clk/versatile/clk-impd1.c
+index ca798249544d0..85c395df9c008 100644
+--- a/drivers/clk/versatile/clk-impd1.c
++++ b/drivers/clk/versatile/clk-impd1.c
+@@ -109,8 +109,10 @@ static int integrator_impd1_clk_probe(struct platform_device *pdev)
+
+ for_each_available_child_of_node(np, child) {
+ ret = integrator_impd1_clk_spawn(dev, np, child);
+- if (ret)
++ if (ret) {
++ of_node_put(child);
+ break;
++ }
+ }
+
+ return ret;
+diff --git a/drivers/clocksource/h8300_timer8.c b/drivers/clocksource/h8300_timer8.c
+index 1d740a8c42ab3..47114c2a7cb54 100644
+--- a/drivers/clocksource/h8300_timer8.c
++++ b/drivers/clocksource/h8300_timer8.c
+@@ -169,7 +169,7 @@ static int __init h8300_8timer_init(struct device_node *node)
+ return PTR_ERR(clk);
+ }
+
+- ret = ENXIO;
++ ret = -ENXIO;
+ base = of_iomap(node, 0);
+ if (!base) {
+ pr_err("failed to map registers for clockevent\n");
+diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c
+index f6fd1c1cc527f..33b3e8aa2cc50 100644
+--- a/drivers/clocksource/timer-ti-dm-systimer.c
++++ b/drivers/clocksource/timer-ti-dm-systimer.c
+@@ -69,12 +69,33 @@ static bool dmtimer_systimer_revision1(struct dmtimer_systimer *t)
+ return !(tidr >> 16);
+ }
+
++static void dmtimer_systimer_enable(struct dmtimer_systimer *t)
++{
++ u32 val;
++
++ if (dmtimer_systimer_revision1(t))
++ val = DMTIMER_TYPE1_ENABLE;
++ else
++ val = DMTIMER_TYPE2_ENABLE;
++
++ writel_relaxed(val, t->base + t->sysc);
++}
++
++static void dmtimer_systimer_disable(struct dmtimer_systimer *t)
++{
++ if (!dmtimer_systimer_revision1(t))
++ return;
++
++ writel_relaxed(DMTIMER_TYPE1_DISABLE, t->base + t->sysc);
++}
++
+ static int __init dmtimer_systimer_type1_reset(struct dmtimer_systimer *t)
+ {
+ void __iomem *syss = t->base + OMAP_TIMER_V1_SYS_STAT_OFFSET;
+ int ret;
+ u32 l;
+
++ dmtimer_systimer_enable(t);
+ writel_relaxed(BIT(1) | BIT(2), t->base + t->ifctrl);
+ ret = readl_poll_timeout_atomic(syss, l, l & BIT(0), 100,
+ DMTIMER_RESET_WAIT);
+@@ -88,6 +109,7 @@ static int __init dmtimer_systimer_type2_reset(struct dmtimer_systimer *t)
+ void __iomem *sysc = t->base + t->sysc;
+ u32 l;
+
++ dmtimer_systimer_enable(t);
+ l = readl_relaxed(sysc);
+ l |= BIT(0);
+ writel_relaxed(l, sysc);
+@@ -336,26 +358,6 @@ static int __init dmtimer_systimer_init_clock(struct dmtimer_systimer *t,
+ return 0;
+ }
+
+-static void dmtimer_systimer_enable(struct dmtimer_systimer *t)
+-{
+- u32 val;
+-
+- if (dmtimer_systimer_revision1(t))
+- val = DMTIMER_TYPE1_ENABLE;
+- else
+- val = DMTIMER_TYPE2_ENABLE;
+-
+- writel_relaxed(val, t->base + t->sysc);
+-}
+-
+-static void dmtimer_systimer_disable(struct dmtimer_systimer *t)
+-{
+- if (!dmtimer_systimer_revision1(t))
+- return;
+-
+- writel_relaxed(DMTIMER_TYPE1_DISABLE, t->base + t->sysc);
+-}
+-
+ static int __init dmtimer_systimer_setup(struct device_node *np,
+ struct dmtimer_systimer *t)
+ {
+@@ -409,8 +411,8 @@ static int __init dmtimer_systimer_setup(struct device_node *np,
+ t->wakeup = regbase + _OMAP_TIMER_WAKEUP_EN_OFFSET;
+ t->ifctrl = regbase + _OMAP_TIMER_IF_CTRL_OFFSET;
+
+- dmtimer_systimer_enable(t);
+ dmtimer_systimer_reset(t);
++ dmtimer_systimer_enable(t);
+ pr_debug("dmtimer rev %08x sysc %08x\n", readl_relaxed(t->base),
+ readl_relaxed(t->base + t->sysc));
+
+diff --git a/drivers/devfreq/tegra30-devfreq.c b/drivers/devfreq/tegra30-devfreq.c
+index e94a27804c209..dedd39de73675 100644
+--- a/drivers/devfreq/tegra30-devfreq.c
++++ b/drivers/devfreq/tegra30-devfreq.c
+@@ -836,7 +836,8 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
+ rate = clk_round_rate(tegra->emc_clock, ULONG_MAX);
+ if (rate < 0) {
+ dev_err(&pdev->dev, "Failed to round clock rate: %ld\n", rate);
+- return rate;
++ err = rate;
++ goto disable_clk;
+ }
+
+ tegra->max_freq = rate / KHZ;
+@@ -897,6 +898,7 @@ remove_opps:
+ dev_pm_opp_remove_all_dynamic(&pdev->dev);
+
+ reset_control_reset(tegra->reset);
++disable_clk:
+ clk_disable_unprepare(tegra->clock);
+
+ return err;
+diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
+index 1ca609f66fdf8..241c4b48d6099 100644
+--- a/drivers/dma-buf/dma-buf.c
++++ b/drivers/dma-buf/dma-buf.c
+@@ -59,6 +59,8 @@ static void dma_buf_release(struct dentry *dentry)
+ struct dma_buf *dmabuf;
+
+ dmabuf = dentry->d_fsdata;
++ if (unlikely(!dmabuf))
++ return;
+
+ BUG_ON(dmabuf->vmapping_counter);
+
+diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
+index cb3dab56a875d..efad23575b16b 100644
+--- a/drivers/edac/ghes_edac.c
++++ b/drivers/edac/ghes_edac.c
+@@ -469,6 +469,7 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
+ if (!force_load && idx < 0)
+ return -ENODEV;
+ } else {
++ force_load = true;
+ idx = 0;
+ }
+
+@@ -566,6 +567,9 @@ void ghes_edac_unregister(struct ghes *ghes)
+ struct mem_ctl_info *mci;
+ unsigned long flags;
+
++ if (!force_load)
++ return;
++
+ mutex_lock(&ghes_reg_mutex);
+
+ if (!refcount_dec_and_test(&ghes_refcount))
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index aa1e0f0550835..6b00cdbb08368 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -1177,6 +1177,8 @@ static int stop_cpsch(struct device_queue_manager *dqm)
+ dqm->sched_running = false;
+ dqm_unlock(dqm);
+
++ pm_release_ib(&dqm->packets);
++
+ kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
+ pm_uninit(&dqm->packets, hanging);
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 3f7eced92c0c8..7c1cc0ba30a55 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -5257,19 +5257,6 @@ static void dm_crtc_helper_disable(struct drm_crtc *crtc)
+ {
+ }
+
+-static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
+-{
+- struct drm_device *dev = new_crtc_state->crtc->dev;
+- struct drm_plane *plane;
+-
+- drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
+- if (plane->type == DRM_PLANE_TYPE_CURSOR)
+- return true;
+- }
+-
+- return false;
+-}
+-
+ static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
+ {
+ struct drm_atomic_state *state = new_crtc_state->state;
+@@ -5349,19 +5336,20 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
+ return ret;
+ }
+
+- /* In some use cases, like reset, no stream is attached */
+- if (!dm_crtc_state->stream)
+- return 0;
+-
+ /*
+- * We want at least one hardware plane enabled to use
+- * the stream with a cursor enabled.
++ * We require the primary plane to be enabled whenever the CRTC is, otherwise
++ * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
++ * planes are disabled, which is not supported by the hardware. And there is legacy
++ * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
+ */
+- if (state->enable && state->active &&
+- does_crtc_have_active_cursor(state) &&
+- dm_crtc_state->active_planes == 0)
++ if (state->enable &&
++ !(state->plane_mask & drm_plane_mask(crtc->primary)))
+ return -EINVAL;
+
++ /* In some use cases, like reset, no stream is attached */
++ if (!dm_crtc_state->stream)
++ return 0;
++
+ if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
+ return 0;
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+index 2d9055eb3ce92..20bdabebbc434 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+@@ -409,8 +409,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
+ },
+ },
+ .num_states = 5,
+- .sr_exit_time_us = 8.6,
+- .sr_enter_plus_exit_time_us = 10.9,
++ .sr_exit_time_us = 11.6,
++ .sr_enter_plus_exit_time_us = 13.9,
+ .urgent_latency_us = 4.0,
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
+index d3192b9d0c3d8..47f8ee2832ff0 100644
+--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
++++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
+@@ -27,7 +27,7 @@
+ #define MOD_HDCP_LOG_H_
+
+ #ifdef CONFIG_DRM_AMD_DC_HDCP
+-#define HDCP_LOG_ERR(hdcp, ...) DRM_WARN(__VA_ARGS__)
++#define HDCP_LOG_ERR(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
+ #define HDCP_LOG_VER(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
+ #define HDCP_LOG_FSM(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
+ #define HDCP_LOG_TOP(hdcp, ...) pr_debug("[HDCP_TOP]:"__VA_ARGS__)
+diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+index fb1161dd7ea80..3a367a5968ae1 100644
+--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
++++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+@@ -88,7 +88,7 @@ enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct mod_hdcp *hdcp,
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (!psp->dtm_context.dtm_initialized) {
+- DRM_ERROR("Failed to add display topology, DTM TA is not initialized.");
++ DRM_INFO("Failed to add display topology, DTM TA is not initialized.");
+ display->state = MOD_HDCP_DISPLAY_INACTIVE;
+ return MOD_HDCP_STATUS_FAILURE;
+ }
+diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.h b/drivers/gpu/drm/sun4i/sun8i_csc.h
+index f42441b1b14dd..a55a38ad849c1 100644
+--- a/drivers/gpu/drm/sun4i/sun8i_csc.h
++++ b/drivers/gpu/drm/sun4i/sun8i_csc.h
+@@ -12,7 +12,7 @@ struct sun8i_mixer;
+
+ /* VI channel CSC units offsets */
+ #define CCSC00_OFFSET 0xAA050
+-#define CCSC01_OFFSET 0xFA000
++#define CCSC01_OFFSET 0xFA050
+ #define CCSC10_OFFSET 0xA0000
+ #define CCSC11_OFFSET 0xF0000
+
+diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
+index 625bfcf52dc4d..bdcc54c87d7e8 100644
+--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
+@@ -1117,6 +1117,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi)
+ card->num_links = 1;
+ card->name = "vc4-hdmi";
+ card->dev = dev;
++ card->owner = THIS_MODULE;
+
+ /*
+ * Be careful, snd_soc_register_card() calls dev_set_drvdata() and
+diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
+index f51702d86a90e..1ad74efcab372 100644
+--- a/drivers/i2c/busses/i2c-aspeed.c
++++ b/drivers/i2c/busses/i2c-aspeed.c
+@@ -69,6 +69,7 @@
+ * These share bit definitions, so use the same values for the enable &
+ * status bits.
+ */
++#define ASPEED_I2CD_INTR_RECV_MASK 0xf000ffff
+ #define ASPEED_I2CD_INTR_SDA_DL_TIMEOUT BIT(14)
+ #define ASPEED_I2CD_INTR_BUS_RECOVER_DONE BIT(13)
+ #define ASPEED_I2CD_INTR_SLAVE_MATCH BIT(7)
+@@ -604,6 +605,7 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
+ writel(irq_received & ~ASPEED_I2CD_INTR_RX_DONE,
+ bus->base + ASPEED_I2C_INTR_STS_REG);
+ readl(bus->base + ASPEED_I2C_INTR_STS_REG);
++ irq_received &= ASPEED_I2CD_INTR_RECV_MASK;
+ irq_remaining = irq_received;
+
+ #if IS_ENABLED(CONFIG_I2C_SLAVE)
+diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
+index b099139cbb91e..f9e62c958cf69 100644
+--- a/drivers/i2c/busses/i2c-mt65xx.c
++++ b/drivers/i2c/busses/i2c-mt65xx.c
+@@ -736,7 +736,7 @@ static int mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk)
+ for (clk_div = 1; clk_div <= max_clk_div; clk_div++) {
+ clk_src = parent_clk / clk_div;
+
+- if (target_speed > I2C_MAX_FAST_MODE_FREQ) {
++ if (target_speed > I2C_MAX_FAST_MODE_PLUS_FREQ) {
+ /* Set master code speed register */
+ ret = mtk_i2c_calculate_speed(i2c, clk_src,
+ I2C_MAX_FAST_MODE_FREQ,
+diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
+index 4f09d4c318287..7031393c74806 100644
+--- a/drivers/i2c/i2c-core-base.c
++++ b/drivers/i2c/i2c-core-base.c
+@@ -1336,8 +1336,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
+
+ /* create pre-declared device nodes */
+ of_i2c_register_devices(adap);
+- i2c_acpi_register_devices(adap);
+ i2c_acpi_install_space_handler(adap);
++ i2c_acpi_register_devices(adap);
+
+ if (adap->nr < __i2c_first_dynamic_bus_num)
+ i2c_scan_static_board_info(adap);
+diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
+index eadba29432dd7..abcfe4dc1284f 100644
+--- a/drivers/infiniband/core/device.c
++++ b/drivers/infiniband/core/device.c
+@@ -1282,6 +1282,8 @@ static void disable_device(struct ib_device *device)
+ remove_client_context(device, cid);
+ }
+
++ ib_cq_pool_destroy(device);
++
+ /* Pairs with refcount_set in enable_device */
+ ib_device_put(device);
+ wait_for_completion(&device->unreg_completion);
+@@ -1325,6 +1327,8 @@ static int enable_device_and_get(struct ib_device *device)
+ goto out;
+ }
+
++ ib_cq_pool_init(device);
++
+ down_read(&clients_rwsem);
+ xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) {
+ ret = add_client_context(device, client);
+@@ -1397,7 +1401,6 @@ int ib_register_device(struct ib_device *device, const char *name)
+ goto dev_cleanup;
+ }
+
+- ib_cq_pool_init(device);
+ ret = enable_device_and_get(device);
+ dev_set_uevent_suppress(&device->dev, false);
+ /* Mark for userspace that device is ready */
+@@ -1452,7 +1455,6 @@ static void __ib_unregister_device(struct ib_device *ib_dev)
+ goto out;
+
+ disable_device(ib_dev);
+- ib_cq_pool_destroy(ib_dev);
+
+ /* Expedite removing unregistered pointers from the hash table */
+ free_netdevs(ib_dev);
1366 +diff --git a/drivers/md/dm.c b/drivers/md/dm.c
1367 +index 49c758fef8cb6..548ad06094e98 100644
1368 +--- a/drivers/md/dm.c
1369 ++++ b/drivers/md/dm.c
1370 +@@ -1728,23 +1728,6 @@ out:
1371 + return ret;
1372 + }
1373 +
1374 +-static void dm_queue_split(struct mapped_device *md, struct dm_target *ti, struct bio **bio)
1375 +-{
1376 +- unsigned len, sector_count;
1377 +-
1378 +- sector_count = bio_sectors(*bio);
1379 +- len = min_t(sector_t, max_io_len((*bio)->bi_iter.bi_sector, ti), sector_count);
1380 +-
1381 +- if (sector_count > len) {
1382 +- struct bio *split = bio_split(*bio, len, GFP_NOIO, &md->queue->bio_split);
1383 +-
1384 +- bio_chain(split, *bio);
1385 +- trace_block_split(md->queue, split, (*bio)->bi_iter.bi_sector);
1386 +- generic_make_request(*bio);
1387 +- *bio = split;
1388 +- }
1389 +-}
1390 +-
1391 + static blk_qc_t dm_process_bio(struct mapped_device *md,
1392 + struct dm_table *map, struct bio *bio)
1393 + {
1394 +@@ -1772,14 +1755,12 @@ static blk_qc_t dm_process_bio(struct mapped_device *md,
1395 + if (current->bio_list) {
1396 + if (is_abnormal_io(bio))
1397 + blk_queue_split(md->queue, &bio);
1398 +- else
1399 +- dm_queue_split(md, ti, &bio);
1400 ++ /* regular IO is split by __split_and_process_bio */
1401 + }
1402 +
1403 + if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
1404 + return __process_bio(md, map, bio, ti);
1405 +- else
1406 +- return __split_and_process_bio(md, map, bio);
1407 ++ return __split_and_process_bio(md, map, bio);
1408 + }
1409 +
1410 + static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
1411 +diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c
1412 +index 6a04d19a96b2e..accc893243295 100644
1413 +--- a/drivers/media/cec/core/cec-adap.c
1414 ++++ b/drivers/media/cec/core/cec-adap.c
1415 +@@ -1199,7 +1199,7 @@ void cec_received_msg_ts(struct cec_adapter *adap,
1416 + /* Cancel the pending timeout work */
1417 + if (!cancel_delayed_work(&data->work)) {
1418 + mutex_unlock(&adap->lock);
1419 +- flush_scheduled_work();
1420 ++ cancel_delayed_work_sync(&data->work);
1421 + mutex_lock(&adap->lock);
1422 + }
1423 + /*
1424 +diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
1425 +index 5dbc5a156626a..206b73aa6d7a7 100644
1426 +--- a/drivers/net/ethernet/intel/igc/igc.h
1427 ++++ b/drivers/net/ethernet/intel/igc/igc.h
1428 +@@ -298,18 +298,14 @@ extern char igc_driver_version[];
1429 + #define IGC_RX_HDR_LEN IGC_RXBUFFER_256
1430 +
1431 + /* Transmit and receive latency (for PTP timestamps) */
1432 +-/* FIXME: These values were estimated using the ones that i225 has as
1433 +- * basis, they seem to provide good numbers with ptp4l/phc2sys, but we
1434 +- * need to confirm them.
1435 +- */
1436 +-#define IGC_I225_TX_LATENCY_10 9542
1437 +-#define IGC_I225_TX_LATENCY_100 1024
1438 +-#define IGC_I225_TX_LATENCY_1000 178
1439 +-#define IGC_I225_TX_LATENCY_2500 64
1440 +-#define IGC_I225_RX_LATENCY_10 20662
1441 +-#define IGC_I225_RX_LATENCY_100 2213
1442 +-#define IGC_I225_RX_LATENCY_1000 448
1443 +-#define IGC_I225_RX_LATENCY_2500 160
1444 ++#define IGC_I225_TX_LATENCY_10 240
1445 ++#define IGC_I225_TX_LATENCY_100 58
1446 ++#define IGC_I225_TX_LATENCY_1000 80
1447 ++#define IGC_I225_TX_LATENCY_2500 1325
1448 ++#define IGC_I225_RX_LATENCY_10 6450
1449 ++#define IGC_I225_RX_LATENCY_100 185
1450 ++#define IGC_I225_RX_LATENCY_1000 300
1451 ++#define IGC_I225_RX_LATENCY_2500 1485
1452 +
1453 + /* RX and TX descriptor control thresholds.
1454 + * PTHRESH - MAC will consider prefetch if it has fewer than this number of
1455 +diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
1456 +index 61e38853aa47d..9f191a7f3c71a 100644
1457 +--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
1458 ++++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
1459 +@@ -471,12 +471,31 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
1460 + struct sk_buff *skb = adapter->ptp_tx_skb;
1461 + struct skb_shared_hwtstamps shhwtstamps;
1462 + struct igc_hw *hw = &adapter->hw;
1463 ++ int adjust = 0;
1464 + u64 regval;
1465 +
1466 + regval = rd32(IGC_TXSTMPL);
1467 + regval |= (u64)rd32(IGC_TXSTMPH) << 32;
1468 + igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
1469 +
1470 ++ switch (adapter->link_speed) {
1471 ++ case SPEED_10:
1472 ++ adjust = IGC_I225_TX_LATENCY_10;
1473 ++ break;
1474 ++ case SPEED_100:
1475 ++ adjust = IGC_I225_TX_LATENCY_100;
1476 ++ break;
1477 ++ case SPEED_1000:
1478 ++ adjust = IGC_I225_TX_LATENCY_1000;
1479 ++ break;
1480 ++ case SPEED_2500:
1481 ++ adjust = IGC_I225_TX_LATENCY_2500;
1482 ++ break;
1483 ++ }
1484 ++
1485 ++ shhwtstamps.hwtstamp =
1486 ++ ktime_add_ns(shhwtstamps.hwtstamp, adjust);
1487 ++
1488 + /* Clear the lock early before calling skb_tstamp_tx so that
1489 + * applications are not woken up before the lock bit is clear. We use
1490 + * a copy of the skb pointer to ensure other threads can't change it
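
The igc hunk above compensates the raw TX hardware timestamp for the
PHY latency of the active link speed (via ktime_add_ns()) before the
timestamp reaches applications through skb_tstamp_tx(). A minimal
userspace sketch of the same pattern, reusing the nanosecond values
from the igc.h hunk; the SPEED_* constants and the tx_latency_ns()
helper are illustrative stand-ins, not kernel API:

#include <stdint.h>
#include <stdio.h>

enum { SPEED_10 = 10, SPEED_100 = 100, SPEED_1000 = 1000, SPEED_2500 = 2500 };

static int64_t tx_latency_ns(int link_speed)
{
        switch (link_speed) {
        case SPEED_10:   return 240;   /* IGC_I225_TX_LATENCY_10 */
        case SPEED_100:  return 58;    /* IGC_I225_TX_LATENCY_100 */
        case SPEED_1000: return 80;    /* IGC_I225_TX_LATENCY_1000 */
        case SPEED_2500: return 1325;  /* IGC_I225_TX_LATENCY_2500 */
        default:         return 0;     /* unknown speed: no adjustment */
        }
}

int main(void)
{
        int64_t hwtstamp = 1000000000LL;        /* raw MAC timestamp, ns */

        hwtstamp += tx_latency_ns(SPEED_1000);  /* mirrors ktime_add_ns() */
        printf("adjusted timestamp: %lld ns\n", (long long)hwtstamp);
        return 0;
}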
1491 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
1492 +index 3cf3e35053f77..98e909bf3c1ec 100644
1493 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
1494 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
1495 +@@ -487,11 +487,8 @@ bool mlx5e_fec_in_caps(struct mlx5_core_dev *dev, int fec_policy)
1496 + int err;
1497 + int i;
1498 +
1499 +- if (!MLX5_CAP_GEN(dev, pcam_reg))
1500 +- return -EOPNOTSUPP;
1501 +-
1502 +- if (!MLX5_CAP_PCAM_REG(dev, pplm))
1503 +- return -EOPNOTSUPP;
1504 ++ if (!MLX5_CAP_GEN(dev, pcam_reg) || !MLX5_CAP_PCAM_REG(dev, pplm))
1505 ++ return false;
1506 +
1507 + MLX5_SET(pplm_reg, in, local_port, 1);
1508 + err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 0);
1509 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
1510 +index dbdac983ccde5..105d9afe825f1 100644
1511 +--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
1512 ++++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
1513 +@@ -4191,7 +4191,8 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1514 + cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) |
1515 + BIT(QED_MF_LLH_PROTO_CLSS) |
1516 + BIT(QED_MF_LL2_NON_UNICAST) |
1517 +- BIT(QED_MF_INTER_PF_SWITCH);
1518 ++ BIT(QED_MF_INTER_PF_SWITCH) |
1519 ++ BIT(QED_MF_DISABLE_ARFS);
1520 + break;
1521 + case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
1522 + cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) |
1523 +@@ -4204,6 +4205,14 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1524 +
1525 + DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
1526 + cdev->mf_bits);
1527 ++
1528 ++ /* In CMT the PF is unknown when the GFS block processes the
1536 ++ * packet. Therefore the searcher cannot be used, as it
1537 ++ * keeps a per-PF database, and thus ARFS must be
1538 ++ * disabled.
1539 ++ */
1533 ++ if (QED_IS_CMT(cdev))
1534 ++ cdev->mf_bits |= BIT(QED_MF_DISABLE_ARFS);
1535 + }
1536 +
1537 + DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
1538 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
1539 +index 29810a1aa2106..b2cd153321720 100644
1540 +--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
1541 ++++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
1542 +@@ -2001,6 +2001,9 @@ void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
1543 + struct qed_ptt *p_ptt,
1544 + struct qed_arfs_config_params *p_cfg_params)
1545 + {
1546 ++ if (test_bit(QED_MF_DISABLE_ARFS, &p_hwfn->cdev->mf_bits))
1547 ++ return;
1548 ++
1549 + if (p_cfg_params->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
1550 + qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
1551 + p_cfg_params->tcp,
1552 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
1553 +index 11367a248d55e..05eff348b22a8 100644
1554 +--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
1555 ++++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
1556 +@@ -289,6 +289,8 @@ int qed_fill_dev_info(struct qed_dev *cdev,
1557 + dev_info->fw_eng = FW_ENGINEERING_VERSION;
1558 + dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
1559 + &cdev->mf_bits);
1560 ++ if (!test_bit(QED_MF_DISABLE_ARFS, &cdev->mf_bits))
1561 ++ dev_info->b_arfs_capable = true;
1562 + dev_info->tx_switching = true;
1563 +
1564 + if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
1565 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
1566 +index 20679fd4204be..229c6f3ff3935 100644
1567 +--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
1568 ++++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
1569 +@@ -97,6 +97,7 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
1570 + p_ramrod->personality = PERSONALITY_ETH;
1571 + break;
1572 + case QED_PCI_ETH_ROCE:
1573 ++ case QED_PCI_ETH_IWARP:
1574 + p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
1575 + break;
1576 + default:
1577 +diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
1578 +index fe72bb6c9455e..203cc76214c70 100644
1579 +--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
1580 ++++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
1581 +@@ -336,6 +336,9 @@ int qede_alloc_arfs(struct qede_dev *edev)
1582 + {
1583 + int i;
1584 +
1585 ++ if (!edev->dev_info.common.b_arfs_capable)
1586 ++ return -EINVAL;
1587 ++
1588 + edev->arfs = vzalloc(sizeof(*edev->arfs));
1589 + if (!edev->arfs)
1590 + return -ENOMEM;
1591 +diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
1592 +index 29e285430f995..082055ee2d397 100644
1593 +--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
1594 ++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
1595 +@@ -827,7 +827,7 @@ static void qede_init_ndev(struct qede_dev *edev)
1596 + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1597 + NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_TC;
1598 +
1599 +- if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1)
1600 ++ if (edev->dev_info.common.b_arfs_capable)
1601 + hw_features |= NETIF_F_NTUPLE;
1602 +
1603 + if (edev->dev_info.common.vxlan_enable ||
1604 +@@ -2278,7 +2278,7 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
1605 + qede_vlan_mark_nonconfigured(edev);
1606 + edev->ops->fastpath_stop(edev->cdev);
1607 +
1608 +- if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
1609 ++ if (edev->dev_info.common.b_arfs_capable) {
1610 + qede_poll_for_freeing_arfs_filters(edev);
1611 + qede_free_arfs(edev);
1612 + }
1613 +@@ -2345,10 +2345,9 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
1614 + if (rc)
1615 + goto err2;
1616 +
1617 +- if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
1618 +- rc = qede_alloc_arfs(edev);
1619 +- if (rc)
1620 +- DP_NOTICE(edev, "aRFS memory allocation failed\n");
1621 ++ if (qede_alloc_arfs(edev)) {
1622 ++ edev->ndev->features &= ~NETIF_F_NTUPLE;
1623 ++ edev->dev_info.common.b_arfs_capable = false;
1624 + }
1625 +
1626 + qede_napi_add_enable(edev);
1627 +diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
1628 +index 8309194b351a9..a2db5ef3b62a2 100644
1629 +--- a/drivers/net/hyperv/netvsc_drv.c
1630 ++++ b/drivers/net/hyperv/netvsc_drv.c
1631 +@@ -2576,7 +2576,6 @@ static int netvsc_resume(struct hv_device *dev)
1632 + struct net_device *net = hv_get_drvdata(dev);
1633 + struct net_device_context *net_device_ctx;
1634 + struct netvsc_device_info *device_info;
1635 +- struct net_device *vf_netdev;
1636 + int ret;
1637 +
1638 + rtnl_lock();
1639 +@@ -2589,15 +2588,6 @@ static int netvsc_resume(struct hv_device *dev)
1640 + netvsc_devinfo_put(device_info);
1641 + net_device_ctx->saved_netvsc_dev_info = NULL;
1642 +
1643 +- /* A NIC driver (e.g. mlx5) may keep the VF network interface across
1644 +- * hibernation, but here the data path is implicitly switched to the
1645 +- * netvsc NIC since the vmbus channel is closed and re-opened, so
1646 +- * netvsc_vf_changed() must be used to switch the data path to the VF.
1647 +- */
1648 +- vf_netdev = rtnl_dereference(net_device_ctx->vf_netdev);
1649 +- if (vf_netdev && netvsc_vf_changed(vf_netdev) != NOTIFY_OK)
1650 +- ret = -EINVAL;
1651 +-
1652 + rtnl_unlock();
1653 +
1654 + return ret;
1655 +@@ -2658,6 +2648,7 @@ static int netvsc_netdev_event(struct notifier_block *this,
1656 + return netvsc_unregister_vf(event_dev);
1657 + case NETDEV_UP:
1658 + case NETDEV_DOWN:
1659 ++ case NETDEV_CHANGE:
1660 + return netvsc_vf_changed(event_dev);
1661 + default:
1662 + return NOTIFY_DONE;
1663 +diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
1664 +index c11f32f644db3..7db9cbd0f5ded 100644
1665 +--- a/drivers/net/ieee802154/adf7242.c
1666 ++++ b/drivers/net/ieee802154/adf7242.c
1667 +@@ -882,7 +882,9 @@ static int adf7242_rx(struct adf7242_local *lp)
1668 + int ret;
1669 + u8 lqi, len_u8, *data;
1670 +
1671 +- adf7242_read_reg(lp, 0, &len_u8);
1672 ++ ret = adf7242_read_reg(lp, 0, &len_u8);
1673 ++ if (ret)
1674 ++ return ret;
1675 +
1676 + len = len_u8;
1677 +
1678 +diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
1679 +index e04c3b60cae78..4eb64709d44cb 100644
1680 +--- a/drivers/net/ieee802154/ca8210.c
1681 ++++ b/drivers/net/ieee802154/ca8210.c
1682 +@@ -2925,6 +2925,7 @@ static int ca8210_dev_com_init(struct ca8210_priv *priv)
1683 + );
1684 + if (!priv->irq_workqueue) {
1685 + dev_crit(&priv->spi->dev, "alloc of irq_workqueue failed!\n");
1686 ++ destroy_workqueue(priv->mlme_workqueue);
1687 + return -ENOMEM;
1688 + }
1689 +
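
The ca8210 fix above adds the missing unwind on the error path: if the
second workqueue cannot be allocated, the first one must be destroyed
before returning -ENOMEM, or it leaks. The same unwind-on-error shape
in a self-contained sketch, with plain malloc()/free() standing in for
the driver's workqueue alloc/destroy_workqueue() calls:

#include <stdio.h>
#include <stdlib.h>

static int init_two_buffers(char **a, char **b)
{
        *a = malloc(64);
        if (!*a)
                return -1;

        *b = malloc(64);
        if (!*b) {
                free(*a);       /* undo the allocation that succeeded */
                *a = NULL;
                return -1;      /* mirrors returning -ENOMEM */
        }
        return 0;
}

int main(void)
{
        char *a, *b;

        if (init_two_buffers(&a, &b) == 0) {
                puts("both allocated");
                free(b);
                free(a);
        }
        return 0;
}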
1690 +diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
1691 +index 8047e307892e3..d9f8bdbc817b2 100644
1692 +--- a/drivers/net/wireless/marvell/mwifiex/fw.h
1693 ++++ b/drivers/net/wireless/marvell/mwifiex/fw.h
1694 +@@ -954,7 +954,7 @@ struct mwifiex_tkip_param {
1695 + struct mwifiex_aes_param {
1696 + u8 pn[WPA_PN_SIZE];
1697 + __le16 key_len;
1698 +- u8 key[WLAN_KEY_LEN_CCMP];
1699 ++ u8 key[WLAN_KEY_LEN_CCMP_256];
1700 + } __packed;
1701 +
1702 + struct mwifiex_wapi_param {
1703 +diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
1704 +index 962d8bfe6f101..119ccacd1fcc4 100644
1705 +--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
1706 ++++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
1707 +@@ -619,7 +619,7 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv,
1708 + key_v2 = &resp->params.key_material_v2;
1709 +
1710 + len = le16_to_cpu(key_v2->key_param_set.key_params.aes.key_len);
1711 +- if (len > WLAN_KEY_LEN_CCMP)
1712 ++ if (len > sizeof(key_v2->key_param_set.key_params.aes.key))
1713 + return -EINVAL;
1714 +
1715 + if (le16_to_cpu(key_v2->action) == HostCmd_ACT_GEN_SET) {
1716 +@@ -635,7 +635,7 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv,
1717 + return 0;
1718 +
1719 + memset(priv->aes_key_v2.key_param_set.key_params.aes.key, 0,
1720 +- WLAN_KEY_LEN_CCMP);
1721 ++ sizeof(key_v2->key_param_set.key_params.aes.key));
1722 + priv->aes_key_v2.key_param_set.key_params.aes.key_len =
1723 + cpu_to_le16(len);
1724 + memcpy(priv->aes_key_v2.key_param_set.key_params.aes.key,
1725 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
1726 +index cb8c1d80ead92..72ad1426c45fc 100644
1727 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
1728 ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
1729 +@@ -2014,7 +2014,8 @@ static int mt7615_load_n9(struct mt7615_dev *dev, const char *name)
1730 + sizeof(dev->mt76.hw->wiphy->fw_version),
1731 + "%.10s-%.15s", hdr->fw_ver, hdr->build_date);
1732 +
1733 +- if (!strncmp(hdr->fw_ver, "2.0", sizeof(hdr->fw_ver))) {
1734 ++ if (!is_mt7615(&dev->mt76) &&
1735 ++ !strncmp(hdr->fw_ver, "2.0", sizeof(hdr->fw_ver))) {
1736 + dev->fw_ver = MT7615_FIRMWARE_V2;
1737 + dev->mcu_ops = &sta_update_ops;
1738 + } else {
1739 +diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
1740 +index 3ed9786b88d8e..a44d49d63968a 100644
1741 +--- a/drivers/nvme/host/Kconfig
1742 ++++ b/drivers/nvme/host/Kconfig
1743 +@@ -73,6 +73,7 @@ config NVME_TCP
1744 + depends on INET
1745 + depends on BLK_DEV_NVME
1746 + select NVME_FABRICS
1747 ++ select CRYPTO
1748 + select CRYPTO_CRC32C
1749 + help
1750 + This provides support for the NVMe over Fabrics protocol using
1751 +diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
1752 +index fbc95cadaf539..126649c172e11 100644
1753 +--- a/drivers/regulator/axp20x-regulator.c
1754 ++++ b/drivers/regulator/axp20x-regulator.c
1755 +@@ -42,8 +42,9 @@
1756 +
1757 + #define AXP20X_DCDC2_V_OUT_MASK GENMASK(5, 0)
1758 + #define AXP20X_DCDC3_V_OUT_MASK GENMASK(7, 0)
1759 +-#define AXP20X_LDO24_V_OUT_MASK GENMASK(7, 4)
1760 ++#define AXP20X_LDO2_V_OUT_MASK GENMASK(7, 4)
1761 + #define AXP20X_LDO3_V_OUT_MASK GENMASK(6, 0)
1762 ++#define AXP20X_LDO4_V_OUT_MASK GENMASK(3, 0)
1763 + #define AXP20X_LDO5_V_OUT_MASK GENMASK(7, 4)
1764 +
1765 + #define AXP20X_PWR_OUT_EXTEN_MASK BIT_MASK(0)
1766 +@@ -542,14 +543,14 @@ static const struct regulator_desc axp20x_regulators[] = {
1767 + AXP20X_PWR_OUT_CTRL, AXP20X_PWR_OUT_DCDC3_MASK),
1768 + AXP_DESC_FIXED(AXP20X, LDO1, "ldo1", "acin", 1300),
1769 + AXP_DESC(AXP20X, LDO2, "ldo2", "ldo24in", 1800, 3300, 100,
1770 +- AXP20X_LDO24_V_OUT, AXP20X_LDO24_V_OUT_MASK,
1771 ++ AXP20X_LDO24_V_OUT, AXP20X_LDO2_V_OUT_MASK,
1772 + AXP20X_PWR_OUT_CTRL, AXP20X_PWR_OUT_LDO2_MASK),
1773 + AXP_DESC(AXP20X, LDO3, "ldo3", "ldo3in", 700, 3500, 25,
1774 + AXP20X_LDO3_V_OUT, AXP20X_LDO3_V_OUT_MASK,
1775 + AXP20X_PWR_OUT_CTRL, AXP20X_PWR_OUT_LDO3_MASK),
1776 + AXP_DESC_RANGES(AXP20X, LDO4, "ldo4", "ldo24in",
1777 + axp20x_ldo4_ranges, AXP20X_LDO4_V_OUT_NUM_VOLTAGES,
1778 +- AXP20X_LDO24_V_OUT, AXP20X_LDO24_V_OUT_MASK,
1779 ++ AXP20X_LDO24_V_OUT, AXP20X_LDO4_V_OUT_MASK,
1780 + AXP20X_PWR_OUT_CTRL, AXP20X_PWR_OUT_LDO4_MASK),
1781 + AXP_DESC_IO(AXP20X, LDO5, "ldo5", "ldo5in", 1800, 3300, 100,
1782 + AXP20X_LDO5_V_OUT, AXP20X_LDO5_V_OUT_MASK,
1783 +diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
1784 +index cbb770824226f..1a44e321b54e1 100644
1785 +--- a/drivers/s390/block/dasd_fba.c
1786 ++++ b/drivers/s390/block/dasd_fba.c
1787 +@@ -40,6 +40,7 @@
1788 + MODULE_LICENSE("GPL");
1789 +
1790 + static struct dasd_discipline dasd_fba_discipline;
1791 ++static void *dasd_fba_zero_page;
1792 +
1793 + struct dasd_fba_private {
1794 + struct dasd_fba_characteristics rdc_data;
1795 +@@ -270,7 +271,7 @@ static void ccw_write_zero(struct ccw1 *ccw, int count)
1796 + ccw->cmd_code = DASD_FBA_CCW_WRITE;
1797 + ccw->flags |= CCW_FLAG_SLI;
1798 + ccw->count = count;
1799 +- ccw->cda = (__u32) (addr_t) page_to_phys(ZERO_PAGE(0));
1800 ++ ccw->cda = (__u32) (addr_t) dasd_fba_zero_page;
1801 + }
1802 +
1803 + /*
1804 +@@ -830,6 +831,11 @@ dasd_fba_init(void)
1805 + int ret;
1806 +
1807 + ASCEBC(dasd_fba_discipline.ebcname, 4);
1808 ++
1809 ++ dasd_fba_zero_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
1810 ++ if (!dasd_fba_zero_page)
1811 ++ return -ENOMEM;
1812 ++
1813 + ret = ccw_driver_register(&dasd_fba_driver);
1814 + if (!ret)
1815 + wait_for_device_probe();
1816 +@@ -841,6 +847,7 @@ static void __exit
1817 + dasd_fba_cleanup(void)
1818 + {
1819 + ccw_driver_unregister(&dasd_fba_driver);
1820 ++ free_page((unsigned long)dasd_fba_zero_page);
1821 + }
1822 +
1823 + module_init(dasd_fba_init);
1824 +diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
1825 +index 56a405dce8bcf..0b244f691b72d 100644
1826 +--- a/drivers/s390/crypto/zcrypt_api.c
1827 ++++ b/drivers/s390/crypto/zcrypt_api.c
1828 +@@ -1429,7 +1429,8 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
1829 + if (!reqcnt)
1830 + return -ENOMEM;
1831 + zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
1832 +- if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
1833 ++ if (copy_to_user((int __user *) arg, reqcnt,
1834 ++ sizeof(u32) * AP_DEVICES))
1835 + rc = -EFAULT;
1836 + kfree(reqcnt);
1837 + return rc;
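
The zcrypt fix above is the classic sizeof-a-pointer bug: reqcnt is
allocated dynamically, so sizeof(reqcnt) yields the pointer size rather
than the buffer size, and copy_to_user() truncated the per-device
request counters. A small userspace demonstration of the pitfall; the
AP_DEVICES value here is assumed for illustration only:

#include <stdio.h>
#include <stdlib.h>

#define AP_DEVICES 256                  /* assumed value */

int main(void)
{
        unsigned int *reqcnt = calloc(AP_DEVICES, sizeof(*reqcnt));

        if (!reqcnt)
                return 1;
        /* sizeof(reqcnt) is the size of the pointer (8 bytes on 64-bit),
         * not of the whole buffer -- exactly the bug the hunk fixes. */
        printf("sizeof(reqcnt)           = %zu\n", sizeof(reqcnt));
        printf("sizeof(u32) * AP_DEVICES = %zu\n",
               sizeof(*reqcnt) * AP_DEVICES);
        free(reqcnt);
        return 0;
}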
1838 +diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
1839 +index 4084f7f2b8216..7064e8024d14d 100644
1840 +--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
1841 ++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
1842 +@@ -71,6 +71,7 @@ static void lpfc_disc_timeout_handler(struct lpfc_vport *);
1843 + static void lpfc_disc_flush_list(struct lpfc_vport *vport);
1844 + static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
1845 + static int lpfc_fcf_inuse(struct lpfc_hba *);
1846 ++static void lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *);
1847 +
1848 + void
1849 + lpfc_terminate_rport_io(struct fc_rport *rport)
1850 +@@ -1138,11 +1139,13 @@ out:
1851 + return;
1852 + }
1853 +
1854 +-
1855 + void
1856 + lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1857 + {
1858 + struct lpfc_vport *vport = pmb->vport;
1859 ++ LPFC_MBOXQ_t *sparam_mb;
1860 ++ struct lpfc_dmabuf *sparam_mp;
1861 ++ int rc;
1862 +
1863 + if (pmb->u.mb.mbxStatus)
1864 + goto out;
1865 +@@ -1167,12 +1170,42 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1866 + }
1867 +
1868 + /* Start discovery by sending a FLOGI. port_state is identically
1869 +- * LPFC_FLOGI while waiting for FLOGI cmpl. Check if sending
1870 +- * the FLOGI is being deferred till after MBX_READ_SPARAM completes.
1871 ++ * LPFC_FLOGI while waiting for FLOGI cmpl.
1872 + */
1873 + if (vport->port_state != LPFC_FLOGI) {
1874 +- if (!(phba->hba_flag & HBA_DEFER_FLOGI))
1875 ++ /* Issue MBX_READ_SPARAM to update CSPs before FLOGI if
1876 ++ * bb-credit recovery is in place.
1877 ++ */
1878 ++ if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
1879 ++ !(phba->link_flag & LS_LOOPBACK_MODE)) {
1880 ++ sparam_mb = mempool_alloc(phba->mbox_mem_pool,
1881 ++ GFP_KERNEL);
1882 ++ if (!sparam_mb)
1883 ++ goto sparam_out;
1884 ++
1885 ++ rc = lpfc_read_sparam(phba, sparam_mb, 0);
1886 ++ if (rc) {
1887 ++ mempool_free(sparam_mb, phba->mbox_mem_pool);
1888 ++ goto sparam_out;
1889 ++ }
1890 ++ sparam_mb->vport = vport;
1891 ++ sparam_mb->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
1892 ++ rc = lpfc_sli_issue_mbox(phba, sparam_mb, MBX_NOWAIT);
1893 ++ if (rc == MBX_NOT_FINISHED) {
1894 ++ sparam_mp = (struct lpfc_dmabuf *)
1895 ++ sparam_mb->ctx_buf;
1896 ++ lpfc_mbuf_free(phba, sparam_mp->virt,
1897 ++ sparam_mp->phys);
1898 ++ kfree(sparam_mp);
1899 ++ sparam_mb->ctx_buf = NULL;
1900 ++ mempool_free(sparam_mb, phba->mbox_mem_pool);
1901 ++ goto sparam_out;
1902 ++ }
1903 ++
1904 ++ phba->hba_flag |= HBA_DEFER_FLOGI;
1905 ++ } else {
1906 + lpfc_initial_flogi(vport);
1907 ++ }
1908 + } else {
1909 + if (vport->fc_flag & FC_PT2PT)
1910 + lpfc_disc_start(vport);
1911 +@@ -1184,6 +1217,7 @@ out:
1912 + "0306 CONFIG_LINK mbxStatus error x%x "
1913 + "HBA state x%x\n",
1914 + pmb->u.mb.mbxStatus, vport->port_state);
1915 ++sparam_out:
1916 + mempool_free(pmb, phba->mbox_mem_pool);
1917 +
1918 + lpfc_linkdown(phba);
1919 +@@ -3239,21 +3273,6 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
1920 + lpfc_linkup(phba);
1921 + sparam_mbox = NULL;
1922 +
1923 +- if (!(phba->hba_flag & HBA_FCOE_MODE)) {
1924 +- cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1925 +- if (!cfglink_mbox)
1926 +- goto out;
1927 +- vport->port_state = LPFC_LOCAL_CFG_LINK;
1928 +- lpfc_config_link(phba, cfglink_mbox);
1929 +- cfglink_mbox->vport = vport;
1930 +- cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
1931 +- rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
1932 +- if (rc == MBX_NOT_FINISHED) {
1933 +- mempool_free(cfglink_mbox, phba->mbox_mem_pool);
1934 +- goto out;
1935 +- }
1936 +- }
1937 +-
1938 + sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1939 + if (!sparam_mbox)
1940 + goto out;
1941 +@@ -3274,7 +3293,20 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
1942 + goto out;
1943 + }
1944 +
1945 +- if (phba->hba_flag & HBA_FCOE_MODE) {
1946 ++ if (!(phba->hba_flag & HBA_FCOE_MODE)) {
1947 ++ cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1948 ++ if (!cfglink_mbox)
1949 ++ goto out;
1950 ++ vport->port_state = LPFC_LOCAL_CFG_LINK;
1951 ++ lpfc_config_link(phba, cfglink_mbox);
1952 ++ cfglink_mbox->vport = vport;
1953 ++ cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
1954 ++ rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
1955 ++ if (rc == MBX_NOT_FINISHED) {
1956 ++ mempool_free(cfglink_mbox, phba->mbox_mem_pool);
1957 ++ goto out;
1958 ++ }
1959 ++ } else {
1960 + vport->port_state = LPFC_VPORT_UNKNOWN;
1961 + /*
1962 + * Add the driver's default FCF record at FCF index 0 now. This
1963 +@@ -3331,10 +3363,6 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
1964 + }
1965 + /* Reset FCF roundrobin bmask for new discovery */
1966 + lpfc_sli4_clear_fcf_rr_bmask(phba);
1967 +- } else {
1968 +- if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
1969 +- !(phba->link_flag & LS_LOOPBACK_MODE))
1970 +- phba->hba_flag |= HBA_DEFER_FLOGI;
1971 + }
1972 +
1973 + /* Prepare for LINK up registrations */
1974 +diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
1975 +index 681d090851756..9cfa15ec8b08c 100644
1976 +--- a/drivers/spi/spi-bcm-qspi.c
1977 ++++ b/drivers/spi/spi-bcm-qspi.c
1978 +@@ -1295,7 +1295,7 @@ static const struct of_device_id bcm_qspi_of_match[] = {
1979 + },
1980 + {
1981 + .compatible = "brcm,spi-bcm-qspi",
1982 +- .data = &bcm_qspi_rev_data,
1983 ++ .data = &bcm_qspi_no_rev_data,
1984 + },
1985 + {
1986 + .compatible = "brcm,spi-bcm7216-qspi",
1987 +diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
1988 +index 91c6affe139c9..283f2468a2f46 100644
1989 +--- a/drivers/spi/spi-fsl-dspi.c
1990 ++++ b/drivers/spi/spi-fsl-dspi.c
1991 +@@ -174,17 +174,17 @@ static const struct fsl_dspi_devtype_data devtype_data[] = {
1992 + .fifo_size = 16,
1993 + },
1994 + [LS2080A] = {
1995 +- .trans_mode = DSPI_DMA_MODE,
1996 ++ .trans_mode = DSPI_XSPI_MODE,
1997 + .max_clock_factor = 8,
1998 + .fifo_size = 4,
1999 + },
2000 + [LS2085A] = {
2001 +- .trans_mode = DSPI_DMA_MODE,
2002 ++ .trans_mode = DSPI_XSPI_MODE,
2003 + .max_clock_factor = 8,
2004 + .fifo_size = 4,
2005 + },
2006 + [LX2160A] = {
2007 +- .trans_mode = DSPI_DMA_MODE,
2008 ++ .trans_mode = DSPI_XSPI_MODE,
2009 + .max_clock_factor = 8,
2010 + .fifo_size = 4,
2011 + },
2012 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
2013 +index 6d3ed9542b6c1..e6dbfd09bf1cb 100644
2014 +--- a/fs/btrfs/disk-io.c
2015 ++++ b/fs/btrfs/disk-io.c
2016 +@@ -636,16 +636,15 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
2017 + csum_tree_block(eb, result);
2018 +
2019 + if (memcmp_extent_buffer(eb, result, 0, csum_size)) {
2020 +- u32 val;
2021 +- u32 found = 0;
2022 +-
2023 +- memcpy(&found, result, csum_size);
2024 ++ u8 val[BTRFS_CSUM_SIZE] = { 0 };
2025 +
2026 + read_extent_buffer(eb, &val, 0, csum_size);
2027 + btrfs_warn_rl(fs_info,
2028 +- "%s checksum verify failed on %llu wanted %x found %x level %d",
2029 ++ "%s checksum verify failed on %llu wanted " CSUM_FMT " found " CSUM_FMT " level %d",
2030 + fs_info->sb->s_id, eb->start,
2031 +- val, found, btrfs_header_level(eb));
2032 ++ CSUM_FMT_VALUE(csum_size, val),
2033 ++ CSUM_FMT_VALUE(csum_size, result),
2034 ++ btrfs_header_level(eb));
2035 + ret = -EUCLEAN;
2036 + goto err;
2037 + }
2038 +diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
2039 +index abc4a8fd6df65..21a1f4b0152e7 100644
2040 +--- a/fs/btrfs/sysfs.c
2041 ++++ b/fs/btrfs/sysfs.c
2042 +@@ -1165,10 +1165,12 @@ int btrfs_sysfs_remove_devices_dir(struct btrfs_fs_devices *fs_devices,
2043 + disk_kobj->name);
2044 + }
2045 +
2046 +- kobject_del(&one_device->devid_kobj);
2047 +- kobject_put(&one_device->devid_kobj);
2048 ++ if (one_device->devid_kobj.state_initialized) {
2049 ++ kobject_del(&one_device->devid_kobj);
2050 ++ kobject_put(&one_device->devid_kobj);
2051 +
2052 +- wait_for_completion(&one_device->kobj_unregister);
2053 ++ wait_for_completion(&one_device->kobj_unregister);
2054 ++ }
2055 +
2056 + return 0;
2057 + }
2058 +@@ -1181,10 +1183,12 @@ int btrfs_sysfs_remove_devices_dir(struct btrfs_fs_devices *fs_devices,
2059 + sysfs_remove_link(fs_devices->devices_kobj,
2060 + disk_kobj->name);
2061 + }
2062 +- kobject_del(&one_device->devid_kobj);
2063 +- kobject_put(&one_device->devid_kobj);
2064 ++ if (one_device->devid_kobj.state_initialized) {
2065 ++ kobject_del(&one_device->devid_kobj);
2066 ++ kobject_put(&one_device->devid_kobj);
2067 +
2068 +- wait_for_completion(&one_device->kobj_unregister);
2069 ++ wait_for_completion(&one_device->kobj_unregister);
2070 ++ }
2071 + }
2072 +
2073 + return 0;
2074 +diff --git a/fs/io_uring.c b/fs/io_uring.c
2075 +index d05023ca74bdc..1d5640cc2a488 100644
2076 +--- a/fs/io_uring.c
2077 ++++ b/fs/io_uring.c
2078 +@@ -3056,8 +3056,6 @@ static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
2079 + const char __user *fname;
2080 + int ret;
2081 +
2082 +- if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
2083 +- return -EINVAL;
2084 + if (unlikely(sqe->ioprio || sqe->buf_index))
2085 + return -EINVAL;
2086 + if (unlikely(req->flags & REQ_F_FIXED_FILE))
2087 +@@ -3084,6 +3082,8 @@ static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2088 + {
2089 + u64 flags, mode;
2090 +
2091 ++ if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
2092 ++ return -EINVAL;
2093 + if (req->flags & REQ_F_NEED_CLEANUP)
2094 + return 0;
2095 + mode = READ_ONCE(sqe->len);
2096 +@@ -3098,6 +3098,8 @@ static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2097 + size_t len;
2098 + int ret;
2099 +
2100 ++ if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
2101 ++ return -EINVAL;
2102 + if (req->flags & REQ_F_NEED_CLEANUP)
2103 + return 0;
2104 + how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
2105 +@@ -5252,6 +5254,8 @@ static void io_cleanup_req(struct io_kiocb *req)
2106 + break;
2107 + case IORING_OP_OPENAT:
2108 + case IORING_OP_OPENAT2:
2109 ++ if (req->open.filename)
2110 ++ putname(req->open.filename);
2111 + break;
2112 + case IORING_OP_SPLICE:
2113 + case IORING_OP_TEE:
2114 +diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
2115 +index 6adf90f248d70..d6c83de361e47 100644
2116 +--- a/include/linux/kprobes.h
2117 ++++ b/include/linux/kprobes.h
2118 +@@ -369,6 +369,8 @@ void unregister_kretprobes(struct kretprobe **rps, int num);
2119 + void kprobe_flush_task(struct task_struct *tk);
2120 + void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head);
2121 +
2122 ++void kprobe_free_init_mem(void);
2123 ++
2124 + int disable_kprobe(struct kprobe *kp);
2125 + int enable_kprobe(struct kprobe *kp);
2126 +
2127 +@@ -426,6 +428,9 @@ static inline void unregister_kretprobes(struct kretprobe **rps, int num)
2128 + static inline void kprobe_flush_task(struct task_struct *tk)
2129 + {
2130 + }
2131 ++static inline void kprobe_free_init_mem(void)
2132 ++{
2133 ++}
2134 + static inline int disable_kprobe(struct kprobe *kp)
2135 + {
2136 + return -ENOSYS;
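
The kprobes.h change pairs the new kprobe_free_init_mem() declaration
with an empty static-inline stub for !CONFIG_KPROBES builds, so the
caller added to kernel_init() later in this patch compiles either way
without an #ifdef. The same config-stub idiom in a self-contained
sketch; FEATURE_ENABLED is an invented stand-in for a Kconfig symbol:

#include <stdio.h>

#define FEATURE_ENABLED 0       /* imagine this coming from Kconfig */

#if FEATURE_ENABLED
void feature_free_init_mem(void);       /* real definition lives elsewhere */
#else
static inline void feature_free_init_mem(void) { }  /* compiles away */
#endif

int main(void)
{
        feature_free_init_mem();        /* call site needs no #ifdef */
        puts("ok");
        return 0;
}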
2137 +diff --git a/include/linux/mm.h b/include/linux/mm.h
2138 +index dc7b87310c103..bc05c3588aa31 100644
2139 +--- a/include/linux/mm.h
2140 ++++ b/include/linux/mm.h
2141 +@@ -2445,7 +2445,7 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn,
2142 +
2143 + extern void set_dma_reserve(unsigned long new_dma_reserve);
2144 + extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
2145 +- enum memmap_context, struct vmem_altmap *);
2146 ++ enum meminit_context, struct vmem_altmap *);
2147 + extern void setup_per_zone_wmarks(void);
2148 + extern int __meminit init_per_zone_wmark_min(void);
2149 + extern void mem_init(void);
2150 +diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
2151 +index f6f884970511d..04ff9a03bdb33 100644
2152 +--- a/include/linux/mmzone.h
2153 ++++ b/include/linux/mmzone.h
2154 +@@ -799,10 +799,15 @@ bool zone_watermark_ok(struct zone *z, unsigned int order,
2155 + unsigned int alloc_flags);
2156 + bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
2157 + unsigned long mark, int highest_zoneidx);
2158 +-enum memmap_context {
2159 +- MEMMAP_EARLY,
2160 +- MEMMAP_HOTPLUG,
2161 ++/*
2169 ++ * Memory initialization context, used to differentiate memory added by
2163 ++ * the platform statically or via memory hotplug interface.
2164 ++ */
2165 ++enum meminit_context {
2166 ++ MEMINIT_EARLY,
2167 ++ MEMINIT_HOTPLUG,
2168 + };
2169 ++
2170 + extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
2171 + unsigned long size);
2172 +
2173 +diff --git a/include/linux/node.h b/include/linux/node.h
2174 +index 4866f32a02d8d..014ba3ab2efd8 100644
2175 +--- a/include/linux/node.h
2176 ++++ b/include/linux/node.h
2177 +@@ -99,11 +99,13 @@ extern struct node *node_devices[];
2178 + typedef void (*node_registration_func_t)(struct node *);
2179 +
2180 + #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA)
2181 +-extern int link_mem_sections(int nid, unsigned long start_pfn,
2182 +- unsigned long end_pfn);
2183 ++int link_mem_sections(int nid, unsigned long start_pfn,
2184 ++ unsigned long end_pfn,
2185 ++ enum meminit_context context);
2186 + #else
2187 + static inline int link_mem_sections(int nid, unsigned long start_pfn,
2188 +- unsigned long end_pfn)
2189 ++ unsigned long end_pfn,
2190 ++ enum meminit_context context)
2191 + {
2192 + return 0;
2193 + }
2194 +@@ -128,7 +130,8 @@ static inline int register_one_node(int nid)
2195 + if (error)
2196 + return error;
2197 + /* link memory sections under this node */
2198 +- error = link_mem_sections(nid, start_pfn, end_pfn);
2199 ++ error = link_mem_sections(nid, start_pfn, end_pfn,
2200 ++ MEMINIT_EARLY);
2201 + }
2202 +
2203 + return error;
2204 +diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
2205 +index 8075f6ae185a1..552df749531db 100644
2206 +--- a/include/linux/pgtable.h
2207 ++++ b/include/linux/pgtable.h
2208 +@@ -1424,6 +1424,16 @@ typedef unsigned int pgtbl_mod_mask;
2209 + #define mm_pmd_folded(mm) __is_defined(__PAGETABLE_PMD_FOLDED)
2210 + #endif
2211 +
2212 ++#ifndef p4d_offset_lockless
2213 ++#define p4d_offset_lockless(pgdp, pgd, address) p4d_offset(&(pgd), address)
2214 ++#endif
2215 ++#ifndef pud_offset_lockless
2216 ++#define pud_offset_lockless(p4dp, p4d, address) pud_offset(&(p4d), address)
2217 ++#endif
2218 ++#ifndef pmd_offset_lockless
2219 ++#define pmd_offset_lockless(pudp, pud, address) pmd_offset(&(pud), address)
2220 ++#endif
2221 ++
2222 + /*
2223 + * p?d_leaf() - true if this entry is a final mapping to a physical address.
2224 + * This differs from p?d_huge() by the fact that they are always available (if
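
The new p?d_offset_lockless() fallbacks exist for the lockless GUP-fast
walk (see the mm/gup.c hunks later in this patch): each level is derived
from an entry value already captured with READ_ONCE(), instead of
re-dereferencing the live table slot that a concurrent thread could be
freeing. A toy sketch of that walk-from-a-snapshot idiom; struct top,
struct leaf and leaf_offset_lockless() are invented for illustration,
not kernel types:

#include <stdio.h>

struct leaf { int value; };
struct top  { struct leaf *leaf; };

/* Mirrors pud_offset_lockless(p4dp, p4d, addr): the next level comes
 * from the local snapshot 'entry'; the live slot is passed along but
 * deliberately never re-read. */
static struct leaf *leaf_offset_lockless(struct top *slot, struct top entry)
{
        (void)slot;
        return entry.leaf;
}

int main(void)
{
        struct leaf l = { 42 };
        struct top t = { &l };
        struct top snapshot = t;  /* the kernel captures this via READ_ONCE() */

        printf("%d\n", leaf_offset_lockless(&t, snapshot)->value);
        return 0;
}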
2225 +diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
2226 +index 8cb76405cbce1..78ba1dc54fd57 100644
2227 +--- a/include/linux/qed/qed_if.h
2228 ++++ b/include/linux/qed/qed_if.h
2229 +@@ -648,6 +648,7 @@ struct qed_dev_info {
2230 + #define QED_MFW_VERSION_3_OFFSET 24
2231 +
2232 + u32 flash_size;
2233 ++ bool b_arfs_capable;
2234 + bool b_inter_pf_switch;
2235 + bool tx_switching;
2236 + bool rdma_supported;
2237 +diff --git a/init/main.c b/init/main.c
2238 +index 883ded3638e59..e214cdd18c285 100644
2239 +--- a/init/main.c
2240 ++++ b/init/main.c
2241 +@@ -33,6 +33,7 @@
2242 + #include <linux/nmi.h>
2243 + #include <linux/percpu.h>
2244 + #include <linux/kmod.h>
2245 ++#include <linux/kprobes.h>
2246 + #include <linux/vmalloc.h>
2247 + #include <linux/kernel_stat.h>
2248 + #include <linux/start_kernel.h>
2249 +@@ -1401,6 +1402,7 @@ static int __ref kernel_init(void *unused)
2250 + kernel_init_freeable();
2251 + /* need to finish all async __init code before freeing the memory */
2252 + async_synchronize_full();
2253 ++ kprobe_free_init_mem();
2254 + ftrace_free_init_mem();
2255 + free_initmem();
2256 + mark_readonly();
2257 +diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
2258 +index fb878ba3f22f0..18f4969552ac2 100644
2259 +--- a/kernel/bpf/inode.c
2260 ++++ b/kernel/bpf/inode.c
2261 +@@ -226,10 +226,12 @@ static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos)
2262 + else
2263 + prev_key = key;
2264 +
2265 ++ rcu_read_lock();
2266 + if (map->ops->map_get_next_key(map, prev_key, key)) {
2267 + map_iter(m)->done = true;
2268 +- return NULL;
2269 ++ key = NULL;
2270 + }
2271 ++ rcu_read_unlock();
2272 + return key;
2273 + }
2274 +
2275 +diff --git a/kernel/kprobes.c b/kernel/kprobes.c
2276 +index a264246ff85aa..d0bf0ad425df5 100644
2277 +--- a/kernel/kprobes.c
2278 ++++ b/kernel/kprobes.c
2279 +@@ -2130,9 +2130,10 @@ static void kill_kprobe(struct kprobe *p)
2280 +
2281 + /*
2282 + * The module is going away. We should disarm the kprobe which
2283 +- * is using ftrace.
2284 ++ * is using ftrace, because ftrace framework is still available at
2285 ++ * MODULE_STATE_GOING notification.
2286 + */
2287 +- if (kprobe_ftrace(p))
2288 ++ if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed)
2289 + disarm_kprobe_ftrace(p);
2290 + }
2291 +
2292 +@@ -2405,6 +2406,28 @@ static struct notifier_block kprobe_module_nb = {
2293 + extern unsigned long __start_kprobe_blacklist[];
2294 + extern unsigned long __stop_kprobe_blacklist[];
2295 +
2296 ++void kprobe_free_init_mem(void)
2297 ++{
2298 ++ void *start = (void *)(&__init_begin);
2299 ++ void *end = (void *)(&__init_end);
2300 ++ struct hlist_head *head;
2301 ++ struct kprobe *p;
2302 ++ int i;
2303 ++
2304 ++ mutex_lock(&kprobe_mutex);
2305 ++
2306 ++ /* Kill all kprobes on initmem */
2307 ++ for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2308 ++ head = &kprobe_table[i];
2309 ++ hlist_for_each_entry(p, head, hlist) {
2310 ++ if (start <= (void *)p->addr && (void *)p->addr < end)
2311 ++ kill_kprobe(p);
2312 ++ }
2313 ++ }
2314 ++
2315 ++ mutex_unlock(&kprobe_mutex);
2316 ++}
2317 ++
2318 + static int __init init_kprobes(void)
2319 + {
2320 + int i, err = 0;
2321 +diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
2322 +index 0b933546142e8..1b2ef64902296 100644
2323 +--- a/kernel/trace/trace_events_hist.c
2324 ++++ b/kernel/trace/trace_events_hist.c
2325 +@@ -3865,7 +3865,6 @@ static int parse_var_defs(struct hist_trigger_data *hist_data)
2326 +
2327 + s = kstrdup(field_str, GFP_KERNEL);
2328 + if (!s) {
2329 +- kfree(hist_data->attrs->var_defs.name[n_vars]);
2330 + ret = -ENOMEM;
2331 + goto free;
2332 + }
2333 +diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c
2334 +index f10073e626030..f4938040c2286 100644
2335 +--- a/kernel/trace/trace_preemptirq.c
2336 ++++ b/kernel/trace/trace_preemptirq.c
2337 +@@ -102,14 +102,14 @@ NOKPROBE_SYMBOL(trace_hardirqs_on_caller);
2338 +
2339 + __visible void trace_hardirqs_off_caller(unsigned long caller_addr)
2340 + {
2341 ++ lockdep_hardirqs_off(CALLER_ADDR0);
2342 ++
2343 + if (!this_cpu_read(tracing_irq_cpu)) {
2344 + this_cpu_write(tracing_irq_cpu, 1);
2345 + tracer_hardirqs_off(CALLER_ADDR0, caller_addr);
2346 + if (!in_nmi())
2347 + trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
2348 + }
2349 +-
2350 +- lockdep_hardirqs_off(CALLER_ADDR0);
2351 + }
2352 + EXPORT_SYMBOL(trace_hardirqs_off_caller);
2353 + NOKPROBE_SYMBOL(trace_hardirqs_off_caller);
2354 +diff --git a/lib/bootconfig.c b/lib/bootconfig.c
2355 +index 912ef49213986..510a0384861a2 100644
2356 +--- a/lib/bootconfig.c
2357 ++++ b/lib/bootconfig.c
2358 +@@ -31,6 +31,8 @@ static size_t xbc_data_size __initdata;
2359 + static struct xbc_node *last_parent __initdata;
2360 + static const char *xbc_err_msg __initdata;
2361 + static int xbc_err_pos __initdata;
2362 ++static int open_brace[XBC_DEPTH_MAX] __initdata;
2363 ++static int brace_index __initdata;
2364 +
2365 + static int __init xbc_parse_error(const char *msg, const char *p)
2366 + {
2367 +@@ -423,27 +425,27 @@ static char *skip_spaces_until_newline(char *p)
2368 + return p;
2369 + }
2370 +
2371 +-static int __init __xbc_open_brace(void)
2372 ++static int __init __xbc_open_brace(char *p)
2373 + {
2374 +- /* Mark the last key as open brace */
2375 +- last_parent->next = XBC_NODE_MAX;
2376 ++ /* Push the last key as open brace */
2377 ++ open_brace[brace_index++] = xbc_node_index(last_parent);
2378 ++ if (brace_index >= XBC_DEPTH_MAX)
2379 ++ return xbc_parse_error("Exceed max depth of braces", p);
2380 +
2381 + return 0;
2382 + }
2383 +
2384 + static int __init __xbc_close_brace(char *p)
2385 + {
2386 +- struct xbc_node *node;
2387 +-
2388 +- if (!last_parent || last_parent->next != XBC_NODE_MAX)
2389 ++ brace_index--;
2390 ++ if (!last_parent || brace_index < 0 ||
2391 ++ (open_brace[brace_index] != xbc_node_index(last_parent)))
2392 + return xbc_parse_error("Unexpected closing brace", p);
2393 +
2394 +- node = last_parent;
2395 +- node->next = 0;
2396 +- do {
2397 +- node = xbc_node_get_parent(node);
2398 +- } while (node && node->next != XBC_NODE_MAX);
2399 +- last_parent = node;
2400 ++ if (brace_index == 0)
2401 ++ last_parent = NULL;
2402 ++ else
2403 ++ last_parent = &xbc_nodes[open_brace[brace_index - 1]];
2404 +
2405 + return 0;
2406 + }
2407 +@@ -484,8 +486,8 @@ static int __init __xbc_parse_value(char **__v, char **__n)
2408 + break;
2409 + }
2410 + if (strchr(",;\n#}", c)) {
2411 +- v = strim(v);
2412 + *p++ = '\0';
2413 ++ v = strim(v);
2414 + break;
2415 + }
2416 + }
2417 +@@ -651,7 +653,7 @@ static int __init xbc_open_brace(char **k, char *n)
2418 + return ret;
2419 + *k = n;
2420 +
2421 +- return __xbc_open_brace();
2422 ++ return __xbc_open_brace(n - 1);
2423 + }
2424 +
2425 + static int __init xbc_close_brace(char **k, char *n)
2426 +@@ -671,6 +673,13 @@ static int __init xbc_verify_tree(void)
2427 + int i, depth, len, wlen;
2428 + struct xbc_node *n, *m;
2429 +
2430 ++ /* Brace closing */
2431 ++ if (brace_index) {
2432 ++ n = &xbc_nodes[open_brace[brace_index]];
2433 ++ return xbc_parse_error("Brace is not closed",
2434 ++ xbc_node_get_data(n));
2435 ++ }
2436 ++
2437 + /* Empty tree */
2438 + if (xbc_node_num == 0) {
2439 + xbc_parse_error("Empty config", xbc_data);
2440 +@@ -735,6 +744,7 @@ void __init xbc_destroy_all(void)
2441 + xbc_node_num = 0;
2442 + memblock_free(__pa(xbc_nodes), sizeof(struct xbc_node) * XBC_NODE_MAX);
2443 + xbc_nodes = NULL;
2444 ++ brace_index = 0;
2445 + }
2446 +
2447 + /**
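
The bootconfig rework above replaces the old sentinel trick (marking
last_parent->next with XBC_NODE_MAX) with an explicit stack of open
braces, which turns the "Unexpected closing brace", "Exceed max depth
of braces" and new "Brace is not closed" diagnostics into direct stack
checks. A minimal stack-based matcher in the same spirit; DEPTH_MAX
and check_braces() are illustrative, not part of the xbc API:

#include <stdio.h>

#define DEPTH_MAX 16    /* stands in for XBC_DEPTH_MAX; value assumed */

/* Returns 0 if the braces in 's' balance, -1 otherwise. */
static int check_braces(const char *s)
{
        int stack[DEPTH_MAX], depth = 0, pos;

        for (pos = 0; s[pos]; pos++) {
                if (s[pos] == '{') {
                        if (depth >= DEPTH_MAX)
                                return -1;      /* "Exceed max depth" */
                        stack[depth++] = pos;   /* push opener's offset */
                } else if (s[pos] == '}') {
                        if (depth == 0)
                                return -1;      /* "Unexpected closing brace" */
                        depth--;                /* pop; matched stack[depth] */
                }
        }
        if (depth) {                            /* "Brace is not closed" */
                fprintf(stderr, "unclosed '{' at offset %d\n",
                        stack[depth - 1]);
                return -1;
        }
        return 0;
}

int main(void)
{
        printf("%d\n", check_braces("key { a = 1; sub { b = 2 } }")); /*  0 */
        printf("%d\n", check_braces("key { a = 1"));                  /* -1 */
        return 0;
}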
2448 +diff --git a/lib/string.c b/lib/string.c
2449 +index 6012c385fb314..4288e0158d47f 100644
2450 +--- a/lib/string.c
2451 ++++ b/lib/string.c
2452 +@@ -272,6 +272,30 @@ ssize_t strscpy_pad(char *dest, const char *src, size_t count)
2453 + }
2454 + EXPORT_SYMBOL(strscpy_pad);
2455 +
2456 ++/**
2457 ++ * stpcpy - copy a string from src to dest returning a pointer to the new end
2458 ++ * of dest, including src's %NUL-terminator. May overrun dest.
2459 ++ * @dest: pointer to end of string being copied into. Must be large enough
2460 ++ * to receive copy.
2461 ++ * @src: pointer to the beginning of string being copied from. Must not overlap
2462 ++ * dest.
2463 ++ *
2464 ++ * stpcpy differs from strcpy in a key way: the return value is a pointer
2465 ++ * to the new %NUL-terminating character in @dest. (For strcpy, the return
2466 ++ * value is a pointer to the start of @dest). This interface is considered
2467 ++ * unsafe as it doesn't perform bounds checking of the inputs. As such it's
2468 ++ * not recommended for usage. Instead, its definition is provided in case
2469 ++ * the compiler lowers other libcalls to stpcpy.
2470 ++ */
2471 ++char *stpcpy(char *__restrict__ dest, const char *__restrict__ src);
2472 ++char *stpcpy(char *__restrict__ dest, const char *__restrict__ src)
2473 ++{
2474 ++ while ((*dest++ = *src++) != '\0')
2475 ++ /* nothing */;
2476 ++ return --dest;
2477 ++}
2478 ++EXPORT_SYMBOL(stpcpy);
2479 ++
2480 + #ifndef __HAVE_ARCH_STRCAT
2481 + /**
2482 + * strcat - Append one %NUL-terminated string to another
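
The stpcpy() definition above is provided, per its kernel-doc, only so
the kernel still links when a compiler lowers another libcall to
stpcpy; it is not meant to be called directly. Its contract -- return
a pointer to the new NUL in dest -- is what makes chained copies
linear where a strcat() loop would be quadratic. A short userspace
demonstration against glibc's stpcpy(), which has the same contract:

#define _GNU_SOURCE             /* for glibc's stpcpy() declaration */
#include <stdio.h>
#include <string.h>

int main(void)
{
        char buf[32];
        char *p = buf;

        /* Each call resumes exactly where the previous one ended. */
        p = stpcpy(p, "linux-");
        p = stpcpy(p, "5.8.");
        p = stpcpy(p, "13");
        printf("%s (%zu chars)\n", buf, (size_t)(p - buf));
        return 0;
}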
2483 +diff --git a/mm/gup.c b/mm/gup.c
2484 +index 0d8d76f10ac61..2e9ce90f29a1c 100644
2485 +--- a/mm/gup.c
2486 ++++ b/mm/gup.c
2487 +@@ -2574,13 +2574,13 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
2488 + return 1;
2489 + }
2490 +
2491 +-static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
2492 ++static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end,
2493 + unsigned int flags, struct page **pages, int *nr)
2494 + {
2495 + unsigned long next;
2496 + pmd_t *pmdp;
2497 +
2498 +- pmdp = pmd_offset(&pud, addr);
2499 ++ pmdp = pmd_offset_lockless(pudp, pud, addr);
2500 + do {
2501 + pmd_t pmd = READ_ONCE(*pmdp);
2502 +
2503 +@@ -2617,13 +2617,13 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
2504 + return 1;
2505 + }
2506 +
2507 +-static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end,
2508 ++static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned long end,
2509 + unsigned int flags, struct page **pages, int *nr)
2510 + {
2511 + unsigned long next;
2512 + pud_t *pudp;
2513 +
2514 +- pudp = pud_offset(&p4d, addr);
2515 ++ pudp = pud_offset_lockless(p4dp, p4d, addr);
2516 + do {
2517 + pud_t pud = READ_ONCE(*pudp);
2518 +
2519 +@@ -2638,20 +2638,20 @@ static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end,
2520 + if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
2521 + PUD_SHIFT, next, flags, pages, nr))
2522 + return 0;
2523 +- } else if (!gup_pmd_range(pud, addr, next, flags, pages, nr))
2524 ++ } else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr))
2525 + return 0;
2526 + } while (pudp++, addr = next, addr != end);
2527 +
2528 + return 1;
2529 + }
2530 +
2531 +-static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
2532 ++static int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end,
2533 + unsigned int flags, struct page **pages, int *nr)
2534 + {
2535 + unsigned long next;
2536 + p4d_t *p4dp;
2537 +
2538 +- p4dp = p4d_offset(&pgd, addr);
2539 ++ p4dp = p4d_offset_lockless(pgdp, pgd, addr);
2540 + do {
2541 + p4d_t p4d = READ_ONCE(*p4dp);
2542 +
2543 +@@ -2663,7 +2663,7 @@ static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
2544 + if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
2545 + P4D_SHIFT, next, flags, pages, nr))
2546 + return 0;
2547 +- } else if (!gup_pud_range(p4d, addr, next, flags, pages, nr))
2548 ++ } else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr))
2549 + return 0;
2550 + } while (p4dp++, addr = next, addr != end);
2551 +
2552 +@@ -2691,7 +2691,7 @@ static void gup_pgd_range(unsigned long addr, unsigned long end,
2553 + if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
2554 + PGDIR_SHIFT, next, flags, pages, nr))
2555 + return;
2556 +- } else if (!gup_p4d_range(pgd, addr, next, flags, pages, nr))
2557 ++ } else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr))
2558 + return;
2559 + } while (pgdp++, addr = next, addr != end);
2560 + }
2561 +diff --git a/mm/madvise.c b/mm/madvise.c
2562 +index d4aa5f7765435..0e0d61003fc6f 100644
2563 +--- a/mm/madvise.c
2564 ++++ b/mm/madvise.c
2565 +@@ -381,9 +381,9 @@ huge_unlock:
2566 + return 0;
2567 + }
2568 +
2569 ++regular_page:
2570 + if (pmd_trans_unstable(pmd))
2571 + return 0;
2572 +-regular_page:
2573 + #endif
2574 + tlb_change_page_size(tlb, PAGE_SIZE);
2575 + orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
2576 +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
2577 +index e76de2067bfd1..3f5073330bd50 100644
2578 +--- a/mm/memory_hotplug.c
2579 ++++ b/mm/memory_hotplug.c
2580 +@@ -719,7 +719,7 @@ void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
2581 + * are reserved so nobody should be touching them so we should be safe
2582 + */
2583 + memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn,
2584 +- MEMMAP_HOTPLUG, altmap);
2585 ++ MEMINIT_HOTPLUG, altmap);
2586 +
2587 + set_zone_contiguous(zone);
2588 + }
2589 +@@ -1065,7 +1065,8 @@ int __ref add_memory_resource(int nid, struct resource *res)
2590 + }
2591 +
2592 + /* link memory sections under this node.*/
2593 +- ret = link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1));
2594 ++ ret = link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1),
2595 ++ MEMINIT_HOTPLUG);
2596 + BUG_ON(ret);
2597 +
2598 + /* create new memmap entry */
2599 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
2600 +index d809242f671f0..898ff44f2c7b2 100644
2601 +--- a/mm/page_alloc.c
2602 ++++ b/mm/page_alloc.c
2603 +@@ -5952,7 +5952,7 @@ overlap_memmap_init(unsigned long zone, unsigned long *pfn)
2604 + * done. Non-atomic initialization, single-pass.
2605 + */
2606 + void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
2607 +- unsigned long start_pfn, enum memmap_context context,
2608 ++ unsigned long start_pfn, enum meminit_context context,
2609 + struct vmem_altmap *altmap)
2610 + {
2611 + unsigned long pfn, end_pfn = start_pfn + size;
2612 +@@ -5984,7 +5984,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
2613 + * There can be holes in boot-time mem_map[]s handed to this
2614 + * function. They do not exist on hotplugged memory.
2615 + */
2616 +- if (context == MEMMAP_EARLY) {
2617 ++ if (context == MEMINIT_EARLY) {
2618 + if (overlap_memmap_init(zone, &pfn))
2619 + continue;
2620 + if (defer_init(nid, pfn, end_pfn))
2621 +@@ -5993,7 +5993,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
2622 +
2623 + page = pfn_to_page(pfn);
2624 + __init_single_page(page, pfn, zone, nid);
2625 +- if (context == MEMMAP_HOTPLUG)
2626 ++ if (context == MEMINIT_HOTPLUG)
2627 + __SetPageReserved(page);
2628 +
2629 + /*
2630 +@@ -6076,7 +6076,7 @@ void __ref memmap_init_zone_device(struct zone *zone,
2631 + * check here not to call set_pageblock_migratetype() against
2632 + * pfn out of zone.
2633 + *
2634 +- * Please note that MEMMAP_HOTPLUG path doesn't clear memmap
2635 ++ * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
2636 + * because this is done early in section_activate()
2637 + */
2638 + if (!(pfn & (pageblock_nr_pages - 1))) {
2639 +@@ -6114,7 +6114,7 @@ void __meminit __weak memmap_init(unsigned long size, int nid,
2640 + if (end_pfn > start_pfn) {
2641 + size = end_pfn - start_pfn;
2642 + memmap_init_zone(size, nid, zone, start_pfn,
2643 +- MEMMAP_EARLY, NULL);
2644 ++ MEMINIT_EARLY, NULL);
2645 + }
2646 + }
2647 + }
2648 +diff --git a/mm/swapfile.c b/mm/swapfile.c
2649 +index 987276c557d1f..26707c5dc9fce 100644
2650 +--- a/mm/swapfile.c
2651 ++++ b/mm/swapfile.c
2652 +@@ -1074,7 +1074,7 @@ start_over:
2653 + goto nextsi;
2654 + }
2655 + if (size == SWAPFILE_CLUSTER) {
2656 +- if (!(si->flags & SWP_FS))
2657 ++ if (si->flags & SWP_BLKDEV)
2658 + n_ret = swap_alloc_cluster(si, swp_entries);
2659 + } else
2660 + n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
2661 +diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
2662 +index cfb9e16afe38a..8002a7f8f3fad 100644
2663 +--- a/net/batman-adv/bridge_loop_avoidance.c
2664 ++++ b/net/batman-adv/bridge_loop_avoidance.c
2665 +@@ -25,6 +25,7 @@
2666 + #include <linux/lockdep.h>
2667 + #include <linux/netdevice.h>
2668 + #include <linux/netlink.h>
2669 ++#include <linux/preempt.h>
2670 + #include <linux/rculist.h>
2671 + #include <linux/rcupdate.h>
2672 + #include <linux/seq_file.h>
2673 +@@ -83,11 +84,12 @@ static inline u32 batadv_choose_claim(const void *data, u32 size)
2674 + */
2675 + static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
2676 + {
2677 +- const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
2678 ++ const struct batadv_bla_backbone_gw *gw;
2679 + u32 hash = 0;
2680 +
2681 +- hash = jhash(&claim->addr, sizeof(claim->addr), hash);
2682 +- hash = jhash(&claim->vid, sizeof(claim->vid), hash);
2683 ++ gw = (struct batadv_bla_backbone_gw *)data;
2684 ++ hash = jhash(&gw->orig, sizeof(gw->orig), hash);
2685 ++ hash = jhash(&gw->vid, sizeof(gw->vid), hash);
2686 +
2687 + return hash % size;
2688 + }
2689 +@@ -1579,13 +1581,16 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
2690 + }
2691 +
2692 + /**
2693 +- * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup.
2694 ++ * batadv_bla_check_duplist() - Check if a frame is in the broadcast dup.
2695 + * @bat_priv: the bat priv with all the soft interface information
2696 +- * @skb: contains the bcast_packet to be checked
2697 ++ * @skb: contains the multicast packet to be checked
2698 ++ * @payload_ptr: pointer to position inside the head buffer of the skb
2699 ++ * marking the start of the data to be CRC'ed
2700 ++ * @orig: originator mac address, NULL if unknown
2701 + *
2702 +- * check if it is on our broadcast list. Another gateway might
2703 +- * have sent the same packet because it is connected to the same backbone,
2704 +- * so we have to remove this duplicate.
2705 ++ * Check if it is on our broadcast list. Another gateway might have sent the
2706 ++ * same packet because it is connected to the same backbone, so we have to
2707 ++ * remove this duplicate.
2708 + *
2709 + * This is performed by checking the CRC, which will tell us
2710 + * with a good chance that it is the same packet. If it is furthermore
2711 +@@ -1594,19 +1599,17 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
2712 + *
2713 + * Return: true if a packet is in the duplicate list, false otherwise.
2714 + */
2715 +-bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
2716 +- struct sk_buff *skb)
2717 ++static bool batadv_bla_check_duplist(struct batadv_priv *bat_priv,
2718 ++ struct sk_buff *skb, u8 *payload_ptr,
2719 ++ const u8 *orig)
2720 + {
2721 +- int i, curr;
2722 +- __be32 crc;
2723 +- struct batadv_bcast_packet *bcast_packet;
2724 + struct batadv_bcast_duplist_entry *entry;
2725 + bool ret = false;
2726 +-
2727 +- bcast_packet = (struct batadv_bcast_packet *)skb->data;
2728 ++ int i, curr;
2729 ++ __be32 crc;
2730 +
2731 + /* calculate the crc ... */
2732 +- crc = batadv_skb_crc32(skb, (u8 *)(bcast_packet + 1));
2733 ++ crc = batadv_skb_crc32(skb, payload_ptr);
2734 +
2735 + spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);
2736 +
2737 +@@ -1625,8 +1628,21 @@ bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
2738 + if (entry->crc != crc)
2739 + continue;
2740 +
2741 +- if (batadv_compare_eth(entry->orig, bcast_packet->orig))
2742 +- continue;
2743 ++ /* are the originators both known and not anonymous? */
2744 ++ if (orig && !is_zero_ether_addr(orig) &&
2745 ++ !is_zero_ether_addr(entry->orig)) {
2746 ++ /* If known, check if the new frame came from
2747 ++ * the same originator:
2748 ++ * We are safe to take identical frames from the
2749 ++ * same orig, if known, as multiplications in
2750 ++ * the mesh are detected via the (orig, seqno) pair.
2751 ++ * So we can be a bit more liberal here and allow
2752 ++ * identical frames from the same orig which the source
2753 ++ * host might have sent multiple times on purpose.
2754 ++ */
2755 ++ if (batadv_compare_eth(entry->orig, orig))
2756 ++ continue;
2757 ++ }
2758 +
2759 + /* this entry seems to match: same crc, not too old,
2760 + * and from another gw. therefore return true to forbid it.
2761 +@@ -1642,7 +1658,14 @@ bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
2762 + entry = &bat_priv->bla.bcast_duplist[curr];
2763 + entry->crc = crc;
2764 + entry->entrytime = jiffies;
2765 +- ether_addr_copy(entry->orig, bcast_packet->orig);
2766 ++
2767 ++ /* known originator */
2768 ++ if (orig)
2769 ++ ether_addr_copy(entry->orig, orig);
2770 ++ /* anonymous originator */
2771 ++ else
2772 ++ eth_zero_addr(entry->orig);
2773 ++
2774 + bat_priv->bla.bcast_duplist_curr = curr;
2775 +
2776 + out:
2777 +@@ -1651,6 +1674,48 @@ out:
2778 + return ret;
2779 + }
2780 +
2781 ++/**
2782 ++ * batadv_bla_check_ucast_duplist() - Check if a frame is in the broadcast dup.
2783 ++ * @bat_priv: the bat priv with all the soft interface information
2784 ++ * @skb: contains the multicast packet to be checked, decapsulated from a
2785 ++ * unicast_packet
2786 ++ *
2787 ++ * Check if it is on our broadcast list. Another gateway might have sent the
2788 ++ * same packet because it is connected to the same backbone, so we have to
2789 ++ * remove this duplicate.
2790 ++ *
2791 ++ * Return: true if a packet is in the duplicate list, false otherwise.
2792 ++ */
2793 ++static bool batadv_bla_check_ucast_duplist(struct batadv_priv *bat_priv,
2794 ++ struct sk_buff *skb)
2795 ++{
2796 ++ return batadv_bla_check_duplist(bat_priv, skb, (u8 *)skb->data, NULL);
2797 ++}
2798 ++
2799 ++/**
2800 ++ * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup.
2801 ++ * @bat_priv: the bat priv with all the soft interface information
2802 ++ * @skb: contains the bcast_packet to be checked
2803 ++ *
2804 ++ * Check if it is on our broadcast list. Another gateway might have sent the
2805 ++ * same packet because it is connected to the same backbone, so we have to
2806 ++ * remove this duplicate.
2807 ++ *
2808 ++ * Return: true if a packet is in the duplicate list, false otherwise.
2809 ++ */
2810 ++bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
2811 ++ struct sk_buff *skb)
2812 ++{
2813 ++ struct batadv_bcast_packet *bcast_packet;
2814 ++ u8 *payload_ptr;
2815 ++
2816 ++ bcast_packet = (struct batadv_bcast_packet *)skb->data;
2817 ++ payload_ptr = (u8 *)(bcast_packet + 1);
2818 ++
2819 ++ return batadv_bla_check_duplist(bat_priv, skb, payload_ptr,
2820 ++ bcast_packet->orig);
2821 ++}
2822 ++
2823 + /**
2824 + * batadv_bla_is_backbone_gw_orig() - Check if the originator is a gateway for
2825 + * the VLAN identified by vid.
2826 +@@ -1812,7 +1877,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
2827 + * @bat_priv: the bat priv with all the soft interface information
2828 + * @skb: the frame to be checked
2829 + * @vid: the VLAN ID of the frame
2830 +- * @is_bcast: the packet came in a broadcast packet type.
2831 ++ * @packet_type: the batman packet type this frame came in
2832 + *
2833 + * batadv_bla_rx avoidance checks if:
2834 + * * we have to race for a claim
2835 +@@ -1824,7 +1889,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
2836 + * further process the skb.
2837 + */
2838 + bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
2839 +- unsigned short vid, bool is_bcast)
2840 ++ unsigned short vid, int packet_type)
2841 + {
2842 + struct batadv_bla_backbone_gw *backbone_gw;
2843 + struct ethhdr *ethhdr;
2844 +@@ -1846,9 +1911,32 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
2845 + goto handled;
2846 +
2847 + if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
2848 +- /* don't allow broadcasts while requests are in flight */
2849 +- if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
2850 +- goto handled;
2851 ++ /* don't allow multicast packets while requests are in flight */
2852 ++ if (is_multicast_ether_addr(ethhdr->h_dest))
2853 ++ /* Both broadcast flooding or multicast-via-unicasts
2854 ++ * delivery might send to multiple backbone gateways
2855 ++ * sharing the same LAN and therefore need to coordinate
2856 ++ * which backbone gateway forwards into the LAN,
2857 ++ * by claiming the payload source address.
2858 ++ *
2859 ++ * Broadcast flooding and multicast-via-unicasts
2860 ++ * delivery use the following two batman packet types.
2861 ++ * Note: explicitly exclude BATADV_UNICAST_4ADDR,
2862 ++ * as the DHCP gateway feature will send explicitly
2863 ++ * to only one BLA gateway, so the claiming process
2864 ++ * should be avoided there.
2865 ++ */
2866 ++ if (packet_type == BATADV_BCAST ||
2867 ++ packet_type == BATADV_UNICAST)
2868 ++ goto handled;
2869 ++
2870 ++ /* potential duplicates from foreign BLA backbone gateways via
2871 ++ * multicast-in-unicast packets
2872 ++ */
2873 ++ if (is_multicast_ether_addr(ethhdr->h_dest) &&
2874 ++ packet_type == BATADV_UNICAST &&
2875 ++ batadv_bla_check_ucast_duplist(bat_priv, skb))
2876 ++ goto handled;
2877 +
2878 + ether_addr_copy(search_claim.addr, ethhdr->h_source);
2879 + search_claim.vid = vid;
2880 +@@ -1883,13 +1971,14 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
2881 + goto allow;
2882 + }
2883 +
2884 +- /* if it is a broadcast ... */
2885 +- if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
2886 ++ /* if it is a multicast ... */
2887 ++ if (is_multicast_ether_addr(ethhdr->h_dest) &&
2888 ++ (packet_type == BATADV_BCAST || packet_type == BATADV_UNICAST)) {
2889 + /* ... drop it. the responsible gateway is in charge.
2890 + *
2891 +- * We need to check is_bcast because with the gateway
2892 ++ * We need to check packet type because with the gateway
2893 + * feature, broadcasts (like DHCP requests) may be sent
2894 +- * using a unicast packet type.
2895 ++ * using a unicast 4 address packet type. See comment above.
2896 + */
2897 + goto handled;
2898 + } else {
2899 +diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
2900 +index 41edb2c4a3277..a81c41b636f93 100644
2901 +--- a/net/batman-adv/bridge_loop_avoidance.h
2902 ++++ b/net/batman-adv/bridge_loop_avoidance.h
2903 +@@ -35,7 +35,7 @@ static inline bool batadv_bla_is_loopdetect_mac(const uint8_t *mac)
2904 +
2905 + #ifdef CONFIG_BATMAN_ADV_BLA
2906 + bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
2907 +- unsigned short vid, bool is_bcast);
2908 ++ unsigned short vid, int packet_type);
2909 + bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
2910 + unsigned short vid);
2911 + bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
2912 +@@ -66,7 +66,7 @@ bool batadv_bla_check_claim(struct batadv_priv *bat_priv, u8 *addr,
2913 +
2914 + static inline bool batadv_bla_rx(struct batadv_priv *bat_priv,
2915 + struct sk_buff *skb, unsigned short vid,
2916 +- bool is_bcast)
2917 ++ int packet_type)
2918 + {
2919 + return false;
2920 + }
2921 +diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
2922 +index 9ebdc1e864b96..3aaa6612f8c9f 100644
2923 +--- a/net/batman-adv/multicast.c
2924 ++++ b/net/batman-adv/multicast.c
2925 +@@ -51,6 +51,7 @@
2926 + #include <uapi/linux/batadv_packet.h>
2927 + #include <uapi/linux/batman_adv.h>
2928 +
2929 ++#include "bridge_loop_avoidance.h"
2930 + #include "hard-interface.h"
2931 + #include "hash.h"
2932 + #include "log.h"
2933 +@@ -1434,6 +1435,35 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
2934 + return BATADV_FORW_ALL;
2935 + }
2936 +
2937 ++/**
2938 ++ * batadv_mcast_forw_send_orig() - send a multicast packet to an originator
2939 ++ * @bat_priv: the bat priv with all the soft interface information
2940 ++ * @skb: the multicast packet to send
2941 ++ * @vid: the vlan identifier
2942 ++ * @orig_node: the originator to send the packet to
2943 ++ *
2944 ++ * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
2945 ++ */
2946 ++int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
2947 ++ struct sk_buff *skb,
2948 ++ unsigned short vid,
2949 ++ struct batadv_orig_node *orig_node)
2950 ++{
2951 ++ /* Avoid sending multicast-in-unicast packets to other BLA
2952 ++ * gateways - they already got the frame from the LAN side
2953 ++ * we share with them.
2954 ++ * TODO: Refactor to take BLA into account earlier, to avoid
2955 ++ * reducing the mcast_fanout count.
2956 ++ */
2957 ++ if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig, vid)) {
2958 ++ dev_kfree_skb(skb);
2959 ++ return NET_XMIT_SUCCESS;
2960 ++ }
2961 ++
2962 ++ return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
2963 ++ orig_node, vid);
2964 ++}
2965 ++
2966 + /**
2967 + * batadv_mcast_forw_tt() - forwards a packet to multicast listeners
2968 + * @bat_priv: the bat priv with all the soft interface information
2969 +@@ -1471,8 +1501,8 @@ batadv_mcast_forw_tt(struct batadv_priv *bat_priv, struct sk_buff *skb,
2970 + break;
2971 + }
2972 +
2973 +- batadv_send_skb_unicast(bat_priv, newskb, BATADV_UNICAST, 0,
2974 +- orig_entry->orig_node, vid);
2975 ++ batadv_mcast_forw_send_orig(bat_priv, newskb, vid,
2976 ++ orig_entry->orig_node);
2977 + }
2978 + rcu_read_unlock();
2979 +
2980 +@@ -1513,8 +1543,7 @@ batadv_mcast_forw_want_all_ipv4(struct batadv_priv *bat_priv,
2981 + break;
2982 + }
2983 +
2984 +- batadv_send_skb_unicast(bat_priv, newskb, BATADV_UNICAST, 0,
2985 +- orig_node, vid);
2986 ++ batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
2987 + }
2988 + rcu_read_unlock();
2989 + return ret;
2990 +@@ -1551,8 +1580,7 @@ batadv_mcast_forw_want_all_ipv6(struct batadv_priv *bat_priv,
2991 + break;
2992 + }
2993 +
2994 +- batadv_send_skb_unicast(bat_priv, newskb, BATADV_UNICAST, 0,
2995 +- orig_node, vid);
2996 ++ batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
2997 + }
2998 + rcu_read_unlock();
2999 + return ret;
3000 +@@ -1618,8 +1646,7 @@ batadv_mcast_forw_want_all_rtr4(struct batadv_priv *bat_priv,
3001 + break;
3002 + }
3003 +
3004 +- batadv_send_skb_unicast(bat_priv, newskb, BATADV_UNICAST, 0,
3005 +- orig_node, vid);
3006 ++ batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
3007 + }
3008 + rcu_read_unlock();
3009 + return ret;
3010 +@@ -1656,8 +1683,7 @@ batadv_mcast_forw_want_all_rtr6(struct batadv_priv *bat_priv,
3011 + break;
3012 + }
3013 +
3014 +- batadv_send_skb_unicast(bat_priv, newskb, BATADV_UNICAST, 0,
3015 +- orig_node, vid);
3016 ++ batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
3017 + }
3018 + rcu_read_unlock();
3019 + return ret;
3020 +diff --git a/net/batman-adv/multicast.h b/net/batman-adv/multicast.h
3021 +index ebf825991ecd9..3e114bc5ca3bb 100644
3022 +--- a/net/batman-adv/multicast.h
3023 ++++ b/net/batman-adv/multicast.h
3024 +@@ -46,6 +46,11 @@ enum batadv_forw_mode
3025 + batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
3026 + struct batadv_orig_node **mcast_single_orig);
3027 +
3028 ++int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
3029 ++ struct sk_buff *skb,
3030 ++ unsigned short vid,
3031 ++ struct batadv_orig_node *orig_node);
3032 ++
3033 + int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
3034 + unsigned short vid);
3035 +
3036 +@@ -71,6 +76,16 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
3037 + return BATADV_FORW_ALL;
3038 + }
3039 +
3040 ++static inline int
3041 ++batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
3042 ++ struct sk_buff *skb,
3043 ++ unsigned short vid,
3044 ++ struct batadv_orig_node *orig_node)
3045 ++{
3046 ++ kfree_skb(skb);
3047 ++ return NET_XMIT_DROP;
3048 ++}
3049 ++
3050 + static inline int
3051 + batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
3052 + unsigned short vid)
3053 +diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
3054 +index d343382e96641..e6515df546a60 100644
3055 +--- a/net/batman-adv/routing.c
3056 ++++ b/net/batman-adv/routing.c
3057 +@@ -826,6 +826,10 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
3058 + vid = batadv_get_vid(skb, hdr_len);
3059 + ethhdr = (struct ethhdr *)(skb->data + hdr_len);
3060 +
3061 ++ /* do not reroute multicast frames in a unicast header */
3062 ++ if (is_multicast_ether_addr(ethhdr->h_dest))
3063 ++ return true;
3064 ++
3065 + /* check if the destination client was served by this node and it is now
3066 + * roaming. In this case, it means that the node has got a ROAM_ADV
3067 + * message and that it knows the new destination in the mesh to re-route
3068 +diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
3069 +index f1f1c86f34193..012b6d0b87ead 100644
3070 +--- a/net/batman-adv/soft-interface.c
3071 ++++ b/net/batman-adv/soft-interface.c
3072 +@@ -364,9 +364,8 @@ send:
3073 + goto dropped;
3074 + ret = batadv_send_skb_via_gw(bat_priv, skb, vid);
3075 + } else if (mcast_single_orig) {
3076 +- ret = batadv_send_skb_unicast(bat_priv, skb,
3077 +- BATADV_UNICAST, 0,
3078 +- mcast_single_orig, vid);
3079 ++ ret = batadv_mcast_forw_send_orig(bat_priv, skb, vid,
3080 ++ mcast_single_orig);
3081 + } else if (forw_mode == BATADV_FORW_SOME) {
3082 + ret = batadv_mcast_forw_send(bat_priv, skb, vid);
3083 + } else {
3084 +@@ -425,10 +424,10 @@ void batadv_interface_rx(struct net_device *soft_iface,
3085 + struct vlan_ethhdr *vhdr;
3086 + struct ethhdr *ethhdr;
3087 + unsigned short vid;
3088 +- bool is_bcast;
3089 ++ int packet_type;
3090 +
3091 + batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data;
3092 +- is_bcast = (batadv_bcast_packet->packet_type == BATADV_BCAST);
3093 ++ packet_type = batadv_bcast_packet->packet_type;
3094 +
3095 + skb_pull_rcsum(skb, hdr_size);
3096 + skb_reset_mac_header(skb);
3097 +@@ -471,7 +470,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
3098 + /* Let the bridge loop avoidance check the packet. If it will
3099 + * not handle it, we can safely push it up.
3100 + */
3101 +- if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
3102 ++ if (batadv_bla_rx(bat_priv, skb, vid, packet_type))
3103 + goto out;
3104 +
3105 + if (orig_node)
3106 +diff --git a/net/core/filter.c b/net/core/filter.c
3107 +index d13ea1642b974..0261531d4fda6 100644
3108 +--- a/net/core/filter.c
3109 ++++ b/net/core/filter.c
3110 +@@ -6998,8 +6998,6 @@ static int bpf_gen_ld_abs(const struct bpf_insn *orig,
3111 + bool indirect = BPF_MODE(orig->code) == BPF_IND;
3112 + struct bpf_insn *insn = insn_buf;
3113 +
3114 +- /* We're guaranteed here that CTX is in R6. */
3115 +- *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX);
3116 + if (!indirect) {
3117 + *insn++ = BPF_MOV64_IMM(BPF_REG_2, orig->imm);
3118 + } else {
3119 +@@ -7007,6 +7005,8 @@ static int bpf_gen_ld_abs(const struct bpf_insn *orig,
3120 + if (orig->imm)
3121 + *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, orig->imm);
3122 + }
3123 ++ /* We're guaranteed here that CTX is in R6. */
3124 ++ *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX);
3125 +
3126 + switch (BPF_SIZE(orig->code)) {
3127 + case BPF_B:
3128 +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
3129 +index b2a9d47cf86dd..c85186799d059 100644
3130 +--- a/net/mac80211/mlme.c
3131 ++++ b/net/mac80211/mlme.c
3132 +@@ -4853,6 +4853,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
3133 + struct ieee80211_supported_band *sband;
3134 + struct cfg80211_chan_def chandef;
3135 + bool is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ;
3136 ++ bool is_5ghz = cbss->channel->band == NL80211_BAND_5GHZ;
3137 + struct ieee80211_bss *bss = (void *)cbss->priv;
3138 + int ret;
3139 + u32 i;
3140 +@@ -4871,7 +4872,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
3141 + ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
3142 + }
3143 +
3144 +- if (!sband->vht_cap.vht_supported && !is_6ghz) {
3145 ++ if (!sband->vht_cap.vht_supported && is_5ghz) {
3146 + ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
3147 + ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
3148 + }
3149 +diff --git a/net/mac80211/util.c b/net/mac80211/util.c
3150 +index dd9f5c7a1ade6..7b1f3645603ca 100644
3151 +--- a/net/mac80211/util.c
3152 ++++ b/net/mac80211/util.c
3153 +@@ -3354,9 +3354,10 @@ bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata,
3154 + he_chandef.center_freq1 =
3155 + ieee80211_channel_to_frequency(he_6ghz_oper->ccfs0,
3156 + NL80211_BAND_6GHZ);
3157 +- he_chandef.center_freq2 =
3158 +- ieee80211_channel_to_frequency(he_6ghz_oper->ccfs1,
3159 +- NL80211_BAND_6GHZ);
3160 ++ if (support_80_80 || support_160)
3161 ++ he_chandef.center_freq2 =
3162 ++ ieee80211_channel_to_frequency(he_6ghz_oper->ccfs1,
3163 ++ NL80211_BAND_6GHZ);
3164 + }
3165 +
3166 + if (!cfg80211_chandef_valid(&he_chandef)) {
3167 +diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
3168 +index ab52811523e99..c829e4a753256 100644
3169 +--- a/net/mac802154/tx.c
3170 ++++ b/net/mac802154/tx.c
3171 +@@ -34,11 +34,11 @@ void ieee802154_xmit_worker(struct work_struct *work)
3172 + if (res)
3173 + goto err_tx;
3174 +
3175 +- ieee802154_xmit_complete(&local->hw, skb, false);
3176 +-
3177 + dev->stats.tx_packets++;
3178 + dev->stats.tx_bytes += skb->len;
3179 +
3180 ++ ieee802154_xmit_complete(&local->hw, skb, false);
3181 ++
3182 + return;
3183 +
3184 + err_tx:
3185 +@@ -78,6 +78,8 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
3186 +
3187 + /* async is priority, otherwise sync is fallback */
3188 + if (local->ops->xmit_async) {
3189 ++ unsigned int len = skb->len;
3190 ++
3191 + ret = drv_xmit_async(local, skb);
3192 + if (ret) {
3193 + ieee802154_wake_queue(&local->hw);
3194 +@@ -85,7 +87,7 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
3195 + }
3196 +
3197 + dev->stats.tx_packets++;
3198 +- dev->stats.tx_bytes += skb->len;
3199 ++ dev->stats.tx_bytes += len;
3200 + } else {
3201 + local->tx_skb = skb;
3202 + queue_work(local->workqueue, &local->tx_work);
3203 +diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
3204 +index 832eabecfbddc..c3a4214dc9588 100644
3205 +--- a/net/netfilter/nf_conntrack_netlink.c
3206 ++++ b/net/netfilter/nf_conntrack_netlink.c
3207 +@@ -851,7 +851,6 @@ static int ctnetlink_done(struct netlink_callback *cb)
3208 + }
3209 +
3210 + struct ctnetlink_filter {
3211 +- u_int32_t cta_flags;
3212 + u8 family;
3213 +
3214 + u_int32_t orig_flags;
3215 +@@ -906,10 +905,6 @@ static int ctnetlink_parse_tuple_filter(const struct nlattr * const cda[],
3216 + struct nf_conntrack_zone *zone,
3217 + u_int32_t flags);
3218 +
3219 +-/* applied on filters */
3220 +-#define CTA_FILTER_F_CTA_MARK (1 << 0)
3221 +-#define CTA_FILTER_F_CTA_MARK_MASK (1 << 1)
3222 +-
3223 + static struct ctnetlink_filter *
3224 + ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
3225 + {
3226 +@@ -930,14 +925,10 @@ ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
3227 + #ifdef CONFIG_NF_CONNTRACK_MARK
3228 + if (cda[CTA_MARK]) {
3229 + filter->mark.val = ntohl(nla_get_be32(cda[CTA_MARK]));
3230 +- filter->cta_flags |= CTA_FILTER_FLAG(CTA_MARK);
3231 +-
3232 +- if (cda[CTA_MARK_MASK]) {
3233 ++ if (cda[CTA_MARK_MASK])
3234 + filter->mark.mask = ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
3235 +- filter->cta_flags |= CTA_FILTER_FLAG(CTA_MARK_MASK);
3236 +- } else {
3237 ++ else
3238 + filter->mark.mask = 0xffffffff;
3239 +- }
3240 + } else if (cda[CTA_MARK_MASK]) {
3241 + err = -EINVAL;
3242 + goto err_filter;
3243 +@@ -1117,11 +1108,7 @@ static int ctnetlink_filter_match(struct nf_conn *ct, void *data)
3244 + }
3245 +
3246 + #ifdef CONFIG_NF_CONNTRACK_MARK
3247 +- if ((filter->cta_flags & CTA_FILTER_FLAG(CTA_MARK_MASK)) &&
3248 +- (ct->mark & filter->mark.mask) != filter->mark.val)
3249 +- goto ignore_entry;
3250 +- else if ((filter->cta_flags & CTA_FILTER_FLAG(CTA_MARK)) &&
3251 +- ct->mark != filter->mark.val)
3252 ++ if ((ct->mark & filter->mark.mask) != filter->mark.val)
3253 + goto ignore_entry;
3254 + #endif
3255 +
3256 +@@ -1404,7 +1391,8 @@ ctnetlink_parse_tuple_filter(const struct nlattr * const cda[],
3257 + if (err < 0)
3258 + return err;
3259 +
3260 +-
3261 ++ if (l3num != NFPROTO_IPV4 && l3num != NFPROTO_IPV6)
3262 ++ return -EOPNOTSUPP;
3263 + tuple->src.l3num = l3num;
3264 +
3265 + if (flags & CTA_FILTER_FLAG(CTA_IP_DST) ||
3266 +diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
3267 +index a0560d175a7ff..aaf4293ddd459 100644
3268 +--- a/net/netfilter/nf_conntrack_proto.c
3269 ++++ b/net/netfilter/nf_conntrack_proto.c
3270 +@@ -565,6 +565,7 @@ static int nf_ct_netns_inet_get(struct net *net)
3271 + int err;
3272 +
3273 + err = nf_ct_netns_do_get(net, NFPROTO_IPV4);
3274 ++#if IS_ENABLED(CONFIG_IPV6)
3275 + if (err < 0)
3276 + goto err1;
3277 + err = nf_ct_netns_do_get(net, NFPROTO_IPV6);
3278 +@@ -575,6 +576,7 @@ static int nf_ct_netns_inet_get(struct net *net)
3279 + err2:
3280 + nf_ct_netns_put(net, NFPROTO_IPV4);
3281 + err1:
3282 ++#endif
3283 + return err;
3284 + }
3285 +
3286 +diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
3287 +index 7bc6537f3ccb5..b37bd02448d8c 100644
3288 +--- a/net/netfilter/nft_meta.c
3289 ++++ b/net/netfilter/nft_meta.c
3290 +@@ -147,11 +147,11 @@ nft_meta_get_eval_skugid(enum nft_meta_keys key,
3291 +
3292 + switch (key) {
3293 + case NFT_META_SKUID:
3294 +- *dest = from_kuid_munged(&init_user_ns,
3295 ++ *dest = from_kuid_munged(sock_net(sk)->user_ns,
3296 + sock->file->f_cred->fsuid);
3297 + break;
3298 + case NFT_META_SKGID:
3299 +- *dest = from_kgid_munged(&init_user_ns,
3300 ++ *dest = from_kgid_munged(sock_net(sk)->user_ns,
3301 + sock->file->f_cred->fsgid);
3302 + break;
3303 + default:
3304 +diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
3305 +index c537272f9c7ed..183d2465df7a3 100644
3306 +--- a/net/sunrpc/svcsock.c
3307 ++++ b/net/sunrpc/svcsock.c
3308 +@@ -228,7 +228,7 @@ static int svc_one_sock_name(struct svc_sock *svsk, char *buf, int remaining)
3309 + static void svc_flush_bvec(const struct bio_vec *bvec, size_t size, size_t seek)
3310 + {
3311 + struct bvec_iter bi = {
3312 +- .bi_size = size,
3313 ++ .bi_size = size + seek,
3314 + };
3315 + struct bio_vec bv;
3316 +
3317 +diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
3318 +index faf74850a1b52..27026f587fa61 100644
3319 +--- a/net/wireless/Kconfig
3320 ++++ b/net/wireless/Kconfig
3321 +@@ -217,6 +217,7 @@ config LIB80211_CRYPT_WEP
3322 +
3323 + config LIB80211_CRYPT_CCMP
3324 + tristate
3325 ++ select CRYPTO
3326 + select CRYPTO_AES
3327 + select CRYPTO_CCM
3328 +
3329 +diff --git a/net/wireless/util.c b/net/wireless/util.c
3330 +index a72d2ad6ade8b..0f95844e73d80 100644
3331 +--- a/net/wireless/util.c
3332 ++++ b/net/wireless/util.c
3333 +@@ -95,7 +95,7 @@ u32 ieee80211_channel_to_freq_khz(int chan, enum nl80211_band band)
3334 + /* see 802.11ax D6.1 27.3.23.2 */
3335 + if (chan == 2)
3336 + return MHZ_TO_KHZ(5935);
3337 +- if (chan <= 253)
3338 ++ if (chan <= 233)
3339 + return MHZ_TO_KHZ(5950 + chan * 5);
3340 + break;
3341 + case NL80211_BAND_60GHZ:
3342 +diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
3343 +index e97db37354e4f..b010bfde01490 100644
3344 +--- a/net/xdp/xdp_umem.c
3345 ++++ b/net/xdp/xdp_umem.c
3346 +@@ -303,10 +303,10 @@ static int xdp_umem_account_pages(struct xdp_umem *umem)
3347 +
3348 + static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
3349 + {
3350 ++ u32 npgs_rem, chunk_size = mr->chunk_size, headroom = mr->headroom;
3351 + bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
3352 +- u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
3353 + u64 npgs, addr = mr->addr, size = mr->len;
3354 +- unsigned int chunks, chunks_per_page;
3355 ++ unsigned int chunks, chunks_rem;
3356 + int err;
3357 +
3358 + if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
3359 +@@ -336,19 +336,18 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
3360 + if ((addr + size) < addr)
3361 + return -EINVAL;
3362 +
3363 +- npgs = size >> PAGE_SHIFT;
3364 ++ npgs = div_u64_rem(size, PAGE_SIZE, &npgs_rem);
3365 ++ if (npgs_rem)
3366 ++ npgs++;
3367 + if (npgs > U32_MAX)
3368 + return -EINVAL;
3369 +
3370 +- chunks = (unsigned int)div_u64(size, chunk_size);
3371 ++ chunks = (unsigned int)div_u64_rem(size, chunk_size, &chunks_rem);
3372 + if (chunks == 0)
3373 + return -EINVAL;
3374 +
3375 +- if (!unaligned_chunks) {
3376 +- chunks_per_page = PAGE_SIZE / chunk_size;
3377 +- if (chunks < chunks_per_page || chunks % chunks_per_page)
3378 +- return -EINVAL;
3379 +- }
3380 ++ if (!unaligned_chunks && chunks_rem)
3381 ++ return -EINVAL;
3382 +
3383 + if (headroom >= chunk_size - XDP_PACKET_HEADROOM)
3384 + return -EINVAL;
3385 +diff --git a/security/device_cgroup.c b/security/device_cgroup.c
3386 +index 43ab0ad45c1b6..04375df52fc9a 100644
3387 +--- a/security/device_cgroup.c
3388 ++++ b/security/device_cgroup.c
3389 +@@ -354,7 +354,8 @@ static bool match_exception_partial(struct list_head *exceptions, short type,
3390 + {
3391 + struct dev_exception_item *ex;
3392 +
3393 +- list_for_each_entry_rcu(ex, exceptions, list) {
3394 ++ list_for_each_entry_rcu(ex, exceptions, list,
3395 ++ lockdep_is_held(&devcgroup_mutex)) {
3396 + if ((type & DEVCG_DEV_BLOCK) && !(ex->type & DEVCG_DEV_BLOCK))
3397 + continue;
3398 + if ((type & DEVCG_DEV_CHAR) && !(ex->type & DEVCG_DEV_CHAR))
3399 +diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c
3400 +index 496dcde9715d6..9790f5108a166 100644
3401 +--- a/sound/pci/asihpi/hpioctl.c
3402 ++++ b/sound/pci/asihpi/hpioctl.c
3403 +@@ -343,7 +343,7 @@ int asihpi_adapter_probe(struct pci_dev *pci_dev,
3404 + struct hpi_message hm;
3405 + struct hpi_response hr;
3406 + struct hpi_adapter adapter;
3407 +- struct hpi_pci pci;
3408 ++ struct hpi_pci pci = { 0 };
3409 +
3410 + memset(&adapter, 0, sizeof(adapter));
3411 +
3412 +@@ -499,7 +499,7 @@ int asihpi_adapter_probe(struct pci_dev *pci_dev,
3413 + return 0;
3414 +
3415 + err:
3416 +- for (idx = 0; idx < HPI_MAX_ADAPTER_MEM_SPACES; idx++) {
3417 ++ while (--idx >= 0) {
3418 + if (pci.ap_mem_base[idx]) {
3419 + iounmap(pci.ap_mem_base[idx]);
3420 + pci.ap_mem_base[idx] = NULL;
3421 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3422 +index 77e2e6ede31dc..601683e05ccca 100644
3423 +--- a/sound/pci/hda/patch_realtek.c
3424 ++++ b/sound/pci/hda/patch_realtek.c
3425 +@@ -3419,7 +3419,11 @@ static void alc256_shutup(struct hda_codec *codec)
3426 +
3427 + /* 3k pull low control for Headset jack. */
3428 + /* NOTE: call this before clearing the pin, otherwise codec stalls */
3429 +- alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
3430 ++ /* If the 3k pulldown control is disabled for alc257, mic detection will not work correctly
3431 ++ * when booting with a headset plugged in. So skip setting it for the alc257 codec
3432 ++ */
3433 ++ if (codec->core.vendor_id != 0x10ec0257)
3434 ++ alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
3435 +
3436 + if (!spec->no_shutup_pins)
3437 + snd_hda_codec_write(codec, hp_pin, 0,
3438 +@@ -6062,6 +6066,7 @@ static void alc_fixup_thinkpad_acpi(struct hda_codec *codec,
3439 + #include "hp_x360_helper.c"
3440 +
3441 + enum {
3442 ++ ALC269_FIXUP_GPIO2,
3443 + ALC269_FIXUP_SONY_VAIO,
3444 + ALC275_FIXUP_SONY_VAIO_GPIO2,
3445 + ALC269_FIXUP_DELL_M101Z,
3446 +@@ -6243,6 +6248,10 @@ enum {
3447 + };
3448 +
3449 + static const struct hda_fixup alc269_fixups[] = {
3450 ++ [ALC269_FIXUP_GPIO2] = {
3451 ++ .type = HDA_FIXUP_FUNC,
3452 ++ .v.func = alc_fixup_gpio2,
3453 ++ },
3454 + [ALC269_FIXUP_SONY_VAIO] = {
3455 + .type = HDA_FIXUP_PINCTLS,
3456 + .v.pins = (const struct hda_pintbl[]) {
3457 +@@ -7062,6 +7071,8 @@ static const struct hda_fixup alc269_fixups[] = {
3458 + [ALC233_FIXUP_LENOVO_MULTI_CODECS] = {
3459 + .type = HDA_FIXUP_FUNC,
3460 + .v.func = alc233_alc662_fixup_lenovo_dual_codecs,
3461 ++ .chained = true,
3462 ++ .chain_id = ALC269_FIXUP_GPIO2
3463 + },
3464 + [ALC233_FIXUP_ACER_HEADSET_MIC] = {
3465 + .type = HDA_FIXUP_VERBS,
3466 +diff --git a/sound/soc/codecs/pcm3168a.c b/sound/soc/codecs/pcm3168a.c
3467 +index 9711fab296ebc..045c6f8b26bef 100644
3468 +--- a/sound/soc/codecs/pcm3168a.c
3469 ++++ b/sound/soc/codecs/pcm3168a.c
3470 +@@ -306,6 +306,13 @@ static int pcm3168a_set_dai_sysclk(struct snd_soc_dai *dai,
3471 + struct pcm3168a_priv *pcm3168a = snd_soc_component_get_drvdata(dai->component);
3472 + int ret;
3473 +
3474 ++ /*
3475 ++ * Some sound cards set 0 Hz as a reset,
3476 ++ * but it is impossible to set. Ignore it here
3477 ++ */
3478 ++ if (freq == 0)
3479 ++ return 0;
3480 ++
3481 + if (freq > PCM3168A_MAX_SYSCLK)
3482 + return -EINVAL;
3483 +
3484 +diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
3485 +index 55d0b9be6ff00..58f21329d0e99 100644
3486 +--- a/sound/soc/codecs/wm8994.c
3487 ++++ b/sound/soc/codecs/wm8994.c
3488 +@@ -3491,6 +3491,8 @@ int wm8994_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *
3489 + return -EINVAL;
3490 + }
3491 +
3492 ++ pm_runtime_get_sync(component->dev);
3493 ++
3494 + switch (micbias) {
3495 + case 1:
3496 + micdet = &wm8994->micdet[0];
3497 +@@ -3538,6 +3540,8 @@ int wm8994_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *
3498 +
3499 + snd_soc_dapm_sync(dapm);
3500 +
3501 ++ pm_runtime_put(component->dev);
3502 ++
3503 + return 0;
3504 + }
3505 + EXPORT_SYMBOL_GPL(wm8994_mic_detect);
3506 +@@ -3905,6 +3909,8 @@ int wm8958_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *
3507 + return -EINVAL;
3508 + }
3509 +
3510 ++ pm_runtime_get_sync(component->dev);
3511 ++
3512 + if (jack) {
3513 + snd_soc_dapm_force_enable_pin(dapm, "CLK_SYS");
3514 + snd_soc_dapm_sync(dapm);
3515 +@@ -3973,6 +3979,8 @@ int wm8958_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *
3516 + snd_soc_dapm_sync(dapm);
3517 + }
3518 +
3519 ++ pm_runtime_put(component->dev);
3520 ++
3521 + return 0;
3522 + }
3523 + EXPORT_SYMBOL_GPL(wm8958_mic_detect);
3524 +@@ -4166,11 +4174,13 @@ static int wm8994_component_probe(struct snd_soc_component *component)
3525 + wm8994->hubs.dcs_readback_mode = 2;
3526 + break;
3527 + }
3528 ++ wm8994->hubs.micd_scthr = true;
3529 + break;
3530 +
3531 + case WM8958:
3532 + wm8994->hubs.dcs_readback_mode = 1;
3533 + wm8994->hubs.hp_startup_mode = 1;
3534 ++ wm8994->hubs.micd_scthr = true;
3535 +
3536 + switch (control->revision) {
3537 + case 0:
3538 +diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
3539 +index e93af7edd8f75..dd421e2fe7b21 100644
3540 +--- a/sound/soc/codecs/wm_hubs.c
3541 ++++ b/sound/soc/codecs/wm_hubs.c
3542 +@@ -1223,6 +1223,9 @@ int wm_hubs_handle_analogue_pdata(struct snd_soc_component *component,
3543 + snd_soc_component_update_bits(component, WM8993_ADDITIONAL_CONTROL,
3544 + WM8993_LINEOUT2_FB, WM8993_LINEOUT2_FB);
3545 +
3546 ++ if (!hubs->micd_scthr)
3547 ++ return 0;
3548 ++
3549 + snd_soc_component_update_bits(component, WM8993_MICBIAS,
3550 + WM8993_JD_SCTHR_MASK | WM8993_JD_THR_MASK |
3551 + WM8993_MICB1_LVL | WM8993_MICB2_LVL,
3552 +diff --git a/sound/soc/codecs/wm_hubs.h b/sound/soc/codecs/wm_hubs.h
3553 +index 4b8e5f0d6e32d..988b29e630607 100644
3554 +--- a/sound/soc/codecs/wm_hubs.h
3555 ++++ b/sound/soc/codecs/wm_hubs.h
3556 +@@ -27,6 +27,7 @@ struct wm_hubs_data {
3557 + int hp_startup_mode;
3558 + int series_startup;
3559 + int no_series_update;
3560 ++ bool micd_scthr;
3561 +
3562 + bool no_cache_dac_hp_direct;
3563 + struct list_head dcs_cache;
3564 +diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
3565 +index 1fdb70b9e4788..5f885062145fe 100644
3566 +--- a/sound/soc/intel/boards/bytcr_rt5640.c
3567 ++++ b/sound/soc/intel/boards/bytcr_rt5640.c
3568 +@@ -591,6 +591,16 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
3569 + BYT_RT5640_SSP0_AIF1 |
3570 + BYT_RT5640_MCLK_EN),
3571 + },
3572 ++ { /* MPMAN Converter 9, similar hw as the I.T.Works TW891 2-in-1 */
3573 ++ .matches = {
3574 ++ DMI_MATCH(DMI_SYS_VENDOR, "MPMAN"),
3575 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Converter9"),
3576 ++ },
3577 ++ .driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
3578 ++ BYT_RT5640_MONO_SPEAKER |
3579 ++ BYT_RT5640_SSP0_AIF1 |
3580 ++ BYT_RT5640_MCLK_EN),
3581 ++ },
3582 + {
3583 + /* MPMAN MPWIN895CL */
3584 + .matches = {
3585 +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
3586 +index bf2d521b6768c..e680416a6a8de 100644
3587 +--- a/sound/usb/quirks.c
3588 ++++ b/sound/usb/quirks.c
3589 +@@ -1668,12 +1668,13 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
3590 + && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
3591 + msleep(20);
3592 +
3593 +- /* Zoom R16/24, Logitech H650e, Jabra 550a, Kingston HyperX needs a tiny
3594 +- * delay here, otherwise requests like get/set frequency return as
3595 +- * failed despite actually succeeding.
3596 ++ /* Zoom R16/24, Logitech H650e/H570e, Jabra 550a, Kingston HyperX
3597 ++ * needs a tiny delay here, otherwise requests like get/set
3598 ++ * frequency return as failed despite actually succeeding.
3599 + */
3600 + if ((chip->usb_id == USB_ID(0x1686, 0x00dd) ||
3601 + chip->usb_id == USB_ID(0x046d, 0x0a46) ||
3602 ++ chip->usb_id == USB_ID(0x046d, 0x0a56) ||
3603 + chip->usb_id == USB_ID(0x0b0e, 0x0349) ||
3604 + chip->usb_id == USB_ID(0x0951, 0x16ad)) &&
3605 + (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
3606 +diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
3607 +index bf8ed134cb8a3..c820b0be9d637 100644
3608 +--- a/tools/lib/bpf/Makefile
3609 ++++ b/tools/lib/bpf/Makefile
3610 +@@ -152,6 +152,7 @@ GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN_SHARED) | \
3611 + awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}' | \
3612 + sort -u | wc -l)
3613 + VERSIONED_SYM_COUNT = $(shell readelf --dyn-syms --wide $(OUTPUT)libbpf.so | \
3614 ++ awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}' | \
3615 + grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l)
3616 +
3617 + CMD_TARGETS = $(LIB_TARGET) $(PC_FILE)
3618 +@@ -219,6 +220,7 @@ check_abi: $(OUTPUT)libbpf.so
3619 + awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}'| \
3620 + sort -u > $(OUTPUT)libbpf_global_syms.tmp; \
3621 + readelf --dyn-syms --wide $(OUTPUT)libbpf.so | \
3622 ++ awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}'| \
3623 + grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | \
3624 + sort -u > $(OUTPUT)libbpf_versioned_syms.tmp; \
3625 + diff -u $(OUTPUT)libbpf_global_syms.tmp \
3626 +diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
3627 +index 3ac0094706b81..236c91aff48f8 100644
3628 +--- a/tools/lib/bpf/libbpf.c
3629 ++++ b/tools/lib/bpf/libbpf.c
3630 +@@ -5030,8 +5030,8 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj,
3631 + int i, j, nrels, new_sz;
3632 + const struct btf_var_secinfo *vi = NULL;
3633 + const struct btf_type *sec, *var, *def;
3634 ++ struct bpf_map *map = NULL, *targ_map;
3635 + const struct btf_member *member;
3636 +- struct bpf_map *map, *targ_map;
3637 + const char *name, *mname;
3638 + Elf_Data *symbols;
3639 + unsigned int moff;
3640 +diff --git a/tools/objtool/check.c b/tools/objtool/check.c
3641 +index 5e0d70a89fb87..773e6c7ee5f93 100644
3642 +--- a/tools/objtool/check.c
3643 ++++ b/tools/objtool/check.c
3644 +@@ -619,7 +619,7 @@ static int add_jump_destinations(struct objtool_file *file)
3645 + if (!is_static_jump(insn))
3646 + continue;
3647 +
3648 +- if (insn->ignore || insn->offset == FAKE_JUMP_OFFSET)
3649 ++ if (insn->offset == FAKE_JUMP_OFFSET)
3650 + continue;
3651 +
3652 + rela = find_rela_by_dest_range(file->elf, insn->sec,