Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.14 commit in: /
Date: Thu, 07 Oct 2021 10:36:48
Message-Id: 1633602986.efd47c3e12b1d6d48aee11e5dd709dd719a3a0e5.mpagano@gentoo
commit: efd47c3e12b1d6d48aee11e5dd709dd719a3a0e5
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Oct 7 10:36:26 2021 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Oct 7 10:36:26 2021 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=efd47c3e

Linux patch 5.14.10

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1009_linux-5.14.10.patch | 6835 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 6839 insertions(+)

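Of the fixes bundled in 5.14.10 below, the MIPS cBPF JIT change is representative of the defensive pattern applied in several places: MIPS branch instructions can only encode a signed 18-bit byte displacement, so any computed branch offset outside [-0x20000, 0x1ffff] must be rejected with -E2BIG rather than silently truncated into a jump to the wrong address. A minimal standalone sketch of that bounds check follows; is_bad_offset() mirrors the helper the patch adds to arch/mips/net/bpf_jit.c, while the main() harness is purely illustrative and not part of the kernel change:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* MIPS branches take a signed 16-bit word offset, i.e. a signed
 * 18-bit byte offset; anything outside [-0x20000, 0x1ffff] cannot
 * be encoded in the instruction. */
static bool is_bad_offset(int b_off)
{
	return b_off > 0x1ffff || b_off < -0x20000;
}

int main(void)
{
	const int offsets[] = { 0x1ffff, 0x20000, -0x20000, -0x20001 };
	size_t i;

	for (i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
		/* The JIT bails out with -E2BIG on an unencodable offset. */
		int rc = is_bad_offset(offsets[i]) ? -E2BIG : 0;

		printf("offset %d -> %d\n", offsets[i], rc);
	}
	return 0;
}

In the patch itself the same check guards every emit_b()/emit_bcond() call, and a failing build_body() now makes bpf_jit_compile() free the partially built image and bail out instead of installing a truncated program.
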
diff --git a/0000_README b/0000_README
index 2d15afd..11074a3 100644
--- a/0000_README
+++ b/0000_README
@@ -83,6 +83,10 @@ Patch: 1008_linux-5.14.9.patch
From: http://www.kernel.org
Desc: Linux 5.14.9

+Patch: 1009_linux-5.14.10.patch
+From: http://www.kernel.org
+Desc: Linux 5.14.10
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1009_linux-5.14.10.patch b/1009_linux-5.14.10.patch
new file mode 100644
index 0000000..3a2fa0e
--- /dev/null
+++ b/1009_linux-5.14.10.patch
37 @@ -0,0 +1,6835 @@
38 +diff --git a/Makefile b/Makefile
39 +index 50c17e63c54ef..9f99a61d2589b 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 5
45 + PATCHLEVEL = 14
46 +-SUBLEVEL = 9
47 ++SUBLEVEL = 10
48 + EXTRAVERSION =
49 + NAME = Opossums on Parade
50 +
51 +diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
52 +index 9dd76fbb7c6b2..ff9e842cec0fb 100644
53 +--- a/arch/m68k/kernel/entry.S
54 ++++ b/arch/m68k/kernel/entry.S
55 +@@ -186,6 +186,8 @@ ENTRY(ret_from_signal)
56 + movel %curptr@(TASK_STACK),%a1
57 + tstb %a1@(TINFO_FLAGS+2)
58 + jge 1f
59 ++ lea %sp@(SWITCH_STACK_SIZE),%a1
60 ++ movel %a1,%curptr@(TASK_THREAD+THREAD_ESP0)
61 + jbsr syscall_trace
62 + 1: RESTORE_SWITCH_STACK
63 + addql #4,%sp
64 +diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
65 +index 0af88622c6192..cb6d22439f71b 100644
66 +--- a/arch/mips/net/bpf_jit.c
67 ++++ b/arch/mips/net/bpf_jit.c
68 +@@ -662,6 +662,11 @@ static void build_epilogue(struct jit_ctx *ctx)
69 + ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative : func) : \
70 + func##_positive)
71 +
72 ++static bool is_bad_offset(int b_off)
73 ++{
74 ++ return b_off > 0x1ffff || b_off < -0x20000;
75 ++}
76 ++
77 + static int build_body(struct jit_ctx *ctx)
78 + {
79 + const struct bpf_prog *prog = ctx->skf;
80 +@@ -728,7 +733,10 @@ load_common:
81 + /* Load return register on DS for failures */
82 + emit_reg_move(r_ret, r_zero, ctx);
83 + /* Return with error */
84 +- emit_b(b_imm(prog->len, ctx), ctx);
85 ++ b_off = b_imm(prog->len, ctx);
86 ++ if (is_bad_offset(b_off))
87 ++ return -E2BIG;
88 ++ emit_b(b_off, ctx);
89 + emit_nop(ctx);
90 + break;
91 + case BPF_LD | BPF_W | BPF_IND:
92 +@@ -775,8 +783,10 @@ load_ind:
93 + emit_jalr(MIPS_R_RA, r_s0, ctx);
94 + emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */
95 + /* Check the error value */
96 +- emit_bcond(MIPS_COND_NE, r_ret, 0,
97 +- b_imm(prog->len, ctx), ctx);
98 ++ b_off = b_imm(prog->len, ctx);
99 ++ if (is_bad_offset(b_off))
100 ++ return -E2BIG;
101 ++ emit_bcond(MIPS_COND_NE, r_ret, 0, b_off, ctx);
102 + emit_reg_move(r_ret, r_zero, ctx);
103 + /* We are good */
104 + /* X <- P[1:K] & 0xf */
105 +@@ -855,8 +865,10 @@ load_ind:
106 + /* A /= X */
107 + ctx->flags |= SEEN_X | SEEN_A;
108 + /* Check if r_X is zero */
109 +- emit_bcond(MIPS_COND_EQ, r_X, r_zero,
110 +- b_imm(prog->len, ctx), ctx);
111 ++ b_off = b_imm(prog->len, ctx);
112 ++ if (is_bad_offset(b_off))
113 ++ return -E2BIG;
114 ++ emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);
115 + emit_load_imm(r_ret, 0, ctx); /* delay slot */
116 + emit_div(r_A, r_X, ctx);
117 + break;
118 +@@ -864,8 +876,10 @@ load_ind:
119 + /* A %= X */
120 + ctx->flags |= SEEN_X | SEEN_A;
121 + /* Check if r_X is zero */
122 +- emit_bcond(MIPS_COND_EQ, r_X, r_zero,
123 +- b_imm(prog->len, ctx), ctx);
124 ++ b_off = b_imm(prog->len, ctx);
125 ++ if (is_bad_offset(b_off))
126 ++ return -E2BIG;
127 ++ emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);
128 + emit_load_imm(r_ret, 0, ctx); /* delay slot */
129 + emit_mod(r_A, r_X, ctx);
130 + break;
131 +@@ -926,7 +940,10 @@ load_ind:
132 + break;
133 + case BPF_JMP | BPF_JA:
134 + /* pc += K */
135 +- emit_b(b_imm(i + k + 1, ctx), ctx);
136 ++ b_off = b_imm(i + k + 1, ctx);
137 ++ if (is_bad_offset(b_off))
138 ++ return -E2BIG;
139 ++ emit_b(b_off, ctx);
140 + emit_nop(ctx);
141 + break;
142 + case BPF_JMP | BPF_JEQ | BPF_K:
143 +@@ -1056,12 +1073,16 @@ jmp_cmp:
144 + break;
145 + case BPF_RET | BPF_A:
146 + ctx->flags |= SEEN_A;
147 +- if (i != prog->len - 1)
148 ++ if (i != prog->len - 1) {
149 + /*
150 + * If this is not the last instruction
151 + * then jump to the epilogue
152 + */
153 +- emit_b(b_imm(prog->len, ctx), ctx);
154 ++ b_off = b_imm(prog->len, ctx);
155 ++ if (is_bad_offset(b_off))
156 ++ return -E2BIG;
157 ++ emit_b(b_off, ctx);
158 ++ }
159 + emit_reg_move(r_ret, r_A, ctx); /* delay slot */
160 + break;
161 + case BPF_RET | BPF_K:
162 +@@ -1075,7 +1096,10 @@ jmp_cmp:
163 + * If this is not the last instruction
164 + * then jump to the epilogue
165 + */
166 +- emit_b(b_imm(prog->len, ctx), ctx);
167 ++ b_off = b_imm(prog->len, ctx);
168 ++ if (is_bad_offset(b_off))
169 ++ return -E2BIG;
170 ++ emit_b(b_off, ctx);
171 + emit_nop(ctx);
172 + }
173 + break;
174 +@@ -1133,8 +1157,10 @@ jmp_cmp:
175 + /* Load *dev pointer */
176 + emit_load_ptr(r_s0, r_skb, off, ctx);
177 + /* error (0) in the delay slot */
178 +- emit_bcond(MIPS_COND_EQ, r_s0, r_zero,
179 +- b_imm(prog->len, ctx), ctx);
180 ++ b_off = b_imm(prog->len, ctx);
181 ++ if (is_bad_offset(b_off))
182 ++ return -E2BIG;
183 ++ emit_bcond(MIPS_COND_EQ, r_s0, r_zero, b_off, ctx);
184 + emit_reg_move(r_ret, r_zero, ctx);
185 + if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
186 + BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4);
187 +@@ -1244,7 +1270,10 @@ void bpf_jit_compile(struct bpf_prog *fp)
188 +
189 + /* Generate the actual JIT code */
190 + build_prologue(&ctx);
191 +- build_body(&ctx);
192 ++ if (build_body(&ctx)) {
193 ++ module_memfree(ctx.target);
194 ++ goto out;
195 ++ }
196 + build_epilogue(&ctx);
197 +
198 + /* Update the icache */
+diff --git a/arch/nios2/Kconfig.debug b/arch/nios2/Kconfig.debug
+index a8bc06e96ef58..ca1beb87f987c 100644
+--- a/arch/nios2/Kconfig.debug
++++ b/arch/nios2/Kconfig.debug
+@@ -3,9 +3,10 @@
+ config EARLY_PRINTK
+ bool "Activate early kernel debugging"
+ default y
++ depends on TTY
+ select SERIAL_CORE_CONSOLE
+ help
+- Enable early printk on console
++ Enable early printk on console.
+ This is useful for kernel debugging when your machine crashes very
+ early before the console code is initialized.
+ You should normally say N here, unless you want to debug such a crash.
+diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c
+index cf8d687a2644a..40bc8fb75e0b5 100644
+--- a/arch/nios2/kernel/setup.c
++++ b/arch/nios2/kernel/setup.c
+@@ -149,8 +149,6 @@ static void __init find_limits(unsigned long *min, unsigned long *max_low,
+
+ void __init setup_arch(char **cmdline_p)
+ {
+- int dram_start;
+-
+ console_verbose();
+
+ memory_start = memblock_start_of_DRAM();
+diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h
+index 20f169b6db4ec..d97301d9d0b8c 100644
+--- a/arch/s390/include/asm/ccwgroup.h
++++ b/arch/s390/include/asm/ccwgroup.h
+@@ -57,7 +57,7 @@ struct ccwgroup_device *get_ccwgroupdev_by_busid(struct ccwgroup_driver *gdrv,
+ char *bus_id);
+
+ extern int ccwgroup_set_online(struct ccwgroup_device *gdev);
+-extern int ccwgroup_set_offline(struct ccwgroup_device *gdev);
++int ccwgroup_set_offline(struct ccwgroup_device *gdev, bool call_gdrv);
+
+ extern int ccwgroup_probe_ccwdev(struct ccw_device *cdev);
+ extern void ccwgroup_remove_ccwdev(struct ccw_device *cdev);
+diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
+index 388643ca2177e..0fc961bef299c 100644
+--- a/arch/x86/crypto/aesni-intel_glue.c
++++ b/arch/x86/crypto/aesni-intel_glue.c
+@@ -849,7 +849,7 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
+ return -EINVAL;
+
+ err = skcipher_walk_virt(&walk, req, false);
+- if (err)
++ if (!walk.nbytes)
+ return err;
+
+ if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index ac6fd2dabf6a2..482224444a1ee 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -263,6 +263,7 @@ static struct event_constraint intel_icl_event_constraints[] = {
+ INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
+ INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
+ INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
++ INTEL_EVENT_CONSTRAINT(0xef, 0xf),
+ INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
+ EVENT_CONSTRAINT_END
+ };
+diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h
+index 87bd6025d91d4..6a5f3acf2b331 100644
+--- a/arch/x86/include/asm/kvm_page_track.h
++++ b/arch/x86/include/asm/kvm_page_track.h
+@@ -46,7 +46,7 @@ struct kvm_page_track_notifier_node {
+ struct kvm_page_track_notifier_node *node);
+ };
+
+-void kvm_page_track_init(struct kvm *kvm);
++int kvm_page_track_init(struct kvm *kvm);
+ void kvm_page_track_cleanup(struct kvm *kvm);
+
+ void kvm_page_track_free_memslot(struct kvm_memory_slot *slot);
+diff --git a/arch/x86/include/asm/kvmclock.h b/arch/x86/include/asm/kvmclock.h
+index eceea92990974..6c57651921028 100644
+--- a/arch/x86/include/asm/kvmclock.h
++++ b/arch/x86/include/asm/kvmclock.h
+@@ -2,6 +2,20 @@
+ #ifndef _ASM_X86_KVM_CLOCK_H
+ #define _ASM_X86_KVM_CLOCK_H
+
++#include <linux/percpu.h>
++
+ extern struct clocksource kvm_clock;
+
++DECLARE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
++
++static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
++{
++ return &this_cpu_read(hv_clock_per_cpu)->pvti;
++}
++
++static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
++{
++ return this_cpu_read(hv_clock_per_cpu);
++}
++
+ #endif /* _ASM_X86_KVM_CLOCK_H */
+diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
+index ad273e5861c1b..73c74b961d0fd 100644
+--- a/arch/x86/kernel/kvmclock.c
++++ b/arch/x86/kernel/kvmclock.c
+@@ -49,18 +49,9 @@ early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
+ static struct pvclock_vsyscall_time_info
+ hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
+ static struct pvclock_wall_clock wall_clock __bss_decrypted;
+-static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
+ static struct pvclock_vsyscall_time_info *hvclock_mem;
+-
+-static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
+-{
+- return &this_cpu_read(hv_clock_per_cpu)->pvti;
+-}
+-
+-static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
+-{
+- return this_cpu_read(hv_clock_per_cpu);
+-}
++DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
++EXPORT_PER_CPU_SYMBOL_GPL(hv_clock_per_cpu);
+
+ /*
+ * The wallclock is the time of day when we booted. Since then, some time may
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index fe03bd978761e..751aa85a30012 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -65,8 +65,8 @@ static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
+ for (i = 0; i < nent; i++) {
+ e = &entries[i];
+
+- if (e->function == function && (e->index == index ||
+- !(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX)))
++ if (e->function == function &&
++ (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index))
+ return e;
+ }
+
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 2837110e66eda..50050d06672b8 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -435,7 +435,6 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
+ __FOP_RET(#op)
+
+ asm(".pushsection .fixup, \"ax\"\n"
+- ".global kvm_fastop_exception \n"
+ "kvm_fastop_exception: xor %esi, %esi; ret\n"
+ ".popsection");
+
+diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
+index ff005fe738a4c..8c065da73f8e5 100644
+--- a/arch/x86/kvm/ioapic.c
++++ b/arch/x86/kvm/ioapic.c
+@@ -319,8 +319,8 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
+ unsigned index;
+ bool mask_before, mask_after;
+ union kvm_ioapic_redirect_entry *e;
+- unsigned long vcpu_bitmap;
+ int old_remote_irr, old_delivery_status, old_dest_id, old_dest_mode;
++ DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
+
+ switch (ioapic->ioregsel) {
+ case IOAPIC_REG_VERSION:
+@@ -384,9 +384,9 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
+ irq.shorthand = APIC_DEST_NOSHORT;
+ irq.dest_id = e->fields.dest_id;
+ irq.msi_redir_hint = false;
+- bitmap_zero(&vcpu_bitmap, 16);
++ bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
+ kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
+- &vcpu_bitmap);
++ vcpu_bitmap);
+ if (old_dest_mode != e->fields.dest_mode ||
+ old_dest_id != e->fields.dest_id) {
+ /*
+@@ -399,10 +399,10 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
+ kvm_lapic_irq_dest_mode(
+ !!e->fields.dest_mode);
+ kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
+- &vcpu_bitmap);
++ vcpu_bitmap);
+ }
+ kvm_make_scan_ioapic_request_mask(ioapic->kvm,
+- &vcpu_bitmap);
++ vcpu_bitmap);
+ } else {
+ kvm_make_scan_ioapic_request(ioapic->kvm);
+ }
+diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c
+index 91a9f7e0fd914..68e67228101de 100644
+--- a/arch/x86/kvm/mmu/page_track.c
++++ b/arch/x86/kvm/mmu/page_track.c
+@@ -163,13 +163,13 @@ void kvm_page_track_cleanup(struct kvm *kvm)
+ cleanup_srcu_struct(&head->track_srcu);
+ }
+
+-void kvm_page_track_init(struct kvm *kvm)
++int kvm_page_track_init(struct kvm *kvm)
+ {
+ struct kvm_page_track_notifier_head *head;
+
+ head = &kvm->arch.track_notifier_head;
+- init_srcu_struct(&head->track_srcu);
+ INIT_HLIST_HEAD(&head->track_notifier_list);
++ return init_srcu_struct(&head->track_srcu);
+ }
+
+ /*
+diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
+index e5515477c30a6..700bc241cee18 100644
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -545,7 +545,6 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
+ (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
+ (svm->vmcb01.ptr->control.int_ctl & int_ctl_vmcb01_bits);
+
+- svm->vmcb->control.virt_ext = svm->nested.ctl.virt_ext;
+ svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
+ svm->vmcb->control.int_state = svm->nested.ctl.int_state;
+ svm->vmcb->control.event_inj = svm->nested.ctl.event_inj;
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index 7fbce342eec47..cb166bde449bd 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -596,43 +596,50 @@ static int sev_es_sync_vmsa(struct vcpu_svm *svm)
+ return 0;
+ }
+
+-static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
++static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
++ int *error)
+ {
+- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct sev_data_launch_update_vmsa vmsa;
++ struct vcpu_svm *svm = to_svm(vcpu);
++ int ret;
++
++ /* Perform some pre-encryption checks against the VMSA */
++ ret = sev_es_sync_vmsa(svm);
++ if (ret)
++ return ret;
++
++ /*
++ * The LAUNCH_UPDATE_VMSA command will perform in-place encryption of
++ * the VMSA memory content (i.e it will write the same memory region
++ * with the guest's key), so invalidate it first.
++ */
++ clflush_cache_range(svm->vmsa, PAGE_SIZE);
++
++ vmsa.reserved = 0;
++ vmsa.handle = to_kvm_svm(kvm)->sev_info.handle;
++ vmsa.address = __sme_pa(svm->vmsa);
++ vmsa.len = PAGE_SIZE;
++ return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
++}
++
++static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
++{
+ struct kvm_vcpu *vcpu;
+ int i, ret;
+
+ if (!sev_es_guest(kvm))
+ return -ENOTTY;
+
+- vmsa.reserved = 0;
+-
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+- struct vcpu_svm *svm = to_svm(vcpu);
+-
+- /* Perform some pre-encryption checks against the VMSA */
+- ret = sev_es_sync_vmsa(svm);
++ ret = mutex_lock_killable(&vcpu->mutex);
+ if (ret)
+ return ret;
+
+- /*
+- * The LAUNCH_UPDATE_VMSA command will perform in-place
+- * encryption of the VMSA memory content (i.e it will write
+- * the same memory region with the guest's key), so invalidate
+- * it first.
+- */
+- clflush_cache_range(svm->vmsa, PAGE_SIZE);
++ ret = __sev_launch_update_vmsa(kvm, vcpu, &argp->error);
+
+- vmsa.handle = sev->handle;
+- vmsa.address = __sme_pa(svm->vmsa);
+- vmsa.len = PAGE_SIZE;
+- ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa,
+- &argp->error);
++ mutex_unlock(&vcpu->mutex);
+ if (ret)
+ return ret;
+-
+- svm->vcpu.arch.guest_state_protected = true;
+ }
+
+ return 0;
+@@ -1398,8 +1405,10 @@ static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
+
+ /* Bind ASID to this guest */
+ ret = sev_bind_asid(kvm, start.handle, error);
+- if (ret)
++ if (ret) {
++ sev_decommission(start.handle);
+ goto e_free_session;
++ }
+
+ params.handle = start.handle;
+ if (copy_to_user((void __user *)(uintptr_t)argp->data,
+@@ -1465,7 +1474,7 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
+
+ /* Pin guest memory */
+ guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
+- PAGE_SIZE, &n, 0);
++ PAGE_SIZE, &n, 1);
+ if (IS_ERR(guest_page)) {
+ ret = PTR_ERR(guest_page);
+ goto e_free_trans;
+@@ -1502,6 +1511,20 @@ static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error);
+ }
+
++static bool cmd_allowed_from_miror(u32 cmd_id)
++{
++ /*
++ * Allow mirrors VM to call KVM_SEV_LAUNCH_UPDATE_VMSA to enable SEV-ES
++ * active mirror VMs. Also allow the debugging and status commands.
++ */
++ if (cmd_id == KVM_SEV_LAUNCH_UPDATE_VMSA ||
++ cmd_id == KVM_SEV_GUEST_STATUS || cmd_id == KVM_SEV_DBG_DECRYPT ||
++ cmd_id == KVM_SEV_DBG_ENCRYPT)
++ return true;
++
++ return false;
++}
++
+ int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
+ {
+ struct kvm_sev_cmd sev_cmd;
+@@ -1518,8 +1541,9 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
+
+ mutex_lock(&kvm->lock);
+
+- /* enc_context_owner handles all memory enc operations */
+- if (is_mirroring_enc_context(kvm)) {
++ /* Only the enc_context_owner handles some memory enc operations. */
++ if (is_mirroring_enc_context(kvm) &&
++ !cmd_allowed_from_miror(sev_cmd.id)) {
+ r = -EINVAL;
+ goto out;
+ }
+@@ -1716,8 +1740,7 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
+ {
+ struct file *source_kvm_file;
+ struct kvm *source_kvm;
+- struct kvm_sev_info *mirror_sev;
+- unsigned int asid;
++ struct kvm_sev_info source_sev, *mirror_sev;
+ int ret;
+
+ source_kvm_file = fget(source_fd);
+@@ -1740,7 +1763,8 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
+ goto e_source_unlock;
+ }
+
+- asid = to_kvm_svm(source_kvm)->sev_info.asid;
++ memcpy(&source_sev, &to_kvm_svm(source_kvm)->sev_info,
++ sizeof(source_sev));
+
+ /*
+ * The mirror kvm holds an enc_context_owner ref so its asid can't
+@@ -1760,8 +1784,16 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
+ /* Set enc_context_owner and copy its encryption context over */
+ mirror_sev = &to_kvm_svm(kvm)->sev_info;
+ mirror_sev->enc_context_owner = source_kvm;
+- mirror_sev->asid = asid;
+ mirror_sev->active = true;
++ mirror_sev->asid = source_sev.asid;
++ mirror_sev->fd = source_sev.fd;
++ mirror_sev->es_active = source_sev.es_active;
++ mirror_sev->handle = source_sev.handle;
++ /*
++ * Do not copy ap_jump_table. Since the mirror does not share the same
++ * KVM contexts as the original, and they may have different
++ * memory-views.
++ */
+
+ mutex_unlock(&kvm->lock);
+ return 0;
+diff --git a/arch/x86/kvm/vmx/evmcs.c b/arch/x86/kvm/vmx/evmcs.c
+index 896b2a50b4aae..a44e2734ff9b7 100644
+--- a/arch/x86/kvm/vmx/evmcs.c
++++ b/arch/x86/kvm/vmx/evmcs.c
+@@ -354,14 +354,20 @@ void nested_evmcs_filter_control_msr(u32 msr_index, u64 *pdata)
+ switch (msr_index) {
+ case MSR_IA32_VMX_EXIT_CTLS:
+ case MSR_IA32_VMX_TRUE_EXIT_CTLS:
+- ctl_high &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
++ ctl_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
+ break;
+ case MSR_IA32_VMX_ENTRY_CTLS:
+ case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
+- ctl_high &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
++ ctl_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
+ break;
+ case MSR_IA32_VMX_PROCBASED_CTLS2:
+- ctl_high &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
++ ctl_high &= ~EVMCS1_UNSUPPORTED_2NDEXEC;
++ break;
++ case MSR_IA32_VMX_PINBASED_CTLS:
++ ctl_high &= ~EVMCS1_UNSUPPORTED_PINCTRL;
++ break;
++ case MSR_IA32_VMX_VMFUNC:
++ ctl_low &= ~EVMCS1_UNSUPPORTED_VMFUNC;
+ break;
+ }
+
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index ac1803dac4357..ce30503f5438f 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -5898,6 +5898,12 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
+ case EXIT_REASON_VMFUNC:
+ /* VM functions are emulated through L2->L0 vmexits. */
+ return true;
++ case EXIT_REASON_BUS_LOCK:
++ /*
++ * At present, bus lock VM exit is never exposed to L1.
++ * Handle L2's bus locks in L0 directly.
++ */
++ return true;
+ default:
+ break;
+ }
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 256f8cab4b8b4..55de1eb135f92 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -1840,10 +1840,11 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ &msr_info->data))
+ return 1;
+ /*
+- * Enlightened VMCS v1 doesn't have certain fields, but buggy
+- * Hyper-V versions are still trying to use corresponding
+- * features when they are exposed. Filter out the essential
+- * minimum.
++ * Enlightened VMCS v1 doesn't have certain VMCS fields but
++ * instead of just ignoring the features, different Hyper-V
++ * versions are either trying to use them and fail or do some
++ * sanity checking and refuse to boot. Filter all unsupported
++ * features out.
+ */
+ if (!msr_info->host_initiated &&
+ vmx->nested.enlightened_vmcs_enabled)
+@@ -6815,7 +6816,7 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
+ */
+ tsx_ctrl = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
+ if (tsx_ctrl)
+- vmx->guest_uret_msrs[i].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
++ tsx_ctrl->mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
+ }
+
+ err = alloc_loaded_vmcs(&vmx->vmcs01);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 7ec7c2dce5065..6d5d6e93f5c41 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -10873,6 +10873,9 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
+
+ static_call(kvm_x86_vcpu_reset)(vcpu, init_event);
+
++ vcpu->arch.cr3 = 0;
++ kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
++
+ /*
+ * Reset the MMU context if paging was enabled prior to INIT (which is
+ * implied if CR0.PG=1 as CR0 will be '0' prior to RESET). Unlike the
+@@ -11090,9 +11093,15 @@ void kvm_arch_free_vm(struct kvm *kvm)
+
+ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+ {
++ int ret;
++
+ if (type)
+ return -EINVAL;
+
++ ret = kvm_page_track_init(kvm);
++ if (ret)
++ return ret;
++
+ INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
+ INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
+ INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
+@@ -11125,7 +11134,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+
+ kvm_apicv_init(kvm);
+ kvm_hv_init_vm(kvm);
+- kvm_page_track_init(kvm);
+ kvm_mmu_init_vm(kvm);
+
+ return static_call(kvm_x86_vm_init)(kvm);
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index 16d76f814e9b1..ffcc4d29ad506 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -1341,9 +1341,10 @@ st: if (is_imm8(insn->off))
+ if (insn->imm == (BPF_AND | BPF_FETCH) ||
+ insn->imm == (BPF_OR | BPF_FETCH) ||
+ insn->imm == (BPF_XOR | BPF_FETCH)) {
+- u8 *branch_target;
+ bool is64 = BPF_SIZE(insn->code) == BPF_DW;
+ u32 real_src_reg = src_reg;
++ u32 real_dst_reg = dst_reg;
++ u8 *branch_target;
+
+ /*
+ * Can't be implemented with a single x86 insn.
+@@ -1354,11 +1355,13 @@ st: if (is_imm8(insn->off))
+ emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
+ if (src_reg == BPF_REG_0)
+ real_src_reg = BPF_REG_AX;
++ if (dst_reg == BPF_REG_0)
++ real_dst_reg = BPF_REG_AX;
+
+ branch_target = prog;
+ /* Load old value */
+ emit_ldx(&prog, BPF_SIZE(insn->code),
+- BPF_REG_0, dst_reg, insn->off);
++ BPF_REG_0, real_dst_reg, insn->off);
+ /*
+ * Perform the (commutative) operation locally,
+ * put the result in the AUX_REG.
+@@ -1369,7 +1372,8 @@ st: if (is_imm8(insn->off))
+ add_2reg(0xC0, AUX_REG, real_src_reg));
+ /* Attempt to swap in new value */
+ err = emit_atomic(&prog, BPF_CMPXCHG,
+- dst_reg, AUX_REG, insn->off,
++ real_dst_reg, AUX_REG,
++ insn->off,
+ BPF_SIZE(insn->code));
+ if (WARN_ON(err))
+ return err;
+@@ -1383,11 +1387,10 @@ st: if (is_imm8(insn->off))
+ /* Restore R0 after clobbering RAX */
+ emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
+ break;
+-
+ }
+
+ err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
+- insn->off, BPF_SIZE(insn->code));
++ insn->off, BPF_SIZE(insn->code));
+ if (err)
+ return err;
+ break;
+@@ -1744,7 +1747,7 @@ static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
+ }
+
+ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
+- struct bpf_prog *p, int stack_size, bool mod_ret)
++ struct bpf_prog *p, int stack_size, bool save_ret)
+ {
+ u8 *prog = *pprog;
+ u8 *jmp_insn;
+@@ -1777,11 +1780,15 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
+ if (emit_call(&prog, p->bpf_func, prog))
+ return -EINVAL;
+
+- /* BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
++ /*
++ * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
+ * of the previous call which is then passed on the stack to
+ * the next BPF program.
++ *
++ * BPF_TRAMP_FENTRY trampoline may need to return the return
++ * value of BPF_PROG_TYPE_STRUCT_OPS prog.
+ */
+- if (mod_ret)
++ if (save_ret)
+ emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
+
+ /* replace 2 nops with JE insn, since jmp target is known */
+@@ -1828,13 +1835,15 @@ static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
+ }
+
+ static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
+- struct bpf_tramp_progs *tp, int stack_size)
++ struct bpf_tramp_progs *tp, int stack_size,
++ bool save_ret)
+ {
+ int i;
+ u8 *prog = *pprog;
+
+ for (i = 0; i < tp->nr_progs; i++) {
+- if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, false))
++ if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size,
++ save_ret))
+ return -EINVAL;
+ }
+ *pprog = prog;
+@@ -1877,6 +1886,23 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
+ return 0;
+ }
+
++static bool is_valid_bpf_tramp_flags(unsigned int flags)
++{
++ if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
++ (flags & BPF_TRAMP_F_SKIP_FRAME))
++ return false;
++
++ /*
++ * BPF_TRAMP_F_RET_FENTRY_RET is only used by bpf_struct_ops,
++ * and it must be used alone.
++ */
++ if ((flags & BPF_TRAMP_F_RET_FENTRY_RET) &&
++ (flags & ~BPF_TRAMP_F_RET_FENTRY_RET))
++ return false;
++
++ return true;
++}
++
+ /* Example:
+ * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
+ * its 'struct btf_func_model' will be nr_args=2
+@@ -1949,17 +1975,19 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
+ struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];
+ u8 **branches = NULL;
+ u8 *prog;
++ bool save_ret;
+
+ /* x86-64 supports up to 6 arguments. 7+ can be added in the future */
+ if (nr_args > 6)
+ return -ENOTSUPP;
+
+- if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
+- (flags & BPF_TRAMP_F_SKIP_FRAME))
++ if (!is_valid_bpf_tramp_flags(flags))
+ return -EINVAL;
+
+- if (flags & BPF_TRAMP_F_CALL_ORIG)
+- stack_size += 8; /* room for return value of orig_call */
++ /* room for return value of orig_call or fentry prog */
++ save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
++ if (save_ret)
++ stack_size += 8;
+
+ if (flags & BPF_TRAMP_F_SKIP_FRAME)
+ /* skip patched call instruction and point orig_call to actual
+@@ -1986,7 +2014,8 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
+ }
+
+ if (fentry->nr_progs)
+- if (invoke_bpf(m, &prog, fentry, stack_size))
++ if (invoke_bpf(m, &prog, fentry, stack_size,
++ flags & BPF_TRAMP_F_RET_FENTRY_RET))
+ return -EINVAL;
+
+ if (fmod_ret->nr_progs) {
+@@ -2033,7 +2062,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
+ }
+
+ if (fexit->nr_progs)
+- if (invoke_bpf(m, &prog, fexit, stack_size)) {
++ if (invoke_bpf(m, &prog, fexit, stack_size, false)) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+@@ -2053,9 +2082,10 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
+ ret = -EINVAL;
+ goto cleanup;
+ }
+- /* restore original return value back into RAX */
+- emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
+ }
++ /* restore return value of orig_call or fentry prog back into RAX */
++ if (save_ret)
++ emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
+
+ EMIT1(0x5B); /* pop rbx */
+ EMIT1(0xC9); /* leave */
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index 3a1038b6eeb30..9360c65169ff4 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -2662,15 +2662,6 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
+ * are likely to increase the throughput.
+ */
+ bfqq->new_bfqq = new_bfqq;
+- /*
+- * The above assignment schedules the following redirections:
+- * each time some I/O for bfqq arrives, the process that
+- * generated that I/O is disassociated from bfqq and
+- * associated with new_bfqq. Here we increases new_bfqq->ref
+- * in advance, adding the number of processes that are
+- * expected to be associated with new_bfqq as they happen to
+- * issue I/O.
+- */
+ new_bfqq->ref += process_refs;
+ return new_bfqq;
+ }
+@@ -2733,10 +2724,6 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ {
+ struct bfq_queue *in_service_bfqq, *new_bfqq;
+
+- /* if a merge has already been setup, then proceed with that first */
+- if (bfqq->new_bfqq)
+- return bfqq->new_bfqq;
+-
+ /*
+ * Check delayed stable merge for rotational or non-queueing
+ * devs. For this branch to be executed, bfqq must not be
+@@ -2838,6 +2825,9 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ if (bfq_too_late_for_merging(bfqq))
+ return NULL;
+
++ if (bfqq->new_bfqq)
++ return bfqq->new_bfqq;
++
+ if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
+ return NULL;
+
+diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
+index a3ef6cce644cc..7dd80acf92c78 100644
+--- a/drivers/acpi/nfit/core.c
++++ b/drivers/acpi/nfit/core.c
+@@ -3007,6 +3007,18 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
+ ndr_desc->target_node = NUMA_NO_NODE;
+ }
+
++ /* Fallback to address based numa information if node lookup failed */
++ if (ndr_desc->numa_node == NUMA_NO_NODE) {
++ ndr_desc->numa_node = memory_add_physaddr_to_nid(spa->address);
++ dev_info(acpi_desc->dev, "changing numa node from %d to %d for nfit region [%pa-%pa]",
++ NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);
++ }
++ if (ndr_desc->target_node == NUMA_NO_NODE) {
++ ndr_desc->target_node = phys_to_target_node(spa->address);
++ dev_info(acpi_desc->dev, "changing target node from %d to %d for nfit region [%pa-%pa]",
++ NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);
++ }
++
+ /*
+ * Persistence domain bits are hierarchical, if
+ * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index 8c77e14987d4b..56f54e6eb9874 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -1721,6 +1721,25 @@ static int fw_devlink_create_devlink(struct device *con,
+ struct device *sup_dev;
+ int ret = 0;
+
++ /*
++ * In some cases, a device P might also be a supplier to its child node
++ * C. However, this would defer the probe of C until the probe of P
++ * completes successfully. This is perfectly fine in the device driver
++ * model. device_add() doesn't guarantee probe completion of the device
++ * by the time it returns.
++ *
++ * However, there are a few drivers that assume C will finish probing
++ * as soon as it's added and before P finishes probing. So, we provide
++ * a flag to let fw_devlink know not to delay the probe of C until the
++ * probe of P completes successfully.
++ *
++ * When such a flag is set, we can't create device links where P is the
++ * supplier of C as that would delay the probe of C.
++ */
++ if (sup_handle->flags & FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD &&
++ fwnode_is_ancestor_of(sup_handle, con->fwnode))
++ return -EINVAL;
++
+ sup_dev = get_dev_from_fwnode(sup_handle);
+ if (sup_dev) {
+ /*
+@@ -1771,14 +1790,21 @@ static int fw_devlink_create_devlink(struct device *con,
+ * be broken by applying logic. Check for these types of cycles and
+ * break them so that devices in the cycle probe properly.
+ *
+- * If the supplier's parent is dependent on the consumer, then
+- * the consumer-supplier dependency is a false dependency. So,
+- * treat it as an invalid link.
++ * If the supplier's parent is dependent on the consumer, then the
++ * consumer and supplier have a cyclic dependency. Since fw_devlink
++ * can't tell which of the inferred dependencies are incorrect, don't
++ * enforce probe ordering between any of the devices in this cyclic
++ * dependency. Do this by relaxing all the fw_devlink device links in
++ * this cycle and by treating the fwnode link between the consumer and
++ * the supplier as an invalid dependency.
+ */
+ sup_dev = fwnode_get_next_parent_dev(sup_handle);
+ if (sup_dev && device_is_dependent(con, sup_dev)) {
+- dev_dbg(con, "Not linking to %pfwP - False link\n",
+- sup_handle);
++ dev_info(con, "Fixing up cyclic dependency with %pfwP (%s)\n",
++ sup_handle, dev_name(sup_dev));
++ device_links_write_lock();
++ fw_devlink_relax_cycle(con, sup_dev);
++ device_links_write_unlock();
+ ret = -EINVAL;
+ } else {
+ /*
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 93708b1938e80..99ab58b877f8c 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -97,13 +97,18 @@ struct nbd_config {
+
+ atomic_t recv_threads;
+ wait_queue_head_t recv_wq;
+- loff_t blksize;
++ unsigned int blksize_bits;
+ loff_t bytesize;
+ #if IS_ENABLED(CONFIG_DEBUG_FS)
+ struct dentry *dbg_dir;
+ #endif
+ };
+
++static inline unsigned int nbd_blksize(struct nbd_config *config)
++{
++ return 1u << config->blksize_bits;
++}
++
+ struct nbd_device {
+ struct blk_mq_tag_set tag_set;
+
+@@ -147,7 +152,7 @@ static struct dentry *nbd_dbg_dir;
+
+ #define NBD_MAGIC 0x68797548
+
+-#define NBD_DEF_BLKSIZE 1024
++#define NBD_DEF_BLKSIZE_BITS 10
+
+ static unsigned int nbds_max = 16;
+ static int max_part = 16;
+@@ -350,12 +355,12 @@ static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
+ loff_t blksize)
+ {
+ if (!blksize)
+- blksize = NBD_DEF_BLKSIZE;
++ blksize = 1u << NBD_DEF_BLKSIZE_BITS;
+ if (blksize < 512 || blksize > PAGE_SIZE || !is_power_of_2(blksize))
+ return -EINVAL;
+
+ nbd->config->bytesize = bytesize;
+- nbd->config->blksize = blksize;
++ nbd->config->blksize_bits = __ffs(blksize);
+
+ if (!nbd->task_recv)
+ return 0;
+@@ -1370,7 +1375,7 @@ static int nbd_start_device(struct nbd_device *nbd)
+ args->index = i;
+ queue_work(nbd->recv_workq, &args->work);
+ }
+- return nbd_set_size(nbd, config->bytesize, config->blksize);
++ return nbd_set_size(nbd, config->bytesize, nbd_blksize(config));
+ }
+
+ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
+@@ -1439,11 +1444,11 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
+ case NBD_SET_BLKSIZE:
+ return nbd_set_size(nbd, config->bytesize, arg);
+ case NBD_SET_SIZE:
+- return nbd_set_size(nbd, arg, config->blksize);
++ return nbd_set_size(nbd, arg, nbd_blksize(config));
+ case NBD_SET_SIZE_BLOCKS:
+- if (check_mul_overflow((loff_t)arg, config->blksize, &bytesize))
++ if (check_shl_overflow(arg, config->blksize_bits, &bytesize))
+ return -EINVAL;
+- return nbd_set_size(nbd, bytesize, config->blksize);
++ return nbd_set_size(nbd, bytesize, nbd_blksize(config));
+ case NBD_SET_TIMEOUT:
+ nbd_set_cmd_timeout(nbd, arg);
+ return 0;
+@@ -1509,7 +1514,7 @@ static struct nbd_config *nbd_alloc_config(void)
+ atomic_set(&config->recv_threads, 0);
+ init_waitqueue_head(&config->recv_wq);
+ init_waitqueue_head(&config->conn_wait);
+- config->blksize = NBD_DEF_BLKSIZE;
++ config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
+ atomic_set(&config->live_connections, 0);
+ try_module_get(THIS_MODULE);
+ return config;
+@@ -1637,7 +1642,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd)
+ debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_fops);
+ debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
+ debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
+- debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
++ debugfs_create_u32("blocksize_bits", 0444, dir, &config->blksize_bits);
+ debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_fops);
+
+ return 0;
+@@ -1841,7 +1846,7 @@ nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
+ static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
+ {
+ struct nbd_config *config = nbd->config;
+- u64 bsize = config->blksize;
++ u64 bsize = nbd_blksize(config);
+ u64 bytes = config->bytesize;
+
+ if (info->attrs[NBD_ATTR_SIZE_BYTES])
+@@ -1850,7 +1855,7 @@ static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
+ if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES])
+ bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
+
+- if (bytes != config->bytesize || bsize != config->blksize)
++ if (bytes != config->bytesize || bsize != nbd_blksize(config))
+ return nbd_set_size(nbd, bytes, bsize);
+ return 0;
+ }
+diff --git a/drivers/cpufreq/cpufreq_governor_attr_set.c b/drivers/cpufreq/cpufreq_governor_attr_set.c
+index 66b05a326910e..a6f365b9cc1ad 100644
+--- a/drivers/cpufreq/cpufreq_governor_attr_set.c
++++ b/drivers/cpufreq/cpufreq_governor_attr_set.c
+@@ -74,8 +74,8 @@ unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *l
+ if (count)
+ return count;
+
+- kobject_put(&attr_set->kobj);
+ mutex_destroy(&attr_set->update_lock);
++ kobject_put(&attr_set->kobj);
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(gov_attr_set_put);
+diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
+index bb88198c874e0..aa4e1a5006919 100644
+--- a/drivers/crypto/ccp/ccp-ops.c
++++ b/drivers/crypto/ccp/ccp-ops.c
+@@ -778,7 +778,7 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+ in_place ? DMA_BIDIRECTIONAL
+ : DMA_TO_DEVICE);
+ if (ret)
+- goto e_ctx;
++ goto e_aad;
+
+ if (in_place) {
+ dst = src;
+@@ -863,7 +863,7 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+ op.u.aes.size = 0;
+ ret = cmd_q->ccp->vdata->perform->aes(&op);
+ if (ret)
+- goto e_dst;
++ goto e_final_wa;
+
+ if (aes->action == CCP_AES_ACTION_ENCRYPT) {
+ /* Put the ciphered tag after the ciphertext. */
+@@ -873,17 +873,19 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+ ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
+ DMA_BIDIRECTIONAL);
+ if (ret)
+- goto e_tag;
++ goto e_final_wa;
+ ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
+- if (ret)
+- goto e_tag;
++ if (ret) {
++ ccp_dm_free(&tag);
++ goto e_final_wa;
++ }
+
+ ret = crypto_memneq(tag.address, final_wa.address,
+ authsize) ? -EBADMSG : 0;
+ ccp_dm_free(&tag);
+ }
+
+-e_tag:
++e_final_wa:
+ ccp_dm_free(&final_wa);
+
+ e_dst:
+diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
+index f5cfc0698799a..8ebf369b3ba0f 100644
+--- a/drivers/gpio/gpio-pca953x.c
++++ b/drivers/gpio/gpio-pca953x.c
+@@ -468,15 +468,8 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
+ mutex_lock(&chip->i2c_lock);
+ ret = regmap_read(chip->regmap, inreg, &reg_val);
+ mutex_unlock(&chip->i2c_lock);
+- if (ret < 0) {
+- /*
+- * NOTE:
+- * diagnostic already emitted; that's all we should
+- * do unless gpio_*_value_cansleep() calls become different
+- * from their nonsleeping siblings (and report faults).
+- */
+- return 0;
+- }
++ if (ret < 0)
++ return ret;
+
+ return !!(reg_val & bit);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 7b42636fc7dc6..d3247a5cceb4c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3602,9 +3602,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
+
+ fence_driver_init:
+ /* Fence driver */
+- r = amdgpu_fence_driver_init(adev);
++ r = amdgpu_fence_driver_sw_init(adev);
+ if (r) {
+- dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
++ dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
+ goto failed;
+ }
+@@ -3631,6 +3631,8 @@ fence_driver_init:
+ goto release_ras_con;
+ }
+
++ amdgpu_fence_driver_hw_init(adev);
++
+ dev_info(adev->dev,
+ "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
+ adev->gfx.config.max_shader_engines,
+@@ -3798,7 +3800,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
+ else
+ drm_atomic_helper_shutdown(adev_to_drm(adev));
+ }
+- amdgpu_fence_driver_fini_hw(adev);
++ amdgpu_fence_driver_hw_fini(adev);
+
+ if (adev->pm_sysfs_en)
+ amdgpu_pm_sysfs_fini(adev);
+@@ -3820,7 +3822,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
+ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
+ {
+ amdgpu_device_ip_fini(adev);
+- amdgpu_fence_driver_fini_sw(adev);
++ amdgpu_fence_driver_sw_fini(adev);
+ release_firmware(adev->firmware.gpu_info_fw);
+ adev->firmware.gpu_info_fw = NULL;
+ adev->accel_working = false;
+@@ -3895,7 +3897,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
+ /* evict vram memory */
+ amdgpu_bo_evict_vram(adev);
+
+- amdgpu_fence_driver_suspend(adev);
++ amdgpu_fence_driver_hw_fini(adev);
+
+ amdgpu_device_ip_suspend_phase2(adev);
+ /* evict remaining vram memory
+@@ -3940,8 +3942,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
+ dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
+ return r;
+ }
+- amdgpu_fence_driver_resume(adev);
+-
++ amdgpu_fence_driver_hw_init(adev);
+
+ r = amdgpu_device_ip_late_init(adev);
+ if (r)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+index 7a73167319116..dc50c05f23fc2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+@@ -837,6 +837,28 @@ static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)
+ return 0;
+ }
+
++/* Mirrors the is_displayable check in radeonsi's gfx6_compute_surface */
++static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb)
++{
++ u64 micro_tile_mode;
++
++ /* Zero swizzle mode means linear */
++ if (AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0)
++ return 0;
++
++ micro_tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE);
++ switch (micro_tile_mode) {
++ case 0: /* DISPLAY */
++ case 3: /* RENDER */
++ return 0;
++ default:
++ drm_dbg_kms(afb->base.dev,
++ "Micro tile mode %llu not supported for scanout\n",
++ micro_tile_mode);
++ return -EINVAL;
++ }
++}
++
+ static void get_block_dimensions(unsigned int block_log2, unsigned int cpp,
+ unsigned int *width, unsigned int *height)
+ {
+@@ -1103,6 +1125,7 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+ {
++ struct amdgpu_device *adev = drm_to_adev(dev);
+ int ret, i;
+
+ /*
+@@ -1122,6 +1145,14 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
+ if (ret)
+ return ret;
+
++ if (!dev->mode_config.allow_fb_modifiers) {
++ drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI,
++ "GFX9+ requires FB check based on format modifier\n");
++ ret = check_tiling_flags_gfx6(rfb);
++ if (ret)
++ return ret;
++ }
++
+ if (dev->mode_config.allow_fb_modifiers &&
+ !(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) {
+ ret = convert_tiling_flags_to_modifier(rfb);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+index 72d9b92b17547..49884069226a2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+@@ -417,9 +417,6 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
+ }
+ amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
+
+- if (irq_src)
+- amdgpu_irq_get(adev, irq_src, irq_type);
+-
+ ring->fence_drv.irq_src = irq_src;
+ ring->fence_drv.irq_type = irq_type;
+ ring->fence_drv.initialized = true;
+@@ -501,7 +498,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
+ }
+
+ /**
+- * amdgpu_fence_driver_init - init the fence driver
++ * amdgpu_fence_driver_sw_init - init the fence driver
+ * for all possible rings.
+ *
+ * @adev: amdgpu device pointer
+@@ -512,20 +509,20 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
+ * amdgpu_fence_driver_start_ring().
+ * Returns 0 for success.
+ */
+-int amdgpu_fence_driver_init(struct amdgpu_device *adev)
++int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev)
+ {
+ return 0;
+ }
+
+ /**
+- * amdgpu_fence_driver_fini - tear down the fence driver
++ * amdgpu_fence_driver_hw_fini - tear down the fence driver
+ * for all possible rings.
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Tear down the fence driver for all possible rings (all asics).
+ */
+-void amdgpu_fence_driver_fini_hw(struct amdgpu_device *adev)
++void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
+ {
+ int i, r;
+
+@@ -534,8 +531,10 @@ void amdgpu_fence_driver_fini_hw(struct amdgpu_device *adev)
+
+ if (!ring || !ring->fence_drv.initialized)
+ continue;
++
+ if (!ring->no_scheduler)
+- drm_sched_fini(&ring->sched);
++ drm_sched_stop(&ring->sched, NULL);
++
+ /* You can't wait for HW to signal if it's gone */
+ if (!drm_dev_is_unplugged(&adev->ddev))
+ r = amdgpu_fence_wait_empty(ring);
+@@ -553,7 +552,7 @@ void amdgpu_fence_driver_fini_hw(struct amdgpu_device *adev)
+ }
+ }
+
+-void amdgpu_fence_driver_fini_sw(struct amdgpu_device *adev)
++void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
+ {
+ unsigned int i, j;
+
+@@ -563,6 +562,9 @@ void amdgpu_fence_driver_fini_sw(struct amdgpu_device *adev)
+ if (!ring || !ring->fence_drv.initialized)
+ continue;
+
++ if (!ring->no_scheduler)
++ drm_sched_fini(&ring->sched);
++
+ for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
+ dma_fence_put(ring->fence_drv.fences[j]);
+ kfree(ring->fence_drv.fences);
+@@ -572,49 +574,18 @@ void amdgpu_fence_driver_fini_sw(struct amdgpu_device *adev)
+ }
+
+ /**
+- * amdgpu_fence_driver_suspend - suspend the fence driver
+- * for all possible rings.
+- *
+- * @adev: amdgpu device pointer
+- *
+- * Suspend the fence driver for all possible rings (all asics).
+- */
+-void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
+-{
+- int i, r;
+-
+- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+- struct amdgpu_ring *ring = adev->rings[i];
+- if (!ring || !ring->fence_drv.initialized)
+- continue;
+-
+- /* wait for gpu to finish processing current batch */
+- r = amdgpu_fence_wait_empty(ring);
+- if (r) {
+- /* delay GPU reset to resume */
+- amdgpu_fence_driver_force_completion(ring);
+- }
+-
+- /* disable the interrupt */
+- if (ring->fence_drv.irq_src)
+- amdgpu_irq_put(adev, ring->fence_drv.irq_src,
+- ring->fence_drv.irq_type);
+- }
+-}
+-
+-/**
+- * amdgpu_fence_driver_resume - resume the fence driver
++ * amdgpu_fence_driver_hw_init - enable the fence driver
+ * for all possible rings.
+ *
+ * @adev: amdgpu device pointer
+ *
+- * Resume the fence driver for all possible rings (all asics).
++ * Enable the fence driver for all possible rings (all asics).
+ * Not all asics have all rings, so each asic will only
+ * start the fence driver on the rings it has using
+ * amdgpu_fence_driver_start_ring().
+ * Returns 0 for success.
+ */
+-void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
++void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
+ {
+ int i;
+
+@@ -623,6 +594,11 @@ void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
+ if (!ring || !ring->fence_drv.initialized)
+ continue;
+
++ if (!ring->no_scheduler) {
++ drm_sched_resubmit_jobs(&ring->sched);
++ drm_sched_start(&ring->sched, true);
++ }
++
+ /* enable the interrupt */
+ if (ring->fence_drv.irq_src)
+ amdgpu_irq_get(adev, ring->fence_drv.irq_src,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+index e7d3d0dbdd967..9c11ced4312c8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+@@ -106,9 +106,6 @@ struct amdgpu_fence_driver {
+ struct dma_fence **fences;
+ };
+
+-int amdgpu_fence_driver_init(struct amdgpu_device *adev);
+-void amdgpu_fence_driver_fini_hw(struct amdgpu_device *adev);
+-void amdgpu_fence_driver_fini_sw(struct amdgpu_device *adev);
+ void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
+
+ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
+@@ -117,8 +114,10 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
+ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
+ struct amdgpu_irq_src *irq_src,
+ unsigned irq_type);
+-void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
+-void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
++void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev);
++void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev);
++int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev);
++void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev);
+ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence,
+ unsigned flags);
+ int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
1486 +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
1487 +index 6a23c6826e122..88ed0ef88f7e2 100644
1488 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
1489 ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
1490 +@@ -3598,7 +3598,7 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
1491 +
1492 + /* set static priority for a queue/ring */
1493 + gfx_v9_0_mqd_set_priority(ring, mqd);
1494 +- mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
1495 ++ mqd->cp_hqd_quantum = RREG32_SOC15(GC, 0, mmCP_HQD_QUANTUM);
1496 +
1497 + /* map_queues packet doesn't need activate the queue,
1498 + * so only kiq need set this field.
1499 +diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
1500 +index 7486e53067867..27e0ca615edc1 100644
1501 +--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
1502 ++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
1503 +@@ -883,6 +883,12 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
1504 + msleep(1000);
1505 + }
1506 +
1507 ++ /* TODO: check whether we can submit a doorbell request to raise
1508 ++ * a doorbell fence to exit gfxoff.
1509 ++ */
1510 ++ if (adev->in_s0ix)
1511 ++ amdgpu_gfx_off_ctrl(adev, false);
1512 ++
1513 + sdma_v5_2_soft_reset(adev);
1514 + /* unhalt the MEs */
1515 + sdma_v5_2_enable(adev, true);
1516 +@@ -891,6 +897,8 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
1517 +
1518 + /* start the gfx rings and rlc compute queues */
1519 + r = sdma_v5_2_gfx_resume(adev);
1520 ++ if (adev->in_s0ix)
1521 ++ amdgpu_gfx_off_ctrl(adev, true);
1522 + if (r)
1523 + return r;
1524 + r = sdma_v5_2_rlc_resume(adev);
1525 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1526 +index 3bb567ea2cef9..a03d7682cd8f2 100644
1527 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1528 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1529 +@@ -1117,6 +1117,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
1530 +
1531 + init_data.asic_id.pci_revision_id = adev->pdev->revision;
1532 + init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1533 ++ init_data.asic_id.chip_id = adev->pdev->device;
1534 +
1535 + init_data.asic_id.vram_width = adev->gmc.vram_width;
1536 + /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1537 +@@ -1724,6 +1725,7 @@ static int dm_late_init(void *handle)
1538 + linear_lut[i] = 0xFFFF * i / 15;
1539 +
1540 + params.set = 0;
1541 ++ params.backlight_ramping_override = false;
1542 + params.backlight_ramping_start = 0xCCCC;
1543 + params.backlight_ramping_reduction = 0xCCCCCCCC;
1544 + params.backlight_lut_array_size = 16;
1545 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
1546 +index 83ef72a3ebf41..3c8da3665a274 100644
1547 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
1548 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
1549 +@@ -1813,14 +1813,13 @@ bool perform_link_training_with_retries(
1550 + if (panel_mode == DP_PANEL_MODE_EDP) {
1551 + struct cp_psp *cp_psp = &stream->ctx->cp_psp;
1552 +
1553 +- if (cp_psp && cp_psp->funcs.enable_assr) {
1554 +- if (!cp_psp->funcs.enable_assr(cp_psp->handle, link)) {
1555 +- /* since eDP implies ASSR on, change panel
1556 +- * mode to disable ASSR
1557 +- */
1558 +- panel_mode = DP_PANEL_MODE_DEFAULT;
1559 +- }
1560 +- }
1561 ++ if (cp_psp && cp_psp->funcs.enable_assr)
1562 ++ /* ASSR is bound to fail with unsigned PSP
1563 ++ * verstage used during the development phase.
1564 ++ * Report and continue with eDP panel mode to
1565 ++ * perform eDP link training with the right settings
1566 ++ */
1567 ++ cp_psp->funcs.enable_assr(cp_psp->handle, link);
1568 + }
1569 + #endif
1570 +
1571 +diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
1572 +index 06e9a8ed4e03c..db9c212a240e5 100644
1573 +--- a/drivers/gpu/drm/i915/gt/intel_rps.c
1574 ++++ b/drivers/gpu/drm/i915/gt/intel_rps.c
1575 +@@ -861,8 +861,6 @@ void intel_rps_park(struct intel_rps *rps)
1576 + {
1577 + int adj;
1578 +
1579 +- GEM_BUG_ON(atomic_read(&rps->num_waiters));
1580 +-
1581 + if (!intel_rps_clear_active(rps))
1582 + return;
1583 +
1584 +diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
1585 +index 734c37c5e3474..527b59b863125 100644
1586 +--- a/drivers/gpu/drm/i915/gvt/scheduler.c
1587 ++++ b/drivers/gpu/drm/i915/gvt/scheduler.c
1588 +@@ -576,7 +576,7 @@ retry:
1589 +
1590 + /* No one is going to touch shadow bb from now on. */
1591 + i915_gem_object_flush_map(bb->obj);
1592 +- i915_gem_object_unlock(bb->obj);
1593 ++ i915_gem_ww_ctx_fini(&ww);
1594 + }
1595 + }
1596 + return 0;
1597 +@@ -630,7 +630,7 @@ retry:
1598 + return ret;
1599 + }
1600 +
1601 +- i915_gem_object_unlock(wa_ctx->indirect_ctx.obj);
1602 ++ i915_gem_ww_ctx_fini(&ww);
1603 +
1604 + /* FIXME: we are not tracking our pinned VMA leaving it
1605 + * up to the core to fix up the stray pin_count upon
1606 +diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
1607 +index 37aef13085739..7db972fa70243 100644
1608 +--- a/drivers/gpu/drm/i915/i915_request.c
1609 ++++ b/drivers/gpu/drm/i915/i915_request.c
1610 +@@ -914,8 +914,6 @@ static void __i915_request_ctor(void *arg)
1611 + i915_sw_fence_init(&rq->submit, submit_notify);
1612 + i915_sw_fence_init(&rq->semaphore, semaphore_notify);
1613 +
1614 +- dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock, 0, 0);
1615 +-
1616 + rq->capture_list = NULL;
1617 +
1618 + init_llist_head(&rq->execute_cb);
1619 +@@ -978,17 +976,12 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
1620 + rq->ring = ce->ring;
1621 + rq->execution_mask = ce->engine->mask;
1622 +
1623 +- kref_init(&rq->fence.refcount);
1624 +- rq->fence.flags = 0;
1625 +- rq->fence.error = 0;
1626 +- INIT_LIST_HEAD(&rq->fence.cb_list);
1627 +-
1628 + ret = intel_timeline_get_seqno(tl, rq, &seqno);
1629 + if (ret)
1630 + goto err_free;
1631 +
1632 +- rq->fence.context = tl->fence_context;
1633 +- rq->fence.seqno = seqno;
1634 ++ dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
1635 ++ tl->fence_context, seqno);
1636 +
1637 + RCU_INIT_POINTER(rq->timeline, tl);
1638 + rq->hwsp_seqno = tl->hwsp_seqno;
1639 +diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
1640 +index 8d68796aa905f..1b4a192b19e5e 100644
1641 +--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
1642 ++++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
1643 +@@ -239,13 +239,13 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
1644 + if (!privdata->cl_data)
1645 + return -ENOMEM;
1646 +
1647 +- rc = devm_add_action_or_reset(&pdev->dev, amd_mp2_pci_remove, privdata);
1648 ++ mp2_select_ops(privdata);
1649 ++
1650 ++ rc = amd_sfh_hid_client_init(privdata);
1651 + if (rc)
1652 + return rc;
1653 +
1654 +- mp2_select_ops(privdata);
1655 +-
1656 +- return amd_sfh_hid_client_init(privdata);
1657 ++ return devm_add_action_or_reset(&pdev->dev, amd_mp2_pci_remove, privdata);
1658 + }
1659 +
1660 + static const struct pci_device_id amd_mp2_pci_tbl[] = {
1661 +diff --git a/drivers/hid/hid-betopff.c b/drivers/hid/hid-betopff.c
1662 +index 0790fbd3fc9a2..467d789f9bc2d 100644
1663 +--- a/drivers/hid/hid-betopff.c
1664 ++++ b/drivers/hid/hid-betopff.c
1665 +@@ -56,15 +56,22 @@ static int betopff_init(struct hid_device *hid)
1666 + {
1667 + struct betopff_device *betopff;
1668 + struct hid_report *report;
1669 +- struct hid_input *hidinput =
1670 +- list_first_entry(&hid->inputs, struct hid_input, list);
1671 ++ struct hid_input *hidinput;
1672 + struct list_head *report_list =
1673 + &hid->report_enum[HID_OUTPUT_REPORT].report_list;
1674 +- struct input_dev *dev = hidinput->input;
1675 ++ struct input_dev *dev;
1676 + int field_count = 0;
1677 + int error;
1678 + int i, j;
1679 +
1680 ++ if (list_empty(&hid->inputs)) {
1681 ++ hid_err(hid, "no inputs found\n");
1682 ++ return -ENODEV;
1683 ++ }
1684 ++
1685 ++ hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
1686 ++ dev = hidinput->input;
1687 ++
1688 + if (list_empty(report_list)) {
1689 + hid_err(hid, "no output reports found\n");
1690 + return -ENODEV;
1691 +diff --git a/drivers/hid/hid-u2fzero.c b/drivers/hid/hid-u2fzero.c
1692 +index 95e0807878c7e..d70cd3d7f583b 100644
1693 +--- a/drivers/hid/hid-u2fzero.c
1694 ++++ b/drivers/hid/hid-u2fzero.c
1695 +@@ -198,7 +198,9 @@ static int u2fzero_rng_read(struct hwrng *rng, void *data,
1696 + }
1697 +
1698 + ret = u2fzero_recv(dev, &req, &resp);
1699 +- if (ret < 0)
1700 ++
1701 ++ /* ignore errors or packets without data */
1702 ++ if (ret < offsetof(struct u2f_hid_msg, init.data))
1703 + return 0;
1704 +
1705 + /* only take the minimum amount of data it is safe to take */
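The new guard folds two cases into one comparison: a negative error code and a short packet both fall below the offset of init.data, so neither can contribute entropy. A compilable sketch of the offsetof() check, using a simplified stand-in for struct u2f_hid_msg (field sizes here are illustrative):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for the driver's struct u2f_hid_msg */
struct u2f_hid_msg {
    uint32_t cid;
    struct {
        uint8_t cmd;
        uint8_t bcnth, bcntl;
        uint8_t data[57];
    } init;
};

/* how many payload bytes a 'ret'-byte transfer can carry */
static int usable_payload(int ret)
{
    /* negative errors and short packets both fall below the header size */
    if (ret < (int)offsetof(struct u2f_hid_msg, init.data))
        return 0;
    return ret - (int)offsetof(struct u2f_hid_msg, init.data);
}

int main(void)
{
    printf("%d\n", usable_payload(-71)); /* error code -> 0 */
    printf("%d\n", usable_payload(4));   /* header only -> 0 */
    printf("%d\n", usable_payload(15));  /* 15 - 7 = 8 payload bytes */
    return 0;
}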
1706 +diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
1707 +index b234958f883a4..c56cb03c1551f 100644
1708 +--- a/drivers/hid/usbhid/hid-core.c
1709 ++++ b/drivers/hid/usbhid/hid-core.c
1710 +@@ -505,7 +505,7 @@ static void hid_ctrl(struct urb *urb)
1711 +
1712 + if (unplug) {
1713 + usbhid->ctrltail = usbhid->ctrlhead;
1714 +- } else {
1715 ++ } else if (usbhid->ctrlhead != usbhid->ctrltail) {
1716 + usbhid->ctrltail = (usbhid->ctrltail + 1) & (HID_CONTROL_FIFO_SIZE - 1);
1717 +
1718 + if (usbhid->ctrlhead != usbhid->ctrltail &&
1719 +@@ -1223,9 +1223,20 @@ static void usbhid_stop(struct hid_device *hid)
1720 + mutex_lock(&usbhid->mutex);
1721 +
1722 + clear_bit(HID_STARTED, &usbhid->iofl);
1723 ++
1724 + spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */
1725 + set_bit(HID_DISCONNECTED, &usbhid->iofl);
1726 ++ while (usbhid->ctrltail != usbhid->ctrlhead) {
1727 ++ if (usbhid->ctrl[usbhid->ctrltail].dir == USB_DIR_OUT) {
1728 ++ kfree(usbhid->ctrl[usbhid->ctrltail].raw_report);
1729 ++ usbhid->ctrl[usbhid->ctrltail].raw_report = NULL;
1730 ++ }
1731 ++
1732 ++ usbhid->ctrltail = (usbhid->ctrltail + 1) &
1733 ++ (HID_CONTROL_FIFO_SIZE - 1);
1734 ++ }
1735 + spin_unlock_irq(&usbhid->lock);
1736 ++
1737 + usb_kill_urb(usbhid->urbin);
1738 + usb_kill_urb(usbhid->urbout);
1739 + usb_kill_urb(usbhid->urbctrl);
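The queue drained above is a power-of-two ring buffer: head == tail means empty, and an index advances by a masked increment. A self-contained model of that drain loop, freeing only the OUT entries (names and sizes simplified):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define FIFO_SIZE 8                 /* must be a power of two */
#define DIR_OUT 1

struct ctrl_req { int dir; char *raw_report; };

static struct ctrl_req fifo[FIFO_SIZE];
static unsigned int head, tail;     /* head == tail means empty */

static void drain_pending(void)
{
    while (tail != head) {
        if (fifo[tail].dir == DIR_OUT) {
            free(fifo[tail].raw_report);
            fifo[tail].raw_report = NULL;
        }
        tail = (tail + 1) & (FIFO_SIZE - 1);  /* masked advance */
    }
}

int main(void)
{
    fifo[head].dir = DIR_OUT;
    fifo[head].raw_report = strdup("report");
    head = (head + 1) & (FIFO_SIZE - 1);

    drain_pending();
    printf("empty: %d\n", tail == head);
    return 0;
}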
1740 +diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
1741 +index 116681fde33d2..89fe7b9fe26be 100644
1742 +--- a/drivers/hwmon/mlxreg-fan.c
1743 ++++ b/drivers/hwmon/mlxreg-fan.c
1744 +@@ -315,8 +315,8 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
1745 + {
1746 + struct mlxreg_fan *fan = cdev->devdata;
1747 + unsigned long cur_state;
1748 ++ int i, config = 0;
1749 + u32 regval;
1750 +- int i;
1751 + int err;
1752 +
1753 + /*
1754 +@@ -329,6 +329,12 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
1755 + * overwritten.
1756 + */
1757 + if (state >= MLXREG_FAN_SPEED_MIN && state <= MLXREG_FAN_SPEED_MAX) {
1758 ++ /*
1759 ++ * This is a configuration change, which is only supported through sysfs.
1760 ++ * For a configuration change a non-zero value is returned to avoid a
1761 ++ * thermal statistics update.
1762 ++ */
1763 ++ config = 1;
1764 + state -= MLXREG_FAN_MAX_STATE;
1765 + for (i = 0; i < state; i++)
1766 + fan->cooling_levels[i] = state;
1767 +@@ -343,7 +349,7 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
1768 +
1769 + cur_state = MLXREG_FAN_PWM_DUTY2STATE(regval);
1770 + if (state < cur_state)
1771 +- return 0;
1772 ++ return config;
1773 +
1774 + state = cur_state;
1775 + }
1776 +@@ -359,7 +365,7 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
1777 + dev_err(fan->dev, "Failed to write PWM duty\n");
1778 + return err;
1779 + }
1780 +- return 0;
1781 ++ return config;
1782 + }
1783 +
1784 + static const struct thermal_cooling_device_ops mlxreg_fan_cooling_ops = {
1785 +diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
1786 +index 0d68a78be980d..ae664613289c4 100644
1787 +--- a/drivers/hwmon/occ/common.c
1788 ++++ b/drivers/hwmon/occ/common.c
1789 +@@ -340,18 +340,11 @@ static ssize_t occ_show_temp_10(struct device *dev,
1790 + if (val == OCC_TEMP_SENSOR_FAULT)
1791 + return -EREMOTEIO;
1792 +
1793 +- /*
1794 +- * VRM doesn't return temperature, only alarm bit. This
1795 +- * attribute maps to tempX_alarm instead of tempX_input for
1796 +- * VRM
1797 +- */
1798 +- if (temp->fru_type != OCC_FRU_TYPE_VRM) {
1799 +- /* sensor not ready */
1800 +- if (val == 0)
1801 +- return -EAGAIN;
1802 ++ /* sensor not ready */
1803 ++ if (val == 0)
1804 ++ return -EAGAIN;
1805 +
1806 +- val *= 1000;
1807 +- }
1808 ++ val *= 1000;
1809 + break;
1810 + case 2:
1811 + val = temp->fru_type;
1812 +@@ -886,7 +879,7 @@ static int occ_setup_sensor_attrs(struct occ *occ)
1813 + 0, i);
1814 + attr++;
1815 +
1816 +- if (sensors->temp.version > 1 &&
1817 ++ if (sensors->temp.version == 2 &&
1818 + temp->fru_type == OCC_FRU_TYPE_VRM) {
1819 + snprintf(attr->name, sizeof(attr->name),
1820 + "temp%d_alarm", s);
1821 +diff --git a/drivers/hwmon/pmbus/mp2975.c b/drivers/hwmon/pmbus/mp2975.c
1822 +index eb94bd5f4e2a8..51986adfbf47c 100644
1823 +--- a/drivers/hwmon/pmbus/mp2975.c
1824 ++++ b/drivers/hwmon/pmbus/mp2975.c
1825 +@@ -54,7 +54,7 @@
1826 +
1827 + #define MP2975_RAIL2_FUNC (PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | \
1828 + PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | \
1829 +- PMBUS_PHASE_VIRTUAL)
1830 ++ PMBUS_HAVE_POUT | PMBUS_PHASE_VIRTUAL)
1831 +
1832 + struct mp2975_data {
1833 + struct pmbus_driver_info info;
1834 +diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
1835 +index ede66ea6a730d..b963a369c5ab3 100644
1836 +--- a/drivers/hwmon/tmp421.c
1837 ++++ b/drivers/hwmon/tmp421.c
1838 +@@ -100,71 +100,81 @@ struct tmp421_data {
1839 + s16 temp[4];
1840 + };
1841 +
1842 +-static int temp_from_s16(s16 reg)
1843 ++static int temp_from_raw(u16 reg, bool extended)
1844 + {
1845 + /* Mask out status bits */
1846 + int temp = reg & ~0xf;
1847 +
1848 +- return (temp * 1000 + 128) / 256;
1849 +-}
1850 +-
1851 +-static int temp_from_u16(u16 reg)
1852 +-{
1853 +- /* Mask out status bits */
1854 +- int temp = reg & ~0xf;
1855 +-
1856 +- /* Add offset for extended temperature range. */
1857 +- temp -= 64 * 256;
1858 ++ if (extended)
1859 ++ temp = temp - 64 * 256;
1860 ++ else
1861 ++ temp = (s16)temp;
1862 +
1863 +- return (temp * 1000 + 128) / 256;
1864 ++ return DIV_ROUND_CLOSEST(temp * 1000, 256);
1865 + }
1866 +
1867 +-static struct tmp421_data *tmp421_update_device(struct device *dev)
1868 ++static int tmp421_update_device(struct tmp421_data *data)
1869 + {
1870 +- struct tmp421_data *data = dev_get_drvdata(dev);
1871 + struct i2c_client *client = data->client;
1872 ++ int ret = 0;
1873 + int i;
1874 +
1875 + mutex_lock(&data->update_lock);
1876 +
1877 + if (time_after(jiffies, data->last_updated + (HZ / 2)) ||
1878 + !data->valid) {
1879 +- data->config = i2c_smbus_read_byte_data(client,
1880 +- TMP421_CONFIG_REG_1);
1881 ++ ret = i2c_smbus_read_byte_data(client, TMP421_CONFIG_REG_1);
1882 ++ if (ret < 0)
1883 ++ goto exit;
1884 ++ data->config = ret;
1885 +
1886 + for (i = 0; i < data->channels; i++) {
1887 +- data->temp[i] = i2c_smbus_read_byte_data(client,
1888 +- TMP421_TEMP_MSB[i]) << 8;
1889 +- data->temp[i] |= i2c_smbus_read_byte_data(client,
1890 +- TMP421_TEMP_LSB[i]);
1891 ++ ret = i2c_smbus_read_byte_data(client, TMP421_TEMP_MSB[i]);
1892 ++ if (ret < 0)
1893 ++ goto exit;
1894 ++ data->temp[i] = ret << 8;
1895 ++
1896 ++ ret = i2c_smbus_read_byte_data(client, TMP421_TEMP_LSB[i]);
1897 ++ if (ret < 0)
1898 ++ goto exit;
1899 ++ data->temp[i] |= ret;
1900 + }
1901 + data->last_updated = jiffies;
1902 + data->valid = 1;
1903 + }
1904 +
1905 ++exit:
1906 + mutex_unlock(&data->update_lock);
1907 +
1908 +- return data;
1909 ++ if (ret < 0) {
1910 ++ data->valid = 0;
1911 ++ return ret;
1912 ++ }
1913 ++
1914 ++ return 0;
1915 + }
1916 +
1917 + static int tmp421_read(struct device *dev, enum hwmon_sensor_types type,
1918 + u32 attr, int channel, long *val)
1919 + {
1920 +- struct tmp421_data *tmp421 = tmp421_update_device(dev);
1921 ++ struct tmp421_data *tmp421 = dev_get_drvdata(dev);
1922 ++ int ret = 0;
1923 ++
1924 ++ ret = tmp421_update_device(tmp421);
1925 ++ if (ret)
1926 ++ return ret;
1927 +
1928 + switch (attr) {
1929 + case hwmon_temp_input:
1930 +- if (tmp421->config & TMP421_CONFIG_RANGE)
1931 +- *val = temp_from_u16(tmp421->temp[channel]);
1932 +- else
1933 +- *val = temp_from_s16(tmp421->temp[channel]);
1934 ++ *val = temp_from_raw(tmp421->temp[channel],
1935 ++ tmp421->config & TMP421_CONFIG_RANGE);
1936 + return 0;
1937 + case hwmon_temp_fault:
1938 + /*
1939 +- * The OPEN bit signals a fault. This is bit 0 of the temperature
1940 +- * register (low byte).
1941 ++ * Any of the OPEN or /PVLD bits indicates a hardware malfunction
1942 ++ * and the conversion result may be incorrect
1943 + */
1944 +- *val = tmp421->temp[channel] & 0x01;
1945 ++ *val = !!(tmp421->temp[channel] & 0x03);
1946 + return 0;
1947 + default:
1948 + return -EOPNOTSUPP;
1949 +@@ -177,9 +187,6 @@ static umode_t tmp421_is_visible(const void *data, enum hwmon_sensor_types type,
1950 + {
1951 + switch (attr) {
1952 + case hwmon_temp_fault:
1953 +- if (channel == 0)
1954 +- return 0;
1955 +- return 0444;
1956 + case hwmon_temp_input:
1957 + return 0444;
1958 + default:
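The merged temp_from_raw() makes the two ranges explicit: extended mode reads the register as unsigned with a fixed -64 degC offset, standard mode as plain two's complement, and both now round to millidegrees instead of truncating. A compilable model of the conversion, with DIV_ROUND_CLOSEST() replaced by an equivalent macro:

#include <stdint.h>
#include <stdio.h>

/* kernel DIV_ROUND_CLOSEST() for a positive divisor */
#define DIV_ROUND_CLOSEST(x, d) (((x) + ((x) < 0 ? -((d) / 2) : (d) / 2)) / (d))

static int temp_from_raw(uint16_t reg, int extended)
{
    int temp = reg & ~0xf;           /* low nibble carries status bits */

    if (extended)
        temp -= 64 * 256;            /* extended range: 0 means -64 C */
    else
        temp = (int16_t)temp;        /* standard range: signed value */

    return DIV_ROUND_CLOSEST(temp * 1000, 256);  /* millidegrees C */
}

int main(void)
{
    /* 0x1900 is 25.0 C in standard mode, 25 - 64 = -39 C extended */
    printf("%d\n", temp_from_raw(0x1900, 0));  /* 25000 */
    printf("%d\n", temp_from_raw(0x1900, 1));  /* -39000 */
    /* 0xF380 is -12.5 C in standard mode (two's complement) */
    printf("%d\n", temp_from_raw(0xF380, 0));  /* -12500 */
    return 0;
}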
1959 +diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
1960 +index 37b25a1474c46..3c1be2c11fdf0 100644
1961 +--- a/drivers/hwmon/w83791d.c
1962 ++++ b/drivers/hwmon/w83791d.c
1963 +@@ -273,9 +273,6 @@ struct w83791d_data {
1964 + char valid; /* !=0 if following fields are valid */
1965 + unsigned long last_updated; /* In jiffies */
1966 +
1967 +- /* array of 2 pointers to subclients */
1968 +- struct i2c_client *lm75[2];
1969 +-
1970 + /* volts */
1971 + u8 in[NUMBER_OF_VIN]; /* Register value */
1972 + u8 in_max[NUMBER_OF_VIN]; /* Register value */
1973 +@@ -1257,7 +1254,6 @@ static const struct attribute_group w83791d_group_fanpwm45 = {
1974 + static int w83791d_detect_subclients(struct i2c_client *client)
1975 + {
1976 + struct i2c_adapter *adapter = client->adapter;
1977 +- struct w83791d_data *data = i2c_get_clientdata(client);
1978 + int address = client->addr;
1979 + int i, id;
1980 + u8 val;
1981 +@@ -1280,22 +1276,19 @@ static int w83791d_detect_subclients(struct i2c_client *client)
1982 + }
1983 +
1984 + val = w83791d_read(client, W83791D_REG_I2C_SUBADDR);
1985 +- if (!(val & 0x08))
1986 +- data->lm75[0] = devm_i2c_new_dummy_device(&client->dev, adapter,
1987 +- 0x48 + (val & 0x7));
1988 +- if (!(val & 0x80)) {
1989 +- if (!IS_ERR(data->lm75[0]) &&
1990 +- ((val & 0x7) == ((val >> 4) & 0x7))) {
1991 +- dev_err(&client->dev,
1992 +- "duplicate addresses 0x%x, "
1993 +- "use force_subclient\n",
1994 +- data->lm75[0]->addr);
1995 +- return -ENODEV;
1996 +- }
1997 +- data->lm75[1] = devm_i2c_new_dummy_device(&client->dev, adapter,
1998 +- 0x48 + ((val >> 4) & 0x7));
1999 ++
2000 ++ if (!(val & 0x88) && (val & 0x7) == ((val >> 4) & 0x7)) {
2001 ++ dev_err(&client->dev,
2002 ++ "duplicate addresses 0x%x, use force_subclient\n", 0x48 + (val & 0x7));
2003 ++ return -ENODEV;
2004 + }
2005 +
2006 ++ if (!(val & 0x08))
2007 ++ devm_i2c_new_dummy_device(&client->dev, adapter, 0x48 + (val & 0x7));
2008 ++
2009 ++ if (!(val & 0x80))
2010 ++ devm_i2c_new_dummy_device(&client->dev, adapter, 0x48 + ((val >> 4) & 0x7));
2011 ++
2012 + return 0;
2013 + }
2014 +
2015 +diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
2016 +index abd5c3a722b91..1f175f3813506 100644
2017 +--- a/drivers/hwmon/w83792d.c
2018 ++++ b/drivers/hwmon/w83792d.c
2019 +@@ -264,9 +264,6 @@ struct w83792d_data {
2020 + char valid; /* !=0 if following fields are valid */
2021 + unsigned long last_updated; /* In jiffies */
2022 +
2023 +- /* array of 2 pointers to subclients */
2024 +- struct i2c_client *lm75[2];
2025 +-
2026 + u8 in[9]; /* Register value */
2027 + u8 in_max[9]; /* Register value */
2028 + u8 in_min[9]; /* Register value */
2029 +@@ -927,7 +924,6 @@ w83792d_detect_subclients(struct i2c_client *new_client)
2030 + int address = new_client->addr;
2031 + u8 val;
2032 + struct i2c_adapter *adapter = new_client->adapter;
2033 +- struct w83792d_data *data = i2c_get_clientdata(new_client);
2034 +
2035 + id = i2c_adapter_id(adapter);
2036 + if (force_subclients[0] == id && force_subclients[1] == address) {
2037 +@@ -946,21 +942,19 @@ w83792d_detect_subclients(struct i2c_client *new_client)
2038 + }
2039 +
2040 + val = w83792d_read_value(new_client, W83792D_REG_I2C_SUBADDR);
2041 +- if (!(val & 0x08))
2042 +- data->lm75[0] = devm_i2c_new_dummy_device(&new_client->dev, adapter,
2043 +- 0x48 + (val & 0x7));
2044 +- if (!(val & 0x80)) {
2045 +- if (!IS_ERR(data->lm75[0]) &&
2046 +- ((val & 0x7) == ((val >> 4) & 0x7))) {
2047 +- dev_err(&new_client->dev,
2048 +- "duplicate addresses 0x%x, use force_subclient\n",
2049 +- data->lm75[0]->addr);
2050 +- return -ENODEV;
2051 +- }
2052 +- data->lm75[1] = devm_i2c_new_dummy_device(&new_client->dev, adapter,
2053 +- 0x48 + ((val >> 4) & 0x7));
2054 ++
2055 ++ if (!(val & 0x88) && (val & 0x7) == ((val >> 4) & 0x7)) {
2056 ++ dev_err(&new_client->dev,
2057 ++ "duplicate addresses 0x%x, use force_subclient\n", 0x48 + (val & 0x7));
2058 ++ return -ENODEV;
2059 + }
2060 +
2061 ++ if (!(val & 0x08))
2062 ++ devm_i2c_new_dummy_device(&new_client->dev, adapter, 0x48 + (val & 0x7));
2063 ++
2064 ++ if (!(val & 0x80))
2065 ++ devm_i2c_new_dummy_device(&new_client->dev, adapter, 0x48 + ((val >> 4) & 0x7));
2066 ++
2067 + return 0;
2068 + }
2069 +
2070 +diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
2071 +index e7d0484eabe4c..1d2854de1cfc9 100644
2072 +--- a/drivers/hwmon/w83793.c
2073 ++++ b/drivers/hwmon/w83793.c
2074 +@@ -202,7 +202,6 @@ static inline s8 TEMP_TO_REG(long val, s8 min, s8 max)
2075 + }
2076 +
2077 + struct w83793_data {
2078 +- struct i2c_client *lm75[2];
2079 + struct device *hwmon_dev;
2080 + struct mutex update_lock;
2081 + char valid; /* !=0 if following fields are valid */
2082 +@@ -1566,7 +1565,6 @@ w83793_detect_subclients(struct i2c_client *client)
2083 + int address = client->addr;
2084 + u8 tmp;
2085 + struct i2c_adapter *adapter = client->adapter;
2086 +- struct w83793_data *data = i2c_get_clientdata(client);
2087 +
2088 + id = i2c_adapter_id(adapter);
2089 + if (force_subclients[0] == id && force_subclients[1] == address) {
2090 +@@ -1586,21 +1584,19 @@ w83793_detect_subclients(struct i2c_client *client)
2091 + }
2092 +
2093 + tmp = w83793_read_value(client, W83793_REG_I2C_SUBADDR);
2094 +- if (!(tmp & 0x08))
2095 +- data->lm75[0] = devm_i2c_new_dummy_device(&client->dev, adapter,
2096 +- 0x48 + (tmp & 0x7));
2097 +- if (!(tmp & 0x80)) {
2098 +- if (!IS_ERR(data->lm75[0])
2099 +- && ((tmp & 0x7) == ((tmp >> 4) & 0x7))) {
2100 +- dev_err(&client->dev,
2101 +- "duplicate addresses 0x%x, "
2102 +- "use force_subclients\n", data->lm75[0]->addr);
2103 +- return -ENODEV;
2104 +- }
2105 +- data->lm75[1] = devm_i2c_new_dummy_device(&client->dev, adapter,
2106 +- 0x48 + ((tmp >> 4) & 0x7));
2107 ++
2108 ++ if (!(tmp & 0x88) && (tmp & 0x7) == ((tmp >> 4) & 0x7)) {
2109 ++ dev_err(&client->dev,
2110 ++ "duplicate addresses 0x%x, use force_subclient\n", 0x48 + (tmp & 0x7));
2111 ++ return -ENODEV;
2112 + }
2113 +
2114 ++ if (!(tmp & 0x08))
2115 ++ devm_i2c_new_dummy_device(&client->dev, adapter, 0x48 + (tmp & 0x7));
2116 ++
2117 ++ if (!(tmp & 0x80))
2118 ++ devm_i2c_new_dummy_device(&client->dev, adapter, 0x48 + ((tmp >> 4) & 0x7));
2119 ++
2120 + return 0;
2121 + }
2122 +
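All three Winbond drivers now share the same pre-check: bits 3 and 7 of the subaddress register disable the two LM75 subclients, and each low nibble selects an address 0x48 + n, so !(val & 0x88) with equal nibbles would mean two dummy clients at one I2C address. A small sketch of the bit logic (values in main() are hypothetical):

#include <stdio.h>

/* returns 1 if both subclients are enabled and collide on one address */
static int subclients_collide(unsigned char val)
{
    /* bits 3 and 7 clear -> both subclients enabled */
    int both_enabled = !(val & 0x88);
    /* each nibble selects an address 0x48 + n */
    int same_addr = (val & 0x7) == ((val >> 4) & 0x7);

    return both_enabled && same_addr;
}

int main(void)
{
    printf("%d\n", subclients_collide(0x22)); /* both at 0x4a -> 1 */
    printf("%d\n", subclients_collide(0x21)); /* 0x49 and 0x4a -> 0 */
    printf("%d\n", subclients_collide(0xa2)); /* second disabled -> 0 */
    return 0;
}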
2123 +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
2124 +index 5d3b8b8d163d6..dbbacc8e9273f 100644
2125 +--- a/drivers/infiniband/core/cma.c
2126 ++++ b/drivers/infiniband/core/cma.c
2127 +@@ -1746,15 +1746,16 @@ static void cma_cancel_route(struct rdma_id_private *id_priv)
2128 + }
2129 + }
2130 +
2131 +-static void cma_cancel_listens(struct rdma_id_private *id_priv)
2132 ++static void _cma_cancel_listens(struct rdma_id_private *id_priv)
2133 + {
2134 + struct rdma_id_private *dev_id_priv;
2135 +
2136 ++ lockdep_assert_held(&lock);
2137 ++
2138 + /*
2139 + * Remove from listen_any_list to prevent added devices from spawning
2140 + * additional listen requests.
2141 + */
2142 +- mutex_lock(&lock);
2143 + list_del(&id_priv->list);
2144 +
2145 + while (!list_empty(&id_priv->listen_list)) {
2146 +@@ -1768,6 +1769,12 @@ static void cma_cancel_listens(struct rdma_id_private *id_priv)
2147 + rdma_destroy_id(&dev_id_priv->id);
2148 + mutex_lock(&lock);
2149 + }
2150 ++}
2151 ++
2152 ++static void cma_cancel_listens(struct rdma_id_private *id_priv)
2153 ++{
2154 ++ mutex_lock(&lock);
2155 ++ _cma_cancel_listens(id_priv);
2156 + mutex_unlock(&lock);
2157 + }
2158 +
2159 +@@ -1776,6 +1783,14 @@ static void cma_cancel_operation(struct rdma_id_private *id_priv,
2160 + {
2161 + switch (state) {
2162 + case RDMA_CM_ADDR_QUERY:
2163 ++ /*
2164 ++ * We can avoid doing the rdma_addr_cancel() based on state,
2165 ++ * only RDMA_CM_ADDR_QUERY has a work that could still execute.
2166 ++ * Notice that the addr_handler work could still be exiting
2167 ++ * outside this state; however, due to the interaction with the
2168 ++ * handler_mutex the work is guaranteed not to touch id_priv
2169 ++ * during exit.
2170 ++ */
2171 + rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
2172 + break;
2173 + case RDMA_CM_ROUTE_QUERY:
2174 +@@ -1810,6 +1825,8 @@ static void cma_release_port(struct rdma_id_private *id_priv)
2175 + static void destroy_mc(struct rdma_id_private *id_priv,
2176 + struct cma_multicast *mc)
2177 + {
2178 ++ bool send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
2179 ++
2180 + if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num))
2181 + ib_sa_free_multicast(mc->sa_mc);
2182 +
2183 +@@ -1826,7 +1843,10 @@ static void destroy_mc(struct rdma_id_private *id_priv,
2184 +
2185 + cma_set_mgid(id_priv, (struct sockaddr *)&mc->addr,
2186 + &mgid);
2187 +- cma_igmp_send(ndev, &mgid, false);
2188 ++
2189 ++ if (!send_only)
2190 ++ cma_igmp_send(ndev, &mgid, false);
2191 ++
2192 + dev_put(ndev);
2193 + }
2194 +
2195 +@@ -2574,7 +2594,7 @@ static int cma_listen_on_all(struct rdma_id_private *id_priv)
2196 + return 0;
2197 +
2198 + err_listen:
2199 +- list_del(&id_priv->list);
2200 ++ _cma_cancel_listens(id_priv);
2201 + mutex_unlock(&lock);
2202 + if (to_destroy)
2203 + rdma_destroy_id(&to_destroy->id);
2204 +@@ -3410,6 +3430,21 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
2205 + if (dst_addr->sa_family == AF_IB) {
2206 + ret = cma_resolve_ib_addr(id_priv);
2207 + } else {
2208 ++ /*
2209 ++ * The FSM can return to RDMA_CM_ADDR_BOUND after
2210 ++ * rdma_resolve_ip() is called, e.g. through the error
2211 ++ * path in addr_handler(). If this happens the existing
2212 ++ * request must be canceled before issuing a new one.
2213 ++ * Since canceling a request is a bit slow and this
2214 ++ * oddball path is rare, keep track of whether a request
2215 ++ * has been issued. The flag is effectively permanent,
2216 ++ * since this is the only cancel and it happens
2217 ++ * immediately before rdma_resolve_ip().
2218 ++ */
2219 ++ if (id_priv->used_resolve_ip)
2220 ++ rdma_addr_cancel(&id->route.addr.dev_addr);
2221 ++ else
2222 ++ id_priv->used_resolve_ip = 1;
2223 + ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr,
2224 + &id->route.addr.dev_addr,
2225 + timeout_ms, addr_handler,
2226 +@@ -3768,9 +3803,13 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
2227 + int ret;
2228 +
2229 + if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) {
2230 ++ struct sockaddr_in any_in = {
2231 ++ .sin_family = AF_INET,
2232 ++ .sin_addr.s_addr = htonl(INADDR_ANY),
2233 ++ };
2234 ++
2235 + /* For a well behaved ULP state will be RDMA_CM_IDLE */
2236 +- id->route.addr.src_addr.ss_family = AF_INET;
2237 +- ret = rdma_bind_addr(id, cma_src_addr(id_priv));
2238 ++ ret = rdma_bind_addr(id, (struct sockaddr *)&any_in);
2239 + if (ret)
2240 + return ret;
2241 + if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
2242 +diff --git a/drivers/infiniband/core/cma_priv.h b/drivers/infiniband/core/cma_priv.h
2243 +index 5c463da998453..f92f101ea9818 100644
2244 +--- a/drivers/infiniband/core/cma_priv.h
2245 ++++ b/drivers/infiniband/core/cma_priv.h
2246 +@@ -91,6 +91,7 @@ struct rdma_id_private {
2247 + u8 afonly;
2248 + u8 timeout;
2249 + u8 min_rnr_timer;
2250 ++ u8 used_resolve_ip;
2251 + enum ib_gid_type gid_type;
2252 +
2253 + /*
2254 +diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
2255 +index 993f9838b6c80..e1fdeadda437d 100644
2256 +--- a/drivers/infiniband/hw/hfi1/ipoib_tx.c
2257 ++++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
2258 +@@ -873,14 +873,14 @@ void hfi1_ipoib_tx_timeout(struct net_device *dev, unsigned int q)
2259 + struct hfi1_ipoib_txq *txq = &priv->txqs[q];
2260 + u64 completed = atomic64_read(&txq->complete_txreqs);
2261 +
2262 +- dd_dev_info(priv->dd, "timeout txq %llx q %u stopped %u stops %d no_desc %d ring_full %d\n",
2263 +- (unsigned long long)txq, q,
2264 ++ dd_dev_info(priv->dd, "timeout txq %p q %u stopped %u stops %d no_desc %d ring_full %d\n",
2265 ++ txq, q,
2266 + __netif_subqueue_stopped(dev, txq->q_idx),
2267 + atomic_read(&txq->stops),
2268 + atomic_read(&txq->no_desc),
2269 + atomic_read(&txq->ring_full));
2270 +- dd_dev_info(priv->dd, "sde %llx engine %u\n",
2271 +- (unsigned long long)txq->sde,
2272 ++ dd_dev_info(priv->dd, "sde %p engine %u\n",
2273 ++ txq->sde,
2274 + txq->sde ? txq->sde->this_idx : 0);
2275 + dd_dev_info(priv->dd, "flow %x\n", txq->flow.as_int);
2276 + dd_dev_info(priv->dd, "sent %llu completed %llu used %llu\n",
2277 +diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
2278 +index 1e9c3c5bee684..d763f097599ff 100644
2279 +--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
2280 ++++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
2281 +@@ -326,19 +326,30 @@ static void set_cq_param(struct hns_roce_cq *hr_cq, u32 cq_entries, int vector,
2282 + INIT_LIST_HEAD(&hr_cq->rq_list);
2283 + }
2284 +
2285 +-static void set_cqe_size(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
2286 +- struct hns_roce_ib_create_cq *ucmd)
2287 ++static int set_cqe_size(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
2288 ++ struct hns_roce_ib_create_cq *ucmd)
2289 + {
2290 + struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
2291 +
2292 +- if (udata) {
2293 +- if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size))
2294 +- hr_cq->cqe_size = ucmd->cqe_size;
2295 +- else
2296 +- hr_cq->cqe_size = HNS_ROCE_V2_CQE_SIZE;
2297 +- } else {
2298 ++ if (!udata) {
2299 + hr_cq->cqe_size = hr_dev->caps.cqe_sz;
2300 ++ return 0;
2301 ++ }
2302 ++
2303 ++ if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size)) {
2304 ++ if (ucmd->cqe_size != HNS_ROCE_V2_CQE_SIZE &&
2305 ++ ucmd->cqe_size != HNS_ROCE_V3_CQE_SIZE) {
2306 ++ ibdev_err(&hr_dev->ib_dev,
2307 ++ "invalid cqe size %u.\n", ucmd->cqe_size);
2308 ++ return -EINVAL;
2309 ++ }
2310 ++
2311 ++ hr_cq->cqe_size = ucmd->cqe_size;
2312 ++ } else {
2313 ++ hr_cq->cqe_size = HNS_ROCE_V2_CQE_SIZE;
2314 + }
2315 ++
2316 ++ return 0;
2317 + }
2318 +
2319 + int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
2320 +@@ -366,7 +377,9 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
2321 +
2322 + set_cq_param(hr_cq, attr->cqe, attr->comp_vector, &ucmd);
2323 +
2324 +- set_cqe_size(hr_cq, udata, &ucmd);
2325 ++ ret = set_cqe_size(hr_cq, udata, &ucmd);
2326 ++ if (ret)
2327 ++ return ret;
2328 +
2329 + ret = alloc_cq_buf(hr_dev, hr_cq, udata, ucmd.buf_addr);
2330 + if (ret) {
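The reworked set_cqe_size() now rejects user-supplied CQE sizes other than the two the hardware supports rather than silently accepting them. A minimal validate-then-store sketch, assuming the usual 32/64-byte values for HNS_ROCE_V2_CQE_SIZE and HNS_ROCE_V3_CQE_SIZE:

#include <errno.h>
#include <stdio.h>

#define HNS_ROCE_V2_CQE_SIZE 32
#define HNS_ROCE_V3_CQE_SIZE 64

static int set_cqe_size(unsigned int requested, unsigned int *out)
{
    if (requested != HNS_ROCE_V2_CQE_SIZE &&
        requested != HNS_ROCE_V3_CQE_SIZE)
        return -EINVAL;          /* reject anything the HW can't do */
    *out = requested;
    return 0;
}

int main(void)
{
    unsigned int sz;

    printf("%d\n", set_cqe_size(64, &sz));  /* 0, sz = 64 */
    printf("%d\n", set_cqe_size(48, &sz));  /* -EINVAL */
    return 0;
}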
2331 +diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
2332 +index c320891c8763c..0ccb0c453f6a2 100644
2333 +--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
2334 ++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
2335 +@@ -3306,7 +3306,7 @@ static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
2336 + dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
2337 + hr_cq->ib_cq.cqe);
2338 + owner_bit = hr_reg_read(dest, CQE_OWNER);
2339 +- memcpy(dest, cqe, sizeof(*cqe));
2340 ++ memcpy(dest, cqe, hr_cq->cqe_size);
2341 + hr_reg_write(dest, CQE_OWNER, owner_bit);
2342 + }
2343 + }
2344 +@@ -4411,7 +4411,12 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
2345 + hr_qp->path_mtu = ib_mtu;
2346 +
2347 + mtu = ib_mtu_enum_to_int(ib_mtu);
2348 +- if (WARN_ON(mtu < 0))
2349 ++ if (WARN_ON(mtu <= 0))
2350 ++ return -EINVAL;
2351 ++#define MAX_LP_MSG_LEN 65536
2352 ++ /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 64KB */
2353 ++ lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu);
2354 ++ if (WARN_ON(lp_pktn_ini >= 0xF))
2355 + return -EINVAL;
2356 +
2357 + if (attr_mask & IB_QP_PATH_MTU) {
2358 +@@ -4419,10 +4424,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
2359 + hr_reg_clear(qpc_mask, QPC_MTU);
2360 + }
2361 +
2362 +-#define MAX_LP_MSG_LEN 65536
2363 +- /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 64KB */
2364 +- lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu);
2365 +-
2366 + hr_reg_write(context, QPC_LP_PKTN_INI, lp_pktn_ini);
2367 + hr_reg_clear(qpc_mask, QPC_LP_PKTN_INI);
2368 +
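Computing lp_pktn_ini before the register write lets the driver bound it: with MAX_LP_MSG_LEN = 65536, lp_pktn_ini = ilog2(65536 / mtu), and MTU * 2^lp_pktn_ini stays at 64 KB for any power-of-two IB MTU. A worked check of the arithmetic:

#include <stdio.h>

#define MAX_LP_MSG_LEN 65536

/* integer log2, matching the kernel's ilog2() for powers of two */
static unsigned int ilog2_u32(unsigned int v)
{
    unsigned int r = 0;
    while (v >>= 1)
        r++;
    return r;
}

int main(void)
{
    /* IB MTUs are powers of two: 256..4096 */
    static const int mtus[] = { 256, 512, 1024, 2048, 4096 };

    for (unsigned int i = 0; i < sizeof(mtus) / sizeof(mtus[0]); i++) {
        unsigned int lp = ilog2_u32(MAX_LP_MSG_LEN / mtus[i]);
        /* MTU * 2^lp_pktn_ini must stay within 64 KB */
        printf("mtu %4d -> lp_pktn_ini %u (%d bytes)\n",
               mtus[i], lp, mtus[i] << lp);
    }
    return 0;
}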
2369 +diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
2370 +index 6b62299abfbbb..6dea0a49d1718 100644
2371 +--- a/drivers/infiniband/hw/irdma/cm.c
2372 ++++ b/drivers/infiniband/hw/irdma/cm.c
2373 +@@ -3496,7 +3496,7 @@ static void irdma_cm_disconn_true(struct irdma_qp *iwqp)
2374 + original_hw_tcp_state == IRDMA_TCP_STATE_TIME_WAIT ||
2375 + last_ae == IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE ||
2376 + last_ae == IRDMA_AE_BAD_CLOSE ||
2377 +- last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->reset)) {
2378 ++ last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->rf->reset)) {
2379 + issue_close = 1;
2380 + iwqp->cm_id = NULL;
2381 + qp->term_flags = 0;
2382 +@@ -4250,7 +4250,7 @@ void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
2383 + teardown_entry);
2384 + attr.qp_state = IB_QPS_ERR;
2385 + irdma_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
2386 +- if (iwdev->reset)
2387 ++ if (iwdev->rf->reset)
2388 + irdma_cm_disconn(cm_node->iwqp);
2389 + irdma_rem_ref_cm_node(cm_node);
2390 + }
2391 +diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
2392 +index 00de5ee9a2609..7de525a5ccf8c 100644
2393 +--- a/drivers/infiniband/hw/irdma/hw.c
2394 ++++ b/drivers/infiniband/hw/irdma/hw.c
2395 +@@ -176,6 +176,14 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
2396 + case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
2397 + qp->flush_code = FLUSH_GENERAL_ERR;
2398 + break;
2399 ++ case IRDMA_AE_LLP_TOO_MANY_RETRIES:
2400 ++ qp->flush_code = FLUSH_RETRY_EXC_ERR;
2401 ++ break;
2402 ++ case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
2403 ++ case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
2404 ++ case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
2405 ++ qp->flush_code = FLUSH_MW_BIND_ERR;
2406 ++ break;
2407 + default:
2408 + qp->flush_code = FLUSH_FATAL_ERR;
2409 + break;
2410 +@@ -1489,7 +1497,7 @@ void irdma_reinitialize_ieq(struct irdma_sc_vsi *vsi)
2411 +
2412 + irdma_puda_dele_rsrc(vsi, IRDMA_PUDA_RSRC_TYPE_IEQ, false);
2413 + if (irdma_initialize_ieq(iwdev)) {
2414 +- iwdev->reset = true;
2415 ++ iwdev->rf->reset = true;
2416 + rf->gen_ops.request_reset(rf);
2417 + }
2418 + }
2419 +@@ -1632,13 +1640,13 @@ void irdma_rt_deinit_hw(struct irdma_device *iwdev)
2420 + case IEQ_CREATED:
2421 + if (!iwdev->roce_mode)
2422 + irdma_puda_dele_rsrc(&iwdev->vsi, IRDMA_PUDA_RSRC_TYPE_IEQ,
2423 +- iwdev->reset);
2424 ++ iwdev->rf->reset);
2425 + fallthrough;
2426 + case ILQ_CREATED:
2427 + if (!iwdev->roce_mode)
2428 + irdma_puda_dele_rsrc(&iwdev->vsi,
2429 + IRDMA_PUDA_RSRC_TYPE_ILQ,
2430 +- iwdev->reset);
2431 ++ iwdev->rf->reset);
2432 + break;
2433 + default:
2434 + ibdev_warn(&iwdev->ibdev, "bad init_state = %d\n", iwdev->init_state);
2435 +diff --git a/drivers/infiniband/hw/irdma/i40iw_if.c b/drivers/infiniband/hw/irdma/i40iw_if.c
2436 +index bddf88194d095..d219f64b2c3d5 100644
2437 +--- a/drivers/infiniband/hw/irdma/i40iw_if.c
2438 ++++ b/drivers/infiniband/hw/irdma/i40iw_if.c
2439 +@@ -55,7 +55,7 @@ static void i40iw_close(struct i40e_info *cdev_info, struct i40e_client *client,
2440 +
2441 + iwdev = to_iwdev(ibdev);
2442 + if (reset)
2443 +- iwdev->reset = true;
2444 ++ iwdev->rf->reset = true;
2445 +
2446 + iwdev->iw_status = 0;
2447 + irdma_port_ibevent(iwdev);
2448 +diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
2449 +index 743d9e143a999..b678fe712447e 100644
2450 +--- a/drivers/infiniband/hw/irdma/main.h
2451 ++++ b/drivers/infiniband/hw/irdma/main.h
2452 +@@ -346,7 +346,6 @@ struct irdma_device {
2453 + bool roce_mode:1;
2454 + bool roce_dcqcn_en:1;
2455 + bool dcb:1;
2456 +- bool reset:1;
2457 + bool iw_ooo:1;
2458 + enum init_completion_state init_state;
2459 +
2460 +diff --git a/drivers/infiniband/hw/irdma/user.h b/drivers/infiniband/hw/irdma/user.h
2461 +index ff705f3232333..3dcbb1fbf2c66 100644
2462 +--- a/drivers/infiniband/hw/irdma/user.h
2463 ++++ b/drivers/infiniband/hw/irdma/user.h
2464 +@@ -102,6 +102,8 @@ enum irdma_flush_opcode {
2465 + FLUSH_REM_OP_ERR,
2466 + FLUSH_LOC_LEN_ERR,
2467 + FLUSH_FATAL_ERR,
2468 ++ FLUSH_RETRY_EXC_ERR,
2469 ++ FLUSH_MW_BIND_ERR,
2470 + };
2471 +
2472 + enum irdma_cmpl_status {
2473 +diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
2474 +index 5bbe44e54f9a1..832e9604766b4 100644
2475 +--- a/drivers/infiniband/hw/irdma/utils.c
2476 ++++ b/drivers/infiniband/hw/irdma/utils.c
2477 +@@ -2510,7 +2510,7 @@ void irdma_modify_qp_to_err(struct irdma_sc_qp *sc_qp)
2478 + struct irdma_qp *qp = sc_qp->qp_uk.back_qp;
2479 + struct ib_qp_attr attr;
2480 +
2481 +- if (qp->iwdev->reset)
2482 ++ if (qp->iwdev->rf->reset)
2483 + return;
2484 + attr.qp_state = IB_QPS_ERR;
2485 +
2486 +diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
2487 +index 717147ed0519d..fa393c5ea3973 100644
2488 +--- a/drivers/infiniband/hw/irdma/verbs.c
2489 ++++ b/drivers/infiniband/hw/irdma/verbs.c
2490 +@@ -535,8 +535,7 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
2491 + irdma_qp_rem_ref(&iwqp->ibqp);
2492 + wait_for_completion(&iwqp->free_qp);
2493 + irdma_free_lsmm_rsrc(iwqp);
2494 +- if (!iwdev->reset)
2495 +- irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
2496 ++ irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
2497 +
2498 + if (!iwqp->user_mode) {
2499 + if (iwqp->iwscq) {
2500 +@@ -2041,7 +2040,7 @@ static int irdma_create_cq(struct ib_cq *ibcq,
2501 + /* Kmode allocations */
2502 + int rsize;
2503 +
2504 +- if (entries > rf->max_cqe) {
2505 ++ if (entries < 1 || entries > rf->max_cqe) {
2506 + err_code = -EINVAL;
2507 + goto cq_free_rsrc;
2508 + }
2509 +@@ -3359,6 +3358,10 @@ static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode
2510 + return IB_WC_LOC_LEN_ERR;
2511 + case FLUSH_GENERAL_ERR:
2512 + return IB_WC_WR_FLUSH_ERR;
2513 ++ case FLUSH_RETRY_EXC_ERR:
2514 ++ return IB_WC_RETRY_EXC_ERR;
2515 ++ case FLUSH_MW_BIND_ERR:
2516 ++ return IB_WC_MW_BIND_ERR;
2517 + case FLUSH_FATAL_ERR:
2518 + default:
2519 + return IB_WC_FATAL_ERR;
2520 +diff --git a/drivers/interconnect/qcom/sdm660.c b/drivers/interconnect/qcom/sdm660.c
2521 +index 632dbdd219150..99eef7e2d326a 100644
2522 +--- a/drivers/interconnect/qcom/sdm660.c
2523 ++++ b/drivers/interconnect/qcom/sdm660.c
2524 +@@ -44,9 +44,9 @@
2525 + #define NOC_PERM_MODE_BYPASS (1 << NOC_QOS_MODE_BYPASS)
2526 +
2527 + #define NOC_QOS_PRIORITYn_ADDR(n) (0x8 + (n * 0x1000))
2528 +-#define NOC_QOS_PRIORITY_MASK 0xf
2529 ++#define NOC_QOS_PRIORITY_P1_MASK 0xc
2530 ++#define NOC_QOS_PRIORITY_P0_MASK 0x3
2531 + #define NOC_QOS_PRIORITY_P1_SHIFT 0x2
2532 +-#define NOC_QOS_PRIORITY_P0_SHIFT 0x3
2533 +
2534 + #define NOC_QOS_MODEn_ADDR(n) (0xc + (n * 0x1000))
2535 + #define NOC_QOS_MODEn_MASK 0x3
2536 +@@ -307,7 +307,7 @@ DEFINE_QNODE(slv_bimc_cfg, SDM660_SLAVE_BIMC_CFG, 4, -1, 56, true, -1, 0, -1, 0)
2537 + DEFINE_QNODE(slv_prng, SDM660_SLAVE_PRNG, 4, -1, 44, true, -1, 0, -1, 0);
2538 + DEFINE_QNODE(slv_spdm, SDM660_SLAVE_SPDM, 4, -1, 60, true, -1, 0, -1, 0);
2539 + DEFINE_QNODE(slv_qdss_cfg, SDM660_SLAVE_QDSS_CFG, 4, -1, 63, true, -1, 0, -1, 0);
2540 +-DEFINE_QNODE(slv_cnoc_mnoc_cfg, SDM660_SLAVE_BLSP_1, 4, -1, 66, true, -1, 0, -1, SDM660_MASTER_CNOC_MNOC_CFG);
2541 ++DEFINE_QNODE(slv_cnoc_mnoc_cfg, SDM660_SLAVE_CNOC_MNOC_CFG, 4, -1, 66, true, -1, 0, -1, SDM660_MASTER_CNOC_MNOC_CFG);
2542 + DEFINE_QNODE(slv_snoc_cfg, SDM660_SLAVE_SNOC_CFG, 4, -1, 70, true, -1, 0, -1, 0);
2543 + DEFINE_QNODE(slv_qm_cfg, SDM660_SLAVE_QM_CFG, 4, -1, 212, true, -1, 0, -1, 0);
2544 + DEFINE_QNODE(slv_clk_ctl, SDM660_SLAVE_CLK_CTL, 4, -1, 47, true, -1, 0, -1, 0);
2545 +@@ -624,13 +624,12 @@ static int qcom_icc_noc_set_qos_priority(struct regmap *rmap,
2546 + /* Must be updated one at a time, P1 first, P0 last */
2547 + val = qos->areq_prio << NOC_QOS_PRIORITY_P1_SHIFT;
2548 + rc = regmap_update_bits(rmap, NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
2549 +- NOC_QOS_PRIORITY_MASK, val);
2550 ++ NOC_QOS_PRIORITY_P1_MASK, val);
2551 + if (rc)
2552 + return rc;
2553 +
2554 +- val = qos->prio_level << NOC_QOS_PRIORITY_P0_SHIFT;
2555 + return regmap_update_bits(rmap, NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
2556 +- NOC_QOS_PRIORITY_MASK, val);
2557 ++ NOC_QOS_PRIORITY_P0_MASK, qos->prio_level);
2558 + }
2559 +
2560 + static int qcom_icc_set_noc_qos(struct icc_node *src, u64 max_bw)
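The split masks make the two 2-bit fields explicit: P1 lives in bits 3:2 (mask 0xc) and P0 in bits 1:0 (mask 0x3), and regmap_update_bits() touches only the masked field, which is what makes the "P1 first, P0 last" ordering safe. A sketch of that read-modify-write semantic in plain C:

#include <stdio.h>

#define NOC_QOS_PRIORITY_P1_MASK  0xc
#define NOC_QOS_PRIORITY_P0_MASK  0x3
#define NOC_QOS_PRIORITY_P1_SHIFT 0x2

/* models regmap_update_bits(): change only the bits in 'mask' */
static unsigned int update_bits(unsigned int reg, unsigned int mask,
                                unsigned int val)
{
    return (reg & ~mask) | (val & mask);
}

int main(void)
{
    unsigned int reg = 0x0;          /* pretend register contents */
    unsigned int areq_prio = 2, prio_level = 1;

    /* P1 first... */
    reg = update_bits(reg, NOC_QOS_PRIORITY_P1_MASK,
                      areq_prio << NOC_QOS_PRIORITY_P1_SHIFT);
    /* ...P0 last; P1 is untouched by the second write */
    reg = update_bits(reg, NOC_QOS_PRIORITY_P0_MASK, prio_level);

    printf("reg = 0x%x\n", reg);     /* 0x9: P1 = 2, P0 = 1 */
    return 0;
}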
2561 +diff --git a/drivers/ipack/devices/ipoctal.c b/drivers/ipack/devices/ipoctal.c
2562 +index 20fa02c81070f..9117874cbfdbd 100644
2563 +--- a/drivers/ipack/devices/ipoctal.c
2564 ++++ b/drivers/ipack/devices/ipoctal.c
2565 +@@ -33,6 +33,7 @@ struct ipoctal_channel {
2566 + unsigned int pointer_read;
2567 + unsigned int pointer_write;
2568 + struct tty_port tty_port;
2569 ++ bool tty_registered;
2570 + union scc2698_channel __iomem *regs;
2571 + union scc2698_block __iomem *block_regs;
2572 + unsigned int board_id;
2573 +@@ -81,22 +82,34 @@ static int ipoctal_port_activate(struct tty_port *port, struct tty_struct *tty)
2574 + return 0;
2575 + }
2576 +
2577 +-static int ipoctal_open(struct tty_struct *tty, struct file *file)
2578 ++static int ipoctal_install(struct tty_driver *driver, struct tty_struct *tty)
2579 + {
2580 + struct ipoctal_channel *channel = dev_get_drvdata(tty->dev);
2581 + struct ipoctal *ipoctal = chan_to_ipoctal(channel, tty->index);
2582 +- int err;
2583 +-
2584 +- tty->driver_data = channel;
2585 ++ int res;
2586 +
2587 + if (!ipack_get_carrier(ipoctal->dev))
2588 + return -EBUSY;
2589 +
2590 +- err = tty_port_open(&channel->tty_port, tty, file);
2591 +- if (err)
2592 +- ipack_put_carrier(ipoctal->dev);
2593 ++ res = tty_standard_install(driver, tty);
2594 ++ if (res)
2595 ++ goto err_put_carrier;
2596 ++
2597 ++ tty->driver_data = channel;
2598 ++
2599 ++ return 0;
2600 ++
2601 ++err_put_carrier:
2602 ++ ipack_put_carrier(ipoctal->dev);
2603 ++
2604 ++ return res;
2605 ++}
2606 ++
2607 ++static int ipoctal_open(struct tty_struct *tty, struct file *file)
2608 ++{
2609 ++ struct ipoctal_channel *channel = tty->driver_data;
2610 +
2611 +- return err;
2612 ++ return tty_port_open(&channel->tty_port, tty, file);
2613 + }
2614 +
2615 + static void ipoctal_reset_stats(struct ipoctal_stats *stats)
2616 +@@ -264,7 +277,6 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
2617 + int res;
2618 + int i;
2619 + struct tty_driver *tty;
2620 +- char name[20];
2621 + struct ipoctal_channel *channel;
2622 + struct ipack_region *region;
2623 + void __iomem *addr;
2624 +@@ -355,8 +367,11 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
2625 + /* Fill struct tty_driver with ipoctal data */
2626 + tty->owner = THIS_MODULE;
2627 + tty->driver_name = KBUILD_MODNAME;
2628 +- sprintf(name, KBUILD_MODNAME ".%d.%d.", bus_nr, slot);
2629 +- tty->name = name;
2630 ++ tty->name = kasprintf(GFP_KERNEL, KBUILD_MODNAME ".%d.%d.", bus_nr, slot);
2631 ++ if (!tty->name) {
2632 ++ res = -ENOMEM;
2633 ++ goto err_put_driver;
2634 ++ }
2635 + tty->major = 0;
2636 +
2637 + tty->minor_start = 0;
2638 +@@ -372,8 +387,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
2639 + res = tty_register_driver(tty);
2640 + if (res) {
2641 + dev_err(&ipoctal->dev->dev, "Can't register tty driver.\n");
2642 +- put_tty_driver(tty);
2643 +- return res;
2644 ++ goto err_free_name;
2645 + }
2646 +
2647 + /* Save struct tty_driver for use it when uninstalling the device */
2648 +@@ -384,7 +398,9 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
2649 +
2650 + channel = &ipoctal->channel[i];
2651 + tty_port_init(&channel->tty_port);
2652 +- tty_port_alloc_xmit_buf(&channel->tty_port);
2653 ++ res = tty_port_alloc_xmit_buf(&channel->tty_port);
2654 ++ if (res)
2655 ++ continue;
2656 + channel->tty_port.ops = &ipoctal_tty_port_ops;
2657 +
2658 + ipoctal_reset_stats(&channel->stats);
2659 +@@ -392,13 +408,15 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
2660 + spin_lock_init(&channel->lock);
2661 + channel->pointer_read = 0;
2662 + channel->pointer_write = 0;
2663 +- tty_dev = tty_port_register_device(&channel->tty_port, tty, i, NULL);
2664 ++ tty_dev = tty_port_register_device_attr(&channel->tty_port, tty,
2665 ++ i, NULL, channel, NULL);
2666 + if (IS_ERR(tty_dev)) {
2667 + dev_err(&ipoctal->dev->dev, "Failed to register tty device.\n");
2668 ++ tty_port_free_xmit_buf(&channel->tty_port);
2669 + tty_port_destroy(&channel->tty_port);
2670 + continue;
2671 + }
2672 +- dev_set_drvdata(tty_dev, channel);
2673 ++ channel->tty_registered = true;
2674 + }
2675 +
2676 + /*
2677 +@@ -410,6 +428,13 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
2678 + ipoctal_irq_handler, ipoctal);
2679 +
2680 + return 0;
2681 ++
2682 ++err_free_name:
2683 ++ kfree(tty->name);
2684 ++err_put_driver:
2685 ++ put_tty_driver(tty);
2686 ++
2687 ++ return res;
2688 + }
2689 +
2690 + static inline int ipoctal_copy_write_buffer(struct ipoctal_channel *channel,
2691 +@@ -649,6 +674,7 @@ static void ipoctal_cleanup(struct tty_struct *tty)
2692 +
2693 + static const struct tty_operations ipoctal_fops = {
2694 + .ioctl = NULL,
2695 ++ .install = ipoctal_install,
2696 + .open = ipoctal_open,
2697 + .close = ipoctal_close,
2698 + .write = ipoctal_write_tty,
2699 +@@ -691,12 +717,17 @@ static void __ipoctal_remove(struct ipoctal *ipoctal)
2700 +
2701 + for (i = 0; i < NR_CHANNELS; i++) {
2702 + struct ipoctal_channel *channel = &ipoctal->channel[i];
2703 ++
2704 ++ if (!channel->tty_registered)
2705 ++ continue;
2706 ++
2707 + tty_unregister_device(ipoctal->tty_drv, i);
2708 + tty_port_free_xmit_buf(&channel->tty_port);
2709 + tty_port_destroy(&channel->tty_port);
2710 + }
2711 +
2712 + tty_unregister_driver(ipoctal->tty_drv);
2713 ++ kfree(ipoctal->tty_drv->name);
2714 + put_tty_driver(ipoctal->tty_drv);
2715 + kfree(ipoctal);
2716 + }
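The ipoctal fix replaces a driver name built in a stack buffer, whose pointer used to outlive the function, with a heap allocation freed on removal. The same bug shape and fix are easy to reproduce in user space with asprintf()/free() (names below are illustrative):

#define _GNU_SOURCE              /* for asprintf() */
#include <stdio.h>
#include <stdlib.h>

struct tty_like { char *name; };

/* fixed pattern: allocate, keep the pointer, free on teardown */
static int setup(struct tty_like *t, int bus, int slot)
{
    if (asprintf(&t->name, "ipoctal.%d.%d.", bus, slot) < 0)
        return -1;               /* -ENOMEM in the driver */
    return 0;
}

static void teardown(struct tty_like *t)
{
    free(t->name);               /* mirrors kfree(tty->name) on remove */
    t->name = NULL;
}

int main(void)
{
    struct tty_like t;

    if (setup(&t, 0, 3))
        return 1;
    printf("%s\n", t.name);      /* ipoctal.0.3. */
    teardown(&t);
    return 0;
}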
2717 +diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
2718 +index d402e456f27df..7d0ab19c38bb9 100644
2719 +--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
2720 ++++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
2721 +@@ -1140,8 +1140,8 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
2722 + continue;
2723 + length = 0;
2724 + switch (c) {
2725 +- /* SOF0: baseline JPEG */
2726 +- case SOF0:
2727 ++ /* JPEG_MARKER_SOF0: baseline JPEG */
2728 ++ case JPEG_MARKER_SOF0:
2729 + if (get_word_be(&jpeg_buffer, &word))
2730 + break;
2731 + length = (long)word - 2;
2732 +@@ -1172,7 +1172,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
2733 + notfound = 0;
2734 + break;
2735 +
2736 +- case DQT:
2737 ++ case JPEG_MARKER_DQT:
2738 + if (get_word_be(&jpeg_buffer, &word))
2739 + break;
2740 + length = (long)word - 2;
2741 +@@ -1185,7 +1185,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
2742 + skip(&jpeg_buffer, length);
2743 + break;
2744 +
2745 +- case DHT:
2746 ++ case JPEG_MARKER_DHT:
2747 + if (get_word_be(&jpeg_buffer, &word))
2748 + break;
2749 + length = (long)word - 2;
2750 +@@ -1198,15 +1198,15 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
2751 + skip(&jpeg_buffer, length);
2752 + break;
2753 +
2754 +- case SOS:
2755 ++ case JPEG_MARKER_SOS:
2756 + sos = jpeg_buffer.curr - 2; /* 0xffda */
2757 + break;
2758 +
2759 + /* skip payload-less markers */
2760 +- case RST ... RST + 7:
2761 +- case SOI:
2762 +- case EOI:
2763 +- case TEM:
2764 ++ case JPEG_MARKER_RST ... JPEG_MARKER_RST + 7:
2765 ++ case JPEG_MARKER_SOI:
2766 ++ case JPEG_MARKER_EOI:
2767 ++ case JPEG_MARKER_TEM:
2768 + break;
2769 +
2770 + /* skip uninteresting payload markers */
2771 +diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.h b/drivers/media/platform/s5p-jpeg/jpeg-core.h
2772 +index a77d93c098ce7..8473a019bb5f2 100644
2773 +--- a/drivers/media/platform/s5p-jpeg/jpeg-core.h
2774 ++++ b/drivers/media/platform/s5p-jpeg/jpeg-core.h
2775 +@@ -37,15 +37,15 @@
2776 + #define EXYNOS3250_IRQ_TIMEOUT 0x10000000
2777 +
2778 + /* a selection of JPEG markers */
2779 +-#define TEM 0x01
2780 +-#define SOF0 0xc0
2781 +-#define DHT 0xc4
2782 +-#define RST 0xd0
2783 +-#define SOI 0xd8
2784 +-#define EOI 0xd9
2785 +-#define SOS 0xda
2786 +-#define DQT 0xdb
2787 +-#define DHP 0xde
2788 ++#define JPEG_MARKER_TEM 0x01
2789 ++#define JPEG_MARKER_SOF0 0xc0
2790 ++#define JPEG_MARKER_DHT 0xc4
2791 ++#define JPEG_MARKER_RST 0xd0
2792 ++#define JPEG_MARKER_SOI 0xd8
2793 ++#define JPEG_MARKER_EOI 0xd9
2794 ++#define JPEG_MARKER_SOS 0xda
2795 ++#define JPEG_MARKER_DQT 0xdb
2796 ++#define JPEG_MARKER_DHP 0xde
2797 +
2798 + /* Flags that indicate a format can be used for capture/output */
2799 + #define SJPEG_FMT_FLAG_ENC_CAPTURE (1 << 0)
2800 +@@ -187,11 +187,11 @@ struct s5p_jpeg_marker {
2801 + * @fmt: driver-specific format of this queue
2802 + * @w: image width
2803 + * @h: image height
2804 +- * @sos: SOS marker's position relative to the buffer beginning
2805 +- * @dht: DHT markers' positions relative to the buffer beginning
2806 +- * @dqt: DQT markers' positions relative to the buffer beginning
2807 +- * @sof: SOF0 marker's position relative to the buffer beginning
2808 +- * @sof_len: SOF0 marker's payload length (without length field itself)
2809 ++ * @sos: JPEG_MARKER_SOS's position relative to the buffer beginning
2810 ++ * @dht: JPEG_MARKER_DHT markers' positions relative to the buffer beginning
2811 ++ * @dqt: JPEG_MARKER_DQT markers' positions relative to the buffer beginning
2812 ++ * @sof: JPEG_MARKER_SOF0's position relative to the buffer beginning
2813 ++ * @sof_len: JPEG_MARKER_SOF0's payload length (without length field itself)
2814 + * @size: image buffer size in bytes
2815 + */
2816 + struct s5p_jpeg_q_data {
2817 +diff --git a/drivers/media/rc/ir_toy.c b/drivers/media/rc/ir_toy.c
2818 +index 3e729a17b35ff..48d52baec1a1c 100644
2819 +--- a/drivers/media/rc/ir_toy.c
2820 ++++ b/drivers/media/rc/ir_toy.c
2821 +@@ -24,6 +24,7 @@ static const u8 COMMAND_VERSION[] = { 'v' };
2822 + // End transmit and repeat reset command so we exit sump mode
2823 + static const u8 COMMAND_RESET[] = { 0xff, 0xff, 0, 0, 0, 0, 0 };
2824 + static const u8 COMMAND_SMODE_ENTER[] = { 's' };
2825 ++static const u8 COMMAND_SMODE_EXIT[] = { 0 };
2826 + static const u8 COMMAND_TXSTART[] = { 0x26, 0x24, 0x25, 0x03 };
2827 +
2828 + #define REPLY_XMITCOUNT 't'
2829 +@@ -309,12 +310,30 @@ static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count)
2830 + buf[i] = cpu_to_be16(v);
2831 + }
2832 +
2833 +- buf[count] = cpu_to_be16(0xffff);
2834 ++ buf[count] = 0xffff;
2835 +
2836 + irtoy->tx_buf = buf;
2837 + irtoy->tx_len = size;
2838 + irtoy->emitted = 0;
2839 +
2840 ++ // If the unit is receiving IR while the first TXSTART command is
2841 ++ // sent, the device might end up hanging with its LED on. It does
2842 ++ // not respond to any command when this happens. To work around
2843 ++ // this, re-enter sample mode.
2844 ++ err = irtoy_command(irtoy, COMMAND_SMODE_EXIT,
2845 ++ sizeof(COMMAND_SMODE_EXIT), STATE_RESET);
2846 ++ if (err) {
2847 ++ dev_err(irtoy->dev, "exit sample mode: %d\n", err);
2848 ++ return err;
2849 ++ }
2850 ++
2851 ++ err = irtoy_command(irtoy, COMMAND_SMODE_ENTER,
2852 ++ sizeof(COMMAND_SMODE_ENTER), STATE_COMMAND);
2853 ++ if (err) {
2854 ++ dev_err(irtoy->dev, "enter sample mode: %d\n", err);
2855 ++ return err;
2856 ++ }
2857 ++
2858 + err = irtoy_command(irtoy, COMMAND_TXSTART, sizeof(COMMAND_TXSTART),
2859 + STATE_TX);
2860 + kfree(buf);
2861 +diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
2862 +index e49ca0f7fe9a8..1543a5dd94252 100644
2863 +--- a/drivers/mmc/host/renesas_sdhi_core.c
2864 ++++ b/drivers/mmc/host/renesas_sdhi_core.c
2865 +@@ -582,6 +582,8 @@ static void renesas_sdhi_reset(struct tmio_mmc_host *host)
2866 + /* Unknown why but without polling reset status, it will hang */
2867 + read_poll_timeout(reset_control_status, ret, ret == 0, 1, 100,
2868 + false, priv->rstc);
2869 ++ /* At least SDHI_VER_GEN2_SDR50 needs manual release of reset */
2870 ++ sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
2871 + priv->needs_adjust_hs400 = false;
2872 + renesas_sdhi_set_clock(host, host->clk_cache);
2873 + } else if (priv->scc_ctl) {
2874 +diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
2875 +index 1c122a1f2f97d..66b4f4a9832a4 100644
2876 +--- a/drivers/net/dsa/mv88e6xxx/chip.c
2877 ++++ b/drivers/net/dsa/mv88e6xxx/chip.c
2878 +@@ -2775,8 +2775,8 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
2879 + if (err)
2880 + return err;
2881 +
2882 +- /* Port Control 2: don't force a good FCS, set the maximum frame size to
2883 +- * 10240 bytes, disable 802.1q tags checking, don't discard tagged or
2884 ++ /* Port Control 2: don't force a good FCS, set the MTU size to
2885 ++ * 10222 bytes, disable 802.1q tags checking, don't discard tagged or
2886 + * untagged frames on this port, do a destination address lookup on all
2887 + * received packets as usual, disable ARP mirroring and don't send a
2888 + * copy of all transmitted/received frames on this port to the CPU.
2889 +@@ -2795,7 +2795,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
2890 + return err;
2891 +
2892 + if (chip->info->ops->port_set_jumbo_size) {
2893 +- err = chip->info->ops->port_set_jumbo_size(chip, port, 10240);
2894 ++ err = chip->info->ops->port_set_jumbo_size(chip, port, 10218);
2895 + if (err)
2896 + return err;
2897 + }
2898 +@@ -2885,10 +2885,10 @@ static int mv88e6xxx_get_max_mtu(struct dsa_switch *ds, int port)
2899 + struct mv88e6xxx_chip *chip = ds->priv;
2900 +
2901 + if (chip->info->ops->port_set_jumbo_size)
2902 +- return 10240;
2903 ++ return 10240 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
2904 + else if (chip->info->ops->set_max_frame_size)
2905 +- return 1632;
2906 +- return 1522;
2907 ++ return 1632 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
2908 ++ return 1522 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
2909 + }
2910 +
2911 + static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
2912 +@@ -2896,6 +2896,9 @@ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
2913 + struct mv88e6xxx_chip *chip = ds->priv;
2914 + int ret = 0;
2915 +
2916 ++ if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
2917 ++ new_mtu += EDSA_HLEN;
2918 ++
2919 + mv88e6xxx_reg_lock(chip);
2920 + if (chip->info->ops->port_set_jumbo_size)
2921 + ret = chip->info->ops->port_set_jumbo_size(chip, port, new_mtu);
2922 +@@ -3657,7 +3660,6 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
2923 + .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
2924 + .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
2925 + .port_set_ether_type = mv88e6351_port_set_ether_type,
2926 +- .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
2927 + .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
2928 + .port_pause_limit = mv88e6097_port_pause_limit,
2929 + .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
2930 +@@ -3682,6 +3684,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
2931 + .avb_ops = &mv88e6165_avb_ops,
2932 + .ptp_ops = &mv88e6165_ptp_ops,
2933 + .phylink_validate = mv88e6185_phylink_validate,
2934 ++ .set_max_frame_size = mv88e6185_g1_set_max_frame_size,
2935 + };
2936 +
2937 + static const struct mv88e6xxx_ops mv88e6165_ops = {
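The MTU numbers are consistent once the overheads are spelled out: the 10240-byte hardware frame limit covers the VLAN Ethernet header (18 bytes), the EDSA tag (8, on DSA/CPU ports only) and the FCS (4), so get_max_mtu() reports 10240 - 18 - 8 - 4 = 10210, while setup_port()'s 10218 becomes 10240 again inside port_set_jumbo_size(), which re-adds 18 + 4. A worked check:

#include <stdio.h>

#define VLAN_ETH_HLEN 18   /* Ethernet header + 802.1Q tag */
#define EDSA_HLEN      8   /* Marvell EDSA tag */
#define ETH_FCS_LEN    4

int main(void)
{
    int hw_frame = 10240;

    /* user-visible MTU on a jumbo-capable port */
    int max_mtu = hw_frame - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
    printf("max MTU: %d\n", max_mtu);                /* 10210 */

    /* setup_port() passes 10218; the port code re-adds 18 + 4 */
    printf("frame:   %d\n", 10218 + VLAN_ETH_HLEN + ETH_FCS_LEN); /* 10240 */

    /* change_mtu() adds the EDSA tag on DSA/CPU ports first */
    printf("dsa mtu: %d\n", max_mtu + EDSA_HLEN);    /* 10218 */
    return 0;
}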
2938 +diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
2939 +index 675b1f3e43b7b..59f316cc8583e 100644
2940 +--- a/drivers/net/dsa/mv88e6xxx/chip.h
2941 ++++ b/drivers/net/dsa/mv88e6xxx/chip.h
2942 +@@ -18,6 +18,7 @@
2943 + #include <linux/timecounter.h>
2944 + #include <net/dsa.h>
2945 +
2946 ++#define EDSA_HLEN 8
2947 + #define MV88E6XXX_N_FID 4096
2948 +
2949 + /* PVT limits for 4-bit port and 5-bit switch */
2950 +diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
2951 +index 815b0f681d698..5848112036b08 100644
2952 +--- a/drivers/net/dsa/mv88e6xxx/global1.c
2953 ++++ b/drivers/net/dsa/mv88e6xxx/global1.c
2954 +@@ -232,6 +232,8 @@ int mv88e6185_g1_set_max_frame_size(struct mv88e6xxx_chip *chip, int mtu)
2955 + u16 val;
2956 + int err;
2957 +
2958 ++ mtu += ETH_HLEN + ETH_FCS_LEN;
2959 ++
2960 + err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val);
2961 + if (err)
2962 + return err;
2963 +diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
2964 +index f77e2ee64a607..451028c57af8a 100644
2965 +--- a/drivers/net/dsa/mv88e6xxx/port.c
2966 ++++ b/drivers/net/dsa/mv88e6xxx/port.c
2967 +@@ -1277,6 +1277,8 @@ int mv88e6165_port_set_jumbo_size(struct mv88e6xxx_chip *chip, int port,
2968 + u16 reg;
2969 + int err;
2970 +
2971 ++ size += VLAN_ETH_HLEN + ETH_FCS_LEN;
2972 ++
2973 + err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &reg);
2974 + if (err)
2975 + return err;
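The mv88e6xxx hunks above all perform one conversion: the switch registers bound the full on-wire frame, while DSA reasons in MTU (L2 payload), so the driver now adds or strips the header overhead at that boundary. A minimal standalone sketch of the arithmetic, assuming the usual kernel values VLAN_ETH_HLEN = 18 and ETH_FCS_LEN = 4 together with the EDSA_HLEN = 8 introduced in chip.h:

#include <stdio.h>

#define VLAN_ETH_HLEN	18	/* Ethernet header plus 802.1q tag */
#define EDSA_HLEN	8	/* Marvell EDSA tag, as defined in chip.h above */
#define ETH_FCS_LEN	4	/* frame check sequence */

int main(void)
{
	/* A jumbo-capable port accepts 10240-byte frames, so the largest
	 * MTU mv88e6xxx_get_max_mtu() may report is: */
	printf("max mtu:    %d\n",
	       10240 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN);	/* 10210 */

	/* port_set_jumbo_size() adds the overhead back before programming
	 * the register, so the 10218-byte MTU used during setup lands
	 * exactly on the hardware limit: */
	printf("frame size: %d\n",
	       10218 + VLAN_ETH_HLEN + ETH_FCS_LEN);			/* 10240 */
	return 0;
}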
2976 +diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
2977 +index c84f6c226743d..cf00709caea4b 100644
2978 +--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
2979 ++++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
2980 +@@ -541,8 +541,7 @@ static void enetc_mac_config(struct enetc_hw *hw, phy_interface_t phy_mode)
2981 +
2982 + if (phy_interface_mode_is_rgmii(phy_mode)) {
2983 + val = enetc_port_rd(hw, ENETC_PM0_IF_MODE);
2984 +- val &= ~ENETC_PM0_IFM_EN_AUTO;
2985 +- val &= ENETC_PM0_IFM_IFMODE_MASK;
2986 ++ val &= ~(ENETC_PM0_IFM_EN_AUTO | ENETC_PM0_IFM_IFMODE_MASK);
2987 + val |= ENETC_PM0_IFM_IFMODE_GMII | ENETC_PM0_IFM_RG;
2988 + enetc_port_wr(hw, ENETC_PM0_IF_MODE, val);
2989 + }
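The enetc fix above is a missing-~ mask bug: "val &= ENETC_PM0_IFM_IFMODE_MASK" keeps only the mode field and wipes every other bit, instead of clearing that field so new values can be OR'd in. A toy demonstration with made-up bit positions (the real register layout lives in enetc_hw.h and is not reproduced here):

#include <stdio.h>

#define EN_AUTO		0x2000	/* hypothetical bit, for illustration only */
#define IFMODE_MASK	0x0003	/* hypothetical field, likewise */

int main(void)
{
	unsigned int val = 0xabcd;	/* pretend register contents */

	/* buggy: keeps only the masked bits and drops everything else */
	printf("0x%04x\n", val & IFMODE_MASK);			/* 0x0001 */
	/* fixed: clears both fields and preserves the rest */
	printf("0x%04x\n", val & ~(EN_AUTO | IFMODE_MASK));	/* 0x8bcc */
	return 0;
}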
2990 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
2991 +index e0b7c3c44e7b4..32987bd134a1d 100644
2992 +--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
2993 ++++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
2994 +@@ -750,7 +750,6 @@ struct hnae3_tc_info {
2995 + u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */
2996 + u16 tqp_count[HNAE3_MAX_TC];
2997 + u16 tqp_offset[HNAE3_MAX_TC];
2998 +- unsigned long tc_en; /* bitmap of TC enabled */
2999 + u8 num_tc; /* Total number of enabled TCs */
3000 + bool mqprio_active;
3001 + };
3002 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3003 +index 9faa3712ea5b8..114692c4f7978 100644
3004 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3005 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3006 +@@ -620,13 +620,9 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev)
3007 + return ret;
3008 + }
3009 +
3010 +- for (i = 0; i < HNAE3_MAX_TC; i++) {
3011 +- if (!test_bit(i, &tc_info->tc_en))
3012 +- continue;
3013 +-
3014 ++ for (i = 0; i < tc_info->num_tc; i++)
3015 + netdev_set_tc_queue(netdev, i, tc_info->tqp_count[i],
3016 + tc_info->tqp_offset[i]);
3017 +- }
3018 + }
3019 +
3020 + ret = netif_set_real_num_tx_queues(netdev, queue_size);
3021 +@@ -776,6 +772,11 @@ static int hns3_nic_net_open(struct net_device *netdev)
3022 + if (hns3_nic_resetting(netdev))
3023 + return -EBUSY;
3024 +
3025 ++ if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
3026 ++ netdev_warn(netdev, "net open repeatedly!\n");
3027 ++ return 0;
3028 ++ }
3029 ++
3030 + netif_carrier_off(netdev);
3031 +
3032 + ret = hns3_nic_set_real_num_queue(netdev);
3033 +@@ -4825,12 +4826,9 @@ static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
3034 + struct hnae3_tc_info *tc_info = &kinfo->tc_info;
3035 + int i;
3036 +
3037 +- for (i = 0; i < HNAE3_MAX_TC; i++) {
3038 ++ for (i = 0; i < tc_info->num_tc; i++) {
3039 + int j;
3040 +
3041 +- if (!test_bit(i, &tc_info->tc_en))
3042 +- continue;
3043 +-
3044 + for (j = 0; j < tc_info->tqp_count[i]; j++) {
3045 + struct hnae3_queue *q;
3046 +
3047 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
3048 +index 82061ab6930fb..83ee0f41322c7 100644
3049 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
3050 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
3051 +@@ -312,33 +312,8 @@ out:
3052 + return ret_val;
3053 + }
3054 +
3055 +-/**
3056 +- * hns3_self_test - self test
3057 +- * @ndev: net device
3058 +- * @eth_test: test cmd
3059 +- * @data: test result
3060 +- */
3061 +-static void hns3_self_test(struct net_device *ndev,
3062 +- struct ethtool_test *eth_test, u64 *data)
3063 ++static void hns3_set_selftest_param(struct hnae3_handle *h, int (*st_param)[2])
3064 + {
3065 +- struct hns3_nic_priv *priv = netdev_priv(ndev);
3066 +- struct hnae3_handle *h = priv->ae_handle;
3067 +- int st_param[HNS3_SELF_TEST_TYPE_NUM][2];
3068 +- bool if_running = netif_running(ndev);
3069 +- int test_index = 0;
3070 +- u32 i;
3071 +-
3072 +- if (hns3_nic_resetting(ndev)) {
3073 +- netdev_err(ndev, "dev resetting!");
3074 +- return;
3075 +- }
3076 +-
3077 +- /* Only do offline selftest, or pass by default */
3078 +- if (eth_test->flags != ETH_TEST_FL_OFFLINE)
3079 +- return;
3080 +-
3081 +- netif_dbg(h, drv, ndev, "self test start");
3082 +-
3083 + st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP;
3084 + st_param[HNAE3_LOOP_APP][1] =
3085 + h->flags & HNAE3_SUPPORT_APP_LOOPBACK;
3086 +@@ -355,13 +330,26 @@ static void hns3_self_test(struct net_device *ndev,
3087 + st_param[HNAE3_LOOP_PHY][0] = HNAE3_LOOP_PHY;
3088 + st_param[HNAE3_LOOP_PHY][1] =
3089 + h->flags & HNAE3_SUPPORT_PHY_LOOPBACK;
3090 ++}
3091 ++
3092 ++static void hns3_selftest_prepare(struct net_device *ndev,
3093 ++ bool if_running, int (*st_param)[2])
3094 ++{
3095 ++ struct hns3_nic_priv *priv = netdev_priv(ndev);
3096 ++ struct hnae3_handle *h = priv->ae_handle;
3097 ++
3098 ++ if (netif_msg_ifdown(h))
3099 ++ netdev_info(ndev, "self test start\n");
3100 ++
3101 ++ hns3_set_selftest_param(h, st_param);
3102 +
3103 + if (if_running)
3104 + ndev->netdev_ops->ndo_stop(ndev);
3105 +
3106 + #if IS_ENABLED(CONFIG_VLAN_8021Q)
3107 + /* Disable the vlan filter, as selftest does not support it */
3108 +- if (h->ae_algo->ops->enable_vlan_filter)
3109 ++ if (h->ae_algo->ops->enable_vlan_filter &&
3110 ++ ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
3111 + h->ae_algo->ops->enable_vlan_filter(h, false);
3112 + #endif
3113 +
3114 +@@ -373,6 +361,36 @@ static void hns3_self_test(struct net_device *ndev,
3115 + h->ae_algo->ops->halt_autoneg(h, true);
3116 +
3117 + set_bit(HNS3_NIC_STATE_TESTING, &priv->state);
3118 ++}
3119 ++
3120 ++static void hns3_selftest_restore(struct net_device *ndev, bool if_running)
3121 ++{
3122 ++ struct hns3_nic_priv *priv = netdev_priv(ndev);
3123 ++ struct hnae3_handle *h = priv->ae_handle;
3124 ++
3125 ++ clear_bit(HNS3_NIC_STATE_TESTING, &priv->state);
3126 ++
3127 ++ if (h->ae_algo->ops->halt_autoneg)
3128 ++ h->ae_algo->ops->halt_autoneg(h, false);
3129 ++
3130 ++#if IS_ENABLED(CONFIG_VLAN_8021Q)
3131 ++ if (h->ae_algo->ops->enable_vlan_filter &&
3132 ++ ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
3133 ++ h->ae_algo->ops->enable_vlan_filter(h, true);
3134 ++#endif
3135 ++
3136 ++ if (if_running)
3137 ++ ndev->netdev_ops->ndo_open(ndev);
3138 ++
3139 ++ if (netif_msg_ifdown(h))
3140 ++ netdev_info(ndev, "self test end\n");
3141 ++}
3142 ++
3143 ++static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2],
3144 ++ struct ethtool_test *eth_test, u64 *data)
3145 ++{
3146 ++ int test_index = 0;
3147 ++ u32 i;
3148 +
3149 + for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) {
3150 + enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0];
3151 +@@ -391,21 +409,32 @@ static void hns3_self_test(struct net_device *ndev,
3152 +
3153 + test_index++;
3154 + }
3155 ++}
3156 +
3157 +- clear_bit(HNS3_NIC_STATE_TESTING, &priv->state);
3158 +-
3159 +- if (h->ae_algo->ops->halt_autoneg)
3160 +- h->ae_algo->ops->halt_autoneg(h, false);
3161 ++/**
3162 ++ * hns3_self_test - self test
3163 ++ * @ndev: net device
3164 ++ * @eth_test: test cmd
3165 ++ * @data: test result
3166 ++ */
3167 ++static void hns3_self_test(struct net_device *ndev,
3168 ++ struct ethtool_test *eth_test, u64 *data)
3169 ++{
3170 ++ int st_param[HNS3_SELF_TEST_TYPE_NUM][2];
3171 ++ bool if_running = netif_running(ndev);
3172 +
3173 +-#if IS_ENABLED(CONFIG_VLAN_8021Q)
3174 +- if (h->ae_algo->ops->enable_vlan_filter)
3175 +- h->ae_algo->ops->enable_vlan_filter(h, true);
3176 +-#endif
3177 ++ if (hns3_nic_resetting(ndev)) {
3178 ++ netdev_err(ndev, "dev resetting!");
3179 ++ return;
3180 ++ }
3181 +
3182 +- if (if_running)
3183 +- ndev->netdev_ops->ndo_open(ndev);
3184 ++ /* Only do offline selftest, or pass by default */
3185 ++ if (eth_test->flags != ETH_TEST_FL_OFFLINE)
3186 ++ return;
3187 +
3188 +- netif_dbg(h, drv, ndev, "self test end\n");
3189 ++ hns3_selftest_prepare(ndev, if_running, st_param);
3190 ++ hns3_do_selftest(ndev, st_param, eth_test, data);
3191 ++ hns3_selftest_restore(ndev, if_running);
3192 + }
3193 +
3194 + static void hns3_update_limit_promisc_mode(struct net_device *netdev,
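The net effect of the hns3_ethtool.c refactor above: the monolithic self test becomes a prepare/run/restore bracket, and both bracket halves gain an extra NETIF_F_HW_VLAN_CTAG_FILTER check so the VLAN filter is only toggled when it was actually enabled. A rough sketch of the resulting shape (names simplified, all hardware work replaced by prints):

#include <stdbool.h>
#include <stdio.h>

static bool vlan_filter_on = true;	/* stands in for the feature bit */

static void prepare(bool if_running)
{
	if (if_running)
		printf("stop device\n");
	if (vlan_filter_on)
		printf("disable vlan filter\n");
	printf("halt autoneg, set TESTING state\n");
}

static void restore(bool if_running)
{
	/* undo prepare() in reverse order */
	printf("clear TESTING state, resume autoneg\n");
	if (vlan_filter_on)
		printf("re-enable vlan filter\n");
	if (if_running)
		printf("reopen device\n");
}

int main(void)
{
	bool if_running = true;

	prepare(if_running);
	printf("run loopback tests\n");
	restore(if_running);
	return 0;
}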
3195 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
3196 +index eb748aa35952c..0f0bf3d503bf5 100644
3197 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
3198 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
3199 +@@ -472,7 +472,7 @@ err_csq:
3200 + return ret;
3201 + }
3202 +
3203 +-static int hclge_firmware_compat_config(struct hclge_dev *hdev)
3204 ++static int hclge_firmware_compat_config(struct hclge_dev *hdev, bool en)
3205 + {
3206 + struct hclge_firmware_compat_cmd *req;
3207 + struct hclge_desc desc;
3208 +@@ -480,13 +480,16 @@ static int hclge_firmware_compat_config(struct hclge_dev *hdev)
3209 +
3210 + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_COMPAT_CFG, false);
3211 +
3212 +- req = (struct hclge_firmware_compat_cmd *)desc.data;
3213 ++ if (en) {
3214 ++ req = (struct hclge_firmware_compat_cmd *)desc.data;
3215 +
3216 +- hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1);
3217 +- hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1);
3218 +- if (hnae3_dev_phy_imp_supported(hdev))
3219 +- hnae3_set_bit(compat, HCLGE_PHY_IMP_EN_B, 1);
3220 +- req->compat = cpu_to_le32(compat);
3221 ++ hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1);
3222 ++ hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1);
3223 ++ if (hnae3_dev_phy_imp_supported(hdev))
3224 ++ hnae3_set_bit(compat, HCLGE_PHY_IMP_EN_B, 1);
3225 ++
3226 ++ req->compat = cpu_to_le32(compat);
3227 ++ }
3228 +
3229 + return hclge_cmd_send(&hdev->hw, &desc, 1);
3230 + }
3231 +@@ -543,7 +546,7 @@ int hclge_cmd_init(struct hclge_dev *hdev)
3232 + /* ask the firmware to enable some features, driver can work without
3233 + * it.
3234 + */
3235 +- ret = hclge_firmware_compat_config(hdev);
3236 ++ ret = hclge_firmware_compat_config(hdev, true);
3237 + if (ret)
3238 + dev_warn(&hdev->pdev->dev,
3239 + "Firmware compatible features not enabled(%d).\n",
3240 +@@ -573,6 +576,8 @@ static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
3241 +
3242 + void hclge_cmd_uninit(struct hclge_dev *hdev)
3243 + {
3244 ++ hclge_firmware_compat_config(hdev, false);
3245 ++
3246 + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3247 + /* wait to ensure that the firmware completes the possible left
3248 + * over commands.
3249 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
3250 +index 39f56f245d843..c90bfde2aecff 100644
3251 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
3252 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
3253 +@@ -224,6 +224,10 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
3254 + }
3255 +
3256 + hclge_tm_schd_info_update(hdev, num_tc);
3257 ++ if (num_tc > 1)
3258 ++ hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
3259 ++ else
3260 ++ hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
3261 +
3262 + ret = hclge_ieee_ets_to_tm_info(hdev, ets);
3263 + if (ret)
3264 +@@ -285,8 +289,7 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
3265 + u8 i, j, pfc_map, *prio_tc;
3266 + int ret;
3267 +
3268 +- if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
3269 +- hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
3270 ++ if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
3271 + return -EINVAL;
3272 +
3273 + if (pfc->pfc_en == hdev->tm_info.pfc_en)
3274 +@@ -420,8 +423,6 @@ static int hclge_mqprio_qopt_check(struct hclge_dev *hdev,
3275 + static void hclge_sync_mqprio_qopt(struct hnae3_tc_info *tc_info,
3276 + struct tc_mqprio_qopt_offload *mqprio_qopt)
3277 + {
3278 +- int i;
3279 +-
3280 + memset(tc_info, 0, sizeof(*tc_info));
3281 + tc_info->num_tc = mqprio_qopt->qopt.num_tc;
3282 + memcpy(tc_info->prio_tc, mqprio_qopt->qopt.prio_tc_map,
3283 +@@ -430,9 +431,6 @@ static void hclge_sync_mqprio_qopt(struct hnae3_tc_info *tc_info,
3284 + sizeof_field(struct hnae3_tc_info, tqp_count));
3285 + memcpy(tc_info->tqp_offset, mqprio_qopt->qopt.offset,
3286 + sizeof_field(struct hnae3_tc_info, tqp_offset));
3287 +-
3288 +- for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
3289 +- set_bit(tc_info->prio_tc[i], &tc_info->tc_en);
3290 + }
3291 +
3292 + static int hclge_config_tc(struct hclge_dev *hdev,
3293 +@@ -498,12 +496,17 @@ static int hclge_setup_tc(struct hnae3_handle *h,
3294 + return hclge_notify_init_up(hdev);
3295 +
3296 + err_out:
3297 +- /* roll-back */
3298 +- memcpy(&kinfo->tc_info, &old_tc_info, sizeof(old_tc_info));
3299 +- if (hclge_config_tc(hdev, &kinfo->tc_info))
3300 +- dev_err(&hdev->pdev->dev,
3301 +- "failed to roll back tc configuration\n");
3302 +-
3303 ++ if (!tc) {
3304 ++ dev_warn(&hdev->pdev->dev,
3305 ++ "failed to destroy mqprio, will active after reset, ret = %d\n",
3306 ++ ret);
3307 ++ } else {
3308 ++ /* roll-back */
3309 ++ memcpy(&kinfo->tc_info, &old_tc_info, sizeof(old_tc_info));
3310 ++ if (hclge_config_tc(hdev, &kinfo->tc_info))
3311 ++ dev_err(&hdev->pdev->dev,
3312 ++ "failed to roll back tc configuration\n");
3313 ++ }
3314 + hclge_notify_init_up(hdev);
3315 +
3316 + return ret;
3317 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
3318 +index 90a72c79fec99..9920e76b4f41c 100644
3319 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
3320 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
3321 +@@ -8701,15 +8701,8 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
3322 + }
3323 +
3324 + /* check if we just hit the duplicate */
3325 +- if (!ret) {
3326 +- dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
3327 +- vport->vport_id, addr);
3328 +- return 0;
3329 +- }
3330 +-
3331 +- dev_err(&hdev->pdev->dev,
3332 +- "PF failed to add unicast entry(%pM) in the MAC table\n",
3333 +- addr);
3334 ++ if (!ret)
3335 ++ return -EEXIST;
3336 +
3337 + return ret;
3338 + }
3339 +@@ -8861,7 +8854,13 @@ static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
3340 + } else {
3341 + set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
3342 + &vport->state);
3343 +- break;
3344 ++
3345 ++ /* If one unicast mac address already exists in hardware,
3346 ++ * we still need to check whether the other unicast mac
3347 ++ * addresses are new ones that can be added.
3348 ++ */
3349 ++ if (ret != -EEXIST)
3350 ++ break;
3351 + }
3352 + }
3353 + }
3354 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
3355 +index 44618cc4cca10..f314dbd3ce11f 100644
3356 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
3357 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
3358 +@@ -687,12 +687,10 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
3359 +
3360 + for (i = 0; i < HNAE3_MAX_TC; i++) {
3361 + if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) {
3362 +- set_bit(i, &kinfo->tc_info.tc_en);
3363 + kinfo->tc_info.tqp_offset[i] = i * kinfo->rss_size;
3364 + kinfo->tc_info.tqp_count[i] = kinfo->rss_size;
3365 + } else {
3366 + /* Set to default queue if TC is disable */
3367 +- clear_bit(i, &kinfo->tc_info.tc_en);
3368 + kinfo->tc_info.tqp_offset[i] = 0;
3369 + kinfo->tc_info.tqp_count[i] = 1;
3370 + }
3371 +@@ -729,14 +727,6 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
3372 + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
3373 + hdev->tm_info.prio_tc[i] =
3374 + (i >= hdev->tm_info.num_tc) ? 0 : i;
3375 +-
3376 +- /* DCB is enabled if we have more than 1 TC or pfc_en is
3377 +- * non-zero.
3378 +- */
3379 +- if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
3380 +- hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
3381 +- else
3382 +- hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
3383 + }
3384 +
3385 + static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
3386 +@@ -767,10 +757,10 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
3387 +
3388 + static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev)
3389 + {
3390 +- if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
3391 ++ if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) {
3392 + if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
3393 + dev_warn(&hdev->pdev->dev,
3394 +- "DCB is disable, but last mode is FC_PFC\n");
3395 ++ "Only 1 tc used, but last mode is FC_PFC\n");
3396 +
3397 + hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
3398 + } else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
3399 +@@ -796,7 +786,7 @@ static void hclge_update_fc_mode(struct hclge_dev *hdev)
3400 + }
3401 + }
3402 +
3403 +-static void hclge_pfc_info_init(struct hclge_dev *hdev)
3404 ++void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
3405 + {
3406 + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
3407 + hclge_update_fc_mode(hdev);
3408 +@@ -812,7 +802,7 @@ static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
3409 +
3410 + hclge_tm_vport_info_update(hdev);
3411 +
3412 +- hclge_pfc_info_init(hdev);
3413 ++ hclge_tm_pfc_info_update(hdev);
3414 + }
3415 +
3416 + static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
3417 +@@ -1558,19 +1548,6 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
3418 + hclge_tm_schd_info_init(hdev);
3419 + }
3420 +
3421 +-void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
3422 +-{
3423 +- /* DCB is enabled if we have more than 1 TC or pfc_en is
3424 +- * non-zero.
3425 +- */
3426 +- if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
3427 +- hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
3428 +- else
3429 +- hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
3430 +-
3431 +- hclge_pfc_info_init(hdev);
3432 +-}
3433 +-
3434 + int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
3435 + {
3436 + int ret;
3437 +@@ -1616,7 +1593,7 @@ int hclge_tm_vport_map_update(struct hclge_dev *hdev)
3438 + if (ret)
3439 + return ret;
3440 +
3441 +- if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE))
3442 ++ if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en)
3443 + return 0;
3444 +
3445 + return hclge_tm_bp_setup(hdev);
3446 +diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
3447 +index 1b0958bd24f6c..1fa68ebe94325 100644
3448 +--- a/drivers/net/ethernet/intel/e100.c
3449 ++++ b/drivers/net/ethernet/intel/e100.c
3450 +@@ -2437,11 +2437,15 @@ static void e100_get_drvinfo(struct net_device *netdev,
3451 + sizeof(info->bus_info));
3452 + }
3453 +
3454 +-#define E100_PHY_REGS 0x1C
3455 ++#define E100_PHY_REGS 0x1D
3456 + static int e100_get_regs_len(struct net_device *netdev)
3457 + {
3458 + struct nic *nic = netdev_priv(netdev);
3459 +- return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
3460 ++
3461 ++ /* We know the number of registers, and the size of the dump buffer.
3462 ++ * Calculate the total size in bytes.
3463 ++ */
3464 ++ return (1 + E100_PHY_REGS) * sizeof(u32) + sizeof(nic->mem->dump_buf);
3465 + }
3466 +
3467 + static void e100_get_regs(struct net_device *netdev,
3468 +@@ -2455,14 +2459,18 @@ static void e100_get_regs(struct net_device *netdev,
3469 + buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
3470 + ioread8(&nic->csr->scb.cmd_lo) << 16 |
3471 + ioread16(&nic->csr->scb.status);
3472 +- for (i = E100_PHY_REGS; i >= 0; i--)
3473 +- buff[1 + E100_PHY_REGS - i] =
3474 +- mdio_read(netdev, nic->mii.phy_id, i);
3475 ++ for (i = 0; i < E100_PHY_REGS; i++)
3476 ++ /* Note that we read the registers in reverse order. This
3477 ++ * ordering is the ABI apparently used by ethtool and other
3478 ++ * applications.
3479 ++ */
3480 ++ buff[1 + i] = mdio_read(netdev, nic->mii.phy_id,
3481 ++ E100_PHY_REGS - 1 - i);
3482 + memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
3483 + e100_exec_cb(nic, NULL, e100_dump);
3484 + msleep(10);
3485 +- memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
3486 +- sizeof(nic->mem->dump_buf));
3487 ++ memcpy(&buff[1 + E100_PHY_REGS], nic->mem->dump_buf,
3488 ++ sizeof(nic->mem->dump_buf));
3489 + }
3490 +
3491 + static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
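Worked numbers for the e100 change above: MII registers 0x00..0x1C make 29 registers, so E100_PHY_REGS becomes 0x1D (a count, no longer the highest register number), and the length is now reported in bytes rather than in u32 slots. A quick check of the arithmetic, with a stand-in for sizeof(nic->mem->dump_buf):

#include <stdint.h>
#include <stdio.h>

#define E100_PHY_REGS 0x1D	/* registers 0x00..0x1C -> 29 of them */

int main(void)
{
	size_t dump_buf = 596;	/* stand-in; the real value is
				 * sizeof(nic->mem->dump_buf) */

	/* one CSR snapshot word plus 29 PHY registers, one u32 slot each */
	printf("regs len: %zu bytes\n",
	       (1 + E100_PHY_REGS) * sizeof(uint32_t) + dump_buf); /* 716 */
	return 0;
}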
3492 +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
3493 +index 4ceaca0f6ce30..21321d1647089 100644
3494 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
3495 ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
3496 +@@ -3204,7 +3204,7 @@ static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
3497 + max_combined = ixgbe_max_rss_indices(adapter);
3498 + }
3499 +
3500 +- return max_combined;
3501 ++ return min_t(int, max_combined, num_online_cpus());
3502 + }
3503 +
3504 + static void ixgbe_get_channels(struct net_device *dev,
3505 +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
3506 +index 14aea40da50fb..77350e5fdf977 100644
3507 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
3508 ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
3509 +@@ -10112,6 +10112,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
3510 + struct ixgbe_adapter *adapter = netdev_priv(dev);
3511 + struct bpf_prog *old_prog;
3512 + bool need_reset;
3513 ++ int num_queues;
3514 +
3515 + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3516 + return -EINVAL;
3517 +@@ -10161,11 +10162,14 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
3518 + /* Kick start the NAPI context if there is an AF_XDP socket open
3519 + * on that queue id. This so that receiving will start.
3520 + */
3521 +- if (need_reset && prog)
3522 +- for (i = 0; i < adapter->num_rx_queues; i++)
3523 ++ if (need_reset && prog) {
3524 ++ num_queues = min_t(int, adapter->num_rx_queues,
3525 ++ adapter->num_xdp_queues);
3526 ++ for (i = 0; i < num_queues; i++)
3527 + if (adapter->xdp_ring[i]->xsk_pool)
3528 + (void)ixgbe_xsk_wakeup(adapter->netdev, i,
3529 + XDP_WAKEUP_RX);
3530 ++ }
3531 +
3532 + return 0;
3533 + }
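The ixgbe hunk above closes an out-of-bounds read: adapter->xdp_ring[] only has num_xdp_queues entries, so waking one context per RX queue can index past the array when there are more RX queues than XDP queues. Clamping the loop to the smaller count is the whole fix; a minimal illustration:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	int xdp_ring[4] = { 10, 11, 12, 13 };	/* num_xdp_queues == 4 */
	int num_rx_queues = 8;
	int num_xdp_queues = 4;
	int i;

	/* iterating up to num_rx_queues would read xdp_ring[4..7] */
	for (i = 0; i < MIN(num_rx_queues, num_xdp_queues); i++)
		printf("wake queue %d -> ring %d\n", i, xdp_ring[i]);
	return 0;
}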
3534 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
3535 +index 1e672bc36c4dc..a6878e5f922a7 100644
3536 +--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
3537 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
3538 +@@ -1272,7 +1272,6 @@ static void mlx4_en_do_set_rx_mode(struct work_struct *work)
3539 + if (!netif_carrier_ok(dev)) {
3540 + if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
3541 + if (priv->port_state.link_state) {
3542 +- priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
3543 + netif_carrier_on(dev);
3544 + en_dbg(LINK, priv, "Link Up\n");
3545 + }
3546 +@@ -1560,26 +1559,36 @@ static void mlx4_en_service_task(struct work_struct *work)
3547 + mutex_unlock(&mdev->state_lock);
3548 + }
3549 +
3550 +-static void mlx4_en_linkstate(struct work_struct *work)
3551 ++static void mlx4_en_linkstate(struct mlx4_en_priv *priv)
3552 ++{
3553 ++ struct mlx4_en_port_state *port_state = &priv->port_state;
3554 ++ struct mlx4_en_dev *mdev = priv->mdev;
3555 ++ struct net_device *dev = priv->dev;
3556 ++ bool up;
3557 ++
3558 ++ if (mlx4_en_QUERY_PORT(mdev, priv->port))
3559 ++ port_state->link_state = MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN;
3560 ++
3561 ++ up = port_state->link_state == MLX4_PORT_STATE_DEV_EVENT_PORT_UP;
3562 ++ if (up == netif_carrier_ok(dev))
3563 ++ netif_carrier_event(dev);
3564 ++ if (!up) {
3565 ++ en_info(priv, "Link Down\n");
3566 ++ netif_carrier_off(dev);
3567 ++ } else {
3568 ++ en_info(priv, "Link Up\n");
3569 ++ netif_carrier_on(dev);
3570 ++ }
3571 ++}
3572 ++
3573 ++static void mlx4_en_linkstate_work(struct work_struct *work)
3574 + {
3575 + struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
3576 + linkstate_task);
3577 + struct mlx4_en_dev *mdev = priv->mdev;
3578 +- int linkstate = priv->link_state;
3579 +
3580 + mutex_lock(&mdev->state_lock);
3581 +- /* If observable port state changed set carrier state and
3582 +- * report to system log */
3583 +- if (priv->last_link_state != linkstate) {
3584 +- if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
3585 +- en_info(priv, "Link Down\n");
3586 +- netif_carrier_off(priv->dev);
3587 +- } else {
3588 +- en_info(priv, "Link Up\n");
3589 +- netif_carrier_on(priv->dev);
3590 +- }
3591 +- }
3592 +- priv->last_link_state = linkstate;
3593 ++ mlx4_en_linkstate(priv);
3594 + mutex_unlock(&mdev->state_lock);
3595 + }
3596 +
3597 +@@ -2082,9 +2091,11 @@ static int mlx4_en_open(struct net_device *dev)
3598 + mlx4_en_clear_stats(dev);
3599 +
3600 + err = mlx4_en_start_port(dev);
3601 +- if (err)
3602 ++ if (err) {
3603 + en_err(priv, "Failed starting port:%d\n", priv->port);
3604 +-
3605 ++ goto out;
3606 ++ }
3607 ++ mlx4_en_linkstate(priv);
3608 + out:
3609 + mutex_unlock(&mdev->state_lock);
3610 + return err;
3611 +@@ -3171,7 +3182,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
3612 + spin_lock_init(&priv->stats_lock);
3613 + INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
3614 + INIT_WORK(&priv->restart_task, mlx4_en_restart);
3615 +- INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
3616 ++ INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate_work);
3617 + INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
3618 + INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
3619 + #ifdef CONFIG_RFS_ACCEL
3620 +diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
3621 +index f3d1a20201ef3..6bf558c5ec107 100644
3622 +--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
3623 ++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
3624 +@@ -552,7 +552,6 @@ struct mlx4_en_priv {
3625 +
3626 + struct mlx4_hwq_resources res;
3627 + int link_state;
3628 +- int last_link_state;
3629 + bool port_up;
3630 + int port;
3631 + int registered;
3632 +diff --git a/drivers/net/ethernet/micrel/Makefile b/drivers/net/ethernet/micrel/Makefile
3633 +index 5cc00d22c708c..6ecc4eb30e74b 100644
3634 +--- a/drivers/net/ethernet/micrel/Makefile
3635 ++++ b/drivers/net/ethernet/micrel/Makefile
3636 +@@ -4,8 +4,6 @@
3637 + #
3638 +
3639 + obj-$(CONFIG_KS8842) += ks8842.o
3640 +-obj-$(CONFIG_KS8851) += ks8851.o
3641 +-ks8851-objs = ks8851_common.o ks8851_spi.o
3642 +-obj-$(CONFIG_KS8851_MLL) += ks8851_mll.o
3643 +-ks8851_mll-objs = ks8851_common.o ks8851_par.o
3644 ++obj-$(CONFIG_KS8851) += ks8851_common.o ks8851_spi.o
3645 ++obj-$(CONFIG_KS8851_MLL) += ks8851_common.o ks8851_par.o
3646 + obj-$(CONFIG_KSZ884X_PCI) += ksz884x.o
3647 +diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
3648 +index 831518466de22..0f9c5457b93ef 100644
3649 +--- a/drivers/net/ethernet/micrel/ks8851_common.c
3650 ++++ b/drivers/net/ethernet/micrel/ks8851_common.c
3651 +@@ -1057,6 +1057,7 @@ int ks8851_suspend(struct device *dev)
3652 +
3653 + return 0;
3654 + }
3655 ++EXPORT_SYMBOL_GPL(ks8851_suspend);
3656 +
3657 + int ks8851_resume(struct device *dev)
3658 + {
3659 +@@ -1070,6 +1071,7 @@ int ks8851_resume(struct device *dev)
3660 +
3661 + return 0;
3662 + }
3663 ++EXPORT_SYMBOL_GPL(ks8851_resume);
3664 + #endif
3665 +
3666 + static int ks8851_register_mdiobus(struct ks8851_net *ks, struct device *dev)
3667 +@@ -1243,6 +1245,7 @@ err_reg:
3668 + err_reg_io:
3669 + return ret;
3670 + }
3671 ++EXPORT_SYMBOL_GPL(ks8851_probe_common);
3672 +
3673 + int ks8851_remove_common(struct device *dev)
3674 + {
3675 +@@ -1261,3 +1264,8 @@ int ks8851_remove_common(struct device *dev)
3676 +
3677 + return 0;
3678 + }
3679 ++EXPORT_SYMBOL_GPL(ks8851_remove_common);
3680 ++
3681 ++MODULE_DESCRIPTION("KS8851 Network driver");
3682 ++MODULE_AUTHOR("Ben Dooks <ben@×××××××××.uk>");
3683 ++MODULE_LICENSE("GPL");
3684 +diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
3685 +index 58a854666c62b..c14de5fcedea3 100644
3686 +--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c
3687 ++++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
3688 +@@ -380,15 +380,6 @@ static void ionic_sw_stats_get_txq_values(struct ionic_lif *lif, u64 **buf,
3689 + &ionic_dbg_intr_stats_desc[i]);
3690 + (*buf)++;
3691 + }
3692 +- for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) {
3693 +- **buf = IONIC_READ_STAT64(&txqcq->napi_stats,
3694 +- &ionic_dbg_napi_stats_desc[i]);
3695 +- (*buf)++;
3696 +- }
3697 +- for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) {
3698 +- **buf = txqcq->napi_stats.work_done_cntr[i];
3699 +- (*buf)++;
3700 +- }
3701 + for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) {
3702 + **buf = txstats->sg_cntr[i];
3703 + (*buf)++;
3704 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
3705 +index 2218bc3a624b4..86151a817b79a 100644
3706 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
3707 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
3708 +@@ -486,6 +486,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
3709 + timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
3710 + stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
3711 + eee_tw_timer);
3712 ++ if (priv->hw->xpcs)
3713 ++ xpcs_config_eee(priv->hw->xpcs,
3714 ++ priv->plat->mult_fact_100ns,
3715 ++ true);
3716 + }
3717 +
3718 + if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
3719 +diff --git a/drivers/net/mhi/net.c b/drivers/net/mhi/net.c
3720 +index e60e38c1f09d3..5e49f7a919b61 100644
3721 +--- a/drivers/net/mhi/net.c
3722 ++++ b/drivers/net/mhi/net.c
3723 +@@ -337,7 +337,7 @@ static int mhi_net_newlink(void *ctxt, struct net_device *ndev, u32 if_id,
3724 + /* Start MHI channels */
3725 + err = mhi_prepare_for_transfer(mhi_dev);
3726 + if (err)
3727 +- goto out_err;
3728 ++ return err;
3729 +
3730 + /* Number of transfer descriptors determines size of the queue */
3731 + mhi_netdev->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
3732 +@@ -347,7 +347,7 @@ static int mhi_net_newlink(void *ctxt, struct net_device *ndev, u32 if_id,
3733 + else
3734 + err = register_netdev(ndev);
3735 + if (err)
3736 +- goto out_err;
3737 ++ return err;
3738 +
3739 + if (mhi_netdev->proto) {
3740 + err = mhi_netdev->proto->init(mhi_netdev);
3741 +@@ -359,8 +359,6 @@ static int mhi_net_newlink(void *ctxt, struct net_device *ndev, u32 if_id,
3742 +
3743 + out_err_proto:
3744 + unregister_netdevice(ndev);
3745 +-out_err:
3746 +- free_netdev(ndev);
3747 + return err;
3748 + }
3749 +
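The mhi_net_newlink() change above removes a double free: on the early failure paths the caller also releases the net_device, so the local out_err label that called free_netdev() freed it a second time. The ownership rule the fix restores, sketched with plain malloc/free and hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct ndev { int dummy; };

/* callee: report failure, but never free what the caller still owns */
static int newlink(struct ndev *n)
{
	/* ... a setup step fails ... */
	return -1;	/* no free(n) here */
}

int main(void)
{
	struct ndev *n = malloc(sizeof(*n));

	if (!n)
		return 1;
	if (newlink(n) < 0)
		printf("setup failed\n");
	free(n);	/* exactly one owner releases the object */
	return 0;
}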
3750 +diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
3751 +index e79297a4bae81..27b6a3f507ae6 100644
3752 +--- a/drivers/net/phy/bcm7xxx.c
3753 ++++ b/drivers/net/phy/bcm7xxx.c
3754 +@@ -27,7 +27,12 @@
3755 + #define MII_BCM7XXX_SHD_2_ADDR_CTRL 0xe
3756 + #define MII_BCM7XXX_SHD_2_CTRL_STAT 0xf
3757 + #define MII_BCM7XXX_SHD_2_BIAS_TRIM 0x1a
3758 ++#define MII_BCM7XXX_SHD_3_PCS_CTRL 0x0
3759 ++#define MII_BCM7XXX_SHD_3_PCS_STATUS 0x1
3760 ++#define MII_BCM7XXX_SHD_3_EEE_CAP 0x2
3761 + #define MII_BCM7XXX_SHD_3_AN_EEE_ADV 0x3
3762 ++#define MII_BCM7XXX_SHD_3_EEE_LP 0x4
3763 ++#define MII_BCM7XXX_SHD_3_EEE_WK_ERR 0x5
3764 + #define MII_BCM7XXX_SHD_3_PCS_CTRL_2 0x6
3765 + #define MII_BCM7XXX_PCS_CTRL_2_DEF 0x4400
3766 + #define MII_BCM7XXX_SHD_3_AN_STAT 0xb
3767 +@@ -216,25 +221,37 @@ static int bcm7xxx_28nm_resume(struct phy_device *phydev)
3768 + return genphy_config_aneg(phydev);
3769 + }
3770 +
3771 +-static int phy_set_clr_bits(struct phy_device *dev, int location,
3772 +- int set_mask, int clr_mask)
3773 ++static int __phy_set_clr_bits(struct phy_device *dev, int location,
3774 ++ int set_mask, int clr_mask)
3775 + {
3776 + int v, ret;
3777 +
3778 +- v = phy_read(dev, location);
3779 ++ v = __phy_read(dev, location);
3780 + if (v < 0)
3781 + return v;
3782 +
3783 + v &= ~clr_mask;
3784 + v |= set_mask;
3785 +
3786 +- ret = phy_write(dev, location, v);
3787 ++ ret = __phy_write(dev, location, v);
3788 + if (ret < 0)
3789 + return ret;
3790 +
3791 + return v;
3792 + }
3793 +
3794 ++static int phy_set_clr_bits(struct phy_device *dev, int location,
3795 ++ int set_mask, int clr_mask)
3796 ++{
3797 ++ int ret;
3798 ++
3799 ++ mutex_lock(&dev->mdio.bus->mdio_lock);
3800 ++ ret = __phy_set_clr_bits(dev, location, set_mask, clr_mask);
3801 ++ mutex_unlock(&dev->mdio.bus->mdio_lock);
3802 ++
3803 ++ return ret;
3804 ++}
3805 ++
3806 + static int bcm7xxx_28nm_ephy_01_afe_config_init(struct phy_device *phydev)
3807 + {
3808 + int ret;
3809 +@@ -398,6 +415,93 @@ static int bcm7xxx_28nm_ephy_config_init(struct phy_device *phydev)
3810 + return bcm7xxx_28nm_ephy_apd_enable(phydev);
3811 + }
3812 +
3813 ++#define MII_BCM7XXX_REG_INVALID 0xff
3814 ++
3815 ++static u8 bcm7xxx_28nm_ephy_regnum_to_shd(u16 regnum)
3816 ++{
3817 ++ switch (regnum) {
3818 ++ case MDIO_CTRL1:
3819 ++ return MII_BCM7XXX_SHD_3_PCS_CTRL;
3820 ++ case MDIO_STAT1:
3821 ++ return MII_BCM7XXX_SHD_3_PCS_STATUS;
3822 ++ case MDIO_PCS_EEE_ABLE:
3823 ++ return MII_BCM7XXX_SHD_3_EEE_CAP;
3824 ++ case MDIO_AN_EEE_ADV:
3825 ++ return MII_BCM7XXX_SHD_3_AN_EEE_ADV;
3826 ++ case MDIO_AN_EEE_LPABLE:
3827 ++ return MII_BCM7XXX_SHD_3_EEE_LP;
3828 ++ case MDIO_PCS_EEE_WK_ERR:
3829 ++ return MII_BCM7XXX_SHD_3_EEE_WK_ERR;
3830 ++ default:
3831 ++ return MII_BCM7XXX_REG_INVALID;
3832 ++ }
3833 ++}
3834 ++
3835 ++static bool bcm7xxx_28nm_ephy_dev_valid(int devnum)
3836 ++{
3837 ++ return devnum == MDIO_MMD_AN || devnum == MDIO_MMD_PCS;
3838 ++}
3839 ++
3840 ++static int bcm7xxx_28nm_ephy_read_mmd(struct phy_device *phydev,
3841 ++ int devnum, u16 regnum)
3842 ++{
3843 ++ u8 shd = bcm7xxx_28nm_ephy_regnum_to_shd(regnum);
3844 ++ int ret;
3845 ++
3846 ++ if (!bcm7xxx_28nm_ephy_dev_valid(devnum) ||
3847 ++ shd == MII_BCM7XXX_REG_INVALID)
3848 ++ return -EOPNOTSUPP;
3849 ++
3850 ++ /* set shadow mode 2 */
3851 ++ ret = __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
3852 ++ MII_BCM7XXX_SHD_MODE_2, 0);
3853 ++ if (ret < 0)
3854 ++ return ret;
3855 ++
3856 ++ /* Access the desired shadow register address */
3857 ++ ret = __phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL, shd);
3858 ++ if (ret < 0)
3859 ++ goto reset_shadow_mode;
3860 ++
3861 ++ ret = __phy_read(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT);
3862 ++
3863 ++reset_shadow_mode:
3864 ++ /* reset shadow mode 2 */
3865 ++ __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0,
3866 ++ MII_BCM7XXX_SHD_MODE_2);
3867 ++ return ret;
3868 ++}
3869 ++
3870 ++static int bcm7xxx_28nm_ephy_write_mmd(struct phy_device *phydev,
3871 ++ int devnum, u16 regnum, u16 val)
3872 ++{
3873 ++ u8 shd = bcm7xxx_28nm_ephy_regnum_to_shd(regnum);
3874 ++ int ret;
3875 ++
3876 ++ if (!bcm7xxx_28nm_ephy_dev_valid(devnum) ||
3877 ++ shd == MII_BCM7XXX_REG_INVALID)
3878 ++ return -EOPNOTSUPP;
3879 ++
3880 ++ /* set shadow mode 2 */
3881 ++ ret = __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
3882 ++ MII_BCM7XXX_SHD_MODE_2, 0);
3883 ++ if (ret < 0)
3884 ++ return ret;
3885 ++
3886 ++ /* Access the desired shadow register address */
3887 ++ ret = __phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL, shd);
3888 ++ if (ret < 0)
3889 ++ goto reset_shadow_mode;
3890 ++
3891 ++ /* Write the desired value in the shadow register */
3892 ++ __phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT, val);
3893 ++
3894 ++reset_shadow_mode:
3895 ++ /* reset shadow mode 2 */
3896 ++ return __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0,
3897 ++ MII_BCM7XXX_SHD_MODE_2);
3898 ++}
3899 ++
3900 + static int bcm7xxx_28nm_ephy_resume(struct phy_device *phydev)
3901 + {
3902 + int ret;
3903 +@@ -595,6 +699,8 @@ static void bcm7xxx_28nm_remove(struct phy_device *phydev)
3904 + .get_stats = bcm7xxx_28nm_get_phy_stats, \
3905 + .probe = bcm7xxx_28nm_probe, \
3906 + .remove = bcm7xxx_28nm_remove, \
3907 ++ .read_mmd = bcm7xxx_28nm_ephy_read_mmd, \
3908 ++ .write_mmd = bcm7xxx_28nm_ephy_write_mmd, \
3909 + }
3910 +
3911 + #define BCM7XXX_40NM_EPHY(_oui, _name) \
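A convention worth noting in the bcm7xxx hunks above: phy_set_clr_bits() is split into a lock-free __phy_set_clr_bits() plus a wrapper that takes the MDIO bus mutex, because the new read_mmd/write_mmd callbacks are invoked with that mutex already held and would deadlock on the locked accessors. A generic sketch of the double-underscore pattern, with a pthread mutex standing in for the kernel's:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t bus_lock = PTHREAD_MUTEX_INITIALIZER;
static int reg;

/* __ variant: the caller must already hold bus_lock */
static int __set_clr_bits(int set, int clr)
{
	reg = (reg & ~clr) | set;
	return reg;
}

/* plain variant: takes the lock itself, for ordinary callers */
static int set_clr_bits(int set, int clr)
{
	int ret;

	pthread_mutex_lock(&bus_lock);
	ret = __set_clr_bits(set, clr);
	pthread_mutex_unlock(&bus_lock);
	return ret;
}

int main(void)
{
	printf("%d\n", set_clr_bits(0x5, 0x0));	/* lock not yet held */
	pthread_mutex_lock(&bus_lock);		/* e.g. inside read_mmd */
	printf("%d\n", __set_clr_bits(0x2, 0x4));
	pthread_mutex_unlock(&bus_lock);
	return 0;
}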
3912 +diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
3913 +index 53f034fc2ef79..ee8313a4ac713 100644
3914 +--- a/drivers/net/phy/mdio_bus.c
3915 ++++ b/drivers/net/phy/mdio_bus.c
3916 +@@ -525,6 +525,10 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
3917 + NULL == bus->read || NULL == bus->write)
3918 + return -EINVAL;
3919 +
3920 ++ if (bus->parent && bus->parent->of_node)
3921 ++ bus->parent->of_node->fwnode.flags |=
3922 ++ FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD;
3923 ++
3924 + BUG_ON(bus->state != MDIOBUS_ALLOCATED &&
3925 + bus->state != MDIOBUS_UNREGISTERED);
3926 +
3927 +diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
3928 +index 3c7120ec70798..6a0799f5b05f9 100644
3929 +--- a/drivers/net/usb/hso.c
3930 ++++ b/drivers/net/usb/hso.c
3931 +@@ -2353,7 +2353,7 @@ static int remove_net_device(struct hso_device *hso_dev)
3932 + }
3933 +
3934 + /* Frees our network device */
3935 +-static void hso_free_net_device(struct hso_device *hso_dev, bool bailout)
3936 ++static void hso_free_net_device(struct hso_device *hso_dev)
3937 + {
3938 + int i;
3939 + struct hso_net *hso_net = dev2net(hso_dev);
3940 +@@ -2376,7 +2376,7 @@ static void hso_free_net_device(struct hso_device *hso_dev, bool bailout)
3941 + kfree(hso_net->mux_bulk_tx_buf);
3942 + hso_net->mux_bulk_tx_buf = NULL;
3943 +
3944 +- if (hso_net->net && !bailout)
3945 ++ if (hso_net->net)
3946 + free_netdev(hso_net->net);
3947 +
3948 + kfree(hso_dev);
3949 +@@ -3136,7 +3136,7 @@ static void hso_free_interface(struct usb_interface *interface)
3950 + rfkill_unregister(rfk);
3951 + rfkill_destroy(rfk);
3952 + }
3953 +- hso_free_net_device(network_table[i], false);
3954 ++ hso_free_net_device(network_table[i]);
3955 + }
3956 + }
3957 + }
3958 +diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
3959 +index 4c8ee1cff4d47..4cb71dd1998c4 100644
3960 +--- a/drivers/net/usb/smsc95xx.c
3961 ++++ b/drivers/net/usb/smsc95xx.c
3962 +@@ -1178,7 +1178,10 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
3963 +
3964 + static void smsc95xx_handle_link_change(struct net_device *net)
3965 + {
3966 ++ struct usbnet *dev = netdev_priv(net);
3967 ++
3968 + phy_print_status(net->phydev);
3969 ++ usbnet_defer_kevent(dev, EVENT_LINK_CHANGE);
3970 + }
3971 +
3972 + static int smsc95xx_start_phy(struct usbnet *dev)
3973 +diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
3974 +index ffa894f7312a4..0adae76eb8df1 100644
3975 +--- a/drivers/net/wireless/mac80211_hwsim.c
3976 ++++ b/drivers/net/wireless/mac80211_hwsim.c
3977 +@@ -1867,8 +1867,8 @@ mac80211_hwsim_beacon(struct hrtimer *timer)
3978 + bcn_int -= data->bcn_delta;
3979 + data->bcn_delta = 0;
3980 + }
3981 +- hrtimer_forward(&data->beacon_timer, hrtimer_get_expires(timer),
3982 +- ns_to_ktime(bcn_int * NSEC_PER_USEC));
3983 ++ hrtimer_forward_now(&data->beacon_timer,
3984 ++ ns_to_ktime(bcn_int * NSEC_PER_USEC));
3985 + return HRTIMER_RESTART;
3986 + }
3987 +
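The mac80211_hwsim change above swaps hrtimer_forward(), anchored at the stale stored expiry, for hrtimer_forward_now(). If the beacon callback runs late, stepping one interval past the old expiry can still land in the past, so the timer refires immediately; forwarding past the current time avoids that storm. Toy numbers:

#include <stdio.h>

int main(void)
{
	long expires = 100, now = 350, interval = 100, next;

	/* forwarding from the stale expiry: one step, still in the past */
	printf("%ld\n", expires + interval);	/* 200 */

	/* forwarding past 'now': step in intervals until the future */
	for (next = expires; next <= now; next += interval)
		;
	printf("%ld\n", next);			/* 400 */
	return 0;
}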
3988 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
3989 +index e2374319df61a..9b6f78eac9375 100644
3990 +--- a/drivers/nvme/host/core.c
3991 ++++ b/drivers/nvme/host/core.c
3992 +@@ -980,6 +980,7 @@ EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
3993 + blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
3994 + {
3995 + struct nvme_command *cmd = nvme_req(req)->cmd;
3996 ++ struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
3997 + blk_status_t ret = BLK_STS_OK;
3998 +
3999 + if (!(req->rq_flags & RQF_DONTPREP)) {
4000 +@@ -1028,7 +1029,8 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
4001 + return BLK_STS_IOERR;
4002 + }
4003 +
4004 +- nvme_req(req)->genctr++;
4005 ++ if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN))
4006 ++ nvme_req(req)->genctr++;
4007 + cmd->common.command_id = nvme_cid(req);
4008 + trace_nvme_setup_cmd(req, cmd);
4009 + return ret;
4010 +diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
4011 +index 26511794629bc..12393a72662e5 100644
4012 +--- a/drivers/nvme/host/nvme.h
4013 ++++ b/drivers/nvme/host/nvme.h
4014 +@@ -149,6 +149,12 @@ enum nvme_quirks {
4015 + * 48 bits.
4016 + */
4017 + NVME_QUIRK_DMA_ADDRESS_BITS_48 = (1 << 16),
4018 ++
4019 ++ /*
4020 ++ * The controller requires the command_id value to be limited, so skip
4021 ++ * encoding the generation sequence number.
4022 ++ */
4023 ++ NVME_QUIRK_SKIP_CID_GEN = (1 << 17),
4024 + };
4025 +
4026 + /*
4027 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
4028 +index c246fdacba2e5..4f22fbafe964f 100644
4029 +--- a/drivers/nvme/host/pci.c
4030 ++++ b/drivers/nvme/host/pci.c
4031 +@@ -3282,7 +3282,8 @@ static const struct pci_device_id nvme_id_table[] = {
4032 + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
4033 + .driver_data = NVME_QUIRK_SINGLE_VECTOR |
4034 + NVME_QUIRK_128_BYTES_SQES |
4035 +- NVME_QUIRK_SHARED_TAGS },
4036 ++ NVME_QUIRK_SHARED_TAGS |
4037 ++ NVME_QUIRK_SKIP_CID_GEN },
4038 +
4039 + { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
4040 + { 0, }
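Context for the NVME_QUIRK_SKIP_CID_GEN hunks above: 5.14 started folding a small generation counter into the upper bits of the 16-bit command_id to catch stale or duplicate completions, which confuses controllers (here Apple's) that treat command_id as a bare queue tag. With the quirk the counter is never incremented, so the encoded value stays equal to the tag. A hypothetical sketch of the encoding; the real shift and mask live in drivers/nvme/host/nvme.h and may differ:

#include <stdint.h>
#include <stdio.h>

#define GENCTR_SHIFT 12	/* assumed split: 4 generation bits over a 12-bit tag */

static uint16_t make_cid(uint16_t genctr, uint16_t tag)
{
	return (uint16_t)((genctr << GENCTR_SHIFT) | tag);
}

int main(void)
{
	/* normal path: genctr advances, upper bits change over time */
	printf("0x%04x\n", make_cid(3, 0x005));	/* 0x3005 */
	/* NVME_QUIRK_SKIP_CID_GEN: genctr stays 0, cid == tag */
	printf("0x%04x\n", make_cid(0, 0x005));	/* 0x0005 */
	return 0;
}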
4041 +diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
4042 +index a89d24a040af8..9b524969eff74 100644
4043 +--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
4044 ++++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
4045 +@@ -1,6 +1,6 @@
4046 + // SPDX-License-Identifier: GPL-2.0-only
4047 + /*
4048 +- * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
4049 ++ * Copyright (c) 2012-2014, 2016-2021 The Linux Foundation. All rights reserved.
4050 + */
4051 +
4052 + #include <linux/gpio/driver.h>
4053 +@@ -14,6 +14,7 @@
4054 + #include <linux/platform_device.h>
4055 + #include <linux/regmap.h>
4056 + #include <linux/slab.h>
4057 ++#include <linux/spmi.h>
4058 + #include <linux/types.h>
4059 +
4060 + #include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
4061 +@@ -171,6 +172,8 @@ struct pmic_gpio_state {
4062 + struct pinctrl_dev *ctrl;
4063 + struct gpio_chip chip;
4064 + struct irq_chip irq;
4065 ++ u8 usid;
4066 ++ u8 pid_base;
4067 + };
4068 +
4069 + static const struct pinconf_generic_params pmic_gpio_bindings[] = {
4070 +@@ -949,12 +952,36 @@ static int pmic_gpio_child_to_parent_hwirq(struct gpio_chip *chip,
4071 + unsigned int *parent_hwirq,
4072 + unsigned int *parent_type)
4073 + {
4074 +- *parent_hwirq = child_hwirq + 0xc0;
4075 ++ struct pmic_gpio_state *state = gpiochip_get_data(chip);
4076 ++
4077 ++ *parent_hwirq = child_hwirq + state->pid_base;
4078 + *parent_type = child_type;
4079 +
4080 + return 0;
4081 + }
4082 +
4083 ++static void *pmic_gpio_populate_parent_fwspec(struct gpio_chip *chip,
4084 ++ unsigned int parent_hwirq,
4085 ++ unsigned int parent_type)
4086 ++{
4087 ++ struct pmic_gpio_state *state = gpiochip_get_data(chip);
4088 ++ struct irq_fwspec *fwspec;
4089 ++
4090 ++ fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL);
4091 ++ if (!fwspec)
4092 ++ return NULL;
4093 ++
4094 ++ fwspec->fwnode = chip->irq.parent_domain->fwnode;
4095 ++
4096 ++ fwspec->param_count = 4;
4097 ++ fwspec->param[0] = state->usid;
4098 ++ fwspec->param[1] = parent_hwirq;
4099 ++ /* param[2] must be left as 0 */
4100 ++ fwspec->param[3] = parent_type;
4101 ++
4102 ++ return fwspec;
4103 ++}
4104 ++
4105 + static int pmic_gpio_probe(struct platform_device *pdev)
4106 + {
4107 + struct irq_domain *parent_domain;
4108 +@@ -965,6 +992,7 @@ static int pmic_gpio_probe(struct platform_device *pdev)
4109 + struct pmic_gpio_pad *pad, *pads;
4110 + struct pmic_gpio_state *state;
4111 + struct gpio_irq_chip *girq;
4112 ++ const struct spmi_device *parent_spmi_dev;
4113 + int ret, npins, i;
4114 + u32 reg;
4115 +
4116 +@@ -984,6 +1012,9 @@ static int pmic_gpio_probe(struct platform_device *pdev)
4117 +
4118 + state->dev = &pdev->dev;
4119 + state->map = dev_get_regmap(dev->parent, NULL);
4120 ++ parent_spmi_dev = to_spmi_device(dev->parent);
4121 ++ state->usid = parent_spmi_dev->usid;
4122 ++ state->pid_base = reg >> 8;
4123 +
4124 + pindesc = devm_kcalloc(dev, npins, sizeof(*pindesc), GFP_KERNEL);
4125 + if (!pindesc)
4126 +@@ -1059,7 +1090,7 @@ static int pmic_gpio_probe(struct platform_device *pdev)
4127 + girq->fwnode = of_node_to_fwnode(state->dev->of_node);
4128 + girq->parent_domain = parent_domain;
4129 + girq->child_to_parent_hwirq = pmic_gpio_child_to_parent_hwirq;
4130 +- girq->populate_parent_alloc_arg = gpiochip_populate_parent_fwspec_fourcell;
4131 ++ girq->populate_parent_alloc_arg = pmic_gpio_populate_parent_fwspec;
4132 + girq->child_offset_to_irq = pmic_gpio_child_offset_to_irq;
4133 + girq->child_irq_domain_ops.translate = pmic_gpio_domain_translate;
4134 +
4135 +diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
4136 +index 2e4e97a626a51..7b03b497d93b2 100644
4137 +--- a/drivers/platform/x86/intel-hid.c
4138 ++++ b/drivers/platform/x86/intel-hid.c
4139 +@@ -118,12 +118,30 @@ static const struct dmi_system_id dmi_vgbs_allow_list[] = {
4140 + { }
4141 + };
4142 +
4143 ++/*
4144 ++ * Some devices, even non-convertible ones, can send incorrect SW_TABLET_MODE
4145 ++ * reports. Accept such reports only from devices in this list.
4146 ++ */
4147 ++static const struct dmi_system_id dmi_auto_add_switch[] = {
4148 ++ {
4149 ++ .matches = {
4150 ++ DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "31" /* Convertible */),
4151 ++ },
4152 ++ },
4153 ++ {
4154 ++ .matches = {
4155 ++ DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "32" /* Detachable */),
4156 ++ },
4157 ++ },
4158 ++ {} /* Array terminator */
4159 ++};
4160 ++
4161 + struct intel_hid_priv {
4162 + struct input_dev *input_dev;
4163 + struct input_dev *array;
4164 + struct input_dev *switches;
4165 + bool wakeup_mode;
4166 +- bool dual_accel;
4167 ++ bool auto_add_switch;
4168 + };
4169 +
4170 + #define HID_EVENT_FILTER_UUID "eeec56b3-4442-408f-a792-4edd4d758054"
4171 +@@ -452,10 +470,8 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
4172 + * Some convertibles have unreliable VGBS returns which could cause incorrect
4173 + * SW_TABLET_MODE report, in these cases we enable support when receiving
4174 + * the first event instead of during driver setup.
4175 +- *
4176 +- * See dual_accel_detect.h for more info on the dual_accel check.
4177 + */
4178 +- if (!priv->switches && !priv->dual_accel && (event == 0xcc || event == 0xcd)) {
4179 ++ if (!priv->switches && priv->auto_add_switch && (event == 0xcc || event == 0xcd)) {
4180 + dev_info(&device->dev, "switch event received, enable switches support\n");
4181 + err = intel_hid_switches_setup(device);
4182 + if (err)
4183 +@@ -596,7 +612,8 @@ static int intel_hid_probe(struct platform_device *device)
4184 + return -ENOMEM;
4185 + dev_set_drvdata(&device->dev, priv);
4186 +
4187 +- priv->dual_accel = dual_accel_detect();
4188 ++ /* See dual_accel_detect.h for more info on the dual_accel check. */
4189 ++ priv->auto_add_switch = dmi_check_system(dmi_auto_add_switch) && !dual_accel_detect();
4190 +
4191 + err = intel_hid_input_setup(device);
4192 + if (err) {
4193 +diff --git a/drivers/ptp/ptp_kvm_x86.c b/drivers/ptp/ptp_kvm_x86.c
4194 +index 3dd519dfc473c..d0096cd7096a8 100644
4195 +--- a/drivers/ptp/ptp_kvm_x86.c
4196 ++++ b/drivers/ptp/ptp_kvm_x86.c
4197 +@@ -15,8 +15,6 @@
4198 + #include <linux/ptp_clock_kernel.h>
4199 + #include <linux/ptp_kvm.h>
4200 +
4201 +-struct pvclock_vsyscall_time_info *hv_clock;
4202 +-
4203 + static phys_addr_t clock_pair_gpa;
4204 + static struct kvm_clock_pairing clock_pair;
4205 +
4206 +@@ -28,8 +26,7 @@ int kvm_arch_ptp_init(void)
4207 + return -ENODEV;
4208 +
4209 + clock_pair_gpa = slow_virt_to_phys(&clock_pair);
4210 +- hv_clock = pvclock_get_pvti_cpu0_va();
4211 +- if (!hv_clock)
4212 ++ if (!pvclock_get_pvti_cpu0_va())
4213 + return -ENODEV;
4214 +
4215 + ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa,
4216 +@@ -64,10 +61,8 @@ int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *tspec,
4217 + struct pvclock_vcpu_time_info *src;
4218 + unsigned int version;
4219 + long ret;
4220 +- int cpu;
4221 +
4222 +- cpu = smp_processor_id();
4223 +- src = &hv_clock[cpu].pvti;
4224 ++ src = this_cpu_pvti();
4225 +
4226 + do {
4227 + /*
4228 +diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
4229 +index 9748165e08e96..f19f02e751155 100644
4230 +--- a/drivers/s390/cio/ccwgroup.c
4231 ++++ b/drivers/s390/cio/ccwgroup.c
4232 +@@ -77,12 +77,13 @@ EXPORT_SYMBOL(ccwgroup_set_online);
4233 + /**
4234 + * ccwgroup_set_offline() - disable a ccwgroup device
4235 + * @gdev: target ccwgroup device
4236 ++ * @call_gdrv: Call the registered gdrv set_offline function
4237 + *
4238 + * This function attempts to put the ccwgroup device into the offline state.
4239 + * Returns:
4240 + * %0 on success and a negative error value on failure.
4241 + */
4242 +-int ccwgroup_set_offline(struct ccwgroup_device *gdev)
4243 ++int ccwgroup_set_offline(struct ccwgroup_device *gdev, bool call_gdrv)
4244 + {
4245 + struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
4246 + int ret = -EINVAL;
4247 +@@ -91,11 +92,16 @@ int ccwgroup_set_offline(struct ccwgroup_device *gdev)
4248 + return -EAGAIN;
4249 + if (gdev->state == CCWGROUP_OFFLINE)
4250 + goto out;
4251 ++ if (!call_gdrv) {
4252 ++ ret = 0;
4253 ++ goto offline;
4254 ++ }
4255 + if (gdrv->set_offline)
4256 + ret = gdrv->set_offline(gdev);
4257 + if (ret)
4258 + goto out;
4259 +
4260 ++offline:
4261 + gdev->state = CCWGROUP_OFFLINE;
4262 + out:
4263 + atomic_set(&gdev->onoff, 0);
4264 +@@ -124,7 +130,7 @@ static ssize_t ccwgroup_online_store(struct device *dev,
4265 + if (value == 1)
4266 + ret = ccwgroup_set_online(gdev);
4267 + else if (value == 0)
4268 +- ret = ccwgroup_set_offline(gdev);
4269 ++ ret = ccwgroup_set_offline(gdev, true);
4270 + else
4271 + ret = -EINVAL;
4272 + out:
4273 +diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
4274 +index f4d554ea0c930..52bdb2c8c0855 100644
4275 +--- a/drivers/s390/net/qeth_core.h
4276 ++++ b/drivers/s390/net/qeth_core.h
4277 +@@ -877,7 +877,6 @@ struct qeth_card {
4278 + struct napi_struct napi;
4279 + struct qeth_rx rx;
4280 + struct delayed_work buffer_reclaim_work;
4281 +- struct work_struct close_dev_work;
4282 + };
4283 +
4284 + static inline bool qeth_card_hw_is_reachable(struct qeth_card *card)
4285 +diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
4286 +index 51f7f4e680c34..f5bad10f3f44f 100644
4287 +--- a/drivers/s390/net/qeth_core_main.c
4288 ++++ b/drivers/s390/net/qeth_core_main.c
4289 +@@ -71,15 +71,6 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
4290 + static int qeth_qdio_establish(struct qeth_card *);
4291 + static void qeth_free_qdio_queues(struct qeth_card *card);
4292 +
4293 +-static void qeth_close_dev_handler(struct work_struct *work)
4294 +-{
4295 +- struct qeth_card *card;
4296 +-
4297 +- card = container_of(work, struct qeth_card, close_dev_work);
4298 +- QETH_CARD_TEXT(card, 2, "cldevhdl");
4299 +- ccwgroup_set_offline(card->gdev);
4300 +-}
4301 +-
4302 + static const char *qeth_get_cardname(struct qeth_card *card)
4303 + {
4304 + if (IS_VM_NIC(card)) {
4305 +@@ -797,10 +788,12 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
4306 + case IPA_CMD_STOPLAN:
4307 + if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
4308 + dev_err(&card->gdev->dev,
4309 +- "Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
4310 ++ "Adjacent port of interface %s is no longer in reflective relay mode, trigger recovery\n",
4311 + netdev_name(card->dev));
4312 +- schedule_work(&card->close_dev_work);
4313 ++ /* Set offline, then probably fail to set online: */
4314 ++ qeth_schedule_recovery(card);
4315 + } else {
4316 ++ /* stay online for subsequent STARTLAN */
4317 + dev_warn(&card->gdev->dev,
4318 + "The link for interface %s on CHPID 0x%X failed\n",
4319 + netdev_name(card->dev), card->info.chpid);
4320 +@@ -1559,7 +1552,6 @@ static void qeth_setup_card(struct qeth_card *card)
4321 + INIT_LIST_HEAD(&card->ipato.entries);
4322 + qeth_init_qdio_info(card);
4323 + INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
4324 +- INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
4325 + hash_init(card->rx_mode_addrs);
4326 + hash_init(card->local_addrs4);
4327 + hash_init(card->local_addrs6);
4328 +@@ -5556,7 +5548,8 @@ static int qeth_do_reset(void *data)
4329 + dev_info(&card->gdev->dev,
4330 + "Device successfully recovered!\n");
4331 + } else {
4332 +- ccwgroup_set_offline(card->gdev);
4333 ++ qeth_set_offline(card, disc, true);
4334 ++ ccwgroup_set_offline(card->gdev, false);
4335 + dev_warn(&card->gdev->dev,
4336 + "The qeth device driver failed to recover an error on the device\n");
4337 + }
4338 +diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
4339 +index d7cdd9cfe485a..3dbe592ca97a1 100644
4340 +--- a/drivers/s390/net/qeth_l2_main.c
4341 ++++ b/drivers/s390/net/qeth_l2_main.c
4342 +@@ -2218,7 +2218,6 @@ static void qeth_l2_remove_device(struct ccwgroup_device *gdev)
4343 + if (gdev->state == CCWGROUP_ONLINE)
4344 + qeth_set_offline(card, card->discipline, false);
4345 +
4346 +- cancel_work_sync(&card->close_dev_work);
4347 + if (card->dev->reg_state == NETREG_REGISTERED)
4348 + unregister_netdev(card->dev);
4349 + }
4350 +diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
4351 +index f0d6f205c53cd..5ba38499e3e29 100644
4352 +--- a/drivers/s390/net/qeth_l3_main.c
4353 ++++ b/drivers/s390/net/qeth_l3_main.c
4354 +@@ -1965,7 +1965,6 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
4355 + if (cgdev->state == CCWGROUP_ONLINE)
4356 + qeth_set_offline(card, card->discipline, false);
4357 +
4358 +- cancel_work_sync(&card->close_dev_work);
4359 + if (card->dev->reg_state == NETREG_REGISTERED)
4360 + unregister_netdev(card->dev);
4361 +
4362 +diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
4363 +index 390b07bf92b97..ccbded3353bd0 100644
4364 +--- a/drivers/scsi/csiostor/csio_init.c
4365 ++++ b/drivers/scsi/csiostor/csio_init.c
4366 +@@ -1254,3 +1254,4 @@ MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
4367 + MODULE_VERSION(CSIO_DRV_VERSION);
4368 + MODULE_FIRMWARE(FW_FNAME_T5);
4369 + MODULE_FIRMWARE(FW_FNAME_T6);
4370 ++MODULE_SOFTDEP("pre: cxgb4");
4371 +diff --git a/drivers/scsi/elx/libefc/efc_device.c b/drivers/scsi/elx/libefc/efc_device.c
4372 +index 725ca2a23fb2a..52be01333c6e3 100644
4373 +--- a/drivers/scsi/elx/libefc/efc_device.c
4374 ++++ b/drivers/scsi/elx/libefc/efc_device.c
4375 +@@ -928,22 +928,21 @@ __efc_d_wait_topology_notify(struct efc_sm_ctx *ctx,
4376 + break;
4377 +
4378 + case EFC_EVT_NPORT_TOPOLOGY_NOTIFY: {
4379 +- enum efc_nport_topology topology =
4380 +- (enum efc_nport_topology)arg;
4381 ++ enum efc_nport_topology *topology = arg;
4382 +
4383 + WARN_ON(node->nport->domain->attached);
4384 +
4385 + WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_PLOGI);
4386 +
4387 + node_printf(node, "topology notification, topology=%d\n",
4388 +- topology);
4389 ++ *topology);
4390 +
4391 + /* At the time the PLOGI was received, the topology was unknown,
4392 + * so we didn't know which node would perform the domain attach:
4393 + * 1. The node from which the PLOGI was sent (p2p) or
4394 + * 2. The node to which the FLOGI was sent (fabric).
4395 + */
4396 +- if (topology == EFC_NPORT_TOPO_P2P) {
4397 ++ if (*topology == EFC_NPORT_TOPO_P2P) {
4398 + /* if this is p2p, need to attach to the domain using
4399 + * the d_id from the PLOGI received
4400 + */
4401 +diff --git a/drivers/scsi/elx/libefc/efc_fabric.c b/drivers/scsi/elx/libefc/efc_fabric.c
4402 +index d397220d9e543..3270ce40196c6 100644
4403 +--- a/drivers/scsi/elx/libefc/efc_fabric.c
4404 ++++ b/drivers/scsi/elx/libefc/efc_fabric.c
4405 +@@ -107,7 +107,6 @@ void
4406 + efc_fabric_notify_topology(struct efc_node *node)
4407 + {
4408 + struct efc_node *tmp_node;
4409 +- enum efc_nport_topology topology = node->nport->topology;
4410 + unsigned long index;
4411 +
4412 + /*
4413 +@@ -118,7 +117,7 @@ efc_fabric_notify_topology(struct efc_node *node)
4414 + if (tmp_node != node) {
4415 + efc_node_post_event(tmp_node,
4416 + EFC_EVT_NPORT_TOPOLOGY_NOTIFY,
4417 +- (void *)topology);
4418 ++ &node->nport->topology);
4419 + }
4420 + }
4421 + }
4422 +diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
4423 +index 2f67ec1df3e66..82b6f4c2eb4a8 100644
4424 +--- a/drivers/scsi/qla2xxx/qla_def.h
4425 ++++ b/drivers/scsi/qla2xxx/qla_def.h
4426 +@@ -3935,7 +3935,6 @@ struct qla_hw_data {
4427 + uint32_t scm_supported_f:1;
4428 + /* Enabled in Driver */
4429 + uint32_t scm_enabled:1;
4430 +- uint32_t max_req_queue_warned:1;
4431 + uint32_t plogi_template_valid:1;
4432 + uint32_t port_isolated:1;
4433 + } flags;
4434 +diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
4435 +index d9fb093a60a1f..2aa8f519aae62 100644
4436 +--- a/drivers/scsi/qla2xxx/qla_isr.c
4437 ++++ b/drivers/scsi/qla2xxx/qla_isr.c
4438 +@@ -4201,6 +4201,8 @@ skip_msi:
4439 + ql_dbg(ql_dbg_init, vha, 0x0125,
4440 + "INTa mode: Enabled.\n");
4441 + ha->flags.mr_intr_valid = 1;
4442 ++ /* Set max_qpairs to 0, as MSI-X and MSI are not enabled */
4443 ++ ha->max_qpairs = 0;
4444 + }
4445 +
4446 + clear_risc_ints:
4447 +diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
4448 +index a7259733e4709..9316d7d91e2ab 100644
4449 +--- a/drivers/scsi/qla2xxx/qla_nvme.c
4450 ++++ b/drivers/scsi/qla2xxx/qla_nvme.c
4451 +@@ -109,19 +109,24 @@ static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
4452 + return -EINVAL;
4453 + }
4454 +
4455 +- if (ha->queue_pair_map[qidx]) {
4456 +- *handle = ha->queue_pair_map[qidx];
4457 +- ql_log(ql_log_info, vha, 0x2121,
4458 +- "Returning existing qpair of %p for idx=%x\n",
4459 +- *handle, qidx);
4460 +- return 0;
4461 +- }
4462 ++ /* Use base qpair if max_qpairs is 0 */
4463 ++ if (!ha->max_qpairs) {
4464 ++ qpair = ha->base_qpair;
4465 ++ } else {
4466 ++ if (ha->queue_pair_map[qidx]) {
4467 ++ *handle = ha->queue_pair_map[qidx];
4468 ++ ql_log(ql_log_info, vha, 0x2121,
4469 ++ "Returning existing qpair of %p for idx=%x\n",
4470 ++ *handle, qidx);
4471 ++ return 0;
4472 ++ }
4473 +
4474 +- qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
4475 +- if (qpair == NULL) {
4476 +- ql_log(ql_log_warn, vha, 0x2122,
4477 +- "Failed to allocate qpair\n");
4478 +- return -EINVAL;
4479 ++ qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
4480 ++ if (!qpair) {
4481 ++ ql_log(ql_log_warn, vha, 0x2122,
4482 ++ "Failed to allocate qpair\n");
4483 ++ return -EINVAL;
4484 ++ }
4485 + }
4486 + *handle = qpair;
4487 +
4488 +@@ -728,18 +733,9 @@ int qla_nvme_register_hba(struct scsi_qla_host *vha)
4489 +
4490 + WARN_ON(vha->nvme_local_port);
4491 +
4492 +- if (ha->max_req_queues < 3) {
4493 +- if (!ha->flags.max_req_queue_warned)
4494 +- ql_log(ql_log_info, vha, 0x2120,
4495 +- "%s: Disabling FC-NVME due to lack of free queue pairs (%d).\n",
4496 +- __func__, ha->max_req_queues);
4497 +- ha->flags.max_req_queue_warned = 1;
4498 +- return ret;
4499 +- }
4500 +-
4501 + qla_nvme_fc_transport.max_hw_queues =
4502 + min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
4503 +- (uint8_t)(ha->max_req_queues - 2));
4504 ++ (uint8_t)(ha->max_qpairs ? ha->max_qpairs : 1));
4505 +
4506 + pinfo.node_name = wwn_to_u64(vha->node_name);
4507 + pinfo.port_name = wwn_to_u64(vha->port_name);
4508 +diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
4509 +index e6c334bfb4c2c..40acca04d03bb 100644
4510 +--- a/drivers/scsi/ufs/ufshcd-pci.c
4511 ++++ b/drivers/scsi/ufs/ufshcd-pci.c
4512 +@@ -128,6 +128,81 @@ static int ufs_intel_link_startup_notify(struct ufs_hba *hba,
4513 + return err;
4514 + }
4515 +
4516 ++static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes)
4517 ++{
4518 ++ struct ufs_pa_layer_attr pwr_info = hba->pwr_info;
4519 ++ int ret;
4520 ++
4521 ++ pwr_info.lane_rx = lanes;
4522 ++ pwr_info.lane_tx = lanes;
4523 ++ ret = ufshcd_config_pwr_mode(hba, &pwr_info);
4524 ++ if (ret)
4525 ++ dev_err(hba->dev, "%s: Setting %u lanes, err = %d\n",
4526 ++ __func__, lanes, ret);
4527 ++ return ret;
4528 ++}
4529 ++
4530 ++static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba,
4531 ++ enum ufs_notify_change_status status,
4532 ++ struct ufs_pa_layer_attr *dev_max_params,
4533 ++ struct ufs_pa_layer_attr *dev_req_params)
4534 ++{
4535 ++ int err = 0;
4536 ++
4537 ++ switch (status) {
4538 ++ case PRE_CHANGE:
4539 ++ if (ufshcd_is_hs_mode(dev_max_params) &&
4540 ++ (hba->pwr_info.lane_rx != 2 || hba->pwr_info.lane_tx != 2))
4541 ++ ufs_intel_set_lanes(hba, 2);
4542 ++ memcpy(dev_req_params, dev_max_params, sizeof(*dev_req_params));
4543 ++ break;
4544 ++ case POST_CHANGE:
4545 ++ if (ufshcd_is_hs_mode(dev_req_params)) {
4546 ++ u32 peer_granularity;
4547 ++
4548 ++ usleep_range(1000, 1250);
4549 ++ err = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
4550 ++ &peer_granularity);
4551 ++ }
4552 ++ break;
4553 ++ default:
4554 ++ break;
4555 ++ }
4556 ++
4557 ++ return err;
4558 ++}
4559 ++
4560 ++static int ufs_intel_lkf_apply_dev_quirks(struct ufs_hba *hba)
4561 ++{
4562 ++ u32 granularity, peer_granularity;
4563 ++ u32 pa_tactivate, peer_pa_tactivate;
4564 ++ int ret;
4565 ++
4566 ++ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &granularity);
4567 ++ if (ret)
4568 ++ goto out;
4569 ++
4570 ++ ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &peer_granularity);
4571 ++ if (ret)
4572 ++ goto out;
4573 ++
4574 ++ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
4575 ++ if (ret)
4576 ++ goto out;
4577 ++
4578 ++ ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &peer_pa_tactivate);
4579 ++ if (ret)
4580 ++ goto out;
4581 ++
4582 ++ if (granularity == peer_granularity) {
4583 ++ u32 new_peer_pa_tactivate = pa_tactivate + 2;
4584 ++
4585 ++ ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), new_peer_pa_tactivate);
4586 ++ }
4587 ++out:
4588 ++ return ret;
4589 ++}
4590 ++
4591 + #define INTEL_ACTIVELTR 0x804
4592 + #define INTEL_IDLELTR 0x808
4593 +
4594 +@@ -351,6 +426,7 @@ static int ufs_intel_lkf_init(struct ufs_hba *hba)
4595 + struct ufs_host *ufs_host;
4596 + int err;
4597 +
4598 ++ hba->nop_out_timeout = 200;
4599 + hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
4600 + hba->caps |= UFSHCD_CAP_CRYPTO;
4601 + err = ufs_intel_common_init(hba);
4602 +@@ -381,6 +457,8 @@ static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
4603 + .exit = ufs_intel_common_exit,
4604 + .hce_enable_notify = ufs_intel_hce_enable_notify,
4605 + .link_startup_notify = ufs_intel_link_startup_notify,
4606 ++ .pwr_change_notify = ufs_intel_lkf_pwr_change_notify,
4607 ++ .apply_dev_quirks = ufs_intel_lkf_apply_dev_quirks,
4608 + .resume = ufs_intel_resume,
4609 + .device_reset = ufs_intel_device_reset,
4610 + };
4611 +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
4612 +index 3a204324151a8..a3f5af088122e 100644
4613 +--- a/drivers/scsi/ufs/ufshcd.c
4614 ++++ b/drivers/scsi/ufs/ufshcd.c
4615 +@@ -330,8 +330,7 @@ static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
4616 + static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
4617 + enum ufs_trace_str_t str_t)
4618 + {
4619 +- int off = (int)tag - hba->nutrs;
4620 +- struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off];
4621 ++ struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];
4622 +
4623 + if (!trace_ufshcd_upiu_enabled())
4624 + return;
4625 +@@ -4767,7 +4766,7 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
4626 + mutex_lock(&hba->dev_cmd.lock);
4627 + for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
4628 + err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
4629 +- NOP_OUT_TIMEOUT);
4630 ++ hba->nop_out_timeout);
4631 +
4632 + if (!err || err == -ETIMEDOUT)
4633 + break;
4634 +@@ -9403,6 +9402,7 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
4635 + hba->dev = dev;
4636 + *hba_handle = hba;
4637 + hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
4638 ++ hba->nop_out_timeout = NOP_OUT_TIMEOUT;
4639 +
4640 + INIT_LIST_HEAD(&hba->clk_list_head);
4641 +
4642 +diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
4643 +index 86d4765a17b83..aa95deffb873a 100644
4644 +--- a/drivers/scsi/ufs/ufshcd.h
4645 ++++ b/drivers/scsi/ufs/ufshcd.h
4646 +@@ -814,6 +814,7 @@ struct ufs_hba {
4647 + /* Device management request data */
4648 + struct ufs_dev_cmd dev_cmd;
4649 + ktime_t last_dme_cmd_tstamp;
4650 ++ int nop_out_timeout;
4651 +
4652 + /* Keeps information of the UFS device connected to this host */
4653 + struct ufs_dev_info dev_info;
4654 +diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c
4655 +index 31d8449ca1d2d..fc769c52c6d30 100644
4656 +--- a/drivers/staging/media/hantro/hantro_drv.c
4657 ++++ b/drivers/staging/media/hantro/hantro_drv.c
4658 +@@ -918,7 +918,7 @@ static int hantro_probe(struct platform_device *pdev)
4659 + if (!vpu->variant->irqs[i].handler)
4660 + continue;
4661 +
4662 +- if (vpu->variant->num_clocks > 1) {
4663 ++ if (vpu->variant->num_irqs > 1) {
4664 + irq_name = vpu->variant->irqs[i].name;
4665 + irq = platform_get_irq_byname(vpu->pdev, irq_name);
4666 + } else {
4667 +diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.c b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
4668 +index 32c13ecb22d83..a8168ac2fbd0c 100644
4669 +--- a/drivers/staging/media/sunxi/cedrus/cedrus_video.c
4670 ++++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
4671 +@@ -135,7 +135,7 @@ void cedrus_prepare_format(struct v4l2_pix_format *pix_fmt)
4672 + sizeimage = bytesperline * height;
4673 +
4674 + /* Chroma plane size. */
4675 +- sizeimage += bytesperline * height / 2;
4676 ++ sizeimage += bytesperline * ALIGN(height, 64) / 2;
4677 +
4678 + break;
4679 +
4680 +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
4681 +index cb72393f92d3a..153d4a88ec9ac 100644
4682 +--- a/drivers/tty/vt/vt.c
4683 ++++ b/drivers/tty/vt/vt.c
4684 +@@ -1219,8 +1219,25 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
4685 + new_row_size = new_cols << 1;
4686 + new_screen_size = new_row_size * new_rows;
4687 +
4688 +- if (new_cols == vc->vc_cols && new_rows == vc->vc_rows)
4689 +- return 0;
4690 ++ if (new_cols == vc->vc_cols && new_rows == vc->vc_rows) {
4691 ++ /*
4692 ++ * This function is called here to cover the case where
4693 ++ * userspace calls the FBIOPUT_VSCREENINFO ioctl twice, passing
4694 ++ * the same fb_var_screeninfo with the yres/xres fields set to
4695 ++ * a number that is not a multiple of vc_font.height, and
4696 ++ * yres_virtual/xres_virtual set to a number smaller than
4697 ++ * vc_font.height and yres/xres.
4698 ++ * On the second call, the struct fb_var_screeninfo is not
4699 ++ * modified by the underlying driver because of the check
4700 ++ * above, which causes fbcon_display->vrows to become
4701 ++ * negative and eventually leads to an out-of-bounds
4702 ++ * access by the imageblit function.
4703 ++ * To give the struct the correct values, and to avoid having
4704 ++ * to deal with possible errors from the code below, call
4705 ++ * resize_screen here as well.
4706 ++ */
4707 ++ return resize_screen(vc, new_cols, new_rows, user);
4708 ++ }
4709 +
4710 + if (new_screen_size > KMALLOC_MAX_SIZE || !new_screen_size)
4711 + return -EINVAL;
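The new comment describes a sequence userspace can drive directly. A minimal
sketch of that double-ioctl sequence, assuming /dev/fb0 and a stock console
font; the device path, the offsets chosen, and the bare-bones error handling
are illustrative, not part of the patch:

#include <fcntl.h>
#include <linux/fb.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct fb_var_screeninfo var;
	int fd = open("/dev/fb0", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, FBIOGET_VSCREENINFO, &var) < 0)
		return 1;

	/* yres/xres not a multiple of the font height, virtual
	 * resolution smaller than the visible one */
	var.yres -= 1;
	var.yres_virtual = var.yres - 1;
	var.xres_virtual = var.xres - 1;

	ioctl(fd, FBIOPUT_VSCREENINFO, &var);	/* first call resizes */
	ioctl(fd, FBIOPUT_VSCREENINFO, &var);	/* second call took the early return */
	close(fd);
	return 0;
}

With the change above, the second call still reaches resize_screen(), so the
driver repopulates fb_var_screeninfo instead of leaving fbcon_display->vrows
negative.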
4712 +diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
4713 +index 546dfc1e2349c..71cf3f503f16b 100644
4714 +--- a/drivers/watchdog/Kconfig
4715 ++++ b/drivers/watchdog/Kconfig
4716 +@@ -1677,7 +1677,7 @@ config WDT_MTX1
4717 +
4718 + config SIBYTE_WDOG
4719 + tristate "Sibyte SoC hardware watchdog"
4720 +- depends on CPU_SB1 || (MIPS && COMPILE_TEST)
4721 ++ depends on CPU_SB1
4722 + help
4723 + Watchdog driver for the built in watchdog hardware in Sibyte
4724 + SoC processors. There are apparently two watchdog timers
4725 +diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
4726 +index 439ed81e755af..964be729ed0a6 100644
4727 +--- a/fs/binfmt_elf.c
4728 ++++ b/fs/binfmt_elf.c
4729 +@@ -630,7 +630,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
4730 +
4731 + vaddr = eppnt->p_vaddr;
4732 + if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
4733 +- elf_type |= MAP_FIXED_NOREPLACE;
4734 ++ elf_type |= MAP_FIXED;
4735 + else if (no_base && interp_elf_ex->e_type == ET_DYN)
4736 + load_addr = -vaddr;
4737 +
4738 +diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
4739 +index 8129a430d789d..2f117c57160dc 100644
4740 +--- a/fs/debugfs/inode.c
4741 ++++ b/fs/debugfs/inode.c
4742 +@@ -528,7 +528,7 @@ void debugfs_create_file_size(const char *name, umode_t mode,
4743 + {
4744 + struct dentry *de = debugfs_create_file(name, mode, parent, data, fops);
4745 +
4746 +- if (de)
4747 ++ if (!IS_ERR(de))
4748 + d_inode(de)->i_size = file_size;
4749 + }
4750 + EXPORT_SYMBOL_GPL(debugfs_create_file_size);
4751 +diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
4752 +index ffb295aa891c0..74b172a4adda3 100644
4753 +--- a/fs/ext4/dir.c
4754 ++++ b/fs/ext4/dir.c
4755 +@@ -551,7 +551,7 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx)
4756 + struct dir_private_info *info = file->private_data;
4757 + struct inode *inode = file_inode(file);
4758 + struct fname *fname;
4759 +- int ret;
4760 ++ int ret = 0;
4761 +
4762 + if (!info) {
4763 + info = ext4_htree_create_dir_info(file, ctx->pos);
4764 +@@ -599,7 +599,7 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx)
4765 + info->curr_minor_hash,
4766 + &info->next_hash);
4767 + if (ret < 0)
4768 +- return ret;
4769 ++ goto finished;
4770 + if (ret == 0) {
4771 + ctx->pos = ext4_get_htree_eof(file);
4772 + break;
4773 +@@ -630,7 +630,7 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx)
4774 + }
4775 + finished:
4776 + info->last_pos = ctx->pos;
4777 +- return 0;
4778 ++ return ret < 0 ? ret : 0;
4779 + }
4780 +
4781 + static int ext4_release_dir(struct inode *inode, struct file *filp)
4782 +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
4783 +index 92ad64b89d9b5..b1933e3513d60 100644
4784 +--- a/fs/ext4/extents.c
4785 ++++ b/fs/ext4/extents.c
4786 +@@ -5908,7 +5908,7 @@ void ext4_ext_replay_shrink_inode(struct inode *inode, ext4_lblk_t end)
4787 + }
4788 +
4789 + /* Check if *cur is a hole and if it is, skip it */
4790 +-static void skip_hole(struct inode *inode, ext4_lblk_t *cur)
4791 ++static int skip_hole(struct inode *inode, ext4_lblk_t *cur)
4792 + {
4793 + int ret;
4794 + struct ext4_map_blocks map;
4795 +@@ -5917,9 +5917,12 @@ static void skip_hole(struct inode *inode, ext4_lblk_t *cur)
4796 + map.m_len = ((inode->i_size) >> inode->i_sb->s_blocksize_bits) - *cur;
4797 +
4798 + ret = ext4_map_blocks(NULL, inode, &map, 0);
4799 ++ if (ret < 0)
4800 ++ return ret;
4801 + if (ret != 0)
4802 +- return;
4803 ++ return 0;
4804 + *cur = *cur + map.m_len;
4805 ++ return 0;
4806 + }
4807 +
4808 + /* Count number of blocks used by this inode and update i_blocks */
4809 +@@ -5968,7 +5971,9 @@ int ext4_ext_replay_set_iblocks(struct inode *inode)
4810 + * iblocks by total number of differences found.
4811 + */
4812 + cur = 0;
4813 +- skip_hole(inode, &cur);
4814 ++ ret = skip_hole(inode, &cur);
4815 ++ if (ret < 0)
4816 ++ goto out;
4817 + path = ext4_find_extent(inode, cur, NULL, 0);
4818 + if (IS_ERR(path))
4819 + goto out;
4820 +@@ -5987,8 +5992,12 @@ int ext4_ext_replay_set_iblocks(struct inode *inode)
4821 + }
4822 + cur = max(cur + 1, le32_to_cpu(ex->ee_block) +
4823 + ext4_ext_get_actual_len(ex));
4824 +- skip_hole(inode, &cur);
4825 +-
4826 ++ ret = skip_hole(inode, &cur);
4827 ++ if (ret < 0) {
4828 ++ ext4_ext_drop_refs(path);
4829 ++ kfree(path);
4830 ++ break;
4831 ++ }
4832 + path2 = ext4_find_extent(inode, cur, NULL, 0);
4833 + if (IS_ERR(path2)) {
4834 + ext4_ext_drop_refs(path);
4835 +diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
4836 +index e8195229c2529..782d05a3f97a0 100644
4837 +--- a/fs/ext4/fast_commit.c
4838 ++++ b/fs/ext4/fast_commit.c
4839 +@@ -893,6 +893,12 @@ static int ext4_fc_write_inode_data(struct inode *inode, u32 *crc)
4840 + sizeof(lrange), (u8 *)&lrange, crc))
4841 + return -ENOSPC;
4842 + } else {
4843 ++ unsigned int max = (map.m_flags & EXT4_MAP_UNWRITTEN) ?
4844 ++ EXT_UNWRITTEN_MAX_LEN : EXT_INIT_MAX_LEN;
4845 ++
4846 ++ /* Limit the number of blocks in one extent */
4847 ++ map.m_len = min(max, map.m_len);
4848 ++
4849 + fc_ext.fc_ino = cpu_to_le32(inode->i_ino);
4850 + ex = (struct ext4_extent *)&fc_ext.fc_ex;
4851 + ex->ee_block = cpu_to_le32(map.m_lblk);
4852 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
4853 +index d8de607849df3..73daf9443e5e0 100644
4854 +--- a/fs/ext4/inode.c
4855 ++++ b/fs/ext4/inode.c
4856 +@@ -1640,6 +1640,7 @@ static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
4857 + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4858 + int ret;
4859 + bool allocated = false;
4860 ++ bool reserved = false;
4861 +
4862 + /*
4863 + * If the cluster containing lblk is shared with a delayed,
4864 +@@ -1656,6 +1657,7 @@ static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
4865 + ret = ext4_da_reserve_space(inode);
4866 + if (ret != 0) /* ENOSPC */
4867 + goto errout;
4868 ++ reserved = true;
4869 + } else { /* bigalloc */
4870 + if (!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) {
4871 + if (!ext4_es_scan_clu(inode,
4872 +@@ -1668,6 +1670,7 @@ static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
4873 + ret = ext4_da_reserve_space(inode);
4874 + if (ret != 0) /* ENOSPC */
4875 + goto errout;
4876 ++ reserved = true;
4877 + } else {
4878 + allocated = true;
4879 + }
4880 +@@ -1678,6 +1681,8 @@ static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
4881 + }
4882 +
4883 + ret = ext4_es_insert_delayed_block(inode, lblk, allocated);
4884 ++ if (ret && reserved)
4885 ++ ext4_da_release_space(inode, 1);
4886 +
4887 + errout:
4888 + return ret;
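The fix tracks whether this call path itself took the cluster reservation, and
releases it only in that case when ext4_es_insert_delayed_block() fails, so a
reservation taken elsewhere is never dropped twice. A self-contained sketch of
that reserve/insert/release-on-error discipline; every name below is an
illustrative stand-in, not an ext4 internal:

#include <stdbool.h>
#include <stdio.h>

static int quota = 1;

static int reserve_block(void)  { if (!quota) return -1; quota--; return 0; }
static void release_block(void) { quota++; }
static int insert_record(bool fail) { return fail ? -1 : 0; }

static int insert_delayed(bool already_reserved, bool insert_fails)
{
	bool reserved = false;
	int ret = 0;

	if (!already_reserved) {
		ret = reserve_block();
		if (ret)
			return ret;
		reserved = true;	/* we own this reservation */
	}

	ret = insert_record(insert_fails);
	if (ret && reserved)
		release_block();	/* undo only what we took here */
	return ret;
}

int main(void)
{
	insert_delayed(false, true);
	printf("quota after failed insert: %d\n", quota);	/* still 1 */
	return 0;
}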
4889 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
4890 +index 970013c93d3ea..59c25a95050af 100644
4891 +--- a/fs/ext4/super.c
4892 ++++ b/fs/ext4/super.c
4893 +@@ -661,7 +661,7 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
4894 + * constraints, it may not be safe to do it right here so we
4895 + * defer superblock flushing to a workqueue.
4896 + */
4897 +- if (continue_fs)
4898 ++ if (continue_fs && journal)
4899 + schedule_work(&EXT4_SB(sb)->s_error_work);
4900 + else
4901 + ext4_commit_super(sb);
4902 +@@ -1351,6 +1351,12 @@ static void ext4_destroy_inode(struct inode *inode)
4903 + true);
4904 + dump_stack();
4905 + }
4906 ++
4907 ++ if (EXT4_I(inode)->i_reserved_data_blocks)
4908 ++ ext4_msg(inode->i_sb, KERN_ERR,
4909 ++ "Inode %lu (%p): i_reserved_data_blocks (%u) not cleared!",
4910 ++ inode->i_ino, EXT4_I(inode),
4911 ++ EXT4_I(inode)->i_reserved_data_blocks);
4912 + }
4913 +
4914 + static void init_once(void *foo)
4915 +@@ -3185,17 +3191,17 @@ static loff_t ext4_max_size(int blkbits, int has_huge_files)
4916 + */
4917 + static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
4918 + {
4919 +- loff_t res = EXT4_NDIR_BLOCKS;
4920 ++ unsigned long long upper_limit, res = EXT4_NDIR_BLOCKS;
4921 + int meta_blocks;
4922 +- loff_t upper_limit;
4923 +- /* This is calculated to be the largest file size for a dense, block
4924 ++
4925 ++ /*
4926 ++ * This is calculated to be the largest file size for a dense, block
4927 + * mapped file such that the file's total number of 512-byte sectors,
4928 + * including data and all indirect blocks, does not exceed (2^48 - 1).
4929 + *
4930 + * __u32 i_blocks_lo and _u16 i_blocks_high represent the total
4931 + * number of 512-byte sectors of the file.
4932 + */
4933 +-
4934 + if (!has_huge_files) {
4935 + /*
4936 + * !has_huge_files or implies that the inode i_block field
4937 +@@ -3238,7 +3244,7 @@ static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
4938 + if (res > MAX_LFS_FILESIZE)
4939 + res = MAX_LFS_FILESIZE;
4940 +
4941 +- return res;
4942 ++ return (loff_t)res;
4943 + }
4944 +
4945 + static ext4_fsblk_t descriptor_loc(struct super_block *sb,
4946 +@@ -5183,12 +5189,15 @@ failed_mount_wq:
4947 + sbi->s_ea_block_cache = NULL;
4948 +
4949 + if (sbi->s_journal) {
4950 ++ /* flush s_error_work before journal destroy. */
4951 ++ flush_work(&sbi->s_error_work);
4952 + jbd2_journal_destroy(sbi->s_journal);
4953 + sbi->s_journal = NULL;
4954 + }
4955 + failed_mount3a:
4956 + ext4_es_unregister_shrinker(sbi);
4957 + failed_mount3:
4958 ++ /* flush s_error_work before sbi destroy */
4959 + flush_work(&sbi->s_error_work);
4960 + del_timer_sync(&sbi->s_err_report);
4961 + ext4_stop_mmpd(sbi);
4962 +diff --git a/fs/verity/enable.c b/fs/verity/enable.c
4963 +index 77e159a0346b1..60a4372aa4d75 100644
4964 +--- a/fs/verity/enable.c
4965 ++++ b/fs/verity/enable.c
4966 +@@ -177,7 +177,7 @@ static int build_merkle_tree(struct file *filp,
4967 + * (level 0) and ascending to the root node (level 'num_levels - 1').
4968 + * Then at the end (level 'num_levels'), calculate the root hash.
4969 + */
4970 +- blocks = (inode->i_size + params->block_size - 1) >>
4971 ++ blocks = ((u64)inode->i_size + params->block_size - 1) >>
4972 + params->log_blocksize;
4973 + for (level = 0; level <= params->num_levels; level++) {
4974 + err = build_merkle_tree_level(filp, level, blocks, params,
4975 +diff --git a/fs/verity/open.c b/fs/verity/open.c
4976 +index 60ff8af7219fe..92df87f5fa388 100644
4977 +--- a/fs/verity/open.c
4978 ++++ b/fs/verity/open.c
4979 +@@ -89,7 +89,7 @@ int fsverity_init_merkle_tree_params(struct merkle_tree_params *params,
4980 + */
4981 +
4982 + /* Compute number of levels and the number of blocks in each level */
4983 +- blocks = (inode->i_size + params->block_size - 1) >> log_blocksize;
4984 ++ blocks = ((u64)inode->i_size + params->block_size - 1) >> log_blocksize;
4985 + pr_debug("Data is %lld bytes (%llu blocks)\n", inode->i_size, blocks);
4986 + while (blocks > 1) {
4987 + if (params->num_levels >= FS_VERITY_MAX_LEVELS) {
4988 +diff --git a/include/linux/bpf.h b/include/linux/bpf.h
4989 +index e8e2b0393ca93..11da5671d4f09 100644
4990 +--- a/include/linux/bpf.h
4991 ++++ b/include/linux/bpf.h
4992 +@@ -553,6 +553,8 @@ struct btf_func_model {
4993 + * programs only. Should not be used with normal calls and indirect calls.
4994 + */
4995 + #define BPF_TRAMP_F_SKIP_FRAME BIT(2)
4996 ++/* Return the return value of fentry prog. Only used by bpf_struct_ops. */
4997 ++#define BPF_TRAMP_F_RET_FENTRY_RET BIT(4)
4998 +
4999 + /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
5000 + * bytes on x86. Pick a number to fit into BPF_IMAGE_SIZE / 2
5001 +diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
5002 +index 59828516ebaf1..9f4ad719bfe3f 100644
5003 +--- a/include/linux/fwnode.h
5004 ++++ b/include/linux/fwnode.h
5005 +@@ -22,10 +22,15 @@ struct device;
5006 + * LINKS_ADDED: The fwnode has already been parsed to add fwnode links.
5007 + * NOT_DEVICE: The fwnode will never be populated as a struct device.
5008 + * INITIALIZED: The hardware corresponding to fwnode has been initialized.
5009 ++ * NEEDS_CHILD_BOUND_ON_ADD: For this fwnode/device to probe successfully, its
5010 ++ * driver needs its child devices to be bound with
5011 ++ * their respective drivers as soon as they are
5012 ++ * added.
5013 + */
5014 +-#define FWNODE_FLAG_LINKS_ADDED BIT(0)
5015 +-#define FWNODE_FLAG_NOT_DEVICE BIT(1)
5016 +-#define FWNODE_FLAG_INITIALIZED BIT(2)
5017 ++#define FWNODE_FLAG_LINKS_ADDED BIT(0)
5018 ++#define FWNODE_FLAG_NOT_DEVICE BIT(1)
5019 ++#define FWNODE_FLAG_INITIALIZED BIT(2)
5020 ++#define FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD BIT(3)
5021 +
5022 + struct fwnode_handle {
5023 + struct fwnode_handle *secondary;
5024 +diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
5025 +index 3ab2563b1a230..7fd7f60936129 100644
5026 +--- a/include/net/ip_fib.h
5027 ++++ b/include/net/ip_fib.h
5028 +@@ -597,5 +597,5 @@ int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
5029 + int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nh,
5030 + u8 rt_family, unsigned char *flags, bool skip_oif);
5031 + int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nh,
5032 +- int nh_weight, u8 rt_family);
5033 ++ int nh_weight, u8 rt_family, u32 nh_tclassid);
5034 + #endif /* _NET_FIB_H */
5035 +diff --git a/include/net/nexthop.h b/include/net/nexthop.h
5036 +index 10e1777877e6a..28085b995ddcf 100644
5037 +--- a/include/net/nexthop.h
5038 ++++ b/include/net/nexthop.h
5039 +@@ -325,7 +325,7 @@ int nexthop_mpath_fill_node(struct sk_buff *skb, struct nexthop *nh,
5040 + struct fib_nh_common *nhc = &nhi->fib_nhc;
5041 + int weight = nhg->nh_entries[i].weight;
5042 +
5043 +- if (fib_add_nexthop(skb, nhc, weight, rt_family) < 0)
5044 ++ if (fib_add_nexthop(skb, nhc, weight, rt_family, 0) < 0)
5045 + return -EMSGSIZE;
5046 + }
5047 +
5048 +diff --git a/include/net/sock.h b/include/net/sock.h
5049 +index f23cb259b0e24..d28b9bb5ef5a0 100644
5050 +--- a/include/net/sock.h
5051 ++++ b/include/net/sock.h
5052 +@@ -487,8 +487,10 @@ struct sock {
5053 + u8 sk_prefer_busy_poll;
5054 + u16 sk_busy_poll_budget;
5055 + #endif
5056 ++ spinlock_t sk_peer_lock;
5057 + struct pid *sk_peer_pid;
5058 + const struct cred *sk_peer_cred;
5059 ++
5060 + long sk_rcvtimeo;
5061 + ktime_t sk_stamp;
5062 + #if BITS_PER_LONG==32
5063 +diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
5064 +index 989e1517332d6..7a08ed2acd609 100644
5065 +--- a/include/sound/rawmidi.h
5066 ++++ b/include/sound/rawmidi.h
5067 +@@ -98,6 +98,7 @@ struct snd_rawmidi_file {
5068 + struct snd_rawmidi *rmidi;
5069 + struct snd_rawmidi_substream *input;
5070 + struct snd_rawmidi_substream *output;
5071 ++ unsigned int user_pversion; /* supported protocol version */
5072 + };
5073 +
5074 + struct snd_rawmidi_str {
5075 +diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
5076 +index d17c061950df6..9c5121e6ead45 100644
5077 +--- a/include/uapi/sound/asound.h
5078 ++++ b/include/uapi/sound/asound.h
5079 +@@ -783,6 +783,7 @@ struct snd_rawmidi_status {
5080 +
5081 + #define SNDRV_RAWMIDI_IOCTL_PVERSION _IOR('W', 0x00, int)
5082 + #define SNDRV_RAWMIDI_IOCTL_INFO _IOR('W', 0x01, struct snd_rawmidi_info)
5083 ++#define SNDRV_RAWMIDI_IOCTL_USER_PVERSION _IOW('W', 0x02, int)
5084 + #define SNDRV_RAWMIDI_IOCTL_PARAMS _IOWR('W', 0x10, struct snd_rawmidi_params)
5085 + #define SNDRV_RAWMIDI_IOCTL_STATUS _IOWR('W', 0x20, struct snd_rawmidi_status)
5086 + #define SNDRV_RAWMIDI_IOCTL_DROP _IOW('W', 0x30, int)
5087 +diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
5088 +index 70f6fd4fa3056..2ce17447fb769 100644
5089 +--- a/kernel/bpf/bpf_struct_ops.c
5090 ++++ b/kernel/bpf/bpf_struct_ops.c
5091 +@@ -367,6 +367,7 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
5092 + const struct btf_type *mtype, *ptype;
5093 + struct bpf_prog *prog;
5094 + u32 moff;
5095 ++ u32 flags;
5096 +
5097 + moff = btf_member_bit_offset(t, member) / 8;
5098 + ptype = btf_type_resolve_ptr(btf_vmlinux, member->type, NULL);
5099 +@@ -430,10 +431,12 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
5100 +
5101 + tprogs[BPF_TRAMP_FENTRY].progs[0] = prog;
5102 + tprogs[BPF_TRAMP_FENTRY].nr_progs = 1;
5103 ++ flags = st_ops->func_models[i].ret_size > 0 ?
5104 ++ BPF_TRAMP_F_RET_FENTRY_RET : 0;
5105 + err = arch_prepare_bpf_trampoline(NULL, image,
5106 + st_map->image + PAGE_SIZE,
5107 +- &st_ops->func_models[i], 0,
5108 +- tprogs, NULL);
5109 ++ &st_ops->func_models[i],
5110 ++ flags, tprogs, NULL);
5111 + if (err < 0)
5112 + goto reset_unlock;
5113 +
5114 +diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
5115 +index 0a28a8095d3e9..c019611fbc8f4 100644
5116 +--- a/kernel/bpf/core.c
5117 ++++ b/kernel/bpf/core.c
5118 +@@ -827,7 +827,7 @@ int bpf_jit_charge_modmem(u32 pages)
5119 + {
5120 + if (atomic_long_add_return(pages, &bpf_jit_current) >
5121 + (bpf_jit_limit >> PAGE_SHIFT)) {
5122 +- if (!capable(CAP_SYS_ADMIN)) {
5123 ++ if (!bpf_capable()) {
5124 + atomic_long_sub(pages, &bpf_jit_current);
5125 + return -EPERM;
5126 + }
5127 +diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
5128 +index 57124614363df..e7af18857371e 100644
5129 +--- a/kernel/sched/cpufreq_schedutil.c
5130 ++++ b/kernel/sched/cpufreq_schedutil.c
5131 +@@ -537,9 +537,17 @@ static struct attribute *sugov_attrs[] = {
5132 + };
5133 + ATTRIBUTE_GROUPS(sugov);
5134 +
5135 ++static void sugov_tunables_free(struct kobject *kobj)
5136 ++{
5137 ++ struct gov_attr_set *attr_set = container_of(kobj, struct gov_attr_set, kobj);
5138 ++
5139 ++ kfree(to_sugov_tunables(attr_set));
5140 ++}
5141 ++
5142 + static struct kobj_type sugov_tunables_ktype = {
5143 + .default_groups = sugov_groups,
5144 + .sysfs_ops = &governor_sysfs_ops,
5145 ++ .release = &sugov_tunables_free,
5146 + };
5147 +
5148 + /********************** cpufreq governor interface *********************/
5149 +@@ -639,12 +647,10 @@ static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_polic
5150 + return tunables;
5151 + }
5152 +
5153 +-static void sugov_tunables_free(struct sugov_tunables *tunables)
5154 ++static void sugov_clear_global_tunables(void)
5155 + {
5156 + if (!have_governor_per_policy())
5157 + global_tunables = NULL;
5158 +-
5159 +- kfree(tunables);
5160 + }
5161 +
5162 + static int sugov_init(struct cpufreq_policy *policy)
5163 +@@ -707,7 +713,7 @@ out:
5164 + fail:
5165 + kobject_put(&tunables->attr_set.kobj);
5166 + policy->governor_data = NULL;
5167 +- sugov_tunables_free(tunables);
5168 ++ sugov_clear_global_tunables();
5169 +
5170 + stop_kthread:
5171 + sugov_kthread_stop(sg_policy);
5172 +@@ -734,7 +740,7 @@ static void sugov_exit(struct cpufreq_policy *policy)
5173 + count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
5174 + policy->governor_data = NULL;
5175 + if (!count)
5176 +- sugov_tunables_free(tunables);
5177 ++ sugov_clear_global_tunables();
5178 +
5179 + mutex_unlock(&global_tunables_lock);
5180 +
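The sugov fix moves the kfree() of the tunables into the kobject release
callback, so the allocation lives until the last reference is gone instead of
being freed while a sysfs reader may still hold one; sugov_exit() now only
clears the global pointer. A userspace analogue of that lifetime rule, with a
simplified handle standing in for kobject/gov_attr_set; all names are
illustrative:

#include <stdio.h>
#include <stdlib.h>

struct handle {
	int refs;
	void (*release)(struct handle *h);
};

struct tunables {
	struct handle handle;	/* first member, so the cast below works */
	int rate_limit_us;
};

static void handle_put(struct handle *h)
{
	if (--h->refs == 0 && h->release)
		h->release(h);
}

static void tunables_release(struct handle *h)
{
	/* container_of() in the kernel */
	struct tunables *t = (struct tunables *)h;

	printf("freeing tunables\n");
	free(t);
}

int main(void)
{
	struct tunables *t = calloc(1, sizeof(*t));

	t->handle.refs = 2;	/* governor + an in-flight sysfs reader */
	t->handle.release = tunables_release;

	handle_put(&t->handle);	/* governor exit: object must survive */
	handle_put(&t->handle);	/* reader drops the last ref: freed here */
	return 0;
}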
5181 +diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
5182 +index 7e08e3d947c20..2c879cd02a5f7 100644
5183 +--- a/kernel/sched/debug.c
5184 ++++ b/kernel/sched/debug.c
5185 +@@ -173,16 +173,22 @@ static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
5186 + size_t cnt, loff_t *ppos)
5187 + {
5188 + char buf[16];
5189 ++ unsigned int scaling;
5190 +
5191 + if (cnt > 15)
5192 + cnt = 15;
5193 +
5194 + if (copy_from_user(&buf, ubuf, cnt))
5195 + return -EFAULT;
5196 ++ buf[cnt] = '\0';
5197 +
5198 +- if (kstrtouint(buf, 10, &sysctl_sched_tunable_scaling))
5199 ++ if (kstrtouint(buf, 10, &scaling))
5200 + return -EINVAL;
5201 +
5202 ++ if (scaling >= SCHED_TUNABLESCALING_END)
5203 ++ return -EINVAL;
5204 ++
5205 ++ sysctl_sched_tunable_scaling = scaling;
5206 + if (sched_update_scaling())
5207 + return -EINVAL;
5208 +
5209 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
5210 +index 30a6984a58f71..423ec671a3063 100644
5211 +--- a/kernel/sched/fair.c
5212 ++++ b/kernel/sched/fair.c
5213 +@@ -4898,8 +4898,12 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
5214 + /* update hierarchical throttle state */
5215 + walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
5216 +
5217 +- if (!cfs_rq->load.weight)
5218 ++ /* Nothing to run but something to decay (on_list)? Complete the branch */
5219 ++ if (!cfs_rq->load.weight) {
5220 ++ if (cfs_rq->on_list)
5221 ++ goto unthrottle_throttle;
5222 + return;
5223 ++ }
5224 +
5225 + task_delta = cfs_rq->h_nr_running;
5226 + idle_task_delta = cfs_rq->idle_h_nr_running;
5227 +diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
5228 +index 1e2d10f860117..cdc842d090db3 100644
5229 +--- a/lib/Kconfig.kasan
5230 ++++ b/lib/Kconfig.kasan
5231 +@@ -66,6 +66,7 @@ choice
5232 + config KASAN_GENERIC
5233 + bool "Generic mode"
5234 + depends on HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC
5235 ++ depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
5236 + select SLUB_DEBUG if SLUB
5237 + select CONSTRUCTORS
5238 + help
5239 +@@ -86,6 +87,7 @@ config KASAN_GENERIC
5240 + config KASAN_SW_TAGS
5241 + bool "Software tag-based mode"
5242 + depends on HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS
5243 ++ depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
5244 + select SLUB_DEBUG if SLUB
5245 + select CONSTRUCTORS
5246 + help
5247 +diff --git a/mm/util.c b/mm/util.c
5248 +index c18202b3e659d..8bd4a20262a91 100644
5249 +--- a/mm/util.c
5250 ++++ b/mm/util.c
5251 +@@ -593,6 +593,10 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
5252 + if (ret || size <= PAGE_SIZE)
5253 + return ret;
5254 +
5255 ++ /* Don't even allow crazy sizes */
5256 ++ if (WARN_ON_ONCE(size > INT_MAX))
5257 ++ return NULL;
5258 ++
5259 + return __vmalloc_node(size, 1, flags, node,
5260 + __builtin_return_address(0));
5261 + }
5262 +diff --git a/net/core/sock.c b/net/core/sock.c
5263 +index a3eea6e0b30a7..4a08ae6de578c 100644
5264 +--- a/net/core/sock.c
5265 ++++ b/net/core/sock.c
5266 +@@ -1366,6 +1366,16 @@ set_sndbuf:
5267 + }
5268 + EXPORT_SYMBOL(sock_setsockopt);
5269 +
5270 ++static const struct cred *sk_get_peer_cred(struct sock *sk)
5271 ++{
5272 ++ const struct cred *cred;
5273 ++
5274 ++ spin_lock(&sk->sk_peer_lock);
5275 ++ cred = get_cred(sk->sk_peer_cred);
5276 ++ spin_unlock(&sk->sk_peer_lock);
5277 ++
5278 ++ return cred;
5279 ++}
5280 +
5281 + static void cred_to_ucred(struct pid *pid, const struct cred *cred,
5282 + struct ucred *ucred)
5283 +@@ -1542,7 +1552,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
5284 + struct ucred peercred;
5285 + if (len > sizeof(peercred))
5286 + len = sizeof(peercred);
5287 ++
5288 ++ spin_lock(&sk->sk_peer_lock);
5289 + cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
5290 ++ spin_unlock(&sk->sk_peer_lock);
5291 ++
5292 + if (copy_to_user(optval, &peercred, len))
5293 + return -EFAULT;
5294 + goto lenout;
5295 +@@ -1550,20 +1564,23 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
5296 +
5297 + case SO_PEERGROUPS:
5298 + {
5299 ++ const struct cred *cred;
5300 + int ret, n;
5301 +
5302 +- if (!sk->sk_peer_cred)
5303 ++ cred = sk_get_peer_cred(sk);
5304 ++ if (!cred)
5305 + return -ENODATA;
5306 +
5307 +- n = sk->sk_peer_cred->group_info->ngroups;
5308 ++ n = cred->group_info->ngroups;
5309 + if (len < n * sizeof(gid_t)) {
5310 + len = n * sizeof(gid_t);
5311 ++ put_cred(cred);
5312 + return put_user(len, optlen) ? -EFAULT : -ERANGE;
5313 + }
5314 + len = n * sizeof(gid_t);
5315 +
5316 +- ret = groups_to_user((gid_t __user *)optval,
5317 +- sk->sk_peer_cred->group_info);
5318 ++ ret = groups_to_user((gid_t __user *)optval, cred->group_info);
5319 ++ put_cred(cred);
5320 + if (ret)
5321 + return ret;
5322 + goto lenout;
5323 +@@ -1921,9 +1938,10 @@ static void __sk_destruct(struct rcu_head *head)
5324 + sk->sk_frag.page = NULL;
5325 + }
5326 +
5327 +- if (sk->sk_peer_cred)
5328 +- put_cred(sk->sk_peer_cred);
5329 ++ /* We do not need to acquire sk->sk_peer_lock; we are the last user. */
5330 ++ put_cred(sk->sk_peer_cred);
5331 + put_pid(sk->sk_peer_pid);
5332 ++
5333 + if (likely(sk->sk_net_refcnt))
5334 + put_net(sock_net(sk));
5335 + sk_prot_free(sk->sk_prot_creator, sk);
5336 +@@ -3124,6 +3142,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
5337 +
5338 + sk->sk_peer_pid = NULL;
5339 + sk->sk_peer_cred = NULL;
5340 ++ spin_lock_init(&sk->sk_peer_lock);
5341 ++
5342 + sk->sk_write_pending = 0;
5343 + sk->sk_rcvlowat = 1;
5344 + sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
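sk_get_peer_cred() above encodes the rule the new sk_peer_lock enforces: hold
the lock only long enough to take a counted reference, then use the credential
outside it. A pthread sketch of the same discipline; the kernel's put_cred()
uses an atomic refcount rather than the lock, so this and every name below are
illustrative simplifications:

#include <pthread.h>
#include <stdlib.h>

struct cred {
	int refs;		/* protected by peer_lock in this sketch */
};

static pthread_mutex_t peer_lock = PTHREAD_MUTEX_INITIALIZER;
static struct cred *peer_cred;

static struct cred *get_peer_cred(void)
{
	struct cred *c;

	pthread_mutex_lock(&peer_lock);
	c = peer_cred;
	if (c)
		c->refs++;	/* get_cred() */
	pthread_mutex_unlock(&peer_lock);
	return c;
}

static void put_peer_cred(struct cred *c)
{
	int free_it;

	if (!c)
		return;
	pthread_mutex_lock(&peer_lock);
	free_it = (--c->refs == 0);
	pthread_mutex_unlock(&peer_lock);
	if (free_it)
		free(c);
}

int main(void)
{
	struct cred *c;

	peer_cred = calloc(1, sizeof(*peer_cred));
	peer_cred->refs = 1;

	c = get_peer_cred();	/* stable snapshot */
	/* ... read c->... without holding peer_lock ... */
	put_peer_cred(c);

	pthread_mutex_lock(&peer_lock);
	c = peer_cred;
	peer_cred = NULL;	/* the reset path swaps it out */
	pthread_mutex_unlock(&peer_lock);
	put_peer_cred(c);
	return 0;
}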
5345 +diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
5346 +index 4c0c33e4710da..27fdd86b9cee7 100644
5347 +--- a/net/ipv4/fib_semantics.c
5348 ++++ b/net/ipv4/fib_semantics.c
5349 +@@ -1663,7 +1663,7 @@ EXPORT_SYMBOL_GPL(fib_nexthop_info);
5350 +
5351 + #if IS_ENABLED(CONFIG_IP_ROUTE_MULTIPATH) || IS_ENABLED(CONFIG_IPV6)
5352 + int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nhc,
5353 +- int nh_weight, u8 rt_family)
5354 ++ int nh_weight, u8 rt_family, u32 nh_tclassid)
5355 + {
5356 + const struct net_device *dev = nhc->nhc_dev;
5357 + struct rtnexthop *rtnh;
5358 +@@ -1681,6 +1681,9 @@ int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nhc,
5359 +
5360 + rtnh->rtnh_flags = flags;
5361 +
5362 ++ if (nh_tclassid && nla_put_u32(skb, RTA_FLOW, nh_tclassid))
5363 ++ goto nla_put_failure;
5364 ++
5365 + /* length of rtnetlink header + attributes */
5366 + rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
5367 +
5368 +@@ -1708,14 +1711,13 @@ static int fib_add_multipath(struct sk_buff *skb, struct fib_info *fi)
5369 + }
5370 +
5371 + for_nexthops(fi) {
5372 +- if (fib_add_nexthop(skb, &nh->nh_common, nh->fib_nh_weight,
5373 +- AF_INET) < 0)
5374 +- goto nla_put_failure;
5375 ++ u32 nh_tclassid = 0;
5376 + #ifdef CONFIG_IP_ROUTE_CLASSID
5377 +- if (nh->nh_tclassid &&
5378 +- nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
5379 +- goto nla_put_failure;
5380 ++ nh_tclassid = nh->nh_tclassid;
5381 + #endif
5382 ++ if (fib_add_nexthop(skb, &nh->nh_common, nh->fib_nh_weight,
5383 ++ AF_INET, nh_tclassid) < 0)
5384 ++ goto nla_put_failure;
5385 + } endfor_nexthops(fi);
5386 +
5387 + mp_end:
5388 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
5389 +index 1a742b710e543..915ea635b2d5a 100644
5390 +--- a/net/ipv4/udp.c
5391 ++++ b/net/ipv4/udp.c
5392 +@@ -1053,7 +1053,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
5393 + __be16 dport;
5394 + u8 tos;
5395 + int err, is_udplite = IS_UDPLITE(sk);
5396 +- int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
5397 ++ int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
5398 + int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
5399 + struct sk_buff *skb;
5400 + struct ip_options_data opt_copy;
5401 +@@ -1361,7 +1361,7 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
5402 + }
5403 +
5404 + up->len += size;
5405 +- if (!(up->corkflag || (flags&MSG_MORE)))
5406 ++ if (!(READ_ONCE(up->corkflag) || (flags&MSG_MORE)))
5407 + ret = udp_push_pending_frames(sk);
5408 + if (!ret)
5409 + ret = size;
5410 +@@ -2662,9 +2662,9 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
5411 + switch (optname) {
5412 + case UDP_CORK:
5413 + if (val != 0) {
5414 +- up->corkflag = 1;
5415 ++ WRITE_ONCE(up->corkflag, 1);
5416 + } else {
5417 +- up->corkflag = 0;
5418 ++ WRITE_ONCE(up->corkflag, 0);
5419 + lock_sock(sk);
5420 + push_pending_frames(sk);
5421 + release_sock(sk);
5422 +@@ -2787,7 +2787,7 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
5423 +
5424 + switch (optname) {
5425 + case UDP_CORK:
5426 +- val = up->corkflag;
5427 ++ val = READ_ONCE(up->corkflag);
5428 + break;
5429 +
5430 + case UDP_ENCAP:
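The corkflag conversion is a data-race annotation: UDP_CORK is written under
the socket lock but read locklessly on the sendmsg/sendpage and getsockopt
paths, so both sides need single, untorn accesses the compiler cannot re-read
or cache. READ_ONCE() and WRITE_ONCE() are kernel macros; the volatile casts
below are their usual minimal userspace rendering, shown only to make the
access discipline concrete:

#include <stdio.h>

#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))

struct udp_sock_sketch {
	int corkflag;	/* written under the socket lock, read without it */
};

int main(void)
{
	struct udp_sock_sketch up = { 0 };

	WRITE_ONCE(up.corkflag, 1);		/* setsockopt(UDP_CORK) side */

	/* sendmsg side: one untorn load per decision */
	int corkreq = READ_ONCE(up.corkflag);

	printf("corkreq=%d\n", corkreq);
	return 0;
}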
5431 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
5432 +index 6033403021019..0aeff2ce17b9f 100644
5433 +--- a/net/ipv6/route.c
5434 ++++ b/net/ipv6/route.c
5435 +@@ -5700,14 +5700,15 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
5436 + goto nla_put_failure;
5437 +
5438 + if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common,
5439 +- rt->fib6_nh->fib_nh_weight, AF_INET6) < 0)
5440 ++ rt->fib6_nh->fib_nh_weight, AF_INET6,
5441 ++ 0) < 0)
5442 + goto nla_put_failure;
5443 +
5444 + list_for_each_entry_safe(sibling, next_sibling,
5445 + &rt->fib6_siblings, fib6_siblings) {
5446 + if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common,
5447 + sibling->fib6_nh->fib_nh_weight,
5448 +- AF_INET6) < 0)
5449 ++ AF_INET6, 0) < 0)
5450 + goto nla_put_failure;
5451 + }
5452 +
5453 +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
5454 +index c5e15e94bb004..80ae024d13c8c 100644
5455 +--- a/net/ipv6/udp.c
5456 ++++ b/net/ipv6/udp.c
5457 +@@ -1303,7 +1303,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
5458 + int addr_len = msg->msg_namelen;
5459 + bool connected = false;
5460 + int ulen = len;
5461 +- int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
5462 ++ int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
5463 + int err;
5464 + int is_udplite = IS_UDPLITE(sk);
5465 + int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
5466 +diff --git a/net/mac80211/mesh_ps.c b/net/mac80211/mesh_ps.c
5467 +index 204830a55240b..3fbd0b9ff9135 100644
5468 +--- a/net/mac80211/mesh_ps.c
5469 ++++ b/net/mac80211/mesh_ps.c
5470 +@@ -2,6 +2,7 @@
5471 + /*
5472 + * Copyright 2012-2013, Marco Porsch <marco.porsch@×××××××××××××××××.de>
5473 + * Copyright 2012-2013, cozybit Inc.
5474 ++ * Copyright (C) 2021 Intel Corporation
5475 + */
5476 +
5477 + #include "mesh.h"
5478 +@@ -588,7 +589,7 @@ void ieee80211_mps_frame_release(struct sta_info *sta,
5479 +
5480 + /* only transmit to PS STA with announced, non-zero awake window */
5481 + if (test_sta_flag(sta, WLAN_STA_PS_STA) &&
5482 +- (!elems->awake_window || !le16_to_cpu(*elems->awake_window)))
5483 ++ (!elems->awake_window || !get_unaligned_le16(elems->awake_window)))
5484 + return;
5485 +
5486 + if (!test_sta_flag(sta, WLAN_STA_MPSP_OWNER))
5487 +diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
5488 +index e5935e3d7a078..8c6416129d5be 100644
5489 +--- a/net/mac80211/rate.c
5490 ++++ b/net/mac80211/rate.c
5491 +@@ -392,10 +392,6 @@ static bool rate_control_send_low(struct ieee80211_sta *pubsta,
5492 + int mcast_rate;
5493 + bool use_basicrate = false;
5494 +
5495 +- if (ieee80211_is_tx_data(txrc->skb) &&
5496 +- info->flags & IEEE80211_TX_CTL_NO_ACK)
5497 +- return false;
5498 +-
5499 + if (!pubsta || rc_no_data_or_no_ack_use_min(txrc)) {
5500 + __rate_control_send_low(txrc->hw, sband, pubsta, info,
5501 + txrc->rate_idx_mask);
5502 +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
5503 +index fa09a369214db..751e601c46235 100644
5504 +--- a/net/mac80211/tx.c
5505 ++++ b/net/mac80211/tx.c
5506 +@@ -2209,7 +2209,11 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
5507 + }
5508 +
5509 + vht_mcs = iterator.this_arg[4] >> 4;
5510 ++ if (vht_mcs > 11)
5511 ++ vht_mcs = 0;
5512 + vht_nss = iterator.this_arg[4] & 0xF;
5513 ++ if (!vht_nss || vht_nss > 8)
5514 ++ vht_nss = 1;
5515 + break;
5516 +
5517 + /*
5518 +@@ -3380,6 +3384,14 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
5519 + if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
5520 + goto out;
5521 +
5522 ++ /* If n == 2, the "while (*frag_tail)" loop above didn't execute
5523 ++ * and frag_tail should be &skb_shinfo(head)->frag_list.
5524 ++ * However, ieee80211_amsdu_prepare_head() can reallocate it.
5525 ++ * Reload frag_tail so that it points to the correct place.
5526 ++ */
5527 ++ if (n == 2)
5528 ++ frag_tail = &skb_shinfo(head)->frag_list;
5529 ++
5530 + /*
5531 + * Pad out the previous subframe to a multiple of 4 by adding the
5532 + * padding to the next one, that's being added. Note that head->len
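The frag_tail reload above and the hdr reloads in wpa.c below are the same
hazard: a helper that may reallocate the skb invalidates any pointer derived
from it earlier, so the pointer must be re-derived after the call. A plain-C
sketch of the pattern; buf, grow_headroom, and the sizes are illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
	unsigned char *data;
	size_t len;
};

/* May move b->data, as ieee80211_amsdu_prepare_head() or the decrypt
 * helpers may reallocate the skb. */
static int grow_headroom(struct buf *b, size_t extra)
{
	unsigned char *n = realloc(b->data, b->len + extra);

	if (!n)
		return -1;
	memmove(n + extra, n, b->len);
	b->data = n;
	b->len += extra;
	return 0;
}

int main(void)
{
	struct buf b = { .data = calloc(1, 16), .len = 16 };
	unsigned char *hdr = b.data;	/* derived pointer */

	if (grow_headroom(&b, 32) == 0)
		hdr = b.data;		/* reload: the old hdr may dangle */

	printf("%p\n", (void *)hdr);
	free(b.data);
	return 0;
}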
5533 +diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
5534 +index bca47fad5a162..4eed23e276104 100644
5535 +--- a/net/mac80211/wpa.c
5536 ++++ b/net/mac80211/wpa.c
5537 +@@ -520,6 +520,9 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
5538 + return RX_DROP_UNUSABLE;
5539 + }
5540 +
5541 ++ /* reload hdr - skb might have been reallocated */
5542 ++ hdr = (void *)rx->skb->data;
5543 ++
5544 + data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN - mic_len;
5545 + if (!rx->sta || data_len < 0)
5546 + return RX_DROP_UNUSABLE;
5547 +@@ -749,6 +752,9 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx)
5548 + return RX_DROP_UNUSABLE;
5549 + }
5550 +
5551 ++ /* reload hdr - skb might have been reallocated */
5552 ++ hdr = (void *)rx->skb->data;
5553 ++
5554 + data_len = skb->len - hdrlen - IEEE80211_GCMP_HDR_LEN - mic_len;
5555 + if (!rx->sta || data_len < 0)
5556 + return RX_DROP_UNUSABLE;
5557 +diff --git a/net/mptcp/mptcp_diag.c b/net/mptcp/mptcp_diag.c
5558 +index f48eb6315bbb4..292374fb07792 100644
5559 +--- a/net/mptcp/mptcp_diag.c
5560 ++++ b/net/mptcp/mptcp_diag.c
5561 +@@ -36,7 +36,7 @@ static int mptcp_diag_dump_one(struct netlink_callback *cb,
5562 + struct sock *sk;
5563 +
5564 + net = sock_net(in_skb->sk);
5565 +- msk = mptcp_token_get_sock(req->id.idiag_cookie[0]);
5566 ++ msk = mptcp_token_get_sock(net, req->id.idiag_cookie[0]);
5567 + if (!msk)
5568 + goto out_nosk;
5569 +
5570 +diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
5571 +index 89251cbe9f1a7..81103b29c0af1 100644
5572 +--- a/net/mptcp/pm_netlink.c
5573 ++++ b/net/mptcp/pm_netlink.c
5574 +@@ -1558,9 +1558,7 @@ static int mptcp_nl_cmd_set_flags(struct sk_buff *skb, struct genl_info *info)
5575 +
5576 + list_for_each_entry(entry, &pernet->local_addr_list, list) {
5577 + if (addresses_equal(&entry->addr, &addr.addr, true)) {
5578 +- ret = mptcp_nl_addr_backup(net, &entry->addr, bkup);
5579 +- if (ret)
5580 +- return ret;
5581 ++ mptcp_nl_addr_backup(net, &entry->addr, bkup);
5582 +
5583 + if (bkup)
5584 + entry->flags |= MPTCP_PM_ADDR_FLAG_BACKUP;
5585 +diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
5586 +index 6ac564d584c19..c8a49e92e66f3 100644
5587 +--- a/net/mptcp/protocol.h
5588 ++++ b/net/mptcp/protocol.h
5589 +@@ -680,7 +680,7 @@ int mptcp_token_new_connect(struct sock *sk);
5590 + void mptcp_token_accept(struct mptcp_subflow_request_sock *r,
5591 + struct mptcp_sock *msk);
5592 + bool mptcp_token_exists(u32 token);
5593 +-struct mptcp_sock *mptcp_token_get_sock(u32 token);
5594 ++struct mptcp_sock *mptcp_token_get_sock(struct net *net, u32 token);
5595 + struct mptcp_sock *mptcp_token_iter_next(const struct net *net, long *s_slot,
5596 + long *s_num);
5597 + void mptcp_token_destroy(struct mptcp_sock *msk);
5598 +diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
5599 +index 966f777d35ce9..1f3039b829a7f 100644
5600 +--- a/net/mptcp/subflow.c
5601 ++++ b/net/mptcp/subflow.c
5602 +@@ -86,7 +86,7 @@ static struct mptcp_sock *subflow_token_join_request(struct request_sock *req)
5603 + struct mptcp_sock *msk;
5604 + int local_id;
5605 +
5606 +- msk = mptcp_token_get_sock(subflow_req->token);
5607 ++ msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token);
5608 + if (!msk) {
5609 + SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
5610 + return NULL;
5611 +diff --git a/net/mptcp/syncookies.c b/net/mptcp/syncookies.c
5612 +index 37127781aee98..7f22526346a7e 100644
5613 +--- a/net/mptcp/syncookies.c
5614 ++++ b/net/mptcp/syncookies.c
5615 +@@ -108,18 +108,12 @@ bool mptcp_token_join_cookie_init_state(struct mptcp_subflow_request_sock *subfl
5616 +
5617 + e->valid = 0;
5618 +
5619 +- msk = mptcp_token_get_sock(e->token);
5620 ++ msk = mptcp_token_get_sock(net, e->token);
5621 + if (!msk) {
5622 + spin_unlock_bh(&join_entry_locks[i]);
5623 + return false;
5624 + }
5625 +
5626 +- /* If this fails, the token got re-used in the mean time by another
5627 +- * mptcp socket in a different netns, i.e. entry is outdated.
5628 +- */
5629 +- if (!net_eq(sock_net((struct sock *)msk), net))
5630 +- goto err_put;
5631 +-
5632 + subflow_req->remote_nonce = e->remote_nonce;
5633 + subflow_req->local_nonce = e->local_nonce;
5634 + subflow_req->backup = e->backup;
5635 +@@ -128,11 +122,6 @@ bool mptcp_token_join_cookie_init_state(struct mptcp_subflow_request_sock *subfl
5636 + subflow_req->msk = msk;
5637 + spin_unlock_bh(&join_entry_locks[i]);
5638 + return true;
5639 +-
5640 +-err_put:
5641 +- spin_unlock_bh(&join_entry_locks[i]);
5642 +- sock_put((struct sock *)msk);
5643 +- return false;
5644 + }
5645 +
5646 + void __init mptcp_join_cookie_init(void)
5647 +diff --git a/net/mptcp/token.c b/net/mptcp/token.c
5648 +index a98e554b034fe..e581b341c5beb 100644
5649 +--- a/net/mptcp/token.c
5650 ++++ b/net/mptcp/token.c
5651 +@@ -231,6 +231,7 @@ found:
5652 +
5653 + /**
5654 + * mptcp_token_get_sock - retrieve mptcp connection sock using its token
5655 ++ * @net: restrict to this namespace
5656 + * @token: token of the mptcp connection to retrieve
5657 + *
5658 + * This function returns the mptcp connection structure with the given token.
5659 +@@ -238,7 +239,7 @@ found:
5660 + *
5661 + * returns NULL if no connection with the given token value exists.
5662 + */
5663 +-struct mptcp_sock *mptcp_token_get_sock(u32 token)
5664 ++struct mptcp_sock *mptcp_token_get_sock(struct net *net, u32 token)
5665 + {
5666 + struct hlist_nulls_node *pos;
5667 + struct token_bucket *bucket;
5668 +@@ -251,11 +252,15 @@ struct mptcp_sock *mptcp_token_get_sock(u32 token)
5669 + again:
5670 + sk_nulls_for_each_rcu(sk, pos, &bucket->msk_chain) {
5671 + msk = mptcp_sk(sk);
5672 +- if (READ_ONCE(msk->token) != token)
5673 ++ if (READ_ONCE(msk->token) != token ||
5674 ++ !net_eq(sock_net(sk), net))
5675 + continue;
5676 ++
5677 + if (!refcount_inc_not_zero(&sk->sk_refcnt))
5678 + goto not_found;
5679 +- if (READ_ONCE(msk->token) != token) {
5680 ++
5681 ++ if (READ_ONCE(msk->token) != token ||
5682 ++ !net_eq(sock_net(sk), net)) {
5683 + sock_put(sk);
5684 + goto again;
5685 + }
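mptcp_token_get_sock() now keys the lookup on (token, netns) and keeps the
usual RCU lookup discipline: match the key, take a reference with
refcount_inc_not_zero(), then re-check the key, because the object can be
freed and recycled between the first check and the reference bump. A
C11-atomics sketch of that check/ref/re-check pattern; the single flat entry
and all names are illustrative:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct entry {
	atomic_int refs;
	unsigned int token;
	unsigned int netns_id;
};

/* refcount_inc_not_zero(): only take a reference on a live object */
static bool ref_get_not_zero(atomic_int *r)
{
	int v = atomic_load(r);

	while (v != 0)
		if (atomic_compare_exchange_weak(r, &v, v + 1))
			return true;
	return false;
}

static struct entry *lookup(struct entry *e, unsigned int token,
			    unsigned int netns_id)
{
	if (e->token != token || e->netns_id != netns_id)
		return NULL;			/* wrong key or wrong netns */
	if (!ref_get_not_zero(&e->refs))
		return NULL;			/* object is being freed */
	if (e->token != token || e->netns_id != netns_id) {
		atomic_fetch_sub(&e->refs, 1);	/* recycled meanwhile: drop */
		return NULL;
	}
	return e;
}

int main(void)
{
	struct entry e = { .token = 42, .netns_id = 7 };

	atomic_init(&e.refs, 1);
	printf("hit: %p\n", (void *)lookup(&e, 42, 7));
	printf("cross-netns miss: %p\n", (void *)lookup(&e, 42, 8));
	return 0;
}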
5686 +diff --git a/net/mptcp/token_test.c b/net/mptcp/token_test.c
5687 +index e1bd6f0a0676f..5d984bec1cd86 100644
5688 +--- a/net/mptcp/token_test.c
5689 ++++ b/net/mptcp/token_test.c
5690 +@@ -11,6 +11,7 @@ static struct mptcp_subflow_request_sock *build_req_sock(struct kunit *test)
5691 + GFP_USER);
5692 + KUNIT_EXPECT_NOT_ERR_OR_NULL(test, req);
5693 + mptcp_token_init_request((struct request_sock *)req);
5694 ++ sock_net_set((struct sock *)req, &init_net);
5695 + return req;
5696 + }
5697 +
5698 +@@ -22,7 +23,7 @@ static void mptcp_token_test_req_basic(struct kunit *test)
5699 + KUNIT_ASSERT_EQ(test, 0,
5700 + mptcp_token_new_request((struct request_sock *)req));
5701 + KUNIT_EXPECT_NE(test, 0, (int)req->token);
5702 +- KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(req->token));
5703 ++ KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(&init_net, req->token));
5704 +
5705 + /* cleanup */
5706 + mptcp_token_destroy_request((struct request_sock *)req);
5707 +@@ -55,6 +56,7 @@ static struct mptcp_sock *build_msk(struct kunit *test)
5708 + msk = kunit_kzalloc(test, sizeof(struct mptcp_sock), GFP_USER);
5709 + KUNIT_EXPECT_NOT_ERR_OR_NULL(test, msk);
5710 + refcount_set(&((struct sock *)msk)->sk_refcnt, 1);
5711 ++ sock_net_set((struct sock *)msk, &init_net);
5712 + return msk;
5713 + }
5714 +
5715 +@@ -74,11 +76,11 @@ static void mptcp_token_test_msk_basic(struct kunit *test)
5716 + mptcp_token_new_connect((struct sock *)icsk));
5717 + KUNIT_EXPECT_NE(test, 0, (int)ctx->token);
5718 + KUNIT_EXPECT_EQ(test, ctx->token, msk->token);
5719 +- KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(ctx->token));
5720 ++ KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(&init_net, ctx->token));
5721 + KUNIT_EXPECT_EQ(test, 2, (int)refcount_read(&sk->sk_refcnt));
5722 +
5723 + mptcp_token_destroy(msk);
5724 +- KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(ctx->token));
5725 ++ KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(&init_net, ctx->token));
5726 + }
5727 +
5728 + static void mptcp_token_test_accept(struct kunit *test)
5729 +@@ -90,11 +92,11 @@ static void mptcp_token_test_accept(struct kunit *test)
5730 + mptcp_token_new_request((struct request_sock *)req));
5731 + msk->token = req->token;
5732 + mptcp_token_accept(req, msk);
5733 +- KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(msk->token));
5734 ++ KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(&init_net, msk->token));
5735 +
5736 + /* this is now a no-op */
5737 + mptcp_token_destroy_request((struct request_sock *)req);
5738 +- KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(msk->token));
5739 ++ KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(&init_net, msk->token));
5740 +
5741 + /* cleanup */
5742 + mptcp_token_destroy(msk);
5743 +@@ -116,7 +118,7 @@ static void mptcp_token_test_destroyed(struct kunit *test)
5744 +
5745 + /* simulate race on removal */
5746 + refcount_set(&sk->sk_refcnt, 0);
5747 +- KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(msk->token));
5748 ++ KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(&init_net, msk->token));
5749 +
5750 + /* cleanup */
5751 + mptcp_token_destroy(msk);
5752 +diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
5753 +index 6186358eac7c5..6e391308431da 100644
5754 +--- a/net/netfilter/ipset/ip_set_hash_gen.h
5755 ++++ b/net/netfilter/ipset/ip_set_hash_gen.h
5756 +@@ -130,11 +130,11 @@ htable_size(u8 hbits)
5757 + {
5758 + size_t hsize;
5759 +
5760 +- /* We must fit both into u32 in jhash and size_t */
5761 ++ /* We must fit both into u32 in jhash and INT_MAX in kvmalloc_node() */
5762 + if (hbits > 31)
5763 + return 0;
5764 + hsize = jhash_size(hbits);
5765 +- if ((((size_t)-1) - sizeof(struct htable)) / sizeof(struct hbucket *)
5766 ++ if ((INT_MAX - sizeof(struct htable)) / sizeof(struct hbucket *)
5767 + < hsize)
5768 + return 0;
5769 +
5770 +diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
5771 +index c100c6b112c81..2c467c422dc63 100644
5772 +--- a/net/netfilter/ipvs/ip_vs_conn.c
5773 ++++ b/net/netfilter/ipvs/ip_vs_conn.c
5774 +@@ -1468,6 +1468,10 @@ int __init ip_vs_conn_init(void)
5775 + int idx;
5776 +
5777 + /* Compute size and mask */
5778 ++ if (ip_vs_conn_tab_bits < 8 || ip_vs_conn_tab_bits > 20) {
5779 ++ pr_info("conn_tab_bits not in [8, 20]. Using default value\n");
5780 ++ ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS;
5781 ++ }
5782 + ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
5783 + ip_vs_conn_tab_mask = ip_vs_conn_tab_size - 1;
5784 +
5785 +diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
5786 +index d31dbccbe7bd4..4f074d7653b8a 100644
5787 +--- a/net/netfilter/nf_conntrack_core.c
5788 ++++ b/net/netfilter/nf_conntrack_core.c
5789 +@@ -75,6 +75,9 @@ static __read_mostly struct kmem_cache *nf_conntrack_cachep;
5790 + static DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
5791 + static __read_mostly bool nf_conntrack_locks_all;
5792 +
5793 ++/* serialize hash resizes and nf_ct_iterate_cleanup */
5794 ++static DEFINE_MUTEX(nf_conntrack_mutex);
5795 ++
5796 + #define GC_SCAN_INTERVAL (120u * HZ)
5797 + #define GC_SCAN_MAX_DURATION msecs_to_jiffies(10)
5798 +
5799 +@@ -2192,28 +2195,31 @@ get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
5800 + spinlock_t *lockp;
5801 +
5802 + for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
5803 ++ struct hlist_nulls_head *hslot = &nf_conntrack_hash[*bucket];
5804 ++
5805 ++ if (hlist_nulls_empty(hslot))
5806 ++ continue;
5807 ++
5808 + lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
5809 + local_bh_disable();
5810 + nf_conntrack_lock(lockp);
5811 +- if (*bucket < nf_conntrack_htable_size) {
5812 +- hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) {
5813 +- if (NF_CT_DIRECTION(h) != IP_CT_DIR_REPLY)
5814 +- continue;
5815 +- /* All nf_conn objects are added to hash table twice, one
5816 +- * for original direction tuple, once for the reply tuple.
5817 +- *
5818 +- * Exception: In the IPS_NAT_CLASH case, only the reply
5819 +- * tuple is added (the original tuple already existed for
5820 +- * a different object).
5821 +- *
5822 +- * We only need to call the iterator once for each
5823 +- * conntrack, so we just use the 'reply' direction
5824 +- * tuple while iterating.
5825 +- */
5826 +- ct = nf_ct_tuplehash_to_ctrack(h);
5827 +- if (iter(ct, data))
5828 +- goto found;
5829 +- }
5830 ++ hlist_nulls_for_each_entry(h, n, hslot, hnnode) {
5831 ++ if (NF_CT_DIRECTION(h) != IP_CT_DIR_REPLY)
5832 ++ continue;
5833 ++ /* All nf_conn objects are added to hash table twice, one
5834 ++ * for original direction tuple, once for the reply tuple.
5835 ++ *
5836 ++ * Exception: In the IPS_NAT_CLASH case, only the reply
5837 ++ * tuple is added (the original tuple already existed for
5838 ++ * a different object).
5839 ++ *
5840 ++ * We only need to call the iterator once for each
5841 ++ * conntrack, so we just use the 'reply' direction
5842 ++ * tuple while iterating.
5843 ++ */
5844 ++ ct = nf_ct_tuplehash_to_ctrack(h);
5845 ++ if (iter(ct, data))
5846 ++ goto found;
5847 + }
5848 + spin_unlock(lockp);
5849 + local_bh_enable();
5850 +@@ -2231,26 +2237,20 @@ found:
5851 + static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data),
5852 + void *data, u32 portid, int report)
5853 + {
5854 +- unsigned int bucket = 0, sequence;
5855 ++ unsigned int bucket = 0;
5856 + struct nf_conn *ct;
5857 +
5858 + might_sleep();
5859 +
5860 +- for (;;) {
5861 +- sequence = read_seqcount_begin(&nf_conntrack_generation);
5862 +-
5863 +- while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
5864 +- /* Time to push up daises... */
5865 ++ mutex_lock(&nf_conntrack_mutex);
5866 ++ while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
5867 ++ /* Time to push up daisies... */
5868 +
5869 +- nf_ct_delete(ct, portid, report);
5870 +- nf_ct_put(ct);
5871 +- cond_resched();
5872 +- }
5873 +-
5874 +- if (!read_seqcount_retry(&nf_conntrack_generation, sequence))
5875 +- break;
5876 +- bucket = 0;
5877 ++ nf_ct_delete(ct, portid, report);
5878 ++ nf_ct_put(ct);
5879 ++ cond_resched();
5880 + }
5881 ++ mutex_unlock(&nf_conntrack_mutex);
5882 + }
5883 +
5884 + struct iter_data {
5885 +@@ -2486,8 +2486,10 @@ int nf_conntrack_hash_resize(unsigned int hashsize)
5886 + if (!hash)
5887 + return -ENOMEM;
5888 +
5889 ++ mutex_lock(&nf_conntrack_mutex);
5890 + old_size = nf_conntrack_htable_size;
5891 + if (old_size == hashsize) {
5892 ++ mutex_unlock(&nf_conntrack_mutex);
5893 + kvfree(hash);
5894 + return 0;
5895 + }
5896 +@@ -2523,6 +2525,8 @@ int nf_conntrack_hash_resize(unsigned int hashsize)
5897 + nf_conntrack_all_unlock();
5898 + local_bh_enable();
5899 +
5900 ++ mutex_unlock(&nf_conntrack_mutex);
5901 ++
5902 + synchronize_net();
5903 + kvfree(old_hash);
5904 + return 0;
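
The new nf_conntrack_mutex turns the resize/cleanup race into plain mutual exclusion: the cleanup walk no longer needs the seqcount retry loop because a resize can no longer run underneath it. A rough pthreads sketch of the same shape, with illustrative names rather than the kernel's:

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t table_mutex = PTHREAD_MUTEX_INITIALIZER;
    static void **table;
    static size_t table_size;

    /* With the mutex held, table/table_size cannot change underneath the
     * walk, so a single pass is enough. */
    static void cleanup_all(int (*iter)(void *))
    {
            pthread_mutex_lock(&table_mutex);
            for (size_t i = 0; i < table_size; i++)
                    if (table[i] && iter(table[i]))
                            table[i] = NULL;
            pthread_mutex_unlock(&table_mutex);
    }

    /* Resize takes the same mutex, so it serializes against cleanup_all(). */
    static int resize(size_t new_size)
    {
            void **n = calloc(new_size, sizeof(*n));

            if (!n)
                    return -1;
            pthread_mutex_lock(&table_mutex);
            /* rehash the old entries into n here */
            free(table);
            table = n;
            table_size = new_size;
            pthread_mutex_unlock(&table_mutex);
            return 0;
    }
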
5905 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
5906 +index 081437dd75b7e..b9546defdc280 100644
5907 +--- a/net/netfilter/nf_tables_api.c
5908 ++++ b/net/netfilter/nf_tables_api.c
5909 +@@ -4336,7 +4336,7 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
5910 + if (ops->privsize != NULL)
5911 + size = ops->privsize(nla, &desc);
5912 + alloc_size = sizeof(*set) + size + udlen;
5913 +- if (alloc_size < size)
5914 ++ if (alloc_size < size || alloc_size > INT_MAX)
5915 + return -ENOMEM;
5916 + set = kvzalloc(alloc_size, GFP_KERNEL);
5917 + if (!set)
5918 +@@ -9599,7 +9599,6 @@ static void __nft_release_table(struct net *net, struct nft_table *table)
5919 + table->use--;
5920 + nf_tables_chain_destroy(&ctx);
5921 + }
5922 +- list_del(&table->list);
5923 + nf_tables_table_destroy(&ctx);
5924 + }
5925 +
5926 +@@ -9612,6 +9611,8 @@ static void __nft_release_tables(struct net *net)
5927 + if (nft_table_has_owner(table))
5928 + continue;
5929 +
5930 ++ list_del(&table->list);
5931 ++
5932 + __nft_release_table(net, table);
5933 + }
5934 + }
5935 +@@ -9619,31 +9620,38 @@ static void __nft_release_tables(struct net *net)
5936 + static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event,
5937 + void *ptr)
5938 + {
5939 ++ struct nft_table *table, *to_delete[8];
5940 + struct nftables_pernet *nft_net;
5941 + struct netlink_notify *n = ptr;
5942 +- struct nft_table *table, *nt;
5943 + struct net *net = n->net;
5944 +- bool release = false;
5945 ++ unsigned int deleted;
5946 ++ bool restart = false;
5947 +
5948 + if (event != NETLINK_URELEASE || n->protocol != NETLINK_NETFILTER)
5949 + return NOTIFY_DONE;
5950 +
5951 + nft_net = nft_pernet(net);
5952 ++ deleted = 0;
5953 + mutex_lock(&nft_net->commit_mutex);
5954 ++again:
5955 + list_for_each_entry(table, &nft_net->tables, list) {
5956 + if (nft_table_has_owner(table) &&
5957 + n->portid == table->nlpid) {
5958 + __nft_release_hook(net, table);
5959 +- release = true;
5960 ++ list_del_rcu(&table->list);
5961 ++ to_delete[deleted++] = table;
5962 ++ if (deleted >= ARRAY_SIZE(to_delete))
5963 ++ break;
5964 + }
5965 + }
5966 +- if (release) {
5967 ++ if (deleted) {
5968 ++ restart = deleted >= ARRAY_SIZE(to_delete);
5969 + synchronize_rcu();
5970 +- list_for_each_entry_safe(table, nt, &nft_net->tables, list) {
5971 +- if (nft_table_has_owner(table) &&
5972 +- n->portid == table->nlpid)
5973 +- __nft_release_table(net, table);
5974 +- }
5975 ++ while (deleted)
5976 ++ __nft_release_table(net, to_delete[--deleted]);
5977 ++
5978 ++ if (restart)
5979 ++ goto again;
5980 + }
5981 + mutex_unlock(&nft_net->commit_mutex);
5982 +
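
The rewritten notifier gathers victims into a fixed eight-slot array, destroys them after the grace period, and rescans whenever the batch filled up, since more owned tables may remain. A self-contained sketch of that bounded-batch-plus-restart pattern on a plain singly linked list (all names are illustrative):

    #include <stdbool.h>
    #include <stdlib.h>

    struct table { struct table *next; int owner; };

    #define BATCH 8

    static void release_owned(struct table **head, int portid)
    {
            struct table *to_delete[BATCH];
            bool restart;

            do {
                    unsigned int deleted = 0;

                    restart = false;
                    for (struct table **pp = head; *pp; ) {
                            if ((*pp)->owner == portid) {
                                    struct table *t = *pp;

                                    *pp = t->next;           /* unlink */
                                    to_delete[deleted++] = t;
                                    if (deleted == BATCH) {
                                            restart = true;  /* batch full: rescan */
                                            break;
                                    }
                            } else {
                                    pp = &(*pp)->next;
                            }
                    }
                    /* the kernel waits for an RCU grace period here */
                    while (deleted)
                            free(to_delete[--deleted]);
            } while (restart);
    }
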
5983 +diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
5984 +index 272bcdb1392df..f69cc73c58130 100644
5985 +--- a/net/netfilter/nft_compat.c
5986 ++++ b/net/netfilter/nft_compat.c
5987 +@@ -19,6 +19,7 @@
5988 + #include <linux/netfilter_bridge/ebtables.h>
5989 + #include <linux/netfilter_arp/arp_tables.h>
5990 + #include <net/netfilter/nf_tables.h>
5991 ++#include <net/netfilter/nf_log.h>
5992 +
5993 + /* Used for matches where *info is larger than X byte */
5994 + #define NFT_MATCH_LARGE_THRESH 192
5995 +@@ -257,8 +258,22 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
5996 + nft_compat_wait_for_destructors();
5997 +
5998 + ret = xt_check_target(&par, size, proto, inv);
5999 +- if (ret < 0)
6000 ++ if (ret < 0) {
6001 ++ if (ret == -ENOENT) {
6002 ++ const char *modname = NULL;
6003 ++
6004 ++ if (strcmp(target->name, "LOG") == 0)
6005 ++ modname = "nf_log_syslog";
6006 ++ else if (strcmp(target->name, "NFLOG") == 0)
6007 ++ modname = "nfnetlink_log";
6008 ++
6009 ++ if (modname &&
6010 ++ nft_request_module(ctx->net, "%s", modname) == -EAGAIN)
6011 ++ return -EAGAIN;
6012 ++ }
6013 ++
6014 + return ret;
6015 ++ }
6016 +
6017 + /* The standard target cannot be used */
6018 + if (!target->target)
6019 +diff --git a/net/netfilter/xt_LOG.c b/net/netfilter/xt_LOG.c
6020 +index 2ff75f7637b09..f39244f9c0ed9 100644
6021 +--- a/net/netfilter/xt_LOG.c
6022 ++++ b/net/netfilter/xt_LOG.c
6023 +@@ -44,6 +44,7 @@ log_tg(struct sk_buff *skb, const struct xt_action_param *par)
6024 + static int log_tg_check(const struct xt_tgchk_param *par)
6025 + {
6026 + const struct xt_log_info *loginfo = par->targinfo;
6027 ++ int ret;
6028 +
6029 + if (par->family != NFPROTO_IPV4 && par->family != NFPROTO_IPV6)
6030 + return -EINVAL;
6031 +@@ -58,7 +59,14 @@ static int log_tg_check(const struct xt_tgchk_param *par)
6032 + return -EINVAL;
6033 + }
6034 +
6035 +- return nf_logger_find_get(par->family, NF_LOG_TYPE_LOG);
6036 ++ ret = nf_logger_find_get(par->family, NF_LOG_TYPE_LOG);
6037 ++ if (ret != 0 && !par->nft_compat) {
6038 ++ request_module("%s", "nf_log_syslog");
6039 ++
6040 ++ ret = nf_logger_find_get(par->family, NF_LOG_TYPE_LOG);
6041 ++ }
6042 ++
6043 ++ return ret;
6044 + }
6045 +
6046 + static void log_tg_destroy(const struct xt_tgdtor_param *par)
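
This hunk, the xt_NFLOG one below, and the nft_compat change above all apply the same recovery: when the logger backend is absent, request the module that provides it and retry the lookup exactly once. A compact sketch, where find_backend() and load_module() are hypothetical stand-ins for nf_logger_find_get() and request_module():

    #include <stdio.h>

    static int backend_loaded;

    static int find_backend(int family)
    {
            (void)family;
            return backend_loaded ? 0 : -2;  /* -2 playing the role of -ENOENT */
    }

    static void load_module(const char *name)
    {
            printf("loading %s\n", name);
            backend_loaded = 1;
    }

    static int get_logger(int family)
    {
            int ret = find_backend(family);

            if (ret != 0) {
                    load_module("nf_log_syslog");
                    ret = find_backend(family);  /* retry once; may still fail */
            }
            return ret;
    }
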
6047 +diff --git a/net/netfilter/xt_NFLOG.c b/net/netfilter/xt_NFLOG.c
6048 +index fb57932080598..e660c3710a109 100644
6049 +--- a/net/netfilter/xt_NFLOG.c
6050 ++++ b/net/netfilter/xt_NFLOG.c
6051 +@@ -42,13 +42,21 @@ nflog_tg(struct sk_buff *skb, const struct xt_action_param *par)
6052 + static int nflog_tg_check(const struct xt_tgchk_param *par)
6053 + {
6054 + const struct xt_nflog_info *info = par->targinfo;
6055 ++ int ret;
6056 +
6057 + if (info->flags & ~XT_NFLOG_MASK)
6058 + return -EINVAL;
6059 + if (info->prefix[sizeof(info->prefix) - 1] != '\0')
6060 + return -EINVAL;
6061 +
6062 +- return nf_logger_find_get(par->family, NF_LOG_TYPE_ULOG);
6063 ++ ret = nf_logger_find_get(par->family, NF_LOG_TYPE_ULOG);
6064 ++ if (ret != 0 && !par->nft_compat) {
6065 ++ request_module("%s", "nfnetlink_log");
6066 ++
6067 ++ ret = nf_logger_find_get(par->family, NF_LOG_TYPE_ULOG);
6068 ++ }
6069 ++
6070 ++ return ret;
6071 + }
6072 +
6073 + static void nflog_tg_destroy(const struct xt_tgdtor_param *par)
6074 +diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
6075 +index d7869a984881e..d2a4e31d963d3 100644
6076 +--- a/net/sched/cls_flower.c
6077 ++++ b/net/sched/cls_flower.c
6078 +@@ -2188,18 +2188,24 @@ static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
6079 +
6080 + arg->count = arg->skip;
6081 +
6082 ++ rcu_read_lock();
6083 + idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
6084 + /* don't return filters that are being deleted */
6085 + if (!refcount_inc_not_zero(&f->refcnt))
6086 + continue;
6087 ++ rcu_read_unlock();
6088 ++
6089 + if (arg->fn(tp, f, arg) < 0) {
6090 + __fl_put(f);
6091 + arg->stop = 1;
6092 ++ rcu_read_lock();
6093 + break;
6094 + }
6095 + __fl_put(f);
6096 + arg->count++;
6097 ++ rcu_read_lock();
6098 + }
6099 ++ rcu_read_unlock();
6100 + arg->cookie = id;
6101 + }
6102 +
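
fl_walk() now drops the RCU read lock around arg->fn(), which may sleep; the filter stays valid meanwhile because it was pinned with refcount_inc_not_zero() first, and entries already on their way to zero are skipped. A sketch of that pin operation using C11 atomics (an illustrative type, not the kernel's refcount_t):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct filter { atomic_int refcnt; };

    /* Take a reference only while the object is still alive; never
     * resurrect an entry whose count has already reached zero. */
    static bool pin(struct filter *f)
    {
            int v = atomic_load(&f->refcnt);

            while (v != 0)
                    if (atomic_compare_exchange_weak(&f->refcnt, &v, v + 1))
                            return true;
            return false;
    }

With the reference held, the walker can unlock, run the callback, and relock before continuing the iteration.
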
6103 +diff --git a/net/sctp/input.c b/net/sctp/input.c
6104 +index 5ef86fdb11769..1f1786021d9c8 100644
6105 +--- a/net/sctp/input.c
6106 ++++ b/net/sctp/input.c
6107 +@@ -702,7 +702,7 @@ static int sctp_rcv_ootb(struct sk_buff *skb)
6108 + ch = skb_header_pointer(skb, offset, sizeof(*ch), &_ch);
6109 +
6110 + /* Break out if chunk length is less than minimal. */
6111 +- if (ntohs(ch->length) < sizeof(_ch))
6112 ++ if (!ch || ntohs(ch->length) < sizeof(_ch))
6113 + break;
6114 +
6115 + ch_end = offset + SCTP_PAD4(ntohs(ch->length));
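
skb_header_pointer() returns NULL when the requested bytes run past the end of the packet, so the one-line fix is to test the pointer before reading ch->length. A user-space sketch of the same bounds-checked read, with read_hdr() as a hypothetical analogue:

    #include <stddef.h>
    #include <string.h>

    /* Copy len bytes at off into buf and return buf, or NULL when the
     * packet is too short; written so off + len cannot overflow. */
    static void *read_hdr(const unsigned char *pkt, size_t pkt_len,
                          size_t off, size_t len, void *buf)
    {
            if (len > pkt_len || off > pkt_len - len)
                    return NULL;
            return memcpy(buf, pkt + off, len);
    }
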
6116 +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
6117 +index 91ff09d833e8f..f96ee27d9ff22 100644
6118 +--- a/net/unix/af_unix.c
6119 ++++ b/net/unix/af_unix.c
6120 +@@ -600,20 +600,42 @@ static void unix_release_sock(struct sock *sk, int embrion)
6121 +
6122 + static void init_peercred(struct sock *sk)
6123 + {
6124 +- put_pid(sk->sk_peer_pid);
6125 +- if (sk->sk_peer_cred)
6126 +- put_cred(sk->sk_peer_cred);
6127 ++ const struct cred *old_cred;
6128 ++ struct pid *old_pid;
6129 ++
6130 ++ spin_lock(&sk->sk_peer_lock);
6131 ++ old_pid = sk->sk_peer_pid;
6132 ++ old_cred = sk->sk_peer_cred;
6133 + sk->sk_peer_pid = get_pid(task_tgid(current));
6134 + sk->sk_peer_cred = get_current_cred();
6135 ++ spin_unlock(&sk->sk_peer_lock);
6136 ++
6137 ++ put_pid(old_pid);
6138 ++ put_cred(old_cred);
6139 + }
6140 +
6141 + static void copy_peercred(struct sock *sk, struct sock *peersk)
6142 + {
6143 +- put_pid(sk->sk_peer_pid);
6144 +- if (sk->sk_peer_cred)
6145 +- put_cred(sk->sk_peer_cred);
6146 ++ const struct cred *old_cred;
6147 ++ struct pid *old_pid;
6148 ++
6149 ++ if (sk < peersk) {
6150 ++ spin_lock(&sk->sk_peer_lock);
6151 ++ spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
6152 ++ } else {
6153 ++ spin_lock(&peersk->sk_peer_lock);
6154 ++ spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
6155 ++ }
6156 ++ old_pid = sk->sk_peer_pid;
6157 ++ old_cred = sk->sk_peer_cred;
6158 + sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
6159 + sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
6160 ++
6161 ++ spin_unlock(&sk->sk_peer_lock);
6162 ++ spin_unlock(&peersk->sk_peer_lock);
6163 ++
6164 ++ put_pid(old_pid);
6165 ++ put_cred(old_cred);
6166 + }
6167 +
6168 + static int unix_listen(struct socket *sock, int backlog)
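
copy_peercred() must lock both sockets, and two threads running it in opposite directions would deadlock if each took its own lock first; acquiring the locks in address order makes the ordering global, and the old pid/cred references are dropped only after unlocking. A pthreads sketch of the ordering idea (struct peer is illustrative):

    #include <pthread.h>
    #include <stdint.h>

    struct peer { pthread_mutex_t lock; };

    /* Always lock the lower-addressed peer first so every thread agrees
     * on the order, which rules out AB/BA deadlock. */
    static void lock_pair(struct peer *a, struct peer *b)
    {
            if ((uintptr_t)a < (uintptr_t)b) {
                    pthread_mutex_lock(&a->lock);
                    pthread_mutex_lock(&b->lock);
            } else {
                    pthread_mutex_lock(&b->lock);
                    pthread_mutex_lock(&a->lock);
            }
    }

    static void unlock_pair(struct peer *a, struct peer *b)
    {
            pthread_mutex_unlock(&a->lock);
            pthread_mutex_unlock(&b->lock);
    }
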
6169 +diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
6170 +index 6c0a4a67ad2e3..6f30231bdb884 100644
6171 +--- a/sound/core/rawmidi.c
6172 ++++ b/sound/core/rawmidi.c
6173 +@@ -873,12 +873,21 @@ static long snd_rawmidi_ioctl(struct file *file, unsigned int cmd, unsigned long
6174 + return -EINVAL;
6175 + }
6176 + }
6177 ++ case SNDRV_RAWMIDI_IOCTL_USER_PVERSION:
6178 ++ if (get_user(rfile->user_pversion, (unsigned int __user *)arg))
6179 ++ return -EFAULT;
6180 ++ return 0;
6181 ++
6182 + case SNDRV_RAWMIDI_IOCTL_PARAMS:
6183 + {
6184 + struct snd_rawmidi_params params;
6185 +
6186 + if (copy_from_user(&params, argp, sizeof(struct snd_rawmidi_params)))
6187 + return -EFAULT;
6188 ++ if (rfile->user_pversion < SNDRV_PROTOCOL_VERSION(2, 0, 2)) {
6189 ++ params.mode = 0;
6190 ++ memset(params.reserved, 0, sizeof(params.reserved));
6191 ++ }
6192 + switch (params.stream) {
6193 + case SNDRV_RAWMIDI_STREAM_OUTPUT:
6194 + if (rfile->output == NULL)
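
The version gate keeps old binaries working: a client that never declared protocol 2.0.2 predates the mode and reserved fields, so whatever it left there is uninitialized garbage that must be cleared before use. A sketch under assumed field names:

    #include <string.h>

    #define VER(a, b, c) (((a) << 16) | ((b) << 8) | (c))

    struct params { int stream, mode; unsigned char reserved[12]; };

    static void sanitize(struct params *p, unsigned int client_version)
    {
            /* Fields unknown to old clients hold garbage: zero them. */
            if (client_version < VER(2, 0, 2)) {
                    p->mode = 0;
                    memset(p->reserved, 0, sizeof(p->reserved));
            }
    }
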
6195 +diff --git a/sound/firewire/motu/amdtp-motu.c b/sound/firewire/motu/amdtp-motu.c
6196 +index 5388b85fb60e5..a18c2c033e836 100644
6197 +--- a/sound/firewire/motu/amdtp-motu.c
6198 ++++ b/sound/firewire/motu/amdtp-motu.c
6199 +@@ -276,10 +276,11 @@ static void __maybe_unused copy_message(u64 *frames, __be32 *buffer,
6200 +
6201 + /* This is just for v2/v3 protocol. */
6202 + for (i = 0; i < data_blocks; ++i) {
6203 +- *frames = (be32_to_cpu(buffer[1]) << 16) |
6204 +- (be32_to_cpu(buffer[2]) >> 16);
6205 ++ *frames = be32_to_cpu(buffer[1]);
6206 ++ *frames <<= 16;
6207 ++ *frames |= be32_to_cpu(buffer[2]) >> 16;
6208 ++ ++frames;
6209 + buffer += data_block_quadlets;
6210 +- frames++;
6211 + }
6212 + }
6213 +
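
The old expression computed (u32 << 16) | (u32 >> 16) entirely in 32-bit arithmetic, so the top 16 bits of the first word were shifted out before the result was widened into the 64-bit frame; assigning first and shifting afterwards keeps them. A small demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t hi = 0x00012345, lo = 0x6789abcd;
            uint64_t wrong, right;

            wrong = (hi << 16) | (lo >> 16);  /* 32-bit shift drops hi's top bits */

            right = hi;                       /* widen to 64 bits first... */
            right <<= 16;                     /* ...so the shift loses nothing */
            right |= lo >> 16;

            /* prints wrong=0x23456789 right=0x123456789 */
            printf("wrong=0x%llx right=0x%llx\n",
                   (unsigned long long)wrong, (unsigned long long)right);
            return 0;
    }
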
6214 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
6215 +index 70516527ebce3..0b9230a274b0a 100644
6216 +--- a/sound/pci/hda/patch_realtek.c
6217 ++++ b/sound/pci/hda/patch_realtek.c
6218 +@@ -6442,6 +6442,20 @@ static void alc_fixup_thinkpad_acpi(struct hda_codec *codec,
6219 + hda_fixup_thinkpad_acpi(codec, fix, action);
6220 + }
6221 +
6222 ++/* Fixup for Lenovo Legion 15IMHg05 speaker output on headset removal. */
6223 ++static void alc287_fixup_legion_15imhg05_speakers(struct hda_codec *codec,
6224 ++ const struct hda_fixup *fix,
6225 ++ int action)
6226 ++{
6227 ++ struct alc_spec *spec = codec->spec;
6228 ++
6229 ++ switch (action) {
6230 ++ case HDA_FIXUP_ACT_PRE_PROBE:
6231 ++ spec->gen.suppress_auto_mute = 1;
6232 ++ break;
6233 ++ }
6234 ++}
6235 ++
6236 + /* for alc295_fixup_hp_top_speakers */
6237 + #include "hp_x360_helper.c"
6238 +
6239 +@@ -6659,6 +6673,10 @@ enum {
6240 + ALC623_FIXUP_LENOVO_THINKSTATION_P340,
6241 + ALC255_FIXUP_ACER_HEADPHONE_AND_MIC,
6242 + ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST,
6243 ++ ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS,
6244 ++ ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE,
6245 ++ ALC287_FIXUP_YOGA7_14ITL_SPEAKERS,
6246 ++ ALC287_FIXUP_13S_GEN2_SPEAKERS
6247 + };
6248 +
6249 + static const struct hda_fixup alc269_fixups[] = {
6250 +@@ -8249,6 +8267,113 @@ static const struct hda_fixup alc269_fixups[] = {
6251 + .chained = true,
6252 + .chain_id = ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
6253 + },
6254 ++ [ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS] = {
6255 ++ .type = HDA_FIXUP_VERBS,
6256 ++ //.v.verbs = legion_15imhg05_coefs,
6257 ++ .v.verbs = (const struct hda_verb[]) {
6258 ++ // set left speaker Legion 7i.
6259 ++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
6260 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x41 },
6261 ++
6262 ++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
6263 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0xc },
6264 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
6265 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x1a },
6266 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
6267 ++
6268 ++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
6269 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
6270 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
6271 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
6272 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
6273 ++
6274 ++ // set right speaker Legion 7i.
6275 ++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
6276 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x42 },
6277 ++
6278 ++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
6279 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0xc },
6280 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
6281 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x2a },
6282 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
6283 ++
6284 ++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
6285 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
6286 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
6287 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
6288 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
6289 ++ {}
6290 ++ },
6291 ++ .chained = true,
6292 ++ .chain_id = ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE,
6293 ++ },
6294 ++ [ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE] = {
6295 ++ .type = HDA_FIXUP_FUNC,
6296 ++ .v.func = alc287_fixup_legion_15imhg05_speakers,
6297 ++ .chained = true,
6298 ++ .chain_id = ALC269_FIXUP_HEADSET_MODE,
6299 ++ },
6300 ++ [ALC287_FIXUP_YOGA7_14ITL_SPEAKERS] = {
6301 ++ .type = HDA_FIXUP_VERBS,
6302 ++ .v.verbs = (const struct hda_verb[]) {
6303 ++ // set left speaker Yoga 7i.
6304 ++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
6305 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x41 },
6306 ++
6307 ++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
6308 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0xc },
6309 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
6310 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x1a },
6311 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
6312 ++
6313 ++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
6314 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
6315 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
6316 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
6317 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
6318 ++
6319 ++ // set right speaker Yoga 7i.
6320 ++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
6321 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x46 },
6322 ++
6323 ++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
6324 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0xc },
6325 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
6326 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x2a },
6327 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
6328 ++
6329 ++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
6330 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
6331 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
6332 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
6333 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
6334 ++ {}
6335 ++ },
6336 ++ .chained = true,
6337 ++ .chain_id = ALC269_FIXUP_HEADSET_MODE,
6338 ++ },
6339 ++ [ALC287_FIXUP_13S_GEN2_SPEAKERS] = {
6340 ++ .type = HDA_FIXUP_VERBS,
6341 ++ .v.verbs = (const struct hda_verb[]) {
6342 ++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
6343 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x41 },
6344 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
6345 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
6346 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
6347 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
6348 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
6349 ++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
6350 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x42 },
6351 ++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
6352 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
6353 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
6354 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
6355 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
6356 ++ {}
6357 ++ },
6358 ++ .chained = true,
6359 ++ .chain_id = ALC269_FIXUP_HEADSET_MODE,
6360 ++ },
6361 + };
6362 +
6363 + static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6364 +@@ -8643,6 +8768,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6365 + SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
6366 + SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
6367 + SND_PCI_QUIRK(0x17aa, 0x3843, "Yoga 9i", ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP),
6368 ++ SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS),
6369 ++ SND_PCI_QUIRK(0x17aa, 0x3852, "Lenovo Yoga 7 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
6370 ++ SND_PCI_QUIRK(0x17aa, 0x3853, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
6371 ++ SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS),
6372 + SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
6373 + SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
6374 + SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
6375 +diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
6376 +index a961f837cd094..bda66b30e063c 100644
6377 +--- a/sound/soc/fsl/fsl_esai.c
6378 ++++ b/sound/soc/fsl/fsl_esai.c
6379 +@@ -1073,6 +1073,16 @@ static int fsl_esai_probe(struct platform_device *pdev)
6380 + if (ret < 0)
6381 + goto err_pm_get_sync;
6382 +
6383 ++ /*
6384 ++ * Register the platform component before registering the cpu dai, because
6385 ++ * there is no defer-probe handling for platform components in snd_soc_add_pcm_runtime().
6386 ++ */
6387 ++ ret = imx_pcm_dma_init(pdev, IMX_ESAI_DMABUF_SIZE);
6388 ++ if (ret) {
6389 ++ dev_err(&pdev->dev, "failed to init imx pcm dma: %d\n", ret);
6390 ++ goto err_pm_get_sync;
6391 ++ }
6392 ++
6393 + ret = devm_snd_soc_register_component(&pdev->dev, &fsl_esai_component,
6394 + &fsl_esai_dai, 1);
6395 + if (ret) {
6396 +@@ -1082,12 +1092,6 @@ static int fsl_esai_probe(struct platform_device *pdev)
6397 +
6398 + INIT_WORK(&esai_priv->work, fsl_esai_hw_reset);
6399 +
6400 +- ret = imx_pcm_dma_init(pdev, IMX_ESAI_DMABUF_SIZE);
6401 +- if (ret) {
6402 +- dev_err(&pdev->dev, "failed to init imx pcm dma: %d\n", ret);
6403 +- goto err_pm_get_sync;
6404 +- }
6405 +-
6406 + return ret;
6407 +
6408 + err_pm_get_sync:
6409 +diff --git a/sound/soc/fsl/fsl_micfil.c b/sound/soc/fsl/fsl_micfil.c
6410 +index 8c0c75ce9490f..9f90989ac59a6 100644
6411 +--- a/sound/soc/fsl/fsl_micfil.c
6412 ++++ b/sound/soc/fsl/fsl_micfil.c
6413 +@@ -737,18 +737,23 @@ static int fsl_micfil_probe(struct platform_device *pdev)
6414 + pm_runtime_enable(&pdev->dev);
6415 + regcache_cache_only(micfil->regmap, true);
6416 +
6417 ++ /*
6418 ++ * Register the platform component before registering the cpu dai, because
6419 ++ * there is no defer-probe handling for platform components in snd_soc_add_pcm_runtime().
6420 ++ */
6421 ++ ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
6422 ++ if (ret) {
6423 ++ dev_err(&pdev->dev, "failed to pcm register\n");
6424 ++ return ret;
6425 ++ }
6426 ++
6427 + ret = devm_snd_soc_register_component(&pdev->dev, &fsl_micfil_component,
6428 + &fsl_micfil_dai, 1);
6429 + if (ret) {
6430 + dev_err(&pdev->dev, "failed to register component %s\n",
6431 + fsl_micfil_component.name);
6432 +- return ret;
6433 + }
6434 +
6435 +- ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
6436 +- if (ret)
6437 +- dev_err(&pdev->dev, "failed to pcm register\n");
6438 +-
6439 + return ret;
6440 + }
6441 +
6442 +diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
6443 +index 223fcd15bfccc..38f6362099d58 100644
6444 +--- a/sound/soc/fsl/fsl_sai.c
6445 ++++ b/sound/soc/fsl/fsl_sai.c
6446 +@@ -1152,11 +1152,10 @@ static int fsl_sai_probe(struct platform_device *pdev)
6447 + if (ret < 0)
6448 + goto err_pm_get_sync;
6449 +
6450 +- ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component,
6451 +- &sai->cpu_dai_drv, 1);
6452 +- if (ret)
6453 +- goto err_pm_get_sync;
6454 +-
6455 ++ /*
6456 ++ * Register the platform component before registering the cpu dai, because
6457 ++ * there is no defer-probe handling for platform components in snd_soc_add_pcm_runtime().
6458 ++ */
6459 + if (sai->soc_data->use_imx_pcm) {
6460 + ret = imx_pcm_dma_init(pdev, IMX_SAI_DMABUF_SIZE);
6461 + if (ret)
6462 +@@ -1167,6 +1166,11 @@ static int fsl_sai_probe(struct platform_device *pdev)
6463 + goto err_pm_get_sync;
6464 + }
6465 +
6466 ++ ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component,
6467 ++ &sai->cpu_dai_drv, 1);
6468 ++ if (ret)
6469 ++ goto err_pm_get_sync;
6470 ++
6471 + return ret;
6472 +
6473 + err_pm_get_sync:
6474 +diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c
6475 +index 8ffb1a6048d63..1c53719bb61e2 100644
6476 +--- a/sound/soc/fsl/fsl_spdif.c
6477 ++++ b/sound/soc/fsl/fsl_spdif.c
6478 +@@ -1434,16 +1434,20 @@ static int fsl_spdif_probe(struct platform_device *pdev)
6479 + pm_runtime_enable(&pdev->dev);
6480 + regcache_cache_only(spdif_priv->regmap, true);
6481 +
6482 +- ret = devm_snd_soc_register_component(&pdev->dev, &fsl_spdif_component,
6483 +- &spdif_priv->cpu_dai_drv, 1);
6484 ++ /*
6485 ++ * Register the platform component before registering the cpu dai, because
6486 ++ * there is no defer-probe handling for platform components in snd_soc_add_pcm_runtime().
6487 ++ */
6488 ++ ret = imx_pcm_dma_init(pdev, IMX_SPDIF_DMABUF_SIZE);
6489 + if (ret) {
6490 +- dev_err(&pdev->dev, "failed to register DAI: %d\n", ret);
6491 ++ dev_err_probe(&pdev->dev, ret, "imx_pcm_dma_init failed\n");
6492 + goto err_pm_disable;
6493 + }
6494 +
6495 +- ret = imx_pcm_dma_init(pdev, IMX_SPDIF_DMABUF_SIZE);
6496 ++ ret = devm_snd_soc_register_component(&pdev->dev, &fsl_spdif_component,
6497 ++ &spdif_priv->cpu_dai_drv, 1);
6498 + if (ret) {
6499 +- dev_err_probe(&pdev->dev, ret, "imx_pcm_dma_init failed\n");
6500 ++ dev_err(&pdev->dev, "failed to register DAI: %d\n", ret);
6501 + goto err_pm_disable;
6502 + }
6503 +
6504 +diff --git a/sound/soc/fsl/fsl_xcvr.c b/sound/soc/fsl/fsl_xcvr.c
6505 +index fb7c29fc39d75..477d16713e72e 100644
6506 +--- a/sound/soc/fsl/fsl_xcvr.c
6507 ++++ b/sound/soc/fsl/fsl_xcvr.c
6508 +@@ -1217,18 +1217,23 @@ static int fsl_xcvr_probe(struct platform_device *pdev)
6509 + pm_runtime_enable(dev);
6510 + regcache_cache_only(xcvr->regmap, true);
6511 +
6512 ++ /*
6513 ++ * Register the platform component before registering the cpu dai, because
6514 ++ * there is no defer-probe handling for platform components in snd_soc_add_pcm_runtime().
6515 ++ */
6516 ++ ret = devm_snd_dmaengine_pcm_register(dev, NULL, 0);
6517 ++ if (ret) {
6518 ++ dev_err(dev, "failed to pcm register\n");
6519 ++ return ret;
6520 ++ }
6521 ++
6522 + ret = devm_snd_soc_register_component(dev, &fsl_xcvr_comp,
6523 + &fsl_xcvr_dai, 1);
6524 + if (ret) {
6525 + dev_err(dev, "failed to register component %s\n",
6526 + fsl_xcvr_comp.name);
6527 +- return ret;
6528 + }
6529 +
6530 +- ret = devm_snd_dmaengine_pcm_register(dev, NULL, 0);
6531 +- if (ret)
6532 +- dev_err(dev, "failed to pcm register\n");
6533 +-
6534 + return ret;
6535 + }
6536 +
6537 +diff --git a/sound/soc/mediatek/common/mtk-afe-fe-dai.c b/sound/soc/mediatek/common/mtk-afe-fe-dai.c
6538 +index 3cb2adf420bbf..ab7bbd53bb013 100644
6539 +--- a/sound/soc/mediatek/common/mtk-afe-fe-dai.c
6540 ++++ b/sound/soc/mediatek/common/mtk-afe-fe-dai.c
6541 +@@ -334,9 +334,11 @@ int mtk_afe_suspend(struct snd_soc_component *component)
6542 + devm_kcalloc(dev, afe->reg_back_up_list_num,
6543 + sizeof(unsigned int), GFP_KERNEL);
6544 +
6545 +- for (i = 0; i < afe->reg_back_up_list_num; i++)
6546 +- regmap_read(regmap, afe->reg_back_up_list[i],
6547 +- &afe->reg_back_up[i]);
6548 ++ if (afe->reg_back_up) {
6549 ++ for (i = 0; i < afe->reg_back_up_list_num; i++)
6550 ++ regmap_read(regmap, afe->reg_back_up_list[i],
6551 ++ &afe->reg_back_up[i]);
6552 ++ }
6553 +
6554 + afe->suspended = true;
6555 + afe->runtime_suspend(dev);
6556 +@@ -356,12 +358,13 @@ int mtk_afe_resume(struct snd_soc_component *component)
6557 +
6558 + afe->runtime_resume(dev);
6559 +
6560 +- if (!afe->reg_back_up)
6561 ++ if (!afe->reg_back_up) {
6562 + dev_dbg(dev, "%s no reg_backup\n", __func__);
6563 +-
6564 +- for (i = 0; i < afe->reg_back_up_list_num; i++)
6565 +- mtk_regmap_write(regmap, afe->reg_back_up_list[i],
6566 +- afe->reg_back_up[i]);
6567 ++ } else {
6568 ++ for (i = 0; i < afe->reg_back_up_list_num; i++)
6569 ++ mtk_regmap_write(regmap, afe->reg_back_up_list[i],
6570 ++ afe->reg_back_up[i]);
6571 ++ }
6572 +
6573 + afe->suspended = false;
6574 + return 0;
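
devm_kcalloc() can fail, and the fix simply skips the backup and restore loops when the array is absent rather than dereferencing NULL. A sketch of the guarded suspend side, with an illustrative register list and reader:

    #include <stdlib.h>

    static const unsigned int reg_list[] = { 0x10, 0x14, 0x18 };
    #define NREGS (sizeof(reg_list) / sizeof(reg_list[0]))

    static unsigned int *backup;

    static void suspend_regs(unsigned int (*read_reg)(unsigned int))
    {
            if (!backup)
                    backup = calloc(NREGS, sizeof(*backup));

            if (backup) {  /* the allocation may have failed */
                    for (size_t i = 0; i < NREGS; i++)
                            backup[i] = read_reg(reg_list[i]);
            }
    }
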
6575 +diff --git a/sound/soc/sof/imx/imx8.c b/sound/soc/sof/imx/imx8.c
6576 +index 12fedf0984bd9..7e9723a10d02e 100644
6577 +--- a/sound/soc/sof/imx/imx8.c
6578 ++++ b/sound/soc/sof/imx/imx8.c
6579 +@@ -365,7 +365,14 @@ static int imx8_remove(struct snd_sof_dev *sdev)
6580 + /* on i.MX8 there is 1 to 1 match between type and BAR idx */
6581 + static int imx8_get_bar_index(struct snd_sof_dev *sdev, u32 type)
6582 + {
6583 +- return type;
6584 ++ /* Only IRAM and SRAM bars are valid */
6585 ++ switch (type) {
6586 ++ case SOF_FW_BLK_TYPE_IRAM:
6587 ++ case SOF_FW_BLK_TYPE_SRAM:
6588 ++ return type;
6589 ++ default:
6590 ++ return -EINVAL;
6591 ++ }
6592 + }
6593 +
6594 + static void imx8_ipc_msg_data(struct snd_sof_dev *sdev,
6595 +diff --git a/sound/soc/sof/imx/imx8m.c b/sound/soc/sof/imx/imx8m.c
6596 +index cb822d9537678..892e1482f97fa 100644
6597 +--- a/sound/soc/sof/imx/imx8m.c
6598 ++++ b/sound/soc/sof/imx/imx8m.c
6599 +@@ -228,7 +228,14 @@ static int imx8m_remove(struct snd_sof_dev *sdev)
6600 + /* on i.MX8 there is 1 to 1 match between type and BAR idx */
6601 + static int imx8m_get_bar_index(struct snd_sof_dev *sdev, u32 type)
6602 + {
6603 +- return type;
6604 ++ /* Only IRAM and SRAM bars are valid */
6605 ++ switch (type) {
6606 ++ case SOF_FW_BLK_TYPE_IRAM:
6607 ++ case SOF_FW_BLK_TYPE_SRAM:
6608 ++ return type;
6609 ++ default:
6610 ++ return -EINVAL;
6611 ++ }
6612 + }
6613 +
6614 + static void imx8m_ipc_msg_data(struct snd_sof_dev *sdev,
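
Both imx8 and imx8m previously returned the firmware-supplied block type verbatim as a BAR index, letting malformed firmware index out of range; the switch whitelists the two types that actually have a BAR and rejects the rest. A sketch with illustrative enum values:

    #include <errno.h>

    enum blk_type { BLK_IRAM = 3, BLK_SRAM = 4 };  /* illustrative values */

    /* 1:1 type-to-BAR mapping, but only for types that have a BAR. */
    static int get_bar_index(unsigned int type)
    {
            switch (type) {
            case BLK_IRAM:
            case BLK_SRAM:
                    return (int)type;
            default:
                    return -EINVAL;
            }
    }
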
6615 +diff --git a/sound/soc/sof/xtensa/core.c b/sound/soc/sof/xtensa/core.c
6616 +index bbb9a2282ed9e..f6e3411b33cf1 100644
6617 +--- a/sound/soc/sof/xtensa/core.c
6618 ++++ b/sound/soc/sof/xtensa/core.c
6619 +@@ -122,9 +122,9 @@ static void xtensa_stack(struct snd_sof_dev *sdev, void *oops, u32 *stack,
6620 + * 0x0049fbb0: 8000f2d0 0049fc00 6f6c6c61 00632e63
6621 + */
6622 + for (i = 0; i < stack_words; i += 4) {
6623 +- hex_dump_to_buffer(stack + i * 4, 16, 16, 4,
6624 ++ hex_dump_to_buffer(stack + i, 16, 16, 4,
6625 + buf, sizeof(buf), false);
6626 +- dev_err(sdev->dev, "0x%08x: %s\n", stack_ptr + i, buf);
6627 ++ dev_err(sdev->dev, "0x%08x: %s\n", stack_ptr + i * 4, buf);
6628 + }
6629 + }
6630 +
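
Because stack is a pointer to 32-bit words, stack + i already advances i * 4 bytes, so the old stack + i * 4 skipped three quarters of the data; the printed address, being a plain integer, is the operand that needs the byte scaling. A sketch of the corrected indexing (assumes words is a multiple of four):

    #include <inttypes.h>
    #include <stdio.h>

    static void dump_stack(uint32_t base, const uint32_t *stack,
                           unsigned int words)
    {
            for (unsigned int i = 0; i < words; i += 4)
                    printf("0x%08" PRIx32 ": %08" PRIx32 " %08" PRIx32
                           " %08" PRIx32 " %08" PRIx32 "\n",
                           base + i * 4,            /* address: byte offset */
                           stack[i], stack[i + 1],  /* pointer: element index */
                           stack[i + 2], stack[i + 3]);
    }
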
6631 +diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c
6632 +index 10911a8cad0f2..2df880cefdaee 100644
6633 +--- a/tools/lib/bpf/linker.c
6634 ++++ b/tools/lib/bpf/linker.c
6635 +@@ -1649,11 +1649,17 @@ static bool btf_is_non_static(const struct btf_type *t)
6636 + static int find_glob_sym_btf(struct src_obj *obj, Elf64_Sym *sym, const char *sym_name,
6637 + int *out_btf_sec_id, int *out_btf_id)
6638 + {
6639 +- int i, j, n = btf__get_nr_types(obj->btf), m, btf_id = 0;
6640 ++ int i, j, n, m, btf_id = 0;
6641 + const struct btf_type *t;
6642 + const struct btf_var_secinfo *vi;
6643 + const char *name;
6644 +
6645 ++ if (!obj->btf) {
6646 ++ pr_warn("failed to find BTF info for object '%s'\n", obj->filename);
6647 ++ return -EINVAL;
6648 ++ }
6649 ++
6650 ++ n = btf__get_nr_types(obj->btf);
6651 + for (i = 1; i <= n; i++) {
6652 + t = btf__type_by_id(obj->btf, i);
6653 +
6654 +diff --git a/tools/objtool/special.c b/tools/objtool/special.c
6655 +index bc925cf19e2de..f1428e32a5052 100644
6656 +--- a/tools/objtool/special.c
6657 ++++ b/tools/objtool/special.c
6658 +@@ -58,6 +58,24 @@ void __weak arch_handle_alternative(unsigned short feature, struct special_alt *
6659 + {
6660 + }
6661 +
6662 ++static bool reloc2sec_off(struct reloc *reloc, struct section **sec, unsigned long *off)
6663 ++{
6664 ++ switch (reloc->sym->type) {
6665 ++ case STT_FUNC:
6666 ++ *sec = reloc->sym->sec;
6667 ++ *off = reloc->sym->offset + reloc->addend;
6668 ++ return true;
6669 ++
6670 ++ case STT_SECTION:
6671 ++ *sec = reloc->sym->sec;
6672 ++ *off = reloc->addend;
6673 ++ return true;
6674 ++
6675 ++ default:
6676 ++ return false;
6677 ++ }
6678 ++}
6679 ++
6680 + static int get_alt_entry(struct elf *elf, struct special_entry *entry,
6681 + struct section *sec, int idx,
6682 + struct special_alt *alt)
6683 +@@ -91,15 +109,14 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
6684 + WARN_FUNC("can't find orig reloc", sec, offset + entry->orig);
6685 + return -1;
6686 + }
6687 +- if (orig_reloc->sym->type != STT_SECTION) {
6688 +- WARN_FUNC("don't know how to handle non-section reloc symbol %s",
6689 +- sec, offset + entry->orig, orig_reloc->sym->name);
6690 ++ if (!reloc2sec_off(orig_reloc, &alt->orig_sec, &alt->orig_off)) {
6691 ++ WARN_FUNC("don't know how to handle reloc symbol type %d: %s",
6692 ++ sec, offset + entry->orig,
6693 ++ orig_reloc->sym->type,
6694 ++ orig_reloc->sym->name);
6695 + return -1;
6696 + }
6697 +
6698 +- alt->orig_sec = orig_reloc->sym->sec;
6699 +- alt->orig_off = orig_reloc->addend;
6700 +-
6701 + if (!entry->group || alt->new_len) {
6702 + new_reloc = find_reloc_by_dest(elf, sec, offset + entry->new);
6703 + if (!new_reloc) {
6704 +@@ -116,8 +133,13 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
6705 + if (arch_is_retpoline(new_reloc->sym))
6706 + return 1;
6707 +
6708 +- alt->new_sec = new_reloc->sym->sec;
6709 +- alt->new_off = (unsigned int)new_reloc->addend;
6710 ++ if (!reloc2sec_off(new_reloc, &alt->new_sec, &alt->new_off)) {
6711 ++ WARN_FUNC("don't know how to handle reloc symbol type %d: %s",
6712 ++ sec, offset + entry->new,
6713 ++ new_reloc->sym->type,
6714 ++ new_reloc->sym->name);
6715 ++ return -1;
6716 ++ }
6717 +
6718 + /* _ASM_EXTABLE_EX hack */
6719 + if (alt->new_off >= 0x7ffffff0)
6720 +diff --git a/tools/perf/arch/x86/util/iostat.c b/tools/perf/arch/x86/util/iostat.c
6721 +index eeafe97b8105b..792cd75ade33d 100644
6722 +--- a/tools/perf/arch/x86/util/iostat.c
6723 ++++ b/tools/perf/arch/x86/util/iostat.c
6724 +@@ -432,7 +432,7 @@ void iostat_print_metric(struct perf_stat_config *config, struct evsel *evsel,
6725 + u8 die = ((struct iio_root_port *)evsel->priv)->die;
6726 + struct perf_counts_values *count = perf_counts(evsel->counts, die, 0);
6727 +
6728 +- if (count->run && count->ena) {
6729 ++ if (count && count->run && count->ena) {
6730 + if (evsel->prev_raw_counts && !out->force_header) {
6731 + struct perf_counts_values *prev_count =
6732 + perf_counts(evsel->prev_raw_counts, die, 0);
6733 +diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
6734 +index 634375937db96..36033a7372f91 100644
6735 +--- a/tools/perf/builtin-stat.c
6736 ++++ b/tools/perf/builtin-stat.c
6737 +@@ -2406,6 +2406,8 @@ int cmd_stat(int argc, const char **argv)
6738 + goto out;
6739 + } else if (verbose)
6740 + iostat_list(evsel_list, &stat_config);
6741 ++ if (iostat_mode == IOSTAT_RUN && !target__has_cpu(&target))
6742 ++ target.system_wide = true;
6743 + }
6744 +
6745 + if (add_default_attributes())
6746 +diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
6747 +index a288035eb3626..c756284b3b135 100644
6748 +--- a/tools/perf/tests/dwarf-unwind.c
6749 ++++ b/tools/perf/tests/dwarf-unwind.c
6750 +@@ -20,6 +20,23 @@
6751 + /* For bsearch. We try to unwind functions in shared object. */
6752 + #include <stdlib.h>
6753 +
6754 ++/*
6755 ++ * The test will assert frames are on the stack but tail call optimizations lose
6756 ++ * the frame of the caller. Clang can disable this optimization on a called
6757 ++ * function but GCC currently (11/2020) lacks this attribute. The barrier is
6758 ++ * used to inhibit tail calls in these cases.
6759 ++ */
6760 ++#ifdef __has_attribute
6761 ++#if __has_attribute(disable_tail_calls)
6762 ++#define NO_TAIL_CALL_ATTRIBUTE __attribute__((disable_tail_calls))
6763 ++#define NO_TAIL_CALL_BARRIER
6764 ++#endif
6765 ++#endif
6766 ++#ifndef NO_TAIL_CALL_ATTRIBUTE
6767 ++#define NO_TAIL_CALL_ATTRIBUTE
6768 ++#define NO_TAIL_CALL_BARRIER __asm__ __volatile__("" : : : "memory");
6769 ++#endif
6770 ++
6771 + static int mmap_handler(struct perf_tool *tool __maybe_unused,
6772 + union perf_event *event,
6773 + struct perf_sample *sample,
6774 +@@ -91,7 +108,7 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
6775 + return strcmp((const char *) symbol, funcs[idx]);
6776 + }
6777 +
6778 +-noinline int test_dwarf_unwind__thread(struct thread *thread)
6779 ++NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__thread(struct thread *thread)
6780 + {
6781 + struct perf_sample sample;
6782 + unsigned long cnt = 0;
6783 +@@ -122,7 +139,7 @@ noinline int test_dwarf_unwind__thread(struct thread *thread)
6784 +
6785 + static int global_unwind_retval = -INT_MAX;
6786 +
6787 +-noinline int test_dwarf_unwind__compare(void *p1, void *p2)
6788 ++NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__compare(void *p1, void *p2)
6789 + {
6790 + /* Any possible value should be 'thread' */
6791 + struct thread *thread = *(struct thread **)p1;
6792 +@@ -141,7 +158,7 @@ noinline int test_dwarf_unwind__compare(void *p1, void *p2)
6793 + return p1 - p2;
6794 + }
6795 +
6796 +-noinline int test_dwarf_unwind__krava_3(struct thread *thread)
6797 ++NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_3(struct thread *thread)
6798 + {
6799 + struct thread *array[2] = {thread, thread};
6800 + void *fp = &bsearch;
6801 +@@ -160,14 +177,22 @@ noinline int test_dwarf_unwind__krava_3(struct thread *thread)
6802 + return global_unwind_retval;
6803 + }
6804 +
6805 +-noinline int test_dwarf_unwind__krava_2(struct thread *thread)
6806 ++NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_2(struct thread *thread)
6807 + {
6808 +- return test_dwarf_unwind__krava_3(thread);
6809 ++ int ret;
6810 ++
6811 ++ ret = test_dwarf_unwind__krava_3(thread);
6812 ++ NO_TAIL_CALL_BARRIER;
6813 ++ return ret;
6814 + }
6815 +
6816 +-noinline int test_dwarf_unwind__krava_1(struct thread *thread)
6817 ++NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_1(struct thread *thread)
6818 + {
6819 +- return test_dwarf_unwind__krava_2(thread);
6820 ++ int ret;
6821 ++
6822 ++ ret = test_dwarf_unwind__krava_2(thread);
6823 ++ NO_TAIL_CALL_BARRIER;
6824 ++ return ret;
6825 + }
6826 +
6827 + int test__dwarf_unwind(struct test *test __maybe_unused, int subtest __maybe_unused)
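
The empty asm works because the compiler must assume it observes program state after the call, so the call cannot be converted into a tail jump and the caller's frame survives for the unwinder. The minimal usage shape, mirroring the krava helpers above:

    /* The barrier after the call keeps outer()'s frame alive: the call to
     * inner() can no longer be optimized into a tail jump. */
    #define NO_TAIL_CALL_BARRIER __asm__ __volatile__("" : : : "memory");

    static int inner(int x) { return x + 1; }

    static int outer(int x)
    {
            int ret = inner(x);

            NO_TAIL_CALL_BARRIER;
            return ret;
    }
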
6828 +diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
6829 +index f405b20c1e6c5..93f1f124ef89b 100644
6830 +--- a/tools/testing/selftests/bpf/Makefile
6831 ++++ b/tools/testing/selftests/bpf/Makefile
6832 +@@ -374,7 +374,8 @@ $(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.o: \
6833 + $(TRUNNER_BPF_PROGS_DIR)/%.c \
6834 + $(TRUNNER_BPF_PROGS_DIR)/*.h \
6835 + $$(INCLUDE_DIR)/vmlinux.h \
6836 +- $(wildcard $(BPFDIR)/bpf_*.h) | $(TRUNNER_OUTPUT)
6837 ++ $(wildcard $(BPFDIR)/bpf_*.h) \
6838 ++ | $(TRUNNER_OUTPUT) $$(BPFOBJ)
6839 + $$(call $(TRUNNER_BPF_BUILD_RULE),$$<,$$@, \
6840 + $(TRUNNER_BPF_CFLAGS))
6841 +
6842 +diff --git a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
6843 +index 59ea56945e6cd..b497bb85b667f 100755
6844 +--- a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
6845 ++++ b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
6846 +@@ -112,6 +112,14 @@ setup()
6847 + ip netns add "${NS2}"
6848 + ip netns add "${NS3}"
6849 +
6850 ++ # rp_filter gets confused by what these tests are doing, so disable it
6851 ++ ip netns exec ${NS1} sysctl -wq net.ipv4.conf.all.rp_filter=0
6852 ++ ip netns exec ${NS2} sysctl -wq net.ipv4.conf.all.rp_filter=0
6853 ++ ip netns exec ${NS3} sysctl -wq net.ipv4.conf.all.rp_filter=0
6854 ++ ip netns exec ${NS1} sysctl -wq net.ipv4.conf.default.rp_filter=0
6855 ++ ip netns exec ${NS2} sysctl -wq net.ipv4.conf.default.rp_filter=0
6856 ++ ip netns exec ${NS3} sysctl -wq net.ipv4.conf.default.rp_filter=0
6857 ++
6858 + ip link add veth1 type veth peer name veth2
6859 + ip link add veth3 type veth peer name veth4
6860 + ip link add veth5 type veth peer name veth6
6861 +@@ -236,11 +244,6 @@ setup()
6862 + ip -netns ${NS1} -6 route add ${IPv6_GRE}/128 dev veth5 via ${IPv6_6} ${VRF}
6863 + ip -netns ${NS2} -6 route add ${IPv6_GRE}/128 dev veth7 via ${IPv6_8} ${VRF}
6864 +
6865 +- # rp_filter gets confused by what these tests are doing, so disable it
6866 +- ip netns exec ${NS1} sysctl -wq net.ipv4.conf.all.rp_filter=0
6867 +- ip netns exec ${NS2} sysctl -wq net.ipv4.conf.all.rp_filter=0
6868 +- ip netns exec ${NS3} sysctl -wq net.ipv4.conf.all.rp_filter=0
6869 +-
6870 + TMPFILE=$(mktemp /tmp/test_lwt_ip_encap.XXXXXX)
6871 +
6872 + sleep 1 # reduce flakiness