From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.16 commit in: /
Date: Tue, 01 Feb 2022 17:21:34
Message-Id: 1643736067.9531e5312113989d7f6c60a965ac5b2357fa6459.mpagano@gentoo
1 commit: 9531e5312113989d7f6c60a965ac5b2357fa6459
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Tue Feb 1 17:21:07 2022 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Tue Feb 1 17:21:07 2022 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9531e531
7
8 Linux patch 5.16.5
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1004_linux-5.16.5.patch | 8026 +++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 8030 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index ff7d994b..f8c4cea5 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -59,6 +59,10 @@ Patch: 1003_linux-5.16.4.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.16.4
23
24 +Patch: 1004_linux-5.16.5.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.16.5
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1004_linux-5.16.5.patch b/1004_linux-5.16.5.patch
33 new file mode 100644
34 index 00000000..282c5f8b
35 --- /dev/null
36 +++ b/1004_linux-5.16.5.patch
37 @@ -0,0 +1,8026 @@
38 +diff --git a/Documentation/accounting/psi.rst b/Documentation/accounting/psi.rst
39 +index f2b3439edcc2c..860fe651d6453 100644
40 +--- a/Documentation/accounting/psi.rst
41 ++++ b/Documentation/accounting/psi.rst
42 +@@ -92,7 +92,8 @@ Triggers can be set on more than one psi metric and more than one trigger
43 + for the same psi metric can be specified. However for each trigger a separate
44 + file descriptor is required to be able to poll it separately from others,
45 + therefore for each trigger a separate open() syscall should be made even
46 +-when opening the same psi interface file.
47 ++when opening the same psi interface file. Write operations to a file descriptor
48 ++with an already existing psi trigger will fail with EBUSY.
49 +
50 + Monitors activate only when system enters stall state for the monitored
51 + psi metric and deactivates upon exit from the stall state. While system is
52 +diff --git a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
53 +index 0968b40aef1e8..e3501bfa22e90 100644
54 +--- a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
55 ++++ b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
56 +@@ -31,7 +31,7 @@ tcan4x5x: tcan4x5x@0 {
57 + #address-cells = <1>;
58 + #size-cells = <1>;
59 + spi-max-frequency = <10000000>;
60 +- bosch,mram-cfg = <0x0 0 0 32 0 0 1 1>;
61 ++ bosch,mram-cfg = <0x0 0 0 16 0 0 1 1>;
62 + interrupt-parent = <&gpio1>;
63 + interrupts = <14 IRQ_TYPE_LEVEL_LOW>;
64 + device-state-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
65 +diff --git a/Makefile b/Makefile
66 +index 36ff4ed4763b3..2f0e5c3d9e2a7 100644
67 +--- a/Makefile
68 ++++ b/Makefile
69 +@@ -1,7 +1,7 @@
70 + # SPDX-License-Identifier: GPL-2.0
71 + VERSION = 5
72 + PATCHLEVEL = 16
73 +-SUBLEVEL = 4
74 ++SUBLEVEL = 5
75 + EXTRAVERSION =
76 + NAME = Gobble Gobble
77 +
78 +diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
79 +index 7d23d4bb2168b..6fe67963ba5a0 100644
80 +--- a/arch/arm/include/asm/assembler.h
81 ++++ b/arch/arm/include/asm/assembler.h
82 +@@ -288,6 +288,7 @@
83 + */
84 + #define ALT_UP(instr...) \
85 + .pushsection ".alt.smp.init", "a" ;\
86 ++ .align 2 ;\
87 + .long 9998b - . ;\
88 + 9997: instr ;\
89 + .if . - 9997b == 2 ;\
90 +@@ -299,6 +300,7 @@
91 + .popsection
92 + #define ALT_UP_B(label) \
93 + .pushsection ".alt.smp.init", "a" ;\
94 ++ .align 2 ;\
95 + .long 9998b - . ;\
96 + W(b) . + (label - 9998b) ;\
97 + .popsection
98 +diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
99 +index 6af68edfa53ab..bdc35c0e8dfb9 100644
100 +--- a/arch/arm/include/asm/processor.h
101 ++++ b/arch/arm/include/asm/processor.h
102 +@@ -96,6 +96,7 @@ unsigned long __get_wchan(struct task_struct *p);
103 + #define __ALT_SMP_ASM(smp, up) \
104 + "9998: " smp "\n" \
105 + " .pushsection \".alt.smp.init\", \"a\"\n" \
106 ++ " .align 2\n" \
107 + " .long 9998b - .\n" \
108 + " " up "\n" \
109 + " .popsection\n"
110 +diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
111 +index 36fbc33292526..32dbfd81f42a4 100644
112 +--- a/arch/arm/include/asm/uaccess.h
113 ++++ b/arch/arm/include/asm/uaccess.h
114 +@@ -11,6 +11,7 @@
115 + #include <linux/string.h>
116 + #include <asm/memory.h>
117 + #include <asm/domain.h>
118 ++#include <asm/unaligned.h>
119 + #include <asm/unified.h>
120 + #include <asm/compiler.h>
121 +
122 +@@ -497,7 +498,10 @@ do { \
123 + } \
124 + default: __err = __get_user_bad(); break; \
125 + } \
126 +- *(type *)(dst) = __val; \
127 ++ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) \
128 ++ put_unaligned(__val, (type *)(dst)); \
129 ++ else \
130 ++ *(type *)(dst) = __val; /* aligned by caller */ \
131 + if (__err) \
132 + goto err_label; \
133 + } while (0)
134 +@@ -507,7 +511,9 @@ do { \
135 + const type *__pk_ptr = (dst); \
136 + unsigned long __dst = (unsigned long)__pk_ptr; \
137 + int __err = 0; \
138 +- type __val = *(type *)src; \
139 ++ type __val = IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) \
140 ++ ? get_unaligned((type *)(src)) \
141 ++ : *(type *)(src); /* aligned by caller */ \
142 + switch (sizeof(type)) { \
143 + case 1: __put_user_asm_byte(__val, __dst, __err, ""); break; \
144 + case 2: __put_user_asm_half(__val, __dst, __err, ""); break; \
145 +diff --git a/arch/arm/probes/kprobes/Makefile b/arch/arm/probes/kprobes/Makefile
146 +index 14db56f49f0a3..6159010dac4a6 100644
147 +--- a/arch/arm/probes/kprobes/Makefile
148 ++++ b/arch/arm/probes/kprobes/Makefile
149 +@@ -1,4 +1,7 @@
150 + # SPDX-License-Identifier: GPL-2.0
151 ++KASAN_SANITIZE_actions-common.o := n
152 ++KASAN_SANITIZE_actions-arm.o := n
153 ++KASAN_SANITIZE_actions-thumb.o := n
154 + obj-$(CONFIG_KPROBES) += core.o actions-common.o checkers-common.o
155 + obj-$(CONFIG_ARM_KPROBES_TEST) += test-kprobes.o
156 + test-kprobes-objs := test-core.o
157 +diff --git a/arch/arm64/kvm/hyp/exception.c b/arch/arm64/kvm/hyp/exception.c
158 +index 0418399e0a201..c5d0097154020 100644
159 +--- a/arch/arm64/kvm/hyp/exception.c
160 ++++ b/arch/arm64/kvm/hyp/exception.c
161 +@@ -38,7 +38,10 @@ static inline void __vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
162 +
163 + static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, u64 val)
164 + {
165 +- write_sysreg_el1(val, SYS_SPSR);
166 ++ if (has_vhe())
167 ++ write_sysreg_el1(val, SYS_SPSR);
168 ++ else
169 ++ __vcpu_sys_reg(vcpu, SPSR_EL1) = val;
170 + }
171 +
172 + static void __vcpu_write_spsr_abt(struct kvm_vcpu *vcpu, u64 val)
173 +diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
174 +index f8ceebe4982eb..4c77ff556f0ae 100644
175 +--- a/arch/arm64/kvm/hyp/pgtable.c
176 ++++ b/arch/arm64/kvm/hyp/pgtable.c
177 +@@ -921,13 +921,9 @@ static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
178 + */
179 + stage2_put_pte(ptep, mmu, addr, level, mm_ops);
180 +
181 +- if (need_flush) {
182 +- kvm_pte_t *pte_follow = kvm_pte_follow(pte, mm_ops);
183 +-
184 +- dcache_clean_inval_poc((unsigned long)pte_follow,
185 +- (unsigned long)pte_follow +
186 +- kvm_granule_size(level));
187 +- }
188 ++ if (need_flush && mm_ops->dcache_clean_inval_poc)
189 ++ mm_ops->dcache_clean_inval_poc(kvm_pte_follow(pte, mm_ops),
190 ++ kvm_granule_size(level));
191 +
192 + if (childp)
193 + mm_ops->put_page(childp);
194 +@@ -1089,15 +1085,13 @@ static int stage2_flush_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
195 + struct kvm_pgtable *pgt = arg;
196 + struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
197 + kvm_pte_t pte = *ptep;
198 +- kvm_pte_t *pte_follow;
199 +
200 + if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pgt, pte))
201 + return 0;
202 +
203 +- pte_follow = kvm_pte_follow(pte, mm_ops);
204 +- dcache_clean_inval_poc((unsigned long)pte_follow,
205 +- (unsigned long)pte_follow +
206 +- kvm_granule_size(level));
207 ++ if (mm_ops->dcache_clean_inval_poc)
208 ++ mm_ops->dcache_clean_inval_poc(kvm_pte_follow(pte, mm_ops),
209 ++ kvm_granule_size(level));
210 + return 0;
211 + }
212 +
213 +diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
214 +index 20db2f281cf23..4fb419f7b8b61 100644
215 +--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
216 ++++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
217 +@@ -983,6 +983,9 @@ static void __vgic_v3_read_ctlr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
218 + val = ((vtr >> 29) & 7) << ICC_CTLR_EL1_PRI_BITS_SHIFT;
219 + /* IDbits */
220 + val |= ((vtr >> 23) & 7) << ICC_CTLR_EL1_ID_BITS_SHIFT;
221 ++ /* SEIS */
222 ++ if (kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK)
223 ++ val |= BIT(ICC_CTLR_EL1_SEIS_SHIFT);
224 + /* A3V */
225 + val |= ((vtr >> 21) & 1) << ICC_CTLR_EL1_A3V_SHIFT;
226 + /* EOImode */
227 +diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
228 +index 04f62c4b07fb5..0216201251e2f 100644
229 +--- a/arch/arm64/kvm/vgic/vgic-v3.c
230 ++++ b/arch/arm64/kvm/vgic/vgic-v3.c
231 +@@ -609,6 +609,18 @@ static int __init early_gicv4_enable(char *buf)
232 + }
233 + early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);
234 +
235 ++static const struct midr_range broken_seis[] = {
236 ++ MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM),
237 ++ MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM),
238 ++ {},
239 ++};
240 ++
241 ++static bool vgic_v3_broken_seis(void)
242 ++{
243 ++ return ((kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK) &&
244 ++ is_midr_in_range_list(read_cpuid_id(), broken_seis));
245 ++}
246 ++
247 + /**
248 + * vgic_v3_probe - probe for a VGICv3 compatible interrupt controller
249 + * @info: pointer to the GIC description
250 +@@ -676,9 +688,10 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
251 + group1_trap = true;
252 + }
253 +
254 +- if (kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK) {
255 +- kvm_info("GICv3 with locally generated SEI\n");
256 ++ if (vgic_v3_broken_seis()) {
257 ++ kvm_info("GICv3 with broken locally generated SEI\n");
258 +
259 ++ kvm_vgic_global_state.ich_vtr_el2 &= ~ICH_VTR_SEIS_MASK;
260 + group0_trap = true;
261 + group1_trap = true;
262 + if (ich_vtr_el2 & ICH_VTR_TDS_MASK)
263 +diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c
264 +index c3d53811a15e1..5d1a3125fd2b0 100644
265 +--- a/arch/arm64/mm/extable.c
266 ++++ b/arch/arm64/mm/extable.c
267 +@@ -43,8 +43,8 @@ static bool
268 + ex_handler_load_unaligned_zeropad(const struct exception_table_entry *ex,
269 + struct pt_regs *regs)
270 + {
271 +- int reg_data = FIELD_GET(EX_DATA_REG_DATA, ex->type);
272 +- int reg_addr = FIELD_GET(EX_DATA_REG_ADDR, ex->type);
273 ++ int reg_data = FIELD_GET(EX_DATA_REG_DATA, ex->data);
274 ++ int reg_addr = FIELD_GET(EX_DATA_REG_ADDR, ex->data);
275 + unsigned long data, addr, offset;
276 +
277 + addr = pt_regs_read_reg(regs, reg_addr);
278 +diff --git a/arch/ia64/pci/fixup.c b/arch/ia64/pci/fixup.c
279 +index acb55a41260dd..2bcdd7d3a1ada 100644
280 +--- a/arch/ia64/pci/fixup.c
281 ++++ b/arch/ia64/pci/fixup.c
282 +@@ -76,5 +76,5 @@ static void pci_fixup_video(struct pci_dev *pdev)
283 + }
284 + }
285 + }
286 +-DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
287 +- PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_video);
288 ++DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID,
289 ++ PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_video);
290 +diff --git a/arch/mips/loongson64/vbios_quirk.c b/arch/mips/loongson64/vbios_quirk.c
291 +index 9a29e94d3db1d..3115d4de982c5 100644
292 +--- a/arch/mips/loongson64/vbios_quirk.c
293 ++++ b/arch/mips/loongson64/vbios_quirk.c
294 +@@ -3,7 +3,7 @@
295 + #include <linux/pci.h>
296 + #include <loongson.h>
297 +
298 +-static void pci_fixup_radeon(struct pci_dev *pdev)
299 ++static void pci_fixup_video(struct pci_dev *pdev)
300 + {
301 + struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
302 +
303 +@@ -22,8 +22,7 @@ static void pci_fixup_radeon(struct pci_dev *pdev)
304 + res->flags = IORESOURCE_MEM | IORESOURCE_ROM_SHADOW |
305 + IORESOURCE_PCI_FIXED;
306 +
307 +- dev_info(&pdev->dev, "BAR %d: assigned %pR for Radeon ROM\n",
308 +- PCI_ROM_RESOURCE, res);
309 ++ dev_info(&pdev->dev, "Video device with shadowed ROM at %pR\n", res);
310 + }
311 +-DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, 0x9615,
312 +- PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_radeon);
313 ++DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, 0x9615,
314 ++ PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_video);
315 +diff --git a/arch/powerpc/include/asm/book3s/32/mmu-hash.h b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
316 +index f5be185cbdf8d..94ad7acfd0565 100644
317 +--- a/arch/powerpc/include/asm/book3s/32/mmu-hash.h
318 ++++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
319 +@@ -143,6 +143,8 @@ static __always_inline void update_user_segments(u32 val)
320 + update_user_segment(15, val);
321 + }
322 +
323 ++int __init find_free_bat(void);
324 ++unsigned int bat_block_size(unsigned long base, unsigned long top);
325 + #endif /* !__ASSEMBLY__ */
326 +
327 + /* We happily ignore the smaller BATs on 601, we don't actually use
328 +diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
329 +index fff391b9b97bc..5b150cf573618 100644
330 +--- a/arch/powerpc/include/asm/kvm_book3s_64.h
331 ++++ b/arch/powerpc/include/asm/kvm_book3s_64.h
332 +@@ -39,7 +39,6 @@ struct kvm_nested_guest {
333 + pgd_t *shadow_pgtable; /* our page table for this guest */
334 + u64 l1_gr_to_hr; /* L1's addr of part'n-scoped table */
335 + u64 process_table; /* process table entry for this guest */
336 +- u64 hfscr; /* HFSCR that the L1 requested for this nested guest */
337 + long refcnt; /* number of pointers to this struct */
338 + struct mutex tlb_lock; /* serialize page faults and tlbies */
339 + struct kvm_nested_guest *next;
340 +diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
341 +index e4d23193eba75..2ec07dacbaa13 100644
342 +--- a/arch/powerpc/include/asm/kvm_host.h
343 ++++ b/arch/powerpc/include/asm/kvm_host.h
344 +@@ -814,6 +814,7 @@ struct kvm_vcpu_arch {
345 +
346 + /* For support of nested guests */
347 + struct kvm_nested_guest *nested;
348 ++ u64 nested_hfscr; /* HFSCR that the L1 requested for the nested guest */
349 + u32 nested_vcpu_id;
350 + gpa_t nested_io_gpr;
351 + #endif
352 +diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
353 +index baea657bc8687..bca31a61e57f8 100644
354 +--- a/arch/powerpc/include/asm/ppc-opcode.h
355 ++++ b/arch/powerpc/include/asm/ppc-opcode.h
356 +@@ -498,6 +498,7 @@
357 + #define PPC_RAW_LDX(r, base, b) (0x7c00002a | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
358 + #define PPC_RAW_LHZ(r, base, i) (0xa0000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i))
359 + #define PPC_RAW_LHBRX(r, base, b) (0x7c00062c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
360 ++#define PPC_RAW_LWBRX(r, base, b) (0x7c00042c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
361 + #define PPC_RAW_LDBRX(r, base, b) (0x7c000428 | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
362 + #define PPC_RAW_STWCX(s, a, b) (0x7c00012d | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
363 + #define PPC_RAW_CMPWI(a, i) (0x2c000000 | ___PPC_RA(a) | IMM_L(i))
364 +diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
365 +index 52d05b465e3ec..25fc8ad9a27af 100644
366 +--- a/arch/powerpc/include/asm/syscall.h
367 ++++ b/arch/powerpc/include/asm/syscall.h
368 +@@ -90,7 +90,7 @@ static inline void syscall_get_arguments(struct task_struct *task,
369 + unsigned long val, mask = -1UL;
370 + unsigned int n = 6;
371 +
372 +- if (is_32bit_task())
373 ++ if (is_tsk_32bit_task(task))
374 + mask = 0xffffffff;
375 +
376 + while (n--) {
377 +@@ -105,7 +105,7 @@ static inline void syscall_get_arguments(struct task_struct *task,
378 +
379 + static inline int syscall_get_arch(struct task_struct *task)
380 + {
381 +- if (is_32bit_task())
382 ++ if (is_tsk_32bit_task(task))
383 + return AUDIT_ARCH_PPC;
384 + else if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
385 + return AUDIT_ARCH_PPC64LE;
386 +diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
387 +index 5725029aaa295..d6e649b3c70b6 100644
388 +--- a/arch/powerpc/include/asm/thread_info.h
389 ++++ b/arch/powerpc/include/asm/thread_info.h
390 +@@ -168,8 +168,10 @@ static inline bool test_thread_local_flags(unsigned int flags)
391 +
392 + #ifdef CONFIG_COMPAT
393 + #define is_32bit_task() (test_thread_flag(TIF_32BIT))
394 ++#define is_tsk_32bit_task(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT))
395 + #else
396 + #define is_32bit_task() (IS_ENABLED(CONFIG_PPC32))
397 ++#define is_tsk_32bit_task(tsk) (IS_ENABLED(CONFIG_PPC32))
398 + #endif
399 +
400 + #if defined(CONFIG_PPC64)
401 +diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
402 +index 5fa68c2ef1f81..36f3f5a8868dd 100644
403 +--- a/arch/powerpc/kernel/Makefile
404 ++++ b/arch/powerpc/kernel/Makefile
405 +@@ -11,6 +11,7 @@ CFLAGS_prom_init.o += -fPIC
406 + CFLAGS_btext.o += -fPIC
407 + endif
408 +
409 ++CFLAGS_early_32.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
410 + CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
411 + CFLAGS_prom_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
412 + CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
413 +diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S
414 +index 4b1ff94e67eb4..4c6d1a8dcefed 100644
415 +--- a/arch/powerpc/kernel/interrupt_64.S
416 ++++ b/arch/powerpc/kernel/interrupt_64.S
417 +@@ -30,6 +30,7 @@ COMPAT_SYS_CALL_TABLE:
418 + .ifc \srr,srr
419 + mfspr r11,SPRN_SRR0
420 + ld r12,_NIP(r1)
421 ++ clrrdi r11,r11,2
422 + clrrdi r12,r12,2
423 + 100: tdne r11,r12
424 + EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
425 +@@ -40,6 +41,7 @@ COMPAT_SYS_CALL_TABLE:
426 + .else
427 + mfspr r11,SPRN_HSRR0
428 + ld r12,_NIP(r1)
429 ++ clrrdi r11,r11,2
430 + clrrdi r12,r12,2
431 + 100: tdne r11,r12
432 + EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
433 +diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
434 +index 94da0d25eb125..a2fd1db29f7e8 100644
435 +--- a/arch/powerpc/kvm/book3s_hv.c
436 ++++ b/arch/powerpc/kvm/book3s_hv.c
437 +@@ -1731,7 +1731,6 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
438 +
439 + static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
440 + {
441 +- struct kvm_nested_guest *nested = vcpu->arch.nested;
442 + int r;
443 + int srcu_idx;
444 +
445 +@@ -1831,7 +1830,7 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
446 + * it into a HEAI.
447 + */
448 + if (!(vcpu->arch.hfscr_permitted & (1UL << cause)) ||
449 +- (nested->hfscr & (1UL << cause))) {
450 ++ (vcpu->arch.nested_hfscr & (1UL << cause))) {
451 + vcpu->arch.trap = BOOK3S_INTERRUPT_H_EMUL_ASSIST;
452 +
453 + /*
454 +diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
455 +index 89295b52a97c3..6c4e0e93105ff 100644
456 +--- a/arch/powerpc/kvm/book3s_hv_nested.c
457 ++++ b/arch/powerpc/kvm/book3s_hv_nested.c
458 +@@ -362,7 +362,7 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
459 + /* set L1 state to L2 state */
460 + vcpu->arch.nested = l2;
461 + vcpu->arch.nested_vcpu_id = l2_hv.vcpu_token;
462 +- l2->hfscr = l2_hv.hfscr;
463 ++ vcpu->arch.nested_hfscr = l2_hv.hfscr;
464 + vcpu->arch.regs = l2_regs;
465 +
466 + /* Guest must always run with ME enabled, HV disabled. */
467 +diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
468 +index 9e5d0f413b712..0b08e85d38391 100644
469 +--- a/arch/powerpc/lib/Makefile
470 ++++ b/arch/powerpc/lib/Makefile
471 +@@ -19,6 +19,9 @@ CFLAGS_code-patching.o += -DDISABLE_BRANCH_PROFILING
472 + CFLAGS_feature-fixups.o += -DDISABLE_BRANCH_PROFILING
473 + endif
474 +
475 ++CFLAGS_code-patching.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
476 ++CFLAGS_feature-fixups.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
477 ++
478 + obj-y += alloc.o code-patching.o feature-fixups.o pmem.o test_code-patching.o
479 +
480 + ifndef CONFIG_KASAN
481 +diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
482 +index 27061583a0107..203735caf6915 100644
483 +--- a/arch/powerpc/mm/book3s32/mmu.c
484 ++++ b/arch/powerpc/mm/book3s32/mmu.c
485 +@@ -76,7 +76,7 @@ unsigned long p_block_mapped(phys_addr_t pa)
486 + return 0;
487 + }
488 +
489 +-static int find_free_bat(void)
490 ++int __init find_free_bat(void)
491 + {
492 + int b;
493 + int n = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
494 +@@ -100,7 +100,7 @@ static int find_free_bat(void)
495 + * - block size has to be a power of two. This is calculated by finding the
496 + * highest bit set to 1.
497 + */
498 +-static unsigned int block_size(unsigned long base, unsigned long top)
499 ++unsigned int bat_block_size(unsigned long base, unsigned long top)
500 + {
501 + unsigned int max_size = SZ_256M;
502 + unsigned int base_shift = (ffs(base) - 1) & 31;
503 +@@ -145,7 +145,7 @@ static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long to
504 + int idx;
505 +
506 + while ((idx = find_free_bat()) != -1 && base != top) {
507 +- unsigned int size = block_size(base, top);
508 ++ unsigned int size = bat_block_size(base, top);
509 +
510 + if (size < 128 << 10)
511 + break;
512 +@@ -196,18 +196,17 @@ void mmu_mark_initmem_nx(void)
513 + int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
514 + int i;
515 + unsigned long base = (unsigned long)_stext - PAGE_OFFSET;
516 +- unsigned long top = (unsigned long)_etext - PAGE_OFFSET;
517 ++ unsigned long top = ALIGN((unsigned long)_etext - PAGE_OFFSET, SZ_128K);
518 + unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;
519 + unsigned long size;
520 +
521 +- for (i = 0; i < nb - 1 && base < top && top - base > (128 << 10);) {
522 +- size = block_size(base, top);
523 ++ for (i = 0; i < nb - 1 && base < top;) {
524 ++ size = bat_block_size(base, top);
525 + setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT);
526 + base += size;
527 + }
528 + if (base < top) {
529 +- size = block_size(base, top);
530 +- size = max(size, 128UL << 10);
531 ++ size = bat_block_size(base, top);
532 + if ((top - base) > size) {
533 + size <<= 1;
534 + if (strict_kernel_rwx_enabled() && base + size > border)
535 +diff --git a/arch/powerpc/mm/kasan/book3s_32.c b/arch/powerpc/mm/kasan/book3s_32.c
536 +index 35b287b0a8da4..450a67ef0bbe1 100644
537 +--- a/arch/powerpc/mm/kasan/book3s_32.c
538 ++++ b/arch/powerpc/mm/kasan/book3s_32.c
539 +@@ -10,48 +10,51 @@ int __init kasan_init_region(void *start, size_t size)
540 + {
541 + unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
542 + unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
543 +- unsigned long k_cur = k_start;
544 +- int k_size = k_end - k_start;
545 +- int k_size_base = 1 << (ffs(k_size) - 1);
546 ++ unsigned long k_nobat = k_start;
547 ++ unsigned long k_cur;
548 ++ phys_addr_t phys;
549 + int ret;
550 +- void *block;
551 +
552 +- block = memblock_alloc(k_size, k_size_base);
553 +-
554 +- if (block && k_size_base >= SZ_128K && k_start == ALIGN(k_start, k_size_base)) {
555 +- int shift = ffs(k_size - k_size_base);
556 +- int k_size_more = shift ? 1 << (shift - 1) : 0;
557 +-
558 +- setbat(-1, k_start, __pa(block), k_size_base, PAGE_KERNEL);
559 +- if (k_size_more >= SZ_128K)
560 +- setbat(-1, k_start + k_size_base, __pa(block) + k_size_base,
561 +- k_size_more, PAGE_KERNEL);
562 +- if (v_block_mapped(k_start))
563 +- k_cur = k_start + k_size_base;
564 +- if (v_block_mapped(k_start + k_size_base))
565 +- k_cur = k_start + k_size_base + k_size_more;
566 +-
567 +- update_bats();
568 ++ while (k_nobat < k_end) {
569 ++ unsigned int k_size = bat_block_size(k_nobat, k_end);
570 ++ int idx = find_free_bat();
571 ++
572 ++ if (idx == -1)
573 ++ break;
574 ++ if (k_size < SZ_128K)
575 ++ break;
576 ++ phys = memblock_phys_alloc_range(k_size, k_size, 0,
577 ++ MEMBLOCK_ALLOC_ANYWHERE);
578 ++ if (!phys)
579 ++ break;
580 ++
581 ++ setbat(idx, k_nobat, phys, k_size, PAGE_KERNEL);
582 ++ k_nobat += k_size;
583 + }
584 ++ if (k_nobat != k_start)
585 ++ update_bats();
586 +
587 +- if (!block)
588 +- block = memblock_alloc(k_size, PAGE_SIZE);
589 +- if (!block)
590 +- return -ENOMEM;
591 ++ if (k_nobat < k_end) {
592 ++ phys = memblock_phys_alloc_range(k_end - k_nobat, PAGE_SIZE, 0,
593 ++ MEMBLOCK_ALLOC_ANYWHERE);
594 ++ if (!phys)
595 ++ return -ENOMEM;
596 ++ }
597 +
598 + ret = kasan_init_shadow_page_tables(k_start, k_end);
599 + if (ret)
600 + return ret;
601 +
602 +- kasan_update_early_region(k_start, k_cur, __pte(0));
603 ++ kasan_update_early_region(k_start, k_nobat, __pte(0));
604 +
605 +- for (; k_cur < k_end; k_cur += PAGE_SIZE) {
606 ++ for (k_cur = k_nobat; k_cur < k_end; k_cur += PAGE_SIZE) {
607 + pmd_t *pmd = pmd_off_k(k_cur);
608 +- void *va = block + k_cur - k_start;
609 +- pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
610 ++ pte_t pte = pfn_pte(PHYS_PFN(phys + k_cur - k_nobat), PAGE_KERNEL);
611 +
612 + __set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0);
613 + }
614 + flush_tlb_kernel_range(k_start, k_end);
615 ++ memset(kasan_mem_to_shadow(start), 0, k_end - k_start);
616 ++
617 + return 0;
618 + }
619 +diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
620 +index 90ce75f0f1e2a..8acf8a611a265 100644
621 +--- a/arch/powerpc/net/bpf_jit_comp.c
622 ++++ b/arch/powerpc/net/bpf_jit_comp.c
623 +@@ -23,15 +23,15 @@ static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
624 + memset32(area, BREAKPOINT_INSTRUCTION, size / 4);
625 + }
626 +
627 +-/* Fix the branch target addresses for subprog calls */
628 +-static int bpf_jit_fixup_subprog_calls(struct bpf_prog *fp, u32 *image,
629 +- struct codegen_context *ctx, u32 *addrs)
630 ++/* Fix updated addresses (for subprog calls, ldimm64, et al) during extra pass */
631 ++static int bpf_jit_fixup_addresses(struct bpf_prog *fp, u32 *image,
632 ++ struct codegen_context *ctx, u32 *addrs)
633 + {
634 + const struct bpf_insn *insn = fp->insnsi;
635 + bool func_addr_fixed;
636 + u64 func_addr;
637 + u32 tmp_idx;
638 +- int i, ret;
639 ++ int i, j, ret;
640 +
641 + for (i = 0; i < fp->len; i++) {
642 + /*
643 +@@ -66,6 +66,23 @@ static int bpf_jit_fixup_subprog_calls(struct bpf_prog *fp, u32 *image,
644 + * of the JITed sequence remains unchanged.
645 + */
646 + ctx->idx = tmp_idx;
647 ++ } else if (insn[i].code == (BPF_LD | BPF_IMM | BPF_DW)) {
648 ++ tmp_idx = ctx->idx;
649 ++ ctx->idx = addrs[i] / 4;
650 ++#ifdef CONFIG_PPC32
651 ++ PPC_LI32(ctx->b2p[insn[i].dst_reg] - 1, (u32)insn[i + 1].imm);
652 ++ PPC_LI32(ctx->b2p[insn[i].dst_reg], (u32)insn[i].imm);
653 ++ for (j = ctx->idx - addrs[i] / 4; j < 4; j++)
654 ++ EMIT(PPC_RAW_NOP());
655 ++#else
656 ++ func_addr = ((u64)(u32)insn[i].imm) | (((u64)(u32)insn[i + 1].imm) << 32);
657 ++ PPC_LI64(b2p[insn[i].dst_reg], func_addr);
658 ++ /* overwrite rest with nops */
659 ++ for (j = ctx->idx - addrs[i] / 4; j < 5; j++)
660 ++ EMIT(PPC_RAW_NOP());
661 ++#endif
662 ++ ctx->idx = tmp_idx;
663 ++ i++;
664 + }
665 + }
666 +
667 +@@ -193,13 +210,13 @@ skip_init_ctx:
668 + /*
669 + * Do not touch the prologue and epilogue as they will remain
670 + * unchanged. Only fix the branch target address for subprog
671 +- * calls in the body.
672 ++ * calls in the body, and ldimm64 instructions.
673 + *
674 + * This does not change the offsets and lengths of the subprog
675 + * call instruction sequences and hence, the size of the JITed
676 + * image as well.
677 + */
678 +- bpf_jit_fixup_subprog_calls(fp, code_base, &cgctx, addrs);
679 ++ bpf_jit_fixup_addresses(fp, code_base, &cgctx, addrs);
680 +
681 + /* There is no need to perform the usual passes. */
682 + goto skip_codegen_passes;
683 +diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
684 +index 8a4faa05f9e41..0448b0c008835 100644
685 +--- a/arch/powerpc/net/bpf_jit_comp32.c
686 ++++ b/arch/powerpc/net/bpf_jit_comp32.c
687 +@@ -191,6 +191,9 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun
688 +
689 + if (image && rel < 0x2000000 && rel >= -0x2000000) {
690 + PPC_BL_ABS(func);
691 ++ EMIT(PPC_RAW_NOP());
692 ++ EMIT(PPC_RAW_NOP());
693 ++ EMIT(PPC_RAW_NOP());
694 + } else {
695 + /* Load function address into r0 */
696 + EMIT(PPC_RAW_LIS(_R0, IMM_H(func)));
697 +@@ -289,6 +292,8 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
698 + bool func_addr_fixed;
699 + u64 func_addr;
700 + u32 true_cond;
701 ++ u32 tmp_idx;
702 ++ int j;
703 +
704 + /*
705 + * addrs[] maps a BPF bytecode address into a real offset from
706 +@@ -836,8 +841,12 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
707 + * 16 byte instruction that uses two 'struct bpf_insn'
708 + */
709 + case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
710 ++ tmp_idx = ctx->idx;
711 + PPC_LI32(dst_reg_h, (u32)insn[i + 1].imm);
712 + PPC_LI32(dst_reg, (u32)insn[i].imm);
713 ++ /* padding to allow full 4 instructions for later patching */
714 ++ for (j = ctx->idx - tmp_idx; j < 4; j++)
715 ++ EMIT(PPC_RAW_NOP());
716 + /* Adjust for two bpf instructions */
717 + addrs[++i] = ctx->idx * 4;
718 + break;
719 +diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
720 +index 8571aafcc9e1e..a26a782e8b78e 100644
721 +--- a/arch/powerpc/net/bpf_jit_comp64.c
722 ++++ b/arch/powerpc/net/bpf_jit_comp64.c
723 +@@ -318,6 +318,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
724 + u64 imm64;
725 + u32 true_cond;
726 + u32 tmp_idx;
727 ++ int j;
728 +
729 + /*
730 + * addrs[] maps a BPF bytecode address into a real offset from
731 +@@ -632,17 +633,21 @@ bpf_alu32_trunc:
732 + EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1]));
733 + break;
734 + case 64:
735 +- /*
736 +- * Way easier and faster(?) to store the value
737 +- * into stack and then use ldbrx
738 +- *
739 +- * ctx->seen will be reliable in pass2, but
740 +- * the instructions generated will remain the
741 +- * same across all passes
742 +- */
743 ++ /* Store the value to stack and then use byte-reverse loads */
744 + PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
745 + EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx)));
746 +- EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1]));
747 ++ if (cpu_has_feature(CPU_FTR_ARCH_206)) {
748 ++ EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1]));
749 ++ } else {
750 ++ EMIT(PPC_RAW_LWBRX(dst_reg, 0, b2p[TMP_REG_1]));
751 ++ if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
752 ++ EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32));
753 ++ EMIT(PPC_RAW_LI(b2p[TMP_REG_2], 4));
754 ++ EMIT(PPC_RAW_LWBRX(b2p[TMP_REG_2], b2p[TMP_REG_2], b2p[TMP_REG_1]));
755 ++ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
756 ++ EMIT(PPC_RAW_SLDI(b2p[TMP_REG_2], b2p[TMP_REG_2], 32));
757 ++ EMIT(PPC_RAW_OR(dst_reg, dst_reg, b2p[TMP_REG_2]));
758 ++ }
759 + break;
760 + }
761 + break;
762 +@@ -806,9 +811,13 @@ emit_clear:
763 + case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
764 + imm64 = ((u64)(u32) insn[i].imm) |
765 + (((u64)(u32) insn[i+1].imm) << 32);
766 ++ tmp_idx = ctx->idx;
767 ++ PPC_LI64(dst_reg, imm64);
768 ++ /* padding to allow full 5 instructions for later patching */
769 ++ for (j = ctx->idx - tmp_idx; j < 5; j++)
770 ++ EMIT(PPC_RAW_NOP());
771 + /* Adjust for two bpf instructions */
772 + addrs[++i] = ctx->idx * 4;
773 +- PPC_LI64(dst_reg, imm64);
774 + break;
775 +
776 + /*
777 +diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
778 +index bef6b1abce702..e78de70509472 100644
779 +--- a/arch/powerpc/perf/core-book3s.c
780 ++++ b/arch/powerpc/perf/core-book3s.c
781 +@@ -1326,9 +1326,20 @@ static void power_pmu_disable(struct pmu *pmu)
782 + * Otherwise provide a warning if there is PMI pending, but
783 + * no counter is found overflown.
784 + */
785 +- if (any_pmc_overflown(cpuhw))
786 +- clear_pmi_irq_pending();
787 +- else
788 ++ if (any_pmc_overflown(cpuhw)) {
789 ++ /*
790 ++ * Since power_pmu_disable runs under local_irq_save, it
791 ++ * could happen that code hits a PMC overflow without PMI
792 ++ * pending in paca. Hence only clear PMI pending if it was
793 ++ * set.
794 ++ *
795 ++ * If a PMI is pending, then MSR[EE] must be disabled (because
796 ++ * the masked PMI handler disabling EE). So it is safe to
797 ++ * call clear_pmi_irq_pending().
798 ++ */
799 ++ if (pmi_irq_pending())
800 ++ clear_pmi_irq_pending();
801 ++ } else
802 + WARN_ON(pmi_irq_pending());
803 +
804 + val = mmcra = cpuhw->mmcr.mmcra;
805 +diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
806 +index 33f973ff97442..e8f15dbb89d02 100644
807 +--- a/arch/s390/hypfs/hypfs_vm.c
808 ++++ b/arch/s390/hypfs/hypfs_vm.c
809 +@@ -20,6 +20,7 @@
810 +
811 + static char local_guest[] = " ";
812 + static char all_guests[] = "* ";
813 ++static char *all_groups = all_guests;
814 + static char *guest_query;
815 +
816 + struct diag2fc_data {
817 +@@ -62,10 +63,11 @@ static int diag2fc(int size, char* query, void *addr)
818 +
819 + memcpy(parm_list.userid, query, NAME_LEN);
820 + ASCEBC(parm_list.userid, NAME_LEN);
821 +- parm_list.addr = (unsigned long) addr ;
822 ++ memcpy(parm_list.aci_grp, all_groups, NAME_LEN);
823 ++ ASCEBC(parm_list.aci_grp, NAME_LEN);
824 ++ parm_list.addr = (unsigned long)addr;
825 + parm_list.size = size;
826 + parm_list.fmt = 0x02;
827 +- memset(parm_list.aci_grp, 0x40, NAME_LEN);
828 + rc = -1;
829 +
830 + diag_stat_inc(DIAG_STAT_X2FC);
831 +diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
832 +index d52d85367bf73..b032e556eeb71 100644
833 +--- a/arch/s390/kernel/module.c
834 ++++ b/arch/s390/kernel/module.c
835 +@@ -33,7 +33,7 @@
836 + #define DEBUGP(fmt , ...)
837 + #endif
838 +
839 +-#define PLT_ENTRY_SIZE 20
840 ++#define PLT_ENTRY_SIZE 22
841 +
842 + void *module_alloc(unsigned long size)
843 + {
844 +@@ -341,27 +341,26 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
845 + case R_390_PLTOFF32: /* 32 bit offset from GOT to PLT. */
846 + case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
847 + if (info->plt_initialized == 0) {
848 +- unsigned int insn[5];
849 +- unsigned int *ip = me->core_layout.base +
850 +- me->arch.plt_offset +
851 +- info->plt_offset;
852 +-
853 +- insn[0] = 0x0d10e310; /* basr 1,0 */
854 +- insn[1] = 0x100a0004; /* lg 1,10(1) */
855 ++ unsigned char insn[PLT_ENTRY_SIZE];
856 ++ char *plt_base;
857 ++ char *ip;
858 ++
859 ++ plt_base = me->core_layout.base + me->arch.plt_offset;
860 ++ ip = plt_base + info->plt_offset;
861 ++ *(int *)insn = 0x0d10e310; /* basr 1,0 */
862 ++ *(int *)&insn[4] = 0x100c0004; /* lg 1,12(1) */
863 + if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) {
864 +- unsigned int *ij;
865 +- ij = me->core_layout.base +
866 +- me->arch.plt_offset +
867 +- me->arch.plt_size - PLT_ENTRY_SIZE;
868 +- insn[2] = 0xa7f40000 + /* j __jump_r1 */
869 +- (unsigned int)(u16)
870 +- (((unsigned long) ij - 8 -
871 +- (unsigned long) ip) / 2);
872 ++ char *jump_r1;
873 ++
874 ++ jump_r1 = plt_base + me->arch.plt_size -
875 ++ PLT_ENTRY_SIZE;
876 ++ /* brcl 0xf,__jump_r1 */
877 ++ *(short *)&insn[8] = 0xc0f4;
878 ++ *(int *)&insn[10] = (jump_r1 - (ip + 8)) / 2;
879 + } else {
880 +- insn[2] = 0x07f10000; /* br %r1 */
881 ++ *(int *)&insn[8] = 0x07f10000; /* br %r1 */
882 + }
883 +- insn[3] = (unsigned int) (val >> 32);
884 +- insn[4] = (unsigned int) val;
885 ++ *(long *)&insn[14] = val;
886 +
887 + write(ip, insn, sizeof(insn));
888 + info->plt_initialized = 1;
889 +diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
890 +index 20f8e1868853f..a50f2ff1b00e8 100644
891 +--- a/arch/s390/kernel/nmi.c
892 ++++ b/arch/s390/kernel/nmi.c
893 +@@ -273,7 +273,14 @@ static int notrace s390_validate_registers(union mci mci, int umode)
894 + /* Validate vector registers */
895 + union ctlreg0 cr0;
896 +
897 +- if (!mci.vr) {
898 ++ /*
899 ++ * The vector validity must only be checked if not running a
900 ++ * KVM guest. For KVM guests the machine check is forwarded by
901 ++ * KVM and it is the responsibility of the guest to take
902 ++ * appropriate actions. The host vector or FPU values have been
903 ++ * saved by KVM and will be restored by KVM.
904 ++ */
905 ++ if (!mci.vr && !test_cpu_flag(CIF_MCCK_GUEST)) {
906 + /*
907 + * Vector registers can't be restored. If the kernel
908 + * currently uses vector registers the system is
909 +@@ -316,11 +323,21 @@ static int notrace s390_validate_registers(union mci mci, int umode)
910 + if (cr2.gse) {
911 + if (!mci.gs) {
912 + /*
913 +- * Guarded storage register can't be restored and
914 +- * the current processes uses guarded storage.
915 +- * It has to be terminated.
916 ++ * 2 cases:
917 ++ * - machine check in kernel or userspace
918 ++ * - machine check while running SIE (KVM guest)
919 ++ * For kernel or userspace the userspace values of
920 ++ * guarded storage control can not be recreated, the
921 ++ * process must be terminated.
922 ++ * For SIE the guest values of guarded storage can not
923 ++ * be recreated. This is either due to a bug or due to
924 ++ * GS being disabled in the guest. The guest will be
925 ++ * notified by KVM code and the guests machine check
926 ++ * handling must take care of this. The host values
927 ++ * are saved by KVM and are not affected.
928 + */
929 +- kill_task = 1;
930 ++ if (!test_cpu_flag(CIF_MCCK_GUEST))
931 ++ kill_task = 1;
932 + } else {
933 + load_gs_cb((struct gs_cb *)mcesa->guarded_storage_save_area);
934 + }
935 +diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
936 +index 1e33c75ffa260..18d825a39032a 100644
937 +--- a/arch/x86/events/intel/core.c
938 ++++ b/arch/x86/events/intel/core.c
939 +@@ -6242,6 +6242,19 @@ __init int intel_pmu_init(void)
940 + pmu->num_counters = x86_pmu.num_counters;
941 + pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
942 + }
943 ++
944 ++ /*
945 ++ * Quirk: For some Alder Lake machine, when all E-cores are disabled in
946 ++ * a BIOS, the leaf 0xA will enumerate all counters of P-cores. However,
947 ++ * the X86_FEATURE_HYBRID_CPU is still set. The above codes will
948 ++ * mistakenly add extra counters for P-cores. Correct the number of
949 ++ * counters here.
950 ++ */
951 ++ if ((pmu->num_counters > 8) || (pmu->num_counters_fixed > 4)) {
952 ++ pmu->num_counters = x86_pmu.num_counters;
953 ++ pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
954 ++ }
955 ++
956 + pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters);
957 + pmu->unconstrained = (struct event_constraint)
958 + __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
959 +diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
960 +index 3660f698fb2aa..ed869443efb21 100644
961 +--- a/arch/x86/events/intel/uncore_snbep.c
962 ++++ b/arch/x86/events/intel/uncore_snbep.c
963 +@@ -5482,7 +5482,7 @@ static struct intel_uncore_type icx_uncore_imc = {
964 + .fixed_ctr_bits = 48,
965 + .fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR,
966 + .fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL,
967 +- .event_descs = hswep_uncore_imc_events,
968 ++ .event_descs = snr_uncore_imc_events,
969 + .perf_ctr = SNR_IMC_MMIO_PMON_CTR0,
970 + .event_ctl = SNR_IMC_MMIO_PMON_CTL0,
971 + .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
972 +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
973 +index 59fc339ba5282..5d3645b325e27 100644
974 +--- a/arch/x86/include/asm/kvm_host.h
975 ++++ b/arch/x86/include/asm/kvm_host.h
976 +@@ -1497,6 +1497,7 @@ struct kvm_x86_ops {
977 + };
978 +
979 + struct kvm_x86_nested_ops {
980 ++ void (*leave_nested)(struct kvm_vcpu *vcpu);
981 + int (*check_events)(struct kvm_vcpu *vcpu);
982 + bool (*hv_timer_pending)(struct kvm_vcpu *vcpu);
983 + void (*triple_fault)(struct kvm_vcpu *vcpu);
984 +diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
985 +index fc85eb17cb6d0..c757ba7605fbf 100644
986 +--- a/arch/x86/kernel/cpu/mce/amd.c
987 ++++ b/arch/x86/kernel/cpu/mce/amd.c
988 +@@ -401,7 +401,7 @@ static void threshold_restart_bank(void *_tr)
989 + u32 hi, lo;
990 +
991 + /* sysfs write might race against an offline operation */
992 +- if (this_cpu_read(threshold_banks))
993 ++ if (!this_cpu_read(threshold_banks) && !tr->set_lvt_off)
994 + return;
995 +
996 + rdmsr(tr->b->address, lo, hi);
997 +diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c
998 +index bb9a46a804bf2..baafbb37be678 100644
999 +--- a/arch/x86/kernel/cpu/mce/intel.c
1000 ++++ b/arch/x86/kernel/cpu/mce/intel.c
1001 +@@ -486,6 +486,7 @@ static void intel_ppin_init(struct cpuinfo_x86 *c)
1002 + case INTEL_FAM6_BROADWELL_X:
1003 + case INTEL_FAM6_SKYLAKE_X:
1004 + case INTEL_FAM6_ICELAKE_X:
1005 ++ case INTEL_FAM6_ICELAKE_D:
1006 + case INTEL_FAM6_SAPPHIRERAPIDS_X:
1007 + case INTEL_FAM6_XEON_PHI_KNL:
1008 + case INTEL_FAM6_XEON_PHI_KNM:
1009 +diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
1010 +index e5e597fc3c86f..add8f58d686e3 100644
1011 +--- a/arch/x86/kvm/cpuid.c
1012 ++++ b/arch/x86/kvm/cpuid.c
1013 +@@ -113,6 +113,7 @@ static int kvm_cpuid_check_equal(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2
1014 + orig = &vcpu->arch.cpuid_entries[i];
1015 + if (e2[i].function != orig->function ||
1016 + e2[i].index != orig->index ||
1017 ++ e2[i].flags != orig->flags ||
1018 + e2[i].eax != orig->eax || e2[i].ebx != orig->ebx ||
1019 + e2[i].ecx != orig->ecx || e2[i].edx != orig->edx)
1020 + return -EINVAL;
1021 +@@ -176,10 +177,26 @@ void kvm_update_pv_runtime(struct kvm_vcpu *vcpu)
1022 + vcpu->arch.pv_cpuid.features = best->eax;
1023 + }
1024 +
1025 ++/*
1026 ++ * Calculate guest's supported XCR0 taking into account guest CPUID data and
1027 ++ * supported_xcr0 (comprised of host configuration and KVM_SUPPORTED_XCR0).
1028 ++ */
1029 ++static u64 cpuid_get_supported_xcr0(struct kvm_cpuid_entry2 *entries, int nent)
1030 ++{
1031 ++ struct kvm_cpuid_entry2 *best;
1032 ++
1033 ++ best = cpuid_entry2_find(entries, nent, 0xd, 0);
1034 ++ if (!best)
1035 ++ return 0;
1036 ++
1037 ++ return (best->eax | ((u64)best->edx << 32)) & supported_xcr0;
1038 ++}
1039 ++
1040 + static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *entries,
1041 + int nent)
1042 + {
1043 + struct kvm_cpuid_entry2 *best;
1044 ++ u64 guest_supported_xcr0 = cpuid_get_supported_xcr0(entries, nent);
1045 +
1046 + best = cpuid_entry2_find(entries, nent, 1, 0);
1047 + if (best) {
1048 +@@ -218,6 +235,21 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e
1049 + vcpu->arch.ia32_misc_enable_msr &
1050 + MSR_IA32_MISC_ENABLE_MWAIT);
1051 + }
1052 ++
1053 ++ /*
1054 ++ * Bits 127:0 of the allowed SECS.ATTRIBUTES (CPUID.0x12.0x1) enumerate
1055 ++ * the supported XSAVE Feature Request Mask (XFRM), i.e. the enclave's
1056 ++ * requested XCR0 value. The enclave's XFRM must be a subset of XCRO
1057 ++ * at the time of EENTER, thus adjust the allowed XFRM by the guest's
1058 ++ * supported XCR0. Similar to XCR0 handling, FP and SSE are forced to
1059 ++ * '1' even on CPUs that don't support XSAVE.
1060 ++ */
1061 ++ best = cpuid_entry2_find(entries, nent, 0x12, 0x1);
1062 ++ if (best) {
1063 ++ best->ecx &= guest_supported_xcr0 & 0xffffffff;
1064 ++ best->edx &= guest_supported_xcr0 >> 32;
1065 ++ best->ecx |= XFEATURE_MASK_FPSSE;
1066 ++ }
1067 + }
1068 +
1069 + void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
1070 +@@ -241,27 +273,8 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
1071 + kvm_apic_set_version(vcpu);
1072 + }
1073 +
1074 +- best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
1075 +- if (!best)
1076 +- vcpu->arch.guest_supported_xcr0 = 0;
1077 +- else
1078 +- vcpu->arch.guest_supported_xcr0 =
1079 +- (best->eax | ((u64)best->edx << 32)) & supported_xcr0;
1080 +-
1081 +- /*
1082 +- * Bits 127:0 of the allowed SECS.ATTRIBUTES (CPUID.0x12.0x1) enumerate
1083 +- * the supported XSAVE Feature Request Mask (XFRM), i.e. the enclave's
1084 +- * requested XCR0 value. The enclave's XFRM must be a subset of XCRO
1085 +- * at the time of EENTER, thus adjust the allowed XFRM by the guest's
1086 +- * supported XCR0. Similar to XCR0 handling, FP and SSE are forced to
1087 +- * '1' even on CPUs that don't support XSAVE.
1088 +- */
1089 +- best = kvm_find_cpuid_entry(vcpu, 0x12, 0x1);
1090 +- if (best) {
1091 +- best->ecx &= vcpu->arch.guest_supported_xcr0 & 0xffffffff;
1092 +- best->edx &= vcpu->arch.guest_supported_xcr0 >> 32;
1093 +- best->ecx |= XFEATURE_MASK_FPSSE;
1094 +- }
1095 ++ vcpu->arch.guest_supported_xcr0 =
1096 ++ cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
1097 +
1098 + kvm_update_pv_runtime(vcpu);
1099 +
1100 +@@ -326,8 +339,14 @@ static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
1101 + * KVM_SET_CPUID{,2} again. To support this legacy behavior, check
1102 + * whether the supplied CPUID data is equal to what's already set.
1103 + */
1104 +- if (vcpu->arch.last_vmentry_cpu != -1)
1105 +- return kvm_cpuid_check_equal(vcpu, e2, nent);
1106 ++ if (vcpu->arch.last_vmentry_cpu != -1) {
1107 ++ r = kvm_cpuid_check_equal(vcpu, e2, nent);
1108 ++ if (r)
1109 ++ return r;
1110 ++
1111 ++ kvfree(e2);
1112 ++ return 0;
1113 ++ }
1114 +
1115 + r = kvm_check_cpuid(e2, nent);
1116 + if (r)
1117 +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
1118 +index 7c009867d6f23..e8e383fbe8868 100644
1119 +--- a/arch/x86/kvm/lapic.c
1120 ++++ b/arch/x86/kvm/lapic.c
1121 +@@ -2623,7 +2623,7 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
1122 + kvm_apic_set_version(vcpu);
1123 +
1124 + apic_update_ppr(apic);
1125 +- hrtimer_cancel(&apic->lapic_timer.timer);
1126 ++ cancel_apic_timer(apic);
1127 + apic->lapic_timer.expired_tscdeadline = 0;
1128 + apic_update_lvtt(apic);
1129 + apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
1130 +diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
1131 +index f8b7bc04b3e7a..a67f8bee3adc3 100644
1132 +--- a/arch/x86/kvm/svm/nested.c
1133 ++++ b/arch/x86/kvm/svm/nested.c
1134 +@@ -964,9 +964,9 @@ void svm_free_nested(struct vcpu_svm *svm)
1135 + /*
1136 + * Forcibly leave nested mode in order to be able to reset the VCPU later on.
1137 + */
1138 +-void svm_leave_nested(struct vcpu_svm *svm)
1139 ++void svm_leave_nested(struct kvm_vcpu *vcpu)
1140 + {
1141 +- struct kvm_vcpu *vcpu = &svm->vcpu;
1142 ++ struct vcpu_svm *svm = to_svm(vcpu);
1143 +
1144 + if (is_guest_mode(vcpu)) {
1145 + svm->nested.nested_run_pending = 0;
1146 +@@ -1345,7 +1345,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
1147 + return -EINVAL;
1148 +
1149 + if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
1150 +- svm_leave_nested(svm);
1151 ++ svm_leave_nested(vcpu);
1152 + svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
1153 + return 0;
1154 + }
1155 +@@ -1410,7 +1410,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
1156 + */
1157 +
1158 + if (is_guest_mode(vcpu))
1159 +- svm_leave_nested(svm);
1160 ++ svm_leave_nested(vcpu);
1161 + else
1162 + svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;
1163 +
1164 +@@ -1464,6 +1464,7 @@ static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
1165 + }
1166 +
1167 + struct kvm_x86_nested_ops svm_nested_ops = {
1168 ++ .leave_nested = svm_leave_nested,
1169 + .check_events = svm_check_nested_events,
1170 + .triple_fault = nested_svm_triple_fault,
1171 + .get_nested_state_pages = svm_get_nested_state_pages,
1172 +diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
1173 +index 5151efa424acb..3efada37272c0 100644
1174 +--- a/arch/x86/kvm/svm/svm.c
1175 ++++ b/arch/x86/kvm/svm/svm.c
1176 +@@ -290,7 +290,7 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
1177 +
1178 + if ((old_efer & EFER_SVME) != (efer & EFER_SVME)) {
1179 + if (!(efer & EFER_SVME)) {
1180 +- svm_leave_nested(svm);
1181 ++ svm_leave_nested(vcpu);
1182 + svm_set_gif(svm, true);
1183 + /* #GP intercept is still needed for vmware backdoor */
1184 + if (!enable_vmware_backdoor)
1185 +@@ -312,7 +312,11 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
1186 + return ret;
1187 + }
1188 +
1189 +- if (svm_gp_erratum_intercept)
1190 ++ /*
1191 ++ * Never intercept #GP for SEV guests, KVM can't
1192 ++ * decrypt guest memory to workaround the erratum.
1193 ++ */
1194 ++ if (svm_gp_erratum_intercept && !sev_guest(vcpu->kvm))
1195 + set_exception_intercept(svm, GP_VECTOR);
1196 + }
1197 + }
1198 +@@ -1238,9 +1242,10 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
1199 + * Guest access to VMware backdoor ports could legitimately
1200 + * trigger #GP because of TSS I/O permission bitmap.
1201 + * We intercept those #GP and allow access to them anyway
1202 +- * as VMware does.
1203 ++ * as VMware does. Don't intercept #GP for SEV guests as KVM can't
1204 ++ * decrypt guest memory to decode the faulting instruction.
1205 + */
1206 +- if (enable_vmware_backdoor)
1207 ++ if (enable_vmware_backdoor && !sev_guest(vcpu->kvm))
1208 + set_exception_intercept(svm, GP_VECTOR);
1209 +
1210 + svm_set_intercept(svm, INTERCEPT_INTR);
1211 +@@ -2301,10 +2306,6 @@ static int gp_interception(struct kvm_vcpu *vcpu)
1212 + if (error_code)
1213 + goto reinject;
1214 +
1215 +- /* All SVM instructions expect page aligned RAX */
1216 +- if (svm->vmcb->save.rax & ~PAGE_MASK)
1217 +- goto reinject;
1218 +-
1219 + /* Decode the instruction for usage later */
1220 + if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK)
1221 + goto reinject;
1222 +@@ -2322,8 +2323,13 @@ static int gp_interception(struct kvm_vcpu *vcpu)
1223 + if (!is_guest_mode(vcpu))
1224 + return kvm_emulate_instruction(vcpu,
1225 + EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);
1226 +- } else
1227 ++ } else {
1228 ++ /* All SVM instructions expect page aligned RAX */
1229 ++ if (svm->vmcb->save.rax & ~PAGE_MASK)
1230 ++ goto reinject;
1231 ++
1232 + return emulate_svm_instr(vcpu, opcode);
1233 ++ }
1234 +
1235 + reinject:
1236 + kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
1237 +@@ -4464,8 +4470,13 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, void *insn, int i
1238 + bool smep, smap, is_user;
1239 + unsigned long cr4;
1240 +
1241 ++ /* Emulation is always possible when KVM has access to all guest state. */
1242 ++ if (!sev_guest(vcpu->kvm))
1243 ++ return true;
1244 ++
1245 + /*
1246 +- * When the guest is an SEV-ES guest, emulation is not possible.
1247 ++ * Emulation is impossible for SEV-ES guests as KVM doesn't have access
1248 ++ * to guest register state.
1249 + */
1250 + if (sev_es_guest(vcpu->kvm))
1251 + return false;
1252 +@@ -4513,21 +4524,11 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, void *insn, int i
1253 + if (likely(!insn || insn_len))
1254 + return true;
1255 +
1256 +- /*
1257 +- * If RIP is invalid, go ahead with emulation which will cause an
1258 +- * internal error exit.
1259 +- */
1260 +- if (!kvm_vcpu_gfn_to_memslot(vcpu, kvm_rip_read(vcpu) >> PAGE_SHIFT))
1261 +- return true;
1262 +-
1263 + cr4 = kvm_read_cr4(vcpu);
1264 + smep = cr4 & X86_CR4_SMEP;
1265 + smap = cr4 & X86_CR4_SMAP;
1266 + is_user = svm_get_cpl(vcpu) == 3;
1267 + if (smap && (!smep || is_user)) {
1268 +- if (!sev_guest(vcpu->kvm))
1269 +- return true;
1270 +-
1271 + pr_err_ratelimited("KVM: SEV Guest triggered AMD Erratum 1096\n");
1272 + kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
1273 + }
1274 +diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
1275 +index 1c7306c370fa3..1b460e539926b 100644
1276 +--- a/arch/x86/kvm/svm/svm.h
1277 ++++ b/arch/x86/kvm/svm/svm.h
1278 +@@ -470,7 +470,7 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
1279 +
1280 + int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
1281 + u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
1282 +-void svm_leave_nested(struct vcpu_svm *svm);
1283 ++void svm_leave_nested(struct kvm_vcpu *vcpu);
1284 + void svm_free_nested(struct vcpu_svm *svm);
1285 + int svm_allocate_nested(struct vcpu_svm *svm);
1286 + int nested_svm_vmrun(struct kvm_vcpu *vcpu);
1287 +diff --git a/arch/x86/kvm/vmx/evmcs.c b/arch/x86/kvm/vmx/evmcs.c
1288 +index ba6f99f584ac3..09fac0ddac8bd 100644
1289 +--- a/arch/x86/kvm/vmx/evmcs.c
1290 ++++ b/arch/x86/kvm/vmx/evmcs.c
1291 +@@ -12,8 +12,6 @@
1292 +
1293 + DEFINE_STATIC_KEY_FALSE(enable_evmcs);
1294 +
1295 +-#if IS_ENABLED(CONFIG_HYPERV)
1296 +-
1297 + #define EVMCS1_OFFSET(x) offsetof(struct hv_enlightened_vmcs, x)
1298 + #define EVMCS1_FIELD(number, name, clean_field)[ROL16(number, 6)] = \
1299 + {EVMCS1_OFFSET(name), clean_field}
1300 +@@ -296,6 +294,7 @@ const struct evmcs_field vmcs_field_to_evmcs_1[] = {
1301 + };
1302 + const unsigned int nr_evmcs_1_fields = ARRAY_SIZE(vmcs_field_to_evmcs_1);
1303 +
1304 ++#if IS_ENABLED(CONFIG_HYPERV)
1305 + __init void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf)
1306 + {
1307 + vmcs_conf->pin_based_exec_ctrl &= ~EVMCS1_UNSUPPORTED_PINCTRL;
1308 +diff --git a/arch/x86/kvm/vmx/evmcs.h b/arch/x86/kvm/vmx/evmcs.h
1309 +index 16731d2cf231b..6255fa7167720 100644
1310 +--- a/arch/x86/kvm/vmx/evmcs.h
1311 ++++ b/arch/x86/kvm/vmx/evmcs.h
1312 +@@ -63,8 +63,6 @@ DECLARE_STATIC_KEY_FALSE(enable_evmcs);
1313 + #define EVMCS1_UNSUPPORTED_VMENTRY_CTRL (VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
1314 + #define EVMCS1_UNSUPPORTED_VMFUNC (VMX_VMFUNC_EPTP_SWITCHING)
1315 +
1316 +-#if IS_ENABLED(CONFIG_HYPERV)
1317 +-
1318 + struct evmcs_field {
1319 + u16 offset;
1320 + u16 clean_field;
1321 +@@ -73,26 +71,56 @@ struct evmcs_field {
1322 + extern const struct evmcs_field vmcs_field_to_evmcs_1[];
1323 + extern const unsigned int nr_evmcs_1_fields;
1324 +
1325 +-static __always_inline int get_evmcs_offset(unsigned long field,
1326 +- u16 *clean_field)
1327 ++static __always_inline int evmcs_field_offset(unsigned long field,
1328 ++ u16 *clean_field)
1329 + {
1330 + unsigned int index = ROL16(field, 6);
1331 + const struct evmcs_field *evmcs_field;
1332 +
1333 +- if (unlikely(index >= nr_evmcs_1_fields)) {
1334 +- WARN_ONCE(1, "KVM: accessing unsupported EVMCS field %lx\n",
1335 +- field);
1336 ++ if (unlikely(index >= nr_evmcs_1_fields))
1337 + return -ENOENT;
1338 +- }
1339 +
1340 + evmcs_field = &vmcs_field_to_evmcs_1[index];
1341 +
1342 ++ /*
1343 ++ * Use offset=0 to detect holes in eVMCS. This offset belongs to
1344 ++ * 'revision_id' but this field has no encoding and is supposed to
1345 ++ * be accessed directly.
1346 ++ */
1347 ++ if (unlikely(!evmcs_field->offset))
1348 ++ return -ENOENT;
1349 ++
1350 + if (clean_field)
1351 + *clean_field = evmcs_field->clean_field;
1352 +
1353 + return evmcs_field->offset;
1354 + }
1355 +
1356 ++static inline u64 evmcs_read_any(struct hv_enlightened_vmcs *evmcs,
1357 ++ unsigned long field, u16 offset)
1358 ++{
1359 ++ /*
1360 ++ * vmcs12_read_any() doesn't care whether the supplied structure
1361 ++ * is 'struct vmcs12' or 'struct hv_enlightened_vmcs' as it takes
1362 ++ * the exact offset of the required field, use it for convenience
1363 ++ * here.
1364 ++ */
1365 ++ return vmcs12_read_any((void *)evmcs, field, offset);
1366 ++}
1367 ++
1368 ++#if IS_ENABLED(CONFIG_HYPERV)
1369 ++
1370 ++static __always_inline int get_evmcs_offset(unsigned long field,
1371 ++ u16 *clean_field)
1372 ++{
1373 ++ int offset = evmcs_field_offset(field, clean_field);
1374 ++
1375 ++ WARN_ONCE(offset < 0, "KVM: accessing unsupported EVMCS field %lx\n",
1376 ++ field);
1377 ++
1378 ++ return offset;
1379 ++}
1380 ++
1381 + static __always_inline void evmcs_write64(unsigned long field, u64 value)
1382 + {
1383 + u16 clean_field;
1384 +diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
1385 +index 9c941535f78c0..c605c2c01394b 100644
1386 +--- a/arch/x86/kvm/vmx/nested.c
1387 ++++ b/arch/x86/kvm/vmx/nested.c
1388 +@@ -7,6 +7,7 @@
1389 + #include <asm/mmu_context.h>
1390 +
1391 + #include "cpuid.h"
1392 ++#include "evmcs.h"
1393 + #include "hyperv.h"
1394 + #include "mmu.h"
1395 + #include "nested.h"
1396 +@@ -5074,27 +5075,49 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
1397 + if (!nested_vmx_check_permission(vcpu))
1398 + return 1;
1399 +
1400 +- /*
1401 +- * In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA,
1402 +- * any VMREAD sets the ALU flags for VMfailInvalid.
1403 +- */
1404 +- if (vmx->nested.current_vmptr == INVALID_GPA ||
1405 +- (is_guest_mode(vcpu) &&
1406 +- get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA))
1407 +- return nested_vmx_failInvalid(vcpu);
1408 +-
1409 + /* Decode instruction info and find the field to read */
1410 + field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf));
1411 +
1412 +- offset = vmcs_field_to_offset(field);
1413 +- if (offset < 0)
1414 +- return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
1415 ++ if (!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
1416 ++ /*
1417 ++ * In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA,
1418 ++ * any VMREAD sets the ALU flags for VMfailInvalid.
1419 ++ */
1420 ++ if (vmx->nested.current_vmptr == INVALID_GPA ||
1421 ++ (is_guest_mode(vcpu) &&
1422 ++ get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA))
1423 ++ return nested_vmx_failInvalid(vcpu);
1424 +
1425 +- if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field))
1426 +- copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
1427 ++ offset = get_vmcs12_field_offset(field);
1428 ++ if (offset < 0)
1429 ++ return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
1430 ++
1431 ++ if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field))
1432 ++ copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
1433 ++
1434 ++ /* Read the field, zero-extended to a u64 value */
1435 ++ value = vmcs12_read_any(vmcs12, field, offset);
1436 ++ } else {
1437 ++ /*
1438 ++ * Hyper-V TLFS (as of 6.0b) explicitly states, that while an
1439 ++ * enlightened VMCS is active VMREAD/VMWRITE instructions are
1440 ++ * unsupported. Unfortunately, certain versions of Windows 11
1441 ++ * don't comply with this requirement which is not enforced in
1442 ++ * genuine Hyper-V. Allow VMREAD from an enlightened VMCS as a
1443 ++ * workaround, as misbehaving guests will panic on VM-Fail.
1444 ++ * Note, enlightened VMCS is incompatible with shadow VMCS so
1445 ++ * all VMREADs from L2 should go to L1.
1446 ++ */
1447 ++ if (WARN_ON_ONCE(is_guest_mode(vcpu)))
1448 ++ return nested_vmx_failInvalid(vcpu);
1449 +
1450 +- /* Read the field, zero-extended to a u64 value */
1451 +- value = vmcs12_read_any(vmcs12, field, offset);
1452 ++ offset = evmcs_field_offset(field, NULL);
1453 ++ if (offset < 0)
1454 ++ return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
1455 ++
1456 ++ /* Read the field, zero-extended to a u64 value */
1457 ++ value = evmcs_read_any(vmx->nested.hv_evmcs, field, offset);
1458 ++ }
1459 +
1460 + /*
1461 + * Now copy part of this value to register or memory, as requested.
1462 +@@ -5189,7 +5212,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
1463 +
1464 + field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf));
1465 +
1466 +- offset = vmcs_field_to_offset(field);
1467 ++ offset = get_vmcs12_field_offset(field);
1468 + if (offset < 0)
1469 + return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
1470 +
1471 +@@ -6435,7 +6458,7 @@ static u64 nested_vmx_calc_vmcs_enum_msr(void)
1472 + max_idx = 0;
1473 + for (i = 0; i < nr_vmcs12_fields; i++) {
1474 + /* The vmcs12 table is very, very sparsely populated. */
1475 +- if (!vmcs_field_to_offset_table[i])
1476 ++ if (!vmcs12_field_offsets[i])
1477 + continue;
1478 +
1479 + idx = vmcs_field_index(VMCS12_IDX_TO_ENC(i));
1480 +@@ -6744,6 +6767,7 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
1481 + }
1482 +
1483 + struct kvm_x86_nested_ops vmx_nested_ops = {
1484 ++ .leave_nested = vmx_leave_nested,
1485 + .check_events = vmx_check_nested_events,
1486 + .hv_timer_pending = nested_vmx_preemption_timer_pending,
1487 + .triple_fault = nested_vmx_triple_fault,
1488 +diff --git a/arch/x86/kvm/vmx/vmcs12.c b/arch/x86/kvm/vmx/vmcs12.c
1489 +index cab6ba7a5005e..2251b60920f81 100644
1490 +--- a/arch/x86/kvm/vmx/vmcs12.c
1491 ++++ b/arch/x86/kvm/vmx/vmcs12.c
1492 +@@ -8,7 +8,7 @@
1493 + FIELD(number, name), \
1494 + [ROL16(number##_HIGH, 6)] = VMCS12_OFFSET(name) + sizeof(u32)
1495 +
1496 +-const unsigned short vmcs_field_to_offset_table[] = {
1497 ++const unsigned short vmcs12_field_offsets[] = {
1498 + FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
1499 + FIELD(POSTED_INTR_NV, posted_intr_nv),
1500 + FIELD(GUEST_ES_SELECTOR, guest_es_selector),
1501 +@@ -151,4 +151,4 @@ const unsigned short vmcs_field_to_offset_table[] = {
1502 + FIELD(HOST_RSP, host_rsp),
1503 + FIELD(HOST_RIP, host_rip),
1504 + };
1505 +-const unsigned int nr_vmcs12_fields = ARRAY_SIZE(vmcs_field_to_offset_table);
1506 ++const unsigned int nr_vmcs12_fields = ARRAY_SIZE(vmcs12_field_offsets);
1507 +diff --git a/arch/x86/kvm/vmx/vmcs12.h b/arch/x86/kvm/vmx/vmcs12.h
1508 +index 2a45f026ee11d..746129ddd5ae0 100644
1509 +--- a/arch/x86/kvm/vmx/vmcs12.h
1510 ++++ b/arch/x86/kvm/vmx/vmcs12.h
1511 +@@ -361,10 +361,10 @@ static inline void vmx_check_vmcs12_offsets(void)
1512 + CHECK_OFFSET(guest_pml_index, 996);
1513 + }
1514 +
1515 +-extern const unsigned short vmcs_field_to_offset_table[];
1516 ++extern const unsigned short vmcs12_field_offsets[];
1517 + extern const unsigned int nr_vmcs12_fields;
1518 +
1519 +-static inline short vmcs_field_to_offset(unsigned long field)
1520 ++static inline short get_vmcs12_field_offset(unsigned long field)
1521 + {
1522 + unsigned short offset;
1523 + unsigned int index;
1524 +@@ -377,7 +377,7 @@ static inline short vmcs_field_to_offset(unsigned long field)
1525 + return -ENOENT;
1526 +
1527 + index = array_index_nospec(index, nr_vmcs12_fields);
1528 +- offset = vmcs_field_to_offset_table[index];
1529 ++ offset = vmcs12_field_offsets[index];
1530 + if (offset == 0)
1531 + return -ENOENT;
1532 + return offset;
1533 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1534 +index 4acf43111e9c2..0714fa0e7ede0 100644
1535 +--- a/arch/x86/kvm/x86.c
1536 ++++ b/arch/x86/kvm/x86.c
1537 +@@ -3508,6 +3508,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1538 + if (data & ~supported_xss)
1539 + return 1;
1540 + vcpu->arch.ia32_xss = data;
1541 ++ kvm_update_cpuid_runtime(vcpu);
1542 + break;
1543 + case MSR_SMI_COUNT:
1544 + if (!msr_info->host_initiated)
1545 +@@ -4784,8 +4785,10 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
1546 + vcpu->arch.apic->sipi_vector = events->sipi_vector;
1547 +
1548 + if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
1549 +- if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm)
1550 ++ if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
1551 ++ kvm_x86_ops.nested_ops->leave_nested(vcpu);
1552 + kvm_smm_changed(vcpu, events->smi.smm);
1553 ++ }
1554 +
1555 + vcpu->arch.smi_pending = events->smi.pending;
1556 +
1557 +@@ -11062,7 +11065,8 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
1558 +
1559 + vcpu->arch.msr_misc_features_enables = 0;
1560 +
1561 +- vcpu->arch.xcr0 = XFEATURE_MASK_FP;
1562 ++ __kvm_set_xcr(vcpu, 0, XFEATURE_MASK_FP);
1563 ++ __kvm_set_msr(vcpu, MSR_IA32_XSS, 0, true);
1564 + }
1565 +
1566 + /* All GPRs except RDX (handled below) are zeroed on RESET/INIT. */
1567 +@@ -11079,8 +11083,6 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
1568 + cpuid_0x1 = kvm_find_cpuid_entry(vcpu, 1, 0);
1569 + kvm_rdx_write(vcpu, cpuid_0x1 ? cpuid_0x1->eax : 0x600);
1570 +
1571 +- vcpu->arch.ia32_xss = 0;
1572 +-
1573 + static_call(kvm_x86_vcpu_reset)(vcpu, init_event);
1574 +
1575 + kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
1576 +diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
1577 +index 2edd86649468f..615a76d700194 100644
1578 +--- a/arch/x86/pci/fixup.c
1579 ++++ b/arch/x86/pci/fixup.c
1580 +@@ -353,8 +353,8 @@ static void pci_fixup_video(struct pci_dev *pdev)
1581 + }
1582 + }
1583 + }
1584 +-DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
1585 +- PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_video);
1586 ++DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID,
1587 ++ PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_video);
1588 +
1589 +
1590 + static const struct dmi_system_id msi_k8t_dmi_table[] = {
1591 +diff --git a/block/bio.c b/block/bio.c
1592 +index 15ab0d6d1c06e..99cad261ec531 100644
1593 +--- a/block/bio.c
1594 ++++ b/block/bio.c
1595 +@@ -569,7 +569,8 @@ static void bio_truncate(struct bio *bio, unsigned new_size)
1596 + offset = new_size - done;
1597 + else
1598 + offset = 0;
1599 +- zero_user(bv.bv_page, offset, bv.bv_len - offset);
1600 ++ zero_user(bv.bv_page, bv.bv_offset + offset,
1601 ++ bv.bv_len - offset);
1602 + truncated = true;
1603 + }
1604 + done += bv.bv_len;
1605 +diff --git a/block/blk-core.c b/block/blk-core.c
1606 +index 1378d084c770f..9ebeb9bdf5832 100644
1607 +--- a/block/blk-core.c
1608 ++++ b/block/blk-core.c
1609 +@@ -1258,20 +1258,32 @@ void __blk_account_io_start(struct request *rq)
1610 + }
1611 +
1612 + static unsigned long __part_start_io_acct(struct block_device *part,
1613 +- unsigned int sectors, unsigned int op)
1614 ++ unsigned int sectors, unsigned int op,
1615 ++ unsigned long start_time)
1616 + {
1617 + const int sgrp = op_stat_group(op);
1618 +- unsigned long now = READ_ONCE(jiffies);
1619 +
1620 + part_stat_lock();
1621 +- update_io_ticks(part, now, false);
1622 ++ update_io_ticks(part, start_time, false);
1623 + part_stat_inc(part, ios[sgrp]);
1624 + part_stat_add(part, sectors[sgrp], sectors);
1625 + part_stat_local_inc(part, in_flight[op_is_write(op)]);
1626 + part_stat_unlock();
1627 +
1628 +- return now;
1629 ++ return start_time;
1630 ++}
1631 ++
1632 ++/**
1633 ++ * bio_start_io_acct_time - start I/O accounting for bio based drivers
1634 ++ * @bio: bio to start account for
1635 ++ * @start_time: start time that should be passed back to bio_end_io_acct().
1636 ++ */
1637 ++void bio_start_io_acct_time(struct bio *bio, unsigned long start_time)
1638 ++{
1639 ++ __part_start_io_acct(bio->bi_bdev, bio_sectors(bio),
1640 ++ bio_op(bio), start_time);
1641 + }
1642 ++EXPORT_SYMBOL_GPL(bio_start_io_acct_time);
1643 +
1644 + /**
1645 + * bio_start_io_acct - start I/O accounting for bio based drivers
1646 +@@ -1281,14 +1293,15 @@ static unsigned long __part_start_io_acct(struct block_device *part,
1647 + */
1648 + unsigned long bio_start_io_acct(struct bio *bio)
1649 + {
1650 +- return __part_start_io_acct(bio->bi_bdev, bio_sectors(bio), bio_op(bio));
1651 ++ return __part_start_io_acct(bio->bi_bdev, bio_sectors(bio),
1652 ++ bio_op(bio), jiffies);
1653 + }
1654 + EXPORT_SYMBOL_GPL(bio_start_io_acct);
1655 +
1656 + unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
1657 + unsigned int op)
1658 + {
1659 +- return __part_start_io_acct(disk->part0, sectors, op);
1660 ++ return __part_start_io_acct(disk->part0, sectors, op, jiffies);
1661 + }
1662 + EXPORT_SYMBOL(disk_start_io_acct);
1663 +
1664 +diff --git a/block/blk-ia-ranges.c b/block/blk-ia-ranges.c
1665 +index b925f3db3ab7a..18c68d8b9138e 100644
1666 +--- a/block/blk-ia-ranges.c
1667 ++++ b/block/blk-ia-ranges.c
1668 +@@ -144,7 +144,7 @@ int disk_register_independent_access_ranges(struct gendisk *disk,
1669 + &q->kobj, "%s", "independent_access_ranges");
1670 + if (ret) {
1671 + q->ia_ranges = NULL;
1672 +- kfree(iars);
1673 ++ kobject_put(&iars->kobj);
1674 + return ret;
1675 + }
1676 +
1677 +diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
1678 +index ae79c33001297..7de3f5b6e8d0a 100644
1679 +--- a/drivers/firmware/efi/efi.c
1680 ++++ b/drivers/firmware/efi/efi.c
1681 +@@ -722,6 +722,13 @@ void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
1682 + systab_hdr->revision >> 16,
1683 + systab_hdr->revision & 0xffff,
1684 + vendor);
1685 ++
1686 ++ if (IS_ENABLED(CONFIG_X86_64) &&
1687 ++ systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
1688 ++ !strcmp(vendor, "Apple")) {
1689 ++ pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
1690 ++ efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
1691 ++ }
1692 + }
1693 +
1694 + static __initdata char memory_type_name[][13] = {
1695 +diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
1696 +index 2363fee9211c9..9cc556013d085 100644
1697 +--- a/drivers/firmware/efi/libstub/arm64-stub.c
1698 ++++ b/drivers/firmware/efi/libstub/arm64-stub.c
1699 +@@ -119,9 +119,9 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
1700 + if (image->image_base != _text)
1701 + efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n");
1702 +
1703 +- if (!IS_ALIGNED((u64)_text, EFI_KIMG_ALIGN))
1704 +- efi_err("FIRMWARE BUG: kernel image not aligned on %ldk boundary\n",
1705 +- EFI_KIMG_ALIGN >> 10);
1706 ++ if (!IS_ALIGNED((u64)_text, SEGMENT_ALIGN))
1707 ++ efi_err("FIRMWARE BUG: kernel image not aligned on %dk boundary\n",
1708 ++ SEGMENT_ALIGN >> 10);
1709 +
1710 + kernel_size = _edata - _text;
1711 + kernel_memsize = kernel_size + (_end - _edata);
1712 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
1713 +index fab8faf345604..f999638a04ed6 100644
1714 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
1715 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
1716 +@@ -1523,6 +1523,87 @@ static const u16 amdgpu_unsupported_pciidlist[] = {
1717 + 0x99A0,
1718 + 0x99A2,
1719 + 0x99A4,
1720 ++ /* radeon secondary ids */
1721 ++ 0x3171,
1722 ++ 0x3e70,
1723 ++ 0x4164,
1724 ++ 0x4165,
1725 ++ 0x4166,
1726 ++ 0x4168,
1727 ++ 0x4170,
1728 ++ 0x4171,
1729 ++ 0x4172,
1730 ++ 0x4173,
1731 ++ 0x496e,
1732 ++ 0x4a69,
1733 ++ 0x4a6a,
1734 ++ 0x4a6b,
1735 ++ 0x4a70,
1736 ++ 0x4a74,
1737 ++ 0x4b69,
1738 ++ 0x4b6b,
1739 ++ 0x4b6c,
1740 ++ 0x4c6e,
1741 ++ 0x4e64,
1742 ++ 0x4e65,
1743 ++ 0x4e66,
1744 ++ 0x4e67,
1745 ++ 0x4e68,
1746 ++ 0x4e69,
1747 ++ 0x4e6a,
1748 ++ 0x4e71,
1749 ++ 0x4f73,
1750 ++ 0x5569,
1751 ++ 0x556b,
1752 ++ 0x556d,
1753 ++ 0x556f,
1754 ++ 0x5571,
1755 ++ 0x5854,
1756 ++ 0x5874,
1757 ++ 0x5940,
1758 ++ 0x5941,
1759 ++ 0x5b72,
1760 ++ 0x5b73,
1761 ++ 0x5b74,
1762 ++ 0x5b75,
1763 ++ 0x5d44,
1764 ++ 0x5d45,
1765 ++ 0x5d6d,
1766 ++ 0x5d6f,
1767 ++ 0x5d72,
1768 ++ 0x5d77,
1769 ++ 0x5e6b,
1770 ++ 0x5e6d,
1771 ++ 0x7120,
1772 ++ 0x7124,
1773 ++ 0x7129,
1774 ++ 0x712e,
1775 ++ 0x712f,
1776 ++ 0x7162,
1777 ++ 0x7163,
1778 ++ 0x7166,
1779 ++ 0x7167,
1780 ++ 0x7172,
1781 ++ 0x7173,
1782 ++ 0x71a0,
1783 ++ 0x71a1,
1784 ++ 0x71a3,
1785 ++ 0x71a7,
1786 ++ 0x71bb,
1787 ++ 0x71e0,
1788 ++ 0x71e1,
1789 ++ 0x71e2,
1790 ++ 0x71e6,
1791 ++ 0x71e7,
1792 ++ 0x71f2,
1793 ++ 0x7269,
1794 ++ 0x726b,
1795 ++ 0x726e,
1796 ++ 0x72a0,
1797 ++ 0x72a8,
1798 ++ 0x72b1,
1799 ++ 0x72b3,
1800 ++ 0x793f,
1801 + };
1802 +
1803 + static const struct pci_device_id pciidlist[] = {
1804 +diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
1805 +index 6b248cd2a461c..b9c7407563784 100644
1806 +--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
1807 ++++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
1808 +@@ -503,7 +503,6 @@ static void dcn_bw_calc_rq_dlg_ttu(
1809 + //input[in_idx].dout.output_standard;
1810 +
1811 + /*todo: soc->sr_enter_plus_exit_time??*/
1812 +- dlg_sys_param->t_srx_delay_us = dc->dcn_ip->dcfclk_cstate_latency / v->dcf_clk_deep_sleep;
1813 +
1814 + dml1_rq_dlg_get_rq_params(dml, rq_param, &input->pipe.src);
1815 + dml1_extract_rq_regs(dml, rq_regs, rq_param);
1816 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
1817 +index 98852b5862956..b52046bb78dc8 100644
1818 +--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
1819 ++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
1820 +@@ -1880,7 +1880,6 @@ noinline bool dcn30_internal_validate_bw(
1821 + dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
1822 + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
1823 +
1824 +- DC_FP_START();
1825 + if (!pipe_cnt) {
1826 + out = true;
1827 + goto validate_out;
1828 +@@ -2106,7 +2105,6 @@ validate_fail:
1829 + out = false;
1830 +
1831 + validate_out:
1832 +- DC_FP_END();
1833 + return out;
1834 + }
1835 +
1836 +@@ -2308,7 +2306,9 @@ bool dcn30_validate_bandwidth(struct dc *dc,
1837 +
1838 + BW_VAL_TRACE_COUNT();
1839 +
1840 ++ DC_FP_START();
1841 + out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
1842 ++ DC_FP_END();
1843 +
1844 + if (pipe_cnt == 0)
1845 + goto validate_out;
1846 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
1847 +index e472b729d8690..9254da120e615 100644
1848 +--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
1849 ++++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
1850 +@@ -1391,6 +1391,17 @@ static void set_wm_ranges(
1851 + pp_smu->nv_funcs.set_wm_ranges(&pp_smu->nv_funcs.pp_smu, &ranges);
1852 + }
1853 +
1854 ++static void dcn301_calculate_wm_and_dlg(
1855 ++ struct dc *dc, struct dc_state *context,
1856 ++ display_e2e_pipe_params_st *pipes,
1857 ++ int pipe_cnt,
1858 ++ int vlevel)
1859 ++{
1860 ++ DC_FP_START();
1861 ++ dcn301_calculate_wm_and_dlg_fp(dc, context, pipes, pipe_cnt, vlevel);
1862 ++ DC_FP_END();
1863 ++}
1864 ++
1865 + static struct resource_funcs dcn301_res_pool_funcs = {
1866 + .destroy = dcn301_destroy_resource_pool,
1867 + .link_enc_create = dcn301_link_encoder_create,
1868 +diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
1869 +index 246071c72f6bf..548cdef8a8ade 100644
1870 +--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
1871 ++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
1872 +@@ -1576,8 +1576,6 @@ void dml20_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
1873 + dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib,
1874 + e2e_pipe_param,
1875 + num_pipes);
1876 +- dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency
1877 +- / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated
1878 +
1879 + print__dlg_sys_params_st(mode_lib, &dlg_sys_param);
1880 +
1881 +diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
1882 +index 015e7f2c0b160..0fc9f3e3ffaef 100644
1883 +--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
1884 ++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
1885 +@@ -1577,8 +1577,6 @@ void dml20v2_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
1886 + dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib,
1887 + e2e_pipe_param,
1888 + num_pipes);
1889 +- dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency
1890 +- / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated
1891 +
1892 + print__dlg_sys_params_st(mode_lib, &dlg_sys_param);
1893 +
1894 +diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
1895 +index 46c433c0bcb0f..c2807ab8bf5ab 100644
1896 +--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
1897 ++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
1898 +@@ -1688,8 +1688,6 @@ void dml21_rq_dlg_get_dlg_reg(
1899 + mode_lib,
1900 + e2e_pipe_param,
1901 + num_pipes);
1902 +- dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency
1903 +- / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated
1904 +
1905 + print__dlg_sys_params_st(mode_lib, &dlg_sys_param);
1906 +
1907 +diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
1908 +index aef8542700544..747167083dea6 100644
1909 +--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
1910 ++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
1911 +@@ -1858,8 +1858,6 @@ void dml30_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
1912 + dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib,
1913 + e2e_pipe_param,
1914 + num_pipes);
1915 +- dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency
1916 +- / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated
1917 +
1918 + print__dlg_sys_params_st(mode_lib, &dlg_sys_param);
1919 +
1920 +diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
1921 +index 94c32832a0e7b..0a7a338649731 100644
1922 +--- a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
1923 ++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
1924 +@@ -327,7 +327,7 @@ void dcn301_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info)
1925 + dcn3_01_soc.sr_exit_time_us = bb_info.dram_sr_exit_latency_100ns * 10;
1926 + }
1927 +
1928 +-void dcn301_calculate_wm_and_dlg(struct dc *dc,
1929 ++void dcn301_calculate_wm_and_dlg_fp(struct dc *dc,
1930 + struct dc_state *context,
1931 + display_e2e_pipe_params_st *pipes,
1932 + int pipe_cnt,
1933 +diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h
1934 +index fc7065d178422..774b0fdfc80be 100644
1935 +--- a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h
1936 ++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h
1937 +@@ -34,7 +34,7 @@ void dcn301_fpu_set_wm_ranges(int i,
1938 +
1939 + void dcn301_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info);
1940 +
1941 +-void dcn301_calculate_wm_and_dlg(struct dc *dc,
1942 ++void dcn301_calculate_wm_and_dlg_fp(struct dc *dc,
1943 + struct dc_state *context,
1944 + display_e2e_pipe_params_st *pipes,
1945 + int pipe_cnt,
1946 +diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
1947 +index d46a2733024ce..8f9f1d607f7cb 100644
1948 +--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
1949 ++++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
1950 +@@ -546,7 +546,6 @@ struct _vcs_dpi_display_dlg_sys_params_st {
1951 + double t_sr_wm_us;
1952 + double t_extra_us;
1953 + double mem_trip_us;
1954 +- double t_srx_delay_us;
1955 + double deepsleep_dcfclk_mhz;
1956 + double total_flip_bw;
1957 + unsigned int total_flip_bytes;
1958 +diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
1959 +index 71ea503cb32ff..412e75eb47041 100644
1960 +--- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
1961 ++++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
1962 +@@ -141,9 +141,6 @@ void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, const struct _v
1963 + dml_print("DML_RQ_DLG_CALC: t_urg_wm_us = %3.2f\n", dlg_sys_param->t_urg_wm_us);
1964 + dml_print("DML_RQ_DLG_CALC: t_sr_wm_us = %3.2f\n", dlg_sys_param->t_sr_wm_us);
1965 + dml_print("DML_RQ_DLG_CALC: t_extra_us = %3.2f\n", dlg_sys_param->t_extra_us);
1966 +- dml_print(
1967 +- "DML_RQ_DLG_CALC: t_srx_delay_us = %3.2f\n",
1968 +- dlg_sys_param->t_srx_delay_us);
1969 + dml_print(
1970 + "DML_RQ_DLG_CALC: deepsleep_dcfclk_mhz = %3.2f\n",
1971 + dlg_sys_param->deepsleep_dcfclk_mhz);
1972 +diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
1973 +index 59dc2c5b58dd7..3df559c591f89 100644
1974 +--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
1975 ++++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
1976 +@@ -1331,10 +1331,6 @@ void dml1_rq_dlg_get_dlg_params(
1977 + if (dual_plane)
1978 + DTRACE("DLG: %s: swath_height_c = %d", __func__, swath_height_c);
1979 +
1980 +- DTRACE(
1981 +- "DLG: %s: t_srx_delay_us = %3.2f",
1982 +- __func__,
1983 +- (double) dlg_sys_param->t_srx_delay_us);
1984 + DTRACE("DLG: %s: line_time_in_us = %3.2f", __func__, (double) line_time_in_us);
1985 + DTRACE("DLG: %s: vupdate_offset = %d", __func__, vupdate_offset);
1986 + DTRACE("DLG: %s: vupdate_width = %d", __func__, vupdate_width);
1987 +diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h
1988 +index d9eb353a4bf09..dbe1cc620f6e6 100644
1989 +--- a/drivers/gpu/drm/ast/ast_tables.h
1990 ++++ b/drivers/gpu/drm/ast/ast_tables.h
1991 +@@ -282,8 +282,6 @@ static const struct ast_vbios_enhtable res_1360x768[] = {
1992 + };
1993 +
1994 + static const struct ast_vbios_enhtable res_1600x900[] = {
1995 +- {1800, 1600, 24, 80, 1000, 900, 1, 3, VCLK108, /* 60Hz */
1996 +- (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 3, 0x3A },
1997 + {1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* 60Hz CVT RB */
1998 + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
1999 + AST2500PreCatchCRT), 60, 1, 0x3A },
2000 +diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
2001 +index ff1416cd609a5..a1e4c7905ebbe 100644
2002 +--- a/drivers/gpu/drm/drm_atomic.c
2003 ++++ b/drivers/gpu/drm/drm_atomic.c
2004 +@@ -1310,8 +1310,10 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
2005 +
2006 + DRM_DEBUG_ATOMIC("checking %p\n", state);
2007 +
2008 +- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
2009 +- requested_crtc |= drm_crtc_mask(crtc);
2010 ++ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
2011 ++ if (new_crtc_state->enable)
2012 ++ requested_crtc |= drm_crtc_mask(crtc);
2013 ++ }
2014 +
2015 + for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
2016 + ret = drm_atomic_plane_check(old_plane_state, new_plane_state);
2017 +@@ -1360,8 +1362,10 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
2018 + }
2019 + }
2020 +
2021 +- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
2022 +- affected_crtc |= drm_crtc_mask(crtc);
2023 ++ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
2024 ++ if (new_crtc_state->enable)
2025 ++ affected_crtc |= drm_crtc_mask(crtc);
2026 ++ }
2027 +
2028 + /*
2029 + * For commits that allow modesets drivers can add other CRTCs to the
2030 +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
2031 +index 225fa5879ebd9..90488ab8c6d8e 100644
2032 +--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
2033 ++++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
2034 +@@ -469,8 +469,8 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
2035 + return -EINVAL;
2036 + }
2037 +
2038 +- if (args->stream_size > SZ_64K || args->nr_relocs > SZ_64K ||
2039 +- args->nr_bos > SZ_64K || args->nr_pmrs > 128) {
2040 ++ if (args->stream_size > SZ_128K || args->nr_relocs > SZ_128K ||
2041 ++ args->nr_bos > SZ_128K || args->nr_pmrs > 128) {
2042 + DRM_ERROR("submit arguments out of size limits\n");
2043 + return -EINVAL;
2044 + }
2045 +diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
2046 +index 78aad5216a613..a305ff7e8c6fb 100644
2047 +--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
2048 ++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
2049 +@@ -1557,6 +1557,8 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
2050 + for (i = 0; i < gpu->nr_rings; i++)
2051 + a6xx_gpu->shadow[i] = 0;
2052 +
2053 ++ gpu->suspend_count++;
2054 ++
2055 + return 0;
2056 + }
2057 +
2058 +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
2059 +index a98e964c3b6fa..355894a3b48c3 100644
2060 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
2061 ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
2062 +@@ -26,9 +26,16 @@ static void dpu_setup_dspp_pcc(struct dpu_hw_dspp *ctx,
2063 + struct dpu_hw_pcc_cfg *cfg)
2064 + {
2065 +
2066 +- u32 base = ctx->cap->sblk->pcc.base;
2067 ++ u32 base;
2068 +
2069 +- if (!ctx || !base) {
2070 ++ if (!ctx) {
2071 ++ DRM_ERROR("invalid ctx %pK\n", ctx);
2072 ++ return;
2073 ++ }
2074 ++
2075 ++ base = ctx->cap->sblk->pcc.base;
2076 ++
2077 ++ if (!base) {
2078 + DRM_ERROR("invalid ctx %pK pcc base 0x%x\n", ctx, base);
2079 + return;
2080 + }
2081 +diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
2082 +index fc280cc434943..122fadcf7cc1e 100644
2083 +--- a/drivers/gpu/drm/msm/dsi/dsi.c
2084 ++++ b/drivers/gpu/drm/msm/dsi/dsi.c
2085 +@@ -40,7 +40,12 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
2086 +
2087 + of_node_put(phy_node);
2088 +
2089 +- if (!phy_pdev || !msm_dsi->phy) {
2090 ++ if (!phy_pdev) {
2091 ++ DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
2092 ++ return -EPROBE_DEFER;
2093 ++ }
2094 ++ if (!msm_dsi->phy) {
2095 ++ put_device(&phy_pdev->dev);
2096 + DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
2097 + return -EPROBE_DEFER;
2098 + }
2099 +diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
2100 +index 9842e04b58580..baa6af0c3bccf 100644
2101 +--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
2102 ++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
2103 +@@ -808,12 +808,14 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
2104 + struct msm_dsi_phy_clk_request *clk_req,
2105 + struct msm_dsi_phy_shared_timings *shared_timings)
2106 + {
2107 +- struct device *dev = &phy->pdev->dev;
2108 ++ struct device *dev;
2109 + int ret;
2110 +
2111 + if (!phy || !phy->cfg->ops.enable)
2112 + return -EINVAL;
2113 +
2114 ++ dev = &phy->pdev->dev;
2115 ++
2116 + ret = dsi_phy_enable_resource(phy);
2117 + if (ret) {
2118 + DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n",
2119 +diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
2120 +index 75b64e6ae0350..a439794a32e81 100644
2121 +--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
2122 ++++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
2123 +@@ -95,10 +95,15 @@ static int msm_hdmi_get_phy(struct hdmi *hdmi)
2124 +
2125 + of_node_put(phy_node);
2126 +
2127 +- if (!phy_pdev || !hdmi->phy) {
2128 ++ if (!phy_pdev) {
2129 + DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n");
2130 + return -EPROBE_DEFER;
2131 + }
2132 ++ if (!hdmi->phy) {
2133 ++ DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n");
2134 ++ put_device(&phy_pdev->dev);
2135 ++ return -EPROBE_DEFER;
2136 ++ }
2137 +
2138 + hdmi->phy_dev = get_device(&phy_pdev->dev);
2139 +
2140 +diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
2141 +index 892c04365239b..f04a2337da006 100644
2142 +--- a/drivers/gpu/drm/msm/msm_drv.c
2143 ++++ b/drivers/gpu/drm/msm/msm_drv.c
2144 +@@ -466,7 +466,7 @@ static int msm_init_vram(struct drm_device *dev)
2145 + of_node_put(node);
2146 + if (ret)
2147 + return ret;
2148 +- size = r.end - r.start;
2149 ++ size = r.end - r.start + 1;
2150 + DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
2151 +
2152 + /* if we have no IOMMU, then we need to use carveout allocator.
2153 +diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
2154 +index ca873a3b98dbe..f2d05bff42453 100644
2155 +--- a/drivers/hv/hv_balloon.c
2156 ++++ b/drivers/hv/hv_balloon.c
2157 +@@ -1660,6 +1660,13 @@ static int balloon_connect_vsp(struct hv_device *dev)
2158 + unsigned long t;
2159 + int ret;
2160 +
2161 ++ /*
2162 ++ * max_pkt_size should be large enough for one vmbus packet header plus
2163 ++ * our receive buffer size. Hyper-V sends messages up to
2164 ++ * HV_HYP_PAGE_SIZE bytes long on balloon channel.
2165 ++ */
2166 ++ dev->channel->max_pkt_size = HV_HYP_PAGE_SIZE * 2;
2167 ++
2168 + ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
2169 + balloon_onchannelcallback, dev);
2170 + if (ret)
2171 +diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
2172 +index d519aca4a9d64..fb6d14d213a18 100644
2173 +--- a/drivers/hwmon/adt7470.c
2174 ++++ b/drivers/hwmon/adt7470.c
2175 +@@ -662,6 +662,9 @@ static int adt7470_fan_write(struct device *dev, u32 attr, int channel, long val
2176 + struct adt7470_data *data = dev_get_drvdata(dev);
2177 + int err;
2178 +
2179 ++ if (val <= 0)
2180 ++ return -EINVAL;
2181 ++
2182 + val = FAN_RPM_TO_PERIOD(val);
2183 + val = clamp_val(val, 1, 65534);
2184 +
2185 +diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
2186 +index 74019dff2550e..1c9493c708132 100644
2187 +--- a/drivers/hwmon/lm90.c
2188 ++++ b/drivers/hwmon/lm90.c
2189 +@@ -373,7 +373,7 @@ static const struct lm90_params lm90_params[] = {
2190 + .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
2191 + | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_CRIT,
2192 + .alert_alarms = 0x7c,
2193 +- .max_convrate = 8,
2194 ++ .max_convrate = 7,
2195 + },
2196 + [lm86] = {
2197 + .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
2198 +@@ -394,12 +394,13 @@ static const struct lm90_params lm90_params[] = {
2199 + .max_convrate = 9,
2200 + },
2201 + [max6646] = {
2202 +- .flags = LM90_HAVE_CRIT,
2203 ++ .flags = LM90_HAVE_CRIT | LM90_HAVE_BROKEN_ALERT,
2204 + .alert_alarms = 0x7c,
2205 + .max_convrate = 6,
2206 + .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
2207 + },
2208 + [max6654] = {
2209 ++ .flags = LM90_HAVE_BROKEN_ALERT,
2210 + .alert_alarms = 0x7c,
2211 + .max_convrate = 7,
2212 + .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
2213 +@@ -418,7 +419,7 @@ static const struct lm90_params lm90_params[] = {
2214 + },
2215 + [max6680] = {
2216 + .flags = LM90_HAVE_OFFSET | LM90_HAVE_CRIT
2217 +- | LM90_HAVE_CRIT_ALRM_SWP,
2218 ++ | LM90_HAVE_CRIT_ALRM_SWP | LM90_HAVE_BROKEN_ALERT,
2219 + .alert_alarms = 0x7c,
2220 + .max_convrate = 7,
2221 + },
2222 +@@ -848,7 +849,7 @@ static int lm90_update_device(struct device *dev)
2223 + * Re-enable ALERT# output if it was originally enabled and
2224 + * relevant alarms are all clear
2225 + */
2226 +- if (!(data->config_orig & 0x80) &&
2227 ++ if ((client->irq || !(data->config_orig & 0x80)) &&
2228 + !(data->alarms & data->alert_alarms)) {
2229 + if (data->config & 0x80) {
2230 + dev_dbg(&client->dev, "Re-enabling ALERT#\n");
2231 +@@ -1807,22 +1808,22 @@ static bool lm90_is_tripped(struct i2c_client *client, u16 *status)
2232 +
2233 + if (st & LM90_STATUS_LLOW)
2234 + hwmon_notify_event(data->hwmon_dev, hwmon_temp,
2235 +- hwmon_temp_min, 0);
2236 ++ hwmon_temp_min_alarm, 0);
2237 + if (st & LM90_STATUS_RLOW)
2238 + hwmon_notify_event(data->hwmon_dev, hwmon_temp,
2239 +- hwmon_temp_min, 1);
2240 ++ hwmon_temp_min_alarm, 1);
2241 + if (st2 & MAX6696_STATUS2_R2LOW)
2242 + hwmon_notify_event(data->hwmon_dev, hwmon_temp,
2243 +- hwmon_temp_min, 2);
2244 ++ hwmon_temp_min_alarm, 2);
2245 + if (st & LM90_STATUS_LHIGH)
2246 + hwmon_notify_event(data->hwmon_dev, hwmon_temp,
2247 +- hwmon_temp_max, 0);
2248 ++ hwmon_temp_max_alarm, 0);
2249 + if (st & LM90_STATUS_RHIGH)
2250 + hwmon_notify_event(data->hwmon_dev, hwmon_temp,
2251 +- hwmon_temp_max, 1);
2252 ++ hwmon_temp_max_alarm, 1);
2253 + if (st2 & MAX6696_STATUS2_R2HIGH)
2254 + hwmon_notify_event(data->hwmon_dev, hwmon_temp,
2255 +- hwmon_temp_max, 2);
2256 ++ hwmon_temp_max_alarm, 2);
2257 +
2258 + return true;
2259 + }
2260 +diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
2261 +index 57ce8633a7256..3496a823e4d4f 100644
2262 +--- a/drivers/hwmon/nct6775.c
2263 ++++ b/drivers/hwmon/nct6775.c
2264 +@@ -1175,7 +1175,7 @@ static inline u8 in_to_reg(u32 val, u8 nr)
2265 +
2266 + struct nct6775_data {
2267 + int addr; /* IO base of hw monitor block */
2268 +- int sioreg; /* SIO register address */
2269 ++ struct nct6775_sio_data *sio_data;
2270 + enum kinds kind;
2271 + const char *name;
2272 +
2273 +@@ -3561,7 +3561,7 @@ clear_caseopen(struct device *dev, struct device_attribute *attr,
2274 + const char *buf, size_t count)
2275 + {
2276 + struct nct6775_data *data = dev_get_drvdata(dev);
2277 +- struct nct6775_sio_data *sio_data = dev_get_platdata(dev);
2278 ++ struct nct6775_sio_data *sio_data = data->sio_data;
2279 + int nr = to_sensor_dev_attr(attr)->index - INTRUSION_ALARM_BASE;
2280 + unsigned long val;
2281 + u8 reg;
2282 +@@ -3969,7 +3969,7 @@ static int nct6775_probe(struct platform_device *pdev)
2283 + return -ENOMEM;
2284 +
2285 + data->kind = sio_data->kind;
2286 +- data->sioreg = sio_data->sioreg;
2287 ++ data->sio_data = sio_data;
2288 +
2289 + if (sio_data->access == access_direct) {
2290 + data->addr = res->start;
2291 +diff --git a/drivers/irqchip/irq-realtek-rtl.c b/drivers/irqchip/irq-realtek-rtl.c
2292 +index fd9f275592d29..568614edd88f4 100644
2293 +--- a/drivers/irqchip/irq-realtek-rtl.c
2294 ++++ b/drivers/irqchip/irq-realtek-rtl.c
2295 +@@ -62,7 +62,7 @@ static struct irq_chip realtek_ictl_irq = {
2296 +
2297 + static int intc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
2298 + {
2299 +- irq_set_chip_and_handler(hw, &realtek_ictl_irq, handle_level_irq);
2300 ++ irq_set_chip_and_handler(irq, &realtek_ictl_irq, handle_level_irq);
2301 +
2302 + return 0;
2303 + }
2304 +@@ -95,7 +95,8 @@ out:
2305 + * SoC interrupts are cascaded to MIPS CPU interrupts according to the
2306 + * interrupt-map in the device tree. Each SoC interrupt gets 4 bits for
2307 + * the CPU interrupt in an Interrupt Routing Register. Max 32 SoC interrupts
2308 +- * thus go into 4 IRRs.
2309 ++ * thus go into 4 IRRs. A routing value of '0' means the interrupt is left
2310 ++ * disconnected. Routing values {1..15} connect to output lines {0..14}.
2311 + */
2312 + static int __init map_interrupts(struct device_node *node, struct irq_domain *domain)
2313 + {
2314 +@@ -134,7 +135,7 @@ static int __init map_interrupts(struct device_node *node, struct irq_domain *do
2315 + of_node_put(cpu_ictl);
2316 +
2317 + cpu_int = be32_to_cpup(imap + 2);
2318 +- if (cpu_int > 7)
2319 ++ if (cpu_int > 7 || cpu_int < 2)
2320 + return -EINVAL;
2321 +
2322 + if (!(mips_irqs_set & BIT(cpu_int))) {
2323 +@@ -143,7 +144,8 @@ static int __init map_interrupts(struct device_node *node, struct irq_domain *do
2324 + mips_irqs_set |= BIT(cpu_int);
2325 + }
2326 +
2327 +- regs[(soc_int * 4) / 32] |= cpu_int << (soc_int * 4) % 32;
2328 ++ /* Use routing values (1..6) for CPU interrupts (2..7) */
2329 ++ regs[(soc_int * 4) / 32] |= (cpu_int - 1) << (soc_int * 4) % 32;
2330 + imap += 3;
2331 + }
2332 +
2333 +diff --git a/drivers/md/dm.c b/drivers/md/dm.c
2334 +index b93fcc91176e5..bd814fa0b7216 100644
2335 +--- a/drivers/md/dm.c
2336 ++++ b/drivers/md/dm.c
2337 +@@ -489,7 +489,7 @@ static void start_io_acct(struct dm_io *io)
2338 + struct mapped_device *md = io->md;
2339 + struct bio *bio = io->orig_bio;
2340 +
2341 +- io->start_time = bio_start_io_acct(bio);
2342 ++ bio_start_io_acct_time(bio, io->start_time);
2343 + if (unlikely(dm_stats_used(&md->stats)))
2344 + dm_stats_account_io(&md->stats, bio_data_dir(bio),
2345 + bio->bi_iter.bi_sector, bio_sectors(bio),
2346 +@@ -535,7 +535,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
2347 + io->md = md;
2348 + spin_lock_init(&io->endio_lock);
2349 +
2350 +- start_io_acct(io);
2351 ++ io->start_time = jiffies;
2352 +
2353 + return io;
2354 + }
2355 +@@ -1510,9 +1510,6 @@ static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
2356 + ci->sector = bio->bi_iter.bi_sector;
2357 + }
2358 +
2359 +-#define __dm_part_stat_sub(part, field, subnd) \
2360 +- (part_stat_get(part, field) -= (subnd))
2361 +-
2362 + /*
2363 + * Entry point to split a bio into clones and submit them to the targets.
2364 + */
2365 +@@ -1548,23 +1545,12 @@ static void __split_and_process_bio(struct mapped_device *md,
2366 + GFP_NOIO, &md->queue->bio_split);
2367 + ci.io->orig_bio = b;
2368 +
2369 +- /*
2370 +- * Adjust IO stats for each split, otherwise upon queue
2371 +- * reentry there will be redundant IO accounting.
2372 +- * NOTE: this is a stop-gap fix, a proper fix involves
2373 +- * significant refactoring of DM core's bio splitting
2374 +- * (by eliminating DM's splitting and just using bio_split)
2375 +- */
2376 +- part_stat_lock();
2377 +- __dm_part_stat_sub(dm_disk(md)->part0,
2378 +- sectors[op_stat_group(bio_op(bio))], ci.sector_count);
2379 +- part_stat_unlock();
2380 +-
2381 + bio_chain(b, bio);
2382 + trace_block_split(b, bio->bi_iter.bi_sector);
2383 + submit_bio_noacct(bio);
2384 + }
2385 + }
2386 ++ start_io_acct(ci.io);
2387 +
2388 + /* drop the extra reference count */
2389 + dm_io_dec_pending(ci.io, errno_to_blk_status(error));
2390 +diff --git a/drivers/mtd/nand/raw/mpc5121_nfc.c b/drivers/mtd/nand/raw/mpc5121_nfc.c
2391 +index cb293c50acb87..5b9271b9c3265 100644
2392 +--- a/drivers/mtd/nand/raw/mpc5121_nfc.c
2393 ++++ b/drivers/mtd/nand/raw/mpc5121_nfc.c
2394 +@@ -291,7 +291,6 @@ static int ads5121_chipselect_init(struct mtd_info *mtd)
2395 + /* Control chips select signal on ADS5121 board */
2396 + static void ads5121_select_chip(struct nand_chip *nand, int chip)
2397 + {
2398 +- struct mtd_info *mtd = nand_to_mtd(nand);
2399 + struct mpc5121_nfc_prv *prv = nand_get_controller_data(nand);
2400 + u8 v;
2401 +
2402 +diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
2403 +index 30d94cb43113d..755f4a59f784e 100644
2404 +--- a/drivers/net/can/m_can/m_can.c
2405 ++++ b/drivers/net/can/m_can/m_can.c
2406 +@@ -336,6 +336,9 @@ m_can_fifo_read(struct m_can_classdev *cdev,
2407 + u32 addr_offset = cdev->mcfg[MRAM_RXF0].off + fgi * RXF0_ELEMENT_SIZE +
2408 + offset;
2409 +
2410 ++ if (val_count == 0)
2411 ++ return 0;
2412 ++
2413 + return cdev->ops->read_fifo(cdev, addr_offset, val, val_count);
2414 + }
2415 +
2416 +@@ -346,6 +349,9 @@ m_can_fifo_write(struct m_can_classdev *cdev,
2417 + u32 addr_offset = cdev->mcfg[MRAM_TXB].off + fpi * TXB_ELEMENT_SIZE +
2418 + offset;
2419 +
2420 ++ if (val_count == 0)
2421 ++ return 0;
2422 ++
2423 + return cdev->ops->write_fifo(cdev, addr_offset, val, val_count);
2424 + }
2425 +
2426 +diff --git a/drivers/net/can/m_can/tcan4x5x-regmap.c b/drivers/net/can/m_can/tcan4x5x-regmap.c
2427 +index ca80dbaf7a3f5..26e212b8ca7a6 100644
2428 +--- a/drivers/net/can/m_can/tcan4x5x-regmap.c
2429 ++++ b/drivers/net/can/m_can/tcan4x5x-regmap.c
2430 +@@ -12,7 +12,7 @@
2431 + #define TCAN4X5X_SPI_INSTRUCTION_WRITE (0x61 << 24)
2432 + #define TCAN4X5X_SPI_INSTRUCTION_READ (0x41 << 24)
2433 +
2434 +-#define TCAN4X5X_MAX_REGISTER 0x8ffc
2435 ++#define TCAN4X5X_MAX_REGISTER 0x87fc
2436 +
2437 + static int tcan4x5x_regmap_gather_write(void *context,
2438 + const void *reg, size_t reg_len,
2439 +diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
2440 +index b719f72281c44..3d469073fbc54 100644
2441 +--- a/drivers/net/ethernet/google/gve/gve.h
2442 ++++ b/drivers/net/ethernet/google/gve/gve.h
2443 +@@ -830,7 +830,7 @@ static inline bool gve_is_gqi(struct gve_priv *priv)
2444 + /* buffers */
2445 + int gve_alloc_page(struct gve_priv *priv, struct device *dev,
2446 + struct page **page, dma_addr_t *dma,
2447 +- enum dma_data_direction);
2448 ++ enum dma_data_direction, gfp_t gfp_flags);
2449 + void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
2450 + enum dma_data_direction);
2451 + /* tx handling */
2452 +diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
2453 +index 59b66f679e46e..28e2d4d8ed7c6 100644
2454 +--- a/drivers/net/ethernet/google/gve/gve_main.c
2455 ++++ b/drivers/net/ethernet/google/gve/gve_main.c
2456 +@@ -752,9 +752,9 @@ static void gve_free_rings(struct gve_priv *priv)
2457 +
2458 + int gve_alloc_page(struct gve_priv *priv, struct device *dev,
2459 + struct page **page, dma_addr_t *dma,
2460 +- enum dma_data_direction dir)
2461 ++ enum dma_data_direction dir, gfp_t gfp_flags)
2462 + {
2463 +- *page = alloc_page(GFP_KERNEL);
2464 ++ *page = alloc_page(gfp_flags);
2465 + if (!*page) {
2466 + priv->page_alloc_fail++;
2467 + return -ENOMEM;
2468 +@@ -797,7 +797,7 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
2469 + for (i = 0; i < pages; i++) {
2470 + err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
2471 + &qpl->page_buses[i],
2472 +- gve_qpl_dma_dir(priv, id));
2473 ++ gve_qpl_dma_dir(priv, id), GFP_KERNEL);
2474 + /* caller handles clean up */
2475 + if (err)
2476 + return -ENOMEM;
2477 +diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
2478 +index 3d04b5aff331b..04a08904305a9 100644
2479 +--- a/drivers/net/ethernet/google/gve/gve_rx.c
2480 ++++ b/drivers/net/ethernet/google/gve/gve_rx.c
2481 +@@ -86,7 +86,8 @@ static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
2482 + dma_addr_t dma;
2483 + int err;
2484 +
2485 +- err = gve_alloc_page(priv, dev, &page, &dma, DMA_FROM_DEVICE);
2486 ++ err = gve_alloc_page(priv, dev, &page, &dma, DMA_FROM_DEVICE,
2487 ++ GFP_ATOMIC);
2488 + if (err)
2489 + return err;
2490 +
2491 +diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
2492 +index beb8bb079023c..8c939628e2d85 100644
2493 +--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
2494 ++++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
2495 +@@ -157,7 +157,7 @@ static int gve_alloc_page_dqo(struct gve_priv *priv,
2496 + int err;
2497 +
2498 + err = gve_alloc_page(priv, &priv->pdev->dev, &buf_state->page_info.page,
2499 +- &buf_state->addr, DMA_FROM_DEVICE);
2500 ++ &buf_state->addr, DMA_FROM_DEVICE, GFP_KERNEL);
2501 + if (err)
2502 + return err;
2503 +
2504 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
2505 +index 41afaeea881bc..70491e07b0ff6 100644
2506 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
2507 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
2508 +@@ -2496,8 +2496,7 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
2509 + break;
2510 + }
2511 +
2512 +- if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
2513 +- hclgevf_enable_vector(&hdev->misc_vector, true);
2514 ++ hclgevf_enable_vector(&hdev->misc_vector, true);
2515 +
2516 + return IRQ_HANDLED;
2517 + }
2518 +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
2519 +index 0bb3911dd014d..682a440151a87 100644
2520 +--- a/drivers/net/ethernet/ibm/ibmvnic.c
2521 ++++ b/drivers/net/ethernet/ibm/ibmvnic.c
2522 +@@ -2598,6 +2598,7 @@ static void __ibmvnic_reset(struct work_struct *work)
2523 + struct ibmvnic_rwi *rwi;
2524 + unsigned long flags;
2525 + u32 reset_state;
2526 ++ int num_fails = 0;
2527 + int rc = 0;
2528 +
2529 + adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
2530 +@@ -2651,11 +2652,23 @@ static void __ibmvnic_reset(struct work_struct *work)
2531 + rc = do_hard_reset(adapter, rwi, reset_state);
2532 + rtnl_unlock();
2533 + }
2534 +- if (rc) {
2535 +- /* give backing device time to settle down */
2536 ++ if (rc)
2537 ++ num_fails++;
2538 ++ else
2539 ++ num_fails = 0;
2540 ++
2541 ++ /* If auto-priority-failover is enabled we can get
2542 ++ * back to back failovers during resets, resulting
2543 ++ * in at least two failed resets (from high-priority
2544 ++ * backing device to low-priority one and then back)
2545 ++ * If resets continue to fail beyond that, give the
2546 ++ * adapter some time to settle down before retrying.
2547 ++ */
2548 ++ if (num_fails >= 3) {
2549 + netdev_dbg(adapter->netdev,
2550 +- "[S:%s] Hard reset failed, waiting 60 secs\n",
2551 +- adapter_state_to_string(adapter->state));
2552 ++ "[S:%s] Hard reset failed %d times, waiting 60 secs\n",
2553 ++ adapter_state_to_string(adapter->state),
2554 ++ num_fails);
2555 + set_current_state(TASK_UNINTERRUPTIBLE);
2556 + schedule_timeout(60 * HZ);
2557 + }
2558 +@@ -3836,11 +3849,25 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
2559 + struct device *dev = &adapter->vdev->dev;
2560 + union ibmvnic_crq crq;
2561 + int max_entries;
2562 ++ int cap_reqs;
2563 ++
2564 ++ /* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on
2565 ++ * the PROMISC flag). Initialize this count upfront. When the tasklet
2566 ++ * receives a response to all of these, it will send the next protocol
2567 ++ * message (QUERY_IP_OFFLOAD).
2568 ++ */
2569 ++ if (!(adapter->netdev->flags & IFF_PROMISC) ||
2570 ++ adapter->promisc_supported)
2571 ++ cap_reqs = 7;
2572 ++ else
2573 ++ cap_reqs = 6;
2574 +
2575 + if (!retry) {
2576 + /* Sub-CRQ entries are 32 byte long */
2577 + int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
2578 +
2579 ++ atomic_set(&adapter->running_cap_crqs, cap_reqs);
2580 ++
2581 + if (adapter->min_tx_entries_per_subcrq > entries_page ||
2582 + adapter->min_rx_add_entries_per_subcrq > entries_page) {
2583 + dev_err(dev, "Fatal, invalid entries per sub-crq\n");
2584 +@@ -3901,44 +3928,45 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
2585 + adapter->opt_rx_comp_queues;
2586 +
2587 + adapter->req_rx_add_queues = adapter->max_rx_add_queues;
2588 ++ } else {
2589 ++ atomic_add(cap_reqs, &adapter->running_cap_crqs);
2590 + }
2591 +-
2592 + memset(&crq, 0, sizeof(crq));
2593 + crq.request_capability.first = IBMVNIC_CRQ_CMD;
2594 + crq.request_capability.cmd = REQUEST_CAPABILITY;
2595 +
2596 + crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
2597 + crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
2598 +- atomic_inc(&adapter->running_cap_crqs);
2599 ++ cap_reqs--;
2600 + ibmvnic_send_crq(adapter, &crq);
2601 +
2602 + crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
2603 + crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
2604 +- atomic_inc(&adapter->running_cap_crqs);
2605 ++ cap_reqs--;
2606 + ibmvnic_send_crq(adapter, &crq);
2607 +
2608 + crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
2609 + crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
2610 +- atomic_inc(&adapter->running_cap_crqs);
2611 ++ cap_reqs--;
2612 + ibmvnic_send_crq(adapter, &crq);
2613 +
2614 + crq.request_capability.capability =
2615 + cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
2616 + crq.request_capability.number =
2617 + cpu_to_be64(adapter->req_tx_entries_per_subcrq);
2618 +- atomic_inc(&adapter->running_cap_crqs);
2619 ++ cap_reqs--;
2620 + ibmvnic_send_crq(adapter, &crq);
2621 +
2622 + crq.request_capability.capability =
2623 + cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
2624 + crq.request_capability.number =
2625 + cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
2626 +- atomic_inc(&adapter->running_cap_crqs);
2627 ++ cap_reqs--;
2628 + ibmvnic_send_crq(adapter, &crq);
2629 +
2630 + crq.request_capability.capability = cpu_to_be16(REQ_MTU);
2631 + crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
2632 +- atomic_inc(&adapter->running_cap_crqs);
2633 ++ cap_reqs--;
2634 + ibmvnic_send_crq(adapter, &crq);
2635 +
2636 + if (adapter->netdev->flags & IFF_PROMISC) {
2637 +@@ -3946,16 +3974,21 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
2638 + crq.request_capability.capability =
2639 + cpu_to_be16(PROMISC_REQUESTED);
2640 + crq.request_capability.number = cpu_to_be64(1);
2641 +- atomic_inc(&adapter->running_cap_crqs);
2642 ++ cap_reqs--;
2643 + ibmvnic_send_crq(adapter, &crq);
2644 + }
2645 + } else {
2646 + crq.request_capability.capability =
2647 + cpu_to_be16(PROMISC_REQUESTED);
2648 + crq.request_capability.number = cpu_to_be64(0);
2649 +- atomic_inc(&adapter->running_cap_crqs);
2650 ++ cap_reqs--;
2651 + ibmvnic_send_crq(adapter, &crq);
2652 + }
2653 ++
2654 ++ /* Keep at end to catch any discrepancy between expected and actual
2655 ++ * CRQs sent.
2656 ++ */
2657 ++ WARN_ON(cap_reqs != 0);
2658 + }
2659 +
2660 + static int pending_scrq(struct ibmvnic_adapter *adapter,
2661 +@@ -4349,118 +4382,132 @@ static void send_query_map(struct ibmvnic_adapter *adapter)
2662 + static void send_query_cap(struct ibmvnic_adapter *adapter)
2663 + {
2664 + union ibmvnic_crq crq;
2665 ++ int cap_reqs;
2666 ++
2667 ++ /* We send out 25 QUERY_CAPABILITY CRQs below. Initialize this count
2668 ++ * upfront. When the tasklet receives a response to all of these, it
2669 ++ * can send out the next protocol messaage (REQUEST_CAPABILITY).
2670 ++ */
2671 ++ cap_reqs = 25;
2672 ++
2673 ++ atomic_set(&adapter->running_cap_crqs, cap_reqs);
2674 +
2675 +- atomic_set(&adapter->running_cap_crqs, 0);
2676 + memset(&crq, 0, sizeof(crq));
2677 + crq.query_capability.first = IBMVNIC_CRQ_CMD;
2678 + crq.query_capability.cmd = QUERY_CAPABILITY;
2679 +
2680 + crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
2681 +- atomic_inc(&adapter->running_cap_crqs);
2682 + ibmvnic_send_crq(adapter, &crq);
2683 ++ cap_reqs--;
2684 +
2685 + crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
2686 +- atomic_inc(&adapter->running_cap_crqs);
2687 + ibmvnic_send_crq(adapter, &crq);
2688 ++ cap_reqs--;
2689 +
2690 + crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
2691 +- atomic_inc(&adapter->running_cap_crqs);
2692 + ibmvnic_send_crq(adapter, &crq);
2693 ++ cap_reqs--;
2694 +
2695 + crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
2696 +- atomic_inc(&adapter->running_cap_crqs);
2697 + ibmvnic_send_crq(adapter, &crq);
2698 ++ cap_reqs--;
2699 +
2700 + crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
2701 +- atomic_inc(&adapter->running_cap_crqs);
2702 + ibmvnic_send_crq(adapter, &crq);
2703 ++ cap_reqs--;
2704 +
2705 + crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
2706 +- atomic_inc(&adapter->running_cap_crqs);
2707 + ibmvnic_send_crq(adapter, &crq);
2708 ++ cap_reqs--;
2709 +
2710 + crq.query_capability.capability =
2711 + cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
2712 +- atomic_inc(&adapter->running_cap_crqs);
2713 + ibmvnic_send_crq(adapter, &crq);
2714 ++ cap_reqs--;
2715 +
2716 + crq.query_capability.capability =
2717 + cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
2718 +- atomic_inc(&adapter->running_cap_crqs);
2719 + ibmvnic_send_crq(adapter, &crq);
2720 ++ cap_reqs--;
2721 +
2722 + crq.query_capability.capability =
2723 + cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
2724 +- atomic_inc(&adapter->running_cap_crqs);
2725 + ibmvnic_send_crq(adapter, &crq);
2726 ++ cap_reqs--;
2727 +
2728 + crq.query_capability.capability =
2729 + cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
2730 +- atomic_inc(&adapter->running_cap_crqs);
2731 + ibmvnic_send_crq(adapter, &crq);
2732 ++ cap_reqs--;
2733 +
2734 + crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
2735 +- atomic_inc(&adapter->running_cap_crqs);
2736 + ibmvnic_send_crq(adapter, &crq);
2737 ++ cap_reqs--;
2738 +
2739 + crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
2740 +- atomic_inc(&adapter->running_cap_crqs);
2741 + ibmvnic_send_crq(adapter, &crq);
2742 ++ cap_reqs--;
2743 +
2744 + crq.query_capability.capability = cpu_to_be16(MIN_MTU);
2745 +- atomic_inc(&adapter->running_cap_crqs);
2746 + ibmvnic_send_crq(adapter, &crq);
2747 ++ cap_reqs--;
2748 +
2749 + crq.query_capability.capability = cpu_to_be16(MAX_MTU);
2750 +- atomic_inc(&adapter->running_cap_crqs);
2751 + ibmvnic_send_crq(adapter, &crq);
2752 ++ cap_reqs--;
2753 +
2754 + crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
2755 +- atomic_inc(&adapter->running_cap_crqs);
2756 + ibmvnic_send_crq(adapter, &crq);
2757 ++ cap_reqs--;
2758 +
2759 + crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
2760 +- atomic_inc(&adapter->running_cap_crqs);
2761 + ibmvnic_send_crq(adapter, &crq);
2762 ++ cap_reqs--;
2763 +
2764 + crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
2765 +- atomic_inc(&adapter->running_cap_crqs);
2766 + ibmvnic_send_crq(adapter, &crq);
2767 ++ cap_reqs--;
2768 +
2769 + crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
2770 +- atomic_inc(&adapter->running_cap_crqs);
2771 + ibmvnic_send_crq(adapter, &crq);
2772 ++ cap_reqs--;
2773 +
2774 + crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
2775 +- atomic_inc(&adapter->running_cap_crqs);
2776 + ibmvnic_send_crq(adapter, &crq);
2777 ++ cap_reqs--;
2778 +
2779 + crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
2780 +- atomic_inc(&adapter->running_cap_crqs);
2781 + ibmvnic_send_crq(adapter, &crq);
2782 ++ cap_reqs--;
2783 +
2784 + crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
2785 +- atomic_inc(&adapter->running_cap_crqs);
2786 + ibmvnic_send_crq(adapter, &crq);
2787 ++ cap_reqs--;
2788 +
2789 + crq.query_capability.capability =
2790 + cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
2791 +- atomic_inc(&adapter->running_cap_crqs);
2792 + ibmvnic_send_crq(adapter, &crq);
2793 ++ cap_reqs--;
2794 +
2795 + crq.query_capability.capability =
2796 + cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
2797 +- atomic_inc(&adapter->running_cap_crqs);
2798 + ibmvnic_send_crq(adapter, &crq);
2799 ++ cap_reqs--;
2800 +
2801 + crq.query_capability.capability =
2802 + cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
2803 +- atomic_inc(&adapter->running_cap_crqs);
2804 + ibmvnic_send_crq(adapter, &crq);
2805 ++ cap_reqs--;
2806 +
2807 + crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
2808 +- atomic_inc(&adapter->running_cap_crqs);
2809 ++
2810 + ibmvnic_send_crq(adapter, &crq);
2811 ++ cap_reqs--;
2812 ++
2813 ++ /* Keep at end to catch any discrepancy between expected and actual
2814 ++ * CRQs sent.
2815 ++ */
2816 ++ WARN_ON(cap_reqs != 0);
2817 + }
2818 +
2819 + static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
2820 +@@ -4764,6 +4811,8 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
2821 + char *name;
2822 +
2823 + atomic_dec(&adapter->running_cap_crqs);
2824 ++ netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n",
2825 ++ atomic_read(&adapter->running_cap_crqs));
2826 + switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
2827 + case REQ_TX_QUEUES:
2828 + req_value = &adapter->req_tx_queues;
2829 +@@ -5442,12 +5491,6 @@ static void ibmvnic_tasklet(struct tasklet_struct *t)
2830 + ibmvnic_handle_crq(crq, adapter);
2831 + crq->generic.first = 0;
2832 + }
2833 +-
2834 +- /* remain in tasklet until all
2835 +- * capabilities responses are received
2836 +- */
2837 +- if (!adapter->wait_capability)
2838 +- done = true;
2839 + }
2840 + /* if capabilities CRQ's were sent in this tasklet, the following
2841 + * tasklet must wait until all responses are received
2842 +diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
2843 +index 4d939af0a626c..2e02cc68cd3f7 100644
2844 +--- a/drivers/net/ethernet/intel/i40e/i40e.h
2845 ++++ b/drivers/net/ethernet/intel/i40e/i40e.h
2846 +@@ -174,7 +174,6 @@ enum i40e_interrupt_policy {
2847 +
2848 + struct i40e_lump_tracking {
2849 + u16 num_entries;
2850 +- u16 search_hint;
2851 + u16 list[0];
2852 + #define I40E_PILE_VALID_BIT 0x8000
2853 + #define I40E_IWARP_IRQ_PILE_ID (I40E_PILE_VALID_BIT - 2)
2854 +@@ -848,12 +847,12 @@ struct i40e_vsi {
2855 + struct rtnl_link_stats64 net_stats_offsets;
2856 + struct i40e_eth_stats eth_stats;
2857 + struct i40e_eth_stats eth_stats_offsets;
2858 +- u32 tx_restart;
2859 +- u32 tx_busy;
2860 ++ u64 tx_restart;
2861 ++ u64 tx_busy;
2862 + u64 tx_linearize;
2863 + u64 tx_force_wb;
2864 +- u32 rx_buf_failed;
2865 +- u32 rx_page_failed;
2866 ++ u64 rx_buf_failed;
2867 ++ u64 rx_page_failed;
2868 +
2869 + /* These are containers of ring pointers, allocated at run-time */
2870 + struct i40e_ring **rx_rings;
2871 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
2872 +index 2c1b1da1220ec..1e57cc8c47d7b 100644
2873 +--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
2874 ++++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
2875 +@@ -240,7 +240,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
2876 + (unsigned long int)vsi->net_stats_offsets.rx_compressed,
2877 + (unsigned long int)vsi->net_stats_offsets.tx_compressed);
2878 + dev_info(&pf->pdev->dev,
2879 +- " tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
2880 ++ " tx_restart = %llu, tx_busy = %llu, rx_buf_failed = %llu, rx_page_failed = %llu\n",
2881 + vsi->tx_restart, vsi->tx_busy,
2882 + vsi->rx_buf_failed, vsi->rx_page_failed);
2883 + rcu_read_lock();
2884 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
2885 +index 61afc220fc6cd..f605c0205e4e7 100644
2886 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
2887 ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
2888 +@@ -196,10 +196,6 @@ int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
2889 + * @id: an owner id to stick on the items assigned
2890 + *
2891 + * Returns the base item index of the lump, or negative for error
2892 +- *
2893 +- * The search_hint trick and lack of advanced fit-finding only work
2894 +- * because we're highly likely to have all the same size lump requests.
2895 +- * Linear search time and any fragmentation should be minimal.
2896 + **/
2897 + static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
2898 + u16 needed, u16 id)
2899 +@@ -214,8 +210,21 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
2900 + return -EINVAL;
2901 + }
2902 +
2903 +- /* start the linear search with an imperfect hint */
2904 +- i = pile->search_hint;
2905 ++ /* Allocate last queue in the pile for FDIR VSI queue
2906 ++ * so it doesn't fragment the qp_pile
2907 ++ */
2908 ++ if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) {
2909 ++ if (pile->list[pile->num_entries - 1] & I40E_PILE_VALID_BIT) {
2910 ++ dev_err(&pf->pdev->dev,
2911 ++ "Cannot allocate queue %d for I40E_VSI_FDIR\n",
2912 ++ pile->num_entries - 1);
2913 ++ return -ENOMEM;
2914 ++ }
2915 ++ pile->list[pile->num_entries - 1] = id | I40E_PILE_VALID_BIT;
2916 ++ return pile->num_entries - 1;
2917 ++ }
2918 ++
2919 ++ i = 0;
2920 + while (i < pile->num_entries) {
2921 + /* skip already allocated entries */
2922 + if (pile->list[i] & I40E_PILE_VALID_BIT) {
2923 +@@ -234,7 +243,6 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
2924 + for (j = 0; j < needed; j++)
2925 + pile->list[i+j] = id | I40E_PILE_VALID_BIT;
2926 + ret = i;
2927 +- pile->search_hint = i + j;
2928 + break;
2929 + }
2930 +
2931 +@@ -257,7 +265,7 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
2932 + {
2933 + int valid_id = (id | I40E_PILE_VALID_BIT);
2934 + int count = 0;
2935 +- int i;
2936 ++ u16 i;
2937 +
2938 + if (!pile || index >= pile->num_entries)
2939 + return -EINVAL;
2940 +@@ -269,8 +277,6 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
2941 + count++;
2942 + }
2943 +
2944 +- if (count && index < pile->search_hint)
2945 +- pile->search_hint = index;
2946 +
2947 + return count;
2948 + }
2949 +@@ -772,9 +778,9 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
2950 + struct rtnl_link_stats64 *ns; /* netdev stats */
2951 + struct i40e_eth_stats *oes;
2952 + struct i40e_eth_stats *es; /* device's eth stats */
2953 +- u32 tx_restart, tx_busy;
2954 ++ u64 tx_restart, tx_busy;
2955 + struct i40e_ring *p;
2956 +- u32 rx_page, rx_buf;
2957 ++ u64 rx_page, rx_buf;
2958 + u64 bytes, packets;
2959 + unsigned int start;
2960 + u64 tx_linearize;
2961 +@@ -10574,15 +10580,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
2962 + }
2963 + i40e_get_oem_version(&pf->hw);
2964 +
2965 +- if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
2966 +- ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
2967 +- hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
2968 +- /* The following delay is necessary for 4.33 firmware and older
2969 +- * to recover after EMP reset. 200 ms should suffice but we
2970 +- * put here 300 ms to be sure that FW is ready to operate
2971 +- * after reset.
2972 +- */
2973 +- mdelay(300);
2974 ++ if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) {
2975 ++ /* The following delay is necessary for firmware update. */
2976 ++ mdelay(1000);
2977 + }
2978 +
2979 + /* re-verify the eeprom if we just had an EMP reset */
2980 +@@ -11792,7 +11792,6 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
2981 + return -ENOMEM;
2982 +
2983 + pf->irq_pile->num_entries = vectors;
2984 +- pf->irq_pile->search_hint = 0;
2985 +
2986 + /* track first vector for misc interrupts, ignore return */
2987 + (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
2988 +@@ -12595,7 +12594,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
2989 + goto sw_init_done;
2990 + }
2991 + pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
2992 +- pf->qp_pile->search_hint = 0;
2993 +
2994 + pf->tx_timeout_recovery_level = 1;
2995 +
2996 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
2997 +index 8d0588a27a053..1908eed4fa5ee 100644
2998 +--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
2999 ++++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
3000 +@@ -413,6 +413,9 @@
3001 + #define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
3002 + #define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
3003 + #define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
3004 ++#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
3005 ++#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT)
3006 ++#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
3007 + #define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
3008 + #define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
3009 + #define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
3010 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
3011 +index 048f1678ab8ac..c6f643e54c4f7 100644
3012 +--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
3013 ++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
3014 +@@ -1376,6 +1376,32 @@ static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
3015 + return aq_ret;
3016 + }
3017 +
3018 ++/**
3019 ++ * i40e_sync_vfr_reset
3020 ++ * @hw: pointer to hw struct
3021 ++ * @vf_id: VF identifier
3022 ++ *
3023 ++ * Before triggering a hardware reset, we need to know that no other process
3024 ++ * has reserved the hardware for any reset operation. This check is done by
3025 ++ * examining the status of the RSTAT1 register used to signal the reset.
3026 ++ **/
3027 ++static int i40e_sync_vfr_reset(struct i40e_hw *hw, int vf_id)
3028 ++{
3029 ++ u32 reg;
3030 ++ int i;
3031 ++
3032 ++ for (i = 0; i < I40E_VFR_WAIT_COUNT; i++) {
3033 ++ reg = rd32(hw, I40E_VFINT_ICR0_ENA(vf_id)) &
3034 ++ I40E_VFINT_ICR0_ADMINQ_MASK;
3035 ++ if (reg)
3036 ++ return 0;
3037 ++
3038 ++ usleep_range(100, 200);
3039 ++ }
3040 ++
3041 ++ return -EAGAIN;
3042 ++}
3043 ++
3044 + /**
3045 + * i40e_trigger_vf_reset
3046 + * @vf: pointer to the VF structure
3047 +@@ -1390,9 +1416,11 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
3048 + struct i40e_pf *pf = vf->pf;
3049 + struct i40e_hw *hw = &pf->hw;
3050 + u32 reg, reg_idx, bit_idx;
3051 ++ bool vf_active;
3052 ++ u32 radq;
3053 +
3054 + /* warn the VF */
3055 +- clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
3056 ++ vf_active = test_and_clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
3057 +
3058 + /* Disable VF's configuration API during reset. The flag is re-enabled
3059 + * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
3060 +@@ -1406,7 +1434,19 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
3061 + * just need to clean up, so don't hit the VFRTRIG register.
3062 + */
3063 + if (!flr) {
3064 +- /* reset VF using VPGEN_VFRTRIG reg */
3065 ++ /* Sync the VFR reset before triggering the next one */
3066 ++ radq = rd32(hw, I40E_VFINT_ICR0_ENA(vf->vf_id)) &
3067 ++ I40E_VFINT_ICR0_ADMINQ_MASK;
3068 ++ if (vf_active && !radq)
3069 ++ /* wait for the virtual driver to finish its reset */
3070 ++ if (i40e_sync_vfr_reset(hw, vf->vf_id))
3071 ++ dev_info(&pf->pdev->dev,
3072 ++ "Reset VF %d never finished\n",
3073 ++ vf->vf_id);
3074 ++
3075 ++ /* Reset the VF using the VPGEN_VFRTRIG reg. This also sets the
3076 ++ * reset-in-progress state in the rstat1 register.
3077 ++ */
3078 + reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
3079 + reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
3080 + wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
3081 +@@ -2617,6 +2657,59 @@ error_param:
3082 + aq_ret);
3083 + }
3084 +
3085 ++/**
3086 ++ * i40e_check_enough_queue - find a large enough block of free queues
3087 ++ * @vf: pointer to the VF info
3088 ++ * @needed: the number of items needed
3089 ++ *
3090 ++ * Returns the base item index of the queue, or negative for error
3091 ++ **/
3092 ++static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed)
3093 ++{
3094 ++ unsigned int i, cur_queues, more, pool_size;
3095 ++ struct i40e_lump_tracking *pile;
3096 ++ struct i40e_pf *pf = vf->pf;
3097 ++ struct i40e_vsi *vsi;
3098 ++
3099 ++ vsi = pf->vsi[vf->lan_vsi_idx];
3100 ++ cur_queues = vsi->alloc_queue_pairs;
3101 ++
3102 ++ /* if the currently allocated queues already satisfy the need */
3103 ++ if (cur_queues >= needed)
3104 ++ return vsi->base_queue;
3105 ++
3106 ++ pile = pf->qp_pile;
3107 ++ if (cur_queues > 0) {
3108 ++ /* if some queues are already allocated,
3109 ++ * just check whether enough additional queues are free
3110 ++ * behind the allocated ones.
3111 ++ */
3112 ++ more = needed - cur_queues;
3113 ++ for (i = vsi->base_queue + cur_queues;
3114 ++ i < pile->num_entries; i++) {
3115 ++ if (pile->list[i] & I40E_PILE_VALID_BIT)
3116 ++ break;
3117 ++
3118 ++ if (more-- == 1)
3119 ++ /* there is enough */
3120 ++ return vsi->base_queue;
3121 ++ }
3122 ++ }
3123 ++
3124 ++ pool_size = 0;
3125 ++ for (i = 0; i < pile->num_entries; i++) {
3126 ++ if (pile->list[i] & I40E_PILE_VALID_BIT) {
3127 ++ pool_size = 0;
3128 ++ continue;
3129 ++ }
3130 ++ if (needed <= ++pool_size)
3131 ++ /* there is enough */
3132 ++ return i;
3133 ++ }
3134 ++
3135 ++ return -ENOMEM;
3136 ++}
3137 ++
3138 + /**
3139 + * i40e_vc_request_queues_msg
3140 + * @vf: pointer to the VF info
3141 +@@ -2651,6 +2744,12 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
3142 + req_pairs - cur_pairs,
3143 + pf->queues_left);
3144 + vfres->num_queue_pairs = pf->queues_left + cur_pairs;
3145 ++ } else if (i40e_check_enough_queue(vf, req_pairs) < 0) {
3146 ++ dev_warn(&pf->pdev->dev,
3147 ++ "VF %d requested %d more queues, but there is not enough for it.\n",
3148 ++ vf->vf_id,
3149 ++ req_pairs - cur_pairs);
3150 ++ vfres->num_queue_pairs = cur_pairs;
3151 + } else {
3152 + /* successful request */
3153 + vf->num_req_queues = req_pairs;
3154 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
3155 +index 49575a640a84c..03c42fd0fea19 100644
3156 +--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
3157 ++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
3158 +@@ -19,6 +19,7 @@
3159 + #define I40E_MAX_VF_PROMISC_FLAGS 3
3160 +
3161 + #define I40E_VF_STATE_WAIT_COUNT 20
3162 ++#define I40E_VFR_WAIT_COUNT 100
3163 +
3164 + /* Various queue ctrls */
3165 + enum i40e_queue_ctrl {
3166 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
3167 +index 186d00a9ab35c..3631d612aaca1 100644
3168 +--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
3169 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
3170 +@@ -1570,6 +1570,8 @@ static struct mac_ops cgx_mac_ops = {
3171 + .mac_enadis_pause_frm = cgx_lmac_enadis_pause_frm,
3172 + .mac_pause_frm_config = cgx_lmac_pause_frm_config,
3173 + .mac_enadis_ptp_config = cgx_lmac_ptp_config,
3174 ++ .mac_rx_tx_enable = cgx_lmac_rx_tx_enable,
3175 ++ .mac_tx_enable = cgx_lmac_tx_enable,
3176 + };
3177 +
3178 + static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3179 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
3180 +index fc6e7423cbd81..b33e7d1d0851c 100644
3181 +--- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
3182 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
3183 +@@ -107,6 +107,9 @@ struct mac_ops {
3184 + void (*mac_enadis_ptp_config)(void *cgxd,
3185 + int lmac_id,
3186 + bool enable);
3187 ++
3188 ++ int (*mac_rx_tx_enable)(void *cgxd, int lmac_id, bool enable);
3189 ++ int (*mac_tx_enable)(void *cgxd, int lmac_id, bool enable);
3190 + };
3191 +
3192 + struct cgx {
3193 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
3194 +index 4e79e918a1617..58e2aeebc14f8 100644
3195 +--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
3196 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
3197 +@@ -732,6 +732,7 @@ enum nix_af_status {
3198 + NIX_AF_ERR_BANDPROF_INVAL_REQ = -428,
3199 + NIX_AF_ERR_CQ_CTX_WRITE_ERR = -429,
3200 + NIX_AF_ERR_AQ_CTX_RETRY_WRITE = -430,
3201 ++ NIX_AF_ERR_LINK_CREDITS = -431,
3202 + };
3203 +
3204 + /* For NIX RX vtag action */
3205 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
3206 +index 0fe7ad35e36fd..4180376fa6763 100644
3207 +--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
3208 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
3209 +@@ -185,7 +185,6 @@ enum npc_kpu_parser_state {
3210 + NPC_S_KPU2_QINQ,
3211 + NPC_S_KPU2_ETAG,
3212 + NPC_S_KPU2_EXDSA,
3213 +- NPC_S_KPU2_NGIO,
3214 + NPC_S_KPU2_CPT_CTAG,
3215 + NPC_S_KPU2_CPT_QINQ,
3216 + NPC_S_KPU3_CTAG,
3217 +@@ -212,6 +211,7 @@ enum npc_kpu_parser_state {
3218 + NPC_S_KPU5_NSH,
3219 + NPC_S_KPU5_CPT_IP,
3220 + NPC_S_KPU5_CPT_IP6,
3221 ++ NPC_S_KPU5_NGIO,
3222 + NPC_S_KPU6_IP6_EXT,
3223 + NPC_S_KPU6_IP6_HOP_DEST,
3224 + NPC_S_KPU6_IP6_ROUT,
3225 +@@ -1120,15 +1120,6 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
3226 + 0x0000,
3227 + 0x0000,
3228 + },
3229 +- {
3230 +- NPC_S_KPU1_ETHER, 0xff,
3231 +- NPC_ETYPE_CTAG,
3232 +- 0xffff,
3233 +- NPC_ETYPE_NGIO,
3234 +- 0xffff,
3235 +- 0x0000,
3236 +- 0x0000,
3237 +- },
3238 + {
3239 + NPC_S_KPU1_ETHER, 0xff,
3240 + NPC_ETYPE_CTAG,
3241 +@@ -1966,6 +1957,15 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
3242 + 0x0000,
3243 + 0x0000,
3244 + },
3245 ++ {
3246 ++ NPC_S_KPU2_CTAG, 0xff,
3247 ++ NPC_ETYPE_NGIO,
3248 ++ 0xffff,
3249 ++ 0x0000,
3250 ++ 0x0000,
3251 ++ 0x0000,
3252 ++ 0x0000,
3253 ++ },
3254 + {
3255 + NPC_S_KPU2_CTAG, 0xff,
3256 + NPC_ETYPE_PPPOE,
3257 +@@ -2749,15 +2749,6 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
3258 + 0x0000,
3259 + 0x0000,
3260 + },
3261 +- {
3262 +- NPC_S_KPU2_NGIO, 0xff,
3263 +- 0x0000,
3264 +- 0x0000,
3265 +- 0x0000,
3266 +- 0x0000,
3267 +- 0x0000,
3268 +- 0x0000,
3269 +- },
3270 + {
3271 + NPC_S_KPU2_CPT_CTAG, 0xff,
3272 + NPC_ETYPE_IP,
3273 +@@ -5089,6 +5080,15 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
3274 + 0x0000,
3275 + 0x0000,
3276 + },
3277 ++ {
3278 ++ NPC_S_KPU5_NGIO, 0xff,
3279 ++ 0x0000,
3280 ++ 0x0000,
3281 ++ 0x0000,
3282 ++ 0x0000,
3283 ++ 0x0000,
3284 ++ 0x0000,
3285 ++ },
3286 + {
3287 + NPC_S_NA, 0X00,
3288 + 0x0000,
3289 +@@ -8422,14 +8422,6 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
3290 + 0,
3291 + 0, 0, 0, 0,
3292 + },
3293 +- {
3294 +- NPC_ERRLEV_RE, NPC_EC_NOERR,
3295 +- 8, 12, 0, 0, 0,
3296 +- NPC_S_KPU2_NGIO, 12, 1,
3297 +- NPC_LID_LA, NPC_LT_LA_ETHER,
3298 +- 0,
3299 +- 0, 0, 0, 0,
3300 +- },
3301 + {
3302 + NPC_ERRLEV_RE, NPC_EC_NOERR,
3303 + 8, 12, 0, 0, 0,
3304 +@@ -9194,6 +9186,14 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
3305 + 0,
3306 + 0, 0, 0, 0,
3307 + },
3308 ++ {
3309 ++ NPC_ERRLEV_RE, NPC_EC_NOERR,
3310 ++ 0, 0, 0, 2, 0,
3311 ++ NPC_S_KPU5_NGIO, 6, 1,
3312 ++ NPC_LID_LB, NPC_LT_LB_CTAG,
3313 ++ 0,
3314 ++ 0, 0, 0, 0,
3315 ++ },
3316 + {
3317 + NPC_ERRLEV_RE, NPC_EC_NOERR,
3318 + 8, 0, 6, 2, 0,
3319 +@@ -9890,14 +9890,6 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
3320 + NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_EXDSA,
3321 + 0, 0, 0, 0,
3322 + },
3323 +- {
3324 +- NPC_ERRLEV_RE, NPC_EC_NOERR,
3325 +- 0, 0, 0, 0, 1,
3326 +- NPC_S_NA, 0, 1,
3327 +- NPC_LID_LC, NPC_LT_LC_NGIO,
3328 +- 0,
3329 +- 0, 0, 0, 0,
3330 +- },
3331 + {
3332 + NPC_ERRLEV_RE, NPC_EC_NOERR,
3333 + 8, 0, 6, 2, 0,
3334 +@@ -11973,6 +11965,14 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
3335 + 0,
3336 + 0, 0, 0, 0,
3337 + },
3338 ++ {
3339 ++ NPC_ERRLEV_RE, NPC_EC_NOERR,
3340 ++ 0, 0, 0, 0, 1,
3341 ++ NPC_S_NA, 0, 1,
3342 ++ NPC_LID_LC, NPC_LT_LC_NGIO,
3343 ++ 0,
3344 ++ 0, 0, 0, 0,
3345 ++ },
3346 + {
3347 + NPC_ERRLEV_LC, NPC_EC_UNK,
3348 + 0, 0, 0, 0, 1,
3349 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
3350 +index e695fa0e82a94..9ea2f6ac38ec1 100644
3351 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
3352 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
3353 +@@ -30,6 +30,8 @@ static struct mac_ops rpm_mac_ops = {
3354 + .mac_enadis_pause_frm = rpm_lmac_enadis_pause_frm,
3355 + .mac_pause_frm_config = rpm_lmac_pause_frm_config,
3356 + .mac_enadis_ptp_config = rpm_lmac_ptp_config,
3357 ++ .mac_rx_tx_enable = rpm_lmac_rx_tx_enable,
3358 ++ .mac_tx_enable = rpm_lmac_tx_enable,
3359 + };
3360 +
3361 + struct mac_ops *rpm_get_mac_ops(void)
3362 +@@ -54,6 +56,43 @@ int rpm_get_nr_lmacs(void *rpmd)
3363 + return hweight8(rpm_read(rpm, 0, CGXX_CMRX_RX_LMACS) & 0xFULL);
3364 + }
3365 +
3366 ++int rpm_lmac_tx_enable(void *rpmd, int lmac_id, bool enable)
3367 ++{
3368 ++ rpm_t *rpm = rpmd;
3369 ++ u64 cfg, last;
3370 ++
3371 ++ if (!is_lmac_valid(rpm, lmac_id))
3372 ++ return -ENODEV;
3373 ++
3374 ++ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
3375 ++ last = cfg;
3376 ++ if (enable)
3377 ++ cfg |= RPM_TX_EN;
3378 ++ else
3379 ++ cfg &= ~(RPM_TX_EN);
3380 ++
3381 ++ if (cfg != last)
3382 ++ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
3383 ++ return !!(last & RPM_TX_EN);
3384 ++}
3385 ++
3386 ++int rpm_lmac_rx_tx_enable(void *rpmd, int lmac_id, bool enable)
3387 ++{
3388 ++ rpm_t *rpm = rpmd;
3389 ++ u64 cfg;
3390 ++
3391 ++ if (!is_lmac_valid(rpm, lmac_id))
3392 ++ return -ENODEV;
3393 ++
3394 ++ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
3395 ++ if (enable)
3396 ++ cfg |= RPM_RX_EN | RPM_TX_EN;
3397 ++ else
3398 ++ cfg &= ~(RPM_RX_EN | RPM_TX_EN);
3399 ++ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
3400 ++ return 0;
3401 ++}
3402 ++
3403 + void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable)
3404 + {
3405 + rpm_t *rpm = rpmd;
3406 +@@ -252,23 +291,20 @@ int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable)
3407 + if (!rpm || lmac_id >= rpm->lmac_count)
3408 + return -ENODEV;
3409 + lmac_type = rpm->mac_ops->get_lmac_type(rpm, lmac_id);
3410 +- if (lmac_type == LMAC_MODE_100G_R) {
3411 +- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1);
3412 +-
3413 +- if (enable)
3414 +- cfg |= RPMX_MTI_PCS_LBK;
3415 +- else
3416 +- cfg &= ~RPMX_MTI_PCS_LBK;
3417 +- rpm_write(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1, cfg);
3418 +- } else {
3419 +- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_LPCSX_CONTROL1);
3420 +- if (enable)
3421 +- cfg |= RPMX_MTI_PCS_LBK;
3422 +- else
3423 +- cfg &= ~RPMX_MTI_PCS_LBK;
3424 +- rpm_write(rpm, lmac_id, RPMX_MTI_LPCSX_CONTROL1, cfg);
3425 ++
3426 ++ if (lmac_type == LMAC_MODE_QSGMII || lmac_type == LMAC_MODE_SGMII) {
3427 ++ dev_err(&rpm->pdev->dev, "loopback not supported for LPC mode\n");
3428 ++ return 0;
3429 + }
3430 +
3431 ++ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1);
3432 ++
3433 ++ if (enable)
3434 ++ cfg |= RPMX_MTI_PCS_LBK;
3435 ++ else
3436 ++ cfg &= ~RPMX_MTI_PCS_LBK;
3437 ++ rpm_write(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1, cfg);
3438 ++
3439 + return 0;
3440 + }
3441 +
3442 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
3443 +index 57c8a687b488a..ff580311edd03 100644
3444 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
3445 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
3446 +@@ -43,6 +43,8 @@
3447 + #define RPMX_MTI_STAT_DATA_HI_CDC 0x10038
3448 +
3449 + #define RPM_LMAC_FWI 0xa
3450 ++#define RPM_TX_EN BIT_ULL(0)
3451 ++#define RPM_RX_EN BIT_ULL(1)
3452 +
3453 + /* Function Declarations */
3454 + int rpm_get_nr_lmacs(void *rpmd);
3455 +@@ -57,4 +59,6 @@ int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
3456 + int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat);
3457 + int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat);
3458 + void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable);
3459 ++int rpm_lmac_rx_tx_enable(void *rpmd, int lmac_id, bool enable);
3460 ++int rpm_lmac_tx_enable(void *rpmd, int lmac_id, bool enable);
3461 + #endif /* RPM_H */
3462 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
3463 +index 3ca6b942ebe25..54e1b27a7dfec 100644
3464 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
3465 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
3466 +@@ -520,8 +520,11 @@ static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
3467 +
3468 + rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
3469 + err = rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
3470 +- if (err)
3471 +- dev_err(rvu->dev, "HW block:%d reset failed\n", blkaddr);
3472 ++ if (err) {
3473 ++ dev_err(rvu->dev, "HW block:%d reset timeout retrying again\n", blkaddr);
3474 ++ while (rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true) == -EBUSY)
3475 ++ ;
3476 ++ }
3477 + }
3478 +
3479 + static void rvu_reset_all_blocks(struct rvu *rvu)
3480 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
3481 +index 66e45d733824e..5ed94cfb47d2d 100644
3482 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
3483 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
3484 +@@ -806,6 +806,7 @@ bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature);
3485 + u32 rvu_cgx_get_fifolen(struct rvu *rvu);
3486 + void *rvu_first_cgx_pdata(struct rvu *rvu);
3487 + int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id);
3488 ++int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable);
3489 +
3490 + int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf,
3491 + int type);
3492 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
3493 +index 2ca182a4ce823..8a7ac5a8b821d 100644
3494 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
3495 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
3496 +@@ -441,16 +441,26 @@ void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
3497 + int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
3498 + {
3499 + int pf = rvu_get_pf(pcifunc);
3500 ++ struct mac_ops *mac_ops;
3501 + u8 cgx_id, lmac_id;
3502 ++ void *cgxd;
3503 +
3504 + if (!is_cgx_config_permitted(rvu, pcifunc))
3505 + return LMAC_AF_ERR_PERM_DENIED;
3506 +
3507 + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
3508 ++ cgxd = rvu_cgx_pdata(cgx_id, rvu);
3509 ++ mac_ops = get_mac_ops(cgxd);
3510 ++
3511 ++ return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start);
3512 ++}
3513 +
3514 +- cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start);
3515 ++int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable)
3516 ++{
3517 ++ struct mac_ops *mac_ops;
3518 +
3519 +- return 0;
3520 ++ mac_ops = get_mac_ops(cgxd);
3521 ++ return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
3522 + }
3523 +
3524 + void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
3525 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3526 +index a09a507369ac3..d1eddb769a419 100644
3527 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3528 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3529 +@@ -1224,6 +1224,8 @@ static void print_nix_cn10k_sq_ctx(struct seq_file *m,
3530 + seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
3531 + sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
3532 +
3533 ++ seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
3534 ++ sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
3535 + seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
3536 + seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
3537 + seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
3538 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
3539 +index d8b1948aaa0ae..97fb61915379a 100644
3540 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
3541 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
3542 +@@ -512,11 +512,11 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
3543 + cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
3544 + lmac_chan_cnt = cfg & 0xFF;
3545 +
3546 +- cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
3547 +- sdp_chan_cnt = cfg & 0xFFF;
3548 +-
3549 + cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
3550 + lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
3551 ++
3552 ++ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
3553 ++ sdp_chan_cnt = cfg & 0xFFF;
3554 + sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt;
3555 +
3556 + pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
3557 +@@ -2068,8 +2068,8 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
3558 + /* enable cgx tx if disabled */
3559 + if (is_pf_cgxmapped(rvu, pf)) {
3560 + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
3561 +- restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
3562 +- lmac_id, true);
3563 ++ restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
3564 ++ lmac_id, true);
3565 + }
3566 +
3567 + cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
3568 +@@ -2092,7 +2092,7 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
3569 + rvu_cgx_enadis_rx_bp(rvu, pf, true);
3570 + /* restore cgx tx state */
3571 + if (restore_tx_en)
3572 +- cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
3573 ++ rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
3574 + return err;
3575 + }
3576 +
3577 +@@ -3878,7 +3878,7 @@ nix_config_link_credits(struct rvu *rvu, int blkaddr, int link,
3578 + /* Enable cgx tx if disabled for credits to be back */
3579 + if (is_pf_cgxmapped(rvu, pf)) {
3580 + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
3581 +- restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
3582 ++ restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
3583 + lmac_id, true);
3584 + }
3585 +
3586 +@@ -3891,8 +3891,8 @@ nix_config_link_credits(struct rvu *rvu, int blkaddr, int link,
3587 + NIX_AF_TL1X_SW_XOFF(schq), BIT_ULL(0));
3588 + }
3589 +
3590 +- rc = -EBUSY;
3591 +- poll_tmo = jiffies + usecs_to_jiffies(10000);
3592 ++ rc = NIX_AF_ERR_LINK_CREDITS;
3593 ++ poll_tmo = jiffies + usecs_to_jiffies(200000);
3594 + /* Wait for credits to return */
3595 + do {
3596 + if (time_after(jiffies, poll_tmo))
3597 +@@ -3918,7 +3918,7 @@ exit:
3598 +
3599 + /* Restore state of cgx tx */
3600 + if (restore_tx_en)
3601 +- cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
3602 ++ rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
3603 +
3604 + mutex_unlock(&rvu->rsrc_lock);
3605 + return rc;
3606 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
3607 +index c0005a1feee69..91f86d77cd41b 100644
3608 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
3609 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
3610 +@@ -402,6 +402,7 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
3611 + int blkaddr, int index, struct mcam_entry *entry,
3612 + bool *enable)
3613 + {
3614 ++ struct rvu_npc_mcam_rule *rule;
3615 + u16 owner, target_func;
3616 + struct rvu_pfvf *pfvf;
3617 + u64 rx_action;
3618 +@@ -423,6 +424,12 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
3619 + test_bit(NIXLF_INITIALIZED, &pfvf->flags)))
3620 + *enable = false;
3621 +
3622 ++ /* fix-up is not needed for rules added by the user (ntuple filters) */
3623 ++ list_for_each_entry(rule, &mcam->mcam_rules, list) {
3624 ++ if (rule->entry == index)
3625 ++ return;
3626 ++ }
3627 ++
3628 + /* copy VF default entry action to the VF mcam entry */
3629 + rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr,
3630 + target_func);
3631 +@@ -489,8 +496,8 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
3632 + }
3633 +
3634 + /* PF installing VF rule */
3635 +- if (intf == NIX_INTF_RX && actindex < mcam->bmap_entries)
3636 +- npc_fixup_vf_rule(rvu, mcam, blkaddr, index, entry, &enable);
3637 ++ if (is_npc_intf_rx(intf) && actindex < mcam->bmap_entries)
3638 ++ npc_fixup_vf_rule(rvu, mcam, blkaddr, actindex, entry, &enable);
3639 +
3640 + /* Set 'action' */
3641 + rvu_write64(rvu, blkaddr,
3642 +@@ -916,7 +923,8 @@ static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
3643 + int blkaddr, u16 pcifunc, u64 rx_action)
3644 + {
3645 + int actindex, index, bank, entry;
3646 +- bool enable;
3647 ++ struct rvu_npc_mcam_rule *rule;
3648 ++ bool enable, update;
3649 +
3650 + if (!(pcifunc & RVU_PFVF_FUNC_MASK))
3651 + return;
3652 +@@ -924,6 +932,14 @@ static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
3653 + mutex_lock(&mcam->lock);
3654 + for (index = 0; index < mcam->bmap_entries; index++) {
3655 + if (mcam->entry2target_pffunc[index] == pcifunc) {
3656 ++ update = true;
3657 ++ /* update not needed for the rules added via ntuple filters */
3658 ++ list_for_each_entry(rule, &mcam->mcam_rules, list) {
3659 ++ if (rule->entry == index)
3660 ++ update = false;
3661 ++ }
3662 ++ if (!update)
3663 ++ continue;
3664 + bank = npc_get_bank(mcam, index);
3665 + actindex = index;
3666 + entry = index & (mcam->banksize - 1);
3667 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
3668 +index ff2b21999f36f..19c53e591d0da 100644
3669 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
3670 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
3671 +@@ -1098,14 +1098,6 @@ find_rule:
3672 + write_req.cntr = rule->cntr;
3673 + }
3674 +
3675 +- err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req,
3676 +- &write_rsp);
3677 +- if (err) {
3678 +- rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
3679 +- if (new)
3680 +- kfree(rule);
3681 +- return err;
3682 +- }
3683 + /* update rule */
3684 + memcpy(&rule->packet, &dummy.packet, sizeof(rule->packet));
3685 + memcpy(&rule->mask, &dummy.mask, sizeof(rule->mask));
3686 +@@ -1132,6 +1124,18 @@ find_rule:
3687 + if (req->default_rule)
3688 + pfvf->def_ucast_rule = rule;
3689 +
3690 ++ /* write to mcam entry registers */
3691 ++ err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req,
3692 ++ &write_rsp);
3693 ++ if (err) {
3694 ++ rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
3695 ++ if (new) {
3696 ++ list_del(&rule->list);
3697 ++ kfree(rule);
3698 ++ }
3699 ++ return err;
3700 ++ }
3701 ++
3702 + /* VF's MAC address is being changed via PF */
3703 + if (pf_set_vfs_mac) {
3704 + ether_addr_copy(pfvf->default_mac, req->packet.dmac);
3705 +diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
3706 +index 61e52812983fa..14509fc64cce9 100644
3707 +--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
3708 ++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
3709 +@@ -603,6 +603,7 @@ static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
3710 + size++;
3711 + tar_addr |= ((size - 1) & 0x7) << 4;
3712 + }
3713 ++ dma_wmb();
3714 + memcpy((u64 *)lmt_info->lmt_addr, ptrs, sizeof(u64) * num_ptrs);
3715 + /* Perform LMTST flush */
3716 + cn10k_lmt_flush(val, tar_addr);
3717 +diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
3718 +index 1e0d0c9c1dac3..ba7f6b295ca55 100644
3719 +--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
3720 ++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
3721 +@@ -394,7 +394,12 @@ static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf,
3722 + dst_mdev->msg_size = mbox_hdr->msg_size;
3723 + dst_mdev->num_msgs = num_msgs;
3724 + err = otx2_sync_mbox_msg(dst_mbox);
3725 +- if (err) {
3726 ++ /* Error code -EIO indicates a communication failure with
3727 ++ * the AF. All other error codes indicate that the AF processed
3728 ++ * the VF messages and set the error codes in the response messages
3729 ++ * (if any), so simply forward the responses to the VF.
3730 ++ */
3731 ++ if (err == -EIO) {
3732 + dev_warn(pf->dev,
3733 + "AF not responding to VF%d messages\n", vf);
3734 + /* restore PF mbase and exit */
3735 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
3736 +index e2e0f977875d7..dde5b772a5af7 100644
3737 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
3738 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
3739 +@@ -22,21 +22,21 @@
3740 + #define ETHER_CLK_SEL_RMII_CLK_EN BIT(2)
3741 + #define ETHER_CLK_SEL_RMII_CLK_RST BIT(3)
3742 + #define ETHER_CLK_SEL_DIV_SEL_2 BIT(4)
3743 +-#define ETHER_CLK_SEL_DIV_SEL_20 BIT(0)
3744 ++#define ETHER_CLK_SEL_DIV_SEL_20 0
3745 + #define ETHER_CLK_SEL_FREQ_SEL_125M (BIT(9) | BIT(8))
3746 + #define ETHER_CLK_SEL_FREQ_SEL_50M BIT(9)
3747 + #define ETHER_CLK_SEL_FREQ_SEL_25M BIT(8)
3748 + #define ETHER_CLK_SEL_FREQ_SEL_2P5M 0
3749 +-#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_IN BIT(0)
3750 ++#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_IN 0
3751 + #define ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC BIT(10)
3752 + #define ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV BIT(11)
3753 +-#define ETHER_CLK_SEL_RX_CLK_EXT_SEL_IN BIT(0)
3754 ++#define ETHER_CLK_SEL_RX_CLK_EXT_SEL_IN 0
3755 + #define ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC BIT(12)
3756 + #define ETHER_CLK_SEL_RX_CLK_EXT_SEL_DIV BIT(13)
3757 +-#define ETHER_CLK_SEL_TX_CLK_O_TX_I BIT(0)
3758 ++#define ETHER_CLK_SEL_TX_CLK_O_TX_I 0
3759 + #define ETHER_CLK_SEL_TX_CLK_O_RMII_I BIT(14)
3760 + #define ETHER_CLK_SEL_TX_O_E_N_IN BIT(15)
3761 +-#define ETHER_CLK_SEL_RMII_CLK_SEL_IN BIT(0)
3762 ++#define ETHER_CLK_SEL_RMII_CLK_SEL_IN 0
3763 + #define ETHER_CLK_SEL_RMII_CLK_SEL_RX_C BIT(16)
3764 +
3765 + #define ETHER_CLK_SEL_RX_TX_CLK_EN (ETHER_CLK_SEL_RX_CLK_EN | ETHER_CLK_SEL_TX_CLK_EN)
3766 +@@ -96,31 +96,41 @@ static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed)
3767 + val |= ETHER_CLK_SEL_TX_O_E_N_IN;
3768 + writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
3769 +
3770 ++ /* Set Clock-Mux, Start clock, Set TX_O direction */
3771 + switch (dwmac->phy_intf_sel) {
3772 + case ETHER_CONFIG_INTF_RGMII:
3773 + val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC;
3774 ++ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
3775 ++
3776 ++ val |= ETHER_CLK_SEL_RX_TX_CLK_EN;
3777 ++ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
3778 ++
3779 ++ val &= ~ETHER_CLK_SEL_TX_O_E_N_IN;
3780 ++ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
3781 + break;
3782 + case ETHER_CONFIG_INTF_RMII:
3783 + val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_DIV |
3784 +- ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC | ETHER_CLK_SEL_TX_O_E_N_IN |
3785 ++ ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV | ETHER_CLK_SEL_TX_O_E_N_IN |
3786 + ETHER_CLK_SEL_RMII_CLK_SEL_RX_C;
3787 ++ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
3788 ++
3789 ++ val |= ETHER_CLK_SEL_RMII_CLK_RST;
3790 ++ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
3791 ++
3792 ++ val |= ETHER_CLK_SEL_RMII_CLK_EN | ETHER_CLK_SEL_RX_TX_CLK_EN;
3793 ++ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
3794 + break;
3795 + case ETHER_CONFIG_INTF_MII:
3796 + default:
3797 + val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC |
3798 +- ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV | ETHER_CLK_SEL_TX_O_E_N_IN |
3799 +- ETHER_CLK_SEL_RMII_CLK_EN;
3800 ++ ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC | ETHER_CLK_SEL_TX_O_E_N_IN;
3801 ++ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
3802 ++
3803 ++ val |= ETHER_CLK_SEL_RX_TX_CLK_EN;
3804 ++ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
3805 + break;
3806 + }
3807 +
3808 +- /* Start clock */
3809 +- writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
3810 +- val |= ETHER_CLK_SEL_RX_TX_CLK_EN;
3811 +- writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
3812 +-
3813 +- val &= ~ETHER_CLK_SEL_TX_O_E_N_IN;
3814 +- writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
3815 +-
3816 + spin_unlock_irqrestore(&dwmac->lock, flags);
3817 + }
3818 +
3819 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
3820 +index e81a79845d425..ac8e3b932bf1e 100644
3821 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
3822 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
3823 +@@ -899,6 +899,9 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
3824 + bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3825 + int ret;
3826 +
3827 ++ if (priv->plat->ptp_clk_freq_config)
3828 ++ priv->plat->ptp_clk_freq_config(priv);
3829 ++
3830 + ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
3831 + if (ret)
3832 + return ret;
3833 +@@ -921,8 +924,6 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
3834 + priv->hwts_tx_en = 0;
3835 + priv->hwts_rx_en = 0;
3836 +
3837 +- stmmac_ptp_register(priv);
3838 +-
3839 + return 0;
3840 + }
3841 +
3842 +@@ -3245,7 +3246,7 @@ static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3843 + /**
3844 + * stmmac_hw_setup - setup mac in a usable state.
3845 + * @dev : pointer to the device structure.
3846 +- * @init_ptp: initialize PTP if set
3847 ++ * @ptp_register: register PTP if set
3848 + * Description:
3849 + * this is the main function to setup the HW in a usable state because the
3850 + * dma engine is reset, the core registers are configured (e.g. AXI,
3851 +@@ -3255,7 +3256,7 @@ static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3852 + * 0 on success and an appropriate (-)ve integer as defined in errno.h
3853 + * file on failure.
3854 + */
3855 +-static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
3856 ++static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3857 + {
3858 + struct stmmac_priv *priv = netdev_priv(dev);
3859 + u32 rx_cnt = priv->plat->rx_queues_to_use;
3860 +@@ -3312,13 +3313,13 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
3861 +
3862 + stmmac_mmc_setup(priv);
3863 +
3864 +- if (init_ptp) {
3865 +- ret = stmmac_init_ptp(priv);
3866 +- if (ret == -EOPNOTSUPP)
3867 +- netdev_warn(priv->dev, "PTP not supported by HW\n");
3868 +- else if (ret)
3869 +- netdev_warn(priv->dev, "PTP init failed\n");
3870 +- }
3871 ++ ret = stmmac_init_ptp(priv);
3872 ++ if (ret == -EOPNOTSUPP)
3873 ++ netdev_warn(priv->dev, "PTP not supported by HW\n");
3874 ++ else if (ret)
3875 ++ netdev_warn(priv->dev, "PTP init failed\n");
3876 ++ else if (ptp_register)
3877 ++ stmmac_ptp_register(priv);
3878 +
3879 + priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3880 +
3881 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
3882 +index be9b58b2abf9b..ac8bc1c8614d3 100644
3883 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
3884 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
3885 +@@ -297,9 +297,6 @@ void stmmac_ptp_register(struct stmmac_priv *priv)
3886 + {
3887 + int i;
3888 +
3889 +- if (priv->plat->ptp_clk_freq_config)
3890 +- priv->plat->ptp_clk_freq_config(priv);
3891 +-
3892 + for (i = 0; i < priv->dma_cap.pps_out_num; i++) {
3893 + if (i >= STMMAC_PPS_MAX)
3894 + break;
3895 +diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
3896 +index 6bb5ac51d23c3..f8e591d69d2cb 100644
3897 +--- a/drivers/net/ethernet/ti/cpsw_priv.c
3898 ++++ b/drivers/net/ethernet/ti/cpsw_priv.c
3899 +@@ -1144,7 +1144,7 @@ int cpsw_fill_rx_channels(struct cpsw_priv *priv)
3900 + static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw,
3901 + int size)
3902 + {
3903 +- struct page_pool_params pp_params;
3904 ++ struct page_pool_params pp_params = {};
3905 + struct page_pool *pool;
3906 +
3907 + pp_params.order = 0;
3908 +diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
3909 +index 6376b8485976c..980f2be32f05a 100644
3910 +--- a/drivers/net/hamradio/yam.c
3911 ++++ b/drivers/net/hamradio/yam.c
3912 +@@ -950,9 +950,7 @@ static int yam_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __
3913 + ym = memdup_user(data, sizeof(struct yamdrv_ioctl_mcs));
3914 + if (IS_ERR(ym))
3915 + return PTR_ERR(ym);
3916 +- if (ym->cmd != SIOCYAMSMCS)
3917 +- return -EINVAL;
3918 +- if (ym->bitrate > YAM_MAXBITRATE) {
3919 ++ if (ym->cmd != SIOCYAMSMCS || ym->bitrate > YAM_MAXBITRATE) {
3920 + kfree(ym);
3921 + return -EINVAL;
3922 + }
3923 +diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
3924 +index bb5104ae46104..3c683e0e40e9e 100644
3925 +--- a/drivers/net/phy/broadcom.c
3926 ++++ b/drivers/net/phy/broadcom.c
3927 +@@ -854,6 +854,7 @@ static struct phy_driver broadcom_drivers[] = {
3928 + .phy_id_mask = 0xfffffff0,
3929 + .name = "Broadcom BCM54616S",
3930 + /* PHY_GBIT_FEATURES */
3931 ++ .soft_reset = genphy_soft_reset,
3932 + .config_init = bcm54xx_config_init,
3933 + .config_aneg = bcm54616s_config_aneg,
3934 + .config_intr = bcm_phy_config_intr,
3935 +diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
3936 +index 74d8e1dc125f8..ce0bb5951b81e 100644
3937 +--- a/drivers/net/phy/phy_device.c
3938 ++++ b/drivers/net/phy/phy_device.c
3939 +@@ -1746,6 +1746,9 @@ void phy_detach(struct phy_device *phydev)
3940 + phy_driver_is_genphy_10g(phydev))
3941 + device_release_driver(&phydev->mdio.dev);
3942 +
3943 ++ /* Assert the reset signal */
3944 ++ phy_device_reset(phydev, 1);
3945 ++
3946 + /*
3947 + * The phydev might go away on the put_device() below, so avoid
3948 + * a use-after-free bug by reading the underlying bus first.
3949 +@@ -1757,9 +1760,6 @@ void phy_detach(struct phy_device *phydev)
3950 + ndev_owner = dev->dev.parent->driver->owner;
3951 + if (ndev_owner != bus->owner)
3952 + module_put(bus->owner);
3953 +-
3954 +- /* Assert the reset signal */
3955 +- phy_device_reset(phydev, 1);
3956 + }
3957 + EXPORT_SYMBOL(phy_detach);
3958 +
3959 +diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
3960 +index 0c6c0d1843bce..c1512c9925a66 100644
3961 +--- a/drivers/net/phy/sfp-bus.c
3962 ++++ b/drivers/net/phy/sfp-bus.c
3963 +@@ -651,6 +651,11 @@ struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle *fwnode)
3964 + else if (ret < 0)
3965 + return ERR_PTR(ret);
3966 +
3967 ++ if (!fwnode_device_is_available(ref.fwnode)) {
3968 ++ fwnode_handle_put(ref.fwnode);
3969 ++ return NULL;
3970 ++ }
3971 ++
3972 + bus = sfp_bus_get(ref.fwnode);
3973 + fwnode_handle_put(ref.fwnode);
3974 + if (!bus)
3975 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
3976 +index fcbcfc9f5a04f..58be537adb1f1 100644
3977 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
3978 ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
3979 +@@ -145,7 +145,7 @@ void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb,
3980 + mcu_txd->cid = mcu_cmd;
3981 + mcu_txd->ext_cid = FIELD_GET(__MCU_CMD_FIELD_EXT_ID, cmd);
3982 +
3983 +- if (mcu_txd->ext_cid || (cmd & MCU_CE_PREFIX)) {
3984 ++ if (mcu_txd->ext_cid || (cmd & __MCU_CMD_FIELD_CE)) {
3985 + if (cmd & __MCU_CMD_FIELD_QUERY)
3986 + mcu_txd->set_query = MCU_Q_QUERY;
3987 + else
3988 +@@ -193,7 +193,7 @@ int mt7615_mcu_parse_response(struct mt76_dev *mdev, int cmd,
3989 + skb_pull(skb, sizeof(*rxd));
3990 + event = (struct mt7615_mcu_uni_event *)skb->data;
3991 + ret = le32_to_cpu(event->status);
3992 +- } else if (cmd == MCU_CMD_REG_READ) {
3993 ++ } else if (cmd == MCU_CE_QUERY(REG_READ)) {
3994 + struct mt7615_mcu_reg_event *event;
3995 +
3996 + skb_pull(skb, sizeof(*rxd));
3997 +@@ -2737,13 +2737,13 @@ int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif,
3998 + if (vif->type != NL80211_IFTYPE_STATION)
3999 + return 0;
4000 +
4001 +- err = mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_BSS_ABORT, &req_hdr,
4002 +- sizeof(req_hdr), false);
4003 ++ err = mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_BSS_ABORT),
4004 ++ &req_hdr, sizeof(req_hdr), false);
4005 + if (err < 0 || !enable)
4006 + return err;
4007 +
4008 +- return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_BSS_CONNECTED, &req,
4009 +- sizeof(req), false);
4010 ++ return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_BSS_CONNECTED),
4011 ++ &req, sizeof(req), false);
4012 + }
4013 +
4014 + int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif,
4015 +@@ -2762,6 +2762,6 @@ int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif,
4016 +
4017 + phy->roc_grant = false;
4018 +
4019 +- return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_ROC, &req,
4020 +- sizeof(req), false);
4021 ++ return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_ROC),
4022 ++ &req, sizeof(req), false);
4023 + }
4024 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
4025 +index 7733c8fad2413..1fb8432aa27ca 100644
4026 +--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
4027 ++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
4028 +@@ -160,7 +160,8 @@ int mt76_connac_mcu_set_channel_domain(struct mt76_phy *phy)
4029 +
4030 + memcpy(__skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
4031 +
4032 +- return mt76_mcu_skb_send_msg(dev, skb, MCU_CMD_SET_CHAN_DOMAIN, false);
4033 ++ return mt76_mcu_skb_send_msg(dev, skb, MCU_CE_CMD(SET_CHAN_DOMAIN),
4034 ++ false);
4035 + }
4036 + EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_channel_domain);
4037 +
4038 +@@ -198,8 +199,8 @@ int mt76_connac_mcu_set_vif_ps(struct mt76_dev *dev, struct ieee80211_vif *vif)
4039 + if (vif->type != NL80211_IFTYPE_STATION)
4040 + return -EOPNOTSUPP;
4041 +
4042 +- return mt76_mcu_send_msg(dev, MCU_CMD_SET_PS_PROFILE, &req,
4043 +- sizeof(req), false);
4044 ++ return mt76_mcu_send_msg(dev, MCU_CE_CMD(SET_PS_PROFILE),
4045 ++ &req, sizeof(req), false);
4046 + }
4047 + EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_vif_ps);
4048 +
4049 +@@ -1523,7 +1524,8 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
4050 + req->scan_func |= SCAN_FUNC_RANDOM_MAC;
4051 + }
4052 +
4053 +- err = mt76_mcu_skb_send_msg(mdev, skb, MCU_CMD_START_HW_SCAN, false);
4054 ++ err = mt76_mcu_skb_send_msg(mdev, skb, MCU_CE_CMD(START_HW_SCAN),
4055 ++ false);
4056 + if (err < 0)
4057 + clear_bit(MT76_HW_SCANNING, &phy->state);
4058 +
4059 +@@ -1551,8 +1553,8 @@ int mt76_connac_mcu_cancel_hw_scan(struct mt76_phy *phy,
4060 + ieee80211_scan_completed(phy->hw, &info);
4061 + }
4062 +
4063 +- return mt76_mcu_send_msg(phy->dev, MCU_CMD_CANCEL_HW_SCAN, &req,
4064 +- sizeof(req), false);
4065 ++ return mt76_mcu_send_msg(phy->dev, MCU_CE_CMD(CANCEL_HW_SCAN),
4066 ++ &req, sizeof(req), false);
4067 + }
4068 + EXPORT_SYMBOL_GPL(mt76_connac_mcu_cancel_hw_scan);
4069 +
4070 +@@ -1638,7 +1640,8 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy,
4071 + memcpy(skb_put(skb, sreq->ie_len), sreq->ie, sreq->ie_len);
4072 + }
4073 +
4074 +- return mt76_mcu_skb_send_msg(mdev, skb, MCU_CMD_SCHED_SCAN_REQ, false);
4075 ++ return mt76_mcu_skb_send_msg(mdev, skb, MCU_CE_CMD(SCHED_SCAN_REQ),
4076 ++ false);
4077 + }
4078 + EXPORT_SYMBOL_GPL(mt76_connac_mcu_sched_scan_req);
4079 +
4080 +@@ -1658,8 +1661,8 @@ int mt76_connac_mcu_sched_scan_enable(struct mt76_phy *phy,
4081 + else
4082 + clear_bit(MT76_HW_SCHED_SCANNING, &phy->state);
4083 +
4084 +- return mt76_mcu_send_msg(phy->dev, MCU_CMD_SCHED_SCAN_ENABLE, &req,
4085 +- sizeof(req), false);
4086 ++ return mt76_mcu_send_msg(phy->dev, MCU_CE_CMD(SCHED_SCAN_ENABLE),
4087 ++ &req, sizeof(req), false);
4088 + }
4089 + EXPORT_SYMBOL_GPL(mt76_connac_mcu_sched_scan_enable);
4090 +
4091 +@@ -1671,8 +1674,8 @@ int mt76_connac_mcu_chip_config(struct mt76_dev *dev)
4092 +
4093 + memcpy(req.data, "assert", 7);
4094 +
4095 +- return mt76_mcu_send_msg(dev, MCU_CMD_CHIP_CONFIG, &req, sizeof(req),
4096 +- false);
4097 ++ return mt76_mcu_send_msg(dev, MCU_CE_CMD(CHIP_CONFIG),
4098 ++ &req, sizeof(req), false);
4099 + }
4100 + EXPORT_SYMBOL_GPL(mt76_connac_mcu_chip_config);
4101 +
4102 +@@ -1684,8 +1687,8 @@ int mt76_connac_mcu_set_deep_sleep(struct mt76_dev *dev, bool enable)
4103 +
4104 + snprintf(req.data, sizeof(req.data), "KeepFullPwr %d", !enable);
4105 +
4106 +- return mt76_mcu_send_msg(dev, MCU_CMD_CHIP_CONFIG, &req, sizeof(req),
4107 +- false);
4108 ++ return mt76_mcu_send_msg(dev, MCU_CE_CMD(CHIP_CONFIG),
4109 ++ &req, sizeof(req), false);
4110 + }
4111 + EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_deep_sleep);
4112 +
4113 +@@ -1787,8 +1790,8 @@ int mt76_connac_mcu_get_nic_capability(struct mt76_phy *phy)
4114 + struct sk_buff *skb;
4115 + int ret, i;
4116 +
4117 +- ret = mt76_mcu_send_and_get_msg(phy->dev, MCU_CMD_GET_NIC_CAPAB, NULL,
4118 +- 0, true, &skb);
4119 ++ ret = mt76_mcu_send_and_get_msg(phy->dev, MCU_CE_CMD(GET_NIC_CAPAB),
4120 ++ NULL, 0, true, &skb);
4121 + if (ret)
4122 + return ret;
4123 +
4124 +@@ -2071,7 +2074,8 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy,
4125 + memcpy(skb->data, &tx_power_tlv, sizeof(tx_power_tlv));
4126 +
4127 + err = mt76_mcu_skb_send_msg(dev, skb,
4128 +- MCU_CMD_SET_RATE_TX_POWER, false);
4129 ++ MCU_CE_CMD(SET_RATE_TX_POWER),
4130 ++ false);
4131 + if (err < 0)
4132 + return err;
4133 + }
4134 +@@ -2163,8 +2167,8 @@ int mt76_connac_mcu_set_p2p_oppps(struct ieee80211_hw *hw,
4135 + .bss_idx = mvif->idx,
4136 + };
4137 +
4138 +- return mt76_mcu_send_msg(phy->dev, MCU_CMD_SET_P2P_OPPPS, &req,
4139 +- sizeof(req), false);
4140 ++ return mt76_mcu_send_msg(phy->dev, MCU_CE_CMD(SET_P2P_OPPPS),
4141 ++ &req, sizeof(req), false);
4142 + }
4143 + EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_p2p_oppps);
4144 +
4145 +@@ -2490,8 +2494,8 @@ u32 mt76_connac_mcu_reg_rr(struct mt76_dev *dev, u32 offset)
4146 + .addr = cpu_to_le32(offset),
4147 + };
4148 +
4149 +- return mt76_mcu_send_msg(dev, MCU_CMD_REG_READ, &req, sizeof(req),
4150 +- true);
4151 ++ return mt76_mcu_send_msg(dev, MCU_CE_QUERY(REG_READ), &req,
4152 ++ sizeof(req), true);
4153 + }
4154 + EXPORT_SYMBOL_GPL(mt76_connac_mcu_reg_rr);
4155 +
4156 +@@ -2505,7 +2509,8 @@ void mt76_connac_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val)
4157 + .val = cpu_to_le32(val),
4158 + };
4159 +
4160 +- mt76_mcu_send_msg(dev, MCU_CMD_REG_WRITE, &req, sizeof(req), false);
4161 ++ mt76_mcu_send_msg(dev, MCU_CE_CMD(REG_WRITE), &req,
4162 ++ sizeof(req), false);
4163 + }
4164 + EXPORT_SYMBOL_GPL(mt76_connac_mcu_reg_wr);
4165 +
4166 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
4167 +index 5c5fab9154e59..acb9a286d3546 100644
4168 +--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
4169 ++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
4170 +@@ -496,13 +496,11 @@ enum {
4171 + #define MCU_CMD_UNI_EXT_ACK (MCU_CMD_ACK | MCU_CMD_UNI | \
4172 + MCU_CMD_QUERY)
4173 +
4174 +-#define MCU_CE_PREFIX BIT(29)
4175 +-#define MCU_CMD_MASK ~(MCU_CE_PREFIX)
4176 +-
4177 + #define __MCU_CMD_FIELD_ID GENMASK(7, 0)
4178 + #define __MCU_CMD_FIELD_EXT_ID GENMASK(15, 8)
4179 + #define __MCU_CMD_FIELD_QUERY BIT(16)
4180 + #define __MCU_CMD_FIELD_UNI BIT(17)
4181 ++#define __MCU_CMD_FIELD_CE BIT(18)
4182 +
4183 + #define MCU_CMD(_t) FIELD_PREP(__MCU_CMD_FIELD_ID, \
4184 + MCU_CMD_##_t)
4185 +@@ -513,6 +511,10 @@ enum {
4186 + #define MCU_UNI_CMD(_t) (__MCU_CMD_FIELD_UNI | \
4187 + FIELD_PREP(__MCU_CMD_FIELD_ID, \
4188 + MCU_UNI_CMD_##_t))
4189 ++#define MCU_CE_CMD(_t) (__MCU_CMD_FIELD_CE | \
4190 ++ FIELD_PREP(__MCU_CMD_FIELD_ID, \
4191 ++ MCU_CE_CMD_##_t))
4192 ++#define MCU_CE_QUERY(_t) (MCU_CE_CMD(_t) | __MCU_CMD_FIELD_QUERY)
4193 +
4194 + enum {
4195 + MCU_EXT_CMD_EFUSE_ACCESS = 0x01,
4196 +@@ -589,26 +591,26 @@ enum {
4197 +
4198 + /* offload mcu commands */
4199 + enum {
4200 +- MCU_CMD_TEST_CTRL = MCU_CE_PREFIX | 0x01,
4201 +- MCU_CMD_START_HW_SCAN = MCU_CE_PREFIX | 0x03,
4202 +- MCU_CMD_SET_PS_PROFILE = MCU_CE_PREFIX | 0x05,
4203 +- MCU_CMD_SET_CHAN_DOMAIN = MCU_CE_PREFIX | 0x0f,
4204 +- MCU_CMD_SET_BSS_CONNECTED = MCU_CE_PREFIX | 0x16,
4205 +- MCU_CMD_SET_BSS_ABORT = MCU_CE_PREFIX | 0x17,
4206 +- MCU_CMD_CANCEL_HW_SCAN = MCU_CE_PREFIX | 0x1b,
4207 +- MCU_CMD_SET_ROC = MCU_CE_PREFIX | 0x1d,
4208 +- MCU_CMD_SET_P2P_OPPPS = MCU_CE_PREFIX | 0x33,
4209 +- MCU_CMD_SET_RATE_TX_POWER = MCU_CE_PREFIX | 0x5d,
4210 +- MCU_CMD_SCHED_SCAN_ENABLE = MCU_CE_PREFIX | 0x61,
4211 +- MCU_CMD_SCHED_SCAN_REQ = MCU_CE_PREFIX | 0x62,
4212 +- MCU_CMD_GET_NIC_CAPAB = MCU_CE_PREFIX | 0x8a,
4213 +- MCU_CMD_SET_MU_EDCA_PARMS = MCU_CE_PREFIX | 0xb0,
4214 +- MCU_CMD_REG_WRITE = MCU_CE_PREFIX | 0xc0,
4215 +- MCU_CMD_REG_READ = MCU_CE_PREFIX | __MCU_CMD_FIELD_QUERY | 0xc0,
4216 +- MCU_CMD_CHIP_CONFIG = MCU_CE_PREFIX | 0xca,
4217 +- MCU_CMD_FWLOG_2_HOST = MCU_CE_PREFIX | 0xc5,
4218 +- MCU_CMD_GET_WTBL = MCU_CE_PREFIX | 0xcd,
4219 +- MCU_CMD_GET_TXPWR = MCU_CE_PREFIX | 0xd0,
4220 ++ MCU_CE_CMD_TEST_CTRL = 0x01,
4221 ++ MCU_CE_CMD_START_HW_SCAN = 0x03,
4222 ++ MCU_CE_CMD_SET_PS_PROFILE = 0x05,
4223 ++ MCU_CE_CMD_SET_CHAN_DOMAIN = 0x0f,
4224 ++ MCU_CE_CMD_SET_BSS_CONNECTED = 0x16,
4225 ++ MCU_CE_CMD_SET_BSS_ABORT = 0x17,
4226 ++ MCU_CE_CMD_CANCEL_HW_SCAN = 0x1b,
4227 ++ MCU_CE_CMD_SET_ROC = 0x1d,
4228 ++ MCU_CE_CMD_SET_P2P_OPPPS = 0x33,
4229 ++ MCU_CE_CMD_SET_RATE_TX_POWER = 0x5d,
4230 ++ MCU_CE_CMD_SCHED_SCAN_ENABLE = 0x61,
4231 ++ MCU_CE_CMD_SCHED_SCAN_REQ = 0x62,
4232 ++ MCU_CE_CMD_GET_NIC_CAPAB = 0x8a,
4233 ++ MCU_CE_CMD_SET_MU_EDCA_PARMS = 0xb0,
4234 ++ MCU_CE_CMD_REG_WRITE = 0xc0,
4235 ++ MCU_CE_CMD_REG_READ = 0xc0,
4236 ++ MCU_CE_CMD_CHIP_CONFIG = 0xca,
4237 ++ MCU_CE_CMD_FWLOG_2_HOST = 0xc5,
4238 ++ MCU_CE_CMD_GET_WTBL = 0xcd,
4239 ++ MCU_CE_CMD_GET_TXPWR = 0xd0,
4240 + };
4241 +
4242 + enum {
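
The header change above drops the single MCU_CE_PREFIX bit in favour of a packed command word: the 8-bit opcode sits in bits 7:0 and separate flag bits mark UNI, QUERY and now CE commands, which is why REG_READ and REG_WRITE can share opcode 0xc0 and differ only in the QUERY flag. A minimal userspace sketch of that packing, using simplified stand-ins for the kernel's GENMASK()/FIELD_PREP() helpers (the macro names below are illustrative, not the driver's):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's GENMASK()/FIELD_PREP();
 * the field layout mirrors the hunk above (opcode in bits 7:0,
 * QUERY in bit 16, CE flag in bit 18). */
#define GENMASK32(h, l)   (((~0u) >> (31 - (h))) & ~((1u << (l)) - 1u))
#define FIELD_ID          GENMASK32(7, 0)
#define FIELD_QUERY       (1u << 16)
#define FIELD_CE          (1u << 18)

#define CE_CMD(id)        (FIELD_CE | ((id) & FIELD_ID))
#define CE_QUERY(id)      (CE_CMD(id) | FIELD_QUERY)

int main(void)
{
        uint32_t reg_read  = CE_QUERY(0xc0);  /* MCU_CE_CMD_REG_READ  = 0xc0 */
        uint32_t reg_write = CE_CMD(0xc0);    /* MCU_CE_CMD_REG_WRITE = 0xc0 */

        /* Same 8-bit opcode; only the QUERY flag distinguishes the two,
         * instead of the old dedicated MCU_CE_PREFIX bit. */
        assert((reg_read & FIELD_ID) == (reg_write & FIELD_ID));
        assert((reg_read & FIELD_QUERY) && !(reg_write & FIELD_QUERY));
        assert(reg_read & FIELD_CE);

        printf("REG_READ=0x%08x REG_WRITE=0x%08x\n", reg_read, reg_write);
        return 0;
}

This is also what lets the mt7921 response parser later in the patch recover the opcode with a plain FIELD_GET() instead of masking off a prefix bit.
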
4243 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
4244 +index 484a8c57b862e..4c6adbb969550 100644
4245 +--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
4246 ++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
4247 +@@ -163,8 +163,8 @@ mt7921_mcu_parse_eeprom(struct mt76_dev *dev, struct sk_buff *skb)
4248 + int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
4249 + struct sk_buff *skb, int seq)
4250 + {
4251 ++ int mcu_cmd = FIELD_GET(__MCU_CMD_FIELD_ID, cmd);
4252 + struct mt7921_mcu_rxd *rxd;
4253 +- int mcu_cmd = cmd & MCU_CMD_MASK;
4254 + int ret = 0;
4255 +
4256 + if (!skb) {
4257 +@@ -201,7 +201,7 @@ int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
4258 + /* skip invalid event */
4259 + if (mcu_cmd != event->cid)
4260 + ret = -EAGAIN;
4261 +- } else if (cmd == MCU_CMD_REG_READ) {
4262 ++ } else if (cmd == MCU_CE_QUERY(REG_READ)) {
4263 + struct mt7921_mcu_reg_event *event;
4264 +
4265 + skb_pull(skb, sizeof(*rxd));
4266 +@@ -274,7 +274,7 @@ int mt7921_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
4267 + mcu_txd->s2d_index = MCU_S2D_H2N;
4268 + mcu_txd->ext_cid = FIELD_GET(__MCU_CMD_FIELD_EXT_ID, cmd);
4269 +
4270 +- if (mcu_txd->ext_cid || (cmd & MCU_CE_PREFIX)) {
4271 ++ if (mcu_txd->ext_cid || (cmd & __MCU_CMD_FIELD_CE)) {
4272 + if (cmd & __MCU_CMD_FIELD_QUERY)
4273 + mcu_txd->set_query = MCU_Q_QUERY;
4274 + else
4275 +@@ -883,8 +883,8 @@ int mt7921_mcu_fw_log_2_host(struct mt7921_dev *dev, u8 ctrl)
4276 + .ctrl_val = ctrl
4277 + };
4278 +
4279 +- return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_FWLOG_2_HOST, &data,
4280 +- sizeof(data), false);
4281 ++ return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(FWLOG_2_HOST),
4282 ++ &data, sizeof(data), false);
4283 + }
4284 +
4285 + int mt7921_run_firmware(struct mt7921_dev *dev)
4286 +@@ -1009,8 +1009,8 @@ int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif)
4287 + e->timer = q->mu_edca_timer;
4288 + }
4289 +
4290 +- return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_MU_EDCA_PARMS, &req_mu,
4291 +- sizeof(req_mu), false);
4292 ++ return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_MU_EDCA_PARMS),
4293 ++ &req_mu, sizeof(req_mu), false);
4294 + }
4295 +
4296 + int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd)
4297 +@@ -1214,13 +1214,13 @@ mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif,
4298 + if (vif->type != NL80211_IFTYPE_STATION)
4299 + return 0;
4300 +
4301 +- err = mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_BSS_ABORT, &req_hdr,
4302 +- sizeof(req_hdr), false);
4303 ++ err = mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_BSS_ABORT),
4304 ++ &req_hdr, sizeof(req_hdr), false);
4305 + if (err < 0 || !enable)
4306 + return err;
4307 +
4308 +- return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_BSS_CONNECTED, &req,
4309 +- sizeof(req), false);
4310 ++ return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_BSS_CONNECTED),
4311 ++ &req, sizeof(req), false);
4312 + }
4313 +
4314 + int mt7921_mcu_sta_update(struct mt7921_dev *dev, struct ieee80211_sta *sta,
4315 +@@ -1330,7 +1330,7 @@ int mt7921_get_txpwr_info(struct mt7921_dev *dev, struct mt7921_txpwr *txpwr)
4316 + struct sk_buff *skb;
4317 + int ret;
4318 +
4319 +- ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_CMD_GET_TXPWR,
4320 ++ ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_CE_CMD(GET_TXPWR),
4321 + &req, sizeof(req), true, &skb);
4322 + if (ret)
4323 + return ret;
4324 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7921/testmode.c
4325 +index 8bd43879dd6fe..bdec8684ce94c 100644
4326 +--- a/drivers/net/wireless/mediatek/mt76/mt7921/testmode.c
4327 ++++ b/drivers/net/wireless/mediatek/mt76/mt7921/testmode.c
4328 +@@ -66,7 +66,7 @@ mt7921_tm_set(struct mt7921_dev *dev, struct mt7921_tm_cmd *req)
4329 + if (!mt76_testmode_enabled(phy))
4330 + goto out;
4331 +
4332 +- ret = mt76_mcu_send_msg(&dev->mt76, MCU_CMD_TEST_CTRL, &cmd,
4333 ++ ret = mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(TEST_CTRL), &cmd,
4334 + sizeof(cmd), false);
4335 + if (ret)
4336 + goto out;
4337 +@@ -95,7 +95,7 @@ mt7921_tm_query(struct mt7921_dev *dev, struct mt7921_tm_cmd *req,
4338 + struct sk_buff *skb;
4339 + int ret;
4340 +
4341 +- ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_CMD_TEST_CTRL,
4342 ++ ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_CE_CMD(TEST_CTRL),
4343 + &cmd, sizeof(cmd), true, &skb);
4344 + if (ret)
4345 + goto out;
4346 +diff --git a/drivers/pci/controller/pcie-mt7621.c b/drivers/pci/controller/pcie-mt7621.c
4347 +index 73b91315c1656..6ec53aa81b920 100644
4348 +--- a/drivers/pci/controller/pcie-mt7621.c
4349 ++++ b/drivers/pci/controller/pcie-mt7621.c
4350 +@@ -109,15 +109,6 @@ static inline void pcie_write(struct mt7621_pcie *pcie, u32 val, u32 reg)
4351 + writel_relaxed(val, pcie->base + reg);
4352 + }
4353 +
4354 +-static inline void pcie_rmw(struct mt7621_pcie *pcie, u32 reg, u32 clr, u32 set)
4355 +-{
4356 +- u32 val = readl_relaxed(pcie->base + reg);
4357 +-
4358 +- val &= ~clr;
4359 +- val |= set;
4360 +- writel_relaxed(val, pcie->base + reg);
4361 +-}
4362 +-
4363 + static inline u32 pcie_port_read(struct mt7621_pcie_port *port, u32 reg)
4364 + {
4365 + return readl_relaxed(port->base + reg);
4366 +diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
4367 +index f2e961f998ca2..341156e2a29b9 100644
4368 +--- a/drivers/remoteproc/Kconfig
4369 ++++ b/drivers/remoteproc/Kconfig
4370 +@@ -180,6 +180,7 @@ config QCOM_Q6V5_ADSP
4371 + depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
4372 + depends on QCOM_SYSMON || QCOM_SYSMON=n
4373 + depends on RPMSG_QCOM_GLINK || RPMSG_QCOM_GLINK=n
4374 ++ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
4375 + select MFD_SYSCON
4376 + select QCOM_PIL_INFO
4377 + select QCOM_MDT_LOADER
4378 +@@ -199,6 +200,7 @@ config QCOM_Q6V5_MSS
4379 + depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
4380 + depends on QCOM_SYSMON || QCOM_SYSMON=n
4381 + depends on RPMSG_QCOM_GLINK || RPMSG_QCOM_GLINK=n
4382 ++ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
4383 + select MFD_SYSCON
4384 + select QCOM_MDT_LOADER
4385 + select QCOM_PIL_INFO
4386 +@@ -218,6 +220,7 @@ config QCOM_Q6V5_PAS
4387 + depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
4388 + depends on QCOM_SYSMON || QCOM_SYSMON=n
4389 + depends on RPMSG_QCOM_GLINK || RPMSG_QCOM_GLINK=n
4390 ++ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
4391 + select MFD_SYSCON
4392 + select QCOM_PIL_INFO
4393 + select QCOM_MDT_LOADER
4394 +@@ -239,6 +242,7 @@ config QCOM_Q6V5_WCSS
4395 + depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
4396 + depends on QCOM_SYSMON || QCOM_SYSMON=n
4397 + depends on RPMSG_QCOM_GLINK || RPMSG_QCOM_GLINK=n
4398 ++ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
4399 + select MFD_SYSCON
4400 + select QCOM_MDT_LOADER
4401 + select QCOM_PIL_INFO
4402 +diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c
4403 +index eada7e34f3af5..442a388f81028 100644
4404 +--- a/drivers/remoteproc/qcom_q6v5.c
4405 ++++ b/drivers/remoteproc/qcom_q6v5.c
4406 +@@ -10,6 +10,7 @@
4407 + #include <linux/platform_device.h>
4408 + #include <linux/interrupt.h>
4409 + #include <linux/module.h>
4410 ++#include <linux/soc/qcom/qcom_aoss.h>
4411 + #include <linux/soc/qcom/smem.h>
4412 + #include <linux/soc/qcom/smem_state.h>
4413 + #include <linux/remoteproc.h>
4414 +diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
4415 +index b5907b80727cc..3ec34ad0e893d 100644
4416 +--- a/drivers/rpmsg/rpmsg_char.c
4417 ++++ b/drivers/rpmsg/rpmsg_char.c
4418 +@@ -90,7 +90,7 @@ static int rpmsg_eptdev_destroy(struct device *dev, void *data)
4419 + /* wake up any blocked readers */
4420 + wake_up_interruptible(&eptdev->readq);
4421 +
4422 +- device_del(&eptdev->dev);
4423 ++ cdev_device_del(&eptdev->cdev, &eptdev->dev);
4424 + put_device(&eptdev->dev);
4425 +
4426 + return 0;
4427 +@@ -333,7 +333,6 @@ static void rpmsg_eptdev_release_device(struct device *dev)
4428 +
4429 + ida_simple_remove(&rpmsg_ept_ida, dev->id);
4430 + ida_simple_remove(&rpmsg_minor_ida, MINOR(eptdev->dev.devt));
4431 +- cdev_del(&eptdev->cdev);
4432 + kfree(eptdev);
4433 + }
4434 +
4435 +@@ -378,19 +377,13 @@ static int rpmsg_eptdev_create(struct rpmsg_ctrldev *ctrldev,
4436 + dev->id = ret;
4437 + dev_set_name(dev, "rpmsg%d", ret);
4438 +
4439 +- ret = cdev_add(&eptdev->cdev, dev->devt, 1);
4440 ++ ret = cdev_device_add(&eptdev->cdev, &eptdev->dev);
4441 + if (ret)
4442 + goto free_ept_ida;
4443 +
4444 + /* We can now rely on the release function for cleanup */
4445 + dev->release = rpmsg_eptdev_release_device;
4446 +
4447 +- ret = device_add(dev);
4448 +- if (ret) {
4449 +- dev_err(dev, "device_add failed: %d\n", ret);
4450 +- put_device(dev);
4451 +- }
4452 +-
4453 + return ret;
4454 +
4455 + free_ept_ida:
4456 +@@ -459,7 +452,6 @@ static void rpmsg_ctrldev_release_device(struct device *dev)
4457 +
4458 + ida_simple_remove(&rpmsg_ctrl_ida, dev->id);
4459 + ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt));
4460 +- cdev_del(&ctrldev->cdev);
4461 + kfree(ctrldev);
4462 + }
4463 +
4464 +@@ -494,19 +486,13 @@ static int rpmsg_chrdev_probe(struct rpmsg_device *rpdev)
4465 + dev->id = ret;
4466 + dev_set_name(&ctrldev->dev, "rpmsg_ctrl%d", ret);
4467 +
4468 +- ret = cdev_add(&ctrldev->cdev, dev->devt, 1);
4469 ++ ret = cdev_device_add(&ctrldev->cdev, &ctrldev->dev);
4470 + if (ret)
4471 + goto free_ctrl_ida;
4472 +
4473 + /* We can now rely on the release function for cleanup */
4474 + dev->release = rpmsg_ctrldev_release_device;
4475 +
4476 +- ret = device_add(dev);
4477 +- if (ret) {
4478 +- dev_err(&rpdev->dev, "device_add failed: %d\n", ret);
4479 +- put_device(dev);
4480 +- }
4481 +-
4482 + dev_set_drvdata(&rpdev->dev, ctrldev);
4483 +
4484 + return ret;
4485 +@@ -532,7 +518,7 @@ static void rpmsg_chrdev_remove(struct rpmsg_device *rpdev)
4486 + if (ret)
4487 + dev_warn(&rpdev->dev, "failed to nuke endpoints: %d\n", ret);
4488 +
4489 +- device_del(&ctrldev->dev);
4490 ++ cdev_device_del(&ctrldev->cdev, &ctrldev->dev);
4491 + put_device(&ctrldev->dev);
4492 + }
4493 +
4494 +diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
4495 +index d24cafe02708f..511bf8e0a436c 100644
4496 +--- a/drivers/s390/scsi/zfcp_fc.c
4497 ++++ b/drivers/s390/scsi/zfcp_fc.c
4498 +@@ -521,6 +521,8 @@ static void zfcp_fc_adisc_handler(void *data)
4499 + goto out;
4500 + }
4501 +
4502 ++ /* re-init to undo drop from zfcp_fc_adisc() */
4503 ++ port->d_id = ntoh24(adisc_resp->adisc_port_id);
4504 + /* port is good, unblock rport without going through erp */
4505 + zfcp_scsi_schedule_rport_register(port);
4506 + out:
4507 +@@ -534,6 +536,7 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
4508 + struct zfcp_fc_req *fc_req;
4509 + struct zfcp_adapter *adapter = port->adapter;
4510 + struct Scsi_Host *shost = adapter->scsi_host;
4511 ++ u32 d_id;
4512 + int ret;
4513 +
4514 + fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC);
4515 +@@ -558,7 +561,15 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
4516 + fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
4517 + hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));
4518 +
4519 +- ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els,
4520 ++ d_id = port->d_id; /* remember as destination for send els below */
4521 ++ /*
4522 ++ * Force fresh GID_PN lookup on next port recovery.
4523 ++ * Must happen after request setup and before sending request,
4524 ++ * to prevent race with port->d_id re-init in zfcp_fc_adisc_handler().
4525 ++ */
4526 ++ port->d_id = 0;
4527 ++
4528 ++ ret = zfcp_fsf_send_els(adapter, d_id, &fc_req->ct_els,
4529 + ZFCP_FC_CTELS_TMO);
4530 + if (ret)
4531 + kmem_cache_free(zfcp_fc_req_cache, fc_req);
4532 +diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
4533 +index 71fa62bd30830..9be273c320e21 100644
4534 +--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
4535 ++++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
4536 +@@ -82,7 +82,7 @@ static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
4537 + static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba);
4538 + static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
4539 + struct device *parent, int npiv);
4540 +-static void bnx2fc_destroy_work(struct work_struct *work);
4541 ++static void bnx2fc_port_destroy(struct fcoe_port *port);
4542 +
4543 + static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev);
4544 + static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device
4545 +@@ -907,9 +907,6 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
4546 + __bnx2fc_destroy(interface);
4547 + }
4548 + mutex_unlock(&bnx2fc_dev_lock);
4549 +-
4550 +- /* Ensure ALL destroy work has been completed before return */
4551 +- flush_workqueue(bnx2fc_wq);
4552 + return;
4553 +
4554 + default:
4555 +@@ -1215,8 +1212,8 @@ static int bnx2fc_vport_destroy(struct fc_vport *vport)
4556 + mutex_unlock(&n_port->lp_mutex);
4557 + bnx2fc_free_vport(interface->hba, port->lport);
4558 + bnx2fc_port_shutdown(port->lport);
4559 ++ bnx2fc_port_destroy(port);
4560 + bnx2fc_interface_put(interface);
4561 +- queue_work(bnx2fc_wq, &port->destroy_work);
4562 + return 0;
4563 + }
4564 +
4565 +@@ -1525,7 +1522,6 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
4566 + port->lport = lport;
4567 + port->priv = interface;
4568 + port->get_netdev = bnx2fc_netdev;
4569 +- INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
4570 +
4571 + /* Configure fcoe_port */
4572 + rc = bnx2fc_lport_config(lport);
4573 +@@ -1653,8 +1649,8 @@ static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
4574 + bnx2fc_interface_cleanup(interface);
4575 + bnx2fc_stop(interface);
4576 + list_del(&interface->list);
4577 ++ bnx2fc_port_destroy(port);
4578 + bnx2fc_interface_put(interface);
4579 +- queue_work(bnx2fc_wq, &port->destroy_work);
4580 + }
4581 +
4582 + /**
4583 +@@ -1694,15 +1690,12 @@ netdev_err:
4584 + return rc;
4585 + }
4586 +
4587 +-static void bnx2fc_destroy_work(struct work_struct *work)
4588 ++static void bnx2fc_port_destroy(struct fcoe_port *port)
4589 + {
4590 +- struct fcoe_port *port;
4591 + struct fc_lport *lport;
4592 +
4593 +- port = container_of(work, struct fcoe_port, destroy_work);
4594 + lport = port->lport;
4595 +-
4596 +- BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
4597 ++ BNX2FC_HBA_DBG(lport, "Entered %s, destroying lport %p\n", __func__, lport);
4598 +
4599 + bnx2fc_if_destroy(lport);
4600 + }
4601 +@@ -2556,9 +2549,6 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
4602 + __bnx2fc_destroy(interface);
4603 + mutex_unlock(&bnx2fc_dev_lock);
4604 +
4605 +- /* Ensure ALL destroy work has been completed before return */
4606 +- flush_workqueue(bnx2fc_wq);
4607 +-
4608 + bnx2fc_ulp_stop(hba);
4609 + /* unregister cnic device */
4610 + if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
4611 +diff --git a/drivers/scsi/elx/libefc/efc_els.c b/drivers/scsi/elx/libefc/efc_els.c
4612 +index 24db0accb256e..5f690378fe9a9 100644
4613 +--- a/drivers/scsi/elx/libefc/efc_els.c
4614 ++++ b/drivers/scsi/elx/libefc/efc_els.c
4615 +@@ -46,18 +46,14 @@ efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen)
4616 +
4617 + efc = node->efc;
4618 +
4619 +- spin_lock_irqsave(&node->els_ios_lock, flags);
4620 +-
4621 + if (!node->els_io_enabled) {
4622 + efc_log_err(efc, "els io alloc disabled\n");
4623 +- spin_unlock_irqrestore(&node->els_ios_lock, flags);
4624 + return NULL;
4625 + }
4626 +
4627 + els = mempool_alloc(efc->els_io_pool, GFP_ATOMIC);
4628 + if (!els) {
4629 + atomic_add_return(1, &efc->els_io_alloc_failed_count);
4630 +- spin_unlock_irqrestore(&node->els_ios_lock, flags);
4631 + return NULL;
4632 + }
4633 +
4634 +@@ -74,7 +70,6 @@ efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen)
4635 + &els->io.req.phys, GFP_DMA);
4636 + if (!els->io.req.virt) {
4637 + mempool_free(els, efc->els_io_pool);
4638 +- spin_unlock_irqrestore(&node->els_ios_lock, flags);
4639 + return NULL;
4640 + }
4641 +
4642 +@@ -94,10 +89,11 @@ efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen)
4643 +
4644 + /* add els structure to ELS IO list */
4645 + INIT_LIST_HEAD(&els->list_entry);
4646 ++ spin_lock_irqsave(&node->els_ios_lock, flags);
4647 + list_add_tail(&els->list_entry, &node->els_ios_list);
4648 ++ spin_unlock_irqrestore(&node->els_ios_lock, flags);
4649 + }
4650 +
4651 +- spin_unlock_irqrestore(&node->els_ios_lock, flags);
4652 + return els;
4653 + }
4654 +
4655 +diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
4656 +index 0b96b14bbfe11..9c5211f2ea84c 100644
4657 +--- a/drivers/tty/n_gsm.c
4658 ++++ b/drivers/tty/n_gsm.c
4659 +@@ -322,6 +322,7 @@ static int addr_cnt;
4660 + #define GSM1_ESCAPE_BITS 0x20
4661 + #define XON 0x11
4662 + #define XOFF 0x13
4663 ++#define ISO_IEC_646_MASK 0x7F
4664 +
4665 + static const struct tty_port_operations gsm_port_ops;
4666 +
4667 +@@ -531,7 +532,8 @@ static int gsm_stuff_frame(const u8 *input, u8 *output, int len)
4668 + int olen = 0;
4669 + while (len--) {
4670 + if (*input == GSM1_SOF || *input == GSM1_ESCAPE
4671 +- || *input == XON || *input == XOFF) {
4672 ++ || (*input & ISO_IEC_646_MASK) == XON
4673 ++ || (*input & ISO_IEC_646_MASK) == XOFF) {
4674 + *output++ = GSM1_ESCAPE;
4675 + *output++ = *input++ ^ GSM1_ESCAPE_BITS;
4676 + olen++;
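
The n_gsm check above now compares only the low seven bits of each byte (ISO/IEC 646), so values such as 0x91 and 0x93, which a peer doing software flow control would still interpret as XON/XOFF, get escaped along with the plain 0x11/0x13. A small userspace sketch of the same stuffing rule, with the constants copied from the hunk and the surrounding harness purely illustrative:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define GSM1_SOF         0x7e
#define GSM1_ESCAPE      0x7b
#define GSM1_ESCAPE_BITS 0x20
#define XON              0x11
#define XOFF             0x13
#define ISO_IEC_646_MASK 0x7f

static size_t stuff(const uint8_t *in, size_t len, uint8_t *out)
{
        size_t olen = 0;

        while (len--) {
                uint8_t b = *in++;

                /* The far end looks at the low 7 bits, so 0x91/0x93 must be
                 * escaped exactly like 0x11/0x13. */
                if (b == GSM1_SOF || b == GSM1_ESCAPE ||
                    (b & ISO_IEC_646_MASK) == XON ||
                    (b & ISO_IEC_646_MASK) == XOFF) {
                        out[olen++] = GSM1_ESCAPE;
                        out[olen++] = b ^ GSM1_ESCAPE_BITS;
                } else {
                        out[olen++] = b;
                }
        }
        return olen;
}

int main(void)
{
        const uint8_t msg[] = { 0x11, 0x91, 0x41 };  /* XON, XON|0x80, 'A' */
        uint8_t buf[8];
        size_t n = stuff(msg, sizeof(msg), buf);

        for (size_t i = 0; i < n; i++)
                printf("%02x ", buf[i]);
        printf("\n");
        return 0;
}

Running it prints "7b 31 7b b1 41": both the plain and the high-bit-set XON are escaped, while the ordinary byte passes through unchanged.
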
4677 +diff --git a/drivers/tty/rpmsg_tty.c b/drivers/tty/rpmsg_tty.c
4678 +index dae2a4e44f386..29db413bbc030 100644
4679 +--- a/drivers/tty/rpmsg_tty.c
4680 ++++ b/drivers/tty/rpmsg_tty.c
4681 +@@ -50,10 +50,17 @@ static int rpmsg_tty_cb(struct rpmsg_device *rpdev, void *data, int len, void *p
4682 + static int rpmsg_tty_install(struct tty_driver *driver, struct tty_struct *tty)
4683 + {
4684 + struct rpmsg_tty_port *cport = idr_find(&tty_idr, tty->index);
4685 ++ struct tty_port *port;
4686 +
4687 + tty->driver_data = cport;
4688 +
4689 +- return tty_port_install(&cport->port, driver, tty);
4690 ++ port = tty_port_get(&cport->port);
4691 ++ return tty_port_install(port, driver, tty);
4692 ++}
4693 ++
4694 ++static void rpmsg_tty_cleanup(struct tty_struct *tty)
4695 ++{
4696 ++ tty_port_put(tty->port);
4697 + }
4698 +
4699 + static int rpmsg_tty_open(struct tty_struct *tty, struct file *filp)
4700 +@@ -106,12 +113,19 @@ static unsigned int rpmsg_tty_write_room(struct tty_struct *tty)
4701 + return size;
4702 + }
4703 +
4704 ++static void rpmsg_tty_hangup(struct tty_struct *tty)
4705 ++{
4706 ++ tty_port_hangup(tty->port);
4707 ++}
4708 ++
4709 + static const struct tty_operations rpmsg_tty_ops = {
4710 + .install = rpmsg_tty_install,
4711 + .open = rpmsg_tty_open,
4712 + .close = rpmsg_tty_close,
4713 + .write = rpmsg_tty_write,
4714 + .write_room = rpmsg_tty_write_room,
4715 ++ .hangup = rpmsg_tty_hangup,
4716 ++ .cleanup = rpmsg_tty_cleanup,
4717 + };
4718 +
4719 + static struct rpmsg_tty_port *rpmsg_tty_alloc_cport(void)
4720 +@@ -137,8 +151,10 @@ static struct rpmsg_tty_port *rpmsg_tty_alloc_cport(void)
4721 + return cport;
4722 + }
4723 +
4724 +-static void rpmsg_tty_release_cport(struct rpmsg_tty_port *cport)
4725 ++static void rpmsg_tty_destruct_port(struct tty_port *port)
4726 + {
4727 ++ struct rpmsg_tty_port *cport = container_of(port, struct rpmsg_tty_port, port);
4728 ++
4729 + mutex_lock(&idr_lock);
4730 + idr_remove(&tty_idr, cport->id);
4731 + mutex_unlock(&idr_lock);
4732 +@@ -146,7 +162,10 @@ static void rpmsg_tty_release_cport(struct rpmsg_tty_port *cport)
4733 + kfree(cport);
4734 + }
4735 +
4736 +-static const struct tty_port_operations rpmsg_tty_port_ops = { };
4737 ++static const struct tty_port_operations rpmsg_tty_port_ops = {
4738 ++ .destruct = rpmsg_tty_destruct_port,
4739 ++};
4740 ++
4741 +
4742 + static int rpmsg_tty_probe(struct rpmsg_device *rpdev)
4743 + {
4744 +@@ -166,7 +185,8 @@ static int rpmsg_tty_probe(struct rpmsg_device *rpdev)
4745 + cport->id, dev);
4746 + if (IS_ERR(tty_dev)) {
4747 + ret = dev_err_probe(dev, PTR_ERR(tty_dev), "Failed to register tty port\n");
4748 +- goto err_destroy;
4749 ++ tty_port_put(&cport->port);
4750 ++ return ret;
4751 + }
4752 +
4753 + cport->rpdev = rpdev;
4754 +@@ -177,12 +197,6 @@ static int rpmsg_tty_probe(struct rpmsg_device *rpdev)
4755 + rpdev->src, rpdev->dst, cport->id);
4756 +
4757 + return 0;
4758 +-
4759 +-err_destroy:
4760 +- tty_port_destroy(&cport->port);
4761 +- rpmsg_tty_release_cport(cport);
4762 +-
4763 +- return ret;
4764 + }
4765 +
4766 + static void rpmsg_tty_remove(struct rpmsg_device *rpdev)
4767 +@@ -192,13 +206,11 @@ static void rpmsg_tty_remove(struct rpmsg_device *rpdev)
4768 + dev_dbg(&rpdev->dev, "Removing rpmsg tty device %d\n", cport->id);
4769 +
4770 + /* User hang up to release the tty */
4771 +- if (tty_port_initialized(&cport->port))
4772 +- tty_port_tty_hangup(&cport->port, false);
4773 ++ tty_port_tty_hangup(&cport->port, false);
4774 +
4775 + tty_unregister_device(rpmsg_tty_driver, cport->id);
4776 +
4777 +- tty_port_destroy(&cport->port);
4778 +- rpmsg_tty_release_cport(cport);
4779 ++ tty_port_put(&cport->port);
4780 + }
4781 +
4782 + static struct rpmsg_device_id rpmsg_driver_tty_id_table[] = {
4783 +diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
4784 +index bce28729dd7bd..be8626234627e 100644
4785 +--- a/drivers/tty/serial/8250/8250_of.c
4786 ++++ b/drivers/tty/serial/8250/8250_of.c
4787 +@@ -83,8 +83,17 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
4788 + port->mapsize = resource_size(&resource);
4789 +
4790 + /* Check for shifted address mapping */
4791 +- if (of_property_read_u32(np, "reg-offset", &prop) == 0)
4792 ++ if (of_property_read_u32(np, "reg-offset", &prop) == 0) {
4793 ++ if (prop >= port->mapsize) {
4794 ++ dev_warn(&ofdev->dev, "reg-offset %u exceeds region size %pa\n",
4795 ++ prop, &port->mapsize);
4796 ++ ret = -EINVAL;
4797 ++ goto err_unprepare;
4798 ++ }
4799 ++
4800 + port->mapbase += prop;
4801 ++ port->mapsize -= prop;
4802 ++ }
4803 +
4804 + port->iotype = UPIO_MEM;
4805 + if (of_property_read_u32(np, "reg-io-width", &prop) == 0) {
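
The 8250_of change rejects a devicetree reg-offset that does not fit inside the mapped resource and also shrinks mapsize by the offset that is applied, so later length checks see the region that is actually usable. A minimal sketch of that bounds check, with simplified types and a made-up helper name:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: mirrors the check added in the hunk above. */
static int apply_reg_offset(uint64_t *mapbase, uint64_t *mapsize, uint32_t offset)
{
        if (offset >= *mapsize)
                return -EINVAL;          /* offset points past the region */

        *mapbase += offset;
        *mapsize -= offset;
        return 0;
}

int main(void)
{
        uint64_t base = 0x1000, size = 0x100;

        printf("ok=%d base=0x%llx size=0x%llx\n",
               apply_reg_offset(&base, &size, 0x20),
               (unsigned long long)base, (unsigned long long)size);
        printf("reject=%d\n", apply_reg_offset(&base, &size, 0x1000));
        return 0;
}

The same offset is both added to the base and subtracted from the size, which is the detail the original code missed.
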
4806 +diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
4807 +index 60f8fffdfd776..8817f6c912cfd 100644
4808 +--- a/drivers/tty/serial/8250/8250_pci.c
4809 ++++ b/drivers/tty/serial/8250/8250_pci.c
4810 +@@ -5174,8 +5174,30 @@ static const struct pci_device_id serial_pci_tbl[] = {
4811 + { PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS400,
4812 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */
4813 + pbn_b2_4_115200 },
4814 ++ /* Brainboxes Devices */
4815 + /*
4816 +- * BrainBoxes UC-260
4817 ++ * Brainboxes UC-101
4818 ++ */
4819 ++ { PCI_VENDOR_ID_INTASHIELD, 0x0BA1,
4820 ++ PCI_ANY_ID, PCI_ANY_ID,
4821 ++ 0, 0,
4822 ++ pbn_b2_2_115200 },
4823 ++ /*
4824 ++ * Brainboxes UC-235/246
4825 ++ */
4826 ++ { PCI_VENDOR_ID_INTASHIELD, 0x0AA1,
4827 ++ PCI_ANY_ID, PCI_ANY_ID,
4828 ++ 0, 0,
4829 ++ pbn_b2_1_115200 },
4830 ++ /*
4831 ++ * Brainboxes UC-257
4832 ++ */
4833 ++ { PCI_VENDOR_ID_INTASHIELD, 0x0861,
4834 ++ PCI_ANY_ID, PCI_ANY_ID,
4835 ++ 0, 0,
4836 ++ pbn_b2_2_115200 },
4837 ++ /*
4838 ++ * Brainboxes UC-260/271/701/756
4839 + */
4840 + { PCI_VENDOR_ID_INTASHIELD, 0x0D21,
4841 + PCI_ANY_ID, PCI_ANY_ID,
4842 +@@ -5183,7 +5205,81 @@ static const struct pci_device_id serial_pci_tbl[] = {
4843 + pbn_b2_4_115200 },
4844 + { PCI_VENDOR_ID_INTASHIELD, 0x0E34,
4845 + PCI_ANY_ID, PCI_ANY_ID,
4846 +- PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
4847 ++ PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
4848 ++ pbn_b2_4_115200 },
4849 ++ /*
4850 ++ * Brainboxes UC-268
4851 ++ */
4852 ++ { PCI_VENDOR_ID_INTASHIELD, 0x0841,
4853 ++ PCI_ANY_ID, PCI_ANY_ID,
4854 ++ 0, 0,
4855 ++ pbn_b2_4_115200 },
4856 ++ /*
4857 ++ * Brainboxes UC-275/279
4858 ++ */
4859 ++ { PCI_VENDOR_ID_INTASHIELD, 0x0881,
4860 ++ PCI_ANY_ID, PCI_ANY_ID,
4861 ++ 0, 0,
4862 ++ pbn_b2_8_115200 },
4863 ++ /*
4864 ++ * Brainboxes UC-302
4865 ++ */
4866 ++ { PCI_VENDOR_ID_INTASHIELD, 0x08E1,
4867 ++ PCI_ANY_ID, PCI_ANY_ID,
4868 ++ 0, 0,
4869 ++ pbn_b2_2_115200 },
4870 ++ /*
4871 ++ * Brainboxes UC-310
4872 ++ */
4873 ++ { PCI_VENDOR_ID_INTASHIELD, 0x08C1,
4874 ++ PCI_ANY_ID, PCI_ANY_ID,
4875 ++ 0, 0,
4876 ++ pbn_b2_2_115200 },
4877 ++ /*
4878 ++ * Brainboxes UC-313
4879 ++ */
4880 ++ { PCI_VENDOR_ID_INTASHIELD, 0x08A3,
4881 ++ PCI_ANY_ID, PCI_ANY_ID,
4882 ++ 0, 0,
4883 ++ pbn_b2_2_115200 },
4884 ++ /*
4885 ++ * Brainboxes UC-320/324
4886 ++ */
4887 ++ { PCI_VENDOR_ID_INTASHIELD, 0x0A61,
4888 ++ PCI_ANY_ID, PCI_ANY_ID,
4889 ++ 0, 0,
4890 ++ pbn_b2_1_115200 },
4891 ++ /*
4892 ++ * Brainboxes UC-346
4893 ++ */
4894 ++ { PCI_VENDOR_ID_INTASHIELD, 0x0B02,
4895 ++ PCI_ANY_ID, PCI_ANY_ID,
4896 ++ 0, 0,
4897 ++ pbn_b2_4_115200 },
4898 ++ /*
4899 ++ * Brainboxes UC-357
4900 ++ */
4901 ++ { PCI_VENDOR_ID_INTASHIELD, 0x0A81,
4902 ++ PCI_ANY_ID, PCI_ANY_ID,
4903 ++ 0, 0,
4904 ++ pbn_b2_2_115200 },
4905 ++ { PCI_VENDOR_ID_INTASHIELD, 0x0A83,
4906 ++ PCI_ANY_ID, PCI_ANY_ID,
4907 ++ 0, 0,
4908 ++ pbn_b2_2_115200 },
4909 ++ /*
4910 ++ * Brainboxes UC-368
4911 ++ */
4912 ++ { PCI_VENDOR_ID_INTASHIELD, 0x0C41,
4913 ++ PCI_ANY_ID, PCI_ANY_ID,
4914 ++ 0, 0,
4915 ++ pbn_b2_4_115200 },
4916 ++ /*
4917 ++ * Brainboxes UC-420/431
4918 ++ */
4919 ++ { PCI_VENDOR_ID_INTASHIELD, 0x0921,
4920 ++ PCI_ANY_ID, PCI_ANY_ID,
4921 ++ 0, 0,
4922 + pbn_b2_4_115200 },
4923 + /*
4924 + * Perle PCI-RAS cards
4925 +diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
4926 +index 6ec34260d6b18..da54f827c5efc 100644
4927 +--- a/drivers/tty/serial/amba-pl011.c
4928 ++++ b/drivers/tty/serial/amba-pl011.c
4929 +@@ -1615,8 +1615,12 @@ static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
4930 + container_of(port, struct uart_amba_port, port);
4931 + unsigned int cr;
4932 +
4933 +- if (port->rs485.flags & SER_RS485_ENABLED)
4934 +- mctrl &= ~TIOCM_RTS;
4935 ++ if (port->rs485.flags & SER_RS485_ENABLED) {
4936 ++ if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
4937 ++ mctrl &= ~TIOCM_RTS;
4938 ++ else
4939 ++ mctrl |= TIOCM_RTS;
4940 ++ }
4941 +
4942 + cr = pl011_read(uap, REG_CR);
4943 +
4944 +diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
4945 +index 6cfc3bec67492..2d3fbcbfaf108 100644
4946 +--- a/drivers/tty/serial/stm32-usart.c
4947 ++++ b/drivers/tty/serial/stm32-usart.c
4948 +@@ -696,7 +696,7 @@ static void stm32_usart_start_tx(struct uart_port *port)
4949 + struct serial_rs485 *rs485conf = &port->rs485;
4950 + struct circ_buf *xmit = &port->state->xmit;
4951 +
4952 +- if (uart_circ_empty(xmit))
4953 ++ if (uart_circ_empty(xmit) && !port->x_char)
4954 + return;
4955 +
4956 + if (rs485conf->flags & SER_RS485_ENABLED) {
4957 +diff --git a/drivers/usb/cdns3/drd.c b/drivers/usb/cdns3/drd.c
4958 +index 55c73b1d87047..d00ff98dffabf 100644
4959 +--- a/drivers/usb/cdns3/drd.c
4960 ++++ b/drivers/usb/cdns3/drd.c
4961 +@@ -483,11 +483,11 @@ int cdns_drd_exit(struct cdns *cdns)
4962 + /* Indicate the cdns3 core was power lost before */
4963 + bool cdns_power_is_lost(struct cdns *cdns)
4964 + {
4965 +- if (cdns->version == CDNS3_CONTROLLER_V1) {
4966 +- if (!(readl(&cdns->otg_v1_regs->simulate) & BIT(0)))
4967 ++ if (cdns->version == CDNS3_CONTROLLER_V0) {
4968 ++ if (!(readl(&cdns->otg_v0_regs->simulate) & BIT(0)))
4969 + return true;
4970 + } else {
4971 +- if (!(readl(&cdns->otg_v0_regs->simulate) & BIT(0)))
4972 ++ if (!(readl(&cdns->otg_v1_regs->simulate) & BIT(0)))
4973 + return true;
4974 + }
4975 + return false;
4976 +diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
4977 +index 4169cf40a03b5..8f8405b0d6080 100644
4978 +--- a/drivers/usb/common/ulpi.c
4979 ++++ b/drivers/usb/common/ulpi.c
4980 +@@ -39,8 +39,11 @@ static int ulpi_match(struct device *dev, struct device_driver *driver)
4981 + struct ulpi *ulpi = to_ulpi_dev(dev);
4982 + const struct ulpi_device_id *id;
4983 +
4984 +- /* Some ULPI devices don't have a vendor id so rely on OF match */
4985 +- if (ulpi->id.vendor == 0)
4986 ++ /*
4987 ++ * Some ULPI devices don't have a vendor id
4988 ++ * or provide an id_table so rely on OF match.
4989 ++ */
4990 ++ if (ulpi->id.vendor == 0 || !drv->id_table)
4991 + return of_driver_match_device(dev, driver);
4992 +
4993 + for (id = drv->id_table; id->vendor; id++)
4994 +diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
4995 +index 8948b3bf7622b..95d13b9adc139 100644
4996 +--- a/drivers/usb/core/hcd.c
4997 ++++ b/drivers/usb/core/hcd.c
4998 +@@ -1563,6 +1563,13 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
4999 + urb->hcpriv = NULL;
5000 + INIT_LIST_HEAD(&urb->urb_list);
5001 + atomic_dec(&urb->use_count);
5002 ++ /*
5003 ++ * Order the write of urb->use_count above before the read
5004 ++ * of urb->reject below. Pairs with the memory barriers in
5005 ++ * usb_kill_urb() and usb_poison_urb().
5006 ++ */
5007 ++ smp_mb__after_atomic();
5008 ++
5009 + atomic_dec(&urb->dev->urbnum);
5010 + if (atomic_read(&urb->reject))
5011 + wake_up(&usb_kill_urb_queue);
5012 +@@ -1665,6 +1672,13 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
5013 +
5014 + usb_anchor_resume_wakeups(anchor);
5015 + atomic_dec(&urb->use_count);
5016 ++ /*
5017 ++ * Order the write of urb->use_count above before the read
5018 ++ * of urb->reject below. Pairs with the memory barriers in
5019 ++ * usb_kill_urb() and usb_poison_urb().
5020 ++ */
5021 ++ smp_mb__after_atomic();
5022 ++
5023 + if (unlikely(atomic_read(&urb->reject)))
5024 + wake_up(&usb_kill_urb_queue);
5025 + usb_put_urb(urb);
5026 +diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
5027 +index 30727729a44cc..33d62d7e3929f 100644
5028 +--- a/drivers/usb/core/urb.c
5029 ++++ b/drivers/usb/core/urb.c
5030 +@@ -715,6 +715,12 @@ void usb_kill_urb(struct urb *urb)
5031 + if (!(urb && urb->dev && urb->ep))
5032 + return;
5033 + atomic_inc(&urb->reject);
5034 ++ /*
5035 ++ * Order the write of urb->reject above before the read
5036 ++ * of urb->use_count below. Pairs with the barriers in
5037 ++ * __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
5038 ++ */
5039 ++ smp_mb__after_atomic();
5040 +
5041 + usb_hcd_unlink_urb(urb, -ENOENT);
5042 + wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
5043 +@@ -756,6 +762,12 @@ void usb_poison_urb(struct urb *urb)
5044 + if (!urb)
5045 + return;
5046 + atomic_inc(&urb->reject);
5047 ++ /*
5048 ++ * Order the write of urb->reject above before the read
5049 ++ * of urb->use_count below. Pairs with the barriers in
5050 ++ * __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
5051 ++ */
5052 ++ smp_mb__after_atomic();
5053 +
5054 + if (!urb->dev || !urb->ep)
5055 + return;
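
The new smp_mb__after_atomic() calls pair the two sides of a store-buffering pattern: submit/giveback decrement use_count and then read reject, while usb_kill_urb()/usb_poison_urb() increment reject and then read use_count, so at least one side always observes the other's update and the killer can never sleep forever on a wakeup that was skipped. A minimal userspace analogue of that guarantee, assuming C11 seq_cst fences in place of the kernel barriers (compile with -pthread; this is an illustration, not the USB core code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int use_count = 1;
static atomic_int reject;
static int r_giveback, r_kill;

static void *giveback_side(void *arg)
{
        (void)arg;
        atomic_fetch_sub_explicit(&use_count, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);   /* smp_mb__after_atomic() analogue */
        r_giveback = atomic_load_explicit(&reject, memory_order_relaxed);
        return NULL;
}

static void *kill_side(void *arg)
{
        (void)arg;
        atomic_fetch_add_explicit(&reject, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);   /* smp_mb__after_atomic() analogue */
        r_kill = atomic_load_explicit(&use_count, memory_order_relaxed);
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, giveback_side, NULL);
        pthread_create(&b, NULL, kill_side, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);

        /* Forbidden outcome with the fences: both sides seeing stale values. */
        printf("giveback saw reject=%d, kill saw use_count=%d\n",
               r_giveback, r_kill);
        printf("missed each other: %s\n",
               (r_giveback == 0 && r_kill == 1) ? "yes (bug)" : "no");
        return 0;
}

Whichever interleaving the scheduler produces, the "missed each other" case is ruled out, which is exactly what the wait_event()/wake_up() pairing in the USB core relies on.
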
5056 +diff --git a/drivers/usb/dwc3/dwc3-xilinx.c b/drivers/usb/dwc3/dwc3-xilinx.c
5057 +index 9cc3ad701a295..a6f3a9b38789e 100644
5058 +--- a/drivers/usb/dwc3/dwc3-xilinx.c
5059 ++++ b/drivers/usb/dwc3/dwc3-xilinx.c
5060 +@@ -99,17 +99,29 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data)
5061 + struct device *dev = priv_data->dev;
5062 + struct reset_control *crst, *hibrst, *apbrst;
5063 + struct phy *usb3_phy;
5064 +- int ret;
5065 ++ int ret = 0;
5066 + u32 reg;
5067 +
5068 +- usb3_phy = devm_phy_get(dev, "usb3-phy");
5069 +- if (PTR_ERR(usb3_phy) == -EPROBE_DEFER) {
5070 +- ret = -EPROBE_DEFER;
5071 ++ usb3_phy = devm_phy_optional_get(dev, "usb3-phy");
5072 ++ if (IS_ERR(usb3_phy)) {
5073 ++ ret = PTR_ERR(usb3_phy);
5074 ++ dev_err_probe(dev, ret,
5075 ++ "failed to get USB3 PHY\n");
5076 + goto err;
5077 +- } else if (IS_ERR(usb3_phy)) {
5078 +- usb3_phy = NULL;
5079 + }
5080 +
5081 ++ /*
5082 ++ * The following core resets are not required unless a USB3 PHY
5083 ++ * is used, and the subsequent register settings are not required
5084 ++ * unless a core reset is performed (they should be set properly
5085 ++ * by the first-stage boot loader, but may be reverted by a core
5086 ++ * reset). They may also break the configuration if USB3 is actually
5087 ++ * in use but the usb3-phy entry is missing from the device tree.
5088 ++ * Therefore, skip these operations in this case.
5089 ++ */
5090 ++ if (!usb3_phy)
5091 ++ goto skip_usb3_phy;
5092 ++
5093 + crst = devm_reset_control_get_exclusive(dev, "usb_crst");
5094 + if (IS_ERR(crst)) {
5095 + ret = PTR_ERR(crst);
5096 +@@ -188,6 +200,7 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data)
5097 + goto err;
5098 + }
5099 +
5100 ++skip_usb3_phy:
5101 + /*
5102 + * This routes the USB DMA traffic to go through FPD path instead
5103 + * of reaching DDR directly. This traffic routing is needed to
5104 +diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
5105 +index 1abf08e5164af..6803cd60cc6dc 100644
5106 +--- a/drivers/usb/gadget/function/f_sourcesink.c
5107 ++++ b/drivers/usb/gadget/function/f_sourcesink.c
5108 +@@ -584,6 +584,7 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in,
5109 +
5110 + if (is_iso) {
5111 + switch (speed) {
5112 ++ case USB_SPEED_SUPER_PLUS:
5113 + case USB_SPEED_SUPER:
5114 + size = ss->isoc_maxpacket *
5115 + (ss->isoc_mult + 1) *
5116 +diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
5117 +index c1edcc9b13cec..dc570ce4e8319 100644
5118 +--- a/drivers/usb/host/xhci-plat.c
5119 ++++ b/drivers/usb/host/xhci-plat.c
5120 +@@ -437,6 +437,9 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev)
5121 + struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5122 + int ret;
5123 +
5124 ++ if (pm_runtime_suspended(dev))
5125 ++ pm_runtime_resume(dev);
5126 ++
5127 + ret = xhci_priv_suspend_quirk(hcd);
5128 + if (ret)
5129 + return ret;
5130 +diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
5131 +index 29191d33c0e3e..1a05e3dcfec8a 100644
5132 +--- a/drivers/usb/storage/unusual_devs.h
5133 ++++ b/drivers/usb/storage/unusual_devs.h
5134 +@@ -2301,6 +2301,16 @@ UNUSUAL_DEV( 0x2027, 0xa001, 0x0000, 0x9999,
5135 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
5136 + US_FL_SCM_MULT_TARG ),
5137 +
5138 ++/*
5139 ++ * Reported by DocMAX <mail@××××××××××.de>
5140 ++ * and Thomas Weißschuh <linux@××××××××××.net>
5141 ++ */
5142 ++UNUSUAL_DEV( 0x2109, 0x0715, 0x9999, 0x9999,
5143 ++ "VIA Labs, Inc.",
5144 ++ "VL817 SATA Bridge",
5145 ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
5146 ++ US_FL_IGNORE_UAS),
5147 ++
5148 + UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
5149 + "ST",
5150 + "2A",
5151 +diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
5152 +index 35a1307349a20..e07d26a3cd8e1 100644
5153 +--- a/drivers/usb/typec/tcpm/tcpci.c
5154 ++++ b/drivers/usb/typec/tcpm/tcpci.c
5155 +@@ -75,9 +75,25 @@ static int tcpci_write16(struct tcpci *tcpci, unsigned int reg, u16 val)
5156 + static int tcpci_set_cc(struct tcpc_dev *tcpc, enum typec_cc_status cc)
5157 + {
5158 + struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
5159 ++ bool vconn_pres;
5160 ++ enum typec_cc_polarity polarity = TYPEC_POLARITY_CC1;
5161 + unsigned int reg;
5162 + int ret;
5163 +
5164 ++ ret = regmap_read(tcpci->regmap, TCPC_POWER_STATUS, &reg);
5165 ++ if (ret < 0)
5166 ++ return ret;
5167 ++
5168 ++ vconn_pres = !!(reg & TCPC_POWER_STATUS_VCONN_PRES);
5169 ++ if (vconn_pres) {
5170 ++ ret = regmap_read(tcpci->regmap, TCPC_TCPC_CTRL, &reg);
5171 ++ if (ret < 0)
5172 ++ return ret;
5173 ++
5174 ++ if (reg & TCPC_TCPC_CTRL_ORIENTATION)
5175 ++ polarity = TYPEC_POLARITY_CC2;
5176 ++ }
5177 ++
5178 + switch (cc) {
5179 + case TYPEC_CC_RA:
5180 + reg = (TCPC_ROLE_CTRL_CC_RA << TCPC_ROLE_CTRL_CC1_SHIFT) |
5181 +@@ -112,6 +128,16 @@ static int tcpci_set_cc(struct tcpc_dev *tcpc, enum typec_cc_status cc)
5182 + break;
5183 + }
5184 +
5185 ++ if (vconn_pres) {
5186 ++ if (polarity == TYPEC_POLARITY_CC2) {
5187 ++ reg &= ~(TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT);
5188 ++ reg |= (TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC1_SHIFT);
5189 ++ } else {
5190 ++ reg &= ~(TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT);
5191 ++ reg |= (TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC2_SHIFT);
5192 ++ }
5193 ++ }
5194 ++
5195 + ret = regmap_write(tcpci->regmap, TCPC_ROLE_CTRL, reg);
5196 + if (ret < 0)
5197 + return ret;
5198 +diff --git a/drivers/usb/typec/tcpm/tcpci.h b/drivers/usb/typec/tcpm/tcpci.h
5199 +index 2be7a77d400ef..b2edd45f13c68 100644
5200 +--- a/drivers/usb/typec/tcpm/tcpci.h
5201 ++++ b/drivers/usb/typec/tcpm/tcpci.h
5202 +@@ -98,6 +98,7 @@
5203 + #define TCPC_POWER_STATUS_SOURCING_VBUS BIT(4)
5204 + #define TCPC_POWER_STATUS_VBUS_DET BIT(3)
5205 + #define TCPC_POWER_STATUS_VBUS_PRES BIT(2)
5206 ++#define TCPC_POWER_STATUS_VCONN_PRES BIT(1)
5207 + #define TCPC_POWER_STATUS_SINKING_VBUS BIT(0)
5208 +
5209 + #define TCPC_FAULT_STATUS 0x1f
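
With the new TCPC_POWER_STATUS_VCONN_PRES bit, tcpci_set_cc() above forces the CC pin that is not carrying communication to open whenever VCONN is being sourced, using the detected orientation to pick which ROLE_CONTROL field to clear. A sketch of that read-modify-write on the two 2-bit CC fields, assuming the TCPCI encoding where 0b11 means open (the helper name and the example values are illustrative):

#include <stdint.h>
#include <stdio.h>

#define CC1_SHIFT  0
#define CC2_SHIFT  2
#define CC_MASK    0x3
#define CC_OPEN    0x3   /* 0b11 = open in the TCPCI ROLE_CONTROL encoding (assumed) */

enum polarity { POLARITY_CC1, POLARITY_CC2 };

/* Force the CC line that is NOT used for communication to open. */
static uint8_t force_unused_cc_open(uint8_t role_ctrl, enum polarity pol)
{
        unsigned int shift = (pol == POLARITY_CC2) ? CC1_SHIFT : CC2_SHIFT;

        role_ctrl &= ~(CC_MASK << shift);   /* clear the unused CC field */
        role_ctrl |= (CC_OPEN << shift);    /* ...and drive it open */
        return role_ctrl;
}

int main(void)
{
        uint8_t reg = 0x0a;  /* e.g. the same termination presented on both CC pins */

        printf("cc2 active -> 0x%02x\n", force_unused_cc_open(reg, POLARITY_CC2));
        printf("cc1 active -> 0x%02x\n", force_unused_cc_open(reg, POLARITY_CC1));
        return 0;
}

The patch applies the same clear-then-set sequence to the computed TCPC_ROLE_CTRL value before writing it back through regmap.
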
5210 +diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
5211 +index 59d4fa2443f2b..5fce795b69c7f 100644
5212 +--- a/drivers/usb/typec/tcpm/tcpm.c
5213 ++++ b/drivers/usb/typec/tcpm/tcpm.c
5214 +@@ -5156,7 +5156,8 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
5215 + case SNK_TRYWAIT_DEBOUNCE:
5216 + break;
5217 + case SNK_ATTACH_WAIT:
5218 +- tcpm_set_state(port, SNK_UNATTACHED, 0);
5219 ++ case SNK_DEBOUNCED:
5220 ++ /* Do nothing, as TCPM is still waiting for vbus to reach VSAFE5V to connect */
5221 + break;
5222 +
5223 + case SNK_NEGOTIATE_CAPABILITIES:
5224 +@@ -5263,6 +5264,10 @@ static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port)
5225 + case PR_SWAP_SNK_SRC_SOURCE_ON:
5226 + /* Do nothing, vsafe0v is expected during transition */
5227 + break;
5228 ++ case SNK_ATTACH_WAIT:
5229 ++ case SNK_DEBOUNCED:
5230 ++ /* Do nothing, still waiting for VSAFE5V to connect */
5231 ++ break;
5232 + default:
5233 + if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled)
5234 + tcpm_set_state(port, SNK_UNATTACHED, 0);
5235 +diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
5236 +index bff96d64dddff..6db7c8ddd51cd 100644
5237 +--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
5238 ++++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
5239 +@@ -325,7 +325,7 @@ static int ucsi_ccg_init(struct ucsi_ccg *uc)
5240 + if (status < 0)
5241 + return status;
5242 +
5243 +- if (!data)
5244 ++ if (!(data & DEV_INT))
5245 + return 0;
5246 +
5247 + status = ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
5248 +diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
5249 +index 23999df527393..c8e0ea27caf1d 100644
5250 +--- a/drivers/video/fbdev/hyperv_fb.c
5251 ++++ b/drivers/video/fbdev/hyperv_fb.c
5252 +@@ -287,8 +287,6 @@ struct hvfb_par {
5253 +
5254 + static uint screen_width = HVFB_WIDTH;
5255 + static uint screen_height = HVFB_HEIGHT;
5256 +-static uint screen_width_max = HVFB_WIDTH;
5257 +-static uint screen_height_max = HVFB_HEIGHT;
5258 + static uint screen_depth;
5259 + static uint screen_fb_size;
5260 + static uint dio_fb_size; /* FB size for deferred IO */
5261 +@@ -582,7 +580,6 @@ static int synthvid_get_supported_resolution(struct hv_device *hdev)
5262 + int ret = 0;
5263 + unsigned long t;
5264 + u8 index;
5265 +- int i;
5266 +
5267 + memset(msg, 0, sizeof(struct synthvid_msg));
5268 + msg->vid_hdr.type = SYNTHVID_RESOLUTION_REQUEST;
5269 +@@ -613,13 +610,6 @@ static int synthvid_get_supported_resolution(struct hv_device *hdev)
5270 + goto out;
5271 + }
5272 +
5273 +- for (i = 0; i < msg->resolution_resp.resolution_count; i++) {
5274 +- screen_width_max = max_t(unsigned int, screen_width_max,
5275 +- msg->resolution_resp.supported_resolution[i].width);
5276 +- screen_height_max = max_t(unsigned int, screen_height_max,
5277 +- msg->resolution_resp.supported_resolution[i].height);
5278 +- }
5279 +-
5280 + screen_width =
5281 + msg->resolution_resp.supported_resolution[index].width;
5282 + screen_height =
5283 +@@ -941,7 +931,7 @@ static void hvfb_get_option(struct fb_info *info)
5284 +
5285 + if (x < HVFB_WIDTH_MIN || y < HVFB_HEIGHT_MIN ||
5286 + (synthvid_ver_ge(par->synthvid_version, SYNTHVID_VERSION_WIN10) &&
5287 +- (x > screen_width_max || y > screen_height_max)) ||
5288 ++ (x * y * screen_depth / 8 > screen_fb_size)) ||
5289 + (par->synthvid_version == SYNTHVID_VERSION_WIN8 &&
5290 + x * y * screen_depth / 8 > SYNTHVID_FB_SIZE_WIN8) ||
5291 + (par->synthvid_version == SYNTHVID_VERSION_WIN7 &&
5292 +@@ -1194,8 +1184,8 @@ static int hvfb_probe(struct hv_device *hdev,
5293 + }
5294 +
5295 + hvfb_get_option(info);
5296 +- pr_info("Screen resolution: %dx%d, Color depth: %d\n",
5297 +- screen_width, screen_height, screen_depth);
5298 ++ pr_info("Screen resolution: %dx%d, Color depth: %d, Frame buffer size: %d\n",
5299 ++ screen_width, screen_height, screen_depth, screen_fb_size);
5300 +
5301 + ret = hvfb_getmem(hdev, info);
5302 + if (ret) {
5303 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
5304 +index edfecfe62b4b6..124b9e6815e5f 100644
5305 +--- a/fs/btrfs/ioctl.c
5306 ++++ b/fs/btrfs/ioctl.c
5307 +@@ -1187,6 +1187,35 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
5308 + if (em->generation < newer_than)
5309 + goto next;
5310 +
5311 ++ /*
5312 ++ * Our start offset might be in the middle of an existing extent
5313 ++ * map, so take that into account.
5314 ++ */
5315 ++ range_len = em->len - (cur - em->start);
5316 ++ /*
5317 ++ * If this range of the extent map is already flagged for delalloc,
5318 ++ * skip it, because:
5319 ++ *
5320 ++ * 1) We could deadlock later, when trying to reserve space for
5321 ++ * delalloc, because in case we can't immediately reserve space
5322 ++ * the flusher can start delalloc and wait for the respective
5323 ++ * ordered extents to complete. The deadlock would happen
5324 ++ * because we do the space reservation while holding the range
5325 ++ * locked, and starting writeback, or finishing an ordered
5326 ++ * extent, requires locking the range;
5327 ++ *
5328 ++ * 2) If there's delalloc there, it means there's dirty pages for
5329 ++ * which writeback has not started yet (we clean the delalloc
5330 ++ * flag when starting writeback and after creating an ordered
5331 ++ * extent). If we mark pages in an adjacent range for defrag,
5332 ++ * then we will have a larger contiguous range for delalloc,
5333 ++ * very likely resulting in a larger extent after writeback is
5334 ++ * triggered (except in a case of free space fragmentation).
5335 ++ */
5336 ++ if (test_range_bit(&inode->io_tree, cur, cur + range_len - 1,
5337 ++ EXTENT_DELALLOC, 0, NULL))
5338 ++ goto next;
5339 ++
5340 + /*
5341 + * For do_compress case, we want to compress all valid file
5342 + * extents, thus no @extent_thresh or mergeable check.
5343 +@@ -1195,7 +1224,7 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
5344 + goto add;
5345 +
5346 + /* Skip too large extent */
5347 +- if (em->len >= extent_thresh)
5348 ++ if (range_len >= extent_thresh)
5349 + goto next;
5350 +
5351 + next_mergeable = defrag_check_next_extent(&inode->vfs_inode, em,
5352 +@@ -1416,9 +1445,11 @@ static int defrag_one_cluster(struct btrfs_inode *inode,
5353 + list_for_each_entry(entry, &target_list, list) {
5354 + u32 range_len = entry->len;
5355 +
5356 +- /* Reached the limit */
5357 +- if (max_sectors && max_sectors == *sectors_defragged)
5358 ++ /* Reached or beyond the limit */
5359 ++ if (max_sectors && *sectors_defragged >= max_sectors) {
5360 ++ ret = 1;
5361 + break;
5362 ++ }
5363 +
5364 + if (max_sectors)
5365 + range_len = min_t(u32, range_len,
5366 +@@ -1439,7 +1470,8 @@ static int defrag_one_cluster(struct btrfs_inode *inode,
5367 + extent_thresh, newer_than, do_compress);
5368 + if (ret < 0)
5369 + break;
5370 +- *sectors_defragged += range_len;
5371 ++ *sectors_defragged += range_len >>
5372 ++ inode->root->fs_info->sectorsize_bits;
5373 + }
5374 + out:
5375 + list_for_each_entry_safe(entry, tmp, &target_list, list) {
5376 +@@ -1458,6 +1490,12 @@ out:
5377 + * @newer_than: minimum transid to defrag
5378 + * @max_to_defrag: max number of sectors to be defragged, if 0, the whole inode
5379 + * will be defragged.
5380 ++ *
5381 ++ * Return <0 for error.
5382 ++ * Return >=0 for the number of sectors defragged, and range->start will be updated
5383 ++ * to indicate the file offset where next defrag should be started at.
5384 ++ * (Mostly for autodefrag, which sets @max_to_defrag thus we may exit early without
5385 ++ * defragging all the range).
5386 + */
5387 + int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
5388 + struct btrfs_ioctl_defrag_range_args *range,
5389 +@@ -1473,6 +1511,7 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
5390 + int compress_type = BTRFS_COMPRESS_ZLIB;
5391 + int ret = 0;
5392 + u32 extent_thresh = range->extent_thresh;
5393 ++ pgoff_t start_index;
5394 +
5395 + if (isize == 0)
5396 + return 0;
5397 +@@ -1492,12 +1531,16 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
5398 +
5399 + if (range->start + range->len > range->start) {
5400 + /* Got a specific range */
5401 +- last_byte = min(isize, range->start + range->len) - 1;
5402 ++ last_byte = min(isize, range->start + range->len);
5403 + } else {
5404 + /* Defrag until file end */
5405 +- last_byte = isize - 1;
5406 ++ last_byte = isize;
5407 + }
5408 +
5409 ++ /* Align the range */
5410 ++ cur = round_down(range->start, fs_info->sectorsize);
5411 ++ last_byte = round_up(last_byte, fs_info->sectorsize) - 1;
5412 ++
5413 + /*
5414 + * If we were not given a ra, allocate a readahead context. As
5415 + * readahead is just an optimization, defrag will work without it so
5416 +@@ -1510,16 +1553,26 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
5417 + file_ra_state_init(ra, inode->i_mapping);
5418 + }
5419 +
5420 +- /* Align the range */
5421 +- cur = round_down(range->start, fs_info->sectorsize);
5422 +- last_byte = round_up(last_byte, fs_info->sectorsize) - 1;
5423 ++ /*
5424 ++ * Make writeback start from the beginning of the range, so that the
5425 ++ * defrag range can be written sequentially.
5426 ++ */
5427 ++ start_index = cur >> PAGE_SHIFT;
5428 ++ if (start_index < inode->i_mapping->writeback_index)
5429 ++ inode->i_mapping->writeback_index = start_index;
5430 +
5431 + while (cur < last_byte) {
5432 ++ const unsigned long prev_sectors_defragged = sectors_defragged;
5433 + u64 cluster_end;
5434 +
5435 + /* The cluster size 256K should always be page aligned */
5436 + BUILD_BUG_ON(!IS_ALIGNED(CLUSTER_SIZE, PAGE_SIZE));
5437 +
5438 ++ if (btrfs_defrag_cancelled(fs_info)) {
5439 ++ ret = -EAGAIN;
5440 ++ break;
5441 ++ }
5442 ++
5443 + /* We want the cluster end at page boundary when possible */
5444 + cluster_end = (((cur >> PAGE_SHIFT) +
5445 + (SZ_256K >> PAGE_SHIFT)) << PAGE_SHIFT) - 1;
5446 +@@ -1541,14 +1594,27 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
5447 + cluster_end + 1 - cur, extent_thresh,
5448 + newer_than, do_compress,
5449 + &sectors_defragged, max_to_defrag);
5450 ++
5451 ++ if (sectors_defragged > prev_sectors_defragged)
5452 ++ balance_dirty_pages_ratelimited(inode->i_mapping);
5453 ++
5454 + btrfs_inode_unlock(inode, 0);
5455 + if (ret < 0)
5456 + break;
5457 + cur = cluster_end + 1;
5458 ++ if (ret > 0) {
5459 ++ ret = 0;
5460 ++ break;
5461 ++ }
5462 + }
5463 +
5464 + if (ra_allocated)
5465 + kfree(ra);
5466 ++ /*
5467 ++ * Update range.start for autodefrag, this will indicate where to start
5468 ++ * in next run.
5469 ++ */
5470 ++ range->start = cur;
5471 + if (sectors_defragged) {
5472 + /*
5473 + * We have defragged some sectors, for compression case they
5474 +@@ -3060,10 +3126,8 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
5475 + btrfs_inode_lock(inode, 0);
5476 + err = btrfs_delete_subvolume(dir, dentry);
5477 + btrfs_inode_unlock(inode, 0);
5478 +- if (!err) {
5479 +- fsnotify_rmdir(dir, dentry);
5480 +- d_delete(dentry);
5481 +- }
5482 ++ if (!err)
5483 ++ d_delete_notify(dir, dentry);
5484 +
5485 + out_dput:
5486 + dput(dentry);
5487 +diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
5488 +index c447fa2e2d1fe..2f8696f3b925d 100644
5489 +--- a/fs/ceph/caps.c
5490 ++++ b/fs/ceph/caps.c
5491 +@@ -2218,6 +2218,7 @@ static int unsafe_request_wait(struct inode *inode)
5492 + struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
5493 + struct ceph_inode_info *ci = ceph_inode(inode);
5494 + struct ceph_mds_request *req1 = NULL, *req2 = NULL;
5495 ++ unsigned int max_sessions;
5496 + int ret, err = 0;
5497 +
5498 + spin_lock(&ci->i_unsafe_lock);
5499 +@@ -2235,37 +2236,45 @@ static int unsafe_request_wait(struct inode *inode)
5500 + }
5501 + spin_unlock(&ci->i_unsafe_lock);
5502 +
5503 ++ /*
5504 ++ * The mdsc->max_sessions is unlikely to change in most
5505 ++ * cases; here we retry by reallocating the sessions
5506 ++ * array memory so that we do not need to take the
5507 ++ * mdsc->mutex lock.
5508 ++ */
5509 ++retry:
5510 ++ max_sessions = mdsc->max_sessions;
5511 ++
5512 + /*
5513 + * Trigger to flush the journal logs in all the relevant MDSes
5514 + * manually, or in the worst case we must wait at most 5 seconds
5515 + * to wait the journal logs to be flushed by the MDSes periodically.
5516 + */
5517 +- if (req1 || req2) {
5518 ++ if ((req1 || req2) && likely(max_sessions)) {
5519 + struct ceph_mds_session **sessions = NULL;
5520 + struct ceph_mds_session *s;
5521 + struct ceph_mds_request *req;
5522 +- unsigned int max;
5523 + int i;
5524 +
5525 +- /*
5526 +- * The mdsc->max_sessions is unlikely to be changed
5527 +- * mostly, here we will retry it by reallocating the
5528 +- * sessions arrary memory to get rid of the mdsc->mutex
5529 +- * lock.
5530 +- */
5531 +-retry:
5532 +- max = mdsc->max_sessions;
5533 +- sessions = krealloc(sessions, max * sizeof(s), __GFP_ZERO);
5534 +- if (!sessions)
5535 +- return -ENOMEM;
5536 ++ sessions = kzalloc(max_sessions * sizeof(s), GFP_KERNEL);
5537 ++ if (!sessions) {
5538 ++ err = -ENOMEM;
5539 ++ goto out;
5540 ++ }
5541 +
5542 + spin_lock(&ci->i_unsafe_lock);
5543 + if (req1) {
5544 + list_for_each_entry(req, &ci->i_unsafe_dirops,
5545 + r_unsafe_dir_item) {
5546 + s = req->r_session;
5547 +- if (unlikely(s->s_mds >= max)) {
5548 ++ if (unlikely(s->s_mds >= max_sessions)) {
5549 + spin_unlock(&ci->i_unsafe_lock);
5550 ++ for (i = 0; i < max_sessions; i++) {
5551 ++ s = sessions[i];
5552 ++ if (s)
5553 ++ ceph_put_mds_session(s);
5554 ++ }
5555 ++ kfree(sessions);
5556 + goto retry;
5557 + }
5558 + if (!sessions[s->s_mds]) {
5559 +@@ -2278,8 +2287,14 @@ retry:
5560 + list_for_each_entry(req, &ci->i_unsafe_iops,
5561 + r_unsafe_target_item) {
5562 + s = req->r_session;
5563 +- if (unlikely(s->s_mds >= max)) {
5564 ++ if (unlikely(s->s_mds >= max_sessions)) {
5565 + spin_unlock(&ci->i_unsafe_lock);
5566 ++ for (i = 0; i < max_sessions; i++) {
5567 ++ s = sessions[i];
5568 ++ if (s)
5569 ++ ceph_put_mds_session(s);
5570 ++ }
5571 ++ kfree(sessions);
5572 + goto retry;
5573 + }
5574 + if (!sessions[s->s_mds]) {
5575 +@@ -2300,7 +2315,7 @@ retry:
5576 + spin_unlock(&ci->i_ceph_lock);
5577 +
5578 + /* send flush mdlog request to MDSes */
5579 +- for (i = 0; i < max; i++) {
5580 ++ for (i = 0; i < max_sessions; i++) {
5581 + s = sessions[i];
5582 + if (s) {
5583 + send_flush_mdlog(s);
5584 +@@ -2317,15 +2332,19 @@ retry:
5585 + ceph_timeout_jiffies(req1->r_timeout));
5586 + if (ret)
5587 + err = -EIO;
5588 +- ceph_mdsc_put_request(req1);
5589 + }
5590 + if (req2) {
5591 + ret = !wait_for_completion_timeout(&req2->r_safe_completion,
5592 + ceph_timeout_jiffies(req2->r_timeout));
5593 + if (ret)
5594 + err = -EIO;
5595 +- ceph_mdsc_put_request(req2);
5596 + }
5597 ++
5598 ++out:
5599 ++ if (req1)
5600 ++ ceph_mdsc_put_request(req1);
5601 ++ if (req2)
5602 ++ ceph_mdsc_put_request(req2);
5603 + return err;
5604 + }
5605 +
5606 +diff --git a/fs/ceph/file.c b/fs/ceph/file.c
5607 +index c138e8126286c..7f3291e027b06 100644
5608 +--- a/fs/ceph/file.c
5609 ++++ b/fs/ceph/file.c
5610 +@@ -579,6 +579,7 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
5611 + struct ceph_inode_info *ci = ceph_inode(dir);
5612 + struct inode *inode;
5613 + struct timespec64 now;
5614 ++ struct ceph_string *pool_ns;
5615 + struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
5616 + struct ceph_vino vino = { .ino = req->r_deleg_ino,
5617 + .snap = CEPH_NOSNAP };
5618 +@@ -628,6 +629,12 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
5619 + in.max_size = cpu_to_le64(lo->stripe_unit);
5620 +
5621 + ceph_file_layout_to_legacy(lo, &in.layout);
5622 ++ /* lo is private, so pool_ns can't change */
5623 ++ pool_ns = rcu_dereference_raw(lo->pool_ns);
5624 ++ if (pool_ns) {
5625 ++ iinfo.pool_ns_len = pool_ns->len;
5626 ++ iinfo.pool_ns_data = pool_ns->str;
5627 ++ }
5628 +
5629 + down_read(&mdsc->snap_rwsem);
5630 + ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session,
5631 +@@ -746,8 +753,10 @@ retry:
5632 + restore_deleg_ino(dir, req->r_deleg_ino);
5633 + ceph_mdsc_put_request(req);
5634 + try_async = false;
5635 ++ ceph_put_string(rcu_dereference_raw(lo.pool_ns));
5636 + goto retry;
5637 + }
5638 ++ ceph_put_string(rcu_dereference_raw(lo.pool_ns));
5639 + goto out_req;
5640 + }
5641 + }
5642 +diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
5643 +index 1466b5d01cbb9..d3cd2a94d1e8c 100644
5644 +--- a/fs/configfs/dir.c
5645 ++++ b/fs/configfs/dir.c
5646 +@@ -1780,8 +1780,8 @@ void configfs_unregister_group(struct config_group *group)
5647 + configfs_detach_group(&group->cg_item);
5648 + d_inode(dentry)->i_flags |= S_DEAD;
5649 + dont_mount(dentry);
5650 ++ d_drop(dentry);
5651 + fsnotify_rmdir(d_inode(parent), dentry);
5652 +- d_delete(dentry);
5653 + inode_unlock(d_inode(parent));
5654 +
5655 + dput(dentry);
5656 +@@ -1922,10 +1922,10 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
5657 + configfs_detach_group(&group->cg_item);
5658 + d_inode(dentry)->i_flags |= S_DEAD;
5659 + dont_mount(dentry);
5660 +- fsnotify_rmdir(d_inode(root), dentry);
5661 + inode_unlock(d_inode(dentry));
5662 +
5663 +- d_delete(dentry);
5664 ++ d_drop(dentry);
5665 ++ fsnotify_rmdir(d_inode(root), dentry);
5666 +
5667 + inode_unlock(d_inode(root));
5668 +
5669 +diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
5670 +index 42e5a766d33c7..4f25015aa5342 100644
5671 +--- a/fs/devpts/inode.c
5672 ++++ b/fs/devpts/inode.c
5673 +@@ -621,8 +621,8 @@ void devpts_pty_kill(struct dentry *dentry)
5674 +
5675 + dentry->d_fsdata = NULL;
5676 + drop_nlink(dentry->d_inode);
5677 +- fsnotify_unlink(d_inode(dentry->d_parent), dentry);
5678 + d_drop(dentry);
5679 ++ fsnotify_unlink(d_inode(dentry->d_parent), dentry);
5680 + dput(dentry); /* d_alloc_name() in devpts_pty_new() */
5681 + }
5682 +
5683 +diff --git a/fs/io_uring.c b/fs/io_uring.c
5684 +index 15f303180d70c..698db7fb62e06 100644
5685 +--- a/fs/io_uring.c
5686 ++++ b/fs/io_uring.c
5687 +@@ -7761,10 +7761,15 @@ static __cold void io_rsrc_node_ref_zero(struct percpu_ref *ref)
5688 + struct io_ring_ctx *ctx = node->rsrc_data->ctx;
5689 + unsigned long flags;
5690 + bool first_add = false;
5691 ++ unsigned long delay = HZ;
5692 +
5693 + spin_lock_irqsave(&ctx->rsrc_ref_lock, flags);
5694 + node->done = true;
5695 +
5696 ++ /* if we are mid-quiesce then do not delay */
5697 ++ if (node->rsrc_data->quiesce)
5698 ++ delay = 0;
5699 ++
5700 + while (!list_empty(&ctx->rsrc_ref_list)) {
5701 + node = list_first_entry(&ctx->rsrc_ref_list,
5702 + struct io_rsrc_node, node);
5703 +@@ -7777,7 +7782,7 @@ static __cold void io_rsrc_node_ref_zero(struct percpu_ref *ref)
5704 + spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags);
5705 +
5706 + if (first_add)
5707 +- mod_delayed_work(system_wq, &ctx->rsrc_put_work, HZ);
5708 ++ mod_delayed_work(system_wq, &ctx->rsrc_put_work, delay);
5709 + }
5710 +
5711 + static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
5712 +diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
5713 +index 35302bc192eb9..bd9ac98916043 100644
5714 +--- a/fs/jbd2/journal.c
5715 ++++ b/fs/jbd2/journal.c
5716 +@@ -2970,6 +2970,7 @@ struct journal_head *jbd2_journal_grab_journal_head(struct buffer_head *bh)
5717 + jbd_unlock_bh_journal_head(bh);
5718 + return jh;
5719 + }
5720 ++EXPORT_SYMBOL(jbd2_journal_grab_journal_head);
5721 +
5722 + static void __journal_remove_journal_head(struct buffer_head *bh)
5723 + {
5724 +@@ -3022,6 +3023,7 @@ void jbd2_journal_put_journal_head(struct journal_head *jh)
5725 + jbd_unlock_bh_journal_head(bh);
5726 + }
5727 + }
5728 ++EXPORT_SYMBOL(jbd2_journal_put_journal_head);
5729 +
5730 + /*
5731 + * Initialize jbd inode head
5732 +diff --git a/fs/namei.c b/fs/namei.c
5733 +index 1f9d2187c7655..3c0568d3155be 100644
5734 +--- a/fs/namei.c
5735 ++++ b/fs/namei.c
5736 +@@ -3973,13 +3973,12 @@ int vfs_rmdir(struct user_namespace *mnt_userns, struct inode *dir,
5737 + dentry->d_inode->i_flags |= S_DEAD;
5738 + dont_mount(dentry);
5739 + detach_mounts(dentry);
5740 +- fsnotify_rmdir(dir, dentry);
5741 +
5742 + out:
5743 + inode_unlock(dentry->d_inode);
5744 + dput(dentry);
5745 + if (!error)
5746 +- d_delete(dentry);
5747 ++ d_delete_notify(dir, dentry);
5748 + return error;
5749 + }
5750 + EXPORT_SYMBOL(vfs_rmdir);
5751 +@@ -4101,7 +4100,6 @@ int vfs_unlink(struct user_namespace *mnt_userns, struct inode *dir,
5752 + if (!error) {
5753 + dont_mount(dentry);
5754 + detach_mounts(dentry);
5755 +- fsnotify_unlink(dir, dentry);
5756 + }
5757 + }
5758 + }
5759 +@@ -4109,9 +4107,11 @@ out:
5760 + inode_unlock(target);
5761 +
5762 + /* We don't d_delete() NFS sillyrenamed files--they still exist. */
5763 +- if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
5764 ++ if (!error && dentry->d_flags & DCACHE_NFSFS_RENAMED) {
5765 ++ fsnotify_unlink(dir, dentry);
5766 ++ } else if (!error) {
5767 + fsnotify_link_count(target);
5768 +- d_delete(dentry);
5769 ++ d_delete_notify(dir, dentry);
5770 + }
5771 +
5772 + return error;
5773 +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
5774 +index 731d31015b6aa..24ce5652d9be8 100644
5775 +--- a/fs/nfs/dir.c
5776 ++++ b/fs/nfs/dir.c
5777 +@@ -1967,6 +1967,24 @@ out:
5778 +
5779 + no_open:
5780 + res = nfs_lookup(dir, dentry, lookup_flags);
5781 ++ if (!res) {
5782 ++ inode = d_inode(dentry);
5783 ++ if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
5784 ++ !S_ISDIR(inode->i_mode))
5785 ++ res = ERR_PTR(-ENOTDIR);
5786 ++ else if (inode && S_ISREG(inode->i_mode))
5787 ++ res = ERR_PTR(-EOPENSTALE);
5788 ++ } else if (!IS_ERR(res)) {
5789 ++ inode = d_inode(res);
5790 ++ if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
5791 ++ !S_ISDIR(inode->i_mode)) {
5792 ++ dput(res);
5793 ++ res = ERR_PTR(-ENOTDIR);
5794 ++ } else if (inode && S_ISREG(inode->i_mode)) {
5795 ++ dput(res);
5796 ++ res = ERR_PTR(-EOPENSTALE);
5797 ++ }
5798 ++ }
5799 + if (switched) {
5800 + d_lookup_done(dentry);
5801 + if (!res)
5802 +@@ -2379,6 +2397,8 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
5803 +
5804 + trace_nfs_link_enter(inode, dir, dentry);
5805 + d_drop(dentry);
5806 ++ if (S_ISREG(inode->i_mode))
5807 ++ nfs_sync_inode(inode);
5808 + error = NFS_PROTO(dir)->link(inode, dir, &dentry->d_name);
5809 + if (error == 0) {
5810 + nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
5811 +@@ -2468,6 +2488,8 @@ int nfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
5812 + }
5813 + }
5814 +
5815 ++ if (S_ISREG(old_inode->i_mode))
5816 ++ nfs_sync_inode(old_inode);
5817 + task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, NULL);
5818 + if (IS_ERR(task)) {
5819 + error = PTR_ERR(task);
5820 +diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
5821 +index 51a49e0cfe376..d0761ca8cb542 100644
5822 +--- a/fs/nfsd/nfsctl.c
5823 ++++ b/fs/nfsd/nfsctl.c
5824 +@@ -1249,7 +1249,8 @@ static void nfsdfs_remove_file(struct inode *dir, struct dentry *dentry)
5825 + clear_ncl(d_inode(dentry));
5826 + dget(dentry);
5827 + ret = simple_unlink(dir, dentry);
5828 +- d_delete(dentry);
5829 ++ d_drop(dentry);
5830 ++ fsnotify_unlink(dir, dentry);
5831 + dput(dentry);
5832 + WARN_ON_ONCE(ret);
5833 + }
5834 +@@ -1340,8 +1341,8 @@ void nfsd_client_rmdir(struct dentry *dentry)
5835 + dget(dentry);
5836 + ret = simple_rmdir(dir, dentry);
5837 + WARN_ON_ONCE(ret);
5838 ++ d_drop(dentry);
5839 + fsnotify_rmdir(dir, dentry);
5840 +- d_delete(dentry);
5841 + dput(dentry);
5842 + inode_unlock(dir);
5843 + }
5844 +diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
5845 +index 481017e1dac5a..166c8918c825a 100644
5846 +--- a/fs/ocfs2/suballoc.c
5847 ++++ b/fs/ocfs2/suballoc.c
5848 +@@ -1251,26 +1251,23 @@ static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
5849 + {
5850 + struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
5851 + struct journal_head *jh;
5852 +- int ret = 1;
5853 ++ int ret;
5854 +
5855 + if (ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap))
5856 + return 0;
5857 +
5858 +- if (!buffer_jbd(bg_bh))
5859 ++ jh = jbd2_journal_grab_journal_head(bg_bh);
5860 ++ if (!jh)
5861 + return 1;
5862 +
5863 +- jbd_lock_bh_journal_head(bg_bh);
5864 +- if (buffer_jbd(bg_bh)) {
5865 +- jh = bh2jh(bg_bh);
5866 +- spin_lock(&jh->b_state_lock);
5867 +- bg = (struct ocfs2_group_desc *) jh->b_committed_data;
5868 +- if (bg)
5869 +- ret = !ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap);
5870 +- else
5871 +- ret = 1;
5872 +- spin_unlock(&jh->b_state_lock);
5873 +- }
5874 +- jbd_unlock_bh_journal_head(bg_bh);
5875 ++ spin_lock(&jh->b_state_lock);
5876 ++ bg = (struct ocfs2_group_desc *) jh->b_committed_data;
5877 ++ if (bg)
5878 ++ ret = !ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap);
5879 ++ else
5880 ++ ret = 1;
5881 ++ spin_unlock(&jh->b_state_lock);
5882 ++ jbd2_journal_put_journal_head(jh);
5883 +
5884 + return ret;
5885 + }
5886 +diff --git a/fs/udf/inode.c b/fs/udf/inode.c
5887 +index 1d6b7a50736ba..ea8f6cd01f501 100644
5888 +--- a/fs/udf/inode.c
5889 ++++ b/fs/udf/inode.c
5890 +@@ -258,10 +258,6 @@ int udf_expand_file_adinicb(struct inode *inode)
5891 + char *kaddr;
5892 + struct udf_inode_info *iinfo = UDF_I(inode);
5893 + int err;
5894 +- struct writeback_control udf_wbc = {
5895 +- .sync_mode = WB_SYNC_NONE,
5896 +- .nr_to_write = 1,
5897 +- };
5898 +
5899 + WARN_ON_ONCE(!inode_is_locked(inode));
5900 + if (!iinfo->i_lenAlloc) {
5901 +@@ -305,8 +301,10 @@ int udf_expand_file_adinicb(struct inode *inode)
5902 + iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
5903 + /* from now on we have normal address_space methods */
5904 + inode->i_data.a_ops = &udf_aops;
5905 ++ set_page_dirty(page);
5906 ++ unlock_page(page);
5907 + up_write(&iinfo->i_data_sem);
5908 +- err = inode->i_data.a_ops->writepage(page, &udf_wbc);
5909 ++ err = filemap_fdatawrite(inode->i_mapping);
5910 + if (err) {
5911 + /* Restore everything back so that we don't lose data... */
5912 + lock_page(page);
5913 +@@ -317,6 +315,7 @@ int udf_expand_file_adinicb(struct inode *inode)
5914 + unlock_page(page);
5915 + iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
5916 + inode->i_data.a_ops = &udf_adinicb_aops;
5917 ++ iinfo->i_lenAlloc = inode->i_size;
5918 + up_write(&iinfo->i_data_sem);
5919 + }
5920 + put_page(page);
5921 +diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
5922 +index bd4370baccca3..d73887c805e05 100644
5923 +--- a/include/linux/blkdev.h
5924 ++++ b/include/linux/blkdev.h
5925 +@@ -1254,6 +1254,7 @@ unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
5926 + void disk_end_io_acct(struct gendisk *disk, unsigned int op,
5927 + unsigned long start_time);
5928 +
5929 ++void bio_start_io_acct_time(struct bio *bio, unsigned long start_time);
5930 + unsigned long bio_start_io_acct(struct bio *bio);
5931 + void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
5932 + struct block_device *orig_bdev);
5933 +diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
5934 +index 845a0ffc16ee8..d8f07baf272ad 100644
5935 +--- a/include/linux/ethtool.h
5936 ++++ b/include/linux/ethtool.h
5937 +@@ -95,7 +95,7 @@ struct ethtool_link_ext_state_info {
5938 + enum ethtool_link_ext_substate_bad_signal_integrity bad_signal_integrity;
5939 + enum ethtool_link_ext_substate_cable_issue cable_issue;
5940 + enum ethtool_link_ext_substate_module module;
5941 +- u8 __link_ext_substate;
5942 ++ u32 __link_ext_substate;
5943 + };
5944 + };
5945 +
5946 +diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
5947 +index 787545e87eeb0..bec1e23ecf787 100644
5948 +--- a/include/linux/fsnotify.h
5949 ++++ b/include/linux/fsnotify.h
5950 +@@ -221,6 +221,43 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode,
5951 + dir, &new_dentry->d_name, 0);
5952 + }
5953 +
5954 ++/*
5955 ++ * fsnotify_delete - @dentry was unlinked and unhashed
5956 ++ *
5957 ++ * Caller must make sure that dentry->d_name is stable.
5958 ++ *
5959 ++ * Note: unlike fsnotify_unlink(), we also have to pass the unlinked inode
5960 ++ * as this may be called after d_delete() and old_dentry may be negative.
5961 ++ */
5962 ++static inline void fsnotify_delete(struct inode *dir, struct inode *inode,
5963 ++ struct dentry *dentry)
5964 ++{
5965 ++ __u32 mask = FS_DELETE;
5966 ++
5967 ++ if (S_ISDIR(inode->i_mode))
5968 ++ mask |= FS_ISDIR;
5969 ++
5970 ++ fsnotify_name(mask, inode, FSNOTIFY_EVENT_INODE, dir, &dentry->d_name,
5971 ++ 0);
5972 ++}
5973 ++
5974 ++/**
5975 ++ * d_delete_notify - delete a dentry and call fsnotify_delete()
5976 ++ * @dentry: The dentry to delete
5977 ++ *
5978 ++ * This helper is used to guarantee that the unlinked inode cannot be found
5979 ++ * by a lookup of this name after the fsnotify_delete() event has been delivered.
5980 ++ */
5981 ++static inline void d_delete_notify(struct inode *dir, struct dentry *dentry)
5982 ++{
5983 ++ struct inode *inode = d_inode(dentry);
5984 ++
5985 ++ ihold(inode);
5986 ++ d_delete(dentry);
5987 ++ fsnotify_delete(dir, inode, dentry);
5988 ++ iput(inode);
5989 ++}
5990 ++
5991 + /*
5992 + * fsnotify_unlink - 'name' was unlinked
5993 + *
5994 +@@ -228,10 +265,10 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode,
5995 + */
5996 + static inline void fsnotify_unlink(struct inode *dir, struct dentry *dentry)
5997 + {
5998 +- /* Expected to be called before d_delete() */
5999 +- WARN_ON_ONCE(d_is_negative(dentry));
6000 ++ if (WARN_ON_ONCE(d_is_negative(dentry)))
6001 ++ return;
6002 +
6003 +- fsnotify_dirent(dir, dentry, FS_DELETE);
6004 ++ fsnotify_delete(dir, d_inode(dentry), dentry);
6005 + }
6006 +
6007 + /*
6008 +@@ -255,10 +292,10 @@ static inline void fsnotify_mkdir(struct inode *dir, struct dentry *dentry)
6009 + */
6010 + static inline void fsnotify_rmdir(struct inode *dir, struct dentry *dentry)
6011 + {
6012 +- /* Expected to be called before d_delete() */
6013 +- WARN_ON_ONCE(d_is_negative(dentry));
6014 ++ if (WARN_ON_ONCE(d_is_negative(dentry)))
6015 ++ return;
6016 +
6017 +- fsnotify_dirent(dir, dentry, FS_DELETE | FS_ISDIR);
6018 ++ fsnotify_delete(dir, d_inode(dentry), dentry);
6019 + }
6020 +
6021 + /*
6022 +diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
6023 +index df8de62f4710f..f0c7b352340a4 100644
6024 +--- a/include/linux/lsm_hook_defs.h
6025 ++++ b/include/linux/lsm_hook_defs.h
6026 +@@ -82,7 +82,7 @@ LSM_HOOK(int, 0, sb_add_mnt_opt, const char *option, const char *val,
6027 + int len, void **mnt_opts)
6028 + LSM_HOOK(int, 0, move_mount, const struct path *from_path,
6029 + const struct path *to_path)
6030 +-LSM_HOOK(int, 0, dentry_init_security, struct dentry *dentry,
6031 ++LSM_HOOK(int, -EOPNOTSUPP, dentry_init_security, struct dentry *dentry,
6032 + int mode, const struct qstr *name, const char **xattr_name,
6033 + void **ctx, u32 *ctxlen)
6034 + LSM_HOOK(int, 0, dentry_create_files_as, struct dentry *dentry, int mode,
6035 +diff --git a/include/linux/mm.h b/include/linux/mm.h
6036 +index a7e4a9e7d807a..73210623bda7a 100644
6037 +--- a/include/linux/mm.h
6038 ++++ b/include/linux/mm.h
6039 +@@ -1524,11 +1524,18 @@ static inline u8 page_kasan_tag(const struct page *page)
6040 +
6041 + static inline void page_kasan_tag_set(struct page *page, u8 tag)
6042 + {
6043 +- if (kasan_enabled()) {
6044 +- tag ^= 0xff;
6045 +- page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
6046 +- page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
6047 +- }
6048 ++ unsigned long old_flags, flags;
6049 ++
6050 ++ if (!kasan_enabled())
6051 ++ return;
6052 ++
6053 ++ tag ^= 0xff;
6054 ++ old_flags = READ_ONCE(page->flags);
6055 ++ do {
6056 ++ flags = old_flags;
6057 ++ flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
6058 ++ flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
6059 ++ } while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags)));
6060 + }
6061 +
6062 + static inline void page_kasan_tag_reset(struct page *page)
6063 +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
6064 +index 6aadcc0ecb5b0..6cbefb660fa3b 100644
6065 +--- a/include/linux/netdevice.h
6066 ++++ b/include/linux/netdevice.h
6067 +@@ -2636,6 +2636,7 @@ struct packet_type {
6068 + struct net_device *);
6069 + bool (*id_match)(struct packet_type *ptype,
6070 + struct sock *sk);
6071 ++ struct net *af_packet_net;
6072 + void *af_packet_priv;
6073 + struct list_head list;
6074 + };
6075 +diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
6076 +index 318c489b735bc..d7f927f8c335d 100644
6077 +--- a/include/linux/perf_event.h
6078 ++++ b/include/linux/perf_event.h
6079 +@@ -681,18 +681,6 @@ struct perf_event {
6080 + u64 total_time_running;
6081 + u64 tstamp;
6082 +
6083 +- /*
6084 +- * timestamp shadows the actual context timing but it can
6085 +- * be safely used in NMI interrupt context. It reflects the
6086 +- * context time as it was when the event was last scheduled in,
6087 +- * or when ctx_sched_in failed to schedule the event because we
6088 +- * run out of PMC.
6089 +- *
6090 +- * ctx_time already accounts for ctx->timestamp. Therefore to
6091 +- * compute ctx_time for a sample, simply add perf_clock().
6092 +- */
6093 +- u64 shadow_ctx_time;
6094 +-
6095 + struct perf_event_attr attr;
6096 + u16 header_size;
6097 + u16 id_header_size;
6098 +@@ -839,6 +827,7 @@ struct perf_event_context {
6099 + */
6100 + u64 time;
6101 + u64 timestamp;
6102 ++ u64 timeoffset;
6103 +
6104 + /*
6105 + * These fields let us detect when two contexts have both
6106 +@@ -921,6 +910,8 @@ struct bpf_perf_event_data_kern {
6107 + struct perf_cgroup_info {
6108 + u64 time;
6109 + u64 timestamp;
6110 ++ u64 timeoffset;
6111 ++ int active;
6112 + };
6113 +
6114 + struct perf_cgroup {
6115 +diff --git a/include/linux/psi.h b/include/linux/psi.h
6116 +index 65eb1476ac705..57823b30c2d3d 100644
6117 +--- a/include/linux/psi.h
6118 ++++ b/include/linux/psi.h
6119 +@@ -24,18 +24,17 @@ void psi_memstall_enter(unsigned long *flags);
6120 + void psi_memstall_leave(unsigned long *flags);
6121 +
6122 + int psi_show(struct seq_file *s, struct psi_group *group, enum psi_res res);
6123 +-
6124 +-#ifdef CONFIG_CGROUPS
6125 +-int psi_cgroup_alloc(struct cgroup *cgrp);
6126 +-void psi_cgroup_free(struct cgroup *cgrp);
6127 +-void cgroup_move_task(struct task_struct *p, struct css_set *to);
6128 +-
6129 + struct psi_trigger *psi_trigger_create(struct psi_group *group,
6130 + char *buf, size_t nbytes, enum psi_res res);
6131 +-void psi_trigger_replace(void **trigger_ptr, struct psi_trigger *t);
6132 ++void psi_trigger_destroy(struct psi_trigger *t);
6133 +
6134 + __poll_t psi_trigger_poll(void **trigger_ptr, struct file *file,
6135 + poll_table *wait);
6136 ++
6137 ++#ifdef CONFIG_CGROUPS
6138 ++int psi_cgroup_alloc(struct cgroup *cgrp);
6139 ++void psi_cgroup_free(struct cgroup *cgrp);
6140 ++void cgroup_move_task(struct task_struct *p, struct css_set *to);
6141 + #endif
6142 +
6143 + #else /* CONFIG_PSI */
6144 +diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h
6145 +index 0819c82dba920..6f190002a2022 100644
6146 +--- a/include/linux/psi_types.h
6147 ++++ b/include/linux/psi_types.h
6148 +@@ -140,9 +140,6 @@ struct psi_trigger {
6149 + * events to one per window
6150 + */
6151 + u64 last_event_time;
6152 +-
6153 +- /* Refcounting to prevent premature destruction */
6154 +- struct kref refcount;
6155 + };
6156 +
6157 + struct psi_group {
6158 +diff --git a/include/linux/usb/role.h b/include/linux/usb/role.h
6159 +index 031f148ab3734..b5deafd91f67b 100644
6160 +--- a/include/linux/usb/role.h
6161 ++++ b/include/linux/usb/role.h
6162 +@@ -91,6 +91,12 @@ fwnode_usb_role_switch_get(struct fwnode_handle *node)
6163 +
6164 + static inline void usb_role_switch_put(struct usb_role_switch *sw) { }
6165 +
6166 ++static inline struct usb_role_switch *
6167 ++usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode)
6168 ++{
6169 ++ return NULL;
6170 ++}
6171 ++
6172 + static inline struct usb_role_switch *
6173 + usb_role_switch_register(struct device *parent,
6174 + const struct usb_role_switch_desc *desc)
6175 +diff --git a/include/net/addrconf.h b/include/net/addrconf.h
6176 +index 78ea3e332688f..e7ce719838b5e 100644
6177 +--- a/include/net/addrconf.h
6178 ++++ b/include/net/addrconf.h
6179 +@@ -6,6 +6,8 @@
6180 + #define RTR_SOLICITATION_INTERVAL (4*HZ)
6181 + #define RTR_SOLICITATION_MAX_INTERVAL (3600*HZ) /* 1 hour */
6182 +
6183 ++#define MIN_VALID_LIFETIME (2*3600) /* 2 hours */
6184 ++
6185 + #define TEMP_VALID_LIFETIME (7*86400)
6186 + #define TEMP_PREFERRED_LIFETIME (86400)
6187 + #define REGEN_MAX_RETRY (3)
6188 +diff --git a/include/net/ip.h b/include/net/ip.h
6189 +index b71e88507c4a0..ff68af118020a 100644
6190 +--- a/include/net/ip.h
6191 ++++ b/include/net/ip.h
6192 +@@ -525,19 +525,18 @@ static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
6193 + {
6194 + struct iphdr *iph = ip_hdr(skb);
6195 +
6196 ++ /* We had many attacks based on IPID, use the private
6197 ++ * generator as much as we can.
6198 ++ */
6199 ++ if (sk && inet_sk(sk)->inet_daddr) {
6200 ++ iph->id = htons(inet_sk(sk)->inet_id);
6201 ++ inet_sk(sk)->inet_id += segs;
6202 ++ return;
6203 ++ }
6204 + if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
6205 +- /* This is only to work around buggy Windows95/2000
6206 +- * VJ compression implementations. If the ID field
6207 +- * does not change, they drop every other packet in
6208 +- * a TCP stream using header compression.
6209 +- */
6210 +- if (sk && inet_sk(sk)->inet_daddr) {
6211 +- iph->id = htons(inet_sk(sk)->inet_id);
6212 +- inet_sk(sk)->inet_id += segs;
6213 +- } else {
6214 +- iph->id = 0;
6215 +- }
6216 ++ iph->id = 0;
6217 + } else {
6218 ++ /* Unfortunately we need the big hammer to get a suitable IPID */
6219 + __ip_select_ident(net, iph, segs);
6220 + }
6221 + }
6222 +diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
6223 +index 83b8070d1cc93..c85b040728d7e 100644
6224 +--- a/include/net/ip6_fib.h
6225 ++++ b/include/net/ip6_fib.h
6226 +@@ -281,7 +281,7 @@ static inline bool fib6_get_cookie_safe(const struct fib6_info *f6i,
6227 + fn = rcu_dereference(f6i->fib6_node);
6228 +
6229 + if (fn) {
6230 +- *cookie = fn->fn_sernum;
6231 ++ *cookie = READ_ONCE(fn->fn_sernum);
6232 + /* pairs with smp_wmb() in __fib6_update_sernum_upto_root() */
6233 + smp_rmb();
6234 + status = true;
6235 +diff --git a/include/net/route.h b/include/net/route.h
6236 +index 2e6c0e153e3a5..2551f3f03b37e 100644
6237 +--- a/include/net/route.h
6238 ++++ b/include/net/route.h
6239 +@@ -369,7 +369,7 @@ static inline struct neighbour *ip_neigh_gw4(struct net_device *dev,
6240 + {
6241 + struct neighbour *neigh;
6242 +
6243 +- neigh = __ipv4_neigh_lookup_noref(dev, daddr);
6244 ++ neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)daddr);
6245 + if (unlikely(!neigh))
6246 + neigh = __neigh_create(&arp_tbl, &daddr, dev, false);
6247 +
6248 +diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
6249 +index 7b5dcff84cf27..54eb1654d9ecc 100644
6250 +--- a/include/trace/events/sunrpc.h
6251 ++++ b/include/trace/events/sunrpc.h
6252 +@@ -953,7 +953,8 @@ TRACE_EVENT(rpc_socket_nospace,
6253 + { BIT(XPRT_REMOVE), "REMOVE" }, \
6254 + { BIT(XPRT_CONGESTED), "CONGESTED" }, \
6255 + { BIT(XPRT_CWND_WAIT), "CWND_WAIT" }, \
6256 +- { BIT(XPRT_WRITE_SPACE), "WRITE_SPACE" })
6257 ++ { BIT(XPRT_WRITE_SPACE), "WRITE_SPACE" }, \
6258 ++ { BIT(XPRT_SND_IS_COOKIE), "SND_IS_COOKIE" })
6259 +
6260 + DECLARE_EVENT_CLASS(rpc_xprt_lifetime_class,
6261 + TP_PROTO(
6262 +@@ -1150,8 +1151,11 @@ DECLARE_EVENT_CLASS(xprt_writelock_event,
6263 + __entry->task_id = -1;
6264 + __entry->client_id = -1;
6265 + }
6266 +- __entry->snd_task_id = xprt->snd_task ?
6267 +- xprt->snd_task->tk_pid : -1;
6268 ++ if (xprt->snd_task &&
6269 ++ !test_bit(XPRT_SND_IS_COOKIE, &xprt->state))
6270 ++ __entry->snd_task_id = xprt->snd_task->tk_pid;
6271 ++ else
6272 ++ __entry->snd_task_id = -1;
6273 + ),
6274 +
6275 + TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
6276 +@@ -1196,8 +1200,12 @@ DECLARE_EVENT_CLASS(xprt_cong_event,
6277 + __entry->task_id = -1;
6278 + __entry->client_id = -1;
6279 + }
6280 +- __entry->snd_task_id = xprt->snd_task ?
6281 +- xprt->snd_task->tk_pid : -1;
6282 ++ if (xprt->snd_task &&
6283 ++ !test_bit(XPRT_SND_IS_COOKIE, &xprt->state))
6284 ++ __entry->snd_task_id = xprt->snd_task->tk_pid;
6285 ++ else
6286 ++ __entry->snd_task_id = -1;
6287 ++
6288 + __entry->cong = xprt->cong;
6289 + __entry->cwnd = xprt->cwnd;
6290 + __entry->wait = test_bit(XPRT_CWND_WAIT, &xprt->state);
6291 +diff --git a/include/uapi/linux/cyclades.h b/include/uapi/linux/cyclades.h
6292 +new file mode 100644
6293 +index 0000000000000..6225c5aebe06a
6294 +--- /dev/null
6295 ++++ b/include/uapi/linux/cyclades.h
6296 +@@ -0,0 +1,35 @@
6297 ++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
6298 ++
6299 ++#ifndef _UAPI_LINUX_CYCLADES_H
6300 ++#define _UAPI_LINUX_CYCLADES_H
6301 ++
6302 ++#warning "Support for features provided by this header has been removed"
6303 ++#warning "Please consider updating your code"
6304 ++
6305 ++struct cyclades_monitor {
6306 ++ unsigned long int_count;
6307 ++ unsigned long char_count;
6308 ++ unsigned long char_max;
6309 ++ unsigned long char_last;
6310 ++};
6311 ++
6312 ++#define CYGETMON 0x435901
6313 ++#define CYGETTHRESH 0x435902
6314 ++#define CYSETTHRESH 0x435903
6315 ++#define CYGETDEFTHRESH 0x435904
6316 ++#define CYSETDEFTHRESH 0x435905
6317 ++#define CYGETTIMEOUT 0x435906
6318 ++#define CYSETTIMEOUT 0x435907
6319 ++#define CYGETDEFTIMEOUT 0x435908
6320 ++#define CYSETDEFTIMEOUT 0x435909
6321 ++#define CYSETRFLOW 0x43590a
6322 ++#define CYGETRFLOW 0x43590b
6323 ++#define CYSETRTSDTR_INV 0x43590c
6324 ++#define CYGETRTSDTR_INV 0x43590d
6325 ++#define CYZSETPOLLCYCLE 0x43590e
6326 ++#define CYZGETPOLLCYCLE 0x43590f
6327 ++#define CYGETCD1400VER 0x435910
6328 ++#define CYSETWAIT 0x435912
6329 ++#define CYGETWAIT 0x435913
6330 ++
6331 ++#endif /* _UAPI_LINUX_CYCLADES_H */
6332 +diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
6333 +index 6e75bbee39f0b..0dcaed4d3f4ce 100644
6334 +--- a/kernel/bpf/stackmap.c
6335 ++++ b/kernel/bpf/stackmap.c
6336 +@@ -525,13 +525,14 @@ BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
6337 + u32, size, u64, flags)
6338 + {
6339 + struct pt_regs *regs;
6340 +- long res;
6341 ++ long res = -EINVAL;
6342 +
6343 + if (!try_get_task_stack(task))
6344 + return -EFAULT;
6345 +
6346 + regs = task_pt_regs(task);
6347 +- res = __bpf_get_stack(regs, task, NULL, buf, size, flags);
6348 ++ if (regs)
6349 ++ res = __bpf_get_stack(regs, task, NULL, buf, size, flags);
6350 + put_task_stack(task);
6351 +
6352 + return res;
6353 +diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
6354 +index cafb8c114a21c..d18c2ef3180ed 100644
6355 +--- a/kernel/cgroup/cgroup.c
6356 ++++ b/kernel/cgroup/cgroup.c
6357 +@@ -3642,6 +3642,12 @@ static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf,
6358 + cgroup_get(cgrp);
6359 + cgroup_kn_unlock(of->kn);
6360 +
6361 ++ /* Allow only one trigger per file descriptor */
6362 ++ if (ctx->psi.trigger) {
6363 ++ cgroup_put(cgrp);
6364 ++ return -EBUSY;
6365 ++ }
6366 ++
6367 + psi = cgroup_ino(cgrp) == 1 ? &psi_system : &cgrp->psi;
6368 + new = psi_trigger_create(psi, buf, nbytes, res);
6369 + if (IS_ERR(new)) {
6370 +@@ -3649,8 +3655,7 @@ static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf,
6371 + return PTR_ERR(new);
6372 + }
6373 +
6374 +- psi_trigger_replace(&ctx->psi.trigger, new);
6375 +-
6376 ++ smp_store_release(&ctx->psi.trigger, new);
6377 + cgroup_put(cgrp);
6378 +
6379 + return nbytes;
6380 +@@ -3689,7 +3694,7 @@ static void cgroup_pressure_release(struct kernfs_open_file *of)
6381 + {
6382 + struct cgroup_file_ctx *ctx = of->priv;
6383 +
6384 +- psi_trigger_replace(&ctx->psi.trigger, NULL);
6385 ++ psi_trigger_destroy(ctx->psi.trigger);
6386 + }
6387 +
6388 + bool cgroup_psi_enabled(void)
6389 +diff --git a/kernel/events/core.c b/kernel/events/core.c
6390 +index 63f0414666438..6ed890480c4aa 100644
6391 +--- a/kernel/events/core.c
6392 ++++ b/kernel/events/core.c
6393 +@@ -674,6 +674,23 @@ perf_event_set_state(struct perf_event *event, enum perf_event_state state)
6394 + WRITE_ONCE(event->state, state);
6395 + }
6396 +
6397 ++/*
6398 ++ * UP store-release, load-acquire
6399 ++ */
6400 ++
6401 ++#define __store_release(ptr, val) \
6402 ++do { \
6403 ++ barrier(); \
6404 ++ WRITE_ONCE(*(ptr), (val)); \
6405 ++} while (0)
6406 ++
6407 ++#define __load_acquire(ptr) \
6408 ++({ \
6409 ++ __unqual_scalar_typeof(*(ptr)) ___p = READ_ONCE(*(ptr)); \
6410 ++ barrier(); \
6411 ++ ___p; \
6412 ++})
6413 ++
6414 + #ifdef CONFIG_CGROUP_PERF
6415 +
6416 + static inline bool
6417 +@@ -719,34 +736,51 @@ static inline u64 perf_cgroup_event_time(struct perf_event *event)
6418 + return t->time;
6419 + }
6420 +
6421 +-static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
6422 ++static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now)
6423 + {
6424 +- struct perf_cgroup_info *info;
6425 +- u64 now;
6426 +-
6427 +- now = perf_clock();
6428 ++ struct perf_cgroup_info *t;
6429 +
6430 +- info = this_cpu_ptr(cgrp->info);
6431 ++ t = per_cpu_ptr(event->cgrp->info, event->cpu);
6432 ++ if (!__load_acquire(&t->active))
6433 ++ return t->time;
6434 ++ now += READ_ONCE(t->timeoffset);
6435 ++ return now;
6436 ++}
6437 +
6438 +- info->time += now - info->timestamp;
6439 ++static inline void __update_cgrp_time(struct perf_cgroup_info *info, u64 now, bool adv)
6440 ++{
6441 ++ if (adv)
6442 ++ info->time += now - info->timestamp;
6443 + info->timestamp = now;
6444 ++ /*
6445 ++ * see update_context_time()
6446 ++ */
6447 ++ WRITE_ONCE(info->timeoffset, info->time - info->timestamp);
6448 + }
6449 +
6450 +-static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
6451 ++static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx, bool final)
6452 + {
6453 + struct perf_cgroup *cgrp = cpuctx->cgrp;
6454 + struct cgroup_subsys_state *css;
6455 ++ struct perf_cgroup_info *info;
6456 +
6457 + if (cgrp) {
6458 ++ u64 now = perf_clock();
6459 ++
6460 + for (css = &cgrp->css; css; css = css->parent) {
6461 + cgrp = container_of(css, struct perf_cgroup, css);
6462 +- __update_cgrp_time(cgrp);
6463 ++ info = this_cpu_ptr(cgrp->info);
6464 ++
6465 ++ __update_cgrp_time(info, now, true);
6466 ++ if (final)
6467 ++ __store_release(&info->active, 0);
6468 + }
6469 + }
6470 + }
6471 +
6472 + static inline void update_cgrp_time_from_event(struct perf_event *event)
6473 + {
6474 ++ struct perf_cgroup_info *info;
6475 + struct perf_cgroup *cgrp;
6476 +
6477 + /*
6478 +@@ -760,8 +794,10 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
6479 + /*
6480 + * Do not update time when cgroup is not active
6481 + */
6482 +- if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
6483 +- __update_cgrp_time(event->cgrp);
6484 ++ if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) {
6485 ++ info = this_cpu_ptr(event->cgrp->info);
6486 ++ __update_cgrp_time(info, perf_clock(), true);
6487 ++ }
6488 + }
6489 +
6490 + static inline void
6491 +@@ -785,7 +821,8 @@ perf_cgroup_set_timestamp(struct task_struct *task,
6492 + for (css = &cgrp->css; css; css = css->parent) {
6493 + cgrp = container_of(css, struct perf_cgroup, css);
6494 + info = this_cpu_ptr(cgrp->info);
6495 +- info->timestamp = ctx->timestamp;
6496 ++ __update_cgrp_time(info, ctx->timestamp, false);
6497 ++ __store_release(&info->active, 1);
6498 + }
6499 + }
6500 +
6501 +@@ -981,14 +1018,6 @@ out:
6502 + return ret;
6503 + }
6504 +
6505 +-static inline void
6506 +-perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
6507 +-{
6508 +- struct perf_cgroup_info *t;
6509 +- t = per_cpu_ptr(event->cgrp->info, event->cpu);
6510 +- event->shadow_ctx_time = now - t->timestamp;
6511 +-}
6512 +-
6513 + static inline void
6514 + perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
6515 + {
6516 +@@ -1066,7 +1095,8 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
6517 + {
6518 + }
6519 +
6520 +-static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
6521 ++static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx,
6522 ++ bool final)
6523 + {
6524 + }
6525 +
6526 +@@ -1098,12 +1128,12 @@ perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
6527 + {
6528 + }
6529 +
6530 +-static inline void
6531 +-perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
6532 ++static inline u64 perf_cgroup_event_time(struct perf_event *event)
6533 + {
6534 ++ return 0;
6535 + }
6536 +
6537 +-static inline u64 perf_cgroup_event_time(struct perf_event *event)
6538 ++static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now)
6539 + {
6540 + return 0;
6541 + }
6542 +@@ -1525,22 +1555,59 @@ static void perf_unpin_context(struct perf_event_context *ctx)
6543 + /*
6544 + * Update the record of the current time in a context.
6545 + */
6546 +-static void update_context_time(struct perf_event_context *ctx)
6547 ++static void __update_context_time(struct perf_event_context *ctx, bool adv)
6548 + {
6549 + u64 now = perf_clock();
6550 +
6551 +- ctx->time += now - ctx->timestamp;
6552 ++ if (adv)
6553 ++ ctx->time += now - ctx->timestamp;
6554 + ctx->timestamp = now;
6555 ++
6556 ++ /*
6557 ++ * The above: time' = time + (now - timestamp), can be re-arranged
6558 ++ * into: time' = now + (time - timestamp), which gives a single value
6559 ++ * offset to compute future time without holding locks.
6560 ++ *
6561 ++ * See perf_event_time_now(), which can be used from NMI context where
6562 ++ * it's (obviously) not possible to acquire ctx->lock in order to read
6563 ++ * both the above values in a consistent manner.
6564 ++ */
6565 ++ WRITE_ONCE(ctx->timeoffset, ctx->time - ctx->timestamp);
6566 ++}
6567 ++
6568 ++static void update_context_time(struct perf_event_context *ctx)
6569 ++{
6570 ++ __update_context_time(ctx, true);
6571 + }
6572 +
6573 + static u64 perf_event_time(struct perf_event *event)
6574 + {
6575 + struct perf_event_context *ctx = event->ctx;
6576 +
6577 ++ if (unlikely(!ctx))
6578 ++ return 0;
6579 ++
6580 + if (is_cgroup_event(event))
6581 + return perf_cgroup_event_time(event);
6582 +
6583 +- return ctx ? ctx->time : 0;
6584 ++ return ctx->time;
6585 ++}
6586 ++
6587 ++static u64 perf_event_time_now(struct perf_event *event, u64 now)
6588 ++{
6589 ++ struct perf_event_context *ctx = event->ctx;
6590 ++
6591 ++ if (unlikely(!ctx))
6592 ++ return 0;
6593 ++
6594 ++ if (is_cgroup_event(event))
6595 ++ return perf_cgroup_event_time_now(event, now);
6596 ++
6597 ++ if (!(__load_acquire(&ctx->is_active) & EVENT_TIME))
6598 ++ return ctx->time;
6599 ++
6600 ++ now += READ_ONCE(ctx->timeoffset);
6601 ++ return now;
6602 + }
6603 +
6604 + static enum event_type_t get_event_type(struct perf_event *event)
6605 +@@ -2346,7 +2413,7 @@ __perf_remove_from_context(struct perf_event *event,
6606 +
6607 + if (ctx->is_active & EVENT_TIME) {
6608 + update_context_time(ctx);
6609 +- update_cgrp_time_from_cpuctx(cpuctx);
6610 ++ update_cgrp_time_from_cpuctx(cpuctx, false);
6611 + }
6612 +
6613 + event_sched_out(event, cpuctx, ctx);
6614 +@@ -2357,6 +2424,9 @@ __perf_remove_from_context(struct perf_event *event,
6615 + list_del_event(event, ctx);
6616 +
6617 + if (!ctx->nr_events && ctx->is_active) {
6618 ++ if (ctx == &cpuctx->ctx)
6619 ++ update_cgrp_time_from_cpuctx(cpuctx, true);
6620 ++
6621 + ctx->is_active = 0;
6622 + ctx->rotate_necessary = 0;
6623 + if (ctx->task) {
6624 +@@ -2388,7 +2458,11 @@ static void perf_remove_from_context(struct perf_event *event, unsigned long fla
6625 + * event_function_call() user.
6626 + */
6627 + raw_spin_lock_irq(&ctx->lock);
6628 +- if (!ctx->is_active) {
6629 ++ /*
6630 ++ * Cgroup events are per-cpu events, and must IPI because of
6631 ++ * cgrp_cpuctx_list.
6632 ++ */
6633 ++ if (!ctx->is_active && !is_cgroup_event(event)) {
6634 + __perf_remove_from_context(event, __get_cpu_context(ctx),
6635 + ctx, (void *)flags);
6636 + raw_spin_unlock_irq(&ctx->lock);
6637 +@@ -2478,40 +2552,6 @@ void perf_event_disable_inatomic(struct perf_event *event)
6638 + irq_work_queue(&event->pending);
6639 + }
6640 +
6641 +-static void perf_set_shadow_time(struct perf_event *event,
6642 +- struct perf_event_context *ctx)
6643 +-{
6644 +- /*
6645 +- * use the correct time source for the time snapshot
6646 +- *
6647 +- * We could get by without this by leveraging the
6648 +- * fact that to get to this function, the caller
6649 +- * has most likely already called update_context_time()
6650 +- * and update_cgrp_time_xx() and thus both timestamp
6651 +- * are identical (or very close). Given that tstamp is,
6652 +- * already adjusted for cgroup, we could say that:
6653 +- * tstamp - ctx->timestamp
6654 +- * is equivalent to
6655 +- * tstamp - cgrp->timestamp.
6656 +- *
6657 +- * Then, in perf_output_read(), the calculation would
6658 +- * work with no changes because:
6659 +- * - event is guaranteed scheduled in
6660 +- * - no scheduled out in between
6661 +- * - thus the timestamp would be the same
6662 +- *
6663 +- * But this is a bit hairy.
6664 +- *
6665 +- * So instead, we have an explicit cgroup call to remain
6666 +- * within the time source all along. We believe it
6667 +- * is cleaner and simpler to understand.
6668 +- */
6669 +- if (is_cgroup_event(event))
6670 +- perf_cgroup_set_shadow_time(event, event->tstamp);
6671 +- else
6672 +- event->shadow_ctx_time = event->tstamp - ctx->timestamp;
6673 +-}
6674 +-
6675 + #define MAX_INTERRUPTS (~0ULL)
6676 +
6677 + static void perf_log_throttle(struct perf_event *event, int enable);
6678 +@@ -2552,8 +2592,6 @@ event_sched_in(struct perf_event *event,
6679 +
6680 + perf_pmu_disable(event->pmu);
6681 +
6682 +- perf_set_shadow_time(event, ctx);
6683 +-
6684 + perf_log_itrace_start(event);
6685 +
6686 + if (event->pmu->add(event, PERF_EF_START)) {
6687 +@@ -2857,11 +2895,14 @@ perf_install_in_context(struct perf_event_context *ctx,
6688 + * perf_event_attr::disabled events will not run and can be initialized
6689 + * without IPI. Except when this is the first event for the context, in
6690 + * that case we need the magic of the IPI to set ctx->is_active.
6691 ++ * Similarly, cgroup events for the context also need the IPI to
6692 ++ * manipulate the cgrp_cpuctx_list.
6693 + *
6694 + * The IOC_ENABLE that is sure to follow the creation of a disabled
6695 + * event will issue the IPI and reprogram the hardware.
6696 + */
6697 +- if (__perf_effective_state(event) == PERF_EVENT_STATE_OFF && ctx->nr_events) {
6698 ++ if (__perf_effective_state(event) == PERF_EVENT_STATE_OFF &&
6699 ++ ctx->nr_events && !is_cgroup_event(event)) {
6700 + raw_spin_lock_irq(&ctx->lock);
6701 + if (ctx->task == TASK_TOMBSTONE) {
6702 + raw_spin_unlock_irq(&ctx->lock);
6703 +@@ -3247,16 +3288,6 @@ static void ctx_sched_out(struct perf_event_context *ctx,
6704 + return;
6705 + }
6706 +
6707 +- ctx->is_active &= ~event_type;
6708 +- if (!(ctx->is_active & EVENT_ALL))
6709 +- ctx->is_active = 0;
6710 +-
6711 +- if (ctx->task) {
6712 +- WARN_ON_ONCE(cpuctx->task_ctx != ctx);
6713 +- if (!ctx->is_active)
6714 +- cpuctx->task_ctx = NULL;
6715 +- }
6716 +-
6717 + /*
6718 + * Always update time if it was set; not only when it changes.
6719 + * Otherwise we can 'forget' to update time for any but the last
6720 +@@ -3270,7 +3301,22 @@ static void ctx_sched_out(struct perf_event_context *ctx,
6721 + if (is_active & EVENT_TIME) {
6722 + /* update (and stop) ctx time */
6723 + update_context_time(ctx);
6724 +- update_cgrp_time_from_cpuctx(cpuctx);
6725 ++ update_cgrp_time_from_cpuctx(cpuctx, ctx == &cpuctx->ctx);
6726 ++ /*
6727 ++ * CPU-release for the below ->is_active store,
6728 ++ * see __load_acquire() in perf_event_time_now()
6729 ++ */
6730 ++ barrier();
6731 ++ }
6732 ++
6733 ++ ctx->is_active &= ~event_type;
6734 ++ if (!(ctx->is_active & EVENT_ALL))
6735 ++ ctx->is_active = 0;
6736 ++
6737 ++ if (ctx->task) {
6738 ++ WARN_ON_ONCE(cpuctx->task_ctx != ctx);
6739 ++ if (!ctx->is_active)
6740 ++ cpuctx->task_ctx = NULL;
6741 + }
6742 +
6743 + is_active ^= ctx->is_active; /* changed bits */
6744 +@@ -3707,13 +3753,19 @@ static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx,
6745 + return 0;
6746 + }
6747 +
6748 ++/*
6749 ++ * Because the userpage is strictly per-event (there is no concept of context,
6750 ++ * so there cannot be a context indirection), every userpage must be updated
6751 ++ * when context time starts :-(
6752 ++ *
6753 ++ * IOW, we must not miss EVENT_TIME edges.
6754 ++ */
6755 + static inline bool event_update_userpage(struct perf_event *event)
6756 + {
6757 + if (likely(!atomic_read(&event->mmap_count)))
6758 + return false;
6759 +
6760 + perf_event_update_time(event);
6761 +- perf_set_shadow_time(event, event->ctx);
6762 + perf_event_update_userpage(event);
6763 +
6764 + return true;
6765 +@@ -3797,13 +3849,23 @@ ctx_sched_in(struct perf_event_context *ctx,
6766 + struct task_struct *task)
6767 + {
6768 + int is_active = ctx->is_active;
6769 +- u64 now;
6770 +
6771 + lockdep_assert_held(&ctx->lock);
6772 +
6773 + if (likely(!ctx->nr_events))
6774 + return;
6775 +
6776 ++ if (is_active ^ EVENT_TIME) {
6777 ++ /* start ctx time */
6778 ++ __update_context_time(ctx, false);
6779 ++ perf_cgroup_set_timestamp(task, ctx);
6780 ++ /*
6781 ++ * CPU-release for the below ->is_active store,
6782 ++ * see __load_acquire() in perf_event_time_now()
6783 ++ */
6784 ++ barrier();
6785 ++ }
6786 ++
6787 + ctx->is_active |= (event_type | EVENT_TIME);
6788 + if (ctx->task) {
6789 + if (!is_active)
6790 +@@ -3814,13 +3876,6 @@ ctx_sched_in(struct perf_event_context *ctx,
6791 +
6792 + is_active ^= ctx->is_active; /* changed bits */
6793 +
6794 +- if (is_active & EVENT_TIME) {
6795 +- /* start ctx time */
6796 +- now = perf_clock();
6797 +- ctx->timestamp = now;
6798 +- perf_cgroup_set_timestamp(task, ctx);
6799 +- }
6800 +-
6801 + /*
6802 + * First go through the list and put on any pinned groups
6803 + * in order to give them the best chance of going on.
6804 +@@ -4414,6 +4469,18 @@ static inline u64 perf_event_count(struct perf_event *event)
6805 + return local64_read(&event->count) + atomic64_read(&event->child_count);
6806 + }
6807 +
6808 ++static void calc_timer_values(struct perf_event *event,
6809 ++ u64 *now,
6810 ++ u64 *enabled,
6811 ++ u64 *running)
6812 ++{
6813 ++ u64 ctx_time;
6814 ++
6815 ++ *now = perf_clock();
6816 ++ ctx_time = perf_event_time_now(event, *now);
6817 ++ __perf_update_times(event, ctx_time, enabled, running);
6818 ++}
6819 ++
6820 + /*
6821 + * NMI-safe method to read a local event, that is an event that
6822 + * is:
6823 +@@ -4473,10 +4540,9 @@ int perf_event_read_local(struct perf_event *event, u64 *value,
6824 +
6825 + *value = local64_read(&event->count);
6826 + if (enabled || running) {
6827 +- u64 now = event->shadow_ctx_time + perf_clock();
6828 +- u64 __enabled, __running;
6829 ++ u64 __enabled, __running, __now;
6830 +
6831 +- __perf_update_times(event, now, &__enabled, &__running);
6832 ++ calc_timer_values(event, &__now, &__enabled, &__running);
6833 + if (enabled)
6834 + *enabled = __enabled;
6835 + if (running)
6836 +@@ -5798,18 +5864,6 @@ static int perf_event_index(struct perf_event *event)
6837 + return event->pmu->event_idx(event);
6838 + }
6839 +
6840 +-static void calc_timer_values(struct perf_event *event,
6841 +- u64 *now,
6842 +- u64 *enabled,
6843 +- u64 *running)
6844 +-{
6845 +- u64 ctx_time;
6846 +-
6847 +- *now = perf_clock();
6848 +- ctx_time = event->shadow_ctx_time + *now;
6849 +- __perf_update_times(event, ctx_time, enabled, running);
6850 +-}
6851 +-
6852 + static void perf_event_init_userpage(struct perf_event *event)
6853 + {
6854 + struct perf_event_mmap_page *userpg;
6855 +@@ -6349,7 +6403,6 @@ accounting:
6856 + ring_buffer_attach(event, rb);
6857 +
6858 + perf_event_update_time(event);
6859 +- perf_set_shadow_time(event, event->ctx);
6860 + perf_event_init_userpage(event);
6861 + perf_event_update_userpage(event);
6862 + } else {
6863 +diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c
6864 +index 105df4dfc7839..52571dcad768b 100644
6865 +--- a/kernel/power/wakelock.c
6866 ++++ b/kernel/power/wakelock.c
6867 +@@ -39,23 +39,20 @@ ssize_t pm_show_wakelocks(char *buf, bool show_active)
6868 + {
6869 + struct rb_node *node;
6870 + struct wakelock *wl;
6871 +- char *str = buf;
6872 +- char *end = buf + PAGE_SIZE;
6873 ++ int len = 0;
6874 +
6875 + mutex_lock(&wakelocks_lock);
6876 +
6877 + for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) {
6878 + wl = rb_entry(node, struct wakelock, node);
6879 + if (wl->ws->active == show_active)
6880 +- str += scnprintf(str, end - str, "%s ", wl->name);
6881 ++ len += sysfs_emit_at(buf, len, "%s ", wl->name);
6882 + }
6883 +- if (str > buf)
6884 +- str--;
6885 +
6886 +- str += scnprintf(str, end - str, "\n");
6887 ++ len += sysfs_emit_at(buf, len, "\n");
6888 +
6889 + mutex_unlock(&wakelocks_lock);
6890 +- return (str - buf);
6891 ++ return len;
6892 + }
6893 +
6894 + #if CONFIG_PM_WAKELOCKS_LIMIT > 0
6895 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
6896 +index f2cf047b25e56..069e01772d922 100644
6897 +--- a/kernel/sched/fair.c
6898 ++++ b/kernel/sched/fair.c
6899 +@@ -3382,7 +3382,6 @@ void set_task_rq_fair(struct sched_entity *se,
6900 + se->avg.last_update_time = n_last_update_time;
6901 + }
6902 +
6903 +-
6904 + /*
6905 + * When on migration a sched_entity joins/leaves the PELT hierarchy, we need to
6906 + * propagate its contribution. The key to this propagation is the invariant
6907 +@@ -3450,7 +3449,6 @@ void set_task_rq_fair(struct sched_entity *se,
6908 + * XXX: only do this for the part of runnable > running ?
6909 + *
6910 + */
6911 +-
6912 + static inline void
6913 + update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
6914 + {
6915 +@@ -3682,7 +3680,19 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
6916 +
6917 + r = removed_util;
6918 + sub_positive(&sa->util_avg, r);
6919 +- sa->util_sum = sa->util_avg * divider;
6920 ++ sub_positive(&sa->util_sum, r * divider);
6921 ++ /*
6922 ++ * Because of rounding, se->util_sum might end up being +1 more than
6923 ++ * cfs->util_sum. Although this is not a problem by itself, detaching
6924 ++ * a lot of tasks with the rounding problem between 2 updates of
6925 ++ * util_avg (~1ms) can make cfs->util_sum become null whereas
6926 ++ * cfs->util_avg is not.
6927 ++ * Check that util_sum is still above its lower bound for the new
6928 ++ * util_avg. Given that period_contrib might have moved since the last
6929 ++ * sync, we are only sure that util_sum must be above or equal to
6930 ++ * util_avg * the minimum possible divider.
6931 ++ */
6932 ++ sa->util_sum = max_t(u32, sa->util_sum, sa->util_avg * PELT_MIN_DIVIDER);
6933 +
6934 + r = removed_runnable;
6935 + sub_positive(&sa->runnable_avg, r);
6936 +diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
6937 +index b5add64d9698c..3d2825408e3a2 100644
6938 +--- a/kernel/sched/membarrier.c
6939 ++++ b/kernel/sched/membarrier.c
6940 +@@ -147,11 +147,11 @@
6941 + #endif
6942 +
6943 + #ifdef CONFIG_RSEQ
6944 +-#define MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ_BITMASK \
6945 ++#define MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK \
6946 + (MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ \
6947 +- | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ_BITMASK)
6948 ++ | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ)
6949 + #else
6950 +-#define MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ_BITMASK 0
6951 ++#define MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK 0
6952 + #endif
6953 +
6954 + #define MEMBARRIER_CMD_BITMASK \
6955 +@@ -159,7 +159,8 @@
6956 + | MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED \
6957 + | MEMBARRIER_CMD_PRIVATE_EXPEDITED \
6958 + | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED \
6959 +- | MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK)
6960 ++ | MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK \
6961 ++ | MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK)
6962 +
6963 + static void ipi_mb(void *info)
6964 + {
6965 +diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
6966 +index e06071bf3472c..c336f5f481bca 100644
6967 +--- a/kernel/sched/pelt.h
6968 ++++ b/kernel/sched/pelt.h
6969 +@@ -37,9 +37,11 @@ update_irq_load_avg(struct rq *rq, u64 running)
6970 + }
6971 + #endif
6972 +
6973 ++#define PELT_MIN_DIVIDER (LOAD_AVG_MAX - 1024)
6974 ++
6975 + static inline u32 get_pelt_divider(struct sched_avg *avg)
6976 + {
6977 +- return LOAD_AVG_MAX - 1024 + avg->period_contrib;
6978 ++ return PELT_MIN_DIVIDER + avg->period_contrib;
6979 + }
6980 +
6981 + static inline void cfs_se_util_change(struct sched_avg *avg)
6982 +diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
6983 +index 69b19d3af690f..422f3b0445cf1 100644
6984 +--- a/kernel/sched/psi.c
6985 ++++ b/kernel/sched/psi.c
6986 +@@ -1082,44 +1082,6 @@ int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
6987 + return 0;
6988 + }
6989 +
6990 +-static int psi_io_show(struct seq_file *m, void *v)
6991 +-{
6992 +- return psi_show(m, &psi_system, PSI_IO);
6993 +-}
6994 +-
6995 +-static int psi_memory_show(struct seq_file *m, void *v)
6996 +-{
6997 +- return psi_show(m, &psi_system, PSI_MEM);
6998 +-}
6999 +-
7000 +-static int psi_cpu_show(struct seq_file *m, void *v)
7001 +-{
7002 +- return psi_show(m, &psi_system, PSI_CPU);
7003 +-}
7004 +-
7005 +-static int psi_open(struct file *file, int (*psi_show)(struct seq_file *, void *))
7006 +-{
7007 +- if (file->f_mode & FMODE_WRITE && !capable(CAP_SYS_RESOURCE))
7008 +- return -EPERM;
7009 +-
7010 +- return single_open(file, psi_show, NULL);
7011 +-}
7012 +-
7013 +-static int psi_io_open(struct inode *inode, struct file *file)
7014 +-{
7015 +- return psi_open(file, psi_io_show);
7016 +-}
7017 +-
7018 +-static int psi_memory_open(struct inode *inode, struct file *file)
7019 +-{
7020 +- return psi_open(file, psi_memory_show);
7021 +-}
7022 +-
7023 +-static int psi_cpu_open(struct inode *inode, struct file *file)
7024 +-{
7025 +- return psi_open(file, psi_cpu_show);
7026 +-}
7027 +-
7028 + struct psi_trigger *psi_trigger_create(struct psi_group *group,
7029 + char *buf, size_t nbytes, enum psi_res res)
7030 + {
7031 +@@ -1162,7 +1124,6 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group,
7032 + t->event = 0;
7033 + t->last_event_time = 0;
7034 + init_waitqueue_head(&t->event_wait);
7035 +- kref_init(&t->refcount);
7036 +
7037 + mutex_lock(&group->trigger_lock);
7038 +
7039 +@@ -1191,15 +1152,19 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group,
7040 + return t;
7041 + }
7042 +
7043 +-static void psi_trigger_destroy(struct kref *ref)
7044 ++void psi_trigger_destroy(struct psi_trigger *t)
7045 + {
7046 +- struct psi_trigger *t = container_of(ref, struct psi_trigger, refcount);
7047 +- struct psi_group *group = t->group;
7048 ++ struct psi_group *group;
7049 + struct task_struct *task_to_destroy = NULL;
7050 +
7051 +- if (static_branch_likely(&psi_disabled))
7052 ++ /*
7053 ++ * We do not check psi_disabled since it might have been disabled after
7054 ++ * the trigger got created.
7055 ++ */
7056 ++ if (!t)
7057 + return;
7058 +
7059 ++ group = t->group;
7060 + /*
7061 + * Wakeup waiters to stop polling. Can happen if cgroup is deleted
7062 + * from under a polling process.
7063 +@@ -1235,9 +1200,9 @@ static void psi_trigger_destroy(struct kref *ref)
7064 + mutex_unlock(&group->trigger_lock);
7065 +
7066 + /*
7067 +- * Wait for both *trigger_ptr from psi_trigger_replace and
7068 +- * poll_task RCUs to complete their read-side critical sections
7069 +- * before destroying the trigger and optionally the poll_task
7070 ++ * Wait for psi_schedule_poll_work RCU to complete its read-side
7071 ++ * critical section before destroying the trigger and optionally the
7072 ++ * poll_task.
7073 + */
7074 + synchronize_rcu();
7075 + /*
7076 +@@ -1254,18 +1219,6 @@ static void psi_trigger_destroy(struct kref *ref)
7077 + kfree(t);
7078 + }
7079 +
7080 +-void psi_trigger_replace(void **trigger_ptr, struct psi_trigger *new)
7081 +-{
7082 +- struct psi_trigger *old = *trigger_ptr;
7083 +-
7084 +- if (static_branch_likely(&psi_disabled))
7085 +- return;
7086 +-
7087 +- rcu_assign_pointer(*trigger_ptr, new);
7088 +- if (old)
7089 +- kref_put(&old->refcount, psi_trigger_destroy);
7090 +-}
7091 +-
7092 + __poll_t psi_trigger_poll(void **trigger_ptr,
7093 + struct file *file, poll_table *wait)
7094 + {
7095 +@@ -1275,27 +1228,57 @@ __poll_t psi_trigger_poll(void **trigger_ptr,
7096 + if (static_branch_likely(&psi_disabled))
7097 + return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
7098 +
7099 +- rcu_read_lock();
7100 +-
7101 +- t = rcu_dereference(*(void __rcu __force **)trigger_ptr);
7102 +- if (!t) {
7103 +- rcu_read_unlock();
7104 ++ t = smp_load_acquire(trigger_ptr);
7105 ++ if (!t)
7106 + return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
7107 +- }
7108 +- kref_get(&t->refcount);
7109 +-
7110 +- rcu_read_unlock();
7111 +
7112 + poll_wait(file, &t->event_wait, wait);
7113 +
7114 + if (cmpxchg(&t->event, 1, 0) == 1)
7115 + ret |= EPOLLPRI;
7116 +
7117 +- kref_put(&t->refcount, psi_trigger_destroy);
7118 +-
7119 + return ret;
7120 + }
7121 +
7122 ++#ifdef CONFIG_PROC_FS
7123 ++static int psi_io_show(struct seq_file *m, void *v)
7124 ++{
7125 ++ return psi_show(m, &psi_system, PSI_IO);
7126 ++}
7127 ++
7128 ++static int psi_memory_show(struct seq_file *m, void *v)
7129 ++{
7130 ++ return psi_show(m, &psi_system, PSI_MEM);
7131 ++}
7132 ++
7133 ++static int psi_cpu_show(struct seq_file *m, void *v)
7134 ++{
7135 ++ return psi_show(m, &psi_system, PSI_CPU);
7136 ++}
7137 ++
7138 ++static int psi_open(struct file *file, int (*psi_show)(struct seq_file *, void *))
7139 ++{
7140 ++ if (file->f_mode & FMODE_WRITE && !capable(CAP_SYS_RESOURCE))
7141 ++ return -EPERM;
7142 ++
7143 ++ return single_open(file, psi_show, NULL);
7144 ++}
7145 ++
7146 ++static int psi_io_open(struct inode *inode, struct file *file)
7147 ++{
7148 ++ return psi_open(file, psi_io_show);
7149 ++}
7150 ++
7151 ++static int psi_memory_open(struct inode *inode, struct file *file)
7152 ++{
7153 ++ return psi_open(file, psi_memory_show);
7154 ++}
7155 ++
7156 ++static int psi_cpu_open(struct inode *inode, struct file *file)
7157 ++{
7158 ++ return psi_open(file, psi_cpu_show);
7159 ++}
7160 ++
7161 + static ssize_t psi_write(struct file *file, const char __user *user_buf,
7162 + size_t nbytes, enum psi_res res)
7163 + {
7164 +@@ -1316,14 +1299,24 @@ static ssize_t psi_write(struct file *file, const char __user *user_buf,
7165 +
7166 + buf[buf_size - 1] = '\0';
7167 +
7168 +- new = psi_trigger_create(&psi_system, buf, nbytes, res);
7169 +- if (IS_ERR(new))
7170 +- return PTR_ERR(new);
7171 +-
7172 + seq = file->private_data;
7173 ++
7174 + /* Take seq->lock to protect seq->private from concurrent writes */
7175 + mutex_lock(&seq->lock);
7176 +- psi_trigger_replace(&seq->private, new);
7177 ++
7178 ++ /* Allow only one trigger per file descriptor */
7179 ++ if (seq->private) {
7180 ++ mutex_unlock(&seq->lock);
7181 ++ return -EBUSY;
7182 ++ }
7183 ++
7184 ++ new = psi_trigger_create(&psi_system, buf, nbytes, res);
7185 ++ if (IS_ERR(new)) {
7186 ++ mutex_unlock(&seq->lock);
7187 ++ return PTR_ERR(new);
7188 ++ }
7189 ++
7190 ++ smp_store_release(&seq->private, new);
7191 + mutex_unlock(&seq->lock);
7192 +
7193 + return nbytes;
7194 +@@ -1358,7 +1351,7 @@ static int psi_fop_release(struct inode *inode, struct file *file)
7195 + {
7196 + struct seq_file *seq = file->private_data;
7197 +
7198 +- psi_trigger_replace(&seq->private, NULL);
7199 ++ psi_trigger_destroy(seq->private);
7200 + return single_release(inode, file);
7201 + }
7202 +
7203 +@@ -1400,3 +1393,5 @@ static int __init psi_proc_init(void)
7204 + return 0;
7205 + }
7206 + module_init(psi_proc_init);
7207 ++
7208 ++#endif /* CONFIG_PROC_FS */
7209 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
7210 +index 78ea542ce3bc2..ae9f9e4af9314 100644
7211 +--- a/kernel/trace/trace.c
7212 ++++ b/kernel/trace/trace.c
7213 +@@ -7734,7 +7734,8 @@ static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
7214 + err = kzalloc(sizeof(*err), GFP_KERNEL);
7215 + if (!err)
7216 + err = ERR_PTR(-ENOMEM);
7217 +- tr->n_err_log_entries++;
7218 ++ else
7219 ++ tr->n_err_log_entries++;
7220 +
7221 + return err;
7222 + }
7223 +diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
7224 +index 319f9c8ca7e7d..e697bfedac2f5 100644
7225 +--- a/kernel/trace/trace_events_hist.c
7226 ++++ b/kernel/trace/trace_events_hist.c
7227 +@@ -2487,6 +2487,8 @@ static struct hist_field *parse_unary(struct hist_trigger_data *hist_data,
7228 + (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
7229 + expr->fn = hist_field_unary_minus;
7230 + expr->operands[0] = operand1;
7231 ++ expr->size = operand1->size;
7232 ++ expr->is_signed = operand1->is_signed;
7233 + expr->operator = FIELD_OP_UNARY_MINUS;
7234 + expr->name = expr_str(expr, 0);
7235 + expr->type = kstrdup_const(operand1->type, GFP_KERNEL);
7236 +@@ -2703,6 +2705,7 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
7237 +
7238 + /* The operand sizes should be the same, so just pick one */
7239 + expr->size = operand1->size;
7240 ++ expr->is_signed = operand1->is_signed;
7241 +
7242 + expr->operator = field_op;
7243 + expr->type = kstrdup_const(operand1->type, GFP_KERNEL);
7244 +@@ -3919,6 +3922,7 @@ static int trace_action_create(struct hist_trigger_data *hist_data,
7245 +
7246 + var_ref_idx = find_var_ref_idx(hist_data, var_ref);
7247 + if (WARN_ON(var_ref_idx < 0)) {
7248 ++ kfree(p);
7249 + ret = var_ref_idx;
7250 + goto err;
7251 + }
7252 +diff --git a/kernel/ucount.c b/kernel/ucount.c
7253 +index 7b32c356ebc5c..65b597431c861 100644
7254 +--- a/kernel/ucount.c
7255 ++++ b/kernel/ucount.c
7256 +@@ -190,6 +190,7 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
7257 + kfree(new);
7258 + } else {
7259 + hlist_add_head(&new->node, hashent);
7260 ++ get_user_ns(new->ns);
7261 + spin_unlock_irq(&ucounts_lock);
7262 + return new;
7263 + }
7264 +@@ -210,6 +211,7 @@ void put_ucounts(struct ucounts *ucounts)
7265 + if (atomic_dec_and_lock_irqsave(&ucounts->count, &ucounts_lock, flags)) {
7266 + hlist_del_init(&ucounts->node);
7267 + spin_unlock_irqrestore(&ucounts_lock, flags);
7268 ++ put_user_ns(ucounts->ns);
7269 + kfree(ucounts);
7270 + }
7271 + }
7272 +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
7273 +index 8882c6dfb48f4..816a0f6823a3a 100644
7274 +--- a/net/bluetooth/hci_event.c
7275 ++++ b/net/bluetooth/hci_event.c
7276 +@@ -5822,6 +5822,11 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
7277 + struct hci_ev_le_advertising_info *ev = ptr;
7278 + s8 rssi;
7279 +
7280 ++ if (ptr > (void *)skb_tail_pointer(skb) - sizeof(*ev)) {
7281 ++ bt_dev_err(hdev, "Malicious advertising data.");
7282 ++ break;
7283 ++ }
7284 ++
7285 + if (ev->length <= HCI_MAX_AD_LENGTH &&
7286 + ev->data + ev->length <= skb_tail_pointer(skb)) {
7287 + rssi = ev->data[ev->length];
7288 +@@ -5833,11 +5838,6 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
7289 + }
7290 +
7291 + ptr += sizeof(*ev) + ev->length + 1;
7292 +-
7293 +- if (ptr > (void *) skb_tail_pointer(skb) - sizeof(*ev)) {
7294 +- bt_dev_err(hdev, "Malicious advertising data. Stopping processing");
7295 +- break;
7296 +- }
7297 + }
7298 +
7299 + hci_dev_unlock(hdev);
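The fix above moves the bounds check so a full advertising-report header is validated against the remaining buffer before it is read, instead of after the pointer has already advanced past it. A hedged sketch of that parse-before-trust order for length-prefixed records; the record layout here (one length byte, payload, one trailing byte) only mimics the report format and is not the real HCI structure:

#include <stddef.h>
#include <stdint.h>

struct rec {
        uint8_t len;
        uint8_t data[];
};

static int walk_records(const uint8_t *buf, size_t total)
{
        const uint8_t *ptr = buf;
        const uint8_t *end = buf + total;

        while (ptr < end) {
                const struct rec *r;

                /* verify a full header still fits before reading it */
                if ((size_t)(end - ptr) < sizeof(*r))
                        return -1;      /* truncated or malicious input */
                r = (const struct rec *)ptr;
                /* the payload plus the trailing byte must also fit */
                if ((size_t)(end - ptr) < sizeof(*r) + r->len + 1u)
                        return -1;
                /* ... consume r->data[0 .. r->len - 1] and the extra byte ... */
                ptr += sizeof(*r) + r->len + 1;
        }
        return 0;
}

Checking before the dereference means a record whose length field points past the end of the buffer is rejected instead of being read first and caught only on the next iteration.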
7300 +diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
7301 +index 49e105e0a4479..f02351b4acaca 100644
7302 +--- a/net/bridge/br_vlan.c
7303 ++++ b/net/bridge/br_vlan.c
7304 +@@ -560,10 +560,10 @@ static bool __allowed_ingress(const struct net_bridge *br,
7305 + !br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
7306 + if (*state == BR_STATE_FORWARDING) {
7307 + *state = br_vlan_get_pvid_state(vg);
7308 +- return br_vlan_state_allowed(*state, true);
7309 +- } else {
7310 +- return true;
7311 ++ if (!br_vlan_state_allowed(*state, true))
7312 ++ goto drop;
7313 + }
7314 ++ return true;
7315 + }
7316 + }
7317 + v = br_vlan_find(vg, *vid);
7318 +@@ -2020,7 +2020,8 @@ static int br_vlan_rtm_dump(struct sk_buff *skb, struct netlink_callback *cb)
7319 + goto out_err;
7320 + }
7321 + err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
7322 +- if (err && err != -EMSGSIZE)
7323 ++ /* if the dump completed without an error we return 0 here */
7324 ++ if (err != -EMSGSIZE)
7325 + goto out_err;
7326 + } else {
7327 + for_each_netdev_rcu(net, dev) {
7328 +diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
7329 +index d8b9dbabd4a43..88cc0ad7d386e 100644
7330 +--- a/net/core/net-procfs.c
7331 ++++ b/net/core/net-procfs.c
7332 +@@ -190,12 +190,23 @@ static const struct seq_operations softnet_seq_ops = {
7333 + .show = softnet_seq_show,
7334 + };
7335 +
7336 +-static void *ptype_get_idx(loff_t pos)
7337 ++static void *ptype_get_idx(struct seq_file *seq, loff_t pos)
7338 + {
7339 ++ struct list_head *ptype_list = NULL;
7340 + struct packet_type *pt = NULL;
7341 ++ struct net_device *dev;
7342 + loff_t i = 0;
7343 + int t;
7344 +
7345 ++ for_each_netdev_rcu(seq_file_net(seq), dev) {
7346 ++ ptype_list = &dev->ptype_all;
7347 ++ list_for_each_entry_rcu(pt, ptype_list, list) {
7348 ++ if (i == pos)
7349 ++ return pt;
7350 ++ ++i;
7351 ++ }
7352 ++ }
7353 ++
7354 + list_for_each_entry_rcu(pt, &ptype_all, list) {
7355 + if (i == pos)
7356 + return pt;
7357 +@@ -216,22 +227,40 @@ static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
7358 + __acquires(RCU)
7359 + {
7360 + rcu_read_lock();
7361 +- return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
7362 ++ return *pos ? ptype_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
7363 + }
7364 +
7365 + static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
7366 + {
7367 ++ struct net_device *dev;
7368 + struct packet_type *pt;
7369 + struct list_head *nxt;
7370 + int hash;
7371 +
7372 + ++*pos;
7373 + if (v == SEQ_START_TOKEN)
7374 +- return ptype_get_idx(0);
7375 ++ return ptype_get_idx(seq, 0);
7376 +
7377 + pt = v;
7378 + nxt = pt->list.next;
7379 ++ if (pt->dev) {
7380 ++ if (nxt != &pt->dev->ptype_all)
7381 ++ goto found;
7382 ++
7383 ++ dev = pt->dev;
7384 ++ for_each_netdev_continue_rcu(seq_file_net(seq), dev) {
7385 ++ if (!list_empty(&dev->ptype_all)) {
7386 ++ nxt = dev->ptype_all.next;
7387 ++ goto found;
7388 ++ }
7389 ++ }
7390 ++
7391 ++ nxt = ptype_all.next;
7392 ++ goto ptype_all;
7393 ++ }
7394 ++
7395 + if (pt->type == htons(ETH_P_ALL)) {
7396 ++ptype_all:
7397 + if (nxt != &ptype_all)
7398 + goto found;
7399 + hash = 0;
7400 +@@ -260,7 +289,8 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
7401 +
7402 + if (v == SEQ_START_TOKEN)
7403 + seq_puts(seq, "Type Device Function\n");
7404 +- else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
7405 ++ else if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) &&
7406 ++ (!pt->dev || net_eq(dev_net(pt->dev), seq_file_net(seq)))) {
7407 + if (pt->type == htons(ETH_P_ALL))
7408 + seq_puts(seq, "ALL ");
7409 + else
7410 +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
7411 +index 9bca57ef8b838..a4d2eb691cbc1 100644
7412 +--- a/net/ipv4/ip_output.c
7413 ++++ b/net/ipv4/ip_output.c
7414 +@@ -162,12 +162,19 @@ int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
7415 + iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
7416 + iph->saddr = saddr;
7417 + iph->protocol = sk->sk_protocol;
7418 +- if (ip_dont_fragment(sk, &rt->dst)) {
7419 ++ /* Do not bother generating IPID for small packets (eg SYNACK) */
7420 ++ if (skb->len <= IPV4_MIN_MTU || ip_dont_fragment(sk, &rt->dst)) {
7421 + iph->frag_off = htons(IP_DF);
7422 + iph->id = 0;
7423 + } else {
7424 + iph->frag_off = 0;
7425 +- __ip_select_ident(net, iph, 1);
7426 ++ /* TCP packets here are SYNACK with fat IPv4/TCP options.
7427 ++ * Avoid using the hashed IP ident generator.
7428 ++ */
7429 ++ if (sk->sk_protocol == IPPROTO_TCP)
7430 ++ iph->id = (__force __be16)prandom_u32();
7431 ++ else
7432 ++ __ip_select_ident(net, iph, 1);
7433 + }
7434 +
7435 + if (opt && opt->opt.optlen) {
7436 +@@ -826,15 +833,24 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
7437 + /* Everything is OK. Generate! */
7438 + ip_fraglist_init(skb, iph, hlen, &iter);
7439 +
7440 +- if (iter.frag)
7441 +- ip_options_fragment(iter.frag);
7442 +-
7443 + for (;;) {
7444 + /* Prepare header of the next frame,
7445 + * before previous one went down. */
7446 + if (iter.frag) {
7447 ++ bool first_frag = (iter.offset == 0);
7448 ++
7449 + IPCB(iter.frag)->flags = IPCB(skb)->flags;
7450 + ip_fraglist_prepare(skb, &iter);
7451 ++ if (first_frag && IPCB(skb)->opt.optlen) {
7452 ++ /* ipcb->opt is not populated for frags
7453 ++ * coming from __ip_make_skb(),
7454 ++ * ip_options_fragment() needs optlen
7455 ++ */
7456 ++ IPCB(iter.frag)->opt.optlen =
7457 ++ IPCB(skb)->opt.optlen;
7458 ++ ip_options_fragment(iter.frag);
7459 ++ ip_send_check(iter.iph);
7460 ++ }
7461 + }
7462 +
7463 + skb->tstamp = tstamp;
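The first hunk above changes how the IPv4 ID is chosen for locally built packets: tiny or DF packets keep id 0, TCP replies that do need an ID draw a random value instead of one from the hashed per-destination generator, and everything else still goes through the shared ident path. A simplified, userspace-only sketch of that decision; MIN_MTU, rand16 and the counter are stand-ins, not kernel symbols:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

#define MIN_MTU 68      /* stand-in for IPV4_MIN_MTU */

static uint16_t rand16(void)
{
        return (uint16_t)rand();        /* stand-in for prandom_u32() */
}

static uint16_t pick_ip_id(size_t pkt_len, bool dont_fragment, bool is_tcp,
                           uint16_t *per_dst_counter)
{
        if (pkt_len <= MIN_MTU || dont_fragment)
                return 0;               /* DF packets never need an ID for reassembly */
        if (is_tcp)
                return rand16();        /* avoid the hashed per-destination generator */
        return (*per_dst_counter)++;    /* rough stand-in for __ip_select_ident() */
}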
7464 +diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
7465 +index 1e44a43acfe2d..086822cb1cc96 100644
7466 +--- a/net/ipv4/ping.c
7467 ++++ b/net/ipv4/ping.c
7468 +@@ -220,7 +220,8 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
7469 + continue;
7470 + }
7471 +
7472 +- if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
7473 ++ if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif &&
7474 ++ sk->sk_bound_dev_if != inet_sdif(skb))
7475 + continue;
7476 +
7477 + sock_hold(sk);
7478 +diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
7479 +index bb446e60cf580..b8689052079cd 100644
7480 +--- a/net/ipv4/raw.c
7481 ++++ b/net/ipv4/raw.c
7482 +@@ -721,6 +721,7 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
7483 + int ret = -EINVAL;
7484 + int chk_addr_ret;
7485 +
7486 ++ lock_sock(sk);
7487 + if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in))
7488 + goto out;
7489 +
7490 +@@ -740,7 +741,9 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
7491 + inet->inet_saddr = 0; /* Use device */
7492 + sk_dst_reset(sk);
7493 + ret = 0;
7494 +-out: return ret;
7495 ++out:
7496 ++ release_sock(sk);
7497 ++ return ret;
7498 + }
7499 +
7500 + /*
7501 +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
7502 +index 3445f8017430f..87961f1d9959b 100644
7503 +--- a/net/ipv6/addrconf.c
7504 ++++ b/net/ipv6/addrconf.c
7505 +@@ -2589,7 +2589,7 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
7506 + __u32 valid_lft, u32 prefered_lft)
7507 + {
7508 + struct inet6_ifaddr *ifp = ipv6_get_ifaddr(net, addr, dev, 1);
7509 +- int create = 0;
7510 ++ int create = 0, update_lft = 0;
7511 +
7512 + if (!ifp && valid_lft) {
7513 + int max_addresses = in6_dev->cnf.max_addresses;
7514 +@@ -2633,19 +2633,32 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
7515 + unsigned long now;
7516 + u32 stored_lft;
7517 +
7518 +- /* Update lifetime (RFC4862 5.5.3 e)
7519 +- * We deviate from RFC4862 by honoring all Valid Lifetimes to
7520 +- * improve the reaction of SLAAC to renumbering events
7521 +- * (draft-gont-6man-slaac-renum-06, Section 4.2)
7522 +- */
7523 ++ /* update lifetime (RFC2462 5.5.3 e) */
7524 + spin_lock_bh(&ifp->lock);
7525 + now = jiffies;
7526 + if (ifp->valid_lft > (now - ifp->tstamp) / HZ)
7527 + stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ;
7528 + else
7529 + stored_lft = 0;
7530 +-
7531 + if (!create && stored_lft) {
7532 ++ const u32 minimum_lft = min_t(u32,
7533 ++ stored_lft, MIN_VALID_LIFETIME);
7534 ++ valid_lft = max(valid_lft, minimum_lft);
7535 ++
7536 ++ /* RFC4862 Section 5.5.3e:
7537 ++ * "Note that the preferred lifetime of the
7538 ++ * corresponding address is always reset to
7539 ++ * the Preferred Lifetime in the received
7540 ++ * Prefix Information option, regardless of
7541 ++ * whether the valid lifetime is also reset or
7542 ++ * ignored."
7543 ++ *
7544 ++ * So we should always update prefered_lft here.
7545 ++ */
7546 ++ update_lft = 1;
7547 ++ }
7548 ++
7549 ++ if (update_lft) {
7550 + ifp->valid_lft = valid_lft;
7551 + ifp->prefered_lft = prefered_lft;
7552 + ifp->tstamp = now;
7553 +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
7554 +index 0371d2c141455..a506e57c4032a 100644
7555 +--- a/net/ipv6/ip6_fib.c
7556 ++++ b/net/ipv6/ip6_fib.c
7557 +@@ -111,7 +111,7 @@ void fib6_update_sernum(struct net *net, struct fib6_info *f6i)
7558 + fn = rcu_dereference_protected(f6i->fib6_node,
7559 + lockdep_is_held(&f6i->fib6_table->tb6_lock));
7560 + if (fn)
7561 +- fn->fn_sernum = fib6_new_sernum(net);
7562 ++ WRITE_ONCE(fn->fn_sernum, fib6_new_sernum(net));
7563 + }
7564 +
7565 + /*
7566 +@@ -589,12 +589,13 @@ static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
7567 + spin_unlock_bh(&table->tb6_lock);
7568 + if (res > 0) {
7569 + cb->args[4] = 1;
7570 +- cb->args[5] = w->root->fn_sernum;
7571 ++ cb->args[5] = READ_ONCE(w->root->fn_sernum);
7572 + }
7573 + } else {
7574 +- if (cb->args[5] != w->root->fn_sernum) {
7575 ++ int sernum = READ_ONCE(w->root->fn_sernum);
7576 ++ if (cb->args[5] != sernum) {
7577 + /* Begin at the root if the tree changed */
7578 +- cb->args[5] = w->root->fn_sernum;
7579 ++ cb->args[5] = sernum;
7580 + w->state = FWS_INIT;
7581 + w->node = w->root;
7582 + w->skip = w->count;
7583 +@@ -1344,7 +1345,7 @@ static void __fib6_update_sernum_upto_root(struct fib6_info *rt,
7584 + /* paired with smp_rmb() in fib6_get_cookie_safe() */
7585 + smp_wmb();
7586 + while (fn) {
7587 +- fn->fn_sernum = sernum;
7588 ++ WRITE_ONCE(fn->fn_sernum, sernum);
7589 + fn = rcu_dereference_protected(fn->parent,
7590 + lockdep_is_held(&rt->fib6_table->tb6_lock));
7591 + }
7592 +@@ -2173,8 +2174,8 @@ static int fib6_clean_node(struct fib6_walker *w)
7593 + };
7594 +
7595 + if (c->sernum != FIB6_NO_SERNUM_CHANGE &&
7596 +- w->node->fn_sernum != c->sernum)
7597 +- w->node->fn_sernum = c->sernum;
7598 ++ READ_ONCE(w->node->fn_sernum) != c->sernum)
7599 ++ WRITE_ONCE(w->node->fn_sernum, c->sernum);
7600 +
7601 + if (!c->func) {
7602 + WARN_ON_ONCE(c->sernum == FIB6_NO_SERNUM_CHANGE);
7603 +@@ -2542,7 +2543,7 @@ static void ipv6_route_seq_setup_walk(struct ipv6_route_iter *iter,
7604 + iter->w.state = FWS_INIT;
7605 + iter->w.node = iter->w.root;
7606 + iter->w.args = iter;
7607 +- iter->sernum = iter->w.root->fn_sernum;
7608 ++ iter->sernum = READ_ONCE(iter->w.root->fn_sernum);
7609 + INIT_LIST_HEAD(&iter->w.lh);
7610 + fib6_walker_link(net, &iter->w);
7611 + }
7612 +@@ -2570,8 +2571,10 @@ static struct fib6_table *ipv6_route_seq_next_table(struct fib6_table *tbl,
7613 +
7614 + static void ipv6_route_check_sernum(struct ipv6_route_iter *iter)
7615 + {
7616 +- if (iter->sernum != iter->w.root->fn_sernum) {
7617 +- iter->sernum = iter->w.root->fn_sernum;
7618 ++ int sernum = READ_ONCE(iter->w.root->fn_sernum);
7619 ++
7620 ++ if (iter->sernum != sernum) {
7621 ++ iter->sernum = sernum;
7622 + iter->w.state = FWS_INIT;
7623 + iter->w.node = iter->w.root;
7624 + WARN_ON(iter->w.skip);
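The sernum hunks above wrap every fn_sernum access in READ_ONCE()/WRITE_ONCE() because the field is written under the table lock but read by lockless walkers, so the compiler must not tear or refetch it. A loose userspace analogue using C11 relaxed atomics in place of the kernel macros:

#include <stdatomic.h>
#include <stdbool.h>

struct node { atomic_int sernum; };

static void writer_bump(struct node *n, int new_sernum)
{
        /* plays the role of WRITE_ONCE(fn->fn_sernum, sernum) */
        atomic_store_explicit(&n->sernum, new_sernum, memory_order_relaxed);
}

static bool reader_changed(struct node *n, int *cached)
{
        /* plays the role of READ_ONCE(w->root->fn_sernum) */
        int cur = atomic_load_explicit(&n->sernum, memory_order_relaxed);

        if (cur == *cached)
                return false;
        *cached = cur;  /* caller restarts the walk, like setting FWS_INIT */
        return true;
}

The relaxed ordering matters only for avoiding torn or cached accesses, which is the same guarantee the kernel macros give here; the actual synchronization still comes from the table lock on the writer side.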
7625 +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
7626 +index 484aca492cc06..7c6a0bdb0e1ba 100644
7627 +--- a/net/ipv6/ip6_tunnel.c
7628 ++++ b/net/ipv6/ip6_tunnel.c
7629 +@@ -1036,14 +1036,14 @@ int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
7630 +
7631 + if (unlikely(!ipv6_chk_addr_and_flags(net, laddr, ldev, false,
7632 + 0, IFA_F_TENTATIVE)))
7633 +- pr_warn("%s xmit: Local address not yet configured!\n",
7634 +- p->name);
7635 ++ pr_warn_ratelimited("%s xmit: Local address not yet configured!\n",
7636 ++ p->name);
7637 + else if (!(p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) &&
7638 + !ipv6_addr_is_multicast(raddr) &&
7639 + unlikely(ipv6_chk_addr_and_flags(net, raddr, ldev,
7640 + true, 0, IFA_F_TENTATIVE)))
7641 +- pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
7642 +- p->name);
7643 ++ pr_warn_ratelimited("%s xmit: Routing loop! Remote address found on this node!\n",
7644 ++ p->name);
7645 + else
7646 + ret = 1;
7647 + rcu_read_unlock();
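The change above downgrades two warnings that can fire on every transmitted packet to pr_warn_ratelimited(), so a misconfigured tunnel cannot flood the log. A very small sketch of the idea behind such a limiter; the one-message-per-second logic below is a simplification, not the kernel's ratelimit implementation:

#include <stdio.h>
#include <time.h>

static void warn_ratelimited(const char *msg)
{
        static time_t last_warn;
        time_t now = time(NULL);

        if (now == last_warn)   /* at most one message per second */
                return;
        last_warn = now;
        fprintf(stderr, "%s\n", msg);
}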
7648 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
7649 +index 1deb6297aab66..49fee1f1951c2 100644
7650 +--- a/net/ipv6/route.c
7651 ++++ b/net/ipv6/route.c
7652 +@@ -2802,7 +2802,7 @@ static void ip6_link_failure(struct sk_buff *skb)
7653 + if (from) {
7654 + fn = rcu_dereference(from->fib6_node);
7655 + if (fn && (rt->rt6i_flags & RTF_DEFAULT))
7656 +- fn->fn_sernum = -1;
7657 ++ WRITE_ONCE(fn->fn_sernum, -1);
7658 + }
7659 + }
7660 + rcu_read_unlock();
7661 +diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
7662 +index 4712a90a1820c..7f79974607643 100644
7663 +--- a/net/netfilter/nf_conntrack_core.c
7664 ++++ b/net/netfilter/nf_conntrack_core.c
7665 +@@ -1922,15 +1922,17 @@ repeat:
7666 + pr_debug("nf_conntrack_in: Can't track with proto module\n");
7667 + nf_conntrack_put(&ct->ct_general);
7668 + skb->_nfct = 0;
7669 +- NF_CT_STAT_INC_ATOMIC(state->net, invalid);
7670 +- if (ret == -NF_DROP)
7671 +- NF_CT_STAT_INC_ATOMIC(state->net, drop);
7672 + /* Special case: TCP tracker reports an attempt to reopen a
7673 + * closed/aborted connection. We have to go back and create a
7674 + * fresh conntrack.
7675 + */
7676 + if (ret == -NF_REPEAT)
7677 + goto repeat;
7678 ++
7679 ++ NF_CT_STAT_INC_ATOMIC(state->net, invalid);
7680 ++ if (ret == -NF_DROP)
7681 ++ NF_CT_STAT_INC_ATOMIC(state->net, drop);
7682 ++
7683 + ret = -ret;
7684 + goto out;
7685 + }
7686 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
7687 +index 76c2dca7f0a59..43eef5c712c1e 100644
7688 +--- a/net/packet/af_packet.c
7689 ++++ b/net/packet/af_packet.c
7690 +@@ -1773,6 +1773,7 @@ static int fanout_add(struct sock *sk, struct fanout_args *args)
7691 + match->prot_hook.dev = po->prot_hook.dev;
7692 + match->prot_hook.func = packet_rcv_fanout;
7693 + match->prot_hook.af_packet_priv = match;
7694 ++ match->prot_hook.af_packet_net = read_pnet(&match->net);
7695 + match->prot_hook.id_match = match_fanout_group;
7696 + match->max_num_members = args->max_num_members;
7697 + list_add(&match->list, &fanout_list);
7698 +@@ -3358,6 +3359,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
7699 + po->prot_hook.func = packet_rcv_spkt;
7700 +
7701 + po->prot_hook.af_packet_priv = sk;
7702 ++ po->prot_hook.af_packet_net = sock_net(sk);
7703 +
7704 + if (proto) {
7705 + po->prot_hook.type = proto;
7706 +diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
7707 +index 6be2672a65eab..df864e6922679 100644
7708 +--- a/net/rxrpc/call_event.c
7709 ++++ b/net/rxrpc/call_event.c
7710 +@@ -157,7 +157,7 @@ static void rxrpc_congestion_timeout(struct rxrpc_call *call)
7711 + static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
7712 + {
7713 + struct sk_buff *skb;
7714 +- unsigned long resend_at, rto_j;
7715 ++ unsigned long resend_at;
7716 + rxrpc_seq_t cursor, seq, top;
7717 + ktime_t now, max_age, oldest, ack_ts;
7718 + int ix;
7719 +@@ -165,10 +165,8 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
7720 +
7721 + _enter("{%d,%d}", call->tx_hard_ack, call->tx_top);
7722 +
7723 +- rto_j = call->peer->rto_j;
7724 +-
7725 + now = ktime_get_real();
7726 +- max_age = ktime_sub(now, jiffies_to_usecs(rto_j));
7727 ++ max_age = ktime_sub(now, jiffies_to_usecs(call->peer->rto_j));
7728 +
7729 + spin_lock_bh(&call->lock);
7730 +
7731 +@@ -213,7 +211,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
7732 + }
7733 +
7734 + resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest)));
7735 +- resend_at += jiffies + rto_j;
7736 ++ resend_at += jiffies + rxrpc_get_rto_backoff(call->peer, retrans);
7737 + WRITE_ONCE(call->resend_at, resend_at);
7738 +
7739 + if (unacked)
7740 +diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
7741 +index 10f2bf2e9068a..a45c83f22236e 100644
7742 +--- a/net/rxrpc/output.c
7743 ++++ b/net/rxrpc/output.c
7744 +@@ -468,7 +468,7 @@ done:
7745 + if (call->peer->rtt_count > 1) {
7746 + unsigned long nowj = jiffies, ack_lost_at;
7747 +
7748 +- ack_lost_at = rxrpc_get_rto_backoff(call->peer, retrans);
7749 ++ ack_lost_at = rxrpc_get_rto_backoff(call->peer, false);
7750 + ack_lost_at += nowj;
7751 + WRITE_ONCE(call->ack_lost_at, ack_lost_at);
7752 + rxrpc_reduce_call_timer(call, ack_lost_at, nowj,
7753 +diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
7754 +index 9267922ea9c37..23a9d6242429f 100644
7755 +--- a/net/sched/sch_htb.c
7756 ++++ b/net/sched/sch_htb.c
7757 +@@ -1810,6 +1810,26 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
7758 + if (!hopt->rate.rate || !hopt->ceil.rate)
7759 + goto failure;
7760 +
7761 ++ if (q->offload) {
7762 ++ /* Options not supported by the offload. */
7763 ++ if (hopt->rate.overhead || hopt->ceil.overhead) {
7764 ++ NL_SET_ERR_MSG(extack, "HTB offload doesn't support the overhead parameter");
7765 ++ goto failure;
7766 ++ }
7767 ++ if (hopt->rate.mpu || hopt->ceil.mpu) {
7768 ++ NL_SET_ERR_MSG(extack, "HTB offload doesn't support the mpu parameter");
7769 ++ goto failure;
7770 ++ }
7771 ++ if (hopt->quantum) {
7772 ++ NL_SET_ERR_MSG(extack, "HTB offload doesn't support the quantum parameter");
7773 ++ goto failure;
7774 ++ }
7775 ++ if (hopt->prio) {
7776 ++ NL_SET_ERR_MSG(extack, "HTB offload doesn't support the prio parameter");
7777 ++ goto failure;
7778 ++ }
7779 ++ }
7780 ++
7781 + /* Keeping backward compatible with rate_table based iproute2 tc */
7782 + if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
7783 + qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB],
7784 +diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
7785 +index 211cd91b6c408..85e077a69c67d 100644
7786 +--- a/net/smc/af_smc.c
7787 ++++ b/net/smc/af_smc.c
7788 +@@ -566,12 +566,17 @@ static void smc_stat_fallback(struct smc_sock *smc)
7789 + mutex_unlock(&net->smc.mutex_fback_rsn);
7790 + }
7791 +
7792 +-static void smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
7793 ++static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
7794 + {
7795 + wait_queue_head_t *smc_wait = sk_sleep(&smc->sk);
7796 +- wait_queue_head_t *clc_wait = sk_sleep(smc->clcsock->sk);
7797 ++ wait_queue_head_t *clc_wait;
7798 + unsigned long flags;
7799 +
7800 ++ mutex_lock(&smc->clcsock_release_lock);
7801 ++ if (!smc->clcsock) {
7802 ++ mutex_unlock(&smc->clcsock_release_lock);
7803 ++ return -EBADF;
7804 ++ }
7805 + smc->use_fallback = true;
7806 + smc->fallback_rsn = reason_code;
7807 + smc_stat_fallback(smc);
7808 +@@ -586,18 +591,30 @@ static void smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
7809 + * smc socket->wq, which should be removed
7810 + * to clcsocket->wq during the fallback.
7811 + */
7812 ++ clc_wait = sk_sleep(smc->clcsock->sk);
7813 + spin_lock_irqsave(&smc_wait->lock, flags);
7814 + spin_lock_nested(&clc_wait->lock, SINGLE_DEPTH_NESTING);
7815 + list_splice_init(&smc_wait->head, &clc_wait->head);
7816 + spin_unlock(&clc_wait->lock);
7817 + spin_unlock_irqrestore(&smc_wait->lock, flags);
7818 + }
7819 ++ mutex_unlock(&smc->clcsock_release_lock);
7820 ++ return 0;
7821 + }
7822 +
7823 + /* fall back during connect */
7824 + static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
7825 + {
7826 +- smc_switch_to_fallback(smc, reason_code);
7827 ++ struct net *net = sock_net(&smc->sk);
7828 ++ int rc = 0;
7829 ++
7830 ++ rc = smc_switch_to_fallback(smc, reason_code);
7831 ++ if (rc) { /* fallback fails */
7832 ++ this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
7833 ++ if (smc->sk.sk_state == SMC_INIT)
7834 ++ sock_put(&smc->sk); /* passive closing */
7835 ++ return rc;
7836 ++ }
7837 + smc_copy_sock_settings_to_clc(smc);
7838 + smc->connect_nonblock = 0;
7839 + if (smc->sk.sk_state == SMC_INIT)
7840 +@@ -1514,11 +1531,12 @@ static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
7841 + {
7842 + /* RDMA setup failed, switch back to TCP */
7843 + smc_conn_abort(new_smc, local_first);
7844 +- if (reason_code < 0) { /* error, no fallback possible */
7845 ++ if (reason_code < 0 ||
7846 ++ smc_switch_to_fallback(new_smc, reason_code)) {
7847 ++ /* error, no fallback possible */
7848 + smc_listen_out_err(new_smc);
7849 + return;
7850 + }
7851 +- smc_switch_to_fallback(new_smc, reason_code);
7852 + if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) {
7853 + if (smc_clc_send_decline(new_smc, reason_code, version) < 0) {
7854 + smc_listen_out_err(new_smc);
7855 +@@ -1960,8 +1978,11 @@ static void smc_listen_work(struct work_struct *work)
7856 +
7857 + /* check if peer is smc capable */
7858 + if (!tcp_sk(newclcsock->sk)->syn_smc) {
7859 +- smc_switch_to_fallback(new_smc, SMC_CLC_DECL_PEERNOSMC);
7860 +- smc_listen_out_connected(new_smc);
7861 ++ rc = smc_switch_to_fallback(new_smc, SMC_CLC_DECL_PEERNOSMC);
7862 ++ if (rc)
7863 ++ smc_listen_out_err(new_smc);
7864 ++ else
7865 ++ smc_listen_out_connected(new_smc);
7866 + return;
7867 + }
7868 +
7869 +@@ -2250,7 +2271,9 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
7870 +
7871 + if (msg->msg_flags & MSG_FASTOPEN) {
7872 + if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
7873 +- smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
7874 ++ rc = smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
7875 ++ if (rc)
7876 ++ goto out;
7877 + } else {
7878 + rc = -EINVAL;
7879 + goto out;
7880 +@@ -2443,6 +2466,11 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
7881 + /* generic setsockopts reaching us here always apply to the
7882 + * CLC socket
7883 + */
7884 ++ mutex_lock(&smc->clcsock_release_lock);
7885 ++ if (!smc->clcsock) {
7886 ++ mutex_unlock(&smc->clcsock_release_lock);
7887 ++ return -EBADF;
7888 ++ }
7889 + if (unlikely(!smc->clcsock->ops->setsockopt))
7890 + rc = -EOPNOTSUPP;
7891 + else
7892 +@@ -2452,6 +2480,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
7893 + sk->sk_err = smc->clcsock->sk->sk_err;
7894 + sk_error_report(sk);
7895 + }
7896 ++ mutex_unlock(&smc->clcsock_release_lock);
7897 +
7898 + if (optlen < sizeof(int))
7899 + return -EINVAL;
7900 +@@ -2468,7 +2497,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
7901 + case TCP_FASTOPEN_NO_COOKIE:
7902 + /* option not supported by SMC */
7903 + if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
7904 +- smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
7905 ++ rc = smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
7906 + } else {
7907 + rc = -EINVAL;
7908 + }
7909 +@@ -2511,13 +2540,23 @@ static int smc_getsockopt(struct socket *sock, int level, int optname,
7910 + char __user *optval, int __user *optlen)
7911 + {
7912 + struct smc_sock *smc;
7913 ++ int rc;
7914 +
7915 + smc = smc_sk(sock->sk);
7916 ++ mutex_lock(&smc->clcsock_release_lock);
7917 ++ if (!smc->clcsock) {
7918 ++ mutex_unlock(&smc->clcsock_release_lock);
7919 ++ return -EBADF;
7920 ++ }
7921 + /* socket options apply to the CLC socket */
7922 +- if (unlikely(!smc->clcsock->ops->getsockopt))
7923 ++ if (unlikely(!smc->clcsock->ops->getsockopt)) {
7924 ++ mutex_unlock(&smc->clcsock_release_lock);
7925 + return -EOPNOTSUPP;
7926 +- return smc->clcsock->ops->getsockopt(smc->clcsock, level, optname,
7927 +- optval, optlen);
7928 ++ }
7929 ++ rc = smc->clcsock->ops->getsockopt(smc->clcsock, level, optname,
7930 ++ optval, optlen);
7931 ++ mutex_unlock(&smc->clcsock_release_lock);
7932 ++ return rc;
7933 + }
7934 +
7935 + static int smc_ioctl(struct socket *sock, unsigned int cmd,
7936 +diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
7937 +index ee5336d73fddc..35588f0afa864 100644
7938 +--- a/net/sunrpc/rpc_pipe.c
7939 ++++ b/net/sunrpc/rpc_pipe.c
7940 +@@ -600,9 +600,9 @@ static int __rpc_rmdir(struct inode *dir, struct dentry *dentry)
7941 +
7942 + dget(dentry);
7943 + ret = simple_rmdir(dir, dentry);
7944 ++ d_drop(dentry);
7945 + if (!ret)
7946 + fsnotify_rmdir(dir, dentry);
7947 +- d_delete(dentry);
7948 + dput(dentry);
7949 + return ret;
7950 + }
7951 +@@ -613,9 +613,9 @@ static int __rpc_unlink(struct inode *dir, struct dentry *dentry)
7952 +
7953 + dget(dentry);
7954 + ret = simple_unlink(dir, dentry);
7955 ++ d_drop(dentry);
7956 + if (!ret)
7957 + fsnotify_unlink(dir, dentry);
7958 +- d_delete(dentry);
7959 + dput(dentry);
7960 + return ret;
7961 + }
7962 +diff --git a/security/security.c b/security/security.c
7963 +index c88167a414b41..64abdfb20bc2c 100644
7964 +--- a/security/security.c
7965 ++++ b/security/security.c
7966 +@@ -1056,8 +1056,19 @@ int security_dentry_init_security(struct dentry *dentry, int mode,
7967 + const char **xattr_name, void **ctx,
7968 + u32 *ctxlen)
7969 + {
7970 +- return call_int_hook(dentry_init_security, -EOPNOTSUPP, dentry, mode,
7971 +- name, xattr_name, ctx, ctxlen);
7972 ++ struct security_hook_list *hp;
7973 ++ int rc;
7974 ++
7975 ++ /*
7976 ++ * Only one module will provide a security context.
7977 ++ */
7978 ++ hlist_for_each_entry(hp, &security_hook_heads.dentry_init_security, list) {
7979 ++ rc = hp->hook.dentry_init_security(dentry, mode, name,
7980 ++ xattr_name, ctx, ctxlen);
7981 ++ if (rc != LSM_RET_DEFAULT(dentry_init_security))
7982 ++ return rc;
7983 ++ }
7984 ++ return LSM_RET_DEFAULT(dentry_init_security);
7985 + }
7986 + EXPORT_SYMBOL(security_dentry_init_security);
7987 +
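The hunk above open-codes the hook walk so security_dentry_init_security() returns the first LSM result that differs from the default, instead of going through call_int_hook(). A generic sketch of that first-answer-wins dispatch; the handler type and names are made up for illustration:

#include <stddef.h>

#define DEFAULT_RET (-95)       /* stand-in for LSM_RET_DEFAULT(): -EOPNOTSUPP */

typedef int (*hook_fn)(void *ctx);

static int call_first_hook(hook_fn *hooks, size_t nr_hooks, void *ctx)
{
        for (size_t i = 0; i < nr_hooks; i++) {
                int rc = hooks[i](ctx);

                if (rc != DEFAULT_RET)  /* only one module provides the context */
                        return rc;
        }
        return DEFAULT_RET;
}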
7988 +diff --git a/tools/testing/scatterlist/linux/mm.h b/tools/testing/scatterlist/linux/mm.h
7989 +index 16ec895bbe5ff..5bd9e6e806254 100644
7990 +--- a/tools/testing/scatterlist/linux/mm.h
7991 ++++ b/tools/testing/scatterlist/linux/mm.h
7992 +@@ -74,7 +74,7 @@ static inline unsigned long page_to_phys(struct page *page)
7993 + __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
7994 + x, y)
7995 +
7996 +-#define preemptible() (1)
7997 ++#define pagefault_disabled() (0)
7998 +
7999 + static inline void *kmap(struct page *page)
8000 + {
8001 +@@ -127,6 +127,7 @@ kmalloc_array(unsigned int n, unsigned int size, unsigned int flags)
8002 + #define kmemleak_free(a)
8003 +
8004 + #define PageSlab(p) (0)
8005 ++#define flush_dcache_page(p)
8006 +
8007 + #define MAX_ERRNO 4095
8008 +
8009 +diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
8010 +index 290b1b0552d6e..4fdfb42aeddba 100644
8011 +--- a/tools/testing/selftests/kvm/Makefile
8012 ++++ b/tools/testing/selftests/kvm/Makefile
8013 +@@ -77,6 +77,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test
8014 + TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test
8015 + TEST_GEN_PROGS_x86_64 += x86_64/vmx_pi_mmio_test
8016 + TEST_GEN_PROGS_x86_64 += x86_64/sev_migrate_tests
8017 ++TEST_GEN_PROGS_x86_64 += access_tracking_perf_test
8018 + TEST_GEN_PROGS_x86_64 += demand_paging_test
8019 + TEST_GEN_PROGS_x86_64 += dirty_log_test
8020 + TEST_GEN_PROGS_x86_64 += dirty_log_perf_test
8021 +diff --git a/tools/testing/selftests/kvm/x86_64/smm_test.c b/tools/testing/selftests/kvm/x86_64/smm_test.c
8022 +index d0fe2fdce58c4..db2a17559c3d5 100644
8023 +--- a/tools/testing/selftests/kvm/x86_64/smm_test.c
8024 ++++ b/tools/testing/selftests/kvm/x86_64/smm_test.c
8025 +@@ -105,7 +105,6 @@ static void guest_code(void *arg)
8026 +
8027 + if (cpu_has_svm()) {
8028 + run_guest(svm->vmcb, svm->vmcb_gpa);
8029 +- svm->vmcb->save.rip += 3;
8030 + run_guest(svm->vmcb, svm->vmcb_gpa);
8031 + } else {
8032 + vmlaunch();
8033 +diff --git a/usr/include/Makefile b/usr/include/Makefile
8034 +index 1c2ae1368079d..adc6cb2587369 100644
8035 +--- a/usr/include/Makefile
8036 ++++ b/usr/include/Makefile
8037 +@@ -28,13 +28,13 @@ no-header-test += linux/am437x-vpfe.h
8038 + no-header-test += linux/android/binder.h
8039 + no-header-test += linux/android/binderfs.h
8040 + no-header-test += linux/coda.h
8041 ++no-header-test += linux/cyclades.h
8042 + no-header-test += linux/errqueue.h
8043 + no-header-test += linux/fsmap.h
8044 + no-header-test += linux/hdlc/ioctl.h
8045 + no-header-test += linux/ivtv.h
8046 + no-header-test += linux/kexec.h
8047 + no-header-test += linux/matroxfb.h
8048 +-no-header-test += linux/nfc.h
8049 + no-header-test += linux/omap3isp.h
8050 + no-header-test += linux/omapfb.h
8051 + no-header-test += linux/patchkey.h
8052 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
8053 +index 5bd62342c482b..71ddc7a8bc302 100644
8054 +--- a/virt/kvm/kvm_main.c
8055 ++++ b/virt/kvm/kvm_main.c
8056 +@@ -2112,7 +2112,6 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
8057 +
8058 + return NULL;
8059 + }
8060 +-EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_memslot);
8061 +
8062 + bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
8063 + {