
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.11 commit in: /
Date: Wed, 21 Apr 2021 12:03:13
Message-Id: 1619006573.3b662fd6a0e43b36dd236cc79c220988ed6965ca.mpagano@gentoo
commit: 3b662fd6a0e43b36dd236cc79c220988ed6965ca
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Apr 21 12:02:53 2021 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Apr 21 12:02:53 2021 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3b662fd6

Linux patch 5.11.16

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1015_linux-5.11.16.patch | 4084 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 4088 insertions(+)

diff --git a/0000_README b/0000_README
index 09827cb..e06ab59 100644
--- a/0000_README
+++ b/0000_README
@@ -103,6 +103,10 @@ Patch: 1014_linux-5.11.15.patch
From: http://www.kernel.org
Desc: Linux 5.11.15

+Patch: 1015_linux-5.11.16.patch
+From: http://www.kernel.org
+Desc: Linux 5.11.16
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1015_linux-5.11.16.patch b/1015_linux-5.11.16.patch
new file mode 100644
index 0000000..d3a96ae
--- /dev/null
+++ b/1015_linux-5.11.16.patch
@@ -0,0 +1,4084 @@
+diff --git a/Makefile b/Makefile
+index bcd8764fead98..124d8e2007765 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 11
+-SUBLEVEL = 15
++SUBLEVEL = 16
+ EXTRAVERSION =
+ NAME = 💕 Valentine's Day Edition 💕
+
+diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
+index a78d8f745a678..fdbe06c98895e 100644
+--- a/arch/arc/kernel/signal.c
++++ b/arch/arc/kernel/signal.c
+@@ -96,7 +96,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
+ sizeof(sf->uc.uc_mcontext.regs.scratch));
+ err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
+
+- return err;
++ return err ? -EFAULT : 0;
+ }
+
+ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
+@@ -110,7 +110,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
+ &(sf->uc.uc_mcontext.regs.scratch),
+ sizeof(sf->uc.uc_mcontext.regs.scratch));
+ if (err)
+- return err;
++ return -EFAULT;
+
+ set_current_blocked(&set);
+ regs->bta = uregs.scratch.bta;
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 138248999df74..3d2c684eab775 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -1310,9 +1310,15 @@ config KASAN_SHADOW_OFFSET
+
+ config NR_CPUS
+ int "Maximum number of CPUs (2-32)"
+- range 2 32
++ range 2 16 if DEBUG_KMAP_LOCAL
++ range 2 32 if !DEBUG_KMAP_LOCAL
+ depends on SMP
+ default "4"
++ help
++ The maximum number of CPUs that the kernel can support.
++ Up to 32 CPUs can be supported, or up to 16 if kmap_local()
++ debugging is enabled, which uses half of the per-CPU fixmap
++ slots as guard regions.
+
+ config HOTPLUG_CPU
+ bool "Support for hot-pluggable CPUs"
+diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
+index 72e4f6481776c..4a9f9496a8677 100644
+--- a/arch/arm/boot/dts/omap4.dtsi
++++ b/arch/arm/boot/dts/omap4.dtsi
+@@ -22,6 +22,11 @@
+ i2c1 = &i2c2;
+ i2c2 = &i2c3;
+ i2c3 = &i2c4;
++ mmc0 = &mmc1;
++ mmc1 = &mmc2;
++ mmc2 = &mmc3;
++ mmc3 = &mmc4;
++ mmc4 = &mmc5;
+ serial0 = &uart1;
+ serial1 = &uart2;
+ serial2 = &uart3;
+diff --git a/arch/arm/boot/dts/omap44xx-clocks.dtsi b/arch/arm/boot/dts/omap44xx-clocks.dtsi
+index 532868591107b..1f1c04d8f4721 100644
+--- a/arch/arm/boot/dts/omap44xx-clocks.dtsi
++++ b/arch/arm/boot/dts/omap44xx-clocks.dtsi
+@@ -770,14 +770,6 @@
+ ti,max-div = <2>;
+ };
+
+- sha2md5_fck: sha2md5_fck@15c8 {
+- #clock-cells = <0>;
+- compatible = "ti,gate-clock";
+- clocks = <&l3_div_ck>;
+- ti,bit-shift = <1>;
+- reg = <0x15c8>;
+- };
+-
+ usb_phy_cm_clk32k: usb_phy_cm_clk32k@640 {
+ #clock-cells = <0>;
+ compatible = "ti,gate-clock";
+diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
+index 5f1a8bd138804..c303510dfa97d 100644
+--- a/arch/arm/boot/dts/omap5.dtsi
++++ b/arch/arm/boot/dts/omap5.dtsi
+@@ -25,6 +25,11 @@
+ i2c2 = &i2c3;
+ i2c3 = &i2c4;
+ i2c4 = &i2c5;
++ mmc0 = &mmc1;
++ mmc1 = &mmc2;
++ mmc2 = &mmc3;
++ mmc3 = &mmc4;
++ mmc4 = &mmc5;
+ serial0 = &uart1;
+ serial1 = &uart2;
+ serial2 = &uart3;
+diff --git a/arch/arm/mach-footbridge/cats-pci.c b/arch/arm/mach-footbridge/cats-pci.c
+index 0b2fd7e2e9b42..90b1e9be430e9 100644
+--- a/arch/arm/mach-footbridge/cats-pci.c
++++ b/arch/arm/mach-footbridge/cats-pci.c
+@@ -15,14 +15,14 @@
+ #include <asm/mach-types.h>
+
+ /* cats host-specific stuff */
+-static int irqmap_cats[] __initdata = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 };
++static int irqmap_cats[] = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 };
+
+ static u8 cats_no_swizzle(struct pci_dev *dev, u8 *pin)
+ {
+ return 0;
+ }
+
+-static int __init cats_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
++static int cats_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+ {
+ if (dev->irq >= 255)
+ return -1; /* not a valid interrupt. */
+diff --git a/arch/arm/mach-footbridge/ebsa285-pci.c b/arch/arm/mach-footbridge/ebsa285-pci.c
+index 6f28aaa9ca79b..c3f280d08fa7f 100644
+--- a/arch/arm/mach-footbridge/ebsa285-pci.c
++++ b/arch/arm/mach-footbridge/ebsa285-pci.c
+@@ -14,9 +14,9 @@
+ #include <asm/mach/pci.h>
+ #include <asm/mach-types.h>
+
+-static int irqmap_ebsa285[] __initdata = { IRQ_IN3, IRQ_IN1, IRQ_IN0, IRQ_PCI };
++static int irqmap_ebsa285[] = { IRQ_IN3, IRQ_IN1, IRQ_IN0, IRQ_PCI };
+
+-static int __init ebsa285_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
++static int ebsa285_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+ {
+ if (dev->vendor == PCI_VENDOR_ID_CONTAQ &&
+ dev->device == PCI_DEVICE_ID_CONTAQ_82C693)
+diff --git a/arch/arm/mach-footbridge/netwinder-pci.c b/arch/arm/mach-footbridge/netwinder-pci.c
+index 9473aa0305e5f..e8304392074b8 100644
+--- a/arch/arm/mach-footbridge/netwinder-pci.c
++++ b/arch/arm/mach-footbridge/netwinder-pci.c
+@@ -18,7 +18,7 @@
+ * We now use the slot ID instead of the device identifiers to select
+ * which interrupt is routed where.
+ */
+-static int __init netwinder_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
++static int netwinder_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+ {
+ switch (slot) {
+ case 0: /* host bridge */
+diff --git a/arch/arm/mach-footbridge/personal-pci.c b/arch/arm/mach-footbridge/personal-pci.c
+index 4391e433a4b2f..9d19aa98a663e 100644
+--- a/arch/arm/mach-footbridge/personal-pci.c
++++ b/arch/arm/mach-footbridge/personal-pci.c
+@@ -14,13 +14,12 @@
+ #include <asm/mach/pci.h>
+ #include <asm/mach-types.h>
+
+-static int irqmap_personal_server[] __initdata = {
++static int irqmap_personal_server[] = {
+ IRQ_IN0, IRQ_IN1, IRQ_IN2, IRQ_IN3, 0, 0, 0,
+ IRQ_DOORBELLHOST, IRQ_DMA1, IRQ_DMA2, IRQ_PCI
+ };
+
+-static int __init personal_server_map_irq(const struct pci_dev *dev, u8 slot,
+- u8 pin)
++static int personal_server_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+ {
+ unsigned char line;
+
+diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
+index cd711bfc591f2..2c647bdf8d258 100644
+--- a/arch/arm/mach-keystone/keystone.c
++++ b/arch/arm/mach-keystone/keystone.c
+@@ -65,7 +65,7 @@ static void __init keystone_init(void)
+ static long long __init keystone_pv_fixup(void)
+ {
+ long long offset;
+- phys_addr_t mem_start, mem_end;
++ u64 mem_start, mem_end;
+
+ mem_start = memblock_start_of_DRAM();
+ mem_end = memblock_end_of_DRAM();
+@@ -78,7 +78,7 @@ static long long __init keystone_pv_fixup(void)
+ if (mem_start < KEYSTONE_HIGH_PHYS_START ||
+ mem_end > KEYSTONE_HIGH_PHYS_END) {
+ pr_crit("Invalid address space for memory (%08llx-%08llx)\n",
+- (u64)mem_start, (u64)mem_end);
++ mem_start, mem_end);
+ return 0;
+ }
+
+diff --git a/arch/arm/mach-omap1/ams-delta-fiq-handler.S b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
+index 14a6c3eb32985..f745a65d3bd7a 100644
+--- a/arch/arm/mach-omap1/ams-delta-fiq-handler.S
++++ b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
+@@ -15,6 +15,7 @@
+ #include <linux/platform_data/gpio-omap.h>
+
+ #include <asm/assembler.h>
++#include <asm/irq.h>
+
+ #include "ams-delta-fiq.h"
+ #include "board-ams-delta.h"
+diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
+index 7290f033fd2da..1610c567a6a3a 100644
+--- a/arch/arm/mach-omap2/board-generic.c
++++ b/arch/arm/mach-omap2/board-generic.c
+@@ -33,7 +33,7 @@ static void __init __maybe_unused omap_generic_init(void)
+ }
+
+ /* Clocks are needed early, see drivers/clocksource for the rest */
+-void __init __maybe_unused omap_init_time_of(void)
++static void __init __maybe_unused omap_init_time_of(void)
+ {
+ omap_clk_init();
+ timer_probe();
+diff --git a/arch/arm/mach-omap2/sr_device.c b/arch/arm/mach-omap2/sr_device.c
+index 17b66f0d0deef..605925684b0aa 100644
+--- a/arch/arm/mach-omap2/sr_device.c
++++ b/arch/arm/mach-omap2/sr_device.c
+@@ -188,7 +188,7 @@ static const char * const dra7_sr_instances[] = {
+
+ int __init omap_devinit_smartreflex(void)
+ {
+- const char * const *sr_inst;
++ const char * const *sr_inst = NULL;
+ int i, nr_sr = 0;
+
+ if (soc_is_omap44xx()) {
+diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
+index c06ebfbc48c4a..56c7954cb6268 100644
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -388,8 +388,7 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
+ pte_t *pte = pte_offset_fixmap(pmd_off_k(vaddr), vaddr);
+
+ /* Make sure fixmap region does not exceed available allocation. */
+- BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
+- FIXADDR_END);
++ BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) < FIXADDR_START);
+ BUG_ON(idx >= __end_of_fixed_addresses);
+
+ /* we only support device mappings until pgprot_kernel has been set */
+diff --git a/arch/arm/mm/pmsa-v7.c b/arch/arm/mm/pmsa-v7.c
+index 88950e41a3a9e..59d916ccdf25f 100644
+--- a/arch/arm/mm/pmsa-v7.c
++++ b/arch/arm/mm/pmsa-v7.c
+@@ -235,6 +235,7 @@ void __init pmsav7_adjust_lowmem_bounds(void)
+ phys_addr_t mem_end;
+ phys_addr_t reg_start, reg_end;
+ unsigned int mem_max_regions;
++ bool first = true;
+ int num;
+ u64 i;
+
+@@ -263,7 +264,7 @@ void __init pmsav7_adjust_lowmem_bounds(void)
+ #endif
+
+ for_each_mem_range(i, &reg_start, &reg_end) {
+- if (i == 0) {
++ if (first) {
+ phys_addr_t phys_offset = PHYS_OFFSET;
+
+ /*
+@@ -275,6 +276,7 @@ void __init pmsav7_adjust_lowmem_bounds(void)
+ mem_start = reg_start;
+ mem_end = reg_end;
+ specified_mem_size = mem_end - mem_start;
++ first = false;
+ } else {
+ /*
+ * memblock auto merges contiguous blocks, remove
+diff --git a/arch/arm/mm/pmsa-v8.c b/arch/arm/mm/pmsa-v8.c
+index 2de019f7503e8..8359748a19a11 100644
+--- a/arch/arm/mm/pmsa-v8.c
++++ b/arch/arm/mm/pmsa-v8.c
+@@ -95,10 +95,11 @@ void __init pmsav8_adjust_lowmem_bounds(void)
+ {
+ phys_addr_t mem_end;
+ phys_addr_t reg_start, reg_end;
++ bool first = true;
+ u64 i;
+
+ for_each_mem_range(i, &reg_start, &reg_end) {
+- if (i == 0) {
++ if (first) {
+ phys_addr_t phys_offset = PHYS_OFFSET;
+
+ /*
+@@ -107,6 +108,7 @@ void __init pmsav8_adjust_lowmem_bounds(void)
+ if (reg_start != phys_offset)
+ panic("First memory bank must be contiguous from PHYS_OFFSET");
+ mem_end = reg_end;
++ first = false;
+ } else {
+ /*
+ * memblock auto merges contiguous blocks, remove
+diff --git a/arch/arm/probes/uprobes/core.c b/arch/arm/probes/uprobes/core.c
+index c4b49b322e8a8..f5f790c6e5f89 100644
+--- a/arch/arm/probes/uprobes/core.c
++++ b/arch/arm/probes/uprobes/core.c
+@@ -204,7 +204,7 @@ unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
+ static struct undef_hook uprobes_arm_break_hook = {
+ .instr_mask = 0x0fffffff,
+ .instr_val = (UPROBE_SWBP_ARM_INSN & 0x0fffffff),
+- .cpsr_mask = MODE_MASK,
++ .cpsr_mask = (PSR_T_BIT | MODE_MASK),
+ .cpsr_val = USR_MODE,
+ .fn = uprobe_trap_handler,
+ };
+@@ -212,7 +212,7 @@ static struct undef_hook uprobes_arm_break_hook = {
+ static struct undef_hook uprobes_arm_ss_hook = {
+ .instr_mask = 0x0fffffff,
+ .instr_val = (UPROBE_SS_ARM_INSN & 0x0fffffff),
+- .cpsr_mask = MODE_MASK,
++ .cpsr_mask = (PSR_T_BIT | MODE_MASK),
+ .cpsr_val = USR_MODE,
+ .fn = uprobe_trap_handler,
+ };
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 2517dd8c5a4d1..cd7f725b80d40 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -1399,10 +1399,13 @@ config ARM64_PAN
+ config AS_HAS_LDAPR
+ def_bool $(as-instr,.arch_extension rcpc)
+
++config AS_HAS_LSE_ATOMICS
++ def_bool $(as-instr,.arch_extension lse)
++
+ config ARM64_LSE_ATOMICS
+ bool
+ default ARM64_USE_LSE_ATOMICS
+- depends on $(as-instr,.arch_extension lse)
++ depends on AS_HAS_LSE_ATOMICS
+
+ config ARM64_USE_LSE_ATOMICS
+ bool "Atomic instructions"
+@@ -1659,6 +1662,7 @@ config ARM64_MTE
+ default y
+ depends on ARM64_AS_HAS_MTE && ARM64_TAGGED_ADDR_ABI
+ depends on AS_HAS_ARMV8_5
++ depends on AS_HAS_LSE_ATOMICS
+ # Required for tag checking in the uaccess routines
+ depends on ARM64_PAN
+ select ARCH_USES_HIGH_VMA_FLAGS
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
+index 302e24be0a318..a1f621b388fe7 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
+@@ -8,3 +8,7 @@
+ compatible = "pine64,pine64-lts", "allwinner,sun50i-r18",
+ "allwinner,sun50i-a64";
+ };
++
++&mmc0 {
++ cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 push-push switch */
++};
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
+index 3402cec87035b..df62044ff7a7a 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
+@@ -34,7 +34,7 @@
+ vmmc-supply = <&reg_dcdc1>;
+ disable-wp;
+ bus-width = <4>;
+- cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */
++ cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; /* PF6 push-pull switch */
+ status = "okay";
+ };
+
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
+index 7c9dbde645b52..e8163c572daba 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
+@@ -289,10 +289,6 @@
+ vcc-pm-supply = <&reg_aldo1>;
+ };
+
+-&rtc {
+- clocks = <&ext_osc32k>;
+-};
+-
+ &spdif {
+ status = "okay";
+ };
+diff --git a/arch/arm64/include/asm/alternative-macros.h b/arch/arm64/include/asm/alternative-macros.h
+index 5df500dcc627a..8a078fc662ac5 100644
+--- a/arch/arm64/include/asm/alternative-macros.h
++++ b/arch/arm64/include/asm/alternative-macros.h
+@@ -97,9 +97,9 @@
+ .popsection
+ .subsection 1
+ 663: \insn2
+-664: .previous
+- .org . - (664b-663b) + (662b-661b)
++664: .org . - (664b-663b) + (662b-661b)
+ .org . - (662b-661b) + (664b-663b)
++ .previous
+ .endif
+ .endm
+
+@@ -169,11 +169,11 @@
+ */
+ .macro alternative_endif
+ 664:
++ .org . - (664b-663b) + (662b-661b)
++ .org . - (662b-661b) + (664b-663b)
+ .if .Lasm_alt_mode==0
+ .previous
+ .endif
+- .org . - (664b-663b) + (662b-661b)
+- .org . - (662b-661b) + (664b-663b)
+ .endm
+
+ /*
+diff --git a/arch/arm64/include/asm/word-at-a-time.h b/arch/arm64/include/asm/word-at-a-time.h
+index 3333950b59093..ea487218db790 100644
+--- a/arch/arm64/include/asm/word-at-a-time.h
++++ b/arch/arm64/include/asm/word-at-a-time.h
+@@ -53,7 +53,7 @@ static inline unsigned long find_zero(unsigned long mask)
+ */
+ static inline unsigned long load_unaligned_zeropad(const void *addr)
+ {
+- unsigned long ret, offset;
++ unsigned long ret, tmp;
+
+ /* Load word from unaligned pointer addr */
+ asm(
+@@ -61,9 +61,9 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
+ "2:\n"
+ " .pushsection .fixup,\"ax\"\n"
+ " .align 2\n"
+- "3: and %1, %2, #0x7\n"
+- " bic %2, %2, #0x7\n"
+- " ldr %0, [%2]\n"
++ "3: bic %1, %2, #0x7\n"
++ " ldr %0, [%1]\n"
++ " and %1, %2, #0x7\n"
+ " lsl %1, %1, #0x3\n"
+ #ifndef __AARCH64EB__
+ " lsr %0, %0, %1\n"
+@@ -73,7 +73,7 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
+ " b 2b\n"
+ " .popsection\n"
+ _ASM_EXTABLE(1b, 3b)
+- : "=&r" (ret), "=&r" (offset)
++ : "=&r" (ret), "=&r" (tmp)
+ : "r" (addr), "Q" (*(unsigned long *)addr));
+
+ return ret;
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index c9bae73f2621a..14d5119489fe1 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -148,16 +148,18 @@ alternative_cb_end
+ .endm
+
+ /* Check for MTE asynchronous tag check faults */
+- .macro check_mte_async_tcf, flgs, tmp
++ .macro check_mte_async_tcf, tmp, ti_flags
+ #ifdef CONFIG_ARM64_MTE
++ .arch_extension lse
+ alternative_if_not ARM64_MTE
+ b 1f
+ alternative_else_nop_endif
+ mrs_s \tmp, SYS_TFSRE0_EL1
+ tbz \tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
+ /* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
+- orr \flgs, \flgs, #_TIF_MTE_ASYNC_FAULT
+- str \flgs, [tsk, #TSK_TI_FLAGS]
++ mov \tmp, #_TIF_MTE_ASYNC_FAULT
++ add \ti_flags, tsk, #TSK_TI_FLAGS
++ stset \tmp, [\ti_flags]
+ msr_s SYS_TFSRE0_EL1, xzr
+ 1:
+ #endif
+@@ -244,7 +246,7 @@ alternative_else_nop_endif
+ disable_step_tsk x19, x20
+
+ /* Check for asynchronous tag check faults in user space */
+- check_mte_async_tcf x19, x22
++ check_mte_async_tcf x22, x23
+ apply_ssbd 1, x22, x23
+
+ ptrauth_keys_install_kernel tsk, x20, x22, x23
+diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
+index 6bdef7362c0eb..7c44ede122a94 100644
+--- a/arch/arm64/kernel/sleep.S
++++ b/arch/arm64/kernel/sleep.S
+@@ -133,7 +133,7 @@ SYM_FUNC_START(_cpu_resume)
+ */
+ bl cpu_do_resume
+
+-#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
++#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
+ mov x0, sp
+ bl kasan_unpoison_task_stack_below
+ #endif
+diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig
+index ca0d596c800d8..8916a2850c48b 100644
+--- a/arch/ia64/configs/generic_defconfig
++++ b/arch/ia64/configs/generic_defconfig
+@@ -55,8 +55,6 @@ CONFIG_CHR_DEV_SG=m
+ CONFIG_SCSI_FC_ATTRS=y
+ CONFIG_SCSI_SYM53C8XX_2=y
+ CONFIG_SCSI_QLOGIC_1280=y
+-CONFIG_ATA=y
+-CONFIG_ATA_PIIX=y
+ CONFIG_SATA_VITESSE=y
+ CONFIG_MD=y
+ CONFIG_BLK_DEV_MD=m
+diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
+index 934cbdf6dd10e..30eddc69c9cf5 100644
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -775,7 +775,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
+ else
+ prepare_save_user_regs(1);
+
+- if (!user_write_access_begin(frame, sizeof(*frame)))
++ if (!user_access_begin(frame, sizeof(*frame)))
+ goto badframe;
+
+ /* Put the siginfo & fill in most of the ucontext */
+@@ -809,17 +809,15 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
+ unsafe_put_user(PPC_INST_ADDI + __NR_rt_sigreturn, &mctx->mc_pad[0],
+ failed);
+ unsafe_put_user(PPC_INST_SC, &mctx->mc_pad[1], failed);
++ asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
+ }
+ unsafe_put_sigset_t(&frame->uc.uc_sigmask, oldset, failed);
+
+- user_write_access_end();
++ user_access_end();
+
+ if (copy_siginfo_to_user(&frame->info, &ksig->info))
+ goto badframe;
+
+- if (tramp == (unsigned long)mctx->mc_pad)
+- flush_icache_range(tramp, tramp + 2 * sizeof(unsigned long));
+-
+ regs->link = tramp;
+
+ #ifdef CONFIG_PPC_FPU_REGS
+@@ -844,7 +842,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
+ return 0;
+
failed:
+- user_write_access_end();
++ user_access_end();
+
badframe:
+ signal_fault(tsk, regs, "handle_rt_signal32", frame);
+@@ -879,7 +877,7 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
+ else
+ prepare_save_user_regs(1);
+
+- if (!user_write_access_begin(frame, sizeof(*frame)))
++ if (!user_access_begin(frame, sizeof(*frame)))
+ goto badframe;
+ sc = (struct sigcontext __user *) &frame->sctx;
+
+@@ -908,11 +906,9 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
+ /* Set up the sigreturn trampoline: li r0,sigret; sc */
+ unsafe_put_user(PPC_INST_ADDI + __NR_sigreturn, &mctx->mc_pad[0], failed);
+ unsafe_put_user(PPC_INST_SC, &mctx->mc_pad[1], failed);
++ asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
+ }
+- user_write_access_end();
+-
+- if (tramp == (unsigned long)mctx->mc_pad)
+- flush_icache_range(tramp, tramp + 2 * sizeof(unsigned long));
++ user_access_end();
+
+ regs->link = tramp;
+
+@@ -934,7 +930,7 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
+ return 0;
+
failed:
+- user_write_access_end();
++ user_access_end();
+
badframe:
+ signal_fault(tsk, regs, "handle_signal32", frame);
+diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
+index e6d569ae817d2..7c9bfdbd7813a 100644
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -147,7 +147,7 @@ config ARCH_FLATMEM_ENABLE
+ config ARCH_SPARSEMEM_ENABLE
+ def_bool y
+ depends on MMU
+- select SPARSEMEM_STATIC if 32BIT && SPARSMEM
++ select SPARSEMEM_STATIC if 32BIT && SPARSEMEM
+ select SPARSEMEM_VMEMMAP_ENABLE if 64BIT
+
+ config ARCH_SELECT_MEMORY_MODEL
+diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
+index 5d3a0b8fd3798..c7f412f4e07d6 100644
+--- a/arch/x86/kernel/acpi/wakeup_64.S
++++ b/arch/x86/kernel/acpi/wakeup_64.S
+@@ -112,7 +112,7 @@ SYM_FUNC_START(do_suspend_lowlevel)
+ movq pt_regs_r14(%rax), %r14
+ movq pt_regs_r15(%rax), %r15
+
+-#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
++#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
+ /*
+ * The suspend path may have poisoned some areas deeper in the stack,
+ * which we now need to unpoison.
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index df964571a6b43..54a3048ebc5b9 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -1046,9 +1046,6 @@ void __init setup_arch(char **cmdline_p)
+
+ cleanup_highmap();
+
+- /* Look for ACPI tables and reserve memory occupied by them. */
+- acpi_boot_table_init();
+-
+ memblock_set_current_limit(ISA_END_ADDRESS);
+ e820__memblock_setup();
+
+@@ -1133,6 +1130,8 @@ void __init setup_arch(char **cmdline_p)
+ reserve_initrd();
+
+ acpi_table_upgrade();
++ /* Look for ACPI tables and reserve memory occupied by them. */
++ acpi_boot_table_init();
+
+ vsmp_init();
+
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index f2b9bfb582067..cb48236cc24d6 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -3330,7 +3330,11 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ enum vm_entry_failure_code entry_failure_code;
+ bool evaluate_pending_interrupts;
+- u32 exit_reason, failed_index;
++ union vmx_exit_reason exit_reason = {
++ .basic = EXIT_REASON_INVALID_STATE,
++ .failed_vmentry = 1,
++ };
++ u32 failed_index;
+
+ if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
+ kvm_vcpu_flush_tlb_current(vcpu);
+@@ -3382,7 +3386,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
+
+ if (nested_vmx_check_guest_state(vcpu, vmcs12,
+ &entry_failure_code)) {
+- exit_reason = EXIT_REASON_INVALID_STATE;
++ exit_reason.basic = EXIT_REASON_INVALID_STATE;
+ vmcs12->exit_qualification = entry_failure_code;
+ goto vmentry_fail_vmexit;
+ }
+@@ -3393,7 +3397,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
+ vcpu->arch.tsc_offset += vmcs12->tsc_offset;
+
+ if (prepare_vmcs02(vcpu, vmcs12, &entry_failure_code)) {
+- exit_reason = EXIT_REASON_INVALID_STATE;
++ exit_reason.basic = EXIT_REASON_INVALID_STATE;
+ vmcs12->exit_qualification = entry_failure_code;
+ goto vmentry_fail_vmexit_guest_mode;
+ }
+@@ -3403,7 +3407,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
+ vmcs12->vm_entry_msr_load_addr,
+ vmcs12->vm_entry_msr_load_count);
+ if (failed_index) {
+- exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
++ exit_reason.basic = EXIT_REASON_MSR_LOAD_FAIL;
+ vmcs12->exit_qualification = failed_index;
+ goto vmentry_fail_vmexit_guest_mode;
+ }
+@@ -3471,7 +3475,7 @@ vmentry_fail_vmexit:
+ return NVMX_VMENTRY_VMEXIT;
+
+ load_vmcs12_host_state(vcpu, vmcs12);
+- vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
++ vmcs12->vm_exit_reason = exit_reason.full;
+ if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
+ vmx->nested.need_vmcs12_to_shadow_sync = true;
+ return NVMX_VMENTRY_VMEXIT;
+@@ -5559,7 +5563,12 @@ static int handle_vmfunc(struct kvm_vcpu *vcpu)
+ return kvm_skip_emulated_instruction(vcpu);
+
+ fail:
+- nested_vmx_vmexit(vcpu, vmx->exit_reason,
++ /*
++ * This is effectively a reflected VM-Exit, as opposed to a synthesized
++ * nested VM-Exit. Pass the original exit reason, i.e. don't hardcode
++ * EXIT_REASON_VMFUNC as the exit reason.
++ */
++ nested_vmx_vmexit(vcpu, vmx->exit_reason.full,
+ vmx_get_intr_info(vcpu),
+ vmx_get_exit_qual(vcpu));
+ return 1;
+@@ -5627,7 +5636,8 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
+ * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
+ */
+ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
+- struct vmcs12 *vmcs12, u32 exit_reason)
++ struct vmcs12 *vmcs12,
++ union vmx_exit_reason exit_reason)
+ {
+ u32 msr_index = kvm_rcx_read(vcpu);
+ gpa_t bitmap;
+
+@@ -5641,7 +5651,7 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
+ * First we need to figure out which of the four to use:
+ */
+ bitmap = vmcs12->msr_bitmap;
+- if (exit_reason == EXIT_REASON_MSR_WRITE)
++ if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
+ bitmap += 2048;
+ if (msr_index >= 0xc0000000) {
+ msr_index -= 0xc0000000;
+@@ -5778,11 +5788,12 @@ static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12)
+ * Return true if L0 wants to handle an exit from L2 regardless of whether or not
+ * L1 wants the exit. Only call this when in is_guest_mode (L2).
+ */
+-static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
++static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
++ union vmx_exit_reason exit_reason)
+ {
+ u32 intr_info;
+
+- switch ((u16)exit_reason) {
++ switch ((u16)exit_reason.basic) {
+ case EXIT_REASON_EXCEPTION_NMI:
+ intr_info = vmx_get_intr_info(vcpu);
+ if (is_nmi(intr_info))
+@@ -5838,12 +5849,13 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
+ * Return 1 if L1 wants to intercept an exit from L2. Only call this when in
+ * is_guest_mode (L2).
+ */
+-static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
++static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu,
++ union vmx_exit_reason exit_reason)
+ {
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ u32 intr_info;
+
+- switch ((u16)exit_reason) {
++ switch ((u16)exit_reason.basic) {
+ case EXIT_REASON_EXCEPTION_NMI:
+ intr_info = vmx_get_intr_info(vcpu);
+ if (is_nmi(intr_info))
+@@ -5962,7 +5974,7 @@ static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
+ bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
+ {
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+- u32 exit_reason = vmx->exit_reason;
++ union vmx_exit_reason exit_reason = vmx->exit_reason;
+ unsigned long exit_qual;
+ u32 exit_intr_info;
+
+@@ -5981,7 +5993,7 @@ bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
+ goto reflect_vmexit;
+ }
+
+- trace_kvm_nested_vmexit(exit_reason, vcpu, KVM_ISA_VMX);
++ trace_kvm_nested_vmexit(exit_reason.full, vcpu, KVM_ISA_VMX);
+
+ /* If L0 (KVM) wants the exit, it trumps L1's desires. */
+ if (nested_vmx_l0_wants_exit(vcpu, exit_reason))
+@@ -6007,7 +6019,7 @@ bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
+ exit_qual = vmx_get_exit_qual(vcpu);
+
+ reflect_vmexit:
+- nested_vmx_vmexit(vcpu, exit_reason, exit_intr_info, exit_qual);
++ nested_vmx_vmexit(vcpu, exit_reason.full, exit_intr_info, exit_qual);
+ return true;
+ }
+
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index eb69fef57485d..95f836fbceb27 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -1577,7 +1577,7 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
+ * i.e. we end up advancing IP with some random value.
+ */
+ if (!static_cpu_has(X86_FEATURE_HYPERVISOR) ||
+- to_vmx(vcpu)->exit_reason != EXIT_REASON_EPT_MISCONFIG) {
++ to_vmx(vcpu)->exit_reason.basic != EXIT_REASON_EPT_MISCONFIG) {
+ orig_rip = kvm_rip_read(vcpu);
+ rip = orig_rip + vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+ #ifdef CONFIG_X86_64
+@@ -5667,7 +5667,7 @@ static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2,
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ *info1 = vmx_get_exit_qual(vcpu);
+- if (!(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
++ if (!(vmx->exit_reason.failed_vmentry)) {
+ *info2 = vmx->idt_vectoring_info;
+ *intr_info = vmx_get_intr_info(vcpu);
+ if (is_exception_with_error_code(*intr_info))
+@@ -5911,8 +5911,9 @@ void dump_vmcs(void)
+ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
+ {
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+- u32 exit_reason = vmx->exit_reason;
++ union vmx_exit_reason exit_reason = vmx->exit_reason;
+ u32 vectoring_info = vmx->idt_vectoring_info;
++ u16 exit_handler_index;
+
+ /*
+ * Flush logged GPAs PML buffer, this will make dirty_bitmap more
+@@ -5954,11 +5955,11 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
+ return 1;
+ }
+
+- if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
++ if (exit_reason.failed_vmentry) {
+ dump_vmcs();
+ vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+ vcpu->run->fail_entry.hardware_entry_failure_reason
+- = exit_reason;
++ = exit_reason.full;
+ vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
+ return 0;
+ }
+@@ -5980,24 +5981,24 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
+ * will cause infinite loop.
+ */
+ if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
+- (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
+- exit_reason != EXIT_REASON_EPT_VIOLATION &&
+- exit_reason != EXIT_REASON_PML_FULL &&
+- exit_reason != EXIT_REASON_APIC_ACCESS &&
+- exit_reason != EXIT_REASON_TASK_SWITCH)) {
++ (exit_reason.basic != EXIT_REASON_EXCEPTION_NMI &&
++ exit_reason.basic != EXIT_REASON_EPT_VIOLATION &&
++ exit_reason.basic != EXIT_REASON_PML_FULL &&
++ exit_reason.basic != EXIT_REASON_APIC_ACCESS &&
++ exit_reason.basic != EXIT_REASON_TASK_SWITCH)) {
++ int ndata = 3;
++
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
+- vcpu->run->internal.ndata = 3;
+ vcpu->run->internal.data[0] = vectoring_info;
+- vcpu->run->internal.data[1] = exit_reason;
++ vcpu->run->internal.data[1] = exit_reason.full;
+ vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
+- if (exit_reason == EXIT_REASON_EPT_MISCONFIG) {
+- vcpu->run->internal.ndata++;
+- vcpu->run->internal.data[3] =
++ if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG) {
++ vcpu->run->internal.data[ndata++] =
+ vmcs_read64(GUEST_PHYSICAL_ADDRESS);
+ }
+- vcpu->run->internal.data[vcpu->run->internal.ndata++] =
+- vcpu->arch.last_vmentry_cpu;
++ vcpu->run->internal.data[ndata++] = vcpu->arch.last_vmentry_cpu;
++ vcpu->run->internal.ndata = ndata;
+ return 0;
+ }
+
+@@ -6023,38 +6024,39 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
+ if (exit_fastpath != EXIT_FASTPATH_NONE)
+ return 1;
+
+- if (exit_reason >= kvm_vmx_max_exit_handlers)
++ if (exit_reason.basic >= kvm_vmx_max_exit_handlers)
+ goto unexpected_vmexit;
+ #ifdef CONFIG_RETPOLINE
+- if (exit_reason == EXIT_REASON_MSR_WRITE)
++ if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
+ return kvm_emulate_wrmsr(vcpu);
+- else if (exit_reason == EXIT_REASON_PREEMPTION_TIMER)
++ else if (exit_reason.basic == EXIT_REASON_PREEMPTION_TIMER)
+ return handle_preemption_timer(vcpu);
+- else if (exit_reason == EXIT_REASON_INTERRUPT_WINDOW)
++ else if (exit_reason.basic == EXIT_REASON_INTERRUPT_WINDOW)
+ return handle_interrupt_window(vcpu);
+- else if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
++ else if (exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
+ return handle_external_interrupt(vcpu);
+- else if (exit_reason == EXIT_REASON_HLT)
++ else if (exit_reason.basic == EXIT_REASON_HLT)
+ return kvm_emulate_halt(vcpu);
+- else if (exit_reason == EXIT_REASON_EPT_MISCONFIG)
++ else if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG)
+ return handle_ept_misconfig(vcpu);
+ #endif
+
+- exit_reason = array_index_nospec(exit_reason,
+- kvm_vmx_max_exit_handlers);
+- if (!kvm_vmx_exit_handlers[exit_reason])
++ exit_handler_index = array_index_nospec((u16)exit_reason.basic,
++ kvm_vmx_max_exit_handlers);
++ if (!kvm_vmx_exit_handlers[exit_handler_index])
+ goto unexpected_vmexit;
+
+- return kvm_vmx_exit_handlers[exit_reason](vcpu);
+
++ return kvm_vmx_exit_handlers[exit_handler_index](vcpu);
+
unexpected_vmexit:
+- vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n", exit_reason);
++ vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
++ exit_reason.full);
+ dump_vmcs();
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror =
+ KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
+ vcpu->run->internal.ndata = 2;
+- vcpu->run->internal.data[0] = exit_reason;
++ vcpu->run->internal.data[0] = exit_reason.full;
+ vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
+ return 0;
+ }
+@@ -6373,9 +6375,9 @@ static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
+ {
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+- if (vmx->exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
++ if (vmx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
+ handle_external_interrupt_irqoff(vcpu);
+- else if (vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI)
++ else if (vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI)
+ handle_exception_nmi_irqoff(vmx);
+ }
+
+@@ -6567,7 +6569,7 @@ void noinstr vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
+
+ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
+ {
+- switch (to_vmx(vcpu)->exit_reason) {
++ switch (to_vmx(vcpu)->exit_reason.basic) {
+ case EXIT_REASON_MSR_WRITE:
+ return handle_fastpath_set_msr_irqoff(vcpu);
+ case EXIT_REASON_PREEMPTION_TIMER:
+@@ -6768,17 +6770,17 @@ reenter_guest:
+ vmx->idt_vectoring_info = 0;
+
+ if (unlikely(vmx->fail)) {
+- vmx->exit_reason = 0xdead;
++ vmx->exit_reason.full = 0xdead;
+ return EXIT_FASTPATH_NONE;
+ }
+
+- vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
+- if (unlikely((u16)vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY))
++ vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON);
++ if (unlikely((u16)vmx->exit_reason.basic == EXIT_REASON_MCE_DURING_VMENTRY))
+ kvm_machine_check();
+
+- trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX);
++ trace_kvm_exit(vmx->exit_reason.full, vcpu, KVM_ISA_VMX);
+
+- if (unlikely(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
++ if (unlikely(vmx->exit_reason.failed_vmentry))
+ return EXIT_FASTPATH_NONE;
+
+ vmx->loaded_vmcs->launched = 1;
+diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
+index 9d3a557949ac2..4dd71b7494eac 100644
+--- a/arch/x86/kvm/vmx/vmx.h
++++ b/arch/x86/kvm/vmx/vmx.h
+@@ -70,6 +70,29 @@ struct pt_desc {
+ struct pt_ctx guest;
+ };
+
++union vmx_exit_reason {
++ struct {
++ u32 basic : 16;
++ u32 reserved16 : 1;
++ u32 reserved17 : 1;
++ u32 reserved18 : 1;
++ u32 reserved19 : 1;
++ u32 reserved20 : 1;
++ u32 reserved21 : 1;
++ u32 reserved22 : 1;
++ u32 reserved23 : 1;
++ u32 reserved24 : 1;
++ u32 reserved25 : 1;
++ u32 reserved26 : 1;
++ u32 enclave_mode : 1;
++ u32 smi_pending_mtf : 1;
++ u32 smi_from_vmx_root : 1;
++ u32 reserved30 : 1;
++ u32 failed_vmentry : 1;
++ };
++ u32 full;
++};
++
+ /*
+ * The nested_vmx structure is part of vcpu_vmx, and holds information we need
+ * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
+@@ -244,7 +267,7 @@ struct vcpu_vmx {
+ int vpid;
+ bool emulation_required;
+
+- u32 exit_reason;
++ union vmx_exit_reason exit_reason;
+
+ /* Posted interrupt descriptor */
+ struct pi_desc pi_desc;
+diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
+index fe6a460c43735..af3ee288bc117 100644
+--- a/drivers/dma/dmaengine.c
++++ b/drivers/dma/dmaengine.c
+@@ -1086,6 +1086,7 @@ static int __dma_async_device_channel_register(struct dma_device *device,
+ kfree(chan->dev);
+ err_free_local:
+ free_percpu(chan->local);
++ chan->local = NULL;
+ return rc;
+ }
+
+diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
+index e5162690de8f1..db25f9b7778c9 100644
+--- a/drivers/dma/dw/Kconfig
++++ b/drivers/dma/dw/Kconfig
+@@ -10,6 +10,7 @@ config DW_DMAC_CORE
+
+ config DW_DMAC
+ tristate "Synopsys DesignWare AHB DMA platform driver"
++ depends on HAS_IOMEM
+ select DW_DMAC_CORE
+ help
+ Support the Synopsys DesignWare AHB DMA controller. This
+@@ -18,6 +19,7 @@ config DW_DMAC
+ config DW_DMAC_PCI
+ tristate "Synopsys DesignWare AHB DMA PCI driver"
+ depends on PCI
++ depends on HAS_IOMEM
+ select DW_DMAC_CORE
+ help
+ Support the Synopsys DesignWare AHB DMA controller on the
+diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
+index 84a6ea60ecf0b..31c819544a229 100644
+--- a/drivers/dma/idxd/device.c
++++ b/drivers/dma/idxd/device.c
+@@ -282,6 +282,22 @@ void idxd_wq_drain(struct idxd_wq *wq)
+ idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
+ }
+
++void idxd_wq_reset(struct idxd_wq *wq)
++{
++ struct idxd_device *idxd = wq->idxd;
++ struct device *dev = &idxd->pdev->dev;
++ u32 operand;
++
++ if (wq->state != IDXD_WQ_ENABLED) {
++ dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
++ return;
++ }
++
++ operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
++ idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
++ wq->state = IDXD_WQ_DISABLED;
++}
++
+ int idxd_wq_map_portal(struct idxd_wq *wq)
+ {
+ struct idxd_device *idxd = wq->idxd;
+@@ -363,8 +379,6 @@ int idxd_wq_disable_pasid(struct idxd_wq *wq)
+ void idxd_wq_disable_cleanup(struct idxd_wq *wq)
+ {
+ struct idxd_device *idxd = wq->idxd;
+- struct device *dev = &idxd->pdev->dev;
+- int i, wq_offset;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ memset(wq->wqcfg, 0, idxd->wqcfg_size);
+@@ -376,14 +390,6 @@ void idxd_wq_disable_cleanup(struct idxd_wq *wq)
+ wq->ats_dis = 0;
+ clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
+ memset(wq->name, 0, WQ_NAME_SIZE);
+-
+- for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
+- wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
+- iowrite32(0, idxd->reg_base + wq_offset);
+- dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
+- wq->id, i, wq_offset,
+- ioread32(idxd->reg_base + wq_offset));
+- }
+ }
+
+ /* Device control bits */
+@@ -574,6 +580,36 @@ void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
+ }
+
+ /* Device configuration bits */
++void idxd_msix_perm_setup(struct idxd_device *idxd)
++{
++ union msix_perm mperm;
++ int i, msixcnt;
++
++ msixcnt = pci_msix_vec_count(idxd->pdev);
++ if (msixcnt < 0)
++ return;
++
++ mperm.bits = 0;
++ mperm.pasid = idxd->pasid;
++ mperm.pasid_en = device_pasid_enabled(idxd);
++ for (i = 1; i < msixcnt; i++)
++ iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
++}
++
++void idxd_msix_perm_clear(struct idxd_device *idxd)
++{
++ union msix_perm mperm;
++ int i, msixcnt;
++
++ msixcnt = pci_msix_vec_count(idxd->pdev);
++ if (msixcnt < 0)
++ return;
++
++ mperm.bits = 0;
++ for (i = 1; i < msixcnt; i++)
++ iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
++}
++
+ static void idxd_group_config_write(struct idxd_group *group)
+ {
+ struct idxd_device *idxd = group->idxd;
+@@ -642,7 +678,14 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
+ if (!wq->group)
+ return 0;
+
+- memset(wq->wqcfg, 0, idxd->wqcfg_size);
++ /*
++ * Instead of memset the entire shadow copy of WQCFG, copy from the hardware after
++ * wq reset. This will copy back the sticky values that are present on some devices.
++ */
++ for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
++ wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
++ wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset);
++ }
+
+ /* byte 0-3 */
+ wq->wqcfg->wq_size = wq->size;
+diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
+index 81a0e65fd316d..76014c14f4732 100644
+--- a/drivers/dma/idxd/idxd.h
++++ b/drivers/dma/idxd/idxd.h
+@@ -316,6 +316,8 @@ void idxd_unregister_driver(void);
+ struct bus_type *idxd_get_bus_type(struct idxd_device *idxd);
+
+ /* device interrupt control */
++void idxd_msix_perm_setup(struct idxd_device *idxd);
++void idxd_msix_perm_clear(struct idxd_device *idxd);
+ irqreturn_t idxd_irq_handler(int vec, void *data);
+ irqreturn_t idxd_misc_thread(int vec, void *data);
+ irqreturn_t idxd_wq_thread(int irq, void *data);
+@@ -341,6 +343,7 @@ void idxd_wq_free_resources(struct idxd_wq *wq);
+ int idxd_wq_enable(struct idxd_wq *wq);
+ int idxd_wq_disable(struct idxd_wq *wq);
+ void idxd_wq_drain(struct idxd_wq *wq);
++void idxd_wq_reset(struct idxd_wq *wq);
+ int idxd_wq_map_portal(struct idxd_wq *wq);
+ void idxd_wq_unmap_portal(struct idxd_wq *wq);
+ void idxd_wq_disable_cleanup(struct idxd_wq *wq);
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index fa04acd5582a0..8f3df64aa1be1 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -61,7 +61,6 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
+ struct idxd_irq_entry *irq_entry;
+ int i, msixcnt;
+ int rc = 0;
+- union msix_perm mperm;
+
+ msixcnt = pci_msix_vec_count(pdev);
+ if (msixcnt < 0) {
+@@ -140,14 +139,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
+ }
+
+ idxd_unmask_error_interrupts(idxd);
+-
+- /* Setup MSIX permission table */
+- mperm.bits = 0;
+- mperm.pasid = idxd->pasid;
+- mperm.pasid_en = device_pasid_enabled(idxd);
+- for (i = 1; i < msixcnt; i++)
+- iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
+-
++ idxd_msix_perm_setup(idxd);
+ return 0;
+
+ err_no_irq:
+@@ -504,6 +496,7 @@ static void idxd_shutdown(struct pci_dev *pdev)
+ idxd_flush_work_list(irq_entry);
+ }
+
++ idxd_msix_perm_clear(idxd);
+ destroy_workqueue(idxd->wq);
+ }
+
+diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
+index a60ca11a5784a..f1463fc581125 100644
+--- a/drivers/dma/idxd/irq.c
++++ b/drivers/dma/idxd/irq.c
+@@ -124,7 +124,9 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
+ for (i = 0; i < 4; i++)
+ idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
+ IDXD_SWERR_OFFSET + i * sizeof(u64));
+- iowrite64(IDXD_SWERR_ACK, idxd->reg_base + IDXD_SWERR_OFFSET);
+
++ iowrite64(idxd->sw_err.bits[0] & IDXD_SWERR_ACK,
++ idxd->reg_base + IDXD_SWERR_OFFSET);
+
+ if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
+ int id = idxd->sw_err.wq_idx;
+diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
+index 4dbb03c545e48..18bf4d1489890 100644
+--- a/drivers/dma/idxd/sysfs.c
++++ b/drivers/dma/idxd/sysfs.c
+@@ -275,7 +275,6 @@ static void disable_wq(struct idxd_wq *wq)
+ {
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+- int rc;
+
+ mutex_lock(&wq->wq_lock);
+ dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
+@@ -296,17 +295,13 @@ static void disable_wq(struct idxd_wq *wq)
+ idxd_wq_unmap_portal(wq);
+
+ idxd_wq_drain(wq);
+- rc = idxd_wq_disable(wq);
++ idxd_wq_reset(wq);
+
+ idxd_wq_free_resources(wq);
+ wq->client_count = 0;
+ mutex_unlock(&wq->wq_lock);
+
+- if (rc < 0)
+- dev_warn(dev, "Failed to disable %s: %d\n",
+- dev_name(&wq->conf_dev), rc);
+- else
+- dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
++ dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
+ }
+
+ static int idxd_config_bus_remove(struct device *dev)
+@@ -989,7 +984,7 @@ static ssize_t wq_size_store(struct device *dev,
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+- if (wq->state != IDXD_WQ_DISABLED)
++ if (idxd->state == IDXD_DEV_ENABLED)
+ return -EPERM;
+
+ if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
+@@ -1449,8 +1444,14 @@ static ssize_t op_cap_show(struct device *dev,
+ {
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
++ int i, rc = 0;
++
++ for (i = 0; i < 4; i++)
++ rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);
+
+- return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]);
++ rc--;
++ rc += sysfs_emit_at(buf, rc, "\n");
++ return rc;
+ }
+ static DEVICE_ATTR_RO(op_cap);
+
+diff --git a/drivers/dma/plx_dma.c b/drivers/dma/plx_dma.c
+index f387c5bbc170c..1669345441619 100644
+--- a/drivers/dma/plx_dma.c
++++ b/drivers/dma/plx_dma.c
+@@ -507,10 +507,8 @@ static int plx_dma_create(struct pci_dev *pdev)
+
+ rc = request_irq(pci_irq_vector(pdev, 0), plx_dma_isr, 0,
+ KBUILD_MODNAME, plxdev);
+- if (rc) {
+- kfree(plxdev);
+- return rc;
+- }
++ if (rc)
++ goto free_plx;
+
+ spin_lock_init(&plxdev->ring_lock);
+ tasklet_setup(&plxdev->desc_task, plx_dma_desc_task);
+@@ -540,14 +538,20 @@ static int plx_dma_create(struct pci_dev *pdev)
+ rc = dma_async_device_register(dma);
+ if (rc) {
+ pci_err(pdev, "Failed to register dma device: %d\n", rc);
+- free_irq(pci_irq_vector(pdev, 0), plxdev);
+- kfree(plxdev);
+- return rc;
++ goto put_device;
+ }
+
+ pci_set_drvdata(pdev, plxdev);
+
+ return 0;
+
++put_device:
++ put_device(&pdev->dev);
++ free_irq(pci_irq_vector(pdev, 0), plxdev);
++free_plx:
++ kfree(plxdev);
++
++ return rc;
+ }
+
+ static int plx_dma_probe(struct pci_dev *pdev,
+diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
+index 26c5466b81799..ae49bb23c6ed1 100644
+--- a/drivers/gpio/gpiolib-sysfs.c
++++ b/drivers/gpio/gpiolib-sysfs.c
+@@ -458,6 +458,8 @@ static ssize_t export_store(struct class *class,
+ long gpio;
+ struct gpio_desc *desc;
+ int status;
++ struct gpio_chip *gc;
++ int offset;
+
+ status = kstrtol(buf, 0, &gpio);
+ if (status < 0)
+@@ -469,6 +471,12 @@ static ssize_t export_store(struct class *class,
+ pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
+ return -EINVAL;
+ }
++ gc = desc->gdev->chip;
++ offset = gpio_chip_hwgpio(desc);
++ if (!gpiochip_line_is_valid(gc, offset)) {
++ pr_warn("%s: GPIO %ld masked\n", __func__, gpio);
++ return -EINVAL;
++ }
+
+ /* No extra locking here; FLAG_SYSFS just signifies that the
+ * request and export were done by on behalf of userspace, so
1381 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
1382 +index 5fa150f34c600..2e89acf46e540 100644
1383 +--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
1384 ++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
1385 +@@ -133,6 +133,7 @@
1386 + HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_EN, mask_sh),\
1387 + HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_BLK, mask_sh),\
1388 + HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_BLK_C, mask_sh),\
1389 ++ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK, mask_sh),\
1390 + HUBP_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\
1391 + HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CB_B, mask_sh),\
1392 + HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CR_R, mask_sh),\
1393 +diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
1394 +index f94025ec603a6..a9a8ba1d3aba9 100644
1395 +--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
1396 ++++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
1397 +@@ -992,14 +992,14 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
1398 + * FIXME As we do with eDP, just make a note of the time here
1399 + * and perform the wait before the next panel power on.
1400 + */
1401 +- intel_dsi_msleep(intel_dsi, intel_dsi->panel_pwr_cycle_delay);
1402 ++ msleep(intel_dsi->panel_pwr_cycle_delay);
1403 + }
1404 +
1405 + static void intel_dsi_shutdown(struct intel_encoder *encoder)
1406 + {
1407 + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
1408 +
1409 +- intel_dsi_msleep(intel_dsi, intel_dsi->panel_pwr_cycle_delay);
1410 ++ msleep(intel_dsi->panel_pwr_cycle_delay);
1411 + }
1412 +
1413 + static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
1414 +diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
1415 +index a20b5051f18c1..e53a222186a66 100644
1416 +--- a/drivers/gpu/drm/i915/intel_pm.c
1417 ++++ b/drivers/gpu/drm/i915/intel_pm.c
1418 +@@ -5539,12 +5539,12 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
1419 + struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
1420 + int ret;
1421 +
1422 +- memset(wm, 0, sizeof(*wm));
1423 +-
1424 + /* Watermarks calculated in master */
1425 + if (plane_state->planar_slave)
1426 + return 0;
1427 +
1428 ++ memset(wm, 0, sizeof(*wm));
1429 ++
1430 + if (plane_state->planar_linked_plane) {
1431 + const struct drm_framebuffer *fb = plane_state->hw.fb;
1432 + enum plane_id y_plane_id = plane_state->planar_linked_plane->id;
1433 +diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
1434 +index 81506d2539b07..15898b9b9ce99 100644
1435 +--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
1436 ++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
1437 +@@ -1239,8 +1239,8 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
1438 +
1439 + static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
1440 + {
1441 +- *value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_CP_0_LO,
1442 +- REG_A5XX_RBBM_PERFCTR_CP_0_HI);
1443 ++ *value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO,
1444 ++ REG_A5XX_RBBM_ALWAYSON_COUNTER_HI);
1445 +
1446 + return 0;
1447 + }
1448 +diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
1449 +index a676811ef69d2..b6e8ff2782da3 100644
1450 +--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
1451 ++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
1452 +@@ -1227,8 +1227,8 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
1453 + /* Force the GPU power on so we can read this register */
1454 + a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
1455 +
1456 +- *value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
1457 +- REG_A6XX_RBBM_PERFCTR_CP_0_HI);
1458 ++ *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
1459 ++ REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);
1460 +
1461 + a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
1462 + mutex_unlock(&perfcounter_oob);
1463 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
1464 +index b45becbb00f8e..73225ab691e6a 100644
1465 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
1466 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
1467 +@@ -1554,6 +1554,8 @@ static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
1468 +
1469 + *buf = NULL;
1470 + if (tmp_buf != NULL) {
1471 ++ if (tmp_buf->base.pin_count > 0)
1472 ++ ttm_bo_unpin(&tmp_buf->base);
1473 + ttm_bo_put(&tmp_buf->base);
1474 + }
1475 + }
1476 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
1477 +index 7f95ed6aa2241..3c6e69f36767a 100644
1478 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
1479 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
1480 +@@ -277,6 +277,7 @@ out_no_setup:
1481 + &batch->otables[i]);
1482 + }
1483 +
1484 ++ ttm_bo_unpin(batch->otable_bo);
1485 + ttm_bo_put(batch->otable_bo);
1486 + batch->otable_bo = NULL;
1487 + return ret;
1488 +@@ -342,6 +343,7 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
1489 + vmw_bo_fence_single(bo, NULL);
1490 + ttm_bo_unreserve(bo);
1491 +
1492 ++ ttm_bo_unpin(batch->otable_bo);
1493 + ttm_bo_put(batch->otable_bo);
1494 + batch->otable_bo = NULL;
1495 + }
1496 +@@ -528,6 +530,7 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
1497 + void vmw_mob_destroy(struct vmw_mob *mob)
1498 + {
1499 + if (mob->pt_bo) {
1500 ++ ttm_bo_unpin(mob->pt_bo);
1501 + ttm_bo_put(mob->pt_bo);
1502 + mob->pt_bo = NULL;
1503 + }
1504 +@@ -643,6 +646,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
1505 + out_no_cmd_space:
1506 + vmw_fifo_resource_dec(dev_priv);
1507 + if (pt_set_up) {
1508 ++ ttm_bo_unpin(mob->pt_bo);
1509 + ttm_bo_put(mob->pt_bo);
1510 + mob->pt_bo = NULL;
1511 + }
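[Each of the vmwgfx hunks pairs the final ttm_bo_put() with a ttm_bo_unpin(), since dropping the last reference to a still-pinned buffer leaks its pinned backing. A hedged sketch of the pattern, using only the fields and calls visible above; not a complete vmwgfx function:

    /* Sketch only: release a possibly-pinned BO reference. */
    static void put_unpinned(struct ttm_buffer_object *bo)
    {
        if (bo->pin_count > 0)
            ttm_bo_unpin(bo);
        ttm_bo_put(bo);
    }
]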
1512 +diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
1513 +index 30d9adf31c844..9f14d99c763c2 100644
1514 +--- a/drivers/gpu/drm/xen/xen_drm_front.c
1515 ++++ b/drivers/gpu/drm/xen/xen_drm_front.c
1516 +@@ -521,7 +521,7 @@ static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
1517 + drm_dev = drm_dev_alloc(&xen_drm_driver, dev);
1518 + if (IS_ERR(drm_dev)) {
1519 + ret = PTR_ERR(drm_dev);
1520 +- goto fail;
1521 ++ goto fail_dev;
1522 + }
1523 +
1524 + drm_info->drm_dev = drm_dev;
1525 +@@ -551,8 +551,10 @@ fail_modeset:
1526 + drm_kms_helper_poll_fini(drm_dev);
1527 + drm_mode_config_cleanup(drm_dev);
1528 + drm_dev_put(drm_dev);
1529 +-fail:
1530 ++fail_dev:
1531 + kfree(drm_info);
1532 ++ front_info->drm_info = NULL;
1533 ++fail:
1534 + return ret;
1535 + }
1536 +
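[The relabeling works because unwind labels must mirror setup order: a drm_dev_alloc() failure happens before drm_info->drm_dev exists, so it must skip the drm_dev_put()/cleanup stage, and the teardown path now clears front_info->drm_info so later code cannot follow a freed pointer. A generic sketch of staged error labels (ctx, alloc_a/alloc_b and friends are illustrative names):

    static int setup(struct ctx *c)
    {
        int ret = -ENOMEM;

        c->a = alloc_a();
        if (!c->a)
            goto fail;        /* nothing to undo yet */

        c->b = alloc_b();
        if (!c->b)
            goto fail_a;      /* undo only what succeeded */
        return 0;

    fail_a:
        free_a(c->a);
        c->a = NULL;          /* no dangling pointer for later teardown */
    fail:
        return ret;
    }
]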
1537 +diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
1538 +index dbac166416627..ddecc84fd6f0d 100644
1539 +--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
1540 ++++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
1541 +@@ -10,6 +10,7 @@
1542 + #include <linux/bitops.h>
1543 + #include <linux/delay.h>
1544 + #include <linux/dma-mapping.h>
1545 ++#include <linux/dmi.h>
1546 + #include <linux/interrupt.h>
1547 + #include <linux/io-64-nonatomic-lo-hi.h>
1548 + #include <linux/module.h>
1549 +@@ -22,9 +23,13 @@
1550 +
1551 + #define ACEL_EN BIT(0)
1552 + #define GYRO_EN BIT(1)
1553 +-#define MAGNO_EN BIT(2)
1554 ++#define MAGNO_EN BIT(2)
1555 + #define ALS_EN BIT(19)
1556 +
1557 ++static int sensor_mask_override = -1;
1558 ++module_param_named(sensor_mask, sensor_mask_override, int, 0444);
1559 ++MODULE_PARM_DESC(sensor_mask, "override the detected sensors mask");
1560 ++
1561 + void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info)
1562 + {
1563 + union sfh_cmd_param cmd_param;
1564 +@@ -73,12 +78,41 @@ void amd_stop_all_sensors(struct amd_mp2_dev *privdata)
1565 + writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0);
1566 + }
1567 +
1568 ++static const struct dmi_system_id dmi_sensor_mask_overrides[] = {
1569 ++ {
1570 ++ .matches = {
1571 ++ DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY x360 Convertible 13-ag0xxx"),
1572 ++ },
1573 ++ .driver_data = (void *)(ACEL_EN | MAGNO_EN),
1574 ++ },
1575 ++ {
1576 ++ .matches = {
1577 ++ DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY x360 Convertible 15-cp0xxx"),
1578 ++ },
1579 ++ .driver_data = (void *)(ACEL_EN | MAGNO_EN),
1580 ++ },
1581 ++ { }
1582 ++};
1583 ++
1584 + int amd_mp2_get_sensor_num(struct amd_mp2_dev *privdata, u8 *sensor_id)
1585 + {
1586 + int activestatus, num_of_sensors = 0;
1587 ++ const struct dmi_system_id *dmi_id;
1588 ++ u32 activecontrolstatus;
1589 ++
1590 ++ if (sensor_mask_override == -1) {
1591 ++ dmi_id = dmi_first_match(dmi_sensor_mask_overrides);
1592 ++ if (dmi_id)
1593 ++ sensor_mask_override = (long)dmi_id->driver_data;
1594 ++ }
1595 ++
1596 ++ if (sensor_mask_override >= 0) {
1597 ++ activestatus = sensor_mask_override;
1598 ++ } else {
1599 ++ activecontrolstatus = readl(privdata->mmio + AMD_P2C_MSG3);
1600 ++ activestatus = activecontrolstatus >> 4;
1601 ++ }
1602 +
1603 +- privdata->activecontrolstatus = readl(privdata->mmio + AMD_P2C_MSG3);
1604 +- activestatus = privdata->activecontrolstatus >> 4;
1605 + if (ACEL_EN & activestatus)
1606 + sensor_id[num_of_sensors++] = accel_idx;
1607 +
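[With the hunk above, precedence is: the sensor_mask module parameter, then the DMI quirk table, then the firmware-reported register. Using the bit definitions from the top of the file, accelerometer plus magnetometer is ACEL_EN | MAGNO_EN = BIT(0) | BIT(2) = 5, so (assuming the module loads as amd_sfh) booting with amd_sfh.sensor_mask=5 reproduces what the two HP ENVY quirks set. A trivial check of the arithmetic:

    #include <stdio.h>

    #define BIT(n)   (1u << (n))
    #define ACEL_EN  BIT(0)
    #define GYRO_EN  BIT(1)
    #define MAGNO_EN BIT(2)
    #define ALS_EN   BIT(19)

    int main(void)
    {
        printf("%u\n", ACEL_EN | MAGNO_EN);  /* prints 5 */
        return 0;
    }
]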
1608 +diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
1609 +index 8f8d19b2cfe5b..489415f7c22ca 100644
1610 +--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
1611 ++++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
1612 +@@ -61,7 +61,6 @@ struct amd_mp2_dev {
1613 + struct pci_dev *pdev;
1614 + struct amdtp_cl_data *cl_data;
1615 + void __iomem *mmio;
1616 +- u32 activecontrolstatus;
1617 + };
1618 +
1619 + struct amd_mp2_sensor_info {
1620 +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
1621 +index 44d715c12f6ab..6cda5935fc09c 100644
1622 +--- a/drivers/hid/wacom_wac.c
1623 ++++ b/drivers/hid/wacom_wac.c
1624 +@@ -3574,8 +3574,6 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
1625 + {
1626 + struct wacom_features *features = &wacom_wac->features;
1627 +
1628 +- input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
1629 +-
1630 + if (!(features->device_type & WACOM_DEVICETYPE_PEN))
1631 + return -ENODEV;
1632 +
1633 +@@ -3590,6 +3588,7 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
1634 + return 0;
1635 + }
1636 +
1637 ++ input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
1638 + __set_bit(BTN_TOUCH, input_dev->keybit);
1639 + __set_bit(ABS_MISC, input_dev->absbit);
1640 +
1641 +@@ -3742,8 +3741,6 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
1642 + {
1643 + struct wacom_features *features = &wacom_wac->features;
1644 +
1645 +- input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
1646 +-
1647 + if (!(features->device_type & WACOM_DEVICETYPE_TOUCH))
1648 + return -ENODEV;
1649 +
1650 +@@ -3756,6 +3753,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
1651 + /* setup has already been done */
1652 + return 0;
1653 +
1654 ++ input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
1655 + __set_bit(BTN_TOUCH, input_dev->keybit);
1656 +
1657 + if (features->touch_max == 1) {
1658 +diff --git a/drivers/input/keyboard/nspire-keypad.c b/drivers/input/keyboard/nspire-keypad.c
1659 +index 63d5e488137dc..e9fa1423f1360 100644
1660 +--- a/drivers/input/keyboard/nspire-keypad.c
1661 ++++ b/drivers/input/keyboard/nspire-keypad.c
1662 +@@ -93,9 +93,15 @@ static irqreturn_t nspire_keypad_irq(int irq, void *dev_id)
1663 + return IRQ_HANDLED;
1664 + }
1665 +
1666 +-static int nspire_keypad_chip_init(struct nspire_keypad *keypad)
1667 ++static int nspire_keypad_open(struct input_dev *input)
1668 + {
1669 ++ struct nspire_keypad *keypad = input_get_drvdata(input);
1670 + unsigned long val = 0, cycles_per_us, delay_cycles, row_delay_cycles;
1671 ++ int error;
1672 ++
1673 ++ error = clk_prepare_enable(keypad->clk);
1674 ++ if (error)
1675 ++ return error;
1676 +
1677 + cycles_per_us = (clk_get_rate(keypad->clk) / 1000000);
1678 + if (cycles_per_us == 0)
1679 +@@ -121,30 +127,6 @@ static int nspire_keypad_chip_init(struct nspire_keypad *keypad)
1680 + keypad->int_mask = 1 << 1;
1681 + writel(keypad->int_mask, keypad->reg_base + KEYPAD_INTMSK);
1682 +
1683 +- /* Disable GPIO interrupts to prevent hanging on touchpad */
1684 +- /* Possibly used to detect touchpad events */
1685 +- writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT);
1686 +- /* Acknowledge existing interrupts */
1687 +- writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS);
1688 +-
1689 +- return 0;
1690 +-}
1691 +-
1692 +-static int nspire_keypad_open(struct input_dev *input)
1693 +-{
1694 +- struct nspire_keypad *keypad = input_get_drvdata(input);
1695 +- int error;
1696 +-
1697 +- error = clk_prepare_enable(keypad->clk);
1698 +- if (error)
1699 +- return error;
1700 +-
1701 +- error = nspire_keypad_chip_init(keypad);
1702 +- if (error) {
1703 +- clk_disable_unprepare(keypad->clk);
1704 +- return error;
1705 +- }
1706 +-
1707 + return 0;
1708 + }
1709 +
1710 +@@ -152,6 +134,11 @@ static void nspire_keypad_close(struct input_dev *input)
1711 + {
1712 + struct nspire_keypad *keypad = input_get_drvdata(input);
1713 +
1714 ++ /* Disable interrupts */
1715 ++ writel(0, keypad->reg_base + KEYPAD_INTMSK);
1716 ++ /* Acknowledge existing interrupts */
1717 ++ writel(~0, keypad->reg_base + KEYPAD_INT);
1718 ++
1719 + clk_disable_unprepare(keypad->clk);
1720 + }
1721 +
1722 +@@ -210,6 +197,25 @@ static int nspire_keypad_probe(struct platform_device *pdev)
1723 + return -ENOMEM;
1724 + }
1725 +
1726 ++ error = clk_prepare_enable(keypad->clk);
1727 ++ if (error) {
1728 ++ dev_err(&pdev->dev, "failed to enable clock\n");
1729 ++ return error;
1730 ++ }
1731 ++
1732 ++ /* Disable interrupts */
1733 ++ writel(0, keypad->reg_base + KEYPAD_INTMSK);
1734 ++ /* Acknowledge existing interrupts */
1735 ++ writel(~0, keypad->reg_base + KEYPAD_INT);
1736 ++
1737 ++ /* Disable GPIO interrupts to prevent hanging on touchpad */
1738 ++ /* Possibly used to detect touchpad events */
1739 ++ writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT);
1740 ++ /* Acknowledge existing GPIO interrupts */
1741 ++ writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS);
1742 ++
1743 ++ clk_disable_unprepare(keypad->clk);
1744 ++
1745 + input_set_drvdata(input, keypad);
1746 +
1747 + input->id.bustype = BUS_HOST;
1748 +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
1749 +index 9119e12a57784..a5a0035536462 100644
1750 +--- a/drivers/input/serio/i8042-x86ia64io.h
1751 ++++ b/drivers/input/serio/i8042-x86ia64io.h
1752 +@@ -588,6 +588,7 @@ static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
1753 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
1754 + DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
1755 + },
1756 ++ }, {
1757 + .matches = {
1758 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
1759 + DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */
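[The inserted "}, {" is the whole fix: without it, both .matches initializers sat inside one dmi_system_id entry, and the later designated initializer silently overrode the first, so the chassis-type-10 (Notebook) match never took effect. The corrected table shape, abbreviated to the two entries shown in the hunk:

    static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
        {
            .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
                DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
            },
        }, {
            .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
                DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */
            },
        },
        { }
    };
]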
1760 +diff --git a/drivers/input/touchscreen/s6sy761.c b/drivers/input/touchscreen/s6sy761.c
1761 +index b63d7fdf0cd20..85a1f465c097e 100644
1762 +--- a/drivers/input/touchscreen/s6sy761.c
1763 ++++ b/drivers/input/touchscreen/s6sy761.c
1764 +@@ -145,8 +145,8 @@ static void s6sy761_report_coordinates(struct s6sy761_data *sdata,
1765 + u8 major = event[4];
1766 + u8 minor = event[5];
1767 + u8 z = event[6] & S6SY761_MASK_Z;
1768 +- u16 x = (event[1] << 3) | ((event[3] & S6SY761_MASK_X) >> 4);
1769 +- u16 y = (event[2] << 3) | (event[3] & S6SY761_MASK_Y);
1770 ++ u16 x = (event[1] << 4) | ((event[3] & S6SY761_MASK_X) >> 4);
1771 ++ u16 y = (event[2] << 4) | (event[3] & S6SY761_MASK_Y);
1772 +
1773 + input_mt_slot(sdata->input, tid);
1774 +
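[The touch controller packs 12-bit coordinates as eight high bits in event[1]/event[2] plus a nibble from event[3]; shifting by 3 both truncated the top bit and let the nibble collide with event[1]'s lowest bit. A runnable check with sample report bytes:

    #include <stdio.h>

    int main(void)
    {
        unsigned char ev1 = 0x9A, ev3 = 0x7C;         /* sample bytes */
        unsigned int wrong = (ev1 << 3) | (ev3 >> 4); /* 0x4D7 */
        unsigned int right = (ev1 << 4) | (ev3 >> 4); /* 0x9A7 */

        /* right: ev1 supplies bits 11..4, the high nibble of ev3 bits
         * 3..0. With << 3 the top bit is lost and bit 3 of the result
         * mixes the nibble with ev1's least significant bit. */
        printf("wrong=0x%03X right=0x%03X\n", wrong, right);
        return 0;
    }
]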
1775 +diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
1776 +index 66f4c6398f670..cea2b37897367 100644
1777 +--- a/drivers/md/dm-verity-fec.c
1778 ++++ b/drivers/md/dm-verity-fec.c
1779 +@@ -65,7 +65,7 @@ static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
1780 + u8 *res;
1781 +
1782 + position = (index + rsb) * v->fec->roots;
1783 +- block = div64_u64_rem(position, v->fec->roots << SECTOR_SHIFT, &rem);
1784 ++ block = div64_u64_rem(position, v->fec->io_size, &rem);
1785 + *offset = (unsigned)rem;
1786 +
1787 + res = dm_bufio_read(v->fec->bufio, block, buf);
1788 +@@ -154,7 +154,7 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
1789 +
1790 + /* read the next block when we run out of parity bytes */
1791 + offset += v->fec->roots;
1792 +- if (offset >= v->fec->roots << SECTOR_SHIFT) {
1793 ++ if (offset >= v->fec->io_size) {
1794 + dm_bufio_release(buf);
1795 +
1796 + par = fec_read_parity(v, rsb, block_offset, &offset, &buf);
1797 +@@ -742,8 +742,13 @@ int verity_fec_ctr(struct dm_verity *v)
1798 + return -E2BIG;
1799 + }
1800 +
1801 ++ if ((f->roots << SECTOR_SHIFT) & ((1 << v->data_dev_block_bits) - 1))
1802 ++ f->io_size = 1 << v->data_dev_block_bits;
1803 ++ else
1804 ++ f->io_size = v->fec->roots << SECTOR_SHIFT;
1805 ++
1806 + f->bufio = dm_bufio_client_create(f->dev->bdev,
1807 +- f->roots << SECTOR_SHIFT,
1808 ++ f->io_size,
1809 + 1, 0, NULL, NULL);
1810 + if (IS_ERR(f->bufio)) {
1811 + ti->error = "Cannot initialize FEC bufio client";
1812 +diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h
1813 +index 42fbd3a7fc9f1..3c46c8d618833 100644
1814 +--- a/drivers/md/dm-verity-fec.h
1815 ++++ b/drivers/md/dm-verity-fec.h
1816 +@@ -36,6 +36,7 @@ struct dm_verity_fec {
1817 + struct dm_dev *dev; /* parity data device */
1818 + struct dm_bufio_client *data_bufio; /* for data dev access */
1819 + struct dm_bufio_client *bufio; /* for parity data access */
1820 ++ size_t io_size; /* IO size for roots */
1821 + sector_t start; /* parity data start in blocks */
1822 + sector_t blocks; /* number of blocks covered */
1823 + sector_t rounds; /* number of interleaving rounds */
1824 +diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
1825 +index 57f1f17089946..5c5c92132287d 100644
1826 +--- a/drivers/mtd/nand/raw/mtk_nand.c
1827 ++++ b/drivers/mtd/nand/raw/mtk_nand.c
1828 +@@ -488,8 +488,8 @@ static int mtk_nfc_exec_instr(struct nand_chip *chip,
1829 + return 0;
1830 + case NAND_OP_WAITRDY_INSTR:
1831 + return readl_poll_timeout(nfc->regs + NFI_STA, status,
1832 +- status & STA_BUSY, 20,
1833 +- instr->ctx.waitrdy.timeout_ms);
1834 ++ !(status & STA_BUSY), 20,
1835 ++ instr->ctx.waitrdy.timeout_ms * 1000);
1836 + default:
1837 + break;
1838 + }
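[Two bugs in one call: readl_poll_timeout() (from <linux/iopoll.h>) loops until its condition becomes true, so waiting for ready means !(status & STA_BUSY), not status & STA_BUSY; and its final argument is a timeout in microseconds, hence the * 1000 on the millisecond value from the NAND op. Annotated shape of the fixed call:

    /* readl_poll_timeout(addr, val, cond, sleep_us, timeout_us):
     * re-reads *addr into val until cond holds or timeout_us expires;
     * both time arguments are in microseconds. */
    ret = readl_poll_timeout(nfc->regs + NFI_STA, status,
                             !(status & STA_BUSY),   /* true when ready */
                             20,                     /* poll every 20 us */
                             instr->ctx.waitrdy.timeout_ms * 1000);
]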
1839 +diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
1840 +index 54aa942eedaa6..fdfe7a76c3681 100644
1841 +--- a/drivers/net/dsa/mv88e6xxx/chip.c
1842 ++++ b/drivers/net/dsa/mv88e6xxx/chip.c
1843 +@@ -3002,10 +3002,17 @@ out_resources:
1844 + return err;
1845 + }
1846 +
1847 ++/* prod_id for switch families which do not have a PHY model number */
1848 ++static const u16 family_prod_id_table[] = {
1849 ++ [MV88E6XXX_FAMILY_6341] = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
1850 ++ [MV88E6XXX_FAMILY_6390] = MV88E6XXX_PORT_SWITCH_ID_PROD_6390,
1851 ++};
1852 ++
1853 + static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
1854 + {
1855 + struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
1856 + struct mv88e6xxx_chip *chip = mdio_bus->chip;
1857 ++ u16 prod_id;
1858 + u16 val;
1859 + int err;
1860 +
1861 +@@ -3016,23 +3023,12 @@ static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
1862 + err = chip->info->ops->phy_read(chip, bus, phy, reg, &val);
1863 + mv88e6xxx_reg_unlock(chip);
1864 +
1865 +- if (reg == MII_PHYSID2) {
1866 +- /* Some internal PHYs don't have a model number. */
1867 +- if (chip->info->family != MV88E6XXX_FAMILY_6165)
1868 +- /* Then there is the 6165 family. It gets is
1869 +- * PHYs correct. But it can also have two
1870 +- * SERDES interfaces in the PHY address
1871 +- * space. And these don't have a model
1872 +- * number. But they are not PHYs, so we don't
1873 +- * want to give them something a PHY driver
1874 +- * will recognise.
1875 +- *
1876 +- * Use the mv88e6390 family model number
1877 +- * instead, for anything which really could be
1878 +- * a PHY,
1879 +- */
1880 +- if (!(val & 0x3f0))
1881 +- val |= MV88E6XXX_PORT_SWITCH_ID_PROD_6390 >> 4;
1882 ++ /* Some internal PHYs don't have a model number. */
1883 ++ if (reg == MII_PHYSID2 && !(val & 0x3f0) &&
1884 ++ chip->info->family < ARRAY_SIZE(family_prod_id_table)) {
1885 ++ prod_id = family_prod_id_table[chip->info->family];
1886 ++ if (prod_id)
1887 ++ val |= prod_id >> 4;
1888 + }
1889 +
1890 + return err ? err : val;
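[The lookup table makes the fixup data-driven: families are array indices, sparse designated initializers leave unlisted slots zero, and the ARRAY_SIZE() bounds check plus the zero default mean unknown families pass through unchanged. A self-contained sketch of the idiom (indices and values are made up for illustration):

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    enum { FAM_6341 = 3, FAM_6390 = 9 };   /* made-up indices */

    static const unsigned short prod_id[] = {
        [FAM_6341] = 0x0f41,
        [FAM_6390] = 0x0f90,    /* gaps stay 0: "no override" */
    };

    static unsigned short lookup(unsigned int family)
    {
        return family < ARRAY_SIZE(prod_id) ? prod_id[family] : 0;
    }

    int main(void)
    {
        /* prints 0xf41 0 0 */
        printf("%#x %#x %#x\n", lookup(FAM_6341), lookup(5), lookup(99));
        return 0;
    }
]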
1891 +diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
1892 +index 187b0b9a6e1df..f78daba60b35c 100644
1893 +--- a/drivers/net/ethernet/amd/pcnet32.c
1894 ++++ b/drivers/net/ethernet/amd/pcnet32.c
1895 +@@ -1534,8 +1534,7 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
1896 + }
1897 + pci_set_master(pdev);
1898 +
1899 +- ioaddr = pci_resource_start(pdev, 0);
1900 +- if (!ioaddr) {
1901 ++ if (!pci_resource_len(pdev, 0)) {
1902 + if (pcnet32_debug & NETIF_MSG_PROBE)
1903 + pr_err("card has no PCI IO resources, aborting\n");
1904 + err = -ENODEV;
1905 +@@ -1548,6 +1547,8 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
1906 + pr_err("architecture does not support 32bit PCI busmaster DMA\n");
1907 + goto err_disable_dev;
1908 + }
1909 ++
1910 ++ ioaddr = pci_resource_start(pdev, 0);
1911 + if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
1912 + if (pcnet32_debug & NETIF_MSG_PROBE)
1913 + pr_err("io address range already allocated\n");
1914 +diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
1915 +index fbedbceef2d1b..11bddfb43cddb 100644
1916 +--- a/drivers/net/ethernet/cadence/macb_main.c
1917 ++++ b/drivers/net/ethernet/cadence/macb_main.c
1918 +@@ -3914,6 +3914,7 @@ static int macb_init(struct platform_device *pdev)
1919 + reg = gem_readl(bp, DCFG8);
1920 + bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
1921 + GEM_BFEXT(T2SCR, reg));
1922 ++ INIT_LIST_HEAD(&bp->rx_fs_list.list);
1923 + if (bp->max_tuples > 0) {
1924 + /* also needs one ethtype match to check IPv4 */
1925 + if (GEM_BFEXT(SCR2ETH, reg) > 0) {
1926 +@@ -3924,7 +3925,6 @@ static int macb_init(struct platform_device *pdev)
1927 + /* Filtering is supported in hw but don't enable it in kernel now */
1928 + dev->hw_features |= NETIF_F_NTUPLE;
1929 + /* init Rx flow definitions */
1930 +- INIT_LIST_HEAD(&bp->rx_fs_list.list);
1931 + bp->rx_fs_list.count = 0;
1932 + spin_lock_init(&bp->rx_fs_lock);
1933 + } else
1934 +diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1935 +index 423d6d78d15c7..3a50d5a62aceb 100644
1936 +--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1937 ++++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1938 +@@ -354,18 +354,6 @@ static int chcr_set_tcb_field(struct chcr_ktls_info *tx_info, u16 word,
1939 + return cxgb4_ofld_send(tx_info->netdev, skb);
1940 + }
1941 +
1942 +-/*
1943 +- * chcr_ktls_mark_tcb_close: mark tcb state to CLOSE
1944 +- * @tx_info - driver specific tls info.
1945 +- * return: NET_TX_OK/NET_XMIT_DROP.
1946 +- */
1947 +-static int chcr_ktls_mark_tcb_close(struct chcr_ktls_info *tx_info)
1948 +-{
1949 +- return chcr_set_tcb_field(tx_info, TCB_T_STATE_W,
1950 +- TCB_T_STATE_V(TCB_T_STATE_M),
1951 +- CHCR_TCB_STATE_CLOSED, 1);
1952 +-}
1953 +-
1954 + /*
1955 + * chcr_ktls_dev_del: call back for tls_dev_del.
1956 + * Remove the tid and l2t entry and close the connection.
1957 +@@ -400,8 +388,6 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
1958 +
1959 + /* clear tid */
1960 + if (tx_info->tid != -1) {
1961 +- /* clear tcb state and then release tid */
1962 +- chcr_ktls_mark_tcb_close(tx_info);
1963 + cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
1964 + tx_info->tid, tx_info->ip_family);
1965 + }
1966 +@@ -579,7 +565,6 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
1967 + return 0;
1968 +
1969 + free_tid:
1970 +- chcr_ktls_mark_tcb_close(tx_info);
1971 + #if IS_ENABLED(CONFIG_IPV6)
1972 + /* clear clip entry */
1973 + if (tx_info->ip_family == AF_INET6)
1974 +@@ -677,10 +662,6 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
1975 + if (tx_info->pending_close) {
1976 + spin_unlock(&tx_info->lock);
1977 + if (!status) {
1978 +- /* it's a late success, tcb status is establised,
1979 +- * mark it close.
1980 +- */
1981 +- chcr_ktls_mark_tcb_close(tx_info);
1982 + cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
1983 + tid, tx_info->ip_family);
1984 + }
1985 +@@ -1668,54 +1649,6 @@ static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb,
1986 + refcount_add(nskb->truesize, &nskb->sk->sk_wmem_alloc);
1987 + }
1988 +
1989 +-/*
1990 +- * chcr_ktls_update_snd_una: Reset the SEND_UNA. It will be done to avoid
1991 +- * sending the same segment again. It will discard the segment which is before
1992 +- * the current tx max.
1993 +- * @tx_info - driver specific tls info.
1994 +- * @q - TX queue.
1995 +- * return: NET_TX_OK/NET_XMIT_DROP.
1996 +- */
1997 +-static int chcr_ktls_update_snd_una(struct chcr_ktls_info *tx_info,
1998 +- struct sge_eth_txq *q)
1999 +-{
2000 +- struct fw_ulptx_wr *wr;
2001 +- unsigned int ndesc;
2002 +- int credits;
2003 +- void *pos;
2004 +- u32 len;
2005 +-
2006 +- len = sizeof(*wr) + roundup(CHCR_SET_TCB_FIELD_LEN, 16);
2007 +- ndesc = DIV_ROUND_UP(len, 64);
2008 +-
2009 +- credits = chcr_txq_avail(&q->q) - ndesc;
2010 +- if (unlikely(credits < 0)) {
2011 +- chcr_eth_txq_stop(q);
2012 +- return NETDEV_TX_BUSY;
2013 +- }
2014 +-
2015 +- pos = &q->q.desc[q->q.pidx];
2016 +-
2017 +- wr = pos;
2018 +- /* ULPTX wr */
2019 +- wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
2020 +- wr->cookie = 0;
2021 +- /* fill len in wr field */
2022 +- wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
2023 +-
2024 +- pos += sizeof(*wr);
2025 +-
2026 +- pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
2027 +- TCB_SND_UNA_RAW_W,
2028 +- TCB_SND_UNA_RAW_V(TCB_SND_UNA_RAW_M),
2029 +- TCB_SND_UNA_RAW_V(0), 0);
2030 +-
2031 +- chcr_txq_advance(&q->q, ndesc);
2032 +- cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
2033 +-
2034 +- return 0;
2035 +-}
2036 +-
2037 + /*
2038 + * chcr_end_part_handler: This handler will handle the record which
2039 + * is complete or if record's end part is received. T6 adapter has a issue that
2040 +@@ -1740,7 +1673,9 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
2041 + struct sge_eth_txq *q, u32 skb_offset,
2042 + u32 tls_end_offset, bool last_wr)
2043 + {
2044 ++ bool free_skb_if_tx_fails = false;
2045 + struct sk_buff *nskb = NULL;
2046 ++
2047 + /* check if it is a complete record */
2048 + if (tls_end_offset == record->len) {
2049 + nskb = skb;
2050 +@@ -1763,6 +1698,8 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
2051 +
2052 + if (last_wr)
2053 + dev_kfree_skb_any(skb);
2054 ++ else
2055 ++ free_skb_if_tx_fails = true;
2056 +
2057 + last_wr = true;
2058 +
2059 +@@ -1774,6 +1711,8 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
2060 + record->num_frags,
2061 + (last_wr && tcp_push_no_fin),
2062 + mss)) {
2063 ++ if (free_skb_if_tx_fails)
2064 ++ dev_kfree_skb_any(skb);
2065 + goto out;
2066 + }
2067 + tx_info->prev_seq = record->end_seq;
2068 +@@ -1910,11 +1849,6 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
2069 + /* reset tcp_seq as per the prior_data_required len */
2070 + tcp_seq -= prior_data_len;
2071 + }
2072 +- /* reset snd una, so the middle record won't send the already
2073 +- * sent part.
2074 +- */
2075 +- if (chcr_ktls_update_snd_una(tx_info, q))
2076 +- goto out;
2077 + atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_middle_pkts);
2078 + } else {
2079 + atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_start_pkts);
2080 +@@ -2015,12 +1949,11 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
2081 + * we will send the complete record again.
2082 + */
2083 +
2084 ++ spin_lock_irqsave(&tx_ctx->base.lock, flags);
2085 ++
2086 + do {
2087 +- int i;
2088 +
2089 + cxgb4_reclaim_completed_tx(adap, &q->q, true);
2090 +- /* lock taken */
2091 +- spin_lock_irqsave(&tx_ctx->base.lock, flags);
2092 + /* fetch the tls record */
2093 + record = tls_get_record(&tx_ctx->base, tcp_seq,
2094 + &tx_info->record_no);
2095 +@@ -2079,11 +2012,11 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
2096 + tls_end_offset, skb_offset,
2097 + 0);
2098 +
2099 +- spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
2100 + if (ret) {
2101 + /* free the refcount taken earlier */
2102 + if (tls_end_offset < data_len)
2103 + dev_kfree_skb_any(skb);
2104 ++ spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
2105 + goto out;
2106 + }
2107 +
2108 +@@ -2093,16 +2026,6 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
2109 + continue;
2110 + }
2111 +
2112 +- /* increase page reference count of the record, so that there
2113 +- * won't be any chance of page free in middle if in case stack
2114 +- * receives ACK and try to delete the record.
2115 +- */
2116 +- for (i = 0; i < record->num_frags; i++)
2117 +- __skb_frag_ref(&record->frags[i]);
2118 +- /* lock cleared */
2119 +- spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
2120 +-
2121 +-
2122 + /* if a tls record is finishing in this SKB */
2123 + if (tls_end_offset <= data_len) {
2124 + ret = chcr_end_part_handler(tx_info, skb, record,
2125 +@@ -2127,13 +2050,9 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
2126 + data_len = 0;
2127 + }
2128 +
2129 +- /* clear the frag ref count which increased locally before */
2130 +- for (i = 0; i < record->num_frags; i++) {
2131 +- /* clear the frag ref count */
2132 +- __skb_frag_unref(&record->frags[i]);
2133 +- }
2134 + /* if any failure, come out from the loop. */
2135 + if (ret) {
2136 ++ spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
2137 + if (th->fin)
2138 + dev_kfree_skb_any(skb);
2139 +
2140 +@@ -2148,6 +2067,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
2141 +
2142 + } while (data_len > 0);
2143 +
2144 ++ spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
2145 + atomic64_inc(&port_stats->ktls_tx_encrypted_packets);
2146 + atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes);
2147 +
2148 +diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
2149 +index 252adfa5d837b..8a9096aa85cdf 100644
2150 +--- a/drivers/net/ethernet/davicom/dm9000.c
2151 ++++ b/drivers/net/ethernet/davicom/dm9000.c
2152 +@@ -1471,8 +1471,10 @@ dm9000_probe(struct platform_device *pdev)
2153 +
2154 + /* Init network device */
2155 + ndev = alloc_etherdev(sizeof(struct board_info));
2156 +- if (!ndev)
2157 +- return -ENOMEM;
2158 ++ if (!ndev) {
2159 ++ ret = -ENOMEM;
2160 ++ goto out_regulator_disable;
2161 ++ }
2162 +
2163 + SET_NETDEV_DEV(ndev, &pdev->dev);
2164 +
2165 +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
2166 +index 3552c4485ed53..ce494c52d7267 100644
2167 +--- a/drivers/net/ethernet/ibm/ibmvnic.c
2168 ++++ b/drivers/net/ethernet/ibm/ibmvnic.c
2169 +@@ -1180,19 +1180,13 @@ static int __ibmvnic_open(struct net_device *netdev)
2170 +
2171 + rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
2172 + if (rc) {
2173 +- for (i = 0; i < adapter->req_rx_queues; i++)
2174 +- napi_disable(&adapter->napi[i]);
2175 ++ ibmvnic_napi_disable(adapter);
2176 + release_resources(adapter);
2177 + return rc;
2178 + }
2179 +
2180 + netif_tx_start_all_queues(netdev);
2181 +
2182 +- if (prev_state == VNIC_CLOSED) {
2183 +- for (i = 0; i < adapter->req_rx_queues; i++)
2184 +- napi_schedule(&adapter->napi[i]);
2185 +- }
2186 +-
2187 + adapter->state = VNIC_OPEN;
2188 + return rc;
2189 + }
2190 +@@ -2026,7 +2020,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
2191 + u64 old_num_rx_queues, old_num_tx_queues;
2192 + u64 old_num_rx_slots, old_num_tx_slots;
2193 + struct net_device *netdev = adapter->netdev;
2194 +- int i, rc;
2195 ++ int rc;
2196 +
2197 + netdev_dbg(adapter->netdev,
2198 + "[S:%d FOP:%d] Reset reason %d, reset_state %d\n",
2199 +@@ -2172,10 +2166,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
2200 + /* refresh device's multicast list */
2201 + ibmvnic_set_multi(netdev);
2202 +
2203 +- /* kick napi */
2204 +- for (i = 0; i < adapter->req_rx_queues; i++)
2205 +- napi_schedule(&adapter->napi[i]);
2206 +-
2207 + if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
2208 + adapter->reset_reason == VNIC_RESET_MOBILITY)
2209 + __netdev_notify_peers(netdev);
2210 +@@ -3274,9 +3264,6 @@ restart_loop:
2211 +
2212 + next = ibmvnic_next_scrq(adapter, scrq);
2213 + for (i = 0; i < next->tx_comp.num_comps; i++) {
2214 +- if (next->tx_comp.rcs[i])
2215 +- dev_err(dev, "tx error %x\n",
2216 +- next->tx_comp.rcs[i]);
2217 + index = be32_to_cpu(next->tx_comp.correlators[i]);
2218 + if (index & IBMVNIC_TSO_POOL_MASK) {
2219 + tx_pool = &adapter->tso_pool[pool];
2220 +@@ -3290,7 +3277,13 @@ restart_loop:
2221 + num_entries += txbuff->num_entries;
2222 + if (txbuff->skb) {
2223 + total_bytes += txbuff->skb->len;
2224 +- dev_consume_skb_irq(txbuff->skb);
2225 ++ if (next->tx_comp.rcs[i]) {
2226 ++ dev_err(dev, "tx error %x\n",
2227 ++ next->tx_comp.rcs[i]);
2228 ++ dev_kfree_skb_irq(txbuff->skb);
2229 ++ } else {
2230 ++ dev_consume_skb_irq(txbuff->skb);
2231 ++ }
2232 + txbuff->skb = NULL;
2233 + } else {
2234 + netdev_warn(adapter->netdev,
2235 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
2236 +index 7fab60128c76d..f0edea7cdbccc 100644
2237 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
2238 ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
2239 +@@ -11863,6 +11863,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
2240 + {
2241 + int err = 0;
2242 + int size;
2243 ++ u16 pow;
2244 +
2245 + /* Set default capability flags */
2246 + pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
2247 +@@ -11881,6 +11882,11 @@ static int i40e_sw_init(struct i40e_pf *pf)
2248 + pf->rss_table_size = pf->hw.func_caps.rss_table_size;
2249 + pf->rss_size_max = min_t(int, pf->rss_size_max,
2250 + pf->hw.func_caps.num_tx_qp);
2251 ++
2252 ++ /* find the next higher power-of-2 of num cpus */
2253 ++ pow = roundup_pow_of_two(num_online_cpus());
2254 ++ pf->rss_size_max = min_t(int, pf->rss_size_max, pow);
2255 ++
2256 + if (pf->hw.func_caps.rss) {
2257 + pf->flags |= I40E_FLAG_RSS_ENABLED;
2258 + pf->alloc_rss_size = min_t(int, pf->rss_size_max,
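[roundup_pow_of_two() here is the <linux/log2.h> helper; capping rss_size_max at the next power of two above the online CPU count stops RSS from spreading flows across more queues than there are CPUs to service them. A userspace stand-in (illustrative, not the kernel implementation) to check the values:

    #include <stdio.h>

    static unsigned int roundup_pow_of_two(unsigned int n)
    {
        unsigned int p = 1;

        while (p < n)
            p <<= 1;
        return p;
    }

    int main(void)
    {
        /* 6 CPUs -> 8, 12 -> 16, 16 -> 16 */
        printf("%u %u %u\n", roundup_pow_of_two(6),
               roundup_pow_of_two(12), roundup_pow_of_two(16));
        return 0;
    }
]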
2259 +diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
2260 +index 211ac6f907adb..28e834a128c07 100644
2261 +--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
2262 ++++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
2263 +@@ -747,8 +747,8 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
2264 + struct ice_port_info *pi)
2265 + {
2266 + u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status);
2267 +- u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift;
2268 +- u8 i, j, err, sync, oper, app_index, ice_app_sel_type;
2269 ++ u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift, j;
2270 ++ u8 i, err, sync, oper, app_index, ice_app_sel_type;
2271 + u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
2272 + u16 ice_aqc_cee_app_mask, ice_aqc_cee_app_shift;
2273 + struct ice_dcbx_cfg *cmp_dcbcfg, *dcbcfg;
2274 +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2275 +index e9c2d28efc815..e3d605283ca4a 100644
2276 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2277 ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2278 +@@ -6540,6 +6540,13 @@ err_setup_tx:
2279 + return err;
2280 + }
2281 +
2282 ++static int ixgbe_rx_napi_id(struct ixgbe_ring *rx_ring)
2283 ++{
2284 ++ struct ixgbe_q_vector *q_vector = rx_ring->q_vector;
2285 ++
2286 ++ return q_vector ? q_vector->napi.napi_id : 0;
2287 ++}
2288 ++
2289 + /**
2290 + * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
2291 + * @adapter: pointer to ixgbe_adapter
2292 +@@ -6587,7 +6594,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
2293 +
2294 + /* XDP RX-queue info */
2295 + if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
2296 +- rx_ring->queue_index, rx_ring->q_vector->napi.napi_id) < 0)
2297 ++ rx_ring->queue_index, ixgbe_rx_napi_id(rx_ring)) < 0)
2298 + goto err;
2299 +
2300 + rx_ring->xdp_prog = adapter->xdp_prog;
2301 +@@ -6896,6 +6903,11 @@ static int __maybe_unused ixgbe_resume(struct device *dev_d)
2302 +
2303 + adapter->hw.hw_addr = adapter->io_addr;
2304 +
2305 ++ err = pci_enable_device_mem(pdev);
2306 ++ if (err) {
2307 ++ e_dev_err("Cannot enable PCI device from suspend\n");
2308 ++ return err;
2309 ++ }
2310 + smp_mb__before_atomic();
2311 + clear_bit(__IXGBE_DISABLED, &adapter->state);
2312 + pci_set_master(pdev);
2313 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
2314 +index 308fd279669ec..89510cac46c22 100644
2315 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
2316 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
2317 +@@ -387,21 +387,6 @@ enum mlx5e_fec_supported_link_mode {
2318 + *_policy = MLX5_GET(pplm_reg, _buf, fec_override_admin_##link); \
2319 + } while (0)
2320 +
2321 +-#define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link) \
2322 +- do { \
2323 +- unsigned long policy_long; \
2324 +- u16 *__policy = &(policy); \
2325 +- bool _write = (write); \
2326 +- \
2327 +- policy_long = *__policy; \
2328 +- if (_write && *__policy) \
2329 +- *__policy = find_first_bit(&policy_long, \
2330 +- sizeof(policy_long) * BITS_PER_BYTE);\
2331 +- MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link); \
2332 +- if (!_write && *__policy) \
2333 +- *__policy = 1 << *__policy; \
2334 +- } while (0)
2335 +-
2336 + /* get/set FEC admin field for a given speed */
2337 + static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write,
2338 + enum mlx5e_fec_supported_link_mode link_mode)
2339 +@@ -423,16 +408,16 @@ static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write,
2340 + MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 100g);
2341 + break;
2342 + case MLX5E_FEC_SUPPORTED_LINK_MODE_50G_1X:
2343 +- MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 50g_1x);
2344 ++ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 50g_1x);
2345 + break;
2346 + case MLX5E_FEC_SUPPORTED_LINK_MODE_100G_2X:
2347 +- MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 100g_2x);
2348 ++ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 100g_2x);
2349 + break;
2350 + case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_4X:
2351 +- MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 200g_4x);
2352 ++ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 200g_4x);
2353 + break;
2354 + case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X:
2355 +- MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 400g_8x);
2356 ++ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 400g_8x);
2357 + break;
2358 + default:
2359 + return -EINVAL;
2360 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
2361 +index 24fa399b15770..0d755f76bb8d9 100644
2362 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
2363 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
2364 +@@ -2194,6 +2194,9 @@ static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
2365 + return 0;
2366 +
2367 + flow_rule_match_meta(rule, &match);
2368 ++ if (!match.mask->ingress_ifindex)
2369 ++ return 0;
2370 ++
2371 + if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
2372 + NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
2373 + return -EOPNOTSUPP;
2374 +diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
2375 +index 7c1a057dcf3d6..e04e885f28938 100644
2376 +--- a/drivers/net/ethernet/realtek/r8169_main.c
2377 ++++ b/drivers/net/ethernet/realtek/r8169_main.c
2378 +@@ -2342,13 +2342,14 @@ static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
2379 + static void rtl_jumbo_config(struct rtl8169_private *tp)
2380 + {
2381 + bool jumbo = tp->dev->mtu > ETH_DATA_LEN;
2382 ++ int readrq = 4096;
2383 +
2384 + rtl_unlock_config_regs(tp);
2385 + switch (tp->mac_version) {
2386 + case RTL_GIGA_MAC_VER_12:
2387 + case RTL_GIGA_MAC_VER_17:
2388 + if (jumbo) {
2389 +- pcie_set_readrq(tp->pci_dev, 512);
2390 ++ readrq = 512;
2391 + r8168b_1_hw_jumbo_enable(tp);
2392 + } else {
2393 + r8168b_1_hw_jumbo_disable(tp);
2394 +@@ -2356,7 +2357,7 @@ static void rtl_jumbo_config(struct rtl8169_private *tp)
2395 + break;
2396 + case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
2397 + if (jumbo) {
2398 +- pcie_set_readrq(tp->pci_dev, 512);
2399 ++ readrq = 512;
2400 + r8168c_hw_jumbo_enable(tp);
2401 + } else {
2402 + r8168c_hw_jumbo_disable(tp);
2403 +@@ -2381,8 +2382,15 @@ static void rtl_jumbo_config(struct rtl8169_private *tp)
2404 + }
2405 + rtl_lock_config_regs(tp);
2406 +
2407 +- if (!jumbo && pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
2408 +- pcie_set_readrq(tp->pci_dev, 4096);
2409 ++ if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
2410 ++ pcie_set_readrq(tp->pci_dev, readrq);
2411 ++
2412 ++ /* Chip doesn't support pause in jumbo mode */
2413 ++ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2414 ++ tp->phydev->advertising, !jumbo);
2415 ++ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2416 ++ tp->phydev->advertising, !jumbo);
2417 ++ phy_start_aneg(tp->phydev);
2418 + }
2419 +
2420 + DECLARE_RTL_COND(rtl_chipcmd_cond)
2421 +@@ -4661,8 +4669,6 @@ static int r8169_phy_connect(struct rtl8169_private *tp)
2422 + if (!tp->supports_gmii)
2423 + phy_set_max_speed(phydev, SPEED_100);
2424 +
2425 +- phy_support_asym_pause(phydev);
2426 +-
2427 + phy_attached_info(phydev);
2428 +
2429 + return 0;
2430 +diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
2431 +index a03c3ca1b28d2..9e2cddba3b5b7 100644
2432 +--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
2433 ++++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
2434 +@@ -497,6 +497,18 @@ static inline u32 axinet_ior_read_mcr(struct axienet_local *lp)
2435 + return axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
2436 + }
2437 +
2438 ++static inline void axienet_lock_mii(struct axienet_local *lp)
2439 ++{
2440 ++ if (lp->mii_bus)
2441 ++ mutex_lock(&lp->mii_bus->mdio_lock);
2442 ++}
2443 ++
2444 ++static inline void axienet_unlock_mii(struct axienet_local *lp)
2445 ++{
2446 ++ if (lp->mii_bus)
2447 ++ mutex_unlock(&lp->mii_bus->mdio_lock);
2448 ++}
2449 ++
2450 + /**
2451 + * axienet_iow - Memory mapped Axi Ethernet register write
2452 + * @lp: Pointer to axienet local structure
2453 +diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
2454 +index 4cd701a9277d7..82176dd2cdf33 100644
2455 +--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
2456 ++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
2457 +@@ -1053,9 +1053,9 @@ static int axienet_open(struct net_device *ndev)
2458 + * including the MDIO. MDIO must be disabled before resetting.
2459 + * Hold MDIO bus lock to avoid MDIO accesses during the reset.
2460 + */
2461 +- mutex_lock(&lp->mii_bus->mdio_lock);
2462 ++ axienet_lock_mii(lp);
2463 + ret = axienet_device_reset(ndev);
2464 +- mutex_unlock(&lp->mii_bus->mdio_lock);
2465 ++ axienet_unlock_mii(lp);
2466 +
2467 + ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
2468 + if (ret) {
2469 +@@ -1148,9 +1148,9 @@ static int axienet_stop(struct net_device *ndev)
2470 + }
2471 +
2472 + /* Do a reset to ensure DMA is really stopped */
2473 +- mutex_lock(&lp->mii_bus->mdio_lock);
2474 ++ axienet_lock_mii(lp);
2475 + __axienet_device_reset(lp);
2476 +- mutex_unlock(&lp->mii_bus->mdio_lock);
2477 ++ axienet_unlock_mii(lp);
2478 +
2479 + cancel_work_sync(&lp->dma_err_task);
2480 +
2481 +@@ -1664,9 +1664,9 @@ static void axienet_dma_err_handler(struct work_struct *work)
2482 + * including the MDIO. MDIO must be disabled before resetting.
2483 + * Hold MDIO bus lock to avoid MDIO accesses during the reset.
2484 + */
2485 +- mutex_lock(&lp->mii_bus->mdio_lock);
2486 ++ axienet_lock_mii(lp);
2487 + __axienet_device_reset(lp);
2488 +- mutex_unlock(&lp->mii_bus->mdio_lock);
2489 ++ axienet_unlock_mii(lp);
2490 +
2491 + for (i = 0; i < lp->tx_bd_num; i++) {
2492 + cur_p = &lp->tx_bd_v[i];
2493 +diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
2494 +index 2afef45d15b12..163767abceea9 100644
2495 +--- a/drivers/net/phy/marvell.c
2496 ++++ b/drivers/net/phy/marvell.c
2497 +@@ -3019,9 +3019,34 @@ static struct phy_driver marvell_drivers[] = {
2498 + .get_stats = marvell_get_stats,
2499 + },
2500 + {
2501 +- .phy_id = MARVELL_PHY_ID_88E6390,
2502 ++ .phy_id = MARVELL_PHY_ID_88E6341_FAMILY,
2503 + .phy_id_mask = MARVELL_PHY_ID_MASK,
2504 +- .name = "Marvell 88E6390",
2505 ++ .name = "Marvell 88E6341 Family",
2506 ++ /* PHY_GBIT_FEATURES */
2507 ++ .flags = PHY_POLL_CABLE_TEST,
2508 ++ .probe = m88e1510_probe,
2509 ++ .config_init = marvell_config_init,
2510 ++ .config_aneg = m88e6390_config_aneg,
2511 ++ .read_status = marvell_read_status,
2512 ++ .config_intr = marvell_config_intr,
2513 ++ .handle_interrupt = marvell_handle_interrupt,
2514 ++ .resume = genphy_resume,
2515 ++ .suspend = genphy_suspend,
2516 ++ .read_page = marvell_read_page,
2517 ++ .write_page = marvell_write_page,
2518 ++ .get_sset_count = marvell_get_sset_count,
2519 ++ .get_strings = marvell_get_strings,
2520 ++ .get_stats = marvell_get_stats,
2521 ++ .get_tunable = m88e1540_get_tunable,
2522 ++ .set_tunable = m88e1540_set_tunable,
2523 ++ .cable_test_start = marvell_vct7_cable_test_start,
2524 ++ .cable_test_tdr_start = marvell_vct5_cable_test_tdr_start,
2525 ++ .cable_test_get_status = marvell_vct7_cable_test_get_status,
2526 ++ },
2527 ++ {
2528 ++ .phy_id = MARVELL_PHY_ID_88E6390_FAMILY,
2529 ++ .phy_id_mask = MARVELL_PHY_ID_MASK,
2530 ++ .name = "Marvell 88E6390 Family",
2531 + /* PHY_GBIT_FEATURES */
2532 + .flags = PHY_POLL_CABLE_TEST,
2533 + .probe = m88e6390_probe,
2534 +@@ -3105,7 +3130,8 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
2535 + { MARVELL_PHY_ID_88E1540, MARVELL_PHY_ID_MASK },
2536 + { MARVELL_PHY_ID_88E1545, MARVELL_PHY_ID_MASK },
2537 + { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK },
2538 +- { MARVELL_PHY_ID_88E6390, MARVELL_PHY_ID_MASK },
2539 ++ { MARVELL_PHY_ID_88E6341_FAMILY, MARVELL_PHY_ID_MASK },
2540 ++ { MARVELL_PHY_ID_88E6390_FAMILY, MARVELL_PHY_ID_MASK },
2541 + { MARVELL_PHY_ID_88E1340S, MARVELL_PHY_ID_MASK },
2542 + { MARVELL_PHY_ID_88E1548P, MARVELL_PHY_ID_MASK },
2543 + { }
2544 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
2545 +index c55faa388948e..018daa84ddd28 100644
2546 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
2547 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
2548 +@@ -628,6 +628,7 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
2549 + IWL_DEV_INFO(0x4DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
2550 + IWL_DEV_INFO(0x4DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
2551 + IWL_DEV_INFO(0x4DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
2552 ++ IWL_DEV_INFO(0x4DF0, 0x6074, iwl_ax201_cfg_qu_hr, NULL),
2553 +
2554 + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
2555 + IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
2556 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
2557 +index 689f51968049a..2280f05fbc18b 100644
2558 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
2559 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
2560 +@@ -929,6 +929,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
2561 + u32 cmd_pos;
2562 + const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
2563 + u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
2564 ++ unsigned long flags;
2565 +
2566 + if (WARN(!trans->wide_cmd_header &&
2567 + group_id > IWL_ALWAYS_LONG_GROUP,
2568 +@@ -1012,10 +1013,10 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
2569 + goto free_dup_buf;
2570 + }
2571 +
2572 +- spin_lock_bh(&txq->lock);
2573 ++ spin_lock_irqsave(&txq->lock, flags);
2574 +
2575 + if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
2576 +- spin_unlock_bh(&txq->lock);
2577 ++ spin_unlock_irqrestore(&txq->lock, flags);
2578 +
2579 + IWL_ERR(trans, "No space in command queue\n");
2580 + iwl_op_mode_cmd_queue_full(trans->op_mode);
2581 +@@ -1175,7 +1176,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
2582 + unlock_reg:
2583 + spin_unlock(&trans_pcie->reg_lock);
2584 + out:
2585 +- spin_unlock_bh(&txq->lock);
2586 ++ spin_unlock_irqrestore(&txq->lock, flags);
2587 + free_dup_buf:
2588 + if (idx < 0)
2589 + kfree(dup_buf);
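[The switch from the _bh to the _irqsave lock variants appears to cover callers that can reach iwl_pcie_enqueue_hcmd() with hard interrupts already disabled: spin_unlock_bh() may run softirqs and is not allowed in that state, while irqsave/irqrestore simply preserve whatever interrupt state the caller had. The shape of the change:

    unsigned long flags;

    spin_lock_irqsave(&txq->lock, flags);   /* safe in any context */
    /* ... enqueue the host command ... */
    spin_unlock_irqrestore(&txq->lock, flags);
]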
2590 +diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c
2591 +index c878097f0ddaf..1df959532c7d3 100644
2592 +--- a/drivers/net/wireless/virt_wifi.c
2593 ++++ b/drivers/net/wireless/virt_wifi.c
2594 +@@ -12,6 +12,7 @@
2595 + #include <net/cfg80211.h>
2596 + #include <net/rtnetlink.h>
2597 + #include <linux/etherdevice.h>
2598 ++#include <linux/math64.h>
2599 + #include <linux/module.h>
2600 +
2601 + static struct wiphy *common_wiphy;
2602 +@@ -168,11 +169,11 @@ static void virt_wifi_scan_result(struct work_struct *work)
2603 + scan_result.work);
2604 + struct wiphy *wiphy = priv_to_wiphy(priv);
2605 + struct cfg80211_scan_info scan_info = { .aborted = false };
2606 ++ u64 tsf = div_u64(ktime_get_boottime_ns(), 1000);
2607 +
2608 + informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz,
2609 + CFG80211_BSS_FTYPE_PRESP,
2610 +- fake_router_bssid,
2611 +- ktime_get_boottime_ns(),
2612 ++ fake_router_bssid, tsf,
2613 + WLAN_CAPABILITY_ESS, 0,
2614 + (void *)&ssid, sizeof(ssid),
2615 + DBM_TO_MBM(-50), GFP_KERNEL);
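[The TSF cfg80211_inform_bss() expects is in microseconds, and the raw boottime is nanoseconds; the detour through div_u64() (from <linux/math64.h>) matters because a plain u64 division would pull in libgcc's __udivdi3 on 32-bit targets, which the kernel does not provide. The pattern:

    #include <linux/math64.h>

    u64 ns  = ktime_get_boottime_ns();
    u64 tsf = div_u64(ns, 1000);   /* ns -> us without a 64/64 division */
]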
2616 +diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
2617 +index ef23119db5746..e05cc9f8a9fd1 100644
2618 +--- a/drivers/nvdimm/region_devs.c
2619 ++++ b/drivers/nvdimm/region_devs.c
2620 +@@ -1239,6 +1239,11 @@ int nvdimm_has_flush(struct nd_region *nd_region)
2621 + || !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
2622 + return -ENXIO;
2623 +
2624 ++ /* Test if an explicit flush function is defined */
2625 ++ if (test_bit(ND_REGION_ASYNC, &nd_region->flags) && nd_region->flush)
2626 ++ return 1;
2627 ++
2628 ++ /* Test if any flush hints for the region are available */
2629 + for (i = 0; i < nd_region->ndr_mappings; i++) {
2630 + struct nd_mapping *nd_mapping = &nd_region->mapping[i];
2631 + struct nvdimm *nvdimm = nd_mapping->nvdimm;
2632 +@@ -1249,8 +1254,8 @@ int nvdimm_has_flush(struct nd_region *nd_region)
2633 + }
2634 +
2635 + /*
2636 +- * The platform defines dimm devices without hints, assume
2637 +- * platform persistence mechanism like ADR
2638 ++ * The platform defines dimm devices without hints nor explicit flush,
2639 ++ * assume platform persistence mechanism like ADR
2640 + */
2641 + return 0;
2642 + }
2643 +diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c
2644 +index 16979c1cd2f4b..dcb380e868dfd 100644
2645 +--- a/drivers/remoteproc/pru_rproc.c
2646 ++++ b/drivers/remoteproc/pru_rproc.c
2647 +@@ -450,6 +450,24 @@ static void *pru_i_da_to_va(struct pru_rproc *pru, u32 da, size_t len)
2648 + if (len == 0)
2649 + return NULL;
2650 +
2651 ++ /*
2652 ++ * GNU binutils do not support multiple address spaces. The GNU
2653 ++ * linker's default linker script places IRAM at an arbitrary high
2654 ++ * offset, in order to differentiate it from DRAM. Hence we need to
2655 ++ * strip the artificial offset in the IRAM addresses coming from the
2656 ++ * ELF file.
2657 ++ *
2658 ++ * The TI proprietary linker would never set those higher IRAM address
2659 ++ * bits anyway. PRU architecture limits the program counter to 16-bit
2660 ++ * word-address range. This in turn corresponds to 18-bit IRAM
2661 ++ * byte-address range for ELF.
2662 ++ *
2663 ++ * Two more bits are added just in case to make the final 20-bit mask.
2664 ++ * Idea is to have a safeguard in case TI decides to add banking
2665 ++ * in future SoCs.
2666 ++ */
2667 ++ da &= 0xfffff;
2668 ++
2669 + if (da >= PRU_IRAM_DA &&
2670 + da + len <= PRU_IRAM_DA + pru->mem_regions[PRU_IOMEM_IRAM].size) {
2671 + offset = da - PRU_IRAM_DA;
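[The 0xfffff mask in the hunk above follows from the comment's arithmetic: a 16-bit program counter covers 2^16 instruction words, each word is 4 bytes, so ELF byte addresses need 18 bits, and the two guard bits round that up to 20. A quick check:

    #include <stdio.h>

    int main(void)
    {
        unsigned long bytes = (1ul << 16) * 4;    /* 2^18 byte range */
        unsigned long mask  = (bytes << 2) - 1;   /* +2 guard bits */

        printf("bytes=%#lx mask=%#lx\n", bytes, mask);  /* mask=0xfffff */
        return 0;
    }
]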
2672 +diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
2673 +index 024e5a550759c..8b9a39077dbab 100644
2674 +--- a/drivers/scsi/libsas/sas_ata.c
2675 ++++ b/drivers/scsi/libsas/sas_ata.c
2676 +@@ -201,18 +201,17 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
2677 + memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
2678 + task->total_xfer_len = qc->nbytes;
2679 + task->num_scatter = qc->n_elem;
2680 ++ task->data_dir = qc->dma_dir;
2681 ++ } else if (qc->tf.protocol == ATA_PROT_NODATA) {
2682 ++ task->data_dir = DMA_NONE;
2683 + } else {
2684 + for_each_sg(qc->sg, sg, qc->n_elem, si)
2685 + xfer += sg_dma_len(sg);
2686 +
2687 + task->total_xfer_len = xfer;
2688 + task->num_scatter = si;
2689 +- }
2690 +-
2691 +- if (qc->tf.protocol == ATA_PROT_NODATA)
2692 +- task->data_dir = DMA_NONE;
2693 +- else
2694 + task->data_dir = qc->dma_dir;
2695 ++ }
2696 + task->scatter = qc->sg;
2697 + task->ata_task.retry_count = 1;
2698 + task->task_state_flags = SAS_TASK_STATE_PENDING;
2699 +diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
2700 +index 1e939a2a387f3..98a34ed10f1a0 100644
2701 +--- a/drivers/scsi/scsi_transport_srp.c
2702 ++++ b/drivers/scsi/scsi_transport_srp.c
2703 +@@ -541,7 +541,7 @@ int srp_reconnect_rport(struct srp_rport *rport)
2704 + res = mutex_lock_interruptible(&rport->mutex);
2705 + if (res)
2706 + goto out;
2707 +- if (rport->state != SRP_RPORT_FAIL_FAST)
2708 ++ if (rport->state != SRP_RPORT_FAIL_FAST && rport->state != SRP_RPORT_LOST)
2709 + /*
2710 + * sdev state must be SDEV_TRANSPORT_OFFLINE, transition
2711 + * to SDEV_BLOCK is illegal. Calling scsi_target_unblock()
2712 +diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
2713 +index 706de3ef94bbf..465f646e33298 100644
2714 +--- a/drivers/vfio/pci/vfio_pci.c
2715 ++++ b/drivers/vfio/pci/vfio_pci.c
2716 +@@ -1658,6 +1658,8 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
2717 +
2718 + index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
2719 +
2720 ++ if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
2721 ++ return -EINVAL;
2722 + if (vma->vm_end < vma->vm_start)
2723 + return -EINVAL;
2724 + if ((vma->vm_flags & VM_SHARED) == 0)
2725 +@@ -1666,7 +1668,7 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
2726 + int regnum = index - VFIO_PCI_NUM_REGIONS;
2727 + struct vfio_pci_region *region = vdev->region + regnum;
2728 +
2729 +- if (region && region->ops && region->ops->mmap &&
2730 ++ if (region->ops && region->ops->mmap &&
2731 + (region->flags & VFIO_REGION_INFO_FLAG_MMAP))
2732 + return region->ops->mmap(vdev, region, vma);
2733 + return -EINVAL;
2734 +diff --git a/fs/readdir.c b/fs/readdir.c
2735 +index 19434b3c982cd..09e8ed7d41614 100644
2736 +--- a/fs/readdir.c
2737 ++++ b/fs/readdir.c
2738 +@@ -150,6 +150,9 @@ static int fillonedir(struct dir_context *ctx, const char *name, int namlen,
2739 +
2740 + if (buf->result)
2741 + return -EINVAL;
2742 ++ buf->result = verify_dirent_name(name, namlen);
2743 ++ if (buf->result < 0)
2744 ++ return buf->result;
2745 + d_ino = ino;
2746 + if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) {
2747 + buf->result = -EOVERFLOW;
2748 +@@ -405,6 +408,9 @@ static int compat_fillonedir(struct dir_context *ctx, const char *name,
2749 +
2750 + if (buf->result)
2751 + return -EINVAL;
2752 ++ buf->result = verify_dirent_name(name, namlen);
2753 ++ if (buf->result < 0)
2754 ++ return buf->result;
2755 + d_ino = ino;
2756 + if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) {
2757 + buf->result = -EOVERFLOW;
2758 +diff --git a/include/linux/bpf.h b/include/linux/bpf.h
2759 +index 564ebf91793ed..88b581b75d5be 100644
2760 +--- a/include/linux/bpf.h
2761 ++++ b/include/linux/bpf.h
2762 +@@ -41,6 +41,7 @@ struct bpf_local_storage;
2763 + struct bpf_local_storage_map;
2764 + struct kobject;
2765 + struct mem_cgroup;
2766 ++struct module;
2767 +
2768 + extern struct idr btf_idr;
2769 + extern spinlock_t btf_idr_lock;
2770 +@@ -630,6 +631,7 @@ struct bpf_trampoline {
2771 + /* Executable image of trampoline */
2772 + struct bpf_tramp_image *cur_image;
2773 + u64 selector;
2774 ++ struct module *mod;
2775 + };
2776 +
2777 + struct bpf_attach_target_info {
2778 +diff --git a/include/linux/kasan.h b/include/linux/kasan.h
2779 +index 0aea9e2a2a01d..f2980f010a488 100644
2780 +--- a/include/linux/kasan.h
2781 ++++ b/include/linux/kasan.h
2782 +@@ -306,7 +306,7 @@ static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
2783 +
2784 + #endif /* CONFIG_KASAN */
2785 +
2786 +-#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
2787 ++#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
2788 + void kasan_unpoison_task_stack(struct task_struct *task);
2789 + #else
2790 + static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
2791 +diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
2792 +index 52b1610eae68b..c544b70dfbd26 100644
2793 +--- a/include/linux/marvell_phy.h
2794 ++++ b/include/linux/marvell_phy.h
2795 +@@ -28,11 +28,12 @@
2796 + /* Marvel 88E1111 in Finisar SFP module with modified PHY ID */
2797 + #define MARVELL_PHY_ID_88E1111_FINISAR 0x01ff0cc0
2798 +
2799 +-/* The MV88e6390 Ethernet switch contains embedded PHYs. These PHYs do
2800 ++/* These Ethernet switch families contain embedded PHYs, but they do
2801 + * not have a model ID. So the switch driver traps reads to the ID2
2802 + * register and returns the switch family ID
2803 + */
2804 +-#define MARVELL_PHY_ID_88E6390 0x01410f90
2805 ++#define MARVELL_PHY_ID_88E6341_FAMILY 0x01410f41
2806 ++#define MARVELL_PHY_ID_88E6390_FAMILY 0x01410f90
2807 +
2808 + #define MARVELL_PHY_FAMILY_ID(id) ((id) >> 4)
2809 +
2810 +diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h
2811 +index 7d3537c40ec95..26a13294318cf 100644
2812 +--- a/include/linux/netfilter_arp/arp_tables.h
2813 ++++ b/include/linux/netfilter_arp/arp_tables.h
2814 +@@ -52,8 +52,9 @@ extern void *arpt_alloc_initial_table(const struct xt_table *);
2815 + int arpt_register_table(struct net *net, const struct xt_table *table,
2816 + const struct arpt_replace *repl,
2817 + const struct nf_hook_ops *ops, struct xt_table **res);
2818 +-void arpt_unregister_table(struct net *net, struct xt_table *table,
2819 +- const struct nf_hook_ops *ops);
2820 ++void arpt_unregister_table(struct net *net, struct xt_table *table);
2821 ++void arpt_unregister_table_pre_exit(struct net *net, struct xt_table *table,
2822 ++ const struct nf_hook_ops *ops);
2823 + extern unsigned int arpt_do_table(struct sk_buff *skb,
2824 + const struct nf_hook_state *state,
2825 + struct xt_table *table);
2826 +diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
2827 +index 2f5c4e6ecd8a4..3a956145a25cb 100644
2828 +--- a/include/linux/netfilter_bridge/ebtables.h
2829 ++++ b/include/linux/netfilter_bridge/ebtables.h
2830 +@@ -110,8 +110,9 @@ extern int ebt_register_table(struct net *net,
2831 + const struct ebt_table *table,
2832 + const struct nf_hook_ops *ops,
2833 + struct ebt_table **res);
2834 +-extern void ebt_unregister_table(struct net *net, struct ebt_table *table,
2835 +- const struct nf_hook_ops *);
2836 ++extern void ebt_unregister_table(struct net *net, struct ebt_table *table);
2837 ++void ebt_unregister_table_pre_exit(struct net *net, const char *tablename,
2838 ++ const struct nf_hook_ops *ops);
2839 + extern unsigned int ebt_do_table(struct sk_buff *skb,
2840 + const struct nf_hook_state *state,
2841 + struct ebt_table *table);
2842 +diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h
2843 +index 236d437947bc9..e33997b4d750e 100644
2844 +--- a/include/uapi/linux/idxd.h
2845 ++++ b/include/uapi/linux/idxd.h
2846 +@@ -247,8 +247,8 @@ struct dsa_completion_record {
2847 + uint32_t rsvd2:8;
2848 + };
2849 +
2850 +- uint16_t delta_rec_size;
2851 +- uint16_t crc_val;
2852 ++ uint32_t delta_rec_size;
2853 ++ uint32_t crc_val;
2854 +
2855 + /* DIF check & strip */
2856 + struct {
2857 +diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
2858 +index 986dabc3d11f0..a431d7af884c8 100644
2859 +--- a/kernel/bpf/trampoline.c
2860 ++++ b/kernel/bpf/trampoline.c
2861 +@@ -9,6 +9,7 @@
2862 + #include <linux/btf.h>
2863 + #include <linux/rcupdate_trace.h>
2864 + #include <linux/rcupdate_wait.h>
2865 ++#include <linux/module.h>
2866 +
2867 + /* dummy _ops. The verifier will operate on target program's ops. */
2868 + const struct bpf_verifier_ops bpf_extension_verifier_ops = {
2869 +@@ -87,6 +88,26 @@ out:
2870 + return tr;
2871 + }
2872 +
2873 ++static int bpf_trampoline_module_get(struct bpf_trampoline *tr)
2874 ++{
2875 ++ struct module *mod;
2876 ++ int err = 0;
2877 ++
2878 ++ preempt_disable();
2879 ++ mod = __module_text_address((unsigned long) tr->func.addr);
2880 ++ if (mod && !try_module_get(mod))
2881 ++ err = -ENOENT;
2882 ++ preempt_enable();
2883 ++ tr->mod = mod;
2884 ++ return err;
2885 ++}
2886 ++
2887 ++static void bpf_trampoline_module_put(struct bpf_trampoline *tr)
2888 ++{
2889 ++ module_put(tr->mod);
2890 ++ tr->mod = NULL;
2891 ++}
2892 ++
2893 + static int is_ftrace_location(void *ip)
2894 + {
2895 + long addr;
2896 +@@ -108,6 +129,9 @@ static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
2897 + ret = unregister_ftrace_direct((long)ip, (long)old_addr);
2898 + else
2899 + ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);
2900 ++
2901 ++ if (!ret)
2902 ++ bpf_trampoline_module_put(tr);
2903 + return ret;
2904 + }
2905 +
2906 +@@ -134,10 +158,16 @@ static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
2907 + return ret;
2908 + tr->func.ftrace_managed = ret;
2909 +
2910 ++ if (bpf_trampoline_module_get(tr))
2911 ++ return -ENOENT;
2912 ++
2913 + if (tr->func.ftrace_managed)
2914 + ret = register_ftrace_direct((long)ip, (long)new_addr);
2915 + else
2916 + ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
2917 ++
2918 ++ if (ret)
2919 ++ bpf_trampoline_module_put(tr);
2920 + return ret;
2921 + }
2922 +
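
The pairing rule introduced above (take the module reference only when the attach fully succeeds, drop it on detach or on a failed text poke) is the classic pattern for pinning module text. A small userspace analogy of the same invariant, with hypothetical names, since try_module_get() itself needs kernel context:

    #include <stdio.h>

    struct module_like { int refcount; };

    static int try_get(struct module_like *m)
    {
        /* In the kernel, try_module_get() can fail while the module unloads. */
        m->refcount++;
        return 1;
    }

    static void put(struct module_like *m) { m->refcount--; }

    static int attach(struct module_like *owner, int patch_ok)
    {
        if (!try_get(owner))
            return -1;
        if (!patch_ok) {        /* text poke failed: undo the reference */
            put(owner);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct module_like m = { 0 };

        attach(&m, 0);          /* failed attach leaves no reference */
        attach(&m, 1);          /* successful attach holds one */
        printf("refcount after one failure, one success: %d\n", m.refcount); /* 1 */
        put(&m);                /* detach */
        printf("refcount after detach: %d\n", m.refcount);                   /* 0 */
        return 0;
    }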
2923 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
2924 +index 36b81975d9cda..c198d19fa1c89 100644
2925 +--- a/kernel/bpf/verifier.c
2926 ++++ b/kernel/bpf/verifier.c
2927 +@@ -5384,12 +5384,26 @@ static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
2928 + return &env->insn_aux_data[env->insn_idx];
2929 + }
2930 +
2931 ++enum {
2932 ++ REASON_BOUNDS = -1,
2933 ++ REASON_TYPE = -2,
2934 ++ REASON_PATHS = -3,
2935 ++ REASON_LIMIT = -4,
2936 ++ REASON_STACK = -5,
2937 ++};
2938 ++
2939 + static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
2940 +- u32 *ptr_limit, u8 opcode, bool off_is_neg)
2941 ++ const struct bpf_reg_state *off_reg,
2942 ++ u32 *alu_limit, u8 opcode)
2943 + {
2944 ++ bool off_is_neg = off_reg->smin_value < 0;
2945 + bool mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
2946 + (opcode == BPF_SUB && !off_is_neg);
2947 +- u32 off, max;
2948 ++ u32 off, max = 0, ptr_limit = 0;
2949 ++
2950 ++ if (!tnum_is_const(off_reg->var_off) &&
2951 ++ (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
2952 ++ return REASON_BOUNDS;
2953 +
2954 + switch (ptr_reg->type) {
2955 + case PTR_TO_STACK:
2956 +@@ -5402,22 +5416,27 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
2957 + */
2958 + off = ptr_reg->off + ptr_reg->var_off.value;
2959 + if (mask_to_left)
2960 +- *ptr_limit = MAX_BPF_STACK + off;
2961 ++ ptr_limit = MAX_BPF_STACK + off;
2962 + else
2963 +- *ptr_limit = -off - 1;
2964 +- return *ptr_limit >= max ? -ERANGE : 0;
2965 ++ ptr_limit = -off - 1;
2966 ++ break;
2967 + case PTR_TO_MAP_VALUE:
2968 + max = ptr_reg->map_ptr->value_size;
2969 + if (mask_to_left) {
2970 +- *ptr_limit = ptr_reg->umax_value + ptr_reg->off;
2971 ++ ptr_limit = ptr_reg->umax_value + ptr_reg->off;
2972 + } else {
2973 + off = ptr_reg->smin_value + ptr_reg->off;
2974 +- *ptr_limit = ptr_reg->map_ptr->value_size - off - 1;
2975 ++ ptr_limit = ptr_reg->map_ptr->value_size - off - 1;
2976 + }
2977 +- return *ptr_limit >= max ? -ERANGE : 0;
2978 ++ break;
2979 + default:
2980 +- return -EINVAL;
2981 ++ return REASON_TYPE;
2982 + }
2983 ++
2984 ++ if (ptr_limit >= max)
2985 ++ return REASON_LIMIT;
2986 ++ *alu_limit = ptr_limit;
2987 ++ return 0;
2988 + }
2989 +
2990 + static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
2991 +@@ -5435,7 +5454,7 @@ static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
2992 + if (aux->alu_state &&
2993 + (aux->alu_state != alu_state ||
2994 + aux->alu_limit != alu_limit))
2995 +- return -EACCES;
2996 ++ return REASON_PATHS;
2997 +
2998 + /* Corresponding fixup done in fixup_bpf_calls(). */
2999 + aux->alu_state = alu_state;
3000 +@@ -5454,14 +5473,20 @@ static int sanitize_val_alu(struct bpf_verifier_env *env,
3001 + return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
3002 + }
3003 +
3004 ++static bool sanitize_needed(u8 opcode)
3005 ++{
3006 ++ return opcode == BPF_ADD || opcode == BPF_SUB;
3007 ++}
3008 ++
3009 + static int sanitize_ptr_alu(struct bpf_verifier_env *env,
3010 + struct bpf_insn *insn,
3011 + const struct bpf_reg_state *ptr_reg,
3012 +- struct bpf_reg_state *dst_reg,
3013 +- bool off_is_neg)
3014 ++ const struct bpf_reg_state *off_reg,
3015 ++ struct bpf_reg_state *dst_reg)
3016 + {
3017 + struct bpf_verifier_state *vstate = env->cur_state;
3018 + struct bpf_insn_aux_data *aux = cur_aux(env);
3019 ++ bool off_is_neg = off_reg->smin_value < 0;
3020 + bool ptr_is_dst_reg = ptr_reg == dst_reg;
3021 + u8 opcode = BPF_OP(insn->code);
3022 + u32 alu_state, alu_limit;
3023 +@@ -5483,7 +5508,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
3024 + alu_state |= ptr_is_dst_reg ?
3025 + BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
3026 +
3027 +- err = retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg);
3028 ++ err = retrieve_ptr_limit(ptr_reg, off_reg, &alu_limit, opcode);
3029 + if (err < 0)
3030 + return err;
3031 +
3032 +@@ -5507,7 +5532,46 @@ do_sim:
3033 + ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
3034 + if (!ptr_is_dst_reg && ret)
3035 + *dst_reg = tmp;
3036 +- return !ret ? -EFAULT : 0;
3037 ++ return !ret ? REASON_STACK : 0;
3038 ++}
3039 ++
3040 ++static int sanitize_err(struct bpf_verifier_env *env,
3041 ++ const struct bpf_insn *insn, int reason,
3042 ++ const struct bpf_reg_state *off_reg,
3043 ++ const struct bpf_reg_state *dst_reg)
3044 ++{
3045 ++ static const char *err = "pointer arithmetic with it prohibited for !root";
3046 ++ const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub";
3047 ++ u32 dst = insn->dst_reg, src = insn->src_reg;
3048 ++
3049 ++ switch (reason) {
3050 ++ case REASON_BOUNDS:
3051 ++ verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n",
3052 ++ off_reg == dst_reg ? dst : src, err);
3053 ++ break;
3054 ++ case REASON_TYPE:
3055 ++ verbose(env, "R%d has pointer with unsupported alu operation, %s\n",
3056 ++ off_reg == dst_reg ? src : dst, err);
3057 ++ break;
3058 ++ case REASON_PATHS:
3059 ++ verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n",
3060 ++ dst, op, err);
3061 ++ break;
3062 ++ case REASON_LIMIT:
3063 ++ verbose(env, "R%d tried to %s beyond pointer bounds, %s\n",
3064 ++ dst, op, err);
3065 ++ break;
3066 ++ case REASON_STACK:
3067 ++ verbose(env, "R%d could not be pushed for speculative verification, %s\n",
3068 ++ dst, err);
3069 ++ break;
3070 ++ default:
3071 ++ verbose(env, "verifier internal error: unknown reason (%d)\n",
3072 ++ reason);
3073 ++ break;
3074 ++ }
3075 ++
3076 ++ return -EACCES;
3077 + }
3078 +
3079 + /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
3080 +@@ -5528,8 +5592,8 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
3081 + smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
3082 + u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
3083 + umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
3084 +- u32 dst = insn->dst_reg, src = insn->src_reg;
3085 + u8 opcode = BPF_OP(insn->code);
3086 ++ u32 dst = insn->dst_reg;
3087 + int ret;
3088 +
3089 + dst_reg = &regs[dst];
3090 +@@ -5577,13 +5641,6 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
3091 + verbose(env, "R%d pointer arithmetic on %s prohibited\n",
3092 + dst, reg_type_str[ptr_reg->type]);
3093 + return -EACCES;
3094 +- case PTR_TO_MAP_VALUE:
3095 +- if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) {
3096 +- verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n",
3097 +- off_reg == dst_reg ? dst : src);
3098 +- return -EACCES;
3099 +- }
3100 +- fallthrough;
3101 + default:
3102 + break;
3103 + }
3104 +@@ -5603,11 +5660,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
3105 +
3106 + switch (opcode) {
3107 + case BPF_ADD:
3108 +- ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
3109 +- if (ret < 0) {
3110 +- verbose(env, "R%d tried to add from different maps, paths, or prohibited types\n", dst);
3111 +- return ret;
3112 +- }
3113 ++ ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg);
3114 ++ if (ret < 0)
3115 ++ return sanitize_err(env, insn, ret, off_reg, dst_reg);
3116 ++
3117 + /* We can take a fixed offset as long as it doesn't overflow
3118 + * the s32 'off' field
3119 + */
3120 +@@ -5658,11 +5714,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
3121 + }
3122 + break;
3123 + case BPF_SUB:
3124 +- ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
3125 +- if (ret < 0) {
3126 +- verbose(env, "R%d tried to sub from different maps, paths, or prohibited types\n", dst);
3127 +- return ret;
3128 +- }
3129 ++ ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg);
3130 ++ if (ret < 0)
3131 ++ return sanitize_err(env, insn, ret, off_reg, dst_reg);
3132 ++
3133 + if (dst_reg == off_reg) {
3134 + /* scalar -= pointer. Creates an unknown scalar */
3135 + verbose(env, "R%d tried to subtract pointer from scalar\n",
3136 +@@ -6352,9 +6407,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
3137 + s32 s32_min_val, s32_max_val;
3138 + u32 u32_min_val, u32_max_val;
3139 + u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
3140 +- u32 dst = insn->dst_reg;
3141 +- int ret;
3142 + bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
3143 ++ int ret;
3144 +
3145 + smin_val = src_reg.smin_value;
3146 + smax_val = src_reg.smax_value;
3147 +@@ -6396,6 +6450,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
3148 + return 0;
3149 + }
3150 +
3151 ++ if (sanitize_needed(opcode)) {
3152 ++ ret = sanitize_val_alu(env, insn);
3153 ++ if (ret < 0)
3154 ++ return sanitize_err(env, insn, ret, NULL, NULL);
3155 ++ }
3156 ++
3157 + /* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops.
3158 + * There are two classes of instructions: The first class we track both
3159 + * alu32 and alu64 sign/unsigned bounds independently this provides the
3160 +@@ -6412,21 +6472,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
3161 + */
3162 + switch (opcode) {
3163 + case BPF_ADD:
3164 +- ret = sanitize_val_alu(env, insn);
3165 +- if (ret < 0) {
3166 +- verbose(env, "R%d tried to add from different pointers or scalars\n", dst);
3167 +- return ret;
3168 +- }
3169 + scalar32_min_max_add(dst_reg, &src_reg);
3170 + scalar_min_max_add(dst_reg, &src_reg);
3171 + dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
3172 + break;
3173 + case BPF_SUB:
3174 +- ret = sanitize_val_alu(env, insn);
3175 +- if (ret < 0) {
3176 +- verbose(env, "R%d tried to sub from different pointers or scalars\n", dst);
3177 +- return ret;
3178 +- }
3179 + scalar32_min_max_sub(dst_reg, &src_reg);
3180 + scalar_min_max_sub(dst_reg, &src_reg);
3181 + dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
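
For the PTR_TO_STACK case, the limit computation in retrieve_ptr_limit() above reduces to simple arithmetic on the (negative) stack offset. A standalone sketch of just that branch, with MAX_BPF_STACK taken as 512 as in the kernel:

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_BPF_STACK 512

    /* Mirrors only the PTR_TO_STACK branch shown in the hunk above. */
    static uint32_t stack_limit(int32_t off, int mask_to_left)
    {
        return mask_to_left ? MAX_BPF_STACK + off : -off - 1;
    }

    int main(void)
    {
        /* A pointer 16 bytes below the frame (off = -16). */
        printf("mask to left:  %u bytes\n", stack_limit(-16, 1)); /* 496 */
        printf("mask to right: %u bytes\n", stack_limit(-16, 0)); /* 15 */
        return 0;
    }

The result is the largest ALU offset that still lands inside the object, which is what the speculative-execution masking in fixup_bpf_calls() is later clamped to.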
3182 +diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
3183 +index eead7efbe7e5d..38d7c03e694cd 100644
3184 +--- a/kernel/locking/lockdep.c
3185 ++++ b/kernel/locking/lockdep.c
3186 +@@ -930,7 +930,8 @@ static bool assign_lock_key(struct lockdep_map *lock)
3187 + /* Debug-check: all keys must be persistent! */
3188 + debug_locks_off();
3189 + pr_err("INFO: trying to register non-static key.\n");
3190 +- pr_err("the code is fine but needs lockdep annotation.\n");
3191 ++ pr_err("The code is fine but needs lockdep annotation, or maybe\n");
3192 ++ pr_err("you didn't initialize this object before use?\n");
3193 + pr_err("turning off the locking correctness validator.\n");
3194 + dump_stack();
3195 + return false;
3196 +diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
3197 +index 7937265ef8797..431b6b7ec04d4 100644
3198 +--- a/lib/Kconfig.debug
3199 ++++ b/lib/Kconfig.debug
3200 +@@ -1325,7 +1325,7 @@ config LOCKDEP
3201 + bool
3202 + depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
3203 + select STACKTRACE
3204 +- select FRAME_POINTER if !MIPS && !PPC && !ARM && !S390 && !MICROBLAZE && !ARC && !X86
3205 ++ depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86
3206 + select KALLSYMS
3207 + select KALLSYMS_ALL
3208 +
3209 +@@ -1619,7 +1619,7 @@ config LATENCYTOP
3210 + depends on DEBUG_KERNEL
3211 + depends on STACKTRACE_SUPPORT
3212 + depends on PROC_FS
3213 +- select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86
3214 ++ depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86
3215 + select KALLSYMS
3216 + select KALLSYMS_ALL
3217 + select STACKTRACE
3218 +@@ -1872,7 +1872,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
3219 + depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
3220 + depends on !X86_64
3221 + select STACKTRACE
3222 +- select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86
3223 ++ depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86
3224 + help
3225 + Provide stacktrace filter for fault-injection capabilities
3226 +
3227 +diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
3228 +index 0d3b7940cf430..fde82ec85f8f9 100644
3229 +--- a/lib/Kconfig.kasan
3230 ++++ b/lib/Kconfig.kasan
3231 +@@ -138,9 +138,10 @@ config KASAN_INLINE
3232 +
3233 + endchoice
3234 +
3235 +-config KASAN_STACK_ENABLE
3236 ++config KASAN_STACK
3237 + bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
3238 + depends on KASAN_GENERIC || KASAN_SW_TAGS
3239 ++ default y if CC_IS_GCC
3240 + help
3241 + The LLVM stack address sanitizer has a known problem that
3242 + causes excessive stack usage in a lot of functions, see
3243 +@@ -154,12 +155,6 @@ config KASAN_STACK_ENABLE
3244 + CONFIG_COMPILE_TEST. On gcc it is assumed to always be safe
3245 + to use and enabled by default.
3246 +
3247 +-config KASAN_STACK
3248 +- int
3249 +- depends on KASAN_GENERIC || KASAN_SW_TAGS
3250 +- default 1 if KASAN_STACK_ENABLE || CC_IS_GCC
3251 +- default 0
3252 +-
3253 + config KASAN_SW_TAGS_IDENTIFY
3254 + bool "Enable memory corruption identification"
3255 + depends on KASAN_SW_TAGS
3256 +diff --git a/mm/kasan/common.c b/mm/kasan/common.c
3257 +index b25167664ead4..38ceb759f8532 100644
3258 +--- a/mm/kasan/common.c
3259 ++++ b/mm/kasan/common.c
3260 +@@ -63,7 +63,7 @@ void __kasan_unpoison_range(const void *address, size_t size)
3261 + unpoison_range(address, size);
3262 + }
3263 +
3264 +-#if CONFIG_KASAN_STACK
3265 ++#ifdef CONFIG_KASAN_STACK
3266 + /* Unpoison the entire stack for a task. */
3267 + void kasan_unpoison_task_stack(struct task_struct *task)
3268 + {
3269 +diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
3270 +index 8c706e7652f2b..daa06aa5ea19b 100644
3271 +--- a/mm/kasan/kasan.h
3272 ++++ b/mm/kasan/kasan.h
3273 +@@ -224,7 +224,7 @@ void *find_first_bad_addr(void *addr, size_t size);
3274 + const char *get_bug_type(struct kasan_access_info *info);
3275 + void metadata_fetch_row(char *buffer, void *row);
3276 +
3277 +-#if defined(CONFIG_KASAN_GENERIC) && CONFIG_KASAN_STACK
3278 ++#if defined(CONFIG_KASAN_GENERIC) && defined(CONFIG_KASAN_STACK)
3279 + void print_address_stack_frame(const void *addr);
3280 + #else
3281 + static inline void print_address_stack_frame(const void *addr) { }
3282 +diff --git a/mm/kasan/report_generic.c b/mm/kasan/report_generic.c
3283 +index 8a9c889872da3..4e16518d98770 100644
3284 +--- a/mm/kasan/report_generic.c
3285 ++++ b/mm/kasan/report_generic.c
3286 +@@ -128,7 +128,7 @@ void metadata_fetch_row(char *buffer, void *row)
3287 + memcpy(buffer, kasan_mem_to_shadow(row), META_BYTES_PER_ROW);
3288 + }
3289 +
3290 +-#if CONFIG_KASAN_STACK
3291 ++#ifdef CONFIG_KASAN_STACK
3292 + static bool __must_check tokenize_frame_descr(const char **frame_descr,
3293 + char *token, size_t max_tok_len,
3294 + unsigned long *value)
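
With KASAN_STACK now a bool symbol, the generated autoconf header either defines CONFIG_KASAN_STACK to 1 or leaves it undefined; it is never defined to 0 the way the old int symbol was. That is why the three hunks above switch from #if to #ifdef. A small illustration of the trap:

    #include <stdio.h>

    /* Old int symbol: defined even when "off", just with value 0. */
    #define OLD_CONFIG_KASAN_STACK 0

    int main(void)
    {
    #if OLD_CONFIG_KASAN_STACK
        puts("#if: stack instrumentation on");
    #else
        puts("#if: stack instrumentation off");   /* taken */
    #endif

    #ifdef OLD_CONFIG_KASAN_STACK
        /* Reached even though the value is 0 -- harmless for the old int
         * symbol only because the code used #if; the new bool symbol is
         * simply absent when off, so #ifdef becomes the correct test. */
        puts("#ifdef: symbol is defined (value ignored)");
    #endif
        return 0;
    }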
3295 +diff --git a/mm/ptdump.c b/mm/ptdump.c
3296 +index 4354c1422d57c..da751448d0e4e 100644
3297 +--- a/mm/ptdump.c
3298 ++++ b/mm/ptdump.c
3299 +@@ -111,7 +111,7 @@ static int ptdump_pte_entry(pte_t *pte, unsigned long addr,
3300 + unsigned long next, struct mm_walk *walk)
3301 + {
3302 + struct ptdump_state *st = walk->private;
3303 +- pte_t val = READ_ONCE(*pte);
3304 ++ pte_t val = ptep_get(pte);
3305 +
3306 + if (st->effective_prot)
3307 + st->effective_prot(st, 4, pte_val(val));
3308 +diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
3309 +index 66e7af1654943..32bc2821027f3 100644
3310 +--- a/net/bridge/netfilter/ebtable_broute.c
3311 ++++ b/net/bridge/netfilter/ebtable_broute.c
3312 +@@ -105,14 +105,20 @@ static int __net_init broute_net_init(struct net *net)
3313 + &net->xt.broute_table);
3314 + }
3315 +
3316 ++static void __net_exit broute_net_pre_exit(struct net *net)
3317 ++{
3318 ++ ebt_unregister_table_pre_exit(net, "broute", &ebt_ops_broute);
3319 ++}
3320 ++
3321 + static void __net_exit broute_net_exit(struct net *net)
3322 + {
3323 +- ebt_unregister_table(net, net->xt.broute_table, &ebt_ops_broute);
3324 ++ ebt_unregister_table(net, net->xt.broute_table);
3325 + }
3326 +
3327 + static struct pernet_operations broute_net_ops = {
3328 + .init = broute_net_init,
3329 + .exit = broute_net_exit,
3330 ++ .pre_exit = broute_net_pre_exit,
3331 + };
3332 +
3333 + static int __init ebtable_broute_init(void)
3334 +diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
3335 +index 78cb9b21022d0..bcf982e12f16b 100644
3336 +--- a/net/bridge/netfilter/ebtable_filter.c
3337 ++++ b/net/bridge/netfilter/ebtable_filter.c
3338 +@@ -99,14 +99,20 @@ static int __net_init frame_filter_net_init(struct net *net)
3339 + &net->xt.frame_filter);
3340 + }
3341 +
3342 ++static void __net_exit frame_filter_net_pre_exit(struct net *net)
3343 ++{
3344 ++ ebt_unregister_table_pre_exit(net, "filter", ebt_ops_filter);
3345 ++}
3346 ++
3347 + static void __net_exit frame_filter_net_exit(struct net *net)
3348 + {
3349 +- ebt_unregister_table(net, net->xt.frame_filter, ebt_ops_filter);
3350 ++ ebt_unregister_table(net, net->xt.frame_filter);
3351 + }
3352 +
3353 + static struct pernet_operations frame_filter_net_ops = {
3354 + .init = frame_filter_net_init,
3355 + .exit = frame_filter_net_exit,
3356 ++ .pre_exit = frame_filter_net_pre_exit,
3357 + };
3358 +
3359 + static int __init ebtable_filter_init(void)
3360 +diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
3361 +index 0888936ef8537..0d092773f8161 100644
3362 +--- a/net/bridge/netfilter/ebtable_nat.c
3363 ++++ b/net/bridge/netfilter/ebtable_nat.c
3364 +@@ -99,14 +99,20 @@ static int __net_init frame_nat_net_init(struct net *net)
3365 + &net->xt.frame_nat);
3366 + }
3367 +
3368 ++static void __net_exit frame_nat_net_pre_exit(struct net *net)
3369 ++{
3370 ++ ebt_unregister_table_pre_exit(net, "nat", ebt_ops_nat);
3371 ++}
3372 ++
3373 + static void __net_exit frame_nat_net_exit(struct net *net)
3374 + {
3375 +- ebt_unregister_table(net, net->xt.frame_nat, ebt_ops_nat);
3376 ++ ebt_unregister_table(net, net->xt.frame_nat);
3377 + }
3378 +
3379 + static struct pernet_operations frame_nat_net_ops = {
3380 + .init = frame_nat_net_init,
3381 + .exit = frame_nat_net_exit,
3382 ++ .pre_exit = frame_nat_net_pre_exit,
3383 + };
3384 +
3385 + static int __init ebtable_nat_init(void)
3386 +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
3387 +index ebe33b60efd6b..d481ff24a1501 100644
3388 +--- a/net/bridge/netfilter/ebtables.c
3389 ++++ b/net/bridge/netfilter/ebtables.c
3390 +@@ -1232,10 +1232,34 @@ out:
3391 + return ret;
3392 + }
3393 +
3394 +-void ebt_unregister_table(struct net *net, struct ebt_table *table,
3395 +- const struct nf_hook_ops *ops)
3396 ++static struct ebt_table *__ebt_find_table(struct net *net, const char *name)
3397 ++{
3398 ++ struct ebt_table *t;
3399 ++
3400 ++ mutex_lock(&ebt_mutex);
3401 ++
3402 ++ list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
3403 ++ if (strcmp(t->name, name) == 0) {
3404 ++ mutex_unlock(&ebt_mutex);
3405 ++ return t;
3406 ++ }
3407 ++ }
3408 ++
3409 ++ mutex_unlock(&ebt_mutex);
3410 ++ return NULL;
3411 ++}
3412 ++
3413 ++void ebt_unregister_table_pre_exit(struct net *net, const char *name, const struct nf_hook_ops *ops)
3414 ++{
3415 ++ struct ebt_table *table = __ebt_find_table(net, name);
3416 ++
3417 ++ if (table)
3418 ++ nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
3419 ++}
3420 ++EXPORT_SYMBOL(ebt_unregister_table_pre_exit);
3421 ++
3422 ++void ebt_unregister_table(struct net *net, struct ebt_table *table)
3423 + {
3424 +- nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
3425 + __ebt_unregister_table(net, table);
3426 + }
3427 +
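
The split above, together with the .pre_exit callbacks added to the ebtable_* modules, exists so that when a batch of network namespaces dies, every namespace's hooks are unregistered (and an RCU grace period observed) before any table memory is freed. A userspace simulation of that ordering, with illustrative names:

    #include <stdio.h>

    #define NETNS 2

    static void pre_exit(int net) { printf("net%d: unregister hooks\n", net); }
    static void exit_cb(int net)  { printf("net%d: free table\n", net); }

    int main(void)
    {
        int net;

        /* cleanup_net() runs every pre_exit handler first... */
        for (net = 0; net < NETNS; net++)
            pre_exit(net);
        /* ...then (after synchronize_rcu() in the kernel) every exit
         * handler, so no packet can still be traversing a hook whose
         * table is being freed. */
        for (net = 0; net < NETNS; net++)
            exit_cb(net);
        return 0;
    }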
3428 +diff --git a/net/core/dev.c b/net/core/dev.c
3429 +index 9e3be2ae86532..3c0d3b6d674da 100644
3430 +--- a/net/core/dev.c
3431 ++++ b/net/core/dev.c
3432 +@@ -5877,7 +5877,8 @@ static void skb_gro_reset_offset(struct sk_buff *skb)
3433 + NAPI_GRO_CB(skb)->frag0_len = 0;
3434 +
3435 + if (!skb_headlen(skb) && pinfo->nr_frags &&
3436 +- !PageHighMem(skb_frag_page(frag0))) {
3437 ++ !PageHighMem(skb_frag_page(frag0)) &&
3438 ++ (!NET_IP_ALIGN || !(skb_frag_off(frag0) & 3))) {
3439 + NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
3440 + NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
3441 + skb_frag_size(frag0),
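
On architectures where NET_IP_ALIGN is nonzero, GRO must not take the frag0 fast path when the fragment starts at an offset that would leave the IP header misaligned; the new `(!NET_IP_ALIGN || !(skb_frag_off(frag0) & 3))` term expresses exactly that. The predicate in isolation (NET_IP_ALIGN assumed 2, the generic default; x86 and powerpc define it as 0):

    #include <stdio.h>

    #define NET_IP_ALIGN 2  /* generic default; 0 on x86 and powerpc */

    static int frag0_ok(unsigned int frag_off)
    {
        return !NET_IP_ALIGN || !(frag_off & 3);
    }

    int main(void)
    {
        unsigned int offs[] = { 0, 2, 4, 6, 8 };

        for (int i = 0; i < 5; i++)
            printf("frag offset %u: %s\n", offs[i],
                   frag0_ok(offs[i]) ? "fast path" : "fall back");
        return 0;
    }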
3442 +diff --git a/net/core/neighbour.c b/net/core/neighbour.c
3443 +index 6d2d557442dc6..7b413fe907d66 100644
3444 +--- a/net/core/neighbour.c
3445 ++++ b/net/core/neighbour.c
3446 +@@ -1380,7 +1380,7 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
3447 + * we can reinject the packet there.
3448 + */
3449 + n2 = NULL;
3450 +- if (dst) {
3451 ++ if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
3452 + n2 = dst_neigh_lookup_skb(dst, skb);
3453 + if (n2)
3454 + n1 = n2;
3455 +diff --git a/net/ethtool/pause.c b/net/ethtool/pause.c
3456 +index 09998dc5c185f..d4ac02718b72a 100644
3457 +--- a/net/ethtool/pause.c
3458 ++++ b/net/ethtool/pause.c
3459 +@@ -38,16 +38,16 @@ static int pause_prepare_data(const struct ethnl_req_info *req_base,
3460 + if (!dev->ethtool_ops->get_pauseparam)
3461 + return -EOPNOTSUPP;
3462 +
3463 ++ ethtool_stats_init((u64 *)&data->pausestat,
3464 ++ sizeof(data->pausestat) / 8);
3465 ++
3466 + ret = ethnl_ops_begin(dev);
3467 + if (ret < 0)
3468 + return ret;
3469 + dev->ethtool_ops->get_pauseparam(dev, &data->pauseparam);
3470 + if (req_base->flags & ETHTOOL_FLAG_STATS &&
3471 +- dev->ethtool_ops->get_pause_stats) {
3472 +- ethtool_stats_init((u64 *)&data->pausestat,
3473 +- sizeof(data->pausestat) / 8);
3474 ++ dev->ethtool_ops->get_pause_stats)
3475 + dev->ethtool_ops->get_pause_stats(dev, &data->pausestat);
3476 +- }
3477 + ethnl_ops_complete(dev);
3478 +
3479 + return 0;
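
Moving ethtool_stats_init() ahead of the driver call matters because it pre-fills every counter with the all-ones "not set" marker (ETHTOOL_STAT_NOT_SET), so anything the driver does not overwrite is reported as unset rather than as stale zeroes. A tiny sketch of that convention, with a stand-in for the helper:

    #include <stdio.h>
    #include <stdint.h>

    #define ETHTOOL_STAT_NOT_SET ((uint64_t)~0ULL)

    /* Stand-in matching what the kernel helper does to a u64 array. */
    static void ethtool_stats_init(uint64_t *stats, unsigned int n)
    {
        while (n--)
            stats[n] = ETHTOOL_STAT_NOT_SET;
    }

    int main(void)
    {
        uint64_t pausestat[3];

        ethtool_stats_init(pausestat, 3);
        pausestat[0] = 42;      /* the driver fills in what it supports */

        for (int i = 0; i < 3; i++)
            printf("stat %d: %s\n", i,
                   pausestat[i] == ETHTOOL_STAT_NOT_SET ? "not set" : "set");
        return 0;
    }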
3480 +diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
3481 +index d1b6a9665b170..f0b47d43c9f6e 100644
3482 +--- a/net/ieee802154/nl802154.c
3483 ++++ b/net/ieee802154/nl802154.c
3484 +@@ -1498,6 +1498,11 @@ nl802154_dump_llsec_key(struct sk_buff *skb, struct netlink_callback *cb)
3485 + if (err)
3486 + return err;
3487 +
3488 ++ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
3489 ++ err = skb->len;
3490 ++ goto out_err;
3491 ++ }
3492 ++
3493 + if (!wpan_dev->netdev) {
3494 + err = -EINVAL;
3495 + goto out_err;
3496 +@@ -1552,6 +1557,9 @@ static int nl802154_add_llsec_key(struct sk_buff *skb, struct genl_info *info)
3497 + struct ieee802154_llsec_key_id id = { };
3498 + u32 commands[NL802154_CMD_FRAME_NR_IDS / 32] = { };
3499 +
3500 ++ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
3501 ++ return -EOPNOTSUPP;
3502 ++
3503 + if (!info->attrs[NL802154_ATTR_SEC_KEY] ||
3504 + nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
3505 + return -EINVAL;
3506 +@@ -1601,6 +1609,9 @@ static int nl802154_del_llsec_key(struct sk_buff *skb, struct genl_info *info)
3507 + struct nlattr *attrs[NL802154_KEY_ATTR_MAX + 1];
3508 + struct ieee802154_llsec_key_id id;
3509 +
3510 ++ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
3511 ++ return -EOPNOTSUPP;
3512 ++
3513 + if (!info->attrs[NL802154_ATTR_SEC_KEY] ||
3514 + nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
3515 + return -EINVAL;
3516 +@@ -1666,6 +1677,11 @@ nl802154_dump_llsec_dev(struct sk_buff *skb, struct netlink_callback *cb)
3517 + if (err)
3518 + return err;
3519 +
3520 ++ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
3521 ++ err = skb->len;
3522 ++ goto out_err;
3523 ++ }
3524 ++
3525 + if (!wpan_dev->netdev) {
3526 + err = -EINVAL;
3527 + goto out_err;
3528 +@@ -1752,6 +1768,9 @@ static int nl802154_add_llsec_dev(struct sk_buff *skb, struct genl_info *info)
3529 + struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
3530 + struct ieee802154_llsec_device dev_desc;
3531 +
3532 ++ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
3533 ++ return -EOPNOTSUPP;
3534 ++
3535 + if (ieee802154_llsec_parse_device(info->attrs[NL802154_ATTR_SEC_DEVICE],
3536 + &dev_desc) < 0)
3537 + return -EINVAL;
3538 +@@ -1767,6 +1786,9 @@ static int nl802154_del_llsec_dev(struct sk_buff *skb, struct genl_info *info)
3539 + struct nlattr *attrs[NL802154_DEV_ATTR_MAX + 1];
3540 + __le64 extended_addr;
3541 +
3542 ++ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
3543 ++ return -EOPNOTSUPP;
3544 ++
3545 + if (!info->attrs[NL802154_ATTR_SEC_DEVICE] ||
3546 + nla_parse_nested_deprecated(attrs, NL802154_DEV_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVICE], nl802154_dev_policy, info->extack))
3547 + return -EINVAL;
3548 +@@ -1836,6 +1858,11 @@ nl802154_dump_llsec_devkey(struct sk_buff *skb, struct netlink_callback *cb)
3549 + if (err)
3550 + return err;
3551 +
3552 ++ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
3553 ++ err = skb->len;
3554 ++ goto out_err;
3555 ++ }
3556 ++
3557 + if (!wpan_dev->netdev) {
3558 + err = -EINVAL;
3559 + goto out_err;
3560 +@@ -1893,6 +1920,9 @@ static int nl802154_add_llsec_devkey(struct sk_buff *skb, struct genl_info *info
3561 + struct ieee802154_llsec_device_key key;
3562 + __le64 extended_addr;
3563 +
3564 ++ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
3565 ++ return -EOPNOTSUPP;
3566 ++
3567 + if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] ||
3568 + nla_parse_nested_deprecated(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], nl802154_devkey_policy, info->extack) < 0)
3569 + return -EINVAL;
3570 +@@ -1924,6 +1954,9 @@ static int nl802154_del_llsec_devkey(struct sk_buff *skb, struct genl_info *info
3571 + struct ieee802154_llsec_device_key key;
3572 + __le64 extended_addr;
3573 +
3574 ++ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
3575 ++ return -EOPNOTSUPP;
3576 ++
3577 + if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] ||
3578 + nla_parse_nested_deprecated(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], nl802154_devkey_policy, info->extack))
3579 + return -EINVAL;
3580 +@@ -1998,6 +2031,11 @@ nl802154_dump_llsec_seclevel(struct sk_buff *skb, struct netlink_callback *cb)
3581 + if (err)
3582 + return err;
3583 +
3584 ++ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
3585 ++ err = skb->len;
3586 ++ goto out_err;
3587 ++ }
3588 ++
3589 + if (!wpan_dev->netdev) {
3590 + err = -EINVAL;
3591 + goto out_err;
3592 +@@ -2082,6 +2120,9 @@ static int nl802154_add_llsec_seclevel(struct sk_buff *skb,
3593 + struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
3594 + struct ieee802154_llsec_seclevel sl;
3595 +
3596 ++ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
3597 ++ return -EOPNOTSUPP;
3598 ++
3599 + if (llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL],
3600 + &sl) < 0)
3601 + return -EINVAL;
3602 +diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
3603 +index e0093411d85d6..d6d45d820d79a 100644
3604 +--- a/net/ipv4/netfilter/arp_tables.c
3605 ++++ b/net/ipv4/netfilter/arp_tables.c
3606 +@@ -1541,10 +1541,15 @@ out_free:
3607 + return ret;
3608 + }
3609 +
3610 +-void arpt_unregister_table(struct net *net, struct xt_table *table,
3611 +- const struct nf_hook_ops *ops)
3612 ++void arpt_unregister_table_pre_exit(struct net *net, struct xt_table *table,
3613 ++ const struct nf_hook_ops *ops)
3614 + {
3615 + nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
3616 ++}
3617 ++EXPORT_SYMBOL(arpt_unregister_table_pre_exit);
3618 ++
3619 ++void arpt_unregister_table(struct net *net, struct xt_table *table)
3620 ++{
3621 + __arpt_unregister_table(net, table);
3622 + }
3623 +
3624 +diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
3625 +index c216b9ad3bb24..6c300ba5634e2 100644
3626 +--- a/net/ipv4/netfilter/arptable_filter.c
3627 ++++ b/net/ipv4/netfilter/arptable_filter.c
3628 +@@ -56,16 +56,24 @@ static int __net_init arptable_filter_table_init(struct net *net)
3629 + return err;
3630 + }
3631 +
3632 ++static void __net_exit arptable_filter_net_pre_exit(struct net *net)
3633 ++{
3634 ++ if (net->ipv4.arptable_filter)
3635 ++ arpt_unregister_table_pre_exit(net, net->ipv4.arptable_filter,
3636 ++ arpfilter_ops);
3637 ++}
3638 ++
3639 + static void __net_exit arptable_filter_net_exit(struct net *net)
3640 + {
3641 + if (!net->ipv4.arptable_filter)
3642 + return;
3643 +- arpt_unregister_table(net, net->ipv4.arptable_filter, arpfilter_ops);
3644 ++ arpt_unregister_table(net, net->ipv4.arptable_filter);
3645 + net->ipv4.arptable_filter = NULL;
3646 + }
3647 +
3648 + static struct pernet_operations arptable_filter_net_ops = {
3649 + .exit = arptable_filter_net_exit,
3650 ++ .pre_exit = arptable_filter_net_pre_exit,
3651 + };
3652 +
3653 + static int __init arptable_filter_init(void)
3654 +diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
3655 +index 3e5f4f2e705e8..08829809e88b7 100644
3656 +--- a/net/ipv4/sysctl_net_ipv4.c
3657 ++++ b/net/ipv4/sysctl_net_ipv4.c
3658 +@@ -1369,9 +1369,19 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
3659 + if (!table)
3660 + goto err_alloc;
3661 +
3662 +- /* Update the variables to point into the current struct net */
3663 +- for (i = 0; i < ARRAY_SIZE(ipv4_net_table) - 1; i++)
3664 +- table[i].data += (void *)net - (void *)&init_net;
3665 ++ for (i = 0; i < ARRAY_SIZE(ipv4_net_table) - 1; i++) {
3666 ++ if (table[i].data) {
3667 ++ /* Update the variables to point into
3668 ++ * the current struct net
3669 ++ */
3670 ++ table[i].data += (void *)net - (void *)&init_net;
3671 ++ } else {
3672 ++ /* Entries without data pointer are global;
3673 ++ * Make them read-only in non-init_net ns
3674 ++ */
3675 ++ table[i].mode &= ~0222;
3676 ++ }
3677 ++ }
3678 + }
3679 +
3680 + net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
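
The `mode &= ~0222` above strips the owner, group, and other write bits while leaving any read bits intact, which is how a shared (global) sysctl becomes read-only in non-init namespaces. The masking in isolation:

    #include <stdio.h>

    int main(void)
    {
        unsigned int mode = 0644;   /* rw-r--r-- */

        mode &= ~0222;              /* drop all write bits */
        printf("0%o\n", mode);      /* prints 0444: r--r--r-- */
        return 0;
    }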
3681 +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
3682 +index 3fa0eca5a06f8..42fe7db6bbb37 100644
3683 +--- a/net/ipv6/ip6_tunnel.c
3684 ++++ b/net/ipv6/ip6_tunnel.c
3685 +@@ -2244,6 +2244,16 @@ static void __net_exit ip6_tnl_destroy_tunnels(struct net *net, struct list_head
3686 + t = rtnl_dereference(t->next);
3687 + }
3688 + }
3689 ++
3690 ++ t = rtnl_dereference(ip6n->tnls_wc[0]);
3691 ++ while (t) {
3692 ++ /* If dev is in the same netns, it has already
3693 ++ * been added to the list by the previous loop.
3694 ++ */
3695 ++ if (!net_eq(dev_net(t->dev), net))
3696 ++ unregister_netdevice_queue(t->dev, list);
3697 ++ t = rtnl_dereference(t->next);
3698 ++ }
3699 + }
3700 +
3701 + static int __net_init ip6_tnl_init_net(struct net *net)
3702 +diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
3703 +index 63ccd9f2dcccf..9fdccf0718b59 100644
3704 +--- a/net/ipv6/sit.c
3705 ++++ b/net/ipv6/sit.c
3706 +@@ -1867,9 +1867,9 @@ static void __net_exit sit_destroy_tunnels(struct net *net,
3707 + if (dev->rtnl_link_ops == &sit_link_ops)
3708 + unregister_netdevice_queue(dev, head);
3709 +
3710 +- for (prio = 1; prio < 4; prio++) {
3711 ++ for (prio = 0; prio < 4; prio++) {
3712 + int h;
3713 +- for (h = 0; h < IP6_SIT_HASH_SIZE; h++) {
3714 ++ for (h = 0; h < (prio ? IP6_SIT_HASH_SIZE : 1); h++) {
3715 + struct ip_tunnel *t;
3716 +
3717 + t = rtnl_dereference(sitn->tunnels[prio][h]);
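
tunnels_wc (prio 0) has a single bucket, while the hashed prio levels have IP6_SIT_HASH_SIZE buckets each; the `prio ? IP6_SIT_HASH_SIZE : 1` bound encodes that, and starting at prio 0 is what stops the wildcard tunnel from being skipped. A quick count of the buckets each loop visits (IP6_SIT_HASH_SIZE assumed 16, matching the in-tree definition):

    #include <stdio.h>

    #define IP6_SIT_HASH_SIZE 16

    int main(void)
    {
        int prio, h, old_visits = 0, new_visits = 0;

        for (prio = 1; prio < 4; prio++)            /* old: skips prio 0 */
            for (h = 0; h < IP6_SIT_HASH_SIZE; h++)
                old_visits++;

        for (prio = 0; prio < 4; prio++)            /* new: includes tunnels_wc */
            for (h = 0; h < (prio ? IP6_SIT_HASH_SIZE : 1); h++)
                new_visits++;

        printf("old loop: %d buckets, new loop: %d buckets\n",
               old_visits, new_visits);             /* 48 vs 49 */
        return 0;
    }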
3718 +diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
3719 +index 68a0de02b5618..860bc35383d5f 100644
3720 +--- a/net/mac80211/cfg.c
3721 ++++ b/net/mac80211/cfg.c
3722 +@@ -1788,8 +1788,10 @@ static int ieee80211_change_station(struct wiphy *wiphy,
3723 + }
3724 +
3725 + if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
3726 +- sta->sdata->u.vlan.sta)
3727 ++ sta->sdata->u.vlan.sta) {
3728 ++ ieee80211_clear_fast_rx(sta);
3729 + RCU_INIT_POINTER(sta->sdata->u.vlan.sta, NULL);
3730 ++ }
3731 +
3732 + if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
3733 + ieee80211_vif_dec_num_mcast(sta->sdata);
3734 +diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
3735 +index 0ee702d374b02..c6c0cb4656645 100644
3736 +--- a/net/netfilter/nf_conntrack_standalone.c
3737 ++++ b/net/netfilter/nf_conntrack_standalone.c
3738 +@@ -266,6 +266,7 @@ static const char* l4proto_name(u16 proto)
3739 + case IPPROTO_GRE: return "gre";
3740 + case IPPROTO_SCTP: return "sctp";
3741 + case IPPROTO_UDPLITE: return "udplite";
3742 ++ case IPPROTO_ICMPV6: return "icmpv6";
3743 + }
3744 +
3745 + return "unknown";
3746 +diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
3747 +index 2a6993fa40d78..1c5460e7bce87 100644
3748 +--- a/net/netfilter/nf_flow_table_offload.c
3749 ++++ b/net/netfilter/nf_flow_table_offload.c
3750 +@@ -305,12 +305,12 @@ static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule,
3751 + const __be32 *addr, const __be32 *mask)
3752 + {
3753 + struct flow_action_entry *entry;
3754 +- int i;
3755 ++ int i, j;
3756 +
3757 +- for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i += sizeof(u32)) {
3758 ++ for (i = 0, j = 0; i < sizeof(struct in6_addr) / sizeof(u32); i += sizeof(u32), j++) {
3759 + entry = flow_action_entry_next(flow_rule);
3760 + flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
3761 +- offset + i, &addr[i], mask);
3762 ++ offset + i, &addr[j], mask);
3763 + }
3764 + }
3765 +
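
The subtle point above is a stride mismatch: the mangle offset advances in bytes, but the address must be consumed in 32-bit words, so reusing the byte counter as the array index pairs offset 4 with addr[4] instead of addr[1]. An illustration of the two strides (a plain loop over the 16 address bytes, not a copy of the kernel loop):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* in6_addr as four 32-bit words; only indices 0..3 are valid. */
        uint32_t addr[4] = { 0x20010db8, 0x00000000, 0x00000000, 0x00000001 };
        unsigned int i, j;

        for (i = 0, j = 0; i < sizeof(addr); i += sizeof(uint32_t), j++)
            printf("offset +%2u: buggy index addr[%u]%s, fixed index addr[%u] = 0x%08x\n",
                   i, i, i > 3 ? " (out of bounds!)" : "", j, addr[j]);
        return 0;
    }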
3766 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
3767 +index 93d4bb39afb3c..d6ec76a0fe62f 100644
3768 +--- a/net/netfilter/nf_tables_api.c
3769 ++++ b/net/netfilter/nf_tables_api.c
3770 +@@ -5263,16 +5263,35 @@ err_expr:
3771 + return -ENOMEM;
3772 + }
3773 +
3774 +-static void nft_set_elem_expr_setup(const struct nft_set_ext *ext, int i,
3775 +- struct nft_expr *expr_array[])
3776 ++static int nft_set_elem_expr_setup(struct nft_ctx *ctx,
3777 ++ const struct nft_set_ext *ext,
3778 ++ struct nft_expr *expr_array[],
3779 ++ u32 num_exprs)
3780 + {
3781 + struct nft_set_elem_expr *elem_expr = nft_set_ext_expr(ext);
3782 +- struct nft_expr *expr = nft_setelem_expr_at(elem_expr, elem_expr->size);
3783 ++ struct nft_expr *expr;
3784 ++ int i, err;
3785 ++
3786 ++ for (i = 0; i < num_exprs; i++) {
3787 ++ expr = nft_setelem_expr_at(elem_expr, elem_expr->size);
3788 ++ err = nft_expr_clone(expr, expr_array[i]);
3789 ++ if (err < 0)
3790 ++ goto err_elem_expr_setup;
3791 ++
3792 ++ elem_expr->size += expr_array[i]->ops->size;
3793 ++ nft_expr_destroy(ctx, expr_array[i]);
3794 ++ expr_array[i] = NULL;
3795 ++ }
3796 ++
3797 ++ return 0;
3798 ++
3799 ++err_elem_expr_setup:
3800 ++ for (; i < num_exprs; i++) {
3801 ++ nft_expr_destroy(ctx, expr_array[i]);
3802 ++ expr_array[i] = NULL;
3803 ++ }
3804 +
3805 +- memcpy(expr, expr_array[i], expr_array[i]->ops->size);
3806 +- elem_expr->size += expr_array[i]->ops->size;
3807 +- kfree(expr_array[i]);
3808 +- expr_array[i] = NULL;
3809 ++ return -ENOMEM;
3810 + }
3811 +
3812 + static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
3813 +@@ -5524,12 +5543,15 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
3814 + *nft_set_ext_obj(ext) = obj;
3815 + obj->use++;
3816 + }
3817 +- for (i = 0; i < num_exprs; i++)
3818 +- nft_set_elem_expr_setup(ext, i, expr_array);
3819 ++ err = nft_set_elem_expr_setup(ctx, ext, expr_array, num_exprs);
3820 ++ if (err < 0)
3821 ++ goto err_elem_expr;
3822 +
3823 + trans = nft_trans_elem_alloc(ctx, NFT_MSG_NEWSETELEM, set);
3824 +- if (trans == NULL)
3825 +- goto err_trans;
3826 ++ if (trans == NULL) {
3827 ++ err = -ENOMEM;
3828 ++ goto err_elem_expr;
3829 ++ }
3830 +
3831 + ext->genmask = nft_genmask_cur(ctx->net) | NFT_SET_ELEM_BUSY_MASK;
3832 + err = set->ops->insert(ctx->net, set, &elem, &ext2);
3833 +@@ -5573,7 +5595,7 @@ err_set_full:
3834 + set->ops->remove(ctx->net, set, &elem);
3835 + err_element_clash:
3836 + kfree(trans);
3837 +-err_trans:
3838 ++err_elem_expr:
3839 + if (obj)
3840 + obj->use--;
3841 +
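
Besides switching from a raw memcpy to nft_expr_clone(), the new helper defines a failure contract: the source expressions it has not yet consumed must still be released, leaving expr_array[] empty on both paths (copies already embedded in the element are left to the caller's error handling). A generic sketch of that unwind pattern, with hypothetical names:

    #include <stdio.h>

    #define N 4

    /* Stand-ins: "cloning" source i fails when i == fail_at. */
    static int clone_expr(int i, int fail_at) { return i == fail_at ? -1 : 0; }
    static void destroy_expr(int i) { printf("destroy source %d\n", i); }

    static int setup(int fail_at)
    {
        int i;

        for (i = 0; i < N; i++) {
            if (clone_expr(i, fail_at) < 0)
                goto err;
            destroy_expr(i);    /* consumed: source freed, clone kept */
        }
        return 0;
    err:
        for (; i < N; i++)
            destroy_expr(i);    /* failure: every pending source freed too */
        return -1;
    }

    int main(void)
    {
        setup(2);   /* sources 0,1 consumed; 2,3 released on the error path */
        return 0;
    }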
3842 +diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
3843 +index 0e2c315c3b5ed..82ec27bdf9412 100644
3844 +--- a/net/netfilter/nft_limit.c
3845 ++++ b/net/netfilter/nft_limit.c
3846 +@@ -76,13 +76,13 @@ static int nft_limit_init(struct nft_limit *limit,
3847 + return -EOVERFLOW;
3848 +
3849 + if (pkts) {
3850 +- tokens = div_u64(limit->nsecs, limit->rate) * limit->burst;
3851 ++ tokens = div64_u64(limit->nsecs, limit->rate) * limit->burst;
3852 + } else {
3853 + /* The token bucket size limits the number of tokens can be
3854 + * accumulated. tokens_max specifies the bucket size.
3855 + * tokens_max = unit * (rate + burst) / rate.
3856 + */
3857 +- tokens = div_u64(limit->nsecs * (limit->rate + limit->burst),
3858 ++ tokens = div64_u64(limit->nsecs * (limit->rate + limit->burst),
3859 + limit->rate);
3860 + }
3861 +
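
div_u64() divides a 64-bit dividend by a 32-bit divisor, so a rate or token count above 2^32 - 1 was silently truncated before the division; div64_u64() keeps all 64 divisor bits. A userspace reproduction with stand-ins matching the kernel helpers' signatures:

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t div_u64(uint64_t dividend, uint32_t divisor)
    {
        return dividend / divisor;
    }

    static uint64_t div64_u64(uint64_t dividend, uint64_t divisor)
    {
        return dividend / divisor;
    }

    int main(void)
    {
        uint64_t nsecs = 1000000000ULL;     /* a 1 s unit in ns */
        uint64_t rate  = 5000000000ULL;     /* > U32_MAX */

        /* Passing a u64 rate to div_u64 truncates it to 32 bits... */
        printf("div_u64:   %llu\n",
               (unsigned long long)div_u64(nsecs, (uint32_t)rate));  /* 1 */
        /* ...while div64_u64 divides by the full value. */
        printf("div64_u64: %llu\n",
               (unsigned long long)div64_u64(nsecs, rate));          /* 0 */
        return 0;
    }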
3862 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
3863 +index a710917c5ac73..b9b3d899a611c 100644
3864 +--- a/net/sctp/socket.c
3865 ++++ b/net/sctp/socket.c
3866 +@@ -1520,11 +1520,9 @@ static void sctp_close(struct sock *sk, long timeout)
3867 +
3868 + /* Supposedly, no process has access to the socket, but
3869 + * the net layers still may.
3870 +- * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
3871 +- * held and that should be grabbed before socket lock.
3872 + */
3873 +- spin_lock_bh(&net->sctp.addr_wq_lock);
3874 +- bh_lock_sock_nested(sk);
3875 ++ local_bh_disable();
3876 ++ bh_lock_sock(sk);
3877 +
3878 + /* Hold the sock, since sk_common_release() will put sock_put()
3879 + * and we have just a little more cleanup.
3880 +@@ -1533,7 +1531,7 @@ static void sctp_close(struct sock *sk, long timeout)
3881 + sk_common_release(sk);
3882 +
3883 + bh_unlock_sock(sk);
3884 +- spin_unlock_bh(&net->sctp.addr_wq_lock);
3885 ++ local_bh_enable();
3886 +
3887 + sock_put(sk);
3888 +
3889 +@@ -4993,9 +4991,6 @@ static int sctp_init_sock(struct sock *sk)
3890 + sk_sockets_allocated_inc(sk);
3891 + sock_prot_inuse_add(net, sk->sk_prot, 1);
3892 +
3893 +- /* Nothing can fail after this block, otherwise
3894 +- * sctp_destroy_sock() will be called without addr_wq_lock held
3895 +- */
3896 + if (net->sctp.default_auto_asconf) {
3897 + spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
3898 + list_add_tail(&sp->auto_asconf_list,
3899 +@@ -5030,7 +5025,9 @@ static void sctp_destroy_sock(struct sock *sk)
3900 +
3901 + if (sp->do_auto_asconf) {
3902 + sp->do_auto_asconf = 0;
3903 ++ spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
3904 + list_del(&sp->auto_asconf_list);
3905 ++ spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
3906 + }
3907 + sctp_endpoint_free(sp->ep);
3908 + local_bh_disable();
3909 +diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
3910 +index b81ca117dac7a..e4cb0ff4dcf41 100644
3911 +--- a/net/xfrm/xfrm_output.c
3912 ++++ b/net/xfrm/xfrm_output.c
3913 +@@ -660,6 +660,12 @@ static int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb)
3914 + {
3915 + int err;
3916 +
3917 ++ if (x->outer_mode.encap == XFRM_MODE_BEET &&
3918 ++ ip_is_fragment(ip_hdr(skb))) {
3919 ++ net_warn_ratelimited("BEET mode doesn't support inner IPv4 fragments\n");
3920 ++ return -EAFNOSUPPORT;
3921 ++ }
3922 ++
3923 + err = xfrm4_tunnel_check_size(skb);
3924 + if (err)
3925 + return err;
3926 +@@ -705,8 +711,15 @@ out:
3927 + static int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb)
3928 + {
3929 + #if IS_ENABLED(CONFIG_IPV6)
3930 ++ unsigned int ptr = 0;
3931 + int err;
3932 +
3933 ++ if (x->outer_mode.encap == XFRM_MODE_BEET &&
3934 ++ ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL, NULL) >= 0) {
3935 ++ net_warn_ratelimited("BEET mode doesn't support inner IPv6 fragments\n");
3936 ++ return -EAFNOSUPPORT;
3937 ++ }
3938 ++
3939 + err = xfrm6_tunnel_check_size(skb);
3940 + if (err)
3941 + return err;
3942 +diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan
3943 +index 1e000cc2e7b4b..3d791908ed364 100644
3944 +--- a/scripts/Makefile.kasan
3945 ++++ b/scripts/Makefile.kasan
3946 +@@ -2,6 +2,14 @@
3947 + CFLAGS_KASAN_NOSANITIZE := -fno-builtin
3948 + KASAN_SHADOW_OFFSET ?= $(CONFIG_KASAN_SHADOW_OFFSET)
3949 +
3950 ++cc-param = $(call cc-option, -mllvm -$(1), $(call cc-option, --param $(1)))
3951 ++
3952 ++ifdef CONFIG_KASAN_STACK
3953 ++ stack_enable := 1
3954 ++else
3955 ++ stack_enable := 0
3956 ++endif
3957 ++
3958 + ifdef CONFIG_KASAN_GENERIC
3959 +
3960 + ifdef CONFIG_KASAN_INLINE
3961 +@@ -12,8 +20,6 @@ endif
3962 +
3963 + CFLAGS_KASAN_MINIMAL := -fsanitize=kernel-address
3964 +
3965 +-cc-param = $(call cc-option, -mllvm -$(1), $(call cc-option, --param $(1)))
3966 +-
3967 + # -fasan-shadow-offset fails without -fsanitize
3968 + CFLAGS_KASAN_SHADOW := $(call cc-option, -fsanitize=kernel-address \
3969 + -fasan-shadow-offset=$(KASAN_SHADOW_OFFSET), \
3970 +@@ -27,7 +33,7 @@ else
3971 + CFLAGS_KASAN := $(CFLAGS_KASAN_SHADOW) \
3972 + $(call cc-param,asan-globals=1) \
3973 + $(call cc-param,asan-instrumentation-with-call-threshold=$(call_threshold)) \
3974 +- $(call cc-param,asan-stack=$(CONFIG_KASAN_STACK)) \
3975 ++ $(call cc-param,asan-stack=$(stack_enable)) \
3976 + $(call cc-param,asan-instrument-allocas=1)
3977 + endif
3978 +
3979 +@@ -36,14 +42,14 @@ endif # CONFIG_KASAN_GENERIC
3980 + ifdef CONFIG_KASAN_SW_TAGS
3981 +
3982 + ifdef CONFIG_KASAN_INLINE
3983 +- instrumentation_flags := -mllvm -hwasan-mapping-offset=$(KASAN_SHADOW_OFFSET)
3984 ++ instrumentation_flags := $(call cc-param,hwasan-mapping-offset=$(KASAN_SHADOW_OFFSET))
3985 + else
3986 +- instrumentation_flags := -mllvm -hwasan-instrument-with-calls=1
3987 ++ instrumentation_flags := $(call cc-param,hwasan-instrument-with-calls=1)
3988 + endif
3989 +
3990 + CFLAGS_KASAN := -fsanitize=kernel-hwaddress \
3991 +- -mllvm -hwasan-instrument-stack=$(CONFIG_KASAN_STACK) \
3992 +- -mllvm -hwasan-use-short-granules=0 \
3993 ++ $(call cc-param,hwasan-instrument-stack=$(stack_enable)) \
3994 ++ $(call cc-param,hwasan-use-short-granules=0) \
3995 + $(instrumentation_flags)
3996 +
3997 + endif # CONFIG_KASAN_SW_TAGS
3998 +diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening
3999 +index 269967c4fc1b6..a56c36470cb19 100644
4000 +--- a/security/Kconfig.hardening
4001 ++++ b/security/Kconfig.hardening
4002 +@@ -64,7 +64,7 @@ choice
4003 + config GCC_PLUGIN_STRUCTLEAK_BYREF
4004 + bool "zero-init structs passed by reference (strong)"
4005 + depends on GCC_PLUGINS
4006 +- depends on !(KASAN && KASAN_STACK=1)
4007 ++ depends on !(KASAN && KASAN_STACK)
4008 + select GCC_PLUGIN_STRUCTLEAK
4009 + help
4010 + Zero-initialize any structures on the stack that may
4011 +@@ -82,7 +82,7 @@ choice
4012 + config GCC_PLUGIN_STRUCTLEAK_BYREF_ALL
4013 + bool "zero-init anything passed by reference (very strong)"
4014 + depends on GCC_PLUGINS
4015 +- depends on !(KASAN && KASAN_STACK=1)
4016 ++ depends on !(KASAN && KASAN_STACK)
4017 + select GCC_PLUGIN_STRUCTLEAK
4018 + help
4019 + Zero-initialize any stack variables that may be passed
4020 +diff --git a/sound/soc/codecs/max98373-i2c.c b/sound/soc/codecs/max98373-i2c.c
4021 +index 85f6865019d4a..ddb6436835d73 100644
4022 +--- a/sound/soc/codecs/max98373-i2c.c
4023 ++++ b/sound/soc/codecs/max98373-i2c.c
4024 +@@ -446,6 +446,7 @@ static bool max98373_volatile_reg(struct device *dev, unsigned int reg)
4025 + case MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK:
4026 + case MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK:
4027 + case MAX98373_R20B6_BDE_CUR_STATE_READBACK:
4028 ++ case MAX98373_R20FF_GLOBAL_SHDN:
4029 + case MAX98373_R21FF_REV_ID:
4030 + return true;
4031 + default:
4032 +diff --git a/sound/soc/codecs/max98373-sdw.c b/sound/soc/codecs/max98373-sdw.c
4033 +index b8d471d79e939..1a1f97f24601d 100644
4034 +--- a/sound/soc/codecs/max98373-sdw.c
4035 ++++ b/sound/soc/codecs/max98373-sdw.c
4036 +@@ -220,6 +220,7 @@ static bool max98373_volatile_reg(struct device *dev, unsigned int reg)
4037 + case MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK:
4038 + case MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK:
4039 + case MAX98373_R20B6_BDE_CUR_STATE_READBACK:
4040 ++ case MAX98373_R20FF_GLOBAL_SHDN:
4041 + case MAX98373_R21FF_REV_ID:
4042 + /* SoundWire Control Port Registers */
4043 + case MAX98373_R0040_SCP_INIT_STAT_1 ... MAX98373_R0070_SCP_FRAME_CTLR:
4044 +diff --git a/sound/soc/codecs/max98373.c b/sound/soc/codecs/max98373.c
4045 +index 746c829312b87..1346a98ce8a15 100644
4046 +--- a/sound/soc/codecs/max98373.c
4047 ++++ b/sound/soc/codecs/max98373.c
4048 +@@ -28,11 +28,13 @@ static int max98373_dac_event(struct snd_soc_dapm_widget *w,
4049 + regmap_update_bits(max98373->regmap,
4050 + MAX98373_R20FF_GLOBAL_SHDN,
4051 + MAX98373_GLOBAL_EN_MASK, 1);
4052 ++ usleep_range(30000, 31000);
4053 + break;
4054 + case SND_SOC_DAPM_POST_PMD:
4055 + regmap_update_bits(max98373->regmap,
4056 + MAX98373_R20FF_GLOBAL_SHDN,
4057 + MAX98373_GLOBAL_EN_MASK, 0);
4058 ++ usleep_range(30000, 31000);
4059 + max98373->tdm_mode = false;
4060 + break;
4061 + default:
4062 +diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
4063 +index 39637ca78cdbb..9f5f217a96077 100644
4064 +--- a/sound/soc/fsl/fsl_esai.c
4065 ++++ b/sound/soc/fsl/fsl_esai.c
4066 +@@ -524,11 +524,13 @@ static int fsl_esai_startup(struct snd_pcm_substream *substream,
4067 + ESAI_SAICR_SYNC, esai_priv->synchronous ?
4068 + ESAI_SAICR_SYNC : 0);
4069 +
4070 +- /* Set a default slot number -- 2 */
4071 ++ /* Set slots count */
4072 + regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR,
4073 +- ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(2));
4074 ++ ESAI_xCCR_xDC_MASK,
4075 ++ ESAI_xCCR_xDC(esai_priv->slots));
4076 + regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR,
4077 +- ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(2));
4078 ++ ESAI_xCCR_xDC_MASK,
4079 ++ ESAI_xCCR_xDC(esai_priv->slots));
4080 + }
4081 +
4082 + return 0;
4083 +diff --git a/tools/include/uapi/asm/errno.h b/tools/include/uapi/asm/errno.h
4084 +index 637189ec1ab99..d30439b4b8ab4 100644
4085 +--- a/tools/include/uapi/asm/errno.h
4086 ++++ b/tools/include/uapi/asm/errno.h
4087 +@@ -9,8 +9,6 @@
4088 + #include "../../../arch/alpha/include/uapi/asm/errno.h"
4089 + #elif defined(__mips__)
4090 + #include "../../../arch/mips/include/uapi/asm/errno.h"
4091 +-#elif defined(__ia64__)
4092 +-#include "../../../arch/ia64/include/uapi/asm/errno.h"
4093 + #elif defined(__xtensa__)
4094 + #include "../../../arch/xtensa/include/uapi/asm/errno.h"
4095 + #else
4096 +diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
4097 +index ba70937c5362a..5a9c5a648a9e2 100644
4098 +--- a/tools/lib/bpf/xsk.c
4099 ++++ b/tools/lib/bpf/xsk.c
4100 +@@ -777,18 +777,19 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
4101 + struct xsk_ring_cons *comp,
4102 + const struct xsk_socket_config *usr_config)
4103 + {
4104 ++ bool unmap, rx_setup_done = false, tx_setup_done = false;
4105 + void *rx_map = NULL, *tx_map = NULL;
4106 + struct sockaddr_xdp sxdp = {};
4107 + struct xdp_mmap_offsets off;
4108 + struct xsk_socket *xsk;
4109 + struct xsk_ctx *ctx;
4110 + int err, ifindex;
4111 +- bool unmap = umem->fill_save != fill;
4112 +- bool rx_setup_done = false, tx_setup_done = false;
4113 +
4114 + if (!umem || !xsk_ptr || !(rx || tx))
4115 + return -EFAULT;
4116 +
4117 ++ unmap = umem->fill_save != fill;
4118 ++
4119 + xsk = calloc(1, sizeof(*xsk));
4120 + if (!xsk)
4121 + return -ENOMEM;
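
The reordering matters because the old code computed umem->fill_save in an initializer, i.e. it dereferenced the very pointer the function was about to validate. A distilled version of the before/after shape, with hypothetical names:

    #include <stdio.h>
    #include <stddef.h>

    struct umem_like { void *fill_save; };

    /* Buggy shape: the initializer runs before the guard. */
    int create_buggy(struct umem_like *umem, void *fill)
    {
        int unmap = umem->fill_save != fill;    /* faults if umem == NULL */

        if (!umem)
            return -1;
        return unmap;
    }

    /* Fixed shape: validate first, then dereference. */
    int create_fixed(struct umem_like *umem, void *fill)
    {
        int unmap;

        if (!umem)
            return -1;
        unmap = umem->fill_save != fill;
        return unmap;
    }

    int main(void)
    {
        /* create_buggy(NULL, NULL) would fault before its own guard ran. */
        printf("fixed path returns %d on NULL input\n",
               create_fixed(NULL, NULL));
        return 0;
    }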