From: Mike Pagano <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.15 commit in: /
Date: Wed, 31 Aug 2022 15:38:46
Message-Id: 1661960312.cd5b187ac40a420ace4932d5ed4d72bcdee0dddf.mpagano@gentoo
commit:     cd5b187ac40a420ace4932d5ed4d72bcdee0dddf
Author:     Mike Pagano <mpagano@gentoo.org>
AuthorDate: Wed Aug 31 15:38:32 2022 +0000
Commit:     Mike Pagano <mpagano@gentoo.org>
CommitDate: Wed Aug 31 15:38:32 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=cd5b187a

Linux patch 5.15.64

Signed-off-by: Mike Pagano <mpagano@gentoo.org>

 0000_README              |    4 +
 1063_linux-5.15.64.patch | 7680 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 7684 insertions(+)

diff --git a/0000_README b/0000_README
index aadae770..4b74f06a 100644
--- a/0000_README
+++ b/0000_README
@@ -295,6 +295,10 @@ Patch: 1062_linux-5.15.63.patch
From: http://www.kernel.org
Desc: Linux 5.15.63

+Patch: 1063_linux-5.15.64.patch
+From: http://www.kernel.org
+Desc: Linux 5.15.64
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1063_linux-5.15.64.patch b/1063_linux-5.15.64.patch
new file mode 100644
index 00000000..ad0e4f60
--- /dev/null
+++ b/1063_linux-5.15.64.patch
@@ -0,0 +1,7680 @@
+diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
+index eda519519f122..a7362b1096c4d 100644
+--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
+@@ -521,6 +521,7 @@ What: /sys/devices/system/cpu/vulnerabilities
+ /sys/devices/system/cpu/vulnerabilities/tsx_async_abort
+ /sys/devices/system/cpu/vulnerabilities/itlb_multihit
+ /sys/devices/system/cpu/vulnerabilities/mmio_stale_data
++ /sys/devices/system/cpu/vulnerabilities/retbleed
+ Date: January 2018
+ Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
+ Description: Information about CPU vulnerabilities
+diff --git a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
+index 9393c50b5afc9..c98fd11907cc8 100644
+--- a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
++++ b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
+@@ -230,6 +230,20 @@ The possible values in this file are:
+ * - 'Mitigation: Clear CPU buffers'
+ - The processor is vulnerable and the CPU buffer clearing mitigation is
+ enabled.
++ * - 'Unknown: No mitigations'
++ - The processor vulnerability status is unknown because it is
++ out of Servicing period. Mitigation is not attempted.
++
++Definitions:
++------------
++
++Servicing period: The process of providing functional and security updates to
++Intel processors or platforms, utilizing the Intel Platform Update (IPU)
++process or other similar mechanisms.
++
++End of Servicing Updates (ESU): ESU is the date at which Intel will no
++longer provide Servicing, such as through IPU or other similar update
++processes. ESU dates will typically be aligned to end of quarter.
+
+ If the processor is vulnerable then the following information is appended to
+ the above information:
+diff --git a/Documentation/admin-guide/sysctl/net.rst b/Documentation/admin-guide/sysctl/net.rst
+index 4150f74c521a8..5310f398794c1 100644
+--- a/Documentation/admin-guide/sysctl/net.rst
++++ b/Documentation/admin-guide/sysctl/net.rst
+@@ -271,7 +271,7 @@ poll cycle or the number of packets processed reaches netdev_budget.
+ netdev_max_backlog
+ ------------------
+
+-Maximum number of packets, queued on the INPUT side, when the interface
++Maximum number of packets, queued on the INPUT side, when the interface
+ receives packets faster than kernel can process them.
+
+ netdev_rss_key
+diff --git a/Makefile b/Makefile
+index ea669530ec86d..b2b65f7c168c5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 15
+-SUBLEVEL = 63
++SUBLEVEL = 64
+ EXTRAVERSION =
+ NAME = Trick or Treat
+
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index c67c19d701597..292a3091b5dee 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -208,6 +208,8 @@ static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
+ #ifdef CONFIG_ARM64_ERRATUM_1286807
+ {
+ ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
++ },
++ {
+ /* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
+ ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
+ },
+diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
+index 5dccf01a9e172..e6542e44caded 100644
+--- a/arch/parisc/Kconfig
++++ b/arch/parisc/Kconfig
+@@ -142,10 +142,10 @@ menu "Processor type and features"
+
+ choice
+ prompt "Processor type"
+- default PA7000
++ default PA7000 if "$(ARCH)" = "parisc"
+
+ config PA7000
+- bool "PA7000/PA7100"
++ bool "PA7000/PA7100" if "$(ARCH)" = "parisc"
+ help
+ This is the processor type of your CPU. This information is
+ used for optimizing purposes. In order to compile a kernel
+@@ -156,21 +156,21 @@ config PA7000
+ which is required on some machines.
+
+ config PA7100LC
+- bool "PA7100LC"
++ bool "PA7100LC" if "$(ARCH)" = "parisc"
+ help
+ Select this option for the PCX-L processor, as used in the
+ 712, 715/64, 715/80, 715/100, 715/100XC, 725/100, 743, 748,
+ D200, D210, D300, D310 and E-class
+
+ config PA7200
+- bool "PA7200"
++ bool "PA7200" if "$(ARCH)" = "parisc"
+ help
+ Select this option for the PCX-T' processor, as used in the
+ C100, C110, J100, J110, J210XC, D250, D260, D350, D360,
+ K100, K200, K210, K220, K400, K410 and K420
+
+ config PA7300LC
+- bool "PA7300LC"
++ bool "PA7300LC" if "$(ARCH)" = "parisc"
+ help
+ Select this option for the PCX-L2 processor, as used in the
+ 744, A180, B132L, B160L, B180L, C132L, C160L, C180L,
+@@ -220,17 +220,8 @@ config MLONGCALLS
+ Enabling this option will probably slow down your kernel.
+
+ config 64BIT
+- bool "64-bit kernel"
++ def_bool "$(ARCH)" = "parisc64"
+ depends on PA8X00
+- help
+- Enable this if you want to support 64bit kernel on PA-RISC platform.
+-
+- At the moment, only people willing to use more than 2GB of RAM,
+- or having a 64bit-only capable PA-RISC machine should say Y here.
+-
+- Since there is no 64bit userland on PA-RISC, there is no point to
+- enable this option otherwise. The 64bit kernel is significantly bigger
+- and slower than the 32bit one.
+
+ choice
+ prompt "Kernel page size"
+diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
+index 286cec4d86d7b..cc6ed74960501 100644
+--- a/arch/parisc/kernel/unaligned.c
++++ b/arch/parisc/kernel/unaligned.c
+@@ -107,7 +107,7 @@
+ #define R1(i) (((i)>>21)&0x1f)
+ #define R2(i) (((i)>>16)&0x1f)
+ #define R3(i) ((i)&0x1f)
+-#define FR3(i) ((((i)<<1)&0x1f)|(((i)>>6)&1))
++#define FR3(i) ((((i)&0x1f)<<1)|(((i)>>6)&1))
+ #define IM(i,n) (((i)>>1&((1<<(n-1))-1))|((i)&1?((0-1L)<<(n-1)):0))
+ #define IM5_2(i) IM((i)>>16,5)
+ #define IM5_3(i) IM((i),5)
+diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
+index 74d888c8d631a..e3866ffa06c5a 100644
+--- a/arch/riscv/include/asm/thread_info.h
++++ b/arch/riscv/include/asm/thread_info.h
+@@ -42,6 +42,8 @@
+
+ #ifndef __ASSEMBLY__
+
++extern long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE / sizeof(long)];
++
+ #include <asm/processor.h>
+ #include <asm/csr.h>
+
+diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
+index b938ffe129d6b..8c58aa5d2b369 100644
+--- a/arch/riscv/kernel/traps.c
++++ b/arch/riscv/kernel/traps.c
+@@ -20,9 +20,10 @@
+
+ #include <asm/asm-prototypes.h>
+ #include <asm/bug.h>
++#include <asm/csr.h>
+ #include <asm/processor.h>
+ #include <asm/ptrace.h>
+-#include <asm/csr.h>
++#include <asm/thread_info.h>
+
+ int show_unhandled_signals = 1;
+
+diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S
+index 63bc691cff91b..2c7c1c5026af3 100644
+--- a/arch/riscv/lib/uaccess.S
++++ b/arch/riscv/lib/uaccess.S
+@@ -173,6 +173,13 @@ ENTRY(__asm_copy_from_user)
+ csrc CSR_STATUS, t6
+ li a0, 0
+ ret
++
++ /* Exception fixup code */
++10:
++ /* Disable access to user memory */
++ csrc CSR_STATUS, t6
++ mv a0, t5
++ ret
+ ENDPROC(__asm_copy_to_user)
+ ENDPROC(__asm_copy_from_user)
+ EXPORT_SYMBOL(__asm_copy_to_user)
+@@ -218,19 +225,12 @@ ENTRY(__clear_user)
+ addi a0, a0, 1
+ bltu a0, a3, 5b
+ j 3b
+-ENDPROC(__clear_user)
+-EXPORT_SYMBOL(__clear_user)
+
+- .section .fixup,"ax"
+- .balign 4
+- /* Fixup code for __copy_user(10) and __clear_user(11) */
+-10:
+- /* Disable access to user memory */
+- csrs CSR_STATUS, t6
+- mv a0, t5
+- ret
++ /* Exception fixup code */
+ 11:
+- csrs CSR_STATUS, t6
++ /* Disable access to user memory */
++ csrc CSR_STATUS, t6
+ mv a0, a1
+ ret
+- .previous
++ENDPROC(__clear_user)
++EXPORT_SYMBOL(__clear_user)
+diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
+index 350e94d0cac23..d015cb1027fa1 100644
+--- a/arch/s390/kernel/process.c
++++ b/arch/s390/kernel/process.c
+@@ -91,6 +91,18 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+
+ memcpy(dst, src, arch_task_struct_size);
+ dst->thread.fpu.regs = dst->thread.fpu.fprs;
++
++ /*
++ * Don't transfer over the runtime instrumentation or the guarded
++ * storage control block pointers. These fields are cleared here instead
++ * of in copy_thread() to avoid premature freeing of associated memory
++ * on fork() failure. Wait to clear the RI flag because ->stack still
++ * refers to the source thread.
++ */
++ dst->thread.ri_cb = NULL;
++ dst->thread.gs_cb = NULL;
++ dst->thread.gs_bc_cb = NULL;
++
+ return 0;
+ }
+
+@@ -149,13 +161,11 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
+ frame->childregs.flags = 0;
+ if (new_stackp)
+ frame->childregs.gprs[15] = new_stackp;
+-
+- /* Don't copy runtime instrumentation info */
+- p->thread.ri_cb = NULL;
++ /*
++ * Clear the runtime instrumentation flag after the above childregs
++ * copy. The CB pointer was already cleared in arch_dup_task_struct().
++ */
+ frame->childregs.psw.mask &= ~PSW_MASK_RI;
+- /* Don't copy guarded storage control block */
+- p->thread.gs_cb = NULL;
+- p->thread.gs_bc_cb = NULL;
+
+ /* Set a new TLS ? */
+ if (clone_flags & CLONE_SETTLS) {
+diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
+index 212632d57db9c..c930dff312df3 100644
+--- a/arch/s390/mm/fault.c
++++ b/arch/s390/mm/fault.c
+@@ -397,7 +397,9 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
+ flags = FAULT_FLAG_DEFAULT;
+ if (user_mode(regs))
+ flags |= FAULT_FLAG_USER;
+- if (access == VM_WRITE || is_write)
++ if (is_write)
++ access = VM_WRITE;
++ if (access == VM_WRITE)
+ flags |= FAULT_FLAG_WRITE;
+ mmap_read_lock(mm);
+
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index 763ff243aeca6..a3af2a9159b1b 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -373,6 +373,7 @@ SYM_CODE_END(xen_error_entry)
+ SYM_CODE_START(\asmsym)
+ UNWIND_HINT_IRET_REGS offset=\has_error_code*8
+ ASM_CLAC
++ cld
+
+ .if \has_error_code == 0
+ pushq $-1 /* ORIG_RAX: no syscall to restart */
+@@ -440,6 +441,7 @@ SYM_CODE_END(\asmsym)
+ SYM_CODE_START(\asmsym)
+ UNWIND_HINT_IRET_REGS
+ ASM_CLAC
++ cld
+
+ pushq $-1 /* ORIG_RAX: no syscall to restart */
+
+@@ -495,6 +497,7 @@ SYM_CODE_END(\asmsym)
+ SYM_CODE_START(\asmsym)
+ UNWIND_HINT_IRET_REGS
+ ASM_CLAC
++ cld
+
+ /*
+ * If the entry is from userspace, switch stacks and treat it as
+@@ -557,6 +560,7 @@ SYM_CODE_END(\asmsym)
+ SYM_CODE_START(\asmsym)
+ UNWIND_HINT_IRET_REGS offset=8
+ ASM_CLAC
++ cld
+
+ /* paranoid_entry returns GS information for paranoid_exit in EBX. */
+ call paranoid_entry
+@@ -876,7 +880,6 @@ SYM_CODE_END(xen_failsafe_callback)
+ */
+ SYM_CODE_START_LOCAL(paranoid_entry)
+ UNWIND_HINT_FUNC
+- cld
+ PUSH_AND_CLEAR_REGS save_ret=1
+ ENCODE_FRAME_POINTER 8
+
+@@ -1012,7 +1015,6 @@ SYM_CODE_END(paranoid_exit)
+ */
+ SYM_CODE_START_LOCAL(error_entry)
+ UNWIND_HINT_FUNC
+- cld
+
+ PUSH_AND_CLEAR_REGS save_ret=1
+ ENCODE_FRAME_POINTER 8
+@@ -1155,6 +1157,7 @@ SYM_CODE_START(asm_exc_nmi)
+ */
+
+ ASM_CLAC
++ cld
+
+ /* Use %rdx as our temp variable throughout */
+ pushq %rdx
+@@ -1174,7 +1177,6 @@ SYM_CODE_START(asm_exc_nmi)
+ */
+
+ swapgs
+- cld
+ FENCE_SWAPGS_USER_ENTRY
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
+ movq %rsp, %rdx
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index 4dbb55a43dad2..266ac8263696a 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -236,6 +236,7 @@ static u64 load_latency_data(u64 status)
+ static u64 store_latency_data(u64 status)
+ {
+ union intel_x86_pebs_dse dse;
++ union perf_mem_data_src src;
+ u64 val;
+
+ dse.val = status;
+@@ -263,7 +264,14 @@ static u64 store_latency_data(u64 status)
+
+ val |= P(BLK, NA);
+
+- return val;
++ /*
++ * the pebs_data_source table is only for loads
++ * so override the mem_op to say STORE instead
++ */
++ src.val = val;
++ src.mem_op = P(OP,STORE);
++
++ return src.val;
+ }
+
+ struct pebs_record_core {
+diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
+index f455dd93f9219..6737213873911 100644
+--- a/arch/x86/events/intel/lbr.c
++++ b/arch/x86/events/intel/lbr.c
+@@ -1114,6 +1114,14 @@ static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
+
+ if (static_cpu_has(X86_FEATURE_ARCH_LBR)) {
+ reg->config = mask;
++
++ /*
++ * The Arch LBR HW can retrieve the common branch types
++ * from the LBR_INFO. It doesn't require the high overhead
++ * SW disassemble.
++ * Enable the branch type by default for the Arch LBR.
++ */
++ reg->reg |= X86_BR_TYPE_SAVE;
+ return 0;
+ }
+
+diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
+index 0f63706cdadfc..dc3ae55f79e08 100644
+--- a/arch/x86/events/intel/uncore_snb.c
++++ b/arch/x86/events/intel/uncore_snb.c
+@@ -788,6 +788,22 @@ int snb_pci2phy_map_init(int devid)
+ return 0;
+ }
+
++static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
++{
++ struct hw_perf_event *hwc = &event->hw;
++
++ /*
++ * SNB IMC counters are 32-bit and are laid out back to back
++ * in MMIO space. Therefore we must use a 32-bit accessor function.
++ * Using readq() from uncore_mmio_read_counter() causes problems
++ * because it is reading 64-bit at a time. This is okay for the
++ * uncore_perf_event_update() function because it drops the upper
++ * 32-bits but not okay for plain uncore_read_counter() as invoked
++ * in uncore_pmu_event_start().
++ */
++ return (u64)readl(box->io_addr + hwc->event_base);
++}
++
+ static struct pmu snb_uncore_imc_pmu = {
+ .task_ctx_nr = perf_invalid_context,
+ .event_init = snb_uncore_imc_event_init,
+@@ -807,7 +823,7 @@ static struct intel_uncore_ops snb_uncore_imc_ops = {
+ .disable_event = snb_uncore_imc_disable_event,
+ .enable_event = snb_uncore_imc_enable_event,
+ .hw_config = snb_uncore_imc_hw_config,
+- .read_counter = uncore_mmio_read_counter,
++ .read_counter = snb_uncore_imc_read_counter,
+ };
+
+ static struct intel_uncore_type snb_uncore_imc = {
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index be744fa100048..2b56bfef99172 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -446,7 +446,8 @@
+ #define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
+ #define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
+ #define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
+-#define X86_BUG_RETBLEED X86_BUG(26) /* CPU is affected by RETBleed */
+-#define X86_BUG_EIBRS_PBRSB X86_BUG(27) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
++#define X86_BUG_MMIO_UNKNOWN X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */
++#define X86_BUG_RETBLEED X86_BUG(27) /* CPU is affected by RETBleed */
++#define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
+
+ #endif /* _ASM_X86_CPUFEATURES_H */
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index 6a59b2d58a3a9..f5ce9a0ab2330 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -35,33 +35,56 @@
+ #define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */
+
+ /*
++ * Common helper for __FILL_RETURN_BUFFER and __FILL_ONE_RETURN.
++ */
++#define __FILL_RETURN_SLOT \
++ ANNOTATE_INTRA_FUNCTION_CALL; \
++ call 772f; \
++ int3; \
++772:
++
++/*
++ * Stuff the entire RSB.
++ *
+ * Google experimented with loop-unrolling and this turned out to be
+ * the optimal version - two calls, each with their own speculation
+ * trap should their return address end up getting used, in a loop.
+ */
+-#define __FILL_RETURN_BUFFER(reg, nr, sp) \
+- mov $(nr/2), reg; \
+-771: \
+- ANNOTATE_INTRA_FUNCTION_CALL; \
+- call 772f; \
+-773: /* speculation trap */ \
+- UNWIND_HINT_EMPTY; \
+- pause; \
+- lfence; \
+- jmp 773b; \
+-772: \
+- ANNOTATE_INTRA_FUNCTION_CALL; \
+- call 774f; \
+-775: /* speculation trap */ \
+- UNWIND_HINT_EMPTY; \
+- pause; \
+- lfence; \
+- jmp 775b; \
+-774: \
+- add $(BITS_PER_LONG/8) * 2, sp; \
+- dec reg; \
+- jnz 771b; \
+- /* barrier for jnz misprediction */ \
++#ifdef CONFIG_X86_64
++#define __FILL_RETURN_BUFFER(reg, nr) \
++ mov $(nr/2), reg; \
++771: \
++ __FILL_RETURN_SLOT \
++ __FILL_RETURN_SLOT \
++ add $(BITS_PER_LONG/8) * 2, %_ASM_SP; \
++ dec reg; \
++ jnz 771b; \
++ /* barrier for jnz misprediction */ \
++ lfence;
++#else
++/*
++ * i386 doesn't unconditionally have LFENCE, as such it can't
++ * do a loop.
++ */
++#define __FILL_RETURN_BUFFER(reg, nr) \
++ .rept nr; \
++ __FILL_RETURN_SLOT; \
++ .endr; \
++ add $(BITS_PER_LONG/8) * nr, %_ASM_SP;
++#endif
++
++/*
++ * Stuff a single RSB slot.
++ *
++ * To mitigate Post-Barrier RSB speculation, one CALL instruction must be
++ * forced to retire before letting a RET instruction execute.
++ *
++ * On PBRSB-vulnerable CPUs, it is not safe for a RET to be executed
++ * before this point.
++ */
++#define __FILL_ONE_RETURN \
++ __FILL_RETURN_SLOT \
++ add $(BITS_PER_LONG/8), %_ASM_SP; \
+ lfence;
+
+ #ifdef __ASSEMBLY__
+@@ -120,28 +143,15 @@
+ #endif
+ .endm
+
+-.macro ISSUE_UNBALANCED_RET_GUARD
+- ANNOTATE_INTRA_FUNCTION_CALL
+- call .Lunbalanced_ret_guard_\@
+- int3
+-.Lunbalanced_ret_guard_\@:
+- add $(BITS_PER_LONG/8), %_ASM_SP
+- lfence
+-.endm
+-
+ /*
+ * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
+ * monstrosity above, manually.
+ */
+-.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2
+-.ifb \ftr2
+- ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
+-.else
+- ALTERNATIVE_2 "jmp .Lskip_rsb_\@", "", \ftr, "jmp .Lunbalanced_\@", \ftr2
+-.endif
+- __FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)
+-.Lunbalanced_\@:
+- ISSUE_UNBALANCED_RET_GUARD
++.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2=ALT_NOT(X86_FEATURE_ALWAYS)
++ ALTERNATIVE_2 "jmp .Lskip_rsb_\@", \
++ __stringify(__FILL_RETURN_BUFFER(\reg,\nr)), \ftr, \
++ __stringify(__FILL_ONE_RETURN), \ftr2
++
+ .Lskip_rsb_\@:
+ .endm
+
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 977d9d75e3add..7b15f7ef760d1 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -433,7 +433,8 @@ static void __init mmio_select_mitigation(void)
+ u64 ia32_cap;
+
+ if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
+- cpu_mitigations_off()) {
++ boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
++ cpu_mitigations_off()) {
+ mmio_mitigation = MMIO_MITIGATION_OFF;
+ return;
+ }
+@@ -538,6 +539,8 @@ out:
+ pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
+ if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
+ pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
++ else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
++ pr_info("MMIO Stale Data: Unknown: No mitigations\n");
+ }
+
+ static void __init md_clear_select_mitigation(void)
+@@ -2268,6 +2271,9 @@ static ssize_t tsx_async_abort_show_state(char *buf)
+
+ static ssize_t mmio_stale_data_show_state(char *buf)
+ {
++ if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
++ return sysfs_emit(buf, "Unknown: No mitigations\n");
++
+ if (mmio_mitigation == MMIO_MITIGATION_OFF)
+ return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
+
+@@ -2414,6 +2420,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
+ return srbds_show_state(buf);
+
+ case X86_BUG_MMIO_STALE_DATA:
++ case X86_BUG_MMIO_UNKNOWN:
+ return mmio_stale_data_show_state(buf);
+
+ case X86_BUG_RETBLEED:
+@@ -2473,7 +2480,10 @@ ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *
+
+ ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+- return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
++ if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
++ return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN);
++ else
++ return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
+ }
+
+ ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 4a538ec413b8b..9c1df6222df92 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1027,7 +1027,8 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
+ #define NO_SWAPGS BIT(6)
+ #define NO_ITLB_MULTIHIT BIT(7)
+ #define NO_SPECTRE_V2 BIT(8)
+-#define NO_EIBRS_PBRSB BIT(9)
++#define NO_MMIO BIT(9)
++#define NO_EIBRS_PBRSB BIT(10)
+
+ #define VULNWL(vendor, family, model, whitelist) \
+ X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist)
+@@ -1048,6 +1049,11 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
+ VULNWL(NSC, 5, X86_MODEL_ANY, NO_SPECULATION),
+
+ /* Intel Family 6 */
++ VULNWL_INTEL(TIGERLAKE, NO_MMIO),
++ VULNWL_INTEL(TIGERLAKE_L, NO_MMIO),
++ VULNWL_INTEL(ALDERLAKE, NO_MMIO),
++ VULNWL_INTEL(ALDERLAKE_L, NO_MMIO),
++
+ VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT),
+@@ -1066,9 +1072,9 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
+ VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_AIRMONT_NP, NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+
+- VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+- VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+- VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
++ VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
++ VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
++ VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
+
+ /*
+ * Technically, swapgs isn't serializing on AMD (despite it previously
+@@ -1083,18 +1089,18 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
+ VULNWL_INTEL(ATOM_TREMONT_D, NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
+
+ /* AMD Family 0xf - 0x12 */
+- VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+- VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+- VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+- VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
++ VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
++ VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
++ VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
++ VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+
+ /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
+- VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+- VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
++ VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
++ VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+
+ /* Zhaoxin Family 7 */
+- VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS),
+- VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS),
++ VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
++ VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
+ {}
+ };
+
+@@ -1248,10 +1254,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ * Affected CPU list is generally enough to enumerate the vulnerability,
+ * but for virtualization case check for ARCH_CAP MSR bits also, VMM may
+ * not want the guest to enumerate the bug.
++ *
++ * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
++ * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
+ */
+- if (cpu_matches(cpu_vuln_blacklist, MMIO) &&
+- !arch_cap_mmio_immune(ia32_cap))
+- setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
++ if (!arch_cap_mmio_immune(ia32_cap)) {
++ if (cpu_matches(cpu_vuln_blacklist, MMIO))
++ setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
++ else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
++ setup_force_cpu_bug(X86_BUG_MMIO_UNKNOWN);
++ }
+
+ if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
+ if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
+diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
+index a1202536fc57c..3423aaea4ad85 100644
+--- a/arch/x86/kernel/unwind_orc.c
++++ b/arch/x86/kernel/unwind_orc.c
+@@ -93,22 +93,27 @@ static struct orc_entry *orc_find(unsigned long ip);
+ static struct orc_entry *orc_ftrace_find(unsigned long ip)
+ {
+ struct ftrace_ops *ops;
+- unsigned long caller;
++ unsigned long tramp_addr, offset;
+
+ ops = ftrace_ops_trampoline(ip);
+ if (!ops)
+ return NULL;
+
++ /* Set tramp_addr to the start of the code copied by the trampoline */
+ if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
+- caller = (unsigned long)ftrace_regs_call;
++ tramp_addr = (unsigned long)ftrace_regs_caller;
+ else
+- caller = (unsigned long)ftrace_call;
++ tramp_addr = (unsigned long)ftrace_caller;
++
++ /* Now place tramp_addr to the location within the trampoline ip is at */
++ offset = ip - ops->trampoline;
++ tramp_addr += offset;
+
+ /* Prevent unlikely recursion */
+- if (ip == caller)
++ if (ip == tramp_addr)
+ return NULL;
+
+- return orc_find(caller);
++ return orc_find(tramp_addr);
+ }
+ #else
+ static struct orc_entry *orc_ftrace_find(unsigned long ip)
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 95993c4efa493..1a28ba9017edb 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -1400,7 +1400,8 @@ out:
+ /* If we didn't flush the entire list, we could have told the driver
+ * there was more coming, but that turned out to be a lie.
+ */
+- if ((!list_empty(list) || errors) && q->mq_ops->commit_rqs && queued)
++ if ((!list_empty(list) || errors || needs_resource ||
++ ret == BLK_STS_DEV_RESOURCE) && q->mq_ops->commit_rqs && queued)
+ q->mq_ops->commit_rqs(hctx);
+ /*
+ * Any items that need requeuing? Stuff them into hctx->dispatch,
+@@ -2111,6 +2112,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
+ list_del_init(&rq->queuelist);
+ ret = blk_mq_request_issue_directly(rq, list_empty(list));
+ if (ret != BLK_STS_OK) {
++ errors++;
+ if (ret == BLK_STS_RESOURCE ||
+ ret == BLK_STS_DEV_RESOURCE) {
+ blk_mq_request_bypass_insert(rq, false,
+@@ -2118,7 +2120,6 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
+ break;
+ }
+ blk_mq_end_request(rq, ret);
+- errors++;
+ } else
+ queued++;
+ }
+diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
+index a3d34e3f9f94b..921a0b5a58e58 100644
+--- a/drivers/acpi/processor_thermal.c
++++ b/drivers/acpi/processor_thermal.c
+@@ -144,7 +144,7 @@ void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
+ unsigned int cpu;
+
+ for_each_cpu(cpu, policy->related_cpus) {
+- struct acpi_processor *pr = per_cpu(processors, policy->cpu);
++ struct acpi_processor *pr = per_cpu(processors, cpu);
+
+ if (pr)
+ freq_qos_remove_request(&pr->thermal_req);
+diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
+index b398909fda364..bd827533e7e83 100644
+--- a/drivers/android/binder_alloc.c
++++ b/drivers/android/binder_alloc.c
+@@ -395,12 +395,15 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
+ size_t size, data_offsets_size;
+ int ret;
+
++ mmap_read_lock(alloc->vma_vm_mm);
+ if (!binder_alloc_get_vma(alloc)) {
++ mmap_read_unlock(alloc->vma_vm_mm);
+ binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
+ "%d: binder_alloc_buf, no vma\n",
+ alloc->pid);
+ return ERR_PTR(-ESRCH);
+ }
++ mmap_read_unlock(alloc->vma_vm_mm);
+
+ data_offsets_size = ALIGN(data_size, sizeof(void *)) +
+ ALIGN(offsets_size, sizeof(void *));
+@@ -922,17 +925,25 @@ void binder_alloc_print_pages(struct seq_file *m,
+ * Make sure the binder_alloc is fully initialized, otherwise we might
+ * read inconsistent state.
+ */
+- if (binder_alloc_get_vma(alloc) != NULL) {
+- for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+- page = &alloc->pages[i];
+- if (!page->page_ptr)
+- free++;
+- else if (list_empty(&page->lru))
+- active++;
+- else
+- lru++;
+- }
++
++ mmap_read_lock(alloc->vma_vm_mm);
++ if (binder_alloc_get_vma(alloc) == NULL) {
++ mmap_read_unlock(alloc->vma_vm_mm);
++ goto uninitialized;
+ }
++
++ mmap_read_unlock(alloc->vma_vm_mm);
++ for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
++ page = &alloc->pages[i];
++ if (!page->page_ptr)
++ free++;
++ else if (list_empty(&page->lru))
++ active++;
++ else
++ lru++;
++ }
++
++uninitialized:
+ mutex_unlock(&alloc->mutex);
+ seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
+ seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
+diff --git a/drivers/base/node.c b/drivers/base/node.c
+index 0f5319b79fadc..5366d1b5359c8 100644
+--- a/drivers/base/node.c
++++ b/drivers/base/node.c
+@@ -45,7 +45,7 @@ static inline ssize_t cpumap_read(struct file *file, struct kobject *kobj,
+ return n;
+ }
+
+-static BIN_ATTR_RO(cpumap, 0);
++static BIN_ATTR_RO(cpumap, CPUMAP_FILE_MAX_BYTES);
+
+ static inline ssize_t cpulist_read(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+@@ -66,7 +66,7 @@ static inline ssize_t cpulist_read(struct file *file, struct kobject *kobj,
+ return n;
+ }
+
+-static BIN_ATTR_RO(cpulist, 0);
++static BIN_ATTR_RO(cpulist, CPULIST_FILE_MAX_BYTES);
+
+ /**
+ * struct node_access_nodes - Access class device to hold user visible
+diff --git a/drivers/base/topology.c b/drivers/base/topology.c
+index 43c0940643f5d..5df6d861bc21b 100644
+--- a/drivers/base/topology.c
++++ b/drivers/base/topology.c
+@@ -52,39 +52,39 @@ define_id_show_func(core_id);
+ static DEVICE_ATTR_RO(core_id);
+
+ define_siblings_read_func(thread_siblings, sibling_cpumask);
+-static BIN_ATTR_RO(thread_siblings, 0);
+-static BIN_ATTR_RO(thread_siblings_list, 0);
++static BIN_ATTR_RO(thread_siblings, CPUMAP_FILE_MAX_BYTES);
++static BIN_ATTR_RO(thread_siblings_list, CPULIST_FILE_MAX_BYTES);
+
+ define_siblings_read_func(core_cpus, sibling_cpumask);
+-static BIN_ATTR_RO(core_cpus, 0);
+-static BIN_ATTR_RO(core_cpus_list, 0);
++static BIN_ATTR_RO(core_cpus, CPUMAP_FILE_MAX_BYTES);
++static BIN_ATTR_RO(core_cpus_list, CPULIST_FILE_MAX_BYTES);
+
+ define_siblings_read_func(core_siblings, core_cpumask);
+-static BIN_ATTR_RO(core_siblings, 0);
+-static BIN_ATTR_RO(core_siblings_list, 0);
++static BIN_ATTR_RO(core_siblings, CPUMAP_FILE_MAX_BYTES);
++static BIN_ATTR_RO(core_siblings_list, CPULIST_FILE_MAX_BYTES);
+
+ define_siblings_read_func(die_cpus, die_cpumask);
+-static BIN_ATTR_RO(die_cpus, 0);
+-static BIN_ATTR_RO(die_cpus_list, 0);
++static BIN_ATTR_RO(die_cpus, CPUMAP_FILE_MAX_BYTES);
++static BIN_ATTR_RO(die_cpus_list, CPULIST_FILE_MAX_BYTES);
+
+ define_siblings_read_func(package_cpus, core_cpumask);
+-static BIN_ATTR_RO(package_cpus, 0);
+-static BIN_ATTR_RO(package_cpus_list, 0);
++static BIN_ATTR_RO(package_cpus, CPUMAP_FILE_MAX_BYTES);
++static BIN_ATTR_RO(package_cpus_list, CPULIST_FILE_MAX_BYTES);
+
+ #ifdef CONFIG_SCHED_BOOK
+ define_id_show_func(book_id);
+ static DEVICE_ATTR_RO(book_id);
+ define_siblings_read_func(book_siblings, book_cpumask);
+-static BIN_ATTR_RO(book_siblings, 0);
+-static BIN_ATTR_RO(book_siblings_list, 0);
++static BIN_ATTR_RO(book_siblings, CPUMAP_FILE_MAX_BYTES);
++static BIN_ATTR_RO(book_siblings_list, CPULIST_FILE_MAX_BYTES);
+ #endif
+
+ #ifdef CONFIG_SCHED_DRAWER
+ define_id_show_func(drawer_id);
+ static DEVICE_ATTR_RO(drawer_id);
+ define_siblings_read_func(drawer_siblings, drawer_cpumask);
+-static BIN_ATTR_RO(drawer_siblings, 0);
+-static BIN_ATTR_RO(drawer_siblings_list, 0);
++static BIN_ATTR_RO(drawer_siblings, CPUMAP_FILE_MAX_BYTES);
++static BIN_ATTR_RO(drawer_siblings_list, CPULIST_FILE_MAX_BYTES);
+ #endif
+
+ static struct bin_attribute *bin_attrs[] = {
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 8cba10aafadb3..79e485949b60d 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -1154,6 +1154,11 @@ loop_set_status_from_info(struct loop_device *lo,
+
+ lo->lo_offset = info->lo_offset;
+ lo->lo_sizelimit = info->lo_sizelimit;
++
++ /* loff_t vars have been assigned __u64 */
++ if (lo->lo_offset < 0 || lo->lo_sizelimit < 0)
++ return -EOVERFLOW;
++
+ memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
+ memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE);
+ lo->lo_file_name[LO_NAME_SIZE-1] = 0;
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
+index c58bcdba2c7aa..511fb8dfb4c48 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
++++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
+@@ -820,6 +820,15 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
+ if (ret == 0) {
+ ret = nouveau_fence_new(chan, false, &fence);
+ if (ret == 0) {
++ /* TODO: figure out a better solution here
++ *
++ * wait on the fence here explicitly as going through
++ * ttm_bo_move_accel_cleanup somehow doesn't seem to do it.
++ *
++ * Without this the operation can timeout and we'll fallback to a
++ * software copy, which might take several minutes to finish.
++ */
++ nouveau_fence_wait(fence, false, false);
+ ret = ttm_bo_move_accel_cleanup(bo,
+ &fence->base,
+ evict, false,
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index 148a7c5fd0e22..4b0201cf71f5e 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -67,612 +67,767 @@ static inline void i8042_write_command(int val)
+
+ #include <linux/dmi.h>
+
+-static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
++#define SERIO_QUIRK_NOKBD BIT(0)
++#define SERIO_QUIRK_NOAUX BIT(1)
++#define SERIO_QUIRK_NOMUX BIT(2)
++#define SERIO_QUIRK_FORCEMUX BIT(3)
++#define SERIO_QUIRK_UNLOCK BIT(4)
++#define SERIO_QUIRK_PROBE_DEFER BIT(5)
++#define SERIO_QUIRK_RESET_ALWAYS BIT(6)
++#define SERIO_QUIRK_RESET_NEVER BIT(7)
++#define SERIO_QUIRK_DIECT BIT(8)
++#define SERIO_QUIRK_DUMBKBD BIT(9)
++#define SERIO_QUIRK_NOLOOP BIT(10)
++#define SERIO_QUIRK_NOTIMEOUT BIT(11)
++#define SERIO_QUIRK_KBDRESET BIT(12)
++#define SERIO_QUIRK_DRITEK BIT(13)
++#define SERIO_QUIRK_NOPNP BIT(14)
++
++/* Quirk table for different mainboards. Options similar or identical to i8042
++ * module parameters.
++ * ORDERING IS IMPORTANT! The first match will be applied and the rest ignored.
++ * This allows entries to overwrite vendor wide quirks on a per device basis.
++ * Where this is irrelevant, entries are sorted case sensitive by DMI_SYS_VENDOR
++ * and/or DMI_BOARD_VENDOR to make it easier to avoid duplicate entries.
++ */
++static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ {
+- /*
+- * Arima-Rioworks HDAMB -
+- * AUX LOOP command does not raise AUX IRQ
+- */
+ .matches = {
+- DMI_MATCH(DMI_BOARD_VENDOR, "RIOWORKS"),
+- DMI_MATCH(DMI_BOARD_NAME, "HDAMB"),
+- DMI_MATCH(DMI_BOARD_VERSION, "Rev E"),
++ DMI_MATCH(DMI_SYS_VENDOR, "ALIENWARE"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Sentia"),
+ },
++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
+ },
+ {
+- /* ASUS G1S */
+ .matches = {
+- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc."),
+- DMI_MATCH(DMI_BOARD_NAME, "G1S"),
+- DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "X750LN"),
+ },
++ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
+ },
+ {
+- /* ASUS P65UP5 - AUX LOOP command does not raise AUX IRQ */
++ /* Asus X450LCP */
+ .matches = {
+- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+- DMI_MATCH(DMI_BOARD_NAME, "P/I-P65UP5"),
+- DMI_MATCH(DMI_BOARD_VERSION, "REV 2.X"),
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"),
+ },
++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_NEVER)
+ },
+ {
++ /* ASUS ZenBook UX425UA */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "X750LN"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX425UA"),
+ },
++ .driver_data = (void *)(SERIO_QUIRK_PROBE_DEFER | SERIO_QUIRK_RESET_NEVER)
+ },
+ {
++ /* ASUS ZenBook UM325UA */
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
+- DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"),
+- DMI_MATCH(DMI_PRODUCT_VERSION, "8500"),
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"),
+ },
++ .driver_data = (void *)(SERIO_QUIRK_PROBE_DEFER | SERIO_QUIRK_RESET_NEVER)
+ },
++ /*
++ * On some Asus laptops, just running self tests cause problems.
++ */
+ {
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
+- DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"),
+- DMI_MATCH(DMI_PRODUCT_VERSION, "DL760"),
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
+ },
++ .driver_data = (void *)(SERIO_QUIRK_RESET_NEVER)
+ },
+ {
+- /* Dell Embedded Box PC 3000 */
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"),
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */
+ },
++ .driver_data = (void *)(SERIO_QUIRK_RESET_NEVER)
+ },
+ {
+- /* OQO Model 01 */
++ /* ASUS P65UP5 - AUX LOOP command does not raise AUX IRQ */
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "OQO"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "ZEPTO"),
+- DMI_MATCH(DMI_PRODUCT_VERSION, "00"),
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "P/I-P65UP5"),
++ DMI_MATCH(DMI_BOARD_VERSION, "REV 2.X"),
+ },
++ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
+ },
+ {
+- /* ULI EV4873 - AUX LOOP does not work properly */
++ /* ASUS G1S */
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "ULI"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "EV4873"),
+- DMI_MATCH(DMI_PRODUCT_VERSION, "5a"),
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc."),
++ DMI_MATCH(DMI_BOARD_NAME, "G1S"),
++ DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
+ },
++ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
+ },
+ {
+- /* Microsoft Virtual Machine */
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
+- DMI_MATCH(DMI_PRODUCT_VERSION, "VS2005R2"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"),
+ },
++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
+ },
+ {
+- /* Medion MAM 2070 */
++ /* Acer Aspire 5710 */
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "MAM 2070"),
+- DMI_MATCH(DMI_PRODUCT_VERSION, "5a"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"),
+ },
++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
+ },
+ {
+- /* Medion Akoya E7225 */
++ /* Acer Aspire 7738 */
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Medion"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"),
+- DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7738"),
+ },
++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
+ },
+ {
+- /* Blue FB5601 */
++ /* Acer Aspire 5536 */
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "blue"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "FB5601"),
+- DMI_MATCH(DMI_PRODUCT_VERSION, "M606"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5536"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
+ },
++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
+ },
+ {
+- /* Gigabyte M912 */
++ /*
++ * Acer Aspire 5738z
++ * Touchpad stops working in mux mode when dis- + re-enabled
++ * with the touchpad enable/disable toggle hotkey
++ */
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "M912"),
+- DMI_MATCH(DMI_PRODUCT_VERSION, "01"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"),
+ },
++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
+ },
+ {
+- /* Gigabyte M1022M netbook */
++ /* Acer Aspire One 150 */
+ .matches = {
+- DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co.,Ltd."),
+- DMI_MATCH(DMI_BOARD_NAME, "M1022E"),
+- DMI_MATCH(DMI_BOARD_VERSION, "1.02"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"),
+ },
++ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
+ },
+ {
+- /* Gigabyte Spring Peak - defines wrong chassis type */
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "Spring Peak"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A114-31"),
+ },
++ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
+ },
+ {
+- /* Gigabyte T1005 - defines wrong chassis type ("Other") */
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "T1005"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A314-31"),
+ },
++ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
+ },
+ {
+- /* Gigabyte T1005M/P - defines wrong chassis type ("Other") */
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "T1005M/P"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-31"),
+ },
++ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
+ },
+ {
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"),
+- DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-132"),
+ },
++ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
+ },
+ {
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-332"),
+ },
++ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
+ },
+ {
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-432"),
+ },
++ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
+ },
+- { }
+-};
+-
+-/*
+- * Some Fujitsu notebooks are having trouble with touchpads if
+- * active multiplexing mode is activated. Luckily they don't have
+- * external PS/2 ports so we can safely disable it.
+- * ... apparently some Toshibas don't like MUX mode either and
+- * die horrible death on reboot.
+- */
+-static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
+ {
+- /* Fujitsu Lifebook P7010/P7010D */
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "P7010"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate Spin B118-RN"),
+ },
++ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
+ },
++ /*
++ * Some Wistron based laptops need us to explicitly enable the 'Dritek
++ * keyboard extension' to make their extra keys start generating scancodes.
++ * Originally, this was just confined to older laptops, but a few Acer laptops
++ * have turned up in 2007 that also need this again.
++ */
+ {
+- /* Fujitsu Lifebook P7010 */
1293 ++ /* Acer Aspire 5100 */
1294 + .matches = {
1295 +- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
1296 +- DMI_MATCH(DMI_PRODUCT_NAME, "0000000000"),
1297 ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
1298 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5100"),
1299 + },
1300 ++ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
1301 + },
1302 + {
1303 +- /* Fujitsu Lifebook P5020D */
1304 ++ /* Acer Aspire 5610 */
1305 + .matches = {
1306 +- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
1307 +- DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P Series"),
1308 ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
1309 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"),
1310 + },
1311 ++ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
1312 + },
1313 + {
1314 +- /* Fujitsu Lifebook S2000 */
1315 ++ /* Acer Aspire 5630 */
1316 + .matches = {
1317 +- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
1318 +- DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S Series"),
1319 ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
1320 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5630"),
1321 + },
1322 ++ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
1323 + },
1324 + {
1325 +- /* Fujitsu Lifebook S6230 */
1326 ++ /* Acer Aspire 5650 */
1327 + .matches = {
1328 +- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
1329 +- DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S6230"),
1330 ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
1331 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5650"),
1332 + },
1333 ++ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
1334 + },
1335 + {
1336 +- /* Fujitsu Lifebook T725 laptop */
1337 ++ /* Acer Aspire 5680 */
1338 + .matches = {
1339 +- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
1340 +- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T725"),
1341 ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
1342 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5680"),
1343 + },
1344 ++ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
1345 + },
1346 + {
1347 +- /* Fujitsu Lifebook U745 */
1348 ++ /* Acer Aspire 5720 */
1349 + .matches = {
1350 +- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
1351 +- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U745"),
1352 ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
1353 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5720"),
1354 + },
1355 ++ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
1356 + },
1357 + {
1358 +- /* Fujitsu T70H */
1359 ++ /* Acer Aspire 9110 */
1360 + .matches = {
1361 +- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
1362 +- DMI_MATCH(DMI_PRODUCT_NAME, "FMVLT70H"),
1363 ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
1364 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 9110"),
1365 + },
1366 ++ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
1367 + },
1368 + {
1369 +- /* Fujitsu-Siemens Lifebook T3010 */
1370 ++ /* Acer TravelMate 660 */
1371 + .matches = {
1372 +- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
1373 +- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T3010"),
1374 ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
1375 ++ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 660"),
1376 + },
1377 ++ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
1378 + },
1379 + {
1380 +- /* Fujitsu-Siemens Lifebook E4010 */
1381 ++ /* Acer TravelMate 2490 */
1382 + .matches = {
1383 +- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
1384 +- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E4010"),
1385 ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
1386 ++ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"),
1387 + },
1388 ++ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
1389 + },
1390 + {
1391 +- /* Fujitsu-Siemens Amilo Pro 2010 */
1392 ++ /* Acer TravelMate 4280 */
1393 + .matches = {
1394 +- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
1395 +- DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pro V2010"),
1396 ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
1397 ++ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4280"),
1398 + },
1399 ++ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
1400 + },
1401 + {
1402 +- /* Fujitsu-Siemens Amilo Pro 2030 */
1403 ++ /* Amoi M636/A737 */
1404 + .matches = {
1405 +- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
1406 +- DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"),
1407 ++ DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."),
1408 ++ DMI_MATCH(DMI_PRODUCT_NAME, "M636/A737 platform"),
1409 + },
1410 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
1411 + },
1412 + {
1413 +- /*
1414 +- * No data is coming from the touchscreen unless KBC
1415 +- * is in legacy mode.
1416 +- */
1417 +- /* Panasonic CF-29 */
1418 + .matches = {
1419 +- DMI_MATCH(DMI_SYS_VENDOR, "Matsushita"),
1420 +- DMI_MATCH(DMI_PRODUCT_NAME, "CF-29"),
1421 ++ DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
1422 ++ DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),
1423 + },
1424 ++ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
1425 + },
1426 + {
1427 +- /*
1428 +- * HP Pavilion DV4017EA -
1429 +- * errors on MUX ports are reported without raising AUXDATA
1430 +- * causing "spurious NAK" messages.
1431 +- */
1432 ++ /* Compal HEL80I */
1433 + .matches = {
1434 +- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1435 +- DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EA032EA#ABF)"),
1436 ++ DMI_MATCH(DMI_SYS_VENDOR, "COMPAL"),
1437 ++ DMI_MATCH(DMI_PRODUCT_NAME, "HEL80I"),
1438 + },
1439 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
1440 + },
1441 + {
1442 +- /*
1443 +- * HP Pavilion ZT1000 -
1444 +- * like DV4017EA does not raise AUXERR for errors on MUX ports.
1445 +- */
1446 + .matches = {
1447 +- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1448 +- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Notebook PC"),
1449 +- DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook ZT1000"),
1450 ++ DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
1451 ++ DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant"),
1452 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "8500"),
1453 + },
1454 ++ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
1455 + },
1456 + {
1457 +- /*
1458 +- * HP Pavilion DV4270ca -
1459 +- * like DV4017EA does not raise AUXERR for errors on MUX ports.
1460 +- */
1461 + .matches = {
1462 +- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1463 +- DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EH476UA#ABL)"),
1464 ++ DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
1465 ++ DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant"),
1466 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "DL760"),
1467 + },
1468 ++ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
1469 + },
1470 + {
1471 ++ /* Advent 4211 */
1472 + .matches = {
1473 +- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
1474 +- DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P10"),
1475 ++ DMI_MATCH(DMI_SYS_VENDOR, "DIXONSXP"),
1476 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Advent 4211"),
1477 + },
1478 ++ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
1479 + },
1480 + {
1481 ++ /* Dell Embedded Box PC 3000 */
1482 + .matches = {
1483 +- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
1484 +- DMI_MATCH(DMI_PRODUCT_NAME, "EQUIUM A110"),
1485 ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1486 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"),
1487 + },
1488 ++ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
1489 + },
1490 + {
1491 ++ /* Dell XPS M1530 */
1492 + .matches = {
1493 +- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
1494 +- DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE C850D"),
1495 ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1496 ++ DMI_MATCH(DMI_PRODUCT_NAME, "XPS M1530"),
1497 + },
1498 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
1499 + },
1500 + {
1501 ++ /* Dell Vostro 1510 */
1502 + .matches = {
1503 +- DMI_MATCH(DMI_SYS_VENDOR, "ALIENWARE"),
1504 +- DMI_MATCH(DMI_PRODUCT_NAME, "Sentia"),
1505 ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1506 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"),
1507 + },
1508 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
1509 + },
1510 + {
1511 +- /* Sharp Actius MM20 */
1512 ++ /* Dell Vostro V13 */
1513 + .matches = {
1514 +- DMI_MATCH(DMI_SYS_VENDOR, "SHARP"),
1515 +- DMI_MATCH(DMI_PRODUCT_NAME, "PC-MM20 Series"),
1516 ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1517 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
1518 + },
1519 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_NOTIMEOUT)
1520 + },
1521 + {
1522 +- /* Sony Vaio FS-115b */
1523 ++ /* Dell Vostro 1320 */
1524 + .matches = {
1525 +- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
1526 +- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FS115B"),
1527 ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1528 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1320"),
1529 + },
1530 ++ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
1531 + },
1532 + {
1533 +- /*
1534 +- * Sony Vaio FZ-240E -
1535 +- * reset and GET ID commands issued via KBD port are
1536 +- * sometimes being delivered to AUX3.
1537 +- */
1538 ++ /* Dell Vostro 1520 */
1539 + .matches = {
1540 +- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
1541 +- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ240E"),
1542 ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1543 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1520"),
1544 + },
1545 ++ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
1546 + },
1547 + {
1548 +- /*
1549 +- * Most (all?) VAIOs do not have external PS/2 ports nor
1550 +- * they implement active multiplexing properly, and
1551 +- * MUX discovery usually messes up keyboard/touchpad.
1552 +- */
1553 ++ /* Dell Vostro 1720 */
1554 + .matches = {
1555 +- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
1556 +- DMI_MATCH(DMI_BOARD_NAME, "VAIO"),
1557 ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1558 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1720"),
1559 + },
1560 ++ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
1561 + },
1562 + {
1563 +- /* Amoi M636/A737 */
1564 ++ /* Entroware Proteus */
1565 + .matches = {
1566 +- DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."),
1567 +- DMI_MATCH(DMI_PRODUCT_NAME, "M636/A737 platform"),
1568 ++ DMI_MATCH(DMI_SYS_VENDOR, "Entroware"),
1569 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"),
1570 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"),
1571 + },
1572 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS)
1573 + },
1574 ++ /*
1575 ++ * Some Fujitsu notebooks are having trouble with touchpads if
1576 ++ * active multiplexing mode is activated. Luckily they don't have
1577 ++ * external PS/2 ports so we can safely disable it.
1578 ++ * ... apparently some Toshibas don't like MUX mode either and
1579 ++ * die horrible death on reboot.
1580 ++ */
1581 + {
1582 +- /* Lenovo 3000 n100 */
1583 ++ /* Fujitsu Lifebook P7010/P7010D */
1584 + .matches = {
1585 +- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1586 +- DMI_MATCH(DMI_PRODUCT_NAME, "076804U"),
1587 ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
1588 ++ DMI_MATCH(DMI_PRODUCT_NAME, "P7010"),
1589 + },
1590 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
1591 + },
1592 + {
1593 +- /* Lenovo XiaoXin Air 12 */
1594 ++ /* Fujitsu Lifebook P5020D */
1595 + .matches = {
1596 +- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1597 +- DMI_MATCH(DMI_PRODUCT_NAME, "80UN"),
1598 ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
1599 ++ DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P Series"),
1600 + },
1601 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
1602 + },
1603 + {
1604 ++ /* Fujitsu Lifebook S2000 */
1605 + .matches = {
1606 +- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
1607 +- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"),
1608 ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
1609 ++ DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S Series"),
1610 + },
1611 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
1612 + },
1613 + {
1614 +- /* Acer Aspire 5710 */
1615 ++ /* Fujitsu Lifebook S6230 */
1616 + .matches = {
1617 +- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
1618 +- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"),
1619 ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
1620 ++ DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S6230"),
1621 + },
1622 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
1623 + },
1624 + {
1625 +- /* Acer Aspire 7738 */
1626 ++ /* Fujitsu Lifebook T725 laptop */
1627 + .matches = {
1628 +- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
1629 +- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7738"),
1630 ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
1631 ++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T725"),
1632 + },
1633 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_NOTIMEOUT)
1634 + },
1635 + {
1636 +- /* Gericom Bellagio */
1637 ++ /* Fujitsu Lifebook U745 */
1638 + .matches = {
1639 +- DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
1640 +- DMI_MATCH(DMI_PRODUCT_NAME, "N34AS6"),
1641 ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
1642 ++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U745"),
1643 + },
1644 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
1645 + },
1646 + {
1647 +- /* IBM 2656 */
1648 ++ /* Fujitsu T70H */
1649 + .matches = {
1650 +- DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
1651 +- DMI_MATCH(DMI_PRODUCT_NAME, "2656"),
1652 ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
1653 ++ DMI_MATCH(DMI_PRODUCT_NAME, "FMVLT70H"),
1654 + },
1655 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
1656 + },
1657 + {
1658 +- /* Dell XPS M1530 */
1659 ++ /* Fujitsu A544 laptop */
1660 ++ /* https://bugzilla.redhat.com/show_bug.cgi?id=1111138 */
1661 + .matches = {
1662 +- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1663 +- DMI_MATCH(DMI_PRODUCT_NAME, "XPS M1530"),
1664 ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
1665 ++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK A544"),
1666 + },
1667 ++ .driver_data = (void *)(SERIO_QUIRK_NOTIMEOUT)
1668 + },
1669 + {
1670 +- /* Compal HEL80I */
1671 ++ /* Fujitsu AH544 laptop */
1672 ++ /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
1673 + .matches = {
1674 +- DMI_MATCH(DMI_SYS_VENDOR, "COMPAL"),
1675 +- DMI_MATCH(DMI_PRODUCT_NAME, "HEL80I"),
1676 ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
1677 ++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK AH544"),
1678 + },
1679 ++ .driver_data = (void *)(SERIO_QUIRK_NOTIMEOUT)
1680 + },
1681 + {
1682 +- /* Dell Vostro 1510 */
1683 ++ /* Fujitsu U574 laptop */
1684 ++ /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
1685 + .matches = {
1686 +- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1687 +- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"),
1688 ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
1689 ++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"),
1690 + },
1691 ++ .driver_data = (void *)(SERIO_QUIRK_NOTIMEOUT)
1692 + },
1693 + {
1694 +- /* Acer Aspire 5536 */
1695 ++ /* Fujitsu UH554 laptop */
1696 + .matches = {
1697 +- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
1698 +- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5536"),
1699 +- DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
1700 ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
1701 ++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"),
1702 + },
1703 ++ .driver_data = (void *)(SERIO_QUIRK_NOTIMEOUT)
1704 + },
1705 + {
1706 +- /* Dell Vostro V13 */
1707 ++ /* Fujitsu Lifebook P7010 */
1708 + .matches = {
1709 +- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1710 +- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
1711 ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
1712 ++ DMI_MATCH(DMI_PRODUCT_NAME, "0000000000"),
1713 + },
1714 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
1715 + },
1716 + {
1717 +- /* Newer HP Pavilion dv4 models */
1718 ++ /* Fujitsu-Siemens Lifebook T3010 */
1719 + .matches = {
1720 +- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1721 +- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
1722 ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
1723 ++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T3010"),
1724 + },
1725 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
1726 + },
1727 + {
1728 +- /* Asus X450LCP */
1729 ++ /* Fujitsu-Siemens Lifebook E4010 */
1730 + .matches = {
1731 +- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
1732 +- DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"),
1733 ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
1734 ++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E4010"),
1735 + },
1736 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
1737 + },
1738 + {
1739 +- /* Avatar AVIU-145A6 */
1740 ++ /* Fujitsu-Siemens Amilo Pro 2010 */
1741 + .matches = {
1742 +- DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
1743 +- DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"),
1744 ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
1745 ++ DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pro V2010"),
1746 + },
1747 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
1748 + },
1749 + {
1750 +- /* TUXEDO BU1406 */
1751 ++ /* Fujitsu-Siemens Amilo Pro 2030 */
1752 + .matches = {
1753 +- DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
1754 +- DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
1755 ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
1756 ++ DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"),
1757 + },
1758 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
1759 + },
1760 + {
1761 +- /* Lenovo LaVie Z */
1762 ++ /* Gigabyte M912 */
1763 + .matches = {
1764 +- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1765 +- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"),
1766 ++ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
1767 ++ DMI_MATCH(DMI_PRODUCT_NAME, "M912"),
1768 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "01"),
1769 + },
1770 ++ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
1771 + },
1772 + {
1773 +- /*
1774 +- * Acer Aspire 5738z
1775 +- * Touchpad stops working in mux mode when dis- + re-enabled
1776 +- * with the touchpad enable/disable toggle hotkey
1777 +- */
1778 ++ /* Gigabyte Spring Peak - defines wrong chassis type */
1779 + .matches = {
1780 +- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
1781 +- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"),
1782 ++ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
1783 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Spring Peak"),
1784 + },
1785 ++ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
1786 + },
1787 + {
1788 +- /* Entroware Proteus */
1789 ++ /* Gigabyte T1005 - defines wrong chassis type ("Other") */
1790 + .matches = {
1791 +- DMI_MATCH(DMI_SYS_VENDOR, "Entroware"),
1792 +- DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"),
1793 +- DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"),
1794 ++ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
1795 ++ DMI_MATCH(DMI_PRODUCT_NAME, "T1005"),
1796 + },
1797 ++ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
1798 ++ },
1799 ++ {
1800 ++ /* Gigabyte T1005M/P - defines wrong chassis type ("Other") */
1801 ++ .matches = {
1802 ++ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
1803 ++ DMI_MATCH(DMI_PRODUCT_NAME, "T1005M/P"),
1804 ++ },
1805 ++ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
1806 ++ },
1807 ++ /*
1808 ++ * Some laptops need keyboard reset before probing for the trackpad to get
1809 ++ * it detected, initialised & finally work.
1810 ++ */
1811 ++ {
1812 ++ /* Gigabyte P35 v2 - Elantech touchpad */
1813 ++ .matches = {
1814 ++ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
1815 ++ DMI_MATCH(DMI_PRODUCT_NAME, "P35V2"),
1816 ++ },
1817 ++ .driver_data = (void *)(SERIO_QUIRK_KBDRESET)
1818 ++ },
1819 ++ {
1820 ++ /* Aorus branded Gigabyte X3 Plus - Elantech touchpad */
1821 ++ .matches = {
1822 ++ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
1823 ++ DMI_MATCH(DMI_PRODUCT_NAME, "X3"),
1824 ++ },
1825 ++ .driver_data = (void *)(SERIO_QUIRK_KBDRESET)
1826 ++ },
1827 ++ {
1828 ++ /* Gigabyte P34 - Elantech touchpad */
1829 ++ .matches = {
1830 ++ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
1831 ++ DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
1832 ++ },
1833 ++ .driver_data = (void *)(SERIO_QUIRK_KBDRESET)
1834 ++ },
1835 ++ {
1836 ++ /* Gigabyte P57 - Elantech touchpad */
1837 ++ .matches = {
1838 ++ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
1839 ++ DMI_MATCH(DMI_PRODUCT_NAME, "P57"),
1840 ++ },
1841 ++ .driver_data = (void *)(SERIO_QUIRK_KBDRESET)
1842 ++ },
1843 ++ {
1844 ++ /* Gericom Bellagio */
1845 ++ .matches = {
1846 ++ DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
1847 ++ DMI_MATCH(DMI_PRODUCT_NAME, "N34AS6"),
1848 ++ },
1849 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
1850 ++ },
1851 ++ {
1852 ++ /* Gigabyte M1022M netbook */
1853 ++ .matches = {
1854 ++ DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co.,Ltd."),
1855 ++ DMI_MATCH(DMI_BOARD_NAME, "M1022E"),
1856 ++ DMI_MATCH(DMI_BOARD_VERSION, "1.02"),
1857 ++ },
1858 ++ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
1859 ++ },
1860 ++ {
1861 ++ .matches = {
1862 ++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1863 ++ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"),
1864 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"),
1865 ++ },
1866 ++ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
1867 + },
1868 +- { }
1869 +-};
1870 +-
1871 +-static const struct dmi_system_id i8042_dmi_forcemux_table[] __initconst = {
1872 + {
1873 + /*
1874 +- * Sony Vaio VGN-CS series require MUX or the touch sensor
1875 +- * buttons will disturb touchpad operation
1876 ++ * HP Pavilion DV4017EA -
1877 ++ * errors on MUX ports are reported without raising AUXDATA
1878 ++ * causing "spurious NAK" messages.
1879 + */
1880 + .matches = {
1881 +- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
1882 +- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-CS"),
1883 ++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1884 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EA032EA#ABF)"),
1885 + },
1886 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
1887 + },
1888 +- { }
1889 +-};
1890 +-
1891 +-/*
1892 +- * On some Asus laptops, just running self tests cause problems.
1893 +- */
1894 +-static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
1895 + {
1896 ++ /*
1897 ++ * HP Pavilion ZT1000 -
1898 ++ * like DV4017EA does not raise AUXERR for errors on MUX ports.
1899 ++ */
1900 + .matches = {
1901 +- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
1902 +- DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
1903 ++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1904 ++ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Notebook PC"),
1905 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook ZT1000"),
1906 + },
1907 +- }, {
1908 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
1909 ++ },
1910 ++ {
1911 ++ /*
1912 ++ * HP Pavilion DV4270ca -
1913 ++ * like DV4017EA does not raise AUXERR for errors on MUX ports.
1914 ++ */
1915 + .matches = {
1916 +- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
1917 +- DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */
1918 ++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1919 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EH476UA#ABL)"),
1920 + },
1921 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
1922 + },
1923 +- { }
1924 +-};
1925 +-static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
1926 + {
1927 +- /* MSI Wind U-100 */
1928 ++ /* Newer HP Pavilion dv4 models */
1929 + .matches = {
1930 +- DMI_MATCH(DMI_BOARD_NAME, "U-100"),
1931 +- DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
1932 ++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1933 ++ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
1934 + },
1935 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_NOTIMEOUT)
1936 + },
1937 + {
1938 +- /* LG Electronics X110 */
1939 ++ /* IBM 2656 */
1940 + .matches = {
1941 +- DMI_MATCH(DMI_BOARD_NAME, "X110"),
1942 +- DMI_MATCH(DMI_BOARD_VENDOR, "LG Electronics Inc."),
1943 ++ DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
1944 ++ DMI_MATCH(DMI_PRODUCT_NAME, "2656"),
1945 + },
1946 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
1947 + },
1948 + {
1949 +- /* Acer Aspire One 150 */
1950 ++ /* Avatar AVIU-145A6 */
1951 + .matches = {
1952 +- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
1953 +- DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"),
1954 ++ DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
1955 ++ DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"),
1956 + },
1957 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
1958 + },
1959 + {
1960 ++ /* Intel MBO Desktop D845PESV */
1961 + .matches = {
1962 +- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
1963 +- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A114-31"),
1964 ++ DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
1965 ++ DMI_MATCH(DMI_BOARD_NAME, "D845PESV"),
1966 + },
1967 ++ .driver_data = (void *)(SERIO_QUIRK_NOPNP)
1968 + },
1969 + {
1970 ++ /*
1971 ++ * Intel NUC D54250WYK - does not have i8042 controller but
1972 ++ * declares PS/2 devices in DSDT.
1973 ++ */
1974 + .matches = {
1975 +- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
1976 +- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A314-31"),
1977 ++ DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
1978 ++ DMI_MATCH(DMI_BOARD_NAME, "D54250WYK"),
1979 + },
1980 ++ .driver_data = (void *)(SERIO_QUIRK_NOPNP)
1981 + },
1982 + {
1983 ++ /* Lenovo 3000 n100 */
1984 + .matches = {
1985 +- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
1986 +- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-31"),
1987 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1988 ++ DMI_MATCH(DMI_PRODUCT_NAME, "076804U"),
1989 + },
1990 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
1991 + },
1992 + {
1993 ++ /* Lenovo XiaoXin Air 12 */
1994 + .matches = {
1995 +- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
1996 +- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-132"),
1997 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1998 ++ DMI_MATCH(DMI_PRODUCT_NAME, "80UN"),
1999 + },
2000 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
2001 + },
2002 + {
2003 ++ /* Lenovo LaVie Z */
2004 + .matches = {
2005 +- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
2006 +- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-332"),
2007 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
2008 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"),
2009 + },
2010 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
2011 + },
2012 + {
2013 ++ /* Lenovo Ideapad U455 */
2014 + .matches = {
2015 +- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
2016 +- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-432"),
2017 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
2018 ++ DMI_MATCH(DMI_PRODUCT_NAME, "20046"),
2019 + },
2020 ++ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
2021 + },
2022 + {
2023 ++ /* Lenovo ThinkPad L460 */
2024 + .matches = {
2025 +- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
2026 +- DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate Spin B118-RN"),
2027 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
2028 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L460"),
2029 + },
2030 ++ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
2031 + },
2032 + {
2033 +- /* Advent 4211 */
2034 ++ /* Lenovo ThinkPad Twist S230u */
2035 + .matches = {
2036 +- DMI_MATCH(DMI_SYS_VENDOR, "DIXONSXP"),
2037 +- DMI_MATCH(DMI_PRODUCT_NAME, "Advent 4211"),
2038 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
2039 ++ DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"),
2040 + },
2041 ++ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
2042 ++ },
2043 ++ {
2044 ++ /* LG Electronics X110 */
2045 ++ .matches = {
2046 ++ DMI_MATCH(DMI_BOARD_VENDOR, "LG Electronics Inc."),
2047 ++ DMI_MATCH(DMI_BOARD_NAME, "X110"),
2048 ++ },
2049 ++ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
2050 + },
2051 + {
2052 + /* Medion Akoya Mini E1210 */
2053 +@@ -680,6 +835,7 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
2054 + DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
2055 + DMI_MATCH(DMI_PRODUCT_NAME, "E1210"),
2056 + },
2057 ++ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
2058 + },
2059 + {
2060 + /* Medion Akoya E1222 */
2061 +@@ -687,331 +843,434 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
2062 + DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
2063 + DMI_MATCH(DMI_PRODUCT_NAME, "E122X"),
2064 + },
2065 ++ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
2066 + },
2067 + {
2068 +- /* Mivvy M310 */
2069 ++ /* MSI Wind U-100 */
2070 + .matches = {
2071 +- DMI_MATCH(DMI_SYS_VENDOR, "VIOOO"),
2072 +- DMI_MATCH(DMI_PRODUCT_NAME, "N10"),
2073 ++ DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
2074 ++ DMI_MATCH(DMI_BOARD_NAME, "U-100"),
2075 + },
2076 ++ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOPNP)
2077 + },
2078 + {
2079 +- /* Dell Vostro 1320 */
2080 ++ /*
2081 ++ * No data is coming from the touchscreen unless KBC
2082 ++ * is in legacy mode.
2083 ++ */
2084 ++ /* Panasonic CF-29 */
2085 + .matches = {
2086 +- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
2087 +- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1320"),
2088 ++ DMI_MATCH(DMI_SYS_VENDOR, "Matsushita"),
2089 ++ DMI_MATCH(DMI_PRODUCT_NAME, "CF-29"),
2090 + },
2091 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
2092 + },
2093 + {
2094 +- /* Dell Vostro 1520 */
2095 ++ /* Medion Akoya E7225 */
2096 + .matches = {
2097 +- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
2098 +- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1520"),
2099 ++ DMI_MATCH(DMI_SYS_VENDOR, "Medion"),
2100 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"),
2101 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
2102 + },
2103 ++ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
2104 + },
2105 + {
2106 +- /* Dell Vostro 1720 */
2107 ++ /* Microsoft Virtual Machine */
2108 + .matches = {
2109 +- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
2110 +- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1720"),
2111 ++ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
2112 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
2113 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "VS2005R2"),
2114 + },
2115 ++ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
2116 + },
2117 + {
2118 +- /* Lenovo Ideapad U455 */
2119 ++ /* Medion MAM 2070 */
2120 + .matches = {
2121 +- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
2122 +- DMI_MATCH(DMI_PRODUCT_NAME, "20046"),
2123 ++ DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
2124 ++ DMI_MATCH(DMI_PRODUCT_NAME, "MAM 2070"),
2125 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "5a"),
2126 + },
2127 ++ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
2128 + },
2129 + {
2130 +- /* Lenovo ThinkPad L460 */
2131 ++ /* TUXEDO BU1406 */
2132 + .matches = {
2133 +- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
2134 +- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L460"),
2135 ++ DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
2136 ++ DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
2137 ++ },
2138 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
2139 ++ },
2140 ++ {
2141 ++ /* OQO Model 01 */
2142 ++ .matches = {
2143 ++ DMI_MATCH(DMI_SYS_VENDOR, "OQO"),
2144 ++ DMI_MATCH(DMI_PRODUCT_NAME, "ZEPTO"),
2145 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "00"),
2146 ++ },
2147 ++ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
2148 ++ },
2149 ++ {
2150 ++ .matches = {
2151 ++ DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
2152 ++ DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
2153 ++ },
2154 ++ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
2155 ++ },
2156 ++ {
2157 ++ /* Acer Aspire 5 A515 */
2158 ++ .matches = {
2159 ++ DMI_MATCH(DMI_BOARD_VENDOR, "PK"),
2160 ++ DMI_MATCH(DMI_BOARD_NAME, "Grumpy_PK"),
2161 ++ },
2162 ++ .driver_data = (void *)(SERIO_QUIRK_NOPNP)
2163 ++ },
2164 ++ {
2165 ++ /* ULI EV4873 - AUX LOOP does not work properly */
2166 ++ .matches = {
2167 ++ DMI_MATCH(DMI_SYS_VENDOR, "ULI"),
2168 ++ DMI_MATCH(DMI_PRODUCT_NAME, "EV4873"),
2169 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "5a"),
2170 ++ },
2171 ++ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
2172 ++ },
2173 ++ {
2174 ++ /*
2175 ++ * Arima-Rioworks HDAMB -
2176 ++ * AUX LOOP command does not raise AUX IRQ
2177 ++ */
2178 ++ .matches = {
2179 ++ DMI_MATCH(DMI_BOARD_VENDOR, "RIOWORKS"),
2180 ++ DMI_MATCH(DMI_BOARD_NAME, "HDAMB"),
2181 ++ DMI_MATCH(DMI_BOARD_VERSION, "Rev E"),
2182 + },
2183 ++ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
2184 + },
2185 + {
2186 +- /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
2187 ++ /* Sharp Actius MM20 */
2188 + .matches = {
2189 +- DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
2190 +- DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
2191 ++ DMI_MATCH(DMI_SYS_VENDOR, "SHARP"),
2192 ++ DMI_MATCH(DMI_PRODUCT_NAME, "PC-MM20 Series"),
2193 + },
2194 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
2195 + },
2196 + {
2197 +- /* Lenovo ThinkPad Twist S230u */
2198 ++ /*
2199 ++ * Sony Vaio FZ-240E -
2200 ++ * reset and GET ID commands issued via KBD port are
2201 ++ * sometimes being delivered to AUX3.
2202 ++ */
2203 + .matches = {
2204 +- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
2205 +- DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"),
2206 ++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
2207 ++ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ240E"),
2208 + },
2209 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
2210 + },
2211 + {
2212 +- /* Entroware Proteus */
2213 ++ /*
2214 ++ * Most (all?) VAIOs do not have external PS/2 ports nor
2215 ++ * they implement active multiplexing properly, and
2216 ++ * MUX discovery usually messes up keyboard/touchpad.
2217 ++ */
2218 + .matches = {
2219 +- DMI_MATCH(DMI_SYS_VENDOR, "Entroware"),
2220 +- DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"),
2221 +- DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"),
2222 ++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
2223 ++ DMI_MATCH(DMI_BOARD_NAME, "VAIO"),
2224 + },
2225 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
2226 + },
2227 +- { }
2228 +-};
2229 +-
2230 +-#ifdef CONFIG_PNP
2231 +-static const struct dmi_system_id __initconst i8042_dmi_nopnp_table[] = {
2232 + {
2233 +- /* Intel MBO Desktop D845PESV */
2234 ++ /* Sony Vaio FS-115b */
2235 + .matches = {
2236 +- DMI_MATCH(DMI_BOARD_NAME, "D845PESV"),
2237 +- DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
2238 ++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
2239 ++ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FS115B"),
2240 + },
2241 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
2242 + },
2243 + {
2244 + /*
2245 +- * Intel NUC D54250WYK - does not have i8042 controller but
2246 +- * declares PS/2 devices in DSDT.
2247 ++ * Sony Vaio VGN-CS series require MUX or the touch sensor
2248 ++ * buttons will disturb touchpad operation
2249 + */
2250 + .matches = {
2251 +- DMI_MATCH(DMI_BOARD_NAME, "D54250WYK"),
2252 +- DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
2253 ++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
2254 ++ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-CS"),
2255 + },
2256 ++ .driver_data = (void *)(SERIO_QUIRK_FORCEMUX)
2257 + },
2258 + {
2259 +- /* MSI Wind U-100 */
2260 + .matches = {
2261 +- DMI_MATCH(DMI_BOARD_NAME, "U-100"),
2262 +- DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
2263 ++ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
2264 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P10"),
2265 + },
2266 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
2267 + },
2268 + {
2269 +- /* Acer Aspire 5 A515 */
2270 + .matches = {
2271 +- DMI_MATCH(DMI_BOARD_NAME, "Grumpy_PK"),
2272 +- DMI_MATCH(DMI_BOARD_VENDOR, "PK"),
2273 ++ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
2274 ++ DMI_MATCH(DMI_PRODUCT_NAME, "EQUIUM A110"),
2275 + },
2276 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
2277 + },
2278 +- { }
2279 +-};
2280 +-
2281 +-static const struct dmi_system_id __initconst i8042_dmi_laptop_table[] = {
2282 + {
2283 + .matches = {
2284 +- DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */
2285 ++ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
2286 ++ DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE C850D"),
2287 + },
2288 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
2289 + },
2290 ++ /*
2291 ++ * A lot of modern Clevo barebones have touchpad and/or keyboard issues
2292 ++ * after suspend fixable with nomux + reset + noloop + nopnp. Luckily,
2293 ++ * none of them have an external PS/2 port so this can safely be set for
2294 ++ * all of them. These two are based on a Clevo design, but have the
2295 ++ * board_name changed.
2296 ++ */
2297 + {
2298 + .matches = {
2299 +- DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /* Laptop */
2300 ++ DMI_MATCH(DMI_BOARD_VENDOR, "TUXEDO"),
2301 ++ DMI_MATCH(DMI_BOARD_NAME, "AURA1501"),
2302 + },
2303 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
2304 ++ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
2305 + },
2306 + {
2307 + .matches = {
2308 +- DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
2309 ++ DMI_MATCH(DMI_BOARD_VENDOR, "TUXEDO"),
2310 ++ DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"),
2311 + },
2312 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
2313 ++ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
2314 + },
2315 + {
2316 ++ /* Mivvy M310 */
2317 + .matches = {
2318 +- DMI_MATCH(DMI_CHASSIS_TYPE, "14"), /* Sub-Notebook */
2319 ++ DMI_MATCH(DMI_SYS_VENDOR, "VIOOO"),
2320 ++ DMI_MATCH(DMI_PRODUCT_NAME, "N10"),
2321 + },
2322 ++ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
2323 + },
2324 +- { }
2325 +-};
2326 +-#endif
2327 +-
2328 +-static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
2329 ++ /*
2330 ++ * Some laptops need keyboard reset before probing for the trackpad to get
2331 ++ * it detected, initialised & finally work.
2332 ++ */
2333 + {
2334 +- /* Dell Vostro V13 */
2335 ++ /* Schenker XMG C504 - Elantech touchpad */
2336 + .matches = {
2337 +- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
2338 +- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
2339 ++ DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
2340 ++ DMI_MATCH(DMI_PRODUCT_NAME, "C504"),
2341 + },
2342 ++ .driver_data = (void *)(SERIO_QUIRK_KBDRESET)
2343 + },
2344 + {
2345 +- /* Newer HP Pavilion dv4 models */
2346 ++ /* Blue FB5601 */
2347 + .matches = {
2348 +- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2349 +- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
2350 ++ DMI_MATCH(DMI_SYS_VENDOR, "blue"),
2351 ++ DMI_MATCH(DMI_PRODUCT_NAME, "FB5601"),
2352 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "M606"),
2353 + },
2354 ++ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
2355 + },
2356 ++ /*
2357 ++ * A lot of modern Clevo barebones have touchpad and/or keyboard issues
2358 ++ * after suspend fixable with nomux + reset + noloop + nopnp. Luckily,
2359 ++ * none of them have an external PS/2 port so this can safely be set for
2360 ++ * all of them.
2361 ++ * Clevo barebones come with board_vendor and/or system_vendor set to
2362 ++ * either the very generic string "Notebook" and/or a different value
2363 ++ * for each individual reseller. The only somewhat universal way to
2364 ++ * identify them is by board_name.
2365 ++ */
2366 + {
2367 +- /* Fujitsu A544 laptop */
2368 +- /* https://bugzilla.redhat.com/show_bug.cgi?id=1111138 */
2369 + .matches = {
2370 +- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
2371 +- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK A544"),
2372 ++ DMI_MATCH(DMI_BOARD_NAME, "LAPQC71A"),
2373 + },
2374 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
2375 ++ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
2376 + },
2377 + {
2378 +- /* Fujitsu AH544 laptop */
2379 +- /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
2380 + .matches = {
2381 +- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
2382 +- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK AH544"),
2383 ++ DMI_MATCH(DMI_BOARD_NAME, "LAPQC71B"),
2384 + },
2385 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
2386 ++ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
2387 + },
2388 + {
2389 +- /* Fujitsu Lifebook T725 laptop */
2390 + .matches = {
2391 +- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
2392 +- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T725"),
2393 ++ DMI_MATCH(DMI_BOARD_NAME, "N140CU"),
2394 + },
2395 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
2396 ++ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
2397 + },
2398 + {
2399 +- /* Fujitsu U574 laptop */
2400 +- /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
2401 + .matches = {
2402 +- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
2403 +- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"),
2404 ++ DMI_MATCH(DMI_BOARD_NAME, "N141CU"),
2405 + },
2406 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
2407 ++ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
2408 + },
2409 + {
2410 +- /* Fujitsu UH554 laptop */
2411 + .matches = {
2412 +- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
2413 +- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"),
2414 ++ DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"),
2415 + },
2416 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
2417 ++ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
2418 + },
2419 +- { }
2420 +-};
2421 +-
2422 +-/*
2423 +- * Some Wistron based laptops need us to explicitly enable the 'Dritek
2424 +- * keyboard extension' to make their extra keys start generating scancodes.
2425 +- * Originally, this was just confined to older laptops, but a few Acer laptops
2426 +- * have turned up in 2007 that also need this again.
2427 +- */
2428 +-static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = {
2429 + {
2430 +- /* Acer Aspire 5100 */
2431 + .matches = {
2432 +- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
2433 +- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5100"),
2434 ++ DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
2435 + },
2436 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
2437 ++ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
2438 + },
2439 ++ /*
2440 ++ * At least one modern Clevo barebone has the touchpad connected both
2441 ++ * via PS/2 and i2c interface. This causes a race condition between the
2442 ++ * psmouse and i2c-hid driver. Since the full capability of the touchpad
2443 ++ * is available via the i2c interface and the device has no external
2444 ++ * PS/2 port, it is safe to just ignore all ps2 mouses here to avoid
2445 ++ * this issue. The known affected device is the
2446 ++ * TUXEDO InfinityBook S17 Gen6 / Clevo NS70MU which comes with one of
2447 ++ * the two different dmi strings below. NS50MU is not a typo!
2448 ++ */
2449 + {
2450 +- /* Acer Aspire 5610 */
2451 + .matches = {
2452 +- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
2453 +- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"),
2454 ++ DMI_MATCH(DMI_BOARD_NAME, "NS50MU"),
2455 + },
2456 ++ .driver_data = (void *)(SERIO_QUIRK_NOAUX | SERIO_QUIRK_NOMUX |
2457 ++ SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP |
2458 ++ SERIO_QUIRK_NOPNP)
2459 + },
2460 + {
2461 +- /* Acer Aspire 5630 */
2462 + .matches = {
2463 +- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
2464 +- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5630"),
2465 ++ DMI_MATCH(DMI_BOARD_NAME, "NS50_70MU"),
2466 + },
2467 ++ .driver_data = (void *)(SERIO_QUIRK_NOAUX | SERIO_QUIRK_NOMUX |
2468 ++ SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP |
2469 ++ SERIO_QUIRK_NOPNP)
2470 + },
2471 + {
2472 +- /* Acer Aspire 5650 */
2473 + .matches = {
2474 +- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
2475 +- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5650"),
2476 ++ DMI_MATCH(DMI_BOARD_NAME, "NJ50_70CU"),
2477 + },
2478 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
2479 ++ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
2480 + },
2481 + {
2482 +- /* Acer Aspire 5680 */
2483 ++ /*
2484 ++ * This is only a partial board_name and might be followed by
2485 ++ * another letter or number. DMI_MATCH however does do partial
2486 ++ * matching.
2487 ++ */
2488 + .matches = {
2489 +- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
2490 +- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5680"),
2491 ++ DMI_MATCH(DMI_PRODUCT_NAME, "P65xH"),
2492 + },
2493 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
2494 ++ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
2495 + },
2496 + {
2497 +- /* Acer Aspire 5720 */
2498 ++ /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
2499 + .matches = {
2500 +- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
2501 +- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5720"),
2502 ++ DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
2503 + },
2504 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
2505 ++ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
2506 + },
2507 + {
2508 +- /* Acer Aspire 9110 */
2509 ++ /*
2510 ++ * This is only a partial board_name and might be followed by
2511 ++ * another letter or number. DMI_MATCH however does do partial
2512 ++ * matching.
2513 ++ */
2514 + .matches = {
2515 +- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
2516 +- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 9110"),
2517 ++ DMI_MATCH(DMI_PRODUCT_NAME, "P65_P67H"),
2518 + },
2519 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
2520 ++ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
2521 + },
2522 + {
2523 +- /* Acer TravelMate 660 */
2524 ++ /*
2525 ++ * This is only a partial board_name and might be followed by
2526 ++ * another letter or number. DMI_MATCH however does do partial
2527 ++ * matching.
2528 ++ */
2529 + .matches = {
2530 +- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
2531 +- DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 660"),
2532 ++ DMI_MATCH(DMI_PRODUCT_NAME, "P65_67RP"),
2533 + },
2534 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
2535 ++ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
2536 + },
2537 + {
2538 +- /* Acer TravelMate 2490 */
2539 ++ /*
2540 ++ * This is only a partial board_name and might be followed by
2541 ++ * another letter or number. DMI_MATCH however does do partial
2542 ++ * matching.
2543 ++ */
2544 + .matches = {
2545 +- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
2546 +- DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"),
2547 ++ DMI_MATCH(DMI_PRODUCT_NAME, "P65_67RS"),
2548 + },
2549 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
2550 ++ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
2551 + },
2552 + {
2553 +- /* Acer TravelMate 4280 */
2554 ++ /*
2555 ++ * This is only a partial board_name and might be followed by
2556 ++ * another letter or number. DMI_MATCH however does do partial
2557 ++ * matching.
2558 ++ */
2559 + .matches = {
2560 +- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
2561 +- DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4280"),
2562 ++ DMI_MATCH(DMI_PRODUCT_NAME, "P67xRP"),
2563 + },
2564 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
2565 ++ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
2566 + },
2567 +- { }
2568 +-};
2569 +-
2570 +-/*
2571 +- * Some laptops need keyboard reset before probing for the trackpad to get
2572 +- * it detected, initialised & finally work.
2573 +- */
2574 +-static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
2575 + {
2576 +- /* Gigabyte P35 v2 - Elantech touchpad */
2577 + .matches = {
2578 +- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
2579 +- DMI_MATCH(DMI_PRODUCT_NAME, "P35V2"),
2580 ++ DMI_MATCH(DMI_BOARD_NAME, "PB50_70DFx,DDx"),
2581 + },
2582 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
2583 ++ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
2584 + },
2585 +- {
2586 +- /* Aorus branded Gigabyte X3 Plus - Elantech touchpad */
2587 ++ {
2588 + .matches = {
2589 +- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
2590 +- DMI_MATCH(DMI_PRODUCT_NAME, "X3"),
2591 ++ DMI_MATCH(DMI_BOARD_NAME, "X170SM"),
2592 + },
2593 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
2594 ++ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
2595 + },
2596 + {
2597 +- /* Gigabyte P34 - Elantech touchpad */
2598 + .matches = {
2599 +- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
2600 +- DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
2601 ++ DMI_MATCH(DMI_BOARD_NAME, "X170KM-G"),
2602 + },
2603 ++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
2604 ++ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
2605 + },
2606 ++ { }
2607 ++};
2608 ++
2609 ++#ifdef CONFIG_PNP
2610 ++static const struct dmi_system_id i8042_dmi_laptop_table[] __initconst = {
2611 + {
2612 +- /* Gigabyte P57 - Elantech touchpad */
2613 + .matches = {
2614 +- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
2615 +- DMI_MATCH(DMI_PRODUCT_NAME, "P57"),
2616 ++ DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */
2617 + },
2618 + },
2619 + {
2620 +- /* Schenker XMG C504 - Elantech touchpad */
2621 + .matches = {
2622 +- DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
2623 +- DMI_MATCH(DMI_PRODUCT_NAME, "C504"),
2624 ++ DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /* Laptop */
2625 + },
2626 + },
2627 +- { }
2628 +-};
2629 +-
2630 +-static const struct dmi_system_id i8042_dmi_probe_defer_table[] __initconst = {
2631 + {
2632 +- /* ASUS ZenBook UX425UA */
2633 + .matches = {
2634 +- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
2635 +- DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX425UA"),
2636 ++ DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
2637 + },
2638 + },
2639 + {
2640 +- /* ASUS ZenBook UM325UA */
2641 + .matches = {
2642 +- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
2643 +- DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"),
2644 ++ DMI_MATCH(DMI_CHASSIS_TYPE, "14"), /* Sub-Notebook */
2645 + },
2646 + },
2647 + { }
2648 + };
2649 ++#endif
2650 +
2651 + #endif /* CONFIG_X86 */
2652 +
2653 +@@ -1167,11 +1426,6 @@ static int __init i8042_pnp_init(void)
2654 + bool pnp_data_busted = false;
2655 + int err;
2656 +
2657 +-#ifdef CONFIG_X86
2658 +- if (dmi_check_system(i8042_dmi_nopnp_table))
2659 +- i8042_nopnp = true;
2660 +-#endif
2661 +-
2662 + if (i8042_nopnp) {
2663 + pr_info("PNP detection disabled\n");
2664 + return 0;
2665 +@@ -1275,6 +1529,59 @@ static inline int i8042_pnp_init(void) { return 0; }
2666 + static inline void i8042_pnp_exit(void) { }
2667 + #endif /* CONFIG_PNP */
2668 +
2669 ++
2670 ++#ifdef CONFIG_X86
2671 ++static void __init i8042_check_quirks(void)
2672 ++{
2673 ++ const struct dmi_system_id *device_quirk_info;
2674 ++ uintptr_t quirks;
2675 ++
2676 ++ device_quirk_info = dmi_first_match(i8042_dmi_quirk_table);
2677 ++ if (!device_quirk_info)
2678 ++ return;
2679 ++
2680 ++ quirks = (uintptr_t)device_quirk_info->driver_data;
2681 ++
2682 ++ if (quirks & SERIO_QUIRK_NOKBD)
2683 ++ i8042_nokbd = true;
2684 ++ if (quirks & SERIO_QUIRK_NOAUX)
2685 ++ i8042_noaux = true;
2686 ++ if (quirks & SERIO_QUIRK_NOMUX)
2687 ++ i8042_nomux = true;
2688 ++ if (quirks & SERIO_QUIRK_FORCEMUX)
2689 ++ i8042_nomux = false;
2690 ++ if (quirks & SERIO_QUIRK_UNLOCK)
2691 ++ i8042_unlock = true;
2692 ++ if (quirks & SERIO_QUIRK_PROBE_DEFER)
2693 ++ i8042_probe_defer = true;
2694 ++ /* Honor module parameter when value is not default */
2695 ++ if (i8042_reset == I8042_RESET_DEFAULT) {
2696 ++ if (quirks & SERIO_QUIRK_RESET_ALWAYS)
2697 ++ i8042_reset = I8042_RESET_ALWAYS;
2698 ++ if (quirks & SERIO_QUIRK_RESET_NEVER)
2699 ++ i8042_reset = I8042_RESET_NEVER;
2700 ++ }
2701 ++ if (quirks & SERIO_QUIRK_DIECT)
2702 ++ i8042_direct = true;
2703 ++ if (quirks & SERIO_QUIRK_DUMBKBD)
2704 ++ i8042_dumbkbd = true;
2705 ++ if (quirks & SERIO_QUIRK_NOLOOP)
2706 ++ i8042_noloop = true;
2707 ++ if (quirks & SERIO_QUIRK_NOTIMEOUT)
2708 ++ i8042_notimeout = true;
2709 ++ if (quirks & SERIO_QUIRK_KBDRESET)
2710 ++ i8042_kbdreset = true;
2711 ++ if (quirks & SERIO_QUIRK_DRITEK)
2712 ++ i8042_dritek = true;
2713 ++#ifdef CONFIG_PNP
2714 ++ if (quirks & SERIO_QUIRK_NOPNP)
2715 ++ i8042_nopnp = true;
2716 ++#endif
2717 ++}
2718 ++#else
2719 ++static inline void i8042_check_quirks(void) {}
2720 ++#endif
2721 ++
2722 + static int __init i8042_platform_init(void)
2723 + {
2724 + int retval;
2725 +@@ -1297,45 +1604,17 @@ static int __init i8042_platform_init(void)
2726 + i8042_kbd_irq = I8042_MAP_IRQ(1);
2727 + i8042_aux_irq = I8042_MAP_IRQ(12);
2728 +
2729 +- retval = i8042_pnp_init();
2730 +- if (retval)
2731 +- return retval;
2732 +-
2733 + #if defined(__ia64__)
2734 +- i8042_reset = I8042_RESET_ALWAYS;
2735 ++ i8042_reset = I8042_RESET_ALWAYS;
2736 + #endif
2737 +
2738 +-#ifdef CONFIG_X86
2739 +- /* Honor module parameter when value is not default */
2740 +- if (i8042_reset == I8042_RESET_DEFAULT) {
2741 +- if (dmi_check_system(i8042_dmi_reset_table))
2742 +- i8042_reset = I8042_RESET_ALWAYS;
2743 +-
2744 +- if (dmi_check_system(i8042_dmi_noselftest_table))
2745 +- i8042_reset = I8042_RESET_NEVER;
2746 +- }
2747 +-
2748 +- if (dmi_check_system(i8042_dmi_noloop_table))
2749 +- i8042_noloop = true;
2750 +-
2751 +- if (dmi_check_system(i8042_dmi_nomux_table))
2752 +- i8042_nomux = true;
2753 +-
2754 +- if (dmi_check_system(i8042_dmi_forcemux_table))
2755 +- i8042_nomux = false;
2756 +-
2757 +- if (dmi_check_system(i8042_dmi_notimeout_table))
2758 +- i8042_notimeout = true;
2759 +-
2760 +- if (dmi_check_system(i8042_dmi_dritek_table))
2761 +- i8042_dritek = true;
2762 +-
2763 +- if (dmi_check_system(i8042_dmi_kbdreset_table))
2764 +- i8042_kbdreset = true;
2765 ++ i8042_check_quirks();
2766 +
2767 +- if (dmi_check_system(i8042_dmi_probe_defer_table))
2768 +- i8042_probe_defer = true;
2769 ++ retval = i8042_pnp_init();
2770 ++ if (retval)
2771 ++ return retval;
2772 +
2773 ++#ifdef CONFIG_X86
2774 + /*
2775 + * A20 was already enabled during early kernel init. But some buggy
2776 + * BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to
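The i8042 portion above collapses many single-purpose DMI tables into one quirk table whose driver_data packs every quirk for a machine into a bitmask, decoded once by i8042_check_quirks(). A minimal standalone sketch of that pack/decode pattern follows; the flag values and entry layout here are illustrative stand-ins for the SERIO_QUIRK_* bits and struct dmi_system_id, not the kernel's exact definitions.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative flag bits modeled on the SERIO_QUIRK_* masks above. */
    #define QUIRK_NOMUX        (1u << 2)
    #define QUIRK_RESET_ALWAYS (1u << 6)
    #define QUIRK_NOLOOP       (1u << 10)
    #define QUIRK_NOPNP        (1u << 14)

    /* Stand-in for struct dmi_system_id: the quirks ride in a void *. */
    struct quirk_entry {
            const char *board_name;
            void *driver_data;
    };

    static const struct quirk_entry table[] = {
            { "NL5xRU", (void *)(uintptr_t)(QUIRK_NOMUX | QUIRK_RESET_ALWAYS |
                                            QUIRK_NOLOOP | QUIRK_NOPNP) },
            { NULL, NULL }
    };

    int main(void)
    {
            /* Decode the first match, as i8042_check_quirks() does. */
            uintptr_t quirks = (uintptr_t)table[0].driver_data;

            bool nomux = quirks & QUIRK_NOMUX;
            bool nopnp = quirks & QUIRK_NOPNP;

            printf("nomux=%d nopnp=%d\n", nomux, nopnp);
            return 0;
    }

Packing the flags into driver_data lets dmi_first_match() return everything known about a machine in one lookup, instead of walking a separate table per quirk.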
2777 +diff --git a/drivers/md/md.c b/drivers/md/md.c
2778 +index 33946adb0d6f6..c8f2e8524bfb7 100644
2779 +--- a/drivers/md/md.c
2780 ++++ b/drivers/md/md.c
2781 +@@ -6251,11 +6251,11 @@ static void mddev_detach(struct mddev *mddev)
2782 + static void __md_stop(struct mddev *mddev)
2783 + {
2784 + struct md_personality *pers = mddev->pers;
2785 ++ md_bitmap_destroy(mddev);
2786 + mddev_detach(mddev);
2787 + /* Ensure ->event_work is done */
2788 + if (mddev->event_work.func)
2789 + flush_workqueue(md_misc_wq);
2790 +- md_bitmap_destroy(mddev);
2791 + spin_lock(&mddev->lock);
2792 + mddev->pers = NULL;
2793 + spin_unlock(&mddev->lock);
2794 +@@ -6272,6 +6272,7 @@ void md_stop(struct mddev *mddev)
2795 + /* stop the array and free an attached data structures.
2796 + * This is called from dm-raid
2797 + */
2798 ++ __md_stop_writes(mddev);
2799 + __md_stop(mddev);
2800 + bioset_exit(&mddev->bio_set);
2801 + bioset_exit(&mddev->sync_set);
2802 +diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
2803 +index d7fb33c078e81..1f0120cbe9e80 100644
2804 +--- a/drivers/net/bonding/bond_3ad.c
2805 ++++ b/drivers/net/bonding/bond_3ad.c
2806 +@@ -2007,30 +2007,24 @@ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
2807 + */
2808 + void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
2809 + {
2810 +- /* check that the bond is not initialized yet */
2811 +- if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr),
2812 +- bond->dev->dev_addr)) {
2813 +-
2814 +- BOND_AD_INFO(bond).aggregator_identifier = 0;
2815 +-
2816 +- BOND_AD_INFO(bond).system.sys_priority =
2817 +- bond->params.ad_actor_sys_prio;
2818 +- if (is_zero_ether_addr(bond->params.ad_actor_system))
2819 +- BOND_AD_INFO(bond).system.sys_mac_addr =
2820 +- *((struct mac_addr *)bond->dev->dev_addr);
2821 +- else
2822 +- BOND_AD_INFO(bond).system.sys_mac_addr =
2823 +- *((struct mac_addr *)bond->params.ad_actor_system);
2824 ++ BOND_AD_INFO(bond).aggregator_identifier = 0;
2825 ++ BOND_AD_INFO(bond).system.sys_priority =
2826 ++ bond->params.ad_actor_sys_prio;
2827 ++ if (is_zero_ether_addr(bond->params.ad_actor_system))
2828 ++ BOND_AD_INFO(bond).system.sys_mac_addr =
2829 ++ *((struct mac_addr *)bond->dev->dev_addr);
2830 ++ else
2831 ++ BOND_AD_INFO(bond).system.sys_mac_addr =
2832 ++ *((struct mac_addr *)bond->params.ad_actor_system);
2833 +
2834 +- /* initialize how many times this module is called in one
2835 +- * second (should be about every 100ms)
2836 +- */
2837 +- ad_ticks_per_sec = tick_resolution;
2838 ++ /* initialize how many times this module is called in one
2839 ++ * second (should be about every 100ms)
2840 ++ */
2841 ++ ad_ticks_per_sec = tick_resolution;
2842 +
2843 +- bond_3ad_initiate_agg_selection(bond,
2844 +- AD_AGGREGATOR_SELECTION_TIMER *
2845 +- ad_ticks_per_sec);
2846 +- }
2847 ++ bond_3ad_initiate_agg_selection(bond,
2848 ++ AD_AGGREGATOR_SELECTION_TIMER *
2849 ++ ad_ticks_per_sec);
2850 + }
2851 +
2852 + /**
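The bond_3ad hunk drops the "already initialized" MAC guard so the 802.3ad state, including the tick resolution, is re-seeded on every initialization; when the guard skipped this path, a never-set tick resolution could leave the aggregator-selection timer inert. A tiny illustrative sketch of why that resolution matters (names and values here are made up for the example):

    #include <stdio.h>

    #define AD_AGGREGATOR_SELECTION_TIMER 8 /* seconds, illustrative */

    /* Timer expiry is counted in machine ticks: seconds * ticks/sec. */
    static unsigned int agg_select_ticks(unsigned int ticks_per_sec)
    {
            return AD_AGGREGATOR_SELECTION_TIMER * ticks_per_sec;
    }

    int main(void)
    {
            /* Initialized path: ~100ms resolution -> 10 ticks/sec. */
            printf("initialized: %u ticks\n", agg_select_ticks(10));
            /* Skipped path: resolution never set -> timer never expires. */
            printf("skipped init: %u ticks\n", agg_select_ticks(0));
            return 0;
    }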
2853 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
2854 +index 70d8ca3039dcb..78763f5027d10 100644
2855 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
2856 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
2857 +@@ -623,7 +623,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
2858 + hw_resc->max_stat_ctxs -= le16_to_cpu(req->min_stat_ctx) * n;
2859 + hw_resc->max_vnics -= le16_to_cpu(req->min_vnics) * n;
2860 + if (bp->flags & BNXT_FLAG_CHIP_P5)
2861 +- hw_resc->max_irqs -= vf_msix * n;
2862 ++ hw_resc->max_nqs -= vf_msix;
2863 +
2864 + rc = pf->active_vfs;
2865 + }
2866 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
2867 +index 0e13ce9b4d009..669ae53f4c728 100644
2868 +--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
2869 ++++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
2870 +@@ -4385,7 +4385,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
2871 + (struct in6_addr *)&ipv6_full_mask))
2872 + new_mask |= I40E_L3_V6_DST_MASK;
2873 + else if (ipv6_addr_any((struct in6_addr *)
2874 +- &usr_ip6_spec->ip6src))
2875 ++ &usr_ip6_spec->ip6dst))
2876 + new_mask &= ~I40E_L3_V6_DST_MASK;
2877 + else
2878 + return -EOPNOTSUPP;
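The i40e hunk fixes a copy-paste slip: the branch deciding whether to clear the destination mask tested ip6src where it should test ip6dst. A small standalone model of the corrected check follows; the types and the ipv6_addr_any-style "all zero" test are simplified stand-ins for the kernel versions.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct in6_addr_model { uint8_t s6_addr[16]; };

    /* Model of ipv6_addr_any(): true when every byte is zero. */
    static bool addr_any(const struct in6_addr_model *a)
    {
            static const struct in6_addr_model zero;
            return memcmp(a, &zero, sizeof(zero)) == 0;
    }

    #define L3_V6_DST_MASK (1u << 1)

    int main(void)
    {
            struct in6_addr_model src = { { 0 } }, dst = { { 0 } };
            unsigned int new_mask = L3_V6_DST_MASK;

            src.s6_addr[15] = 1; /* src set, dst left empty by the user */

            /* Fixed logic: key the DST mask off the *destination* field. */
            if (addr_any(&dst))
                    new_mask &= ~L3_V6_DST_MASK;

            printf("new_mask=%#x\n", new_mask);
            return 0;
    }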
2879 +diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
2880 +index 5581747947e57..60d8ef0c88595 100644
2881 +--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
2882 ++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
2883 +@@ -321,6 +321,19 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
2884 + bool if_running, pool_present = !!pool;
2885 + int ret = 0, pool_failure = 0;
2886 +
2887 ++ if (qid >= vsi->num_rxq || qid >= vsi->num_txq) {
2888 ++ netdev_err(vsi->netdev, "Please use queue id in scope of combined queues count\n");
2889 ++ pool_failure = -EINVAL;
2890 ++ goto failure;
2891 ++ }
2892 ++
2893 ++ if (!is_power_of_2(vsi->rx_rings[qid]->count) ||
2894 ++ !is_power_of_2(vsi->tx_rings[qid]->count)) {
2895 ++ netdev_err(vsi->netdev, "Please align ring sizes to power of 2\n");
2896 ++ pool_failure = -EINVAL;
2897 ++ goto failure;
2898 ++ }
2899 ++
2900 + if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
2901 +
2902 + if (if_running) {
2903 +@@ -343,6 +356,7 @@ xsk_pool_if_up:
2904 + netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
2905 + }
2906 +
2907 ++failure:
2908 + if (pool_failure) {
2909 + netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n",
2910 + pool_present ? "en" : "dis", pool_failure);
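The ice hunk rejects XSK pool setup early when the queue id is out of range or either ring length is not a power of two, which the zero-copy ring indexing relies on. The usual constant-time power-of-two test, modeled standalone (is_power_of_2 here is a stand-in for the kernel helper of the same name):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the kernel's is_power_of_2(): nonzero, one bit set. */
    static bool is_power_of_2(unsigned long n)
    {
            return n != 0 && (n & (n - 1)) == 0;
    }

    static int xsk_pool_check(unsigned int qid, unsigned int num_rxq,
                              unsigned int rx_count, unsigned int tx_count)
    {
            if (qid >= num_rxq)
                    return -1; /* queue id out of scope */
            if (!is_power_of_2(rx_count) || !is_power_of_2(tx_count))
                    return -1; /* ring sizes must be powers of two */
            return 0;
    }

    int main(void)
    {
            printf("%d\n", xsk_pool_check(3, 8, 512, 512)); /* ok: 0 */
            printf("%d\n", xsk_pool_check(3, 8, 512, 500)); /* rejected */
            return 0;
    }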
2911 +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
2912 +index 23ddfd79fc8b6..29be1d6eca436 100644
2913 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
2914 ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
2915 +@@ -1212,7 +1212,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
2916 + struct cyclecounter cc;
2917 + unsigned long flags;
2918 + u32 incval = 0;
2919 +- u32 tsauxc = 0;
2920 + u32 fuse0 = 0;
2921 +
2922 + /* For some of the boards below this mask is technically incorrect.
2923 +@@ -1247,18 +1246,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
2924 + case ixgbe_mac_x550em_a:
2925 + case ixgbe_mac_X550:
2926 + cc.read = ixgbe_ptp_read_X550;
2927 +-
2928 +- /* enable SYSTIME counter */
2929 +- IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
2930 +- IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
2931 +- IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
2932 +- tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
2933 +- IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
2934 +- tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
2935 +- IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
2936 +- IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
2937 +-
2938 +- IXGBE_WRITE_FLUSH(hw);
2939 + break;
2940 + case ixgbe_mac_X540:
2941 + cc.read = ixgbe_ptp_read_82599;
2942 +@@ -1290,6 +1277,50 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
2943 + spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
2944 + }
2945 +
2946 ++/**
2947 ++ * ixgbe_ptp_init_systime - Initialize SYSTIME registers
2948 ++ * @adapter: the ixgbe private board structure
2949 ++ *
2950 ++ * Initialize and start the SYSTIME registers.
2951 ++ */
2952 ++static void ixgbe_ptp_init_systime(struct ixgbe_adapter *adapter)
2953 ++{
2954 ++ struct ixgbe_hw *hw = &adapter->hw;
2955 ++ u32 tsauxc;
2956 ++
2957 ++ switch (hw->mac.type) {
2958 ++ case ixgbe_mac_X550EM_x:
2959 ++ case ixgbe_mac_x550em_a:
2960 ++ case ixgbe_mac_X550:
2961 ++ tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
2962 ++
2963 ++ /* Reset SYSTIME registers to 0 */
2964 ++ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
2965 ++ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
2966 ++ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
2967 ++
2968 ++ /* Reset interrupt settings */
2969 ++ IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
2970 ++ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
2971 ++
2972 ++ /* Activate the SYSTIME counter */
2973 ++ IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
2974 ++ tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
2975 ++ break;
2976 ++ case ixgbe_mac_X540:
2977 ++ case ixgbe_mac_82599EB:
2978 ++ /* Reset SYSTIME registers to 0 */
2979 ++ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
2980 ++ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
2981 ++ break;
2982 ++ default:
2983 ++ /* Other devices aren't supported */
2984 ++ return;
2985 ++ };
2986 ++
2987 ++ IXGBE_WRITE_FLUSH(hw);
2988 ++}
2989 ++
2990 + /**
2991 + * ixgbe_ptp_reset
2992 + * @adapter: the ixgbe private board structure
2993 +@@ -1316,6 +1347,8 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
2994 +
2995 + ixgbe_ptp_start_cyclecounter(adapter);
2996 +
2997 ++ ixgbe_ptp_init_systime(adapter);
2998 ++
2999 + spin_lock_irqsave(&adapter->tmreg_lock, flags);
3000 + timecounter_init(&adapter->hw_tc, &adapter->hw_cc,
3001 + ktime_to_ns(ktime_get_real()));
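The ixgbe refactor moves the SYSTIME zeroing out of ixgbe_ptp_start_cyclecounter() into a separate ixgbe_ptp_init_systime() called only from ixgbe_ptp_reset(); start_cyclecounter also runs on events such as link reconfiguration, and zeroing the free-running counter there would yank the clock around. A simplified, software-only model of the cyclecounter-to-timecounter accumulation that such a mid-run reset corrupts:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified timecounter: accumulates deltas of a free-running counter. */
    struct tc_model {
            uint64_t last_raw; /* last hardware counter sample */
            uint64_t nsec;     /* accumulated nanoseconds */
    };

    static void tc_update(struct tc_model *tc, uint64_t raw)
    {
            /* Unsigned wraparound handles normal counter overflow, but a
             * counter reset looks like an enormous forward delta. */
            tc->nsec += (uint64_t)(raw - tc->last_raw);
            tc->last_raw = raw;
    }

    int main(void)
    {
            struct tc_model tc = { .last_raw = 1000, .nsec = 0 };

            tc_update(&tc, 1500); /* normal progress: +500 ns */
            tc_update(&tc, 0);    /* counter zeroed mid-run: bogus delta */
            printf("nsec=%llu\n", (unsigned long long)tc.nsec);
            return 0;
    }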
3002 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
3003 +index e00648094fc2a..c1c4f380803a1 100644
3004 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
3005 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
3006 +@@ -3325,7 +3325,9 @@ static int set_feature_hw_tc(struct net_device *netdev, bool enable)
3007 + struct mlx5e_priv *priv = netdev_priv(netdev);
3008 +
3009 + #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
3010 +- if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) {
3011 ++ int tc_flag = mlx5e_is_uplink_rep(priv) ? MLX5_TC_FLAG(ESW_OFFLOAD) :
3012 ++ MLX5_TC_FLAG(NIC_OFFLOAD);
3013 ++ if (!enable && mlx5e_tc_num_filters(priv, tc_flag)) {
3014 + netdev_err(netdev,
3015 + "Active offloaded tc filters, can't turn hw_tc_offload off\n");
3016 + return -EINVAL;
3017 +@@ -4350,14 +4352,6 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
3018 + /* RQ */
3019 + mlx5e_build_rq_params(mdev, params);
3020 +
3021 +- /* HW LRO */
3022 +- if (MLX5_CAP_ETH(mdev, lro_cap) &&
3023 +- params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
3024 +- /* No XSK params: checking the availability of striding RQ in general. */
3025 +- if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
3026 +- params->packet_merge.type = slow_pci_heuristic(mdev) ?
3027 +- MLX5E_PACKET_MERGE_NONE : MLX5E_PACKET_MERGE_LRO;
3028 +- }
3029 + params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
3030 +
3031 + /* CQ moderation params */
3032 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
3033 +index 161b60e1139b3..3d614bf5cff9e 100644
3034 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
3035 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
3036 +@@ -618,6 +618,8 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
3037 +
3038 + params->mqprio.num_tc = 1;
3039 + params->tunneled_offload_en = false;
3040 ++ if (rep->vport != MLX5_VPORT_UPLINK)
3041 ++ params->vlan_strip_disable = true;
3042 +
3043 + /* Set an initial non-zero value, so that mlx5e_select_queue won't
3044 + * divide by zero if called before first activating channels.
3045 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
3046 +index 5a6606c843edf..740065e21181d 100644
3047 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
3048 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
3049 +@@ -1427,7 +1427,9 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
3050 + memcpy(&dev->profile, &profile[profile_idx], sizeof(dev->profile));
3051 + INIT_LIST_HEAD(&priv->ctx_list);
3052 + spin_lock_init(&priv->ctx_lock);
3053 ++ lockdep_register_key(&dev->lock_key);
3054 + mutex_init(&dev->intf_state_mutex);
3055 ++ lockdep_set_class(&dev->intf_state_mutex, &dev->lock_key);
3056 +
3057 + mutex_init(&priv->bfregs.reg_head.lock);
3058 + mutex_init(&priv->bfregs.wc_head.lock);
3059 +@@ -1474,6 +1476,7 @@ err_health_init:
3060 + mutex_destroy(&priv->bfregs.wc_head.lock);
3061 + mutex_destroy(&priv->bfregs.reg_head.lock);
3062 + mutex_destroy(&dev->intf_state_mutex);
3063 ++ lockdep_unregister_key(&dev->lock_key);
3064 + return err;
3065 + }
3066 +
3067 +@@ -1491,6 +1494,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
3068 + mutex_destroy(&priv->bfregs.wc_head.lock);
3069 + mutex_destroy(&priv->bfregs.reg_head.lock);
3070 + mutex_destroy(&dev->intf_state_mutex);
3071 ++ lockdep_unregister_key(&dev->lock_key);
3072 + }
3073 +
3074 + static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
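Note on the mlx5 hunk above: it gives each mlx5_core_dev its own dynamically registered lockdep class for intf_state_mutex, and unregisters the key on both the error path and in mlx5_mdev_uninit(). With a shared static class, lockdep folds the mutexes of every mlx5 instance together and reports false deadlocks when two devices take the lock nested. A minimal sketch of the pattern, using a made-up my_dev structure rather than the mlx5 one:

    #include <linux/lockdep.h>
    #include <linux/mutex.h>

    struct my_dev {
            struct mutex state_lock;
            struct lock_class_key lock_key;  /* one lockdep key per instance */
    };

    static void my_dev_init(struct my_dev *dev)
    {
            /* register a dynamic key so this instance gets its own class */
            lockdep_register_key(&dev->lock_key);
            mutex_init(&dev->state_lock);
            lockdep_set_class(&dev->state_lock, &dev->lock_key);
    }

    static void my_dev_uninit(struct my_dev *dev)
    {
            mutex_destroy(&dev->state_lock);
            /* must run on every teardown path, error unwind included */
            lockdep_unregister_key(&dev->lock_key);
    }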
3075 +diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
3076 +index 54a91d2b33b53..fa4c596e6ec6f 100644
3077 +--- a/drivers/net/ethernet/moxa/moxart_ether.c
3078 ++++ b/drivers/net/ethernet/moxa/moxart_ether.c
3079 +@@ -74,11 +74,6 @@ static int moxart_set_mac_address(struct net_device *ndev, void *addr)
3080 + static void moxart_mac_free_memory(struct net_device *ndev)
3081 + {
3082 + struct moxart_mac_priv_t *priv = netdev_priv(ndev);
3083 +- int i;
3084 +-
3085 +- for (i = 0; i < RX_DESC_NUM; i++)
3086 +- dma_unmap_single(&priv->pdev->dev, priv->rx_mapping[i],
3087 +- priv->rx_buf_size, DMA_FROM_DEVICE);
3088 +
3089 + if (priv->tx_desc_base)
3090 + dma_free_coherent(&priv->pdev->dev,
3091 +@@ -193,6 +188,7 @@ static int moxart_mac_open(struct net_device *ndev)
3092 + static int moxart_mac_stop(struct net_device *ndev)
3093 + {
3094 + struct moxart_mac_priv_t *priv = netdev_priv(ndev);
3095 ++ int i;
3096 +
3097 + napi_disable(&priv->napi);
3098 +
3099 +@@ -204,6 +200,11 @@ static int moxart_mac_stop(struct net_device *ndev)
3100 + /* disable all functions */
3101 + writel(0, priv->base + REG_MAC_CTRL);
3102 +
3103 ++ /* unmap areas mapped in moxart_mac_setup_desc_ring() */
3104 ++ for (i = 0; i < RX_DESC_NUM; i++)
3105 ++ dma_unmap_single(&priv->pdev->dev, priv->rx_mapping[i],
3106 ++ priv->rx_buf_size, DMA_FROM_DEVICE);
3107 ++
3108 + return 0;
3109 + }
3110 +
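The moxart change restores map/unmap symmetry: the RX buffers are streaming-mapped on the open path, so they are now unmapped in moxart_mac_stop() rather than in the coherent-memory teardown, where they could be unmapped without ever having been mapped. A rough sketch of the pairing, with an illustrative rx_priv structure and ring size (buffer allocation and error handling omitted):

    #include <linux/dma-mapping.h>

    #define RX_DESC_NUM 64                   /* illustrative ring size */

    struct rx_priv {
            struct device *dev;
            void *rx_buf[RX_DESC_NUM];
            dma_addr_t rx_mapping[RX_DESC_NUM];
            size_t rx_buf_size;
    };

    static void rx_ring_map(struct rx_priv *p)    /* open path */
    {
            int i;

            for (i = 0; i < RX_DESC_NUM; i++)
                    p->rx_mapping[i] = dma_map_single(p->dev, p->rx_buf[i],
                                                      p->rx_buf_size,
                                                      DMA_FROM_DEVICE);
    }

    static void rx_ring_unmap(struct rx_priv *p)  /* stop path, as in the fix */
    {
            int i;

            for (i = 0; i < RX_DESC_NUM; i++)
                    dma_unmap_single(p->dev, p->rx_mapping[i],
                                     p->rx_buf_size, DMA_FROM_DEVICE);
    }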
3111 +diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
3112 +index 781313dbd04f2..c713a3ee6571b 100644
3113 +--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
3114 ++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
3115 +@@ -1692,8 +1692,67 @@ static int ionic_set_features(struct net_device *netdev,
3116 + return err;
3117 + }
3118 +
3119 ++static int ionic_set_attr_mac(struct ionic_lif *lif, u8 *mac)
3120 ++{
3121 ++ struct ionic_admin_ctx ctx = {
3122 ++ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
3123 ++ .cmd.lif_setattr = {
3124 ++ .opcode = IONIC_CMD_LIF_SETATTR,
3125 ++ .index = cpu_to_le16(lif->index),
3126 ++ .attr = IONIC_LIF_ATTR_MAC,
3127 ++ },
3128 ++ };
3129 ++
3130 ++ ether_addr_copy(ctx.cmd.lif_setattr.mac, mac);
3131 ++ return ionic_adminq_post_wait(lif, &ctx);
3132 ++}
3133 ++
3134 ++static int ionic_get_attr_mac(struct ionic_lif *lif, u8 *mac_addr)
3135 ++{
3136 ++ struct ionic_admin_ctx ctx = {
3137 ++ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
3138 ++ .cmd.lif_getattr = {
3139 ++ .opcode = IONIC_CMD_LIF_GETATTR,
3140 ++ .index = cpu_to_le16(lif->index),
3141 ++ .attr = IONIC_LIF_ATTR_MAC,
3142 ++ },
3143 ++ };
3144 ++ int err;
3145 ++
3146 ++ err = ionic_adminq_post_wait(lif, &ctx);
3147 ++ if (err)
3148 ++ return err;
3149 ++
3150 ++ ether_addr_copy(mac_addr, ctx.comp.lif_getattr.mac);
3151 ++ return 0;
3152 ++}
3153 ++
3154 ++static int ionic_program_mac(struct ionic_lif *lif, u8 *mac)
3155 ++{
3156 ++ u8 get_mac[ETH_ALEN];
3157 ++ int err;
3158 ++
3159 ++ err = ionic_set_attr_mac(lif, mac);
3160 ++ if (err)
3161 ++ return err;
3162 ++
3163 ++ err = ionic_get_attr_mac(lif, get_mac);
3164 ++ if (err)
3165 ++ return err;
3166 ++
3167 ++ /* Deal with older firmware that silently ignores the set attr mac
3168 ++ * command: it doesn't actually change the mac and doesn't return an
3169 ++ * error, so we do a get attr to verify whether the set took effect
3170 ++ */
3171 ++ if (!ether_addr_equal(get_mac, mac))
3172 ++ return 1;
3173 ++
3174 ++ return 0;
3175 ++}
3176 ++
3177 + static int ionic_set_mac_address(struct net_device *netdev, void *sa)
3178 + {
3179 ++ struct ionic_lif *lif = netdev_priv(netdev);
3180 + struct sockaddr *addr = sa;
3181 + u8 *mac;
3182 + int err;
3183 +@@ -1702,6 +1761,14 @@ static int ionic_set_mac_address(struct net_device *netdev, void *sa)
3184 + if (ether_addr_equal(netdev->dev_addr, mac))
3185 + return 0;
3186 +
3187 ++ err = ionic_program_mac(lif, mac);
3188 ++ if (err < 0)
3189 ++ return err;
3190 ++
3191 ++ if (err > 0)
3192 ++ netdev_dbg(netdev, "%s: SET and GET ATTR Mac are not equal due to old FW running\n",
3193 ++ __func__);
3194 ++
3195 + err = eth_prepare_mac_addr_change(netdev, addr);
3196 + if (err)
3197 + return err;
3198 +@@ -2974,11 +3041,10 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
3199 +
3200 + netif_device_detach(lif->netdev);
3201 +
3202 ++ mutex_lock(&lif->queue_lock);
3203 + if (test_bit(IONIC_LIF_F_UP, lif->state)) {
3204 + dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
3205 +- mutex_lock(&lif->queue_lock);
3206 + ionic_stop_queues(lif);
3207 +- mutex_unlock(&lif->queue_lock);
3208 + }
3209 +
3210 + if (netif_running(lif->netdev)) {
3211 +@@ -2989,6 +3055,8 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
3212 + ionic_reset(ionic);
3213 + ionic_qcqs_free(lif);
3214 +
3215 ++ mutex_unlock(&lif->queue_lock);
3216 ++
3217 + dev_info(ionic->dev, "FW Down: LIFs stopped\n");
3218 + }
3219 +
3220 +@@ -3012,9 +3080,15 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
3221 + err = ionic_port_init(ionic);
3222 + if (err)
3223 + goto err_out;
3224 ++
3225 ++ mutex_lock(&lif->queue_lock);
3226 ++
3227 ++ if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state))
3228 ++ dev_info(ionic->dev, "FW Up: clearing broken state\n");
3229 ++
3230 + err = ionic_qcqs_alloc(lif);
3231 + if (err)
3232 +- goto err_out;
3233 ++ goto err_unlock;
3234 +
3235 + err = ionic_lif_init(lif);
3236 + if (err)
3237 +@@ -3035,6 +3109,8 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
3238 + goto err_txrx_free;
3239 + }
3240 +
3241 ++ mutex_unlock(&lif->queue_lock);
3242 ++
3243 + clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
3244 + ionic_link_status_check_request(lif, CAN_SLEEP);
3245 + netif_device_attach(lif->netdev);
3246 +@@ -3051,6 +3127,8 @@ err_lifs_deinit:
3247 + ionic_lif_deinit(lif);
3248 + err_qcqs_free:
3249 + ionic_qcqs_free(lif);
3250 ++err_unlock:
3251 ++ mutex_unlock(&lif->queue_lock);
3252 + err_out:
3253 + dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
3254 + }
3255 +@@ -3215,6 +3293,7 @@ static int ionic_station_set(struct ionic_lif *lif)
3256 + .attr = IONIC_LIF_ATTR_MAC,
3257 + },
3258 + };
3259 ++ u8 mac_address[ETH_ALEN];
3260 + struct sockaddr addr;
3261 + int err;
3262 +
3263 +@@ -3223,8 +3302,23 @@ static int ionic_station_set(struct ionic_lif *lif)
3264 + return err;
3265 + netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
3266 + ctx.comp.lif_getattr.mac);
3267 +- if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
3268 +- return 0;
3269 ++ ether_addr_copy(mac_address, ctx.comp.lif_getattr.mac);
3270 ++
3271 ++ if (is_zero_ether_addr(mac_address)) {
3272 ++ eth_hw_addr_random(netdev);
3273 ++ netdev_dbg(netdev, "Random Mac generated: %pM\n", netdev->dev_addr);
3274 ++ ether_addr_copy(mac_address, netdev->dev_addr);
3275 ++
3276 ++ err = ionic_program_mac(lif, mac_address);
3277 ++ if (err < 0)
3278 ++ return err;
3279 ++
3280 ++ if (err > 0) {
3281 ++ netdev_dbg(netdev, "%s: SET/GET ATTR Mac are not the same due to old FW running\n",
3282 ++ __func__);
3283 ++ return 0;
3284 ++ }
3285 ++ }
3286 +
3287 + if (!is_zero_ether_addr(netdev->dev_addr)) {
3288 + /* If the netdev mac is non-zero and doesn't match the default
3289 +@@ -3232,12 +3326,11 @@ static int ionic_station_set(struct ionic_lif *lif)
3290 + * likely here again after a fw-upgrade reset. We need to be
3291 + * sure the netdev mac is in our filter list.
3292 + */
3293 +- if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
3294 +- netdev->dev_addr))
3295 ++ if (!ether_addr_equal(mac_address, netdev->dev_addr))
3296 + ionic_lif_addr_add(lif, netdev->dev_addr);
3297 + } else {
3298 + /* Update the netdev mac with the device's mac */
3299 +- memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
3300 ++ ether_addr_copy(addr.sa_data, mac_address);
3301 + addr.sa_family = AF_INET;
3302 + err = eth_prepare_mac_addr_change(netdev, &addr);
3303 + if (err) {
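ionic_program_mac() above is a write-then-verify helper: older firmware acknowledges IONIC_CMD_LIF_SETATTR for the MAC without applying it, so the driver reads the attribute back and returns 1 (distinct from a negative error) when the readback differs, letting callers log and carry on. The verification pattern in isolation, with dev_set_mac()/dev_get_mac() as hypothetical stand-ins for the real admin-queue commands:

    #include <linux/etherdevice.h>

    struct my_dev;                                        /* hypothetical handle */
    int dev_set_mac(struct my_dev *dev, const u8 *mac);   /* stand-in */
    int dev_get_mac(struct my_dev *dev, u8 *mac);         /* stand-in */

    static int program_mac_verified(struct my_dev *dev, const u8 *mac)
    {
            u8 readback[ETH_ALEN];
            int err;

            err = dev_set_mac(dev, mac);
            if (err)
                    return err;

            err = dev_get_mac(dev, readback);
            if (err)
                    return err;

            /* old firmware may ACK the set without applying it; only a
             * matching readback counts as success, 1 means "ignored" */
            return ether_addr_equal(readback, mac) ? 0 : 1;
    }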
3304 +diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
3305 +index 480f85bc17f99..9ede66842118f 100644
3306 +--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
3307 ++++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
3308 +@@ -395,8 +395,8 @@ try_again:
3309 + ionic_opcode_to_str(opcode), opcode,
3310 + ionic_error_to_str(err), err);
3311 +
3312 +- msleep(1000);
3313 + iowrite32(0, &idev->dev_cmd_regs->done);
3314 ++ msleep(1000);
3315 + iowrite32(1, &idev->dev_cmd_regs->doorbell);
3316 + goto try_again;
3317 + }
3318 +@@ -409,6 +409,8 @@ try_again:
3319 + return ionic_error_to_errno(err);
3320 + }
3321 +
3322 ++ ionic_dev_cmd_clean(ionic);
3323 ++
3324 + return 0;
3325 + }
3326 +
3327 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
3328 +index d1c31200bb911..01d0a14f67520 100644
3329 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
3330 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
3331 +@@ -258,14 +258,18 @@ EXPORT_SYMBOL_GPL(stmmac_set_mac_addr);
3332 + /* Enable disable MAC RX/TX */
3333 + void stmmac_set_mac(void __iomem *ioaddr, bool enable)
3334 + {
3335 +- u32 value = readl(ioaddr + MAC_CTRL_REG);
3336 ++ u32 old_val, value;
3337 ++
3338 ++ old_val = readl(ioaddr + MAC_CTRL_REG);
3339 ++ value = old_val;
3340 +
3341 + if (enable)
3342 + value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
3343 + else
3344 + value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);
3345 +
3346 +- writel(value, ioaddr + MAC_CTRL_REG);
3347 ++ if (value != old_val)
3348 ++ writel(value, ioaddr + MAC_CTRL_REG);
3349 + }
3350 +
3351 + void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
3352 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
3353 +index b4f83c8655684..2569673559df3 100644
3354 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
3355 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
3356 +@@ -1083,10 +1083,10 @@ static void stmmac_mac_link_up(struct phylink_config *config,
3357 + bool tx_pause, bool rx_pause)
3358 + {
3359 + struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
3360 +- u32 ctrl;
3361 ++ u32 old_ctrl, ctrl;
3362 +
3363 +- ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
3364 +- ctrl &= ~priv->hw->link.speed_mask;
3365 ++ old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
3366 ++ ctrl = old_ctrl & ~priv->hw->link.speed_mask;
3367 +
3368 + if (interface == PHY_INTERFACE_MODE_USXGMII) {
3369 + switch (speed) {
3370 +@@ -1161,7 +1161,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
3371 + if (tx_pause && rx_pause)
3372 + stmmac_mac_flow_ctrl(priv, duplex);
3373 +
3374 +- writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
3375 ++ if (ctrl != old_ctrl)
3376 ++ writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
3377 +
3378 + stmmac_mac_set(priv, priv->ioaddr, true);
3379 + if (phy && priv->dma_cap.eee) {
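Both stmmac hunks apply the same idiom: capture the old register value, compute the new one, and skip the writel() when nothing changed, so a link-up event or MAC enable doesn't re-post an identical MMIO write. Reduced to its shape, with illustrative CTRL_REG/ENABLE_BITS macros in place of the real stmmac definitions:

    #include <linux/io.h>

    #define CTRL_REG    0x0            /* illustrative register offset */
    #define ENABLE_BITS 0x3            /* illustrative RX/TX enable bits */

    static void set_ctrl_bits(void __iomem *ioaddr, bool enable)
    {
            u32 old_val = readl(ioaddr + CTRL_REG);
            u32 val = old_val;

            if (enable)
                    val |= ENABLE_BITS;
            else
                    val &= ~ENABLE_BITS;

            /* only touch the hardware if the value actually changed */
            if (val != old_val)
                    writel(val, ioaddr + CTRL_REG);
    }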
3380 +diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
3381 +index 287ae4c538aae..6472425539e15 100644
3382 +--- a/drivers/net/ethernet/sun/cassini.c
3383 ++++ b/drivers/net/ethernet/sun/cassini.c
3384 +@@ -1325,7 +1325,7 @@ static void cas_init_rx_dma(struct cas *cp)
3385 + writel(val, cp->regs + REG_RX_PAGE_SIZE);
3386 +
3387 + /* enable the header parser if desired */
3388 +- if (CAS_HP_FIRMWARE == cas_prog_null)
3389 ++ if (&CAS_HP_FIRMWARE[0] == &cas_prog_null[0])
3390 + return;
3391 +
3392 + val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
3393 +@@ -3794,7 +3794,7 @@ static void cas_reset(struct cas *cp, int blkflag)
3394 +
3395 + /* program header parser */
3396 + if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
3397 +- (CAS_HP_ALT_FIRMWARE == cas_prog_null)) {
3398 ++ (&CAS_HP_ALT_FIRMWARE[0] == &cas_prog_null[0])) {
3399 + cas_load_firmware(cp, CAS_HP_FIRMWARE);
3400 + } else {
3401 + cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
3402 +diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
3403 +index 4337b0920d3d7..cad0798985a13 100644
3404 +--- a/drivers/net/ipa/ipa_mem.c
3405 ++++ b/drivers/net/ipa/ipa_mem.c
3406 +@@ -570,7 +570,7 @@ static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
3407 + }
3408 +
3409 + /* Align the address down and the size up to a page boundary */
3410 +- addr = qcom_smem_virt_to_phys(virt) & PAGE_MASK;
3411 ++ addr = qcom_smem_virt_to_phys(virt);
3412 + phys = addr & PAGE_MASK;
3413 + size = PAGE_ALIGN(size + addr - phys);
3414 + iova = phys; /* We just want a direct mapping */
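The ipa_mem fix makes the code do what its comment always said: round the SMEM address *down* to a page boundary, then grow the size by the offset that rounding removed before rounding it *up*, so the IOMMU mapping covers the whole item. The arithmetic as ordinary, runnable C, assuming a 4 KiB page:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE     4096ULL
    #define PAGE_MASK     (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

    int main(void)
    {
            uint64_t addr = 0x1000a234;  /* example unaligned SMEM address */
            uint64_t size = 0x300;       /* example item size */

            uint64_t phys = addr & PAGE_MASK;               /* round down */
            uint64_t len  = PAGE_ALIGN(size + addr - phys); /* round up   */

            /* prints: map phys=0x1000a000 len=0x1000 */
            printf("map phys=%#llx len=%#llx\n",
                   (unsigned long long)phys, (unsigned long long)len);
            return 0;
    }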
3415 +diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
3416 +index c0b21a5580d52..3f43c253adaca 100644
3417 +--- a/drivers/net/ipvlan/ipvlan_main.c
3418 ++++ b/drivers/net/ipvlan/ipvlan_main.c
3419 +@@ -787,7 +787,7 @@ static int ipvlan_device_event(struct notifier_block *unused,
3420 +
3421 + case NETDEV_CHANGEADDR:
3422 + list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
3423 +- ether_addr_copy(ipvlan->dev->dev_addr, dev->dev_addr);
3424 ++ eth_hw_addr_set(ipvlan->dev, dev->dev_addr);
3425 + call_netdevice_notifiers(NETDEV_CHANGEADDR, ipvlan->dev);
3426 + }
3427 + break;
3428 +diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c
3429 +index 1cedb634f4f7b..f01078b2581ce 100644
3430 +--- a/drivers/net/ipvlan/ipvtap.c
3431 ++++ b/drivers/net/ipvlan/ipvtap.c
3432 +@@ -194,7 +194,7 @@ static struct notifier_block ipvtap_notifier_block __read_mostly = {
3433 + .notifier_call = ipvtap_device_event,
3434 + };
3435 +
3436 +-static int ipvtap_init(void)
3437 ++static int __init ipvtap_init(void)
3438 + {
3439 + int err;
3440 +
3441 +@@ -228,7 +228,7 @@ out1:
3442 + }
3443 + module_init(ipvtap_init);
3444 +
3445 +-static void ipvtap_exit(void)
3446 ++static void __exit ipvtap_exit(void)
3447 + {
3448 + rtnl_link_unregister(&ipvtap_link_ops);
3449 + unregister_netdevice_notifier(&ipvtap_notifier_block);
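Annotating ipvtap_init()/ipvtap_exit() with __init/__exit lets the kernel free the init text once load-time registration is done and drop the exit routine entirely when the driver is built in. For reference, the standard shape of module entry points:

    #include <linux/init.h>
    #include <linux/module.h>

    static int __init example_init(void)
    {
            /* runs once at load; this text is discarded afterwards */
            return 0;
    }

    static void __exit example_exit(void)
    {
            /* omitted from the image entirely for built-in objects */
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");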
3450 +diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
3451 +index 354890948f8a1..71700f2792786 100644
3452 +--- a/drivers/net/macsec.c
3453 ++++ b/drivers/net/macsec.c
3454 +@@ -447,11 +447,6 @@ static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
3455 + return (struct macsec_eth_header *)skb_mac_header(skb);
3456 + }
3457 +
3458 +-static sci_t dev_to_sci(struct net_device *dev, __be16 port)
3459 +-{
3460 +- return make_sci(dev->dev_addr, port);
3461 +-}
3462 +-
3463 + static void __macsec_pn_wrapped(struct macsec_secy *secy,
3464 + struct macsec_tx_sa *tx_sa)
3465 + {
3466 +@@ -3616,8 +3611,7 @@ static int macsec_set_mac_address(struct net_device *dev, void *p)
3467 + dev_uc_del(real_dev, dev->dev_addr);
3468 +
3469 + out:
3470 +- ether_addr_copy(dev->dev_addr, addr->sa_data);
3471 +- macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);
3472 ++ eth_hw_addr_set(dev, addr->sa_data);
3473 +
3474 + /* If h/w offloading is available, propagate to the device */
3475 + if (macsec_is_offloaded(macsec)) {
3476 +@@ -3953,6 +3947,11 @@ static bool sci_exists(struct net_device *dev, sci_t sci)
3477 + return false;
3478 + }
3479 +
3480 ++static sci_t dev_to_sci(struct net_device *dev, __be16 port)
3481 ++{
3482 ++ return make_sci(dev->dev_addr, port);
3483 ++}
3484 ++
3485 + static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
3486 + {
3487 + struct macsec_dev *macsec = macsec_priv(dev);
3488 +diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
3489 +index a9a515cf5a460..6363459ba1d05 100644
3490 +--- a/drivers/net/macvlan.c
3491 ++++ b/drivers/net/macvlan.c
3492 +@@ -711,7 +711,7 @@ static int macvlan_sync_address(struct net_device *dev, unsigned char *addr)
3493 +
3494 + if (!(dev->flags & IFF_UP)) {
3495 + /* Just copy in the new address */
3496 +- ether_addr_copy(dev->dev_addr, addr);
3497 ++ eth_hw_addr_set(dev, addr);
3498 + } else {
3499 + /* Rehash and update the device filters */
3500 + if (macvlan_addr_busy(vlan->port, addr))
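The ipvlan, macsec, and macvlan hunks all convert open-coded ether_addr_copy(dev->dev_addr, ...) writes into eth_hw_addr_set(dev, ...). netdev->dev_addr is on its way to becoming const, so drivers must go through the helper rather than writing the field directly. Usage is a one-liner, assuming new_addr holds ETH_ALEN bytes:

    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>

    static void sketch_update_mac(struct net_device *dev, const u8 *new_addr)
    {
            /* replaces ether_addr_copy(dev->dev_addr, new_addr) */
            eth_hw_addr_set(dev, new_addr);
    }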
3501 +diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
3502 +index 834a68d758327..b616f55ea222a 100644
3503 +--- a/drivers/net/phy/phy_device.c
3504 ++++ b/drivers/net/phy/phy_device.c
3505 +@@ -315,11 +315,11 @@ static __maybe_unused int mdio_bus_phy_resume(struct device *dev)
3506 +
3507 + phydev->suspended_by_mdio_bus = 0;
3508 +
3509 +- /* If we managed to get here with the PHY state machine in a state other
3510 +- * than PHY_HALTED this is an indication that something went wrong and
3511 +- * we should most likely be using MAC managed PM and we are not.
3512 ++ /* If we managed to get here with the PHY state machine in a state other
3513 ++ * than PHY_HALTED or PHY_READY, this is an indication that something went
3514 ++ * wrong and we should most likely be using MAC managed PM, but we are not.
3515 + */
3516 +- WARN_ON(phydev->state != PHY_HALTED && !phydev->mac_managed_pm);
3517 ++ WARN_ON(phydev->state != PHY_HALTED && phydev->state != PHY_READY);
3518 +
3519 + ret = phy_init_hw(phydev);
3520 + if (ret < 0)
3521 +diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
3522 +index 0d1d92ef79099..7e821bed91ce5 100644
3523 +--- a/drivers/net/usb/r8152.c
3524 ++++ b/drivers/net/usb/r8152.c
3525 +@@ -5904,6 +5904,11 @@ static void r8153_enter_oob(struct r8152 *tp)
3526 + ocp_data &= ~NOW_IS_OOB;
3527 + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
3528 +
3529 ++ /* RX FIFO settings for OOB */
3530 ++ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_OOB);
3531 ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_OOB);
3532 ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_OOB);
3533 ++
3534 + rtl_disable(tp);
3535 + rtl_reset_bmu(tp);
3536 +
3537 +@@ -6429,21 +6434,8 @@ static void r8156_fc_parameter(struct r8152 *tp)
3538 + u32 pause_on = tp->fc_pause_on ? tp->fc_pause_on : fc_pause_on_auto(tp);
3539 + u32 pause_off = tp->fc_pause_off ? tp->fc_pause_off : fc_pause_off_auto(tp);
3540 +
3541 +- switch (tp->version) {
3542 +- case RTL_VER_10:
3543 +- case RTL_VER_11:
3544 +- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 8);
3545 +- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 8);
3546 +- break;
3547 +- case RTL_VER_12:
3548 +- case RTL_VER_13:
3549 +- case RTL_VER_15:
3550 +- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 16);
3551 +- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 16);
3552 +- break;
3553 +- default:
3554 +- break;
3555 +- }
3556 ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 16);
3557 ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 16);
3558 + }
3559 +
3560 + static void rtl8156_change_mtu(struct r8152 *tp)
3561 +@@ -6555,6 +6547,11 @@ static void rtl8156_down(struct r8152 *tp)
3562 + ocp_data &= ~NOW_IS_OOB;
3563 + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
3564 +
3565 ++ /* RX FIFO settings for OOB */
3566 ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_FULL, 64 / 16);
3567 ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, 1024 / 16);
3568 ++ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, 4096 / 16);
3569 ++
3570 + rtl_disable(tp);
3571 + rtl_reset_bmu(tp);
3572 +
3573 +diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
3574 +index 460e90eb528f8..7cf9206638c37 100644
3575 +--- a/drivers/net/usb/smsc95xx.c
3576 ++++ b/drivers/net/usb/smsc95xx.c
3577 +@@ -18,8 +18,6 @@
3578 + #include <linux/usb/usbnet.h>
3579 + #include <linux/slab.h>
3580 + #include <linux/of_net.h>
3581 +-#include <linux/irq.h>
3582 +-#include <linux/irqdomain.h>
3583 + #include <linux/mdio.h>
3584 + #include <linux/phy.h>
3585 + #include "smsc95xx.h"
3586 +@@ -53,9 +51,6 @@
3587 + #define SUSPEND_ALLMODES (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \
3588 + SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3)
3589 +
3590 +-#define SMSC95XX_NR_IRQS (1) /* raise to 12 for GPIOs */
3591 +-#define PHY_HWIRQ (SMSC95XX_NR_IRQS - 1)
3592 +-
3593 + struct smsc95xx_priv {
3594 + u32 mac_cr;
3595 + u32 hash_hi;
3596 +@@ -64,12 +59,8 @@ struct smsc95xx_priv {
3597 + spinlock_t mac_cr_lock;
3598 + u8 features;
3599 + u8 suspend_flags;
3600 +- struct irq_chip irqchip;
3601 +- struct irq_domain *irqdomain;
3602 +- struct fwnode_handle *irqfwnode;
3603 + struct mii_bus *mdiobus;
3604 + struct phy_device *phydev;
3605 +- struct task_struct *pm_task;
3606 + };
3607 +
3608 + static bool turbo_mode = true;
3609 +@@ -79,14 +70,13 @@ MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
3610 + static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index,
3611 + u32 *data, int in_pm)
3612 + {
3613 +- struct smsc95xx_priv *pdata = dev->driver_priv;
3614 + u32 buf;
3615 + int ret;
3616 + int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);
3617 +
3618 + BUG_ON(!dev);
3619 +
3620 +- if (current != pdata->pm_task)
3621 ++ if (!in_pm)
3622 + fn = usbnet_read_cmd;
3623 + else
3624 + fn = usbnet_read_cmd_nopm;
3625 +@@ -110,14 +100,13 @@ static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index,
3626 + static int __must_check __smsc95xx_write_reg(struct usbnet *dev, u32 index,
3627 + u32 data, int in_pm)
3628 + {
3629 +- struct smsc95xx_priv *pdata = dev->driver_priv;
3630 + u32 buf;
3631 + int ret;
3632 + int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16);
3633 +
3634 + BUG_ON(!dev);
3635 +
3636 +- if (current != pdata->pm_task)
3637 ++ if (!in_pm)
3638 + fn = usbnet_write_cmd;
3639 + else
3640 + fn = usbnet_write_cmd_nopm;
3641 +@@ -606,8 +595,6 @@ static void smsc95xx_mac_update_fullduplex(struct usbnet *dev)
3642 +
3643 + static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
3644 + {
3645 +- struct smsc95xx_priv *pdata = dev->driver_priv;
3646 +- unsigned long flags;
3647 + u32 intdata;
3648 +
3649 + if (urb->actual_length != 4) {
3650 +@@ -619,15 +606,11 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
3651 + intdata = get_unaligned_le32(urb->transfer_buffer);
3652 + netif_dbg(dev, link, dev->net, "intdata: 0x%08X\n", intdata);
3653 +
3654 +- local_irq_save(flags);
3655 +-
3656 + if (intdata & INT_ENP_PHY_INT_)
3657 +- generic_handle_domain_irq(pdata->irqdomain, PHY_HWIRQ);
3658 ++ ;
3659 + else
3660 + netdev_warn(dev->net, "unexpected interrupt, intdata=0x%08X\n",
3661 + intdata);
3662 +-
3663 +- local_irq_restore(flags);
3664 + }
3665 +
3666 + /* Enable or disable Tx & Rx checksum offload engines */
3667 +@@ -1089,9 +1072,8 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
3668 + {
3669 + struct smsc95xx_priv *pdata;
3670 + bool is_internal_phy;
3671 +- char usb_path[64];
3672 +- int ret, phy_irq;
3673 + u32 val;
3674 ++ int ret;
3675 +
3676 + printk(KERN_INFO SMSC_CHIPNAME " v" SMSC_DRIVER_VERSION "\n");
3677 +
3678 +@@ -1131,38 +1113,10 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
3679 + if (ret)
3680 + goto free_pdata;
3681 +
3682 +- /* create irq domain for use by PHY driver and GPIO consumers */
3683 +- usb_make_path(dev->udev, usb_path, sizeof(usb_path));
3684 +- pdata->irqfwnode = irq_domain_alloc_named_fwnode(usb_path);
3685 +- if (!pdata->irqfwnode) {
3686 +- ret = -ENOMEM;
3687 +- goto free_pdata;
3688 +- }
3689 +-
3690 +- pdata->irqdomain = irq_domain_create_linear(pdata->irqfwnode,
3691 +- SMSC95XX_NR_IRQS,
3692 +- &irq_domain_simple_ops,
3693 +- pdata);
3694 +- if (!pdata->irqdomain) {
3695 +- ret = -ENOMEM;
3696 +- goto free_irqfwnode;
3697 +- }
3698 +-
3699 +- phy_irq = irq_create_mapping(pdata->irqdomain, PHY_HWIRQ);
3700 +- if (!phy_irq) {
3701 +- ret = -ENOENT;
3702 +- goto remove_irqdomain;
3703 +- }
3704 +-
3705 +- pdata->irqchip = dummy_irq_chip;
3706 +- pdata->irqchip.name = SMSC_CHIPNAME;
3707 +- irq_set_chip_and_handler_name(phy_irq, &pdata->irqchip,
3708 +- handle_simple_irq, "phy");
3709 +-
3710 + pdata->mdiobus = mdiobus_alloc();
3711 + if (!pdata->mdiobus) {
3712 + ret = -ENOMEM;
3713 +- goto dispose_irq;
3714 ++ goto free_pdata;
3715 + }
3716 +
3717 + ret = smsc95xx_read_reg(dev, HW_CFG, &val);
3718 +@@ -1195,7 +1149,6 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
3719 + goto unregister_mdio;
3720 + }
3721 +
3722 +- pdata->phydev->irq = phy_irq;
3723 + pdata->phydev->is_internal = is_internal_phy;
3724 +
3725 + /* detect device revision as different features may be available */
3726 +@@ -1238,15 +1191,6 @@ unregister_mdio:
3727 + free_mdio:
3728 + mdiobus_free(pdata->mdiobus);
3729 +
3730 +-dispose_irq:
3731 +- irq_dispose_mapping(phy_irq);
3732 +-
3733 +-remove_irqdomain:
3734 +- irq_domain_remove(pdata->irqdomain);
3735 +-
3736 +-free_irqfwnode:
3737 +- irq_domain_free_fwnode(pdata->irqfwnode);
3738 +-
3739 + free_pdata:
3740 + kfree(pdata);
3741 + return ret;
3742 +@@ -1259,9 +1203,6 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
3743 + phy_disconnect(dev->net->phydev);
3744 + mdiobus_unregister(pdata->mdiobus);
3745 + mdiobus_free(pdata->mdiobus);
3746 +- irq_dispose_mapping(irq_find_mapping(pdata->irqdomain, PHY_HWIRQ));
3747 +- irq_domain_remove(pdata->irqdomain);
3748 +- irq_domain_free_fwnode(pdata->irqfwnode);
3749 + netif_dbg(dev, ifdown, dev->net, "free pdata\n");
3750 + kfree(pdata);
3751 + }
3752 +@@ -1286,6 +1227,29 @@ static u32 smsc_crc(const u8 *buffer, size_t len, int filter)
3753 + return crc << ((filter % 2) * 16);
3754 + }
3755 +
3756 ++static int smsc95xx_enable_phy_wakeup_interrupts(struct usbnet *dev, u16 mask)
3757 ++{
3758 ++ int ret;
3759 ++
3760 ++ netdev_dbg(dev->net, "enabling PHY wakeup interrupts\n");
3761 ++
3762 ++ /* read to clear */
3763 ++ ret = smsc95xx_mdio_read_nopm(dev, PHY_INT_SRC);
3764 ++ if (ret < 0)
3765 ++ return ret;
3766 ++
3767 ++ /* enable interrupt source */
3768 ++ ret = smsc95xx_mdio_read_nopm(dev, PHY_INT_MASK);
3769 ++ if (ret < 0)
3770 ++ return ret;
3771 ++
3772 ++ ret |= mask;
3773 ++
3774 ++ smsc95xx_mdio_write_nopm(dev, PHY_INT_MASK, ret);
3775 ++
3776 ++ return 0;
3777 ++}
3778 ++
3779 + static int smsc95xx_link_ok_nopm(struct usbnet *dev)
3780 + {
3781 + int ret;
3782 +@@ -1452,6 +1416,7 @@ static int smsc95xx_enter_suspend3(struct usbnet *dev)
3783 + static int smsc95xx_autosuspend(struct usbnet *dev, u32 link_up)
3784 + {
3785 + struct smsc95xx_priv *pdata = dev->driver_priv;
3786 ++ int ret;
3787 +
3788 + if (!netif_running(dev->net)) {
3789 + /* interface is ifconfig down so fully power down hw */
3790 +@@ -1470,10 +1435,27 @@ static int smsc95xx_autosuspend(struct usbnet *dev, u32 link_up)
3791 + }
3792 +
3793 + netdev_dbg(dev->net, "autosuspend entering SUSPEND1\n");
3794 ++
3795 ++ /* enable PHY wakeup events so we wake when a cable is attached */
3796 ++ ret = smsc95xx_enable_phy_wakeup_interrupts(dev,
3797 ++ PHY_INT_MASK_ANEG_COMP_);
3798 ++ if (ret < 0) {
3799 ++ netdev_warn(dev->net, "error enabling PHY wakeup ints\n");
3800 ++ return ret;
3801 ++ }
3802 ++
3803 + netdev_info(dev->net, "entering SUSPEND1 mode\n");
3804 + return smsc95xx_enter_suspend1(dev);
3805 + }
3806 +
3807 ++ /* enable PHY wakeup events so we get a remote wakeup if the cable is pulled */
3808 ++ ret = smsc95xx_enable_phy_wakeup_interrupts(dev,
3809 ++ PHY_INT_MASK_LINK_DOWN_);
3810 ++ if (ret < 0) {
3811 ++ netdev_warn(dev->net, "error enabling PHY wakeup ints\n");
3812 ++ return ret;
3813 ++ }
3814 ++
3815 + netdev_dbg(dev->net, "autosuspend entering SUSPEND3\n");
3816 + return smsc95xx_enter_suspend3(dev);
3817 + }
3818 +@@ -1485,12 +1467,9 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
3819 + u32 val, link_up;
3820 + int ret;
3821 +
3822 +- pdata->pm_task = current;
3823 +-
3824 + ret = usbnet_suspend(intf, message);
3825 + if (ret < 0) {
3826 + netdev_warn(dev->net, "usbnet_suspend error\n");
3827 +- pdata->pm_task = NULL;
3828 + return ret;
3829 + }
3830 +
3831 +@@ -1542,6 +1521,13 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
3832 + }
3833 +
3834 + if (pdata->wolopts & WAKE_PHY) {
3835 ++ ret = smsc95xx_enable_phy_wakeup_interrupts(dev,
3836 ++ (PHY_INT_MASK_ANEG_COMP_ | PHY_INT_MASK_LINK_DOWN_));
3837 ++ if (ret < 0) {
3838 ++ netdev_warn(dev->net, "error enabling PHY wakeup ints\n");
3839 ++ goto done;
3840 ++ }
3841 ++
3842 + /* if link is down then configure EDPD and enter SUSPEND1,
3843 + * otherwise enter SUSPEND0 below
3844 + */
3845 +@@ -1730,7 +1716,6 @@ done:
3846 + if (ret && PMSG_IS_AUTO(message))
3847 + usbnet_resume(intf);
3848 +
3849 +- pdata->pm_task = NULL;
3850 + return ret;
3851 + }
3852 +
3853 +@@ -1751,53 +1736,45 @@ static int smsc95xx_resume(struct usb_interface *intf)
3854 + /* do this first to ensure it's cleared even in error case */
3855 + pdata->suspend_flags = 0;
3856 +
3857 +- pdata->pm_task = current;
3858 +-
3859 + if (suspend_flags & SUSPEND_ALLMODES) {
3860 + /* clear wake-up sources */
3861 + ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
3862 + if (ret < 0)
3863 +- goto done;
3864 ++ return ret;
3865 +
3866 + val &= ~(WUCSR_WAKE_EN_ | WUCSR_MPEN_);
3867 +
3868 + ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
3869 + if (ret < 0)
3870 +- goto done;
3871 ++ return ret;
3872 +
3873 + /* clear wake-up status */
3874 + ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
3875 + if (ret < 0)
3876 +- goto done;
3877 ++ return ret;
3878 +
3879 + val &= ~PM_CTL_WOL_EN_;
3880 + val |= PM_CTL_WUPS_;
3881 +
3882 + ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
3883 + if (ret < 0)
3884 +- goto done;
3885 ++ return ret;
3886 + }
3887 +
3888 +- phy_init_hw(pdata->phydev);
3889 +-
3890 + ret = usbnet_resume(intf);
3891 + if (ret < 0)
3892 + netdev_warn(dev->net, "usbnet_resume error\n");
3893 +
3894 +-done:
3895 +- pdata->pm_task = NULL;
3896 ++ phy_init_hw(pdata->phydev);
3897 + return ret;
3898 + }
3899 +
3900 + static int smsc95xx_reset_resume(struct usb_interface *intf)
3901 + {
3902 + struct usbnet *dev = usb_get_intfdata(intf);
3903 +- struct smsc95xx_priv *pdata = dev->driver_priv;
3904 + int ret;
3905 +
3906 +- pdata->pm_task = current;
3907 + ret = smsc95xx_reset(dev);
3908 +- pdata->pm_task = NULL;
3909 + if (ret < 0)
3910 + return ret;
3911 +
3912 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
3913 +index 9b83c710c9b86..743e38a1aa511 100644
3914 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
3915 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
3916 +@@ -2386,10 +2386,7 @@ void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel)
3917 + rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
3918 + "Just Read IQK Matrix reg for channel:%d....\n",
3919 + channel);
3920 +- if ((rtlphy->iqk_matrix[indexforchannel].
3921 +- value[0] != NULL)
3922 +- /*&&(regea4 != 0) */)
3923 +- _rtl92d_phy_patha_fill_iqk_matrix(hw, true,
3924 ++ _rtl92d_phy_patha_fill_iqk_matrix(hw, true,
3925 + rtlphy->iqk_matrix[
3926 + indexforchannel].value, 0,
3927 + (rtlphy->iqk_matrix[
3928 +diff --git a/drivers/nfc/pn533/uart.c b/drivers/nfc/pn533/uart.c
3929 +index 7bdaf82630706..7ad98973648cc 100644
3930 +--- a/drivers/nfc/pn533/uart.c
3931 ++++ b/drivers/nfc/pn533/uart.c
3932 +@@ -310,6 +310,7 @@ static void pn532_uart_remove(struct serdev_device *serdev)
3933 + pn53x_unregister_nfc(pn532->priv);
3934 + serdev_device_close(serdev);
3935 + pn53x_common_clean(pn532->priv);
3936 ++ del_timer_sync(&pn532->cmd_timeout);
3937 + kfree_skb(pn532->recv_skb);
3938 + kfree(pn532);
3939 + }
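The pn533 fix adds del_timer_sync() to the remove path before the memory the timeout handler touches (recv_skb, the pn532 object itself) is freed; a command timeout firing during teardown would otherwise dereference freed memory. The canonical ordering, with a hypothetical my_ctx structure:

    #include <linux/slab.h>
    #include <linux/timer.h>

    struct my_ctx {
            struct timer_list cmd_timeout;
            void *recv_buf;
    };

    static void my_remove(struct my_ctx *ctx)
    {
            /* wait out any running handler and prevent re-arming
             * *before* freeing anything the handler can touch */
            del_timer_sync(&ctx->cmd_timeout);
            kfree(ctx->recv_buf);
            kfree(ctx);
    }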
3940 +diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
3941 +index 46bc30fe85d2b..235553337fb2d 100644
3942 +--- a/drivers/nvme/target/zns.c
3943 ++++ b/drivers/nvme/target/zns.c
3944 +@@ -34,8 +34,7 @@ static int validate_conv_zones_cb(struct blk_zone *z,
3945 +
3946 + bool nvmet_bdev_zns_enable(struct nvmet_ns *ns)
3947 + {
3948 +- struct request_queue *q = ns->bdev->bd_disk->queue;
3949 +- u8 zasl = nvmet_zasl(queue_max_zone_append_sectors(q));
3950 ++ u8 zasl = nvmet_zasl(bdev_max_zone_append_sectors(ns->bdev));
3951 + struct gendisk *bd_disk = ns->bdev->bd_disk;
3952 + int ret;
3953 +
3954 +diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
3955 +index 2a6d613a76cf3..f82e4a348330a 100644
3956 +--- a/drivers/scsi/qla2xxx/qla_gbl.h
3957 ++++ b/drivers/scsi/qla2xxx/qla_gbl.h
3958 +@@ -192,6 +192,8 @@ extern int ql2xfulldump_on_mpifail;
3959 + extern int ql2xsecenable;
3960 + extern int ql2xenforce_iocb_limit;
3961 + extern int ql2xabts_wait_nvme;
3962 ++extern int ql2xrspq_follow_inptr;
3963 ++extern int ql2xrspq_follow_inptr_legacy;
3964 +
3965 + extern int qla2x00_loop_reset(scsi_qla_host_t *);
3966 + extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
3967 +diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
3968 +index b218f97396195..59f5918dca95f 100644
3969 +--- a/drivers/scsi/qla2xxx/qla_isr.c
3970 ++++ b/drivers/scsi/qla2xxx/qla_isr.c
3971 +@@ -3707,12 +3707,11 @@ void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
3972 + * Return: 0 - all iocbs have arrived, xx - all iocbs have not arrived.
3973 + */
3974 + static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha,
3975 +- struct rsp_que *rsp, response_t *pkt)
3976 ++ struct rsp_que *rsp, response_t *pkt, u32 rsp_q_in)
3977 + {
3978 +- int start_pkt_ring_index, end_pkt_ring_index, n_ring_index;
3979 +- response_t *end_pkt;
3980 ++ int start_pkt_ring_index;
3981 ++ u32 iocb_cnt = 0;
3982 + int rc = 0;
3983 +- u32 rsp_q_in;
3984 +
3985 + if (pkt->entry_count == 1)
3986 + return rc;
3987 +@@ -3723,34 +3722,18 @@ static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha,
3988 + else
3989 + start_pkt_ring_index = rsp->ring_index - 1;
3990 +
3991 +- if ((start_pkt_ring_index + pkt->entry_count) >= rsp->length)
3992 +- end_pkt_ring_index = start_pkt_ring_index + pkt->entry_count -
3993 +- rsp->length - 1;
3994 ++ if (rsp_q_in < start_pkt_ring_index)
3995 ++ /* q in ptr is wrapped */
3996 ++ iocb_cnt = rsp->length - start_pkt_ring_index + rsp_q_in;
3997 + else
3998 +- end_pkt_ring_index = start_pkt_ring_index + pkt->entry_count - 1;
3999 ++ iocb_cnt = rsp_q_in - start_pkt_ring_index;
4000 +
4001 +- end_pkt = rsp->ring + end_pkt_ring_index;
4002 +-
4003 +- /* next pkt = end_pkt + 1 */
4004 +- n_ring_index = end_pkt_ring_index + 1;
4005 +- if (n_ring_index >= rsp->length)
4006 +- n_ring_index = 0;
4007 +-
4008 +- rsp_q_in = rsp->qpair->use_shadow_reg ? *rsp->in_ptr :
4009 +- rd_reg_dword(rsp->rsp_q_in);
4010 +-
4011 +- /* rsp_q_in is either wrapped or pointing beyond endpkt */
4012 +- if ((rsp_q_in < start_pkt_ring_index && rsp_q_in < n_ring_index) ||
4013 +- rsp_q_in >= n_ring_index)
4014 +- /* all IOCBs arrived. */
4015 +- rc = 0;
4016 +- else
4017 ++ if (iocb_cnt < pkt->entry_count)
4018 + rc = -EIO;
4019 +
4020 +- ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x5091,
4021 +- "%s - ring %p pkt %p end pkt %p entry count %#x rsp_q_in %d rc %d\n",
4022 +- __func__, rsp->ring, pkt, end_pkt, pkt->entry_count,
4023 +- rsp_q_in, rc);
4024 ++ ql_dbg(ql_dbg_init, vha, 0x5091,
4025 ++ "%s - ring %p pkt %p entry count %d iocb_cnt %d rsp_q_in %d rc %d\n",
4026 ++ __func__, rsp->ring, pkt, pkt->entry_count, iocb_cnt, rsp_q_in, rc);
4027 +
4028 + return rc;
4029 + }
4030 +@@ -3767,6 +3750,8 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
4031 + struct qla_hw_data *ha = vha->hw;
4032 + struct purex_entry_24xx *purex_entry;
4033 + struct purex_item *pure_item;
4034 ++ u16 rsp_in = 0, cur_ring_index;
4035 ++ int follow_inptr, is_shadow_hba;
4036 +
4037 + if (!ha->flags.fw_started)
4038 + return;
4039 +@@ -3776,8 +3761,27 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
4040 + qla_cpu_update(rsp->qpair, smp_processor_id());
4041 + }
4042 +
4043 +- while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
4044 ++#define __update_rsp_in(_update, _is_shadow_hba, _rsp, _rsp_in) \
4045 ++ do { \
4046 ++ if (_update) { \
4047 ++ _rsp_in = _is_shadow_hba ? *(_rsp)->in_ptr : \
4048 ++ rd_reg_dword_relaxed((_rsp)->rsp_q_in); \
4049 ++ } \
4050 ++ } while (0)
4051 ++
4052 ++ is_shadow_hba = IS_SHADOW_REG_CAPABLE(ha);
4053 ++ follow_inptr = is_shadow_hba ? ql2xrspq_follow_inptr :
4054 ++ ql2xrspq_follow_inptr_legacy;
4055 ++
4056 ++ __update_rsp_in(follow_inptr, is_shadow_hba, rsp, rsp_in);
4057 ++
4058 ++ while ((likely(follow_inptr &&
4059 ++ rsp->ring_index != rsp_in &&
4060 ++ rsp->ring_ptr->signature != RESPONSE_PROCESSED)) ||
4061 ++ (!follow_inptr &&
4062 ++ rsp->ring_ptr->signature != RESPONSE_PROCESSED)) {
4063 + pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
4064 ++ cur_ring_index = rsp->ring_index;
4065 +
4066 + rsp->ring_index++;
4067 + if (rsp->ring_index == rsp->length) {
4068 +@@ -3889,6 +3893,8 @@ process_err:
4069 + }
4070 + pure_item = qla27xx_copy_fpin_pkt(vha,
4071 + (void **)&pkt, &rsp);
4072 ++ __update_rsp_in(follow_inptr, is_shadow_hba,
4073 ++ rsp, rsp_in);
4074 + if (!pure_item)
4075 + break;
4076 + qla24xx_queue_purex_item(vha, pure_item,
4077 +@@ -3896,7 +3902,17 @@ process_err:
4078 + break;
4079 +
4080 + case ELS_AUTH_ELS:
4081 +- if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt)) {
4082 ++ if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt, rsp_in)) {
4083 ++ /*
4084 ++ * ring_ptr and ring_index were
4085 ++ * pre-incremented above. Reset them
4086 ++ * back to current. Wait for next
4087 ++ * interrupt with all IOCBs to arrive
4088 ++ * and re-process.
4089 ++ */
4090 ++ rsp->ring_ptr = (response_t *)pkt;
4091 ++ rsp->ring_index = cur_ring_index;
4092 ++
4093 + ql_dbg(ql_dbg_init, vha, 0x5091,
4094 + "Defer processing ELS opcode %#x...\n",
4095 + purex_entry->els_frame_payload[3]);
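The reworked qla_chk_cont_iocb_avail() reduces to one wrapped-distance computation: how many ring entries lie between the packet's first entry and the hardware's IN pointer, with wrap-around handled by adding the ring length back in. If that count is smaller than pkt->entry_count, the remaining IOCBs haven't landed yet and processing is deferred with -EIO. The distance math as runnable C:

    #include <stdio.h>

    /* entries that have arrived between `start` and the IN pointer `in`
     * on a ring of `length` slots; mirrors the patch's iocb_cnt */
    static unsigned int ring_dist(unsigned int start, unsigned int in,
                                  unsigned int length)
    {
            if (in < start)                   /* IN pointer has wrapped */
                    return length - start + in;
            return in - start;
    }

    int main(void)
    {
            /* 32-slot ring, packet starts at 30, IN wrapped to 3:
             * slots 30, 31, 0, 1, 2 have arrived */
            printf("%u\n", ring_dist(30, 3, 32));  /* prints 5 */
            return 0;
    }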
4096 +diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
4097 +index 6542a258cb751..00e97f0a07ebe 100644
4098 +--- a/drivers/scsi/qla2xxx/qla_os.c
4099 ++++ b/drivers/scsi/qla2xxx/qla_os.c
4100 +@@ -338,6 +338,16 @@ module_param(ql2xdelay_before_pci_error_handling, uint, 0644);
4101 + MODULE_PARM_DESC(ql2xdelay_before_pci_error_handling,
4102 + "Number of seconds delayed before qla begin PCI error self-handling (default: 5).\n");
4103 +
4104 ++int ql2xrspq_follow_inptr = 1;
4105 ++module_param(ql2xrspq_follow_inptr, int, 0644);
4106 ++MODULE_PARM_DESC(ql2xrspq_follow_inptr,
4107 ++ "Follow RSP IN pointer for RSP updates for HBAs 27xx and newer (default: 1).");
4108 ++
4109 ++int ql2xrspq_follow_inptr_legacy = 1;
4110 ++module_param(ql2xrspq_follow_inptr_legacy, int, 0644);
4111 ++MODULE_PARM_DESC(ql2xrspq_follow_inptr_legacy,
4112 ++ "Follow RSP IN pointer for RSP updates for HBAs older than 27XX. (default: 1).");
4113 ++
4114 + static void qla2x00_clear_drv_active(struct qla_hw_data *);
4115 + static void qla2x00_free_device(scsi_qla_host_t *);
4116 + static int qla2xxx_map_queues(struct Scsi_Host *shost);
4117 +diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
4118 +index 71c7f7b435c4a..3d03e1ca58201 100644
4119 +--- a/drivers/scsi/storvsc_drv.c
4120 ++++ b/drivers/scsi/storvsc_drv.c
4121 +@@ -2093,7 +2093,7 @@ static int storvsc_probe(struct hv_device *device,
4122 + */
4123 + host_dev->handle_error_wq =
4124 + alloc_ordered_workqueue("storvsc_error_wq_%d",
4125 +- WQ_MEM_RECLAIM,
4126 ++ 0,
4127 + host->host_no);
4128 + if (!host_dev->handle_error_wq) {
4129 + ret = -ENOMEM;
4130 +diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
4131 +index 3ed60068c4ea6..8dbe9866ea6c6 100644
4132 +--- a/drivers/scsi/ufs/ufshci.h
4133 ++++ b/drivers/scsi/ufs/ufshci.h
4134 +@@ -133,11 +133,7 @@ static inline u32 ufshci_version(u32 major, u32 minor)
4135 +
4136 + #define UFSHCD_UIC_MASK (UIC_COMMAND_COMPL | UFSHCD_UIC_PWR_MASK)
4137 +
4138 +-#define UFSHCD_ERROR_MASK (UIC_ERROR |\
4139 +- DEVICE_FATAL_ERROR |\
4140 +- CONTROLLER_FATAL_ERROR |\
4141 +- SYSTEM_BUS_FATAL_ERROR |\
4142 +- CRYPTO_ENGINE_FATAL_ERROR)
4143 ++#define UFSHCD_ERROR_MASK (UIC_ERROR | INT_FATAL_ERRORS)
4144 +
4145 + #define INT_FATAL_ERRORS (DEVICE_FATAL_ERROR |\
4146 + CONTROLLER_FATAL_ERROR |\
4147 +diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
4148 +index fb02105d6337b..e035a63bbe5b7 100644
4149 +--- a/drivers/video/fbdev/core/fbcon.c
4150 ++++ b/drivers/video/fbdev/core/fbcon.c
4151 +@@ -2413,15 +2413,21 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
4152 + struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
4153 + struct fbcon_ops *ops = info->fbcon_par;
4154 + struct fbcon_display *p = &fb_display[vc->vc_num];
4155 +- int resize;
4156 ++ int resize, ret, old_userfont, old_width, old_height, old_charcount;
4157 + char *old_data = NULL;
4158 +
4159 + resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
4160 + if (p->userfont)
4161 + old_data = vc->vc_font.data;
4162 + vc->vc_font.data = (void *)(p->fontdata = data);
4163 ++ old_userfont = p->userfont;
4164 + if ((p->userfont = userfont))
4165 + REFCOUNT(data)++;
4166 ++
4167 ++ old_width = vc->vc_font.width;
4168 ++ old_height = vc->vc_font.height;
4169 ++ old_charcount = vc->vc_font.charcount;
4170 ++
4171 + vc->vc_font.width = w;
4172 + vc->vc_font.height = h;
4173 + vc->vc_font.charcount = charcount;
4174 +@@ -2437,7 +2443,9 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
4175 + rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
4176 + cols /= w;
4177 + rows /= h;
4178 +- vc_resize(vc, cols, rows);
4179 ++ ret = vc_resize(vc, cols, rows);
4180 ++ if (ret)
4181 ++ goto err_out;
4182 + } else if (con_is_visible(vc)
4183 + && vc->vc_mode == KD_TEXT) {
4184 + fbcon_clear_margins(vc, 0);
4185 +@@ -2447,6 +2455,21 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
4186 + if (old_data && (--REFCOUNT(old_data) == 0))
4187 + kfree(old_data - FONT_EXTRA_WORDS * sizeof(int));
4188 + return 0;
4189 ++
4190 ++err_out:
4191 ++ p->fontdata = old_data;
4192 ++ vc->vc_font.data = (void *)old_data;
4193 ++
4194 ++ if (userfont) {
4195 ++ p->userfont = old_userfont;
4196 ++ REFCOUNT(data)--;
4197 ++ }
4198 ++
4199 ++ vc->vc_font.width = old_width;
4200 ++ vc->vc_font.height = old_height;
4201 ++ vc->vc_font.charcount = old_charcount;
4202 ++
4203 ++ return ret;
4204 + }
4205 +
4206 + /*
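fbcon_do_set_font() used to ignore vc_resize() failures, leaving the console believing in a font geometry the screen was never resized to. The fix snapshots every field it is about to mutate (font data, userfont flag and refcount, width/height/charcount) and restores them all on the error path. The idiom, stripped down to a hypothetical state structure:

    /* sketch of save-then-rollback; resize() stands in for vc_resize() */
    struct state {
            int width, height;
            void *data;
    };

    int resize(struct state *s);             /* stand-in, may fail */

    static int set_font(struct state *s, int w, int h, void *data)
    {
            struct state saved = *s;         /* snapshot before mutating */
            int ret;

            s->width  = w;
            s->height = h;
            s->data   = data;

            ret = resize(s);
            if (ret) {
                    *s = saved;              /* undo every change */
                    return ret;
            }
            return 0;
    }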
4207 +diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
4208 +index 3369734108af2..e88e8f6f0a334 100644
4209 +--- a/drivers/xen/privcmd.c
4210 ++++ b/drivers/xen/privcmd.c
4211 +@@ -581,27 +581,30 @@ static int lock_pages(
4212 + struct privcmd_dm_op_buf kbufs[], unsigned int num,
4213 + struct page *pages[], unsigned int nr_pages, unsigned int *pinned)
4214 + {
4215 +- unsigned int i;
4216 ++ unsigned int i, off = 0;
4217 +
4218 +- for (i = 0; i < num; i++) {
4219 ++ for (i = 0; i < num; ) {
4220 + unsigned int requested;
4221 + int page_count;
4222 +
4223 + requested = DIV_ROUND_UP(
4224 + offset_in_page(kbufs[i].uptr) + kbufs[i].size,
4225 +- PAGE_SIZE);
4226 ++ PAGE_SIZE) - off;
4227 + if (requested > nr_pages)
4228 + return -ENOSPC;
4229 +
4230 + page_count = pin_user_pages_fast(
4231 +- (unsigned long) kbufs[i].uptr,
4232 ++ (unsigned long)kbufs[i].uptr + off * PAGE_SIZE,
4233 + requested, FOLL_WRITE, pages);
4234 +- if (page_count < 0)
4235 +- return page_count;
4236 ++ if (page_count <= 0)
4237 ++ return page_count ? : -EFAULT;
4238 +
4239 + *pinned += page_count;
4240 + nr_pages -= page_count;
4241 + pages += page_count;
4242 ++
4243 ++ off = (requested == page_count) ? 0 : off + page_count;
4244 ++ i += !off;
4245 + }
4246 +
4247 + return 0;
4248 +@@ -677,10 +680,8 @@ static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
4249 + }
4250 +
4251 + rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &pinned);
4252 +- if (rc < 0) {
4253 +- nr_pages = pinned;
4254 ++ if (rc < 0)
4255 + goto out;
4256 +- }
4257 +
4258 + for (i = 0; i < kdata.num; i++) {
4259 + set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
4260 +@@ -692,7 +693,7 @@ static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
4261 + xen_preemptible_hcall_end();
4262 +
4263 + out:
4264 +- unlock_pages(pages, nr_pages);
4265 ++ unlock_pages(pages, pinned);
4266 + kfree(xbufs);
4267 + kfree(pages);
4268 + kfree(kbufs);
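lock_pages() previously assumed pin_user_pages_fast() pins everything in one call; it may legally pin fewer. The fix keeps a page offset `off` into the current buffer, resumes pinning from there, only advances to the next buffer once it is fully covered (`i += !off`), and maps a zero return to -EFAULT; the unlock path then releases exactly `pinned` pages rather than the requested total. A simplified retry skeleton, with pin_pages() as a stand-in:

    /* pin_pages() stands in for pin_user_pages_fast(): it may pin
     * fewer than `nr` pages and returns the count or a -errno */
    int pin_pages(unsigned long addr, unsigned int nr);

    #define PAGE_SIZE 4096UL

    static int pin_buffer(unsigned long uptr, unsigned int total_pages)
    {
            unsigned int off = 0;

            while (off < total_pages) {
                    int got = pin_pages(uptr + off * PAGE_SIZE,
                                        total_pages - off);

                    if (got <= 0)            /* 0 pinned is a fault too */
                            return got ? got : -EFAULT;
                    off += got;              /* resume after what we got */
            }
            return 0;
    }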
4269 +diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
4270 +index 76ee1452c57ba..37ceea85b871c 100644
4271 +--- a/fs/btrfs/btrfs_inode.h
4272 ++++ b/fs/btrfs/btrfs_inode.h
4273 +@@ -13,6 +13,13 @@
4274 + #include "ordered-data.h"
4275 + #include "delayed-inode.h"
4276 +
4277 ++/*
4278 ++ * Since we search a directory based on f_pos (struct dir_context::pos), and
4279 ++ * '.' and '..' have f_pos of 0 and 1 respectively, every other entry has to
4280 ++ * start at 2 (see btrfs_real_readdir() and dir_emit_dots()).
4281 ++ */
4282 ++#define BTRFS_DIR_START_INDEX 2
4283 ++
4284 + /*
4285 + * ordered_data_close is set by truncate when a file that used
4286 + * to have good data has been truncated to zero. When it is set
4287 +@@ -164,8 +171,9 @@ struct btrfs_inode {
4288 + u64 disk_i_size;
4289 +
4290 + /*
4291 +- * if this is a directory then index_cnt is the counter for the index
4292 +- * number for new files that are created
4293 ++ * If this is a directory then index_cnt is the counter for the index
4294 ++ * number for new files that are created. For an empty directory, this
4295 ++ * must be initialized to BTRFS_DIR_START_INDEX.
4296 + */
4297 + u64 index_cnt;
4298 +
4299 +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
4300 +index d1838de0b39c0..1831135fef1ab 100644
4301 +--- a/fs/btrfs/ctree.h
4302 ++++ b/fs/btrfs/ctree.h
4303 +@@ -105,14 +105,6 @@ struct btrfs_ref;
4304 + #define BTRFS_STAT_CURR 0
4305 + #define BTRFS_STAT_PREV 1
4306 +
4307 +-/*
4308 +- * Count how many BTRFS_MAX_EXTENT_SIZE cover the @size
4309 +- */
4310 +-static inline u32 count_max_extents(u64 size)
4311 +-{
4312 +- return div_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, BTRFS_MAX_EXTENT_SIZE);
4313 +-}
4314 +-
4315 + static inline unsigned long btrfs_chunk_item_size(int num_stripes)
4316 + {
4317 + BUG_ON(num_stripes == 0);
4318 +@@ -999,6 +991,12 @@ struct btrfs_fs_info {
4319 + u32 csums_per_leaf;
4320 + u32 stripesize;
4321 +
4322 ++ /*
4323 ++ * Maximum size of an extent. BTRFS_MAX_EXTENT_SIZE on a regular
4324 ++ * filesystem; on zoned filesystems it depends on the device constraints.
4325 ++ */
4326 ++ u64 max_extent_size;
4327 ++
4328 + /* Block groups and devices containing active swapfiles. */
4329 + spinlock_t swapfile_pins_lock;
4330 + struct rb_root swapfile_pins;
4331 +@@ -1017,6 +1015,8 @@ struct btrfs_fs_info {
4332 + u64 zoned;
4333 + };
4334 +
4335 ++ /* Max size to emit ZONE_APPEND write command */
4336 ++ u64 max_zone_append_size;
4337 + struct mutex zoned_meta_io_lock;
4338 + spinlock_t treelog_bg_lock;
4339 + u64 treelog_bg;
4340 +@@ -3870,6 +3870,19 @@ static inline bool btrfs_is_zoned(const struct btrfs_fs_info *fs_info)
4341 + return fs_info->zoned != 0;
4342 + }
4343 +
4344 ++/*
4345 ++ * Count how many extents of fs_info->max_extent_size it takes to cover @size
4346 ++ */
4347 ++static inline u32 count_max_extents(struct btrfs_fs_info *fs_info, u64 size)
4348 ++{
4349 ++#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4350 ++ if (!fs_info)
4351 ++ return div_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, BTRFS_MAX_EXTENT_SIZE);
4352 ++#endif
4353 ++
4354 ++ return div_u64(size + fs_info->max_extent_size - 1, fs_info->max_extent_size);
4355 ++}
4356 ++
4357 + static inline bool btrfs_is_data_reloc_root(const struct btrfs_root *root)
4358 + {
4359 + return root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID;
4360 +diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c
4361 +index 40c4d6ba3fb9a..b934429c24350 100644
4362 +--- a/fs/btrfs/delalloc-space.c
4363 ++++ b/fs/btrfs/delalloc-space.c
4364 +@@ -273,7 +273,7 @@ static void calc_inode_reservations(struct btrfs_fs_info *fs_info,
4365 + u64 num_bytes, u64 *meta_reserve,
4366 + u64 *qgroup_reserve)
4367 + {
4368 +- u64 nr_extents = count_max_extents(num_bytes);
4369 ++ u64 nr_extents = count_max_extents(fs_info, num_bytes);
4370 + u64 csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, num_bytes);
4371 + u64 inode_update = btrfs_calc_metadata_size(fs_info, 1);
4372 +
4373 +@@ -347,7 +347,7 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
4374 + * needs to free the reservation we just made.
4375 + */
4376 + spin_lock(&inode->lock);
4377 +- nr_extents = count_max_extents(num_bytes);
4378 ++ nr_extents = count_max_extents(fs_info, num_bytes);
4379 + btrfs_mod_outstanding_extents(inode, nr_extents);
4380 + inode->csum_bytes += num_bytes;
4381 + btrfs_calculate_inode_block_rsv_size(fs_info, inode);
4382 +@@ -410,7 +410,7 @@ void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes)
4383 + unsigned num_extents;
4384 +
4385 + spin_lock(&inode->lock);
4386 +- num_extents = count_max_extents(num_bytes);
4387 ++ num_extents = count_max_extents(fs_info, num_bytes);
4388 + btrfs_mod_outstanding_extents(inode, -num_extents);
4389 + btrfs_calculate_inode_block_rsv_size(fs_info, inode);
4390 + spin_unlock(&inode->lock);
4391 +diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
4392 +index 781556e2a37f2..03d8a2d49bf41 100644
4393 +--- a/fs/btrfs/dev-replace.c
4394 ++++ b/fs/btrfs/dev-replace.c
4395 +@@ -165,7 +165,7 @@ no_valid_dev_replace_entry_found:
4396 + */
4397 + if (btrfs_find_device(fs_info->fs_devices, &args)) {
4398 + btrfs_err(fs_info,
4399 +- "replace devid present without an active replace item");
4400 ++"replace without active item, run 'device scan --forget' on the target device");
4401 + ret = -EUCLEAN;
4402 + } else {
4403 + dev_replace->srcdev = NULL;
4404 +@@ -1151,8 +1151,7 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
4405 + up_write(&dev_replace->rwsem);
4406 +
4407 + /* Scrub for replace must not be running in suspended state */
4408 +- ret = btrfs_scrub_cancel(fs_info);
4409 +- ASSERT(ret != -ENOTCONN);
4410 ++ btrfs_scrub_cancel(fs_info);
4411 +
4412 + trans = btrfs_start_transaction(root, 0);
4413 + if (IS_ERR(trans)) {
4414 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
4415 +index e65c3039caf1c..247d7f9ced3b0 100644
4416 +--- a/fs/btrfs/disk-io.c
4417 ++++ b/fs/btrfs/disk-io.c
4418 +@@ -3006,6 +3006,8 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
4419 + fs_info->sectorsize_bits = ilog2(4096);
4420 + fs_info->stripesize = 4096;
4421 +
4422 ++ fs_info->max_extent_size = BTRFS_MAX_EXTENT_SIZE;
4423 ++
4424 + spin_lock_init(&fs_info->swapfile_pins_lock);
4425 + fs_info->swapfile_pins = RB_ROOT;
4426 +
4427 +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
4428 +index a90546b3107c5..a72a8d4d4a72e 100644
4429 +--- a/fs/btrfs/extent_io.c
4430 ++++ b/fs/btrfs/extent_io.c
4431 +@@ -1985,8 +1985,10 @@ noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
4432 + struct page *locked_page, u64 *start,
4433 + u64 *end)
4434 + {
4435 ++ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4436 + struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
4437 +- u64 max_bytes = BTRFS_MAX_EXTENT_SIZE;
4438 ++ /* The sanity tests may not set a valid fs_info. */
4439 ++ u64 max_bytes = fs_info ? fs_info->max_extent_size : BTRFS_MAX_EXTENT_SIZE;
4440 + u64 delalloc_start;
4441 + u64 delalloc_end;
4442 + bool found;
4443 +@@ -3778,10 +3780,11 @@ static void update_nr_written(struct writeback_control *wbc,
4444 + */
4445 + static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
4446 + struct page *page, struct writeback_control *wbc,
4447 +- u64 delalloc_start, unsigned long *nr_written)
4448 ++ unsigned long *nr_written)
4449 + {
4450 +- u64 page_end = delalloc_start + PAGE_SIZE - 1;
4451 ++ u64 page_end = page_offset(page) + PAGE_SIZE - 1;
4452 + bool found;
4453 ++ u64 delalloc_start = page_offset(page);
4454 + u64 delalloc_to_write = 0;
4455 + u64 delalloc_end = 0;
4456 + int ret;
4457 +@@ -4066,8 +4069,8 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
4458 + struct extent_page_data *epd)
4459 + {
4460 + struct inode *inode = page->mapping->host;
4461 +- u64 start = page_offset(page);
4462 +- u64 page_end = start + PAGE_SIZE - 1;
4463 ++ const u64 page_start = page_offset(page);
4464 ++ const u64 page_end = page_start + PAGE_SIZE - 1;
4465 + int ret;
4466 + int nr = 0;
4467 + size_t pg_offset;
4468 +@@ -4102,8 +4105,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
4469 + }
4470 +
4471 + if (!epd->extent_locked) {
4472 +- ret = writepage_delalloc(BTRFS_I(inode), page, wbc, start,
4473 +- &nr_written);
4474 ++ ret = writepage_delalloc(BTRFS_I(inode), page, wbc, &nr_written);
4475 + if (ret == 1)
4476 + return 0;
4477 + if (ret)
4478 +@@ -4153,7 +4155,7 @@ done:
4479 + * capable of that.
4480 + */
4481 + if (PageError(page))
4482 +- end_extent_writepage(page, ret, start, page_end);
4483 ++ end_extent_writepage(page, ret, page_start, page_end);
4484 + unlock_page(page);
4485 + ASSERT(ret <= 0);
4486 + return ret;
4487 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
4488 +index 20d0dea1d0c41..428a56f248bba 100644
4489 +--- a/fs/btrfs/inode.c
4490 ++++ b/fs/btrfs/inode.c
4491 +@@ -2032,6 +2032,7 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page
4492 + void btrfs_split_delalloc_extent(struct inode *inode,
4493 + struct extent_state *orig, u64 split)
4494 + {
4495 ++ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4496 + u64 size;
4497 +
4498 + /* not delalloc, ignore it */
4499 +@@ -2039,7 +2040,7 @@ void btrfs_split_delalloc_extent(struct inode *inode,
4500 + return;
4501 +
4502 + size = orig->end - orig->start + 1;
4503 +- if (size > BTRFS_MAX_EXTENT_SIZE) {
4504 ++ if (size > fs_info->max_extent_size) {
4505 + u32 num_extents;
4506 + u64 new_size;
4507 +
4508 +@@ -2048,10 +2049,10 @@ void btrfs_split_delalloc_extent(struct inode *inode,
4509 + * applies here, just in reverse.
4510 + */
4511 + new_size = orig->end - split + 1;
4512 +- num_extents = count_max_extents(new_size);
4513 ++ num_extents = count_max_extents(fs_info, new_size);
4514 + new_size = split - orig->start;
4515 +- num_extents += count_max_extents(new_size);
4516 +- if (count_max_extents(size) >= num_extents)
4517 ++ num_extents += count_max_extents(fs_info, new_size);
4518 ++ if (count_max_extents(fs_info, size) >= num_extents)
4519 + return;
4520 + }
4521 +
4522 +@@ -2068,6 +2069,7 @@ void btrfs_split_delalloc_extent(struct inode *inode,
4523 + void btrfs_merge_delalloc_extent(struct inode *inode, struct extent_state *new,
4524 + struct extent_state *other)
4525 + {
4526 ++ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4527 + u64 new_size, old_size;
4528 + u32 num_extents;
4529 +
4530 +@@ -2081,7 +2083,7 @@ void btrfs_merge_delalloc_extent(struct inode *inode, struct extent_state *new,
4531 + new_size = other->end - new->start + 1;
4532 +
4533 + /* we're not bigger than the max, unreserve the space and go */
4534 +- if (new_size <= BTRFS_MAX_EXTENT_SIZE) {
4535 ++ if (new_size <= fs_info->max_extent_size) {
4536 + spin_lock(&BTRFS_I(inode)->lock);
4537 + btrfs_mod_outstanding_extents(BTRFS_I(inode), -1);
4538 + spin_unlock(&BTRFS_I(inode)->lock);
4539 +@@ -2107,10 +2109,10 @@ void btrfs_merge_delalloc_extent(struct inode *inode, struct extent_state *new,
4540 + * this case.
4541 + */
4542 + old_size = other->end - other->start + 1;
4543 +- num_extents = count_max_extents(old_size);
4544 ++ num_extents = count_max_extents(fs_info, old_size);
4545 + old_size = new->end - new->start + 1;
4546 +- num_extents += count_max_extents(old_size);
4547 +- if (count_max_extents(new_size) >= num_extents)
4548 ++ num_extents += count_max_extents(fs_info, old_size);
4549 ++ if (count_max_extents(fs_info, new_size) >= num_extents)
4550 + return;
4551 +
4552 + spin_lock(&BTRFS_I(inode)->lock);
4553 +@@ -2189,7 +2191,7 @@ void btrfs_set_delalloc_extent(struct inode *inode, struct extent_state *state,
4554 + if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
4555 + struct btrfs_root *root = BTRFS_I(inode)->root;
4556 + u64 len = state->end + 1 - state->start;
4557 +- u32 num_extents = count_max_extents(len);
4558 ++ u32 num_extents = count_max_extents(fs_info, len);
4559 + bool do_list = !btrfs_is_free_space_inode(BTRFS_I(inode));
4560 +
4561 + spin_lock(&BTRFS_I(inode)->lock);
4562 +@@ -2231,7 +2233,7 @@ void btrfs_clear_delalloc_extent(struct inode *vfs_inode,
4563 + struct btrfs_inode *inode = BTRFS_I(vfs_inode);
4564 + struct btrfs_fs_info *fs_info = btrfs_sb(vfs_inode->i_sb);
4565 + u64 len = state->end + 1 - state->start;
4566 +- u32 num_extents = count_max_extents(len);
4567 ++ u32 num_extents = count_max_extents(fs_info, len);
4568 +
4569 + if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG)) {
4570 + spin_lock(&inode->lock);
4571 +@@ -6394,14 +6396,8 @@ static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
4572 + goto out;
4573 + ret = 0;
4574 +
4575 +- /*
4576 +- * MAGIC NUMBER EXPLANATION:
4577 +- * since we search a directory based on f_pos we have to start at 2
4578 +- * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody
4579 +- * else has to start at 2
4580 +- */
4581 + if (path->slots[0] == 0) {
4582 +- inode->index_cnt = 2;
4583 ++ inode->index_cnt = BTRFS_DIR_START_INDEX;
4584 + goto out;
4585 + }
4586 +
4587 +@@ -6412,7 +6408,7 @@ static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
4588 +
4589 + if (found_key.objectid != btrfs_ino(inode) ||
4590 + found_key.type != BTRFS_DIR_INDEX_KEY) {
4591 +- inode->index_cnt = 2;
4592 ++ inode->index_cnt = BTRFS_DIR_START_INDEX;
4593 + goto out;
4594 + }
4595 +
4596 +@@ -6956,7 +6952,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4597 + goto fail;
4598 + }
4599 + d_instantiate(dentry, inode);
4600 +- btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent);
4601 ++ btrfs_log_new_name(trans, old_dentry, NULL, parent);
4602 + }
4603 +
4604 + fail:
4605 +@@ -9625,13 +9621,13 @@ static int btrfs_rename_exchange(struct inode *old_dir,
4606 + BTRFS_I(new_inode)->dir_index = new_idx;
4607 +
4608 + if (root_log_pinned) {
4609 +- btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir),
4610 ++ btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
4611 + new_dentry->d_parent);
4612 + btrfs_end_log_trans(root);
4613 + root_log_pinned = false;
4614 + }
4615 + if (dest_log_pinned) {
4616 +- btrfs_log_new_name(trans, BTRFS_I(new_inode), BTRFS_I(new_dir),
4617 ++ btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir),
4618 + old_dentry->d_parent);
4619 + btrfs_end_log_trans(dest);
4620 + dest_log_pinned = false;
4621 +@@ -9912,7 +9908,7 @@ static int btrfs_rename(struct user_namespace *mnt_userns,
4622 + BTRFS_I(old_inode)->dir_index = index;
4623 +
4624 + if (log_pinned) {
4625 +- btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir),
4626 ++ btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
4627 + new_dentry->d_parent);
4628 + btrfs_end_log_trans(root);
4629 + log_pinned = false;
4630 +diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
4631 +index 1fa0e5e2e3505..9328d87d96888 100644
4632 +--- a/fs/btrfs/root-tree.c
4633 ++++ b/fs/btrfs/root-tree.c
4634 +@@ -351,9 +351,10 @@ int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
4635 + key.offset = ref_id;
4636 + again:
4637 + ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
4638 +- if (ret < 0)
4639 ++ if (ret < 0) {
4640 ++ err = ret;
4641 + goto out;
4642 +- if (ret == 0) {
4643 ++ } else if (ret == 0) {
4644 + leaf = path->nodes[0];
4645 + ref = btrfs_item_ptr(leaf, path->slots[0],
4646 + struct btrfs_root_ref);
4647 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
4648 +index e9e1aae89030a..1d7e9812f55e1 100644
4649 +--- a/fs/btrfs/tree-log.c
4650 ++++ b/fs/btrfs/tree-log.c
4651 +@@ -6628,14 +6628,25 @@ void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
4652 + mutex_unlock(&dir->log_mutex);
4653 + }
4654 +
4655 +-/*
4656 +- * Call this after adding a new name for a file and it will properly
4657 +- * update the log to reflect the new name.
4658 ++/**
4659 ++ * Update the log after adding a new name for an inode.
4660 ++ *
4661 ++ * @trans: Transaction handle.
4662 ++ * @old_dentry: The dentry associated with the old name and the old
4663 ++ * parent directory.
4664 ++ * @old_dir: The inode of the previous parent directory for the case
4665 ++ * of a rename. For a link operation, it must be NULL.
4666 ++ * @parent: The dentry associated with the directory under which the
4667 ++ * new name is located.
4668 ++ *
4669 ++ * Call this after adding a new name for an inode, as a result of a link or
4670 ++ * rename operation, and it will properly update the log to reflect the new name.
4671 + */
4672 + void btrfs_log_new_name(struct btrfs_trans_handle *trans,
4673 +- struct btrfs_inode *inode, struct btrfs_inode *old_dir,
4674 ++ struct dentry *old_dentry, struct btrfs_inode *old_dir,
4675 + struct dentry *parent)
4676 + {
4677 ++ struct btrfs_inode *inode = BTRFS_I(d_inode(old_dentry));
4678 + struct btrfs_log_ctx ctx;
4679 +
4680 + /*
4681 +diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
4682 +index 731bd9c029f55..7ffcac8a89905 100644
4683 +--- a/fs/btrfs/tree-log.h
4684 ++++ b/fs/btrfs/tree-log.h
4685 +@@ -84,7 +84,7 @@ void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
4686 + void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
4687 + struct btrfs_inode *dir);
4688 + void btrfs_log_new_name(struct btrfs_trans_handle *trans,
4689 +- struct btrfs_inode *inode, struct btrfs_inode *old_dir,
4690 ++ struct dentry *old_dentry, struct btrfs_inode *old_dir,
4691 + struct dentry *parent);
4692 +
4693 + #endif
4694 +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
4695 +index 2a93d80be9bf7..0f22d91e23927 100644
4696 +--- a/fs/btrfs/volumes.c
4697 ++++ b/fs/btrfs/volumes.c
4698 +@@ -2392,8 +2392,11 @@ int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
4699 +
4700 + ret = btrfs_get_bdev_and_sb(path, FMODE_READ, fs_info->bdev_holder, 0,
4701 + &bdev, &disk_super);
4702 +- if (ret)
4703 ++ if (ret) {
4704 ++ btrfs_put_dev_args_from_path(args);
4705 + return ret;
4706 ++ }
4707 ++
4708 + args->devid = btrfs_stack_device_id(&disk_super->dev_item);
4709 + memcpy(args->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE);
4710 + if (btrfs_fs_incompat(fs_info, METADATA_UUID))
4711 +diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
4712 +index c5c5b97c2a852..43fe2c2a955e2 100644
4713 +--- a/fs/btrfs/xattr.c
4714 ++++ b/fs/btrfs/xattr.c
4715 +@@ -391,6 +391,9 @@ static int btrfs_xattr_handler_set(const struct xattr_handler *handler,
4716 + const char *name, const void *buffer,
4717 + size_t size, int flags)
4718 + {
4719 ++ if (btrfs_root_readonly(BTRFS_I(inode)->root))
4720 ++ return -EROFS;
4721 ++
4722 + name = xattr_full_name(handler, name);
4723 + return btrfs_setxattr_trans(inode, name, buffer, size, flags);
4724 + }
4725 +diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
4726 +index fc791f7c71428..7a127d3c521f9 100644
4727 +--- a/fs/btrfs/zoned.c
4728 ++++ b/fs/btrfs/zoned.c
4729 +@@ -386,6 +386,16 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
4730 + nr_sectors = bdev_nr_sectors(bdev);
4731 + zone_info->zone_size_shift = ilog2(zone_info->zone_size);
4732 + zone_info->nr_zones = nr_sectors >> ilog2(zone_sectors);
4733 ++ /*
4734 ++ * We limit max_zone_append_size also by max_segments *
4735 ++ * PAGE_SIZE. Technically, we can have multiple pages per segment. But,
4736 ++ * since btrfs adds the pages one by one to a bio, and btrfs cannot
4737 ++ * increase the metadata reservation even if it increases the number of
4738 ++ * extents, it is safe to stick with the limit.
4739 ++ */
4740 ++ zone_info->max_zone_append_size =
4741 ++ min_t(u64, (u64)bdev_max_zone_append_sectors(bdev) << SECTOR_SHIFT,
4742 ++ (u64)bdev_max_segments(bdev) << PAGE_SHIFT);
4743 + if (!IS_ALIGNED(nr_sectors, zone_sectors))
4744 + zone_info->nr_zones++;
4745 +
4746 +@@ -570,6 +580,7 @@ int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
4747 + u64 zoned_devices = 0;
4748 + u64 nr_devices = 0;
4749 + u64 zone_size = 0;
4750 ++ u64 max_zone_append_size = 0;
4751 + const bool incompat_zoned = btrfs_fs_incompat(fs_info, ZONED);
4752 + int ret = 0;
4753 +
4754 +@@ -605,6 +616,11 @@ int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
4755 + ret = -EINVAL;
4756 + goto out;
4757 + }
4758 ++ if (!max_zone_append_size ||
4759 ++ (zone_info->max_zone_append_size &&
4760 ++ zone_info->max_zone_append_size < max_zone_append_size))
4761 ++ max_zone_append_size =
4762 ++ zone_info->max_zone_append_size;
4763 + }
4764 + nr_devices++;
4765 + }
4766 +@@ -654,7 +670,11 @@ int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
4767 + }
4768 +
4769 + fs_info->zone_size = zone_size;
4770 ++ fs_info->max_zone_append_size = ALIGN_DOWN(max_zone_append_size,
4771 ++ fs_info->sectorsize);
4772 + fs_info->fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_ZONED;
4773 ++ if (fs_info->max_zone_append_size < fs_info->max_extent_size)
4774 ++ fs_info->max_extent_size = fs_info->max_zone_append_size;
4775 +
4776 + /*
4777 + * Check mount options here, because we might change fs_info->zoned
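The new clamp above takes the smaller of two byte limits: the device's advertised zone-append size and its segment budget (max_segments * PAGE_SIZE), since btrfs adds pages to a bio one at a time. A minimal userspace sketch of the same arithmetic, with hypothetical device values, not part of the patch:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9
#define PAGE_SHIFT   12

/* smaller of the two byte limits, as in btrfs_get_dev_zone_info() above */
static uint64_t zone_append_limit(uint64_t append_sectors, uint64_t max_segments)
{
	uint64_t by_sectors  = append_sectors << SECTOR_SHIFT;
	uint64_t by_segments = max_segments << PAGE_SHIFT;

	return by_sectors < by_segments ? by_sectors : by_segments;
}

int main(void)
{
	/* hypothetical device: 1024 append sectors (512 KiB), 64 segments (256 KiB) */
	printf("%llu\n", (unsigned long long)zone_append_limit(1024, 64)); /* 262144 */
	return 0;
}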
4778 +diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h
4779 +index 574490ea2cc87..1ef493fcd504e 100644
4780 +--- a/fs/btrfs/zoned.h
4781 ++++ b/fs/btrfs/zoned.h
4782 +@@ -23,6 +23,7 @@ struct btrfs_zoned_device_info {
4783 + */
4784 + u64 zone_size;
4785 + u8 zone_size_shift;
4786 ++ u64 max_zone_append_size;
4787 + u32 nr_zones;
4788 + unsigned long *seq_zones;
4789 + unsigned long *empty_zones;
4790 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
4791 +index 07895e9d537c8..2d31860d56e96 100644
4792 +--- a/fs/cifs/smb2ops.c
4793 ++++ b/fs/cifs/smb2ops.c
4794 +@@ -3599,7 +3599,7 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
4795 + static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
4796 + loff_t offset, loff_t len)
4797 + {
4798 +- struct inode *inode;
4799 ++ struct inode *inode = file_inode(file);
4800 + struct cifsFileInfo *cfile = file->private_data;
4801 + struct file_zero_data_information fsctl_buf;
4802 + long rc;
4803 +@@ -3608,14 +3608,12 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
4804 +
4805 + xid = get_xid();
4806 +
4807 +- inode = d_inode(cfile->dentry);
4808 +-
4809 ++ inode_lock(inode);
4810 + /* Need to make file sparse, if not already, before freeing range. */
4811 + /* Consider adding equivalent for compressed since it could also work */
4812 + if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
4813 + rc = -EOPNOTSUPP;
4814 +- free_xid(xid);
4815 +- return rc;
4816 ++ goto out;
4817 + }
4818 +
4819 + filemap_invalidate_lock(inode->i_mapping);
4820 +@@ -3635,8 +3633,10 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
4821 + true /* is_fctl */, (char *)&fsctl_buf,
4822 + sizeof(struct file_zero_data_information),
4823 + CIFSMaxBufSize, NULL, NULL);
4824 +- free_xid(xid);
4825 + filemap_invalidate_unlock(inode->i_mapping);
4826 ++out:
4827 ++ inode_unlock(inode);
4828 ++ free_xid(xid);
4829 + return rc;
4830 + }
4831 +
4832 +diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
4833 +index 1fd8d09416c4b..9761470a7ecf5 100644
4834 +--- a/fs/fs-writeback.c
4835 ++++ b/fs/fs-writeback.c
4836 +@@ -134,10 +134,10 @@ static bool inode_io_list_move_locked(struct inode *inode,
4837 +
4838 + static void wb_wakeup(struct bdi_writeback *wb)
4839 + {
4840 +- spin_lock_bh(&wb->work_lock);
4841 ++ spin_lock_irq(&wb->work_lock);
4842 + if (test_bit(WB_registered, &wb->state))
4843 + mod_delayed_work(bdi_wq, &wb->dwork, 0);
4844 +- spin_unlock_bh(&wb->work_lock);
4845 ++ spin_unlock_irq(&wb->work_lock);
4846 + }
4847 +
4848 + static void finish_writeback_work(struct bdi_writeback *wb,
4849 +@@ -164,7 +164,7 @@ static void wb_queue_work(struct bdi_writeback *wb,
4850 + if (work->done)
4851 + atomic_inc(&work->done->cnt);
4852 +
4853 +- spin_lock_bh(&wb->work_lock);
4854 ++ spin_lock_irq(&wb->work_lock);
4855 +
4856 + if (test_bit(WB_registered, &wb->state)) {
4857 + list_add_tail(&work->list, &wb->work_list);
4858 +@@ -172,7 +172,7 @@ static void wb_queue_work(struct bdi_writeback *wb,
4859 + } else
4860 + finish_writeback_work(wb, work);
4861 +
4862 +- spin_unlock_bh(&wb->work_lock);
4863 ++ spin_unlock_irq(&wb->work_lock);
4864 + }
4865 +
4866 + /**
4867 +@@ -2109,13 +2109,13 @@ static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
4868 + {
4869 + struct wb_writeback_work *work = NULL;
4870 +
4871 +- spin_lock_bh(&wb->work_lock);
4872 ++ spin_lock_irq(&wb->work_lock);
4873 + if (!list_empty(&wb->work_list)) {
4874 + work = list_entry(wb->work_list.next,
4875 + struct wb_writeback_work, list);
4876 + list_del_init(&work->list);
4877 + }
4878 +- spin_unlock_bh(&wb->work_lock);
4879 ++ spin_unlock_irq(&wb->work_lock);
4880 + return work;
4881 + }
4882 +
4883 +diff --git a/fs/io_uring.c b/fs/io_uring.c
4884 +index 89f24b54fe5e8..2680e9756b1d4 100644
4885 +--- a/fs/io_uring.c
4886 ++++ b/fs/io_uring.c
4887 +@@ -3720,7 +3720,12 @@ done:
4888 + copy_iov:
4889 + iov_iter_restore(iter, state);
4890 + ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
4891 +- return ret ?: -EAGAIN;
4892 ++ if (!ret) {
4893 ++ if (kiocb->ki_flags & IOCB_WRITE)
4894 ++ kiocb_end_write(req);
4895 ++ return -EAGAIN;
4896 ++ }
4897 ++ return ret;
4898 + }
4899 + out_free:
4900 + /* it's reportedly faster than delegating the null check to kfree() */
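The io_uring fix above balances kiocb_end_write() on the retry path rather than only on completion. A hypothetical standalone sketch of that bracketing pattern; the helper names and the -EAGAIN value (-11) are illustrative only:

#include <stdbool.h>
#include <stdio.h>

static int write_refs;

static void start_write(void) { write_refs++; }
static void end_write(void)   { write_refs--; }

/* every early exit that will retry must undo the write bracket first */
static int submit(bool will_retry)
{
	start_write();
	if (will_retry) {
		end_write(); /* the fix: release before returning -EAGAIN */
		return -11;
	}
	/* ... perform the write ... */
	end_write();
	return 0;
}

int main(void)
{
	submit(true);
	printf("%d\n", write_refs); /* 0: balanced even on the retry path */
	return 0;
}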
4901 +diff --git a/fs/namespace.c b/fs/namespace.c
4902 +index dc31ad6b370f3..d946298691ed4 100644
4903 +--- a/fs/namespace.c
4904 ++++ b/fs/namespace.c
4905 +@@ -4168,6 +4168,13 @@ static int build_mount_idmapped(const struct mount_attr *attr, size_t usize,
4906 + err = -EPERM;
4907 + goto out_fput;
4908 + }
4909 ++
4910 ++ /* We're not controlling the target namespace. */
4911 ++ if (!ns_capable(mnt_userns, CAP_SYS_ADMIN)) {
4912 ++ err = -EPERM;
4913 ++ goto out_fput;
4914 ++ }
4915 ++
4916 + kattr->mnt_userns = get_user_ns(mnt_userns);
4917 +
4918 + out_fput:
4919 +diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
4920 +index 4120e1cb3feef..14f2efdecc2f8 100644
4921 +--- a/fs/nfs/nfs4file.c
4922 ++++ b/fs/nfs/nfs4file.c
4923 +@@ -319,7 +319,7 @@ static int read_name_gen = 1;
4924 + static struct file *__nfs42_ssc_open(struct vfsmount *ss_mnt,
4925 + struct nfs_fh *src_fh, nfs4_stateid *stateid)
4926 + {
4927 +- struct nfs_fattr fattr;
4928 ++ struct nfs_fattr *fattr = nfs_alloc_fattr();
4929 + struct file *filep, *res;
4930 + struct nfs_server *server;
4931 + struct inode *r_ino = NULL;
4932 +@@ -330,14 +330,20 @@ static struct file *__nfs42_ssc_open(struct vfsmount *ss_mnt,
4933 +
4934 + server = NFS_SERVER(ss_mnt->mnt_root->d_inode);
4935 +
4936 +- nfs_fattr_init(&fattr);
4937 ++ if (!fattr)
4938 ++ return ERR_PTR(-ENOMEM);
4939 +
4940 +- status = nfs4_proc_getattr(server, src_fh, &fattr, NULL, NULL);
4941 ++ status = nfs4_proc_getattr(server, src_fh, fattr, NULL, NULL);
4942 + if (status < 0) {
4943 + res = ERR_PTR(status);
4944 + goto out;
4945 + }
4946 +
4947 ++ if (!S_ISREG(fattr->mode)) {
4948 ++ res = ERR_PTR(-EBADF);
4949 ++ goto out;
4950 ++ }
4951 ++
4952 + res = ERR_PTR(-ENOMEM);
4953 + len = strlen(SSC_READ_NAME_BODY) + 16;
4954 + read_name = kzalloc(len, GFP_NOFS);
4955 +@@ -345,7 +351,7 @@ static struct file *__nfs42_ssc_open(struct vfsmount *ss_mnt,
4956 + goto out;
4957 + snprintf(read_name, len, SSC_READ_NAME_BODY, read_name_gen++);
4958 +
4959 +- r_ino = nfs_fhget(ss_mnt->mnt_root->d_inode->i_sb, src_fh, &fattr,
4960 ++ r_ino = nfs_fhget(ss_mnt->mnt_root->d_inode->i_sb, src_fh, fattr,
4961 + NULL);
4962 + if (IS_ERR(r_ino)) {
4963 + res = ERR_CAST(r_ino);
4964 +@@ -356,6 +362,7 @@ static struct file *__nfs42_ssc_open(struct vfsmount *ss_mnt,
4965 + r_ino->i_fop);
4966 + if (IS_ERR(filep)) {
4967 + res = ERR_CAST(filep);
4968 ++ iput(r_ino);
4969 + goto out_free_name;
4970 + }
4971 + filep->f_mode |= FMODE_READ;
4972 +@@ -390,6 +397,7 @@ static struct file *__nfs42_ssc_open(struct vfsmount *ss_mnt,
4973 + out_free_name:
4974 + kfree(read_name);
4975 + out:
4976 ++ nfs_free_fattr(fattr);
4977 + return res;
4978 + out_stateowner:
4979 + nfs4_put_state_owner(sp);
4980 +diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c
4981 +index 872eb56bb1706..e8bfa709270d1 100644
4982 +--- a/fs/ntfs3/xattr.c
4983 ++++ b/fs/ntfs3/xattr.c
4984 +@@ -476,8 +476,7 @@ out:
4985 + }
4986 +
4987 + #ifdef CONFIG_NTFS3_FS_POSIX_ACL
4988 +-static struct posix_acl *ntfs_get_acl_ex(struct user_namespace *mnt_userns,
4989 +- struct inode *inode, int type,
4990 ++static struct posix_acl *ntfs_get_acl_ex(struct inode *inode, int type,
4991 + int locked)
4992 + {
4993 + struct ntfs_inode *ni = ntfs_i(inode);
4994 +@@ -512,7 +511,7 @@ static struct posix_acl *ntfs_get_acl_ex(struct user_namespace *mnt_userns,
4995 +
4996 + /* Translate extended attribute to acl. */
4997 + if (err >= 0) {
4998 +- acl = posix_acl_from_xattr(mnt_userns, buf, err);
4999 ++ acl = posix_acl_from_xattr(&init_user_ns, buf, err);
5000 + } else if (err == -ENODATA) {
5001 + acl = NULL;
5002 + } else {
5003 +@@ -535,8 +534,7 @@ struct posix_acl *ntfs_get_acl(struct inode *inode, int type, bool rcu)
5004 + if (rcu)
5005 + return ERR_PTR(-ECHILD);
5006 +
5007 +- /* TODO: init_user_ns? */
5008 +- return ntfs_get_acl_ex(&init_user_ns, inode, type, 0);
5009 ++ return ntfs_get_acl_ex(inode, type, 0);
5010 + }
5011 +
5012 + static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
5013 +@@ -588,7 +586,7 @@ static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
5014 + value = kmalloc(size, GFP_NOFS);
5015 + if (!value)
5016 + return -ENOMEM;
5017 +- err = posix_acl_to_xattr(mnt_userns, acl, value, size);
5018 ++ err = posix_acl_to_xattr(&init_user_ns, acl, value, size);
5019 + if (err < 0)
5020 + goto out;
5021 + flags = 0;
5022 +@@ -639,7 +637,7 @@ static int ntfs_xattr_get_acl(struct user_namespace *mnt_userns,
5023 + if (!acl)
5024 + return -ENODATA;
5025 +
5026 +- err = posix_acl_to_xattr(mnt_userns, acl, buffer, size);
5027 ++ err = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
5028 + posix_acl_release(acl);
5029 +
5030 + return err;
5031 +@@ -663,12 +661,12 @@ static int ntfs_xattr_set_acl(struct user_namespace *mnt_userns,
5032 + if (!value) {
5033 + acl = NULL;
5034 + } else {
5035 +- acl = posix_acl_from_xattr(mnt_userns, value, size);
5036 ++ acl = posix_acl_from_xattr(&init_user_ns, value, size);
5037 + if (IS_ERR(acl))
5038 + return PTR_ERR(acl);
5039 +
5040 + if (acl) {
5041 +- err = posix_acl_valid(mnt_userns, acl);
5042 ++ err = posix_acl_valid(&init_user_ns, acl);
5043 + if (err)
5044 + goto release_and_out;
5045 + }
5046 +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
5047 +index 79ca4d69dfd6b..d9c07eecd7872 100644
5048 +--- a/fs/proc/task_mmu.c
5049 ++++ b/fs/proc/task_mmu.c
5050 +@@ -503,10 +503,12 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
5051 + struct vm_area_struct *vma = walk->vma;
5052 + bool locked = !!(vma->vm_flags & VM_LOCKED);
5053 + struct page *page = NULL;
5054 +- bool migration = false;
5055 ++ bool migration = false, young = false, dirty = false;
5056 +
5057 + if (pte_present(*pte)) {
5058 + page = vm_normal_page(vma, addr, *pte);
5059 ++ young = pte_young(*pte);
5060 ++ dirty = pte_dirty(*pte);
5061 + } else if (is_swap_pte(*pte)) {
5062 + swp_entry_t swpent = pte_to_swp_entry(*pte);
5063 +
5064 +@@ -540,8 +542,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
5065 + if (!page)
5066 + return;
5067 +
5068 +- smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte),
5069 +- locked, migration);
5070 ++ smaps_account(mss, page, false, young, dirty, locked, migration);
5071 + }
5072 +
5073 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5074 +diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
5075 +index ecf564d150b3c..f8feaed0b54d3 100644
5076 +--- a/fs/zonefs/super.c
5077 ++++ b/fs/zonefs/super.c
5078 +@@ -723,13 +723,12 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
5079 + struct inode *inode = file_inode(iocb->ki_filp);
5080 + struct zonefs_inode_info *zi = ZONEFS_I(inode);
5081 + struct block_device *bdev = inode->i_sb->s_bdev;
5082 +- unsigned int max;
5083 ++ unsigned int max = bdev_max_zone_append_sectors(bdev);
5084 + struct bio *bio;
5085 + ssize_t size;
5086 + int nr_pages;
5087 + ssize_t ret;
5088 +
5089 +- max = queue_max_zone_append_sectors(bdev_get_queue(bdev));
5090 + max = ALIGN_DOWN(max << SECTOR_SHIFT, inode->i_sb->s_blocksize);
5091 + iov_iter_truncate(from, max);
5092 +
5093 +diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
5094 +index d16302d3eb597..72f1e2a8c1670 100644
5095 +--- a/include/asm-generic/sections.h
5096 ++++ b/include/asm-generic/sections.h
5097 +@@ -114,7 +114,7 @@ static inline bool memory_contains(void *begin, void *end, void *virt,
5098 + /**
5099 + * memory_intersects - checks if the region occupied by an object intersects
5100 + * with another memory region
5101 +- * @begin: virtual address of the beginning of the memory regien
5102 ++ * @begin: virtual address of the beginning of the memory region
5103 + * @end: virtual address of the end of the memory region
5104 + * @virt: virtual address of the memory object
5105 + * @size: size of the memory object
5106 +@@ -127,7 +127,10 @@ static inline bool memory_intersects(void *begin, void *end, void *virt,
5107 + {
5108 + void *vend = virt + size;
5109 +
5110 +- return (virt >= begin && virt < end) || (vend >= begin && vend < end);
5111 ++ if (virt < end && vend > begin)
5112 ++ return true;
5113 ++
5114 ++ return false;
5115 + }
5116 +
5117 + /**
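The rewritten predicate treats the two ranges as half-open intervals, which overlap exactly when each begins before the other ends. A small sketch with hypothetical values, including the case the old endpoint test missed (an object that fully covers the region):

#include <stdbool.h>
#include <stdio.h>

/* interval-overlap form used by the fix above */
static bool intersects(unsigned long begin, unsigned long end,
		       unsigned long virt, unsigned long size)
{
	unsigned long vend = virt + size;

	return virt < end && vend > begin;
}

int main(void)
{
	/* object [5, 35) fully covers region [10, 20): neither endpoint of
	 * the object lies inside the region, so the old test returned false */
	printf("%d\n", intersects(10, 20, 5, 30)); /* 1 */
	printf("%d\n", intersects(10, 20, 20, 5)); /* 0: adjacent, no overlap */
	return 0;
}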
5118 +diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
5119 +index 8863b4a378afe..67344dfe07a7c 100644
5120 +--- a/include/linux/blkdev.h
5121 ++++ b/include/linux/blkdev.h
5122 +@@ -1387,6 +1387,17 @@ static inline unsigned int queue_max_zone_append_sectors(const struct request_qu
5123 + return min(l->max_zone_append_sectors, l->max_sectors);
5124 + }
5125 +
5126 ++static inline unsigned int
5127 ++bdev_max_zone_append_sectors(struct block_device *bdev)
5128 ++{
5129 ++ return queue_max_zone_append_sectors(bdev_get_queue(bdev));
5130 ++}
5131 ++
5132 ++static inline unsigned int bdev_max_segments(struct block_device *bdev)
5133 ++{
5134 ++ return queue_max_segments(bdev_get_queue(bdev));
5135 ++}
5136 ++
5137 + static inline unsigned queue_logical_block_size(const struct request_queue *q)
5138 + {
5139 + int retval = 512;
5140 +diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
5141 +index 1e7399fc69c0a..054e654f06def 100644
5142 +--- a/include/linux/cpumask.h
5143 ++++ b/include/linux/cpumask.h
5144 +@@ -1045,4 +1045,22 @@ cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
5145 + [0] = 1UL \
5146 + } }
5147 +
5148 ++/*
5149 ++ * Provide a valid theoretical max size for cpumap and cpulist sysfs files
5150 ++ * to avoid breaking userspace, which may allocate a buffer based on the size
5151 ++ * reported by e.g. fstat.
5152 ++ *
5153 ++ * For cpumap, NR_CPUS * 9/32 - 1 should be an exact length.
5154 ++ *
5155 ++ * For cpulist, 7 is (ceil(log10(NR_CPUS)) + 1), allowing for NR_CPUS to be up
5156 ++ * to 2 orders of magnitude larger than 8192. And then we divide by 2 to
5157 ++ * cover a worst-case of every other cpu being on one of two nodes for a
5158 ++ * very large NR_CPUS.
5159 ++ *
5160 ++ * Use PAGE_SIZE as a minimum for smaller configurations.
5161 ++ */
5162 ++#define CPUMAP_FILE_MAX_BYTES ((((NR_CPUS * 9)/32 - 1) > PAGE_SIZE) \
5163 ++ ? (NR_CPUS * 9)/32 - 1 : PAGE_SIZE)
5164 ++#define CPULIST_FILE_MAX_BYTES (((NR_CPUS * 7)/2 > PAGE_SIZE) ? (NR_CPUS * 7)/2 : PAGE_SIZE)
5165 ++
5166 + #endif /* __LINUX_CPUMASK_H */
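Plugging a hypothetical NR_CPUS of 8192 and a 4 KiB PAGE_SIZE into the two macros gives a quick worked example of the sizing:

#include <stdio.h>

#define NR_CPUS   8192 /* hypothetical configuration */
#define PAGE_SIZE 4096

#define CPUMAP_FILE_MAX_BYTES  ((((NR_CPUS * 9)/32 - 1) > PAGE_SIZE) \
		? (NR_CPUS * 9)/32 - 1 : PAGE_SIZE)
#define CPULIST_FILE_MAX_BYTES (((NR_CPUS * 7)/2 > PAGE_SIZE) ? (NR_CPUS * 7)/2 : PAGE_SIZE)

int main(void)
{
	/* 8192 * 9 / 32 - 1 = 2303, below PAGE_SIZE, so the 4096 floor wins */
	printf("cpumap:  %d\n", CPUMAP_FILE_MAX_BYTES);  /* 4096 */
	/* 8192 * 7 / 2 = 28672, well above the floor */
	printf("cpulist: %d\n", CPULIST_FILE_MAX_BYTES); /* 28672 */
	return 0;
}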
5167 +diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
5168 +index d35439db047cb..4f189b17dafcc 100644
5169 +--- a/include/linux/memcontrol.h
5170 ++++ b/include/linux/memcontrol.h
5171 +@@ -966,19 +966,30 @@ static inline void mod_memcg_state(struct mem_cgroup *memcg,
5172 +
5173 + static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
5174 + {
5175 +- return READ_ONCE(memcg->vmstats.state[idx]);
5176 ++ long x = READ_ONCE(memcg->vmstats.state[idx]);
5177 ++#ifdef CONFIG_SMP
5178 ++ if (x < 0)
5179 ++ x = 0;
5180 ++#endif
5181 ++ return x;
5182 + }
5183 +
5184 + static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
5185 + enum node_stat_item idx)
5186 + {
5187 + struct mem_cgroup_per_node *pn;
5188 ++ long x;
5189 +
5190 + if (mem_cgroup_disabled())
5191 + return node_page_state(lruvec_pgdat(lruvec), idx);
5192 +
5193 + pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
5194 +- return READ_ONCE(pn->lruvec_stats.state[idx]);
5195 ++ x = READ_ONCE(pn->lruvec_stats.state[idx]);
5196 ++#ifdef CONFIG_SMP
5197 ++ if (x < 0)
5198 ++ x = 0;
5199 ++#endif
5200 ++ return x;
5201 + }
5202 +
5203 + static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
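Both helpers above clamp the sum to zero because per-cpu deltas are folded in without synchronization, so a reader can transiently observe a negative total; returned as unsigned long, that would look like an enormous value. A minimal sketch of the clamp, with a hypothetical build flag standing in for CONFIG_SMP:

#include <stdio.h>

#define SMP_BUILD 1 /* hypothetical stand-in for CONFIG_SMP */

static unsigned long page_state(long raw_sum)
{
	long x = raw_sum;

#ifdef SMP_BUILD
	if (x < 0) /* transient per-cpu race, not a real negative count */
		x = 0;
#endif
	return (unsigned long)x;
}

int main(void)
{
	printf("%lu\n", page_state(-42)); /* 0, not 18446744073709551574 */
	return 0;
}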
5204 +diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
5205 +index f17d2101af7a0..4c678de4608dd 100644
5206 +--- a/include/linux/mlx5/driver.h
5207 ++++ b/include/linux/mlx5/driver.h
5208 +@@ -759,6 +759,7 @@ struct mlx5_core_dev {
5209 + enum mlx5_device_state state;
5210 + /* sync interface state */
5211 + struct mutex intf_state_mutex;
5212 ++ struct lock_class_key lock_key;
5213 + unsigned long intf_state;
5214 + struct mlx5_priv priv;
5215 + struct mlx5_profile profile;
5216 +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
5217 +index f8d46dc62d658..3b97438afe3e2 100644
5218 +--- a/include/linux/netdevice.h
5219 ++++ b/include/linux/netdevice.h
5220 +@@ -626,9 +626,23 @@ extern int sysctl_devconf_inherit_init_net;
5221 + */
5222 + static inline bool net_has_fallback_tunnels(const struct net *net)
5223 + {
5224 +- return !IS_ENABLED(CONFIG_SYSCTL) ||
5225 +- !sysctl_fb_tunnels_only_for_init_net ||
5226 +- (net == &init_net && sysctl_fb_tunnels_only_for_init_net == 1);
5227 ++#if IS_ENABLED(CONFIG_SYSCTL)
5228 ++ int fb_tunnels_only_for_init_net = READ_ONCE(sysctl_fb_tunnels_only_for_init_net);
5229 ++
5230 ++ return !fb_tunnels_only_for_init_net ||
5231 ++ (net_eq(net, &init_net) && fb_tunnels_only_for_init_net == 1);
5232 ++#else
5233 ++ return true;
5234 ++#endif
5235 ++}
5236 ++
5237 ++static inline int net_inherit_devconf(void)
5238 ++{
5239 ++#if IS_ENABLED(CONFIG_SYSCTL)
5240 ++ return READ_ONCE(sysctl_devconf_inherit_init_net);
5241 ++#else
5242 ++ return 0;
5243 ++#endif
5244 + }
5245 +
5246 + static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
5247 +diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
5248 +index 10a01978bc0d3..bde9db771ae41 100644
5249 +--- a/include/linux/netfilter_bridge/ebtables.h
5250 ++++ b/include/linux/netfilter_bridge/ebtables.h
5251 +@@ -94,10 +94,6 @@ struct ebt_table {
5252 + struct ebt_replace_kernel *table;
5253 + unsigned int valid_hooks;
5254 + rwlock_t lock;
5255 +- /* e.g. could be the table explicitly only allows certain
5256 +- * matches, targets, ... 0 == let it in */
5257 +- int (*check)(const struct ebt_table_info *info,
5258 +- unsigned int valid_hooks);
5259 + /* the data used by the kernel */
5260 + struct ebt_table_info *private;
5261 + struct nf_hook_ops *ops;
5262 +diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
5263 +index 40296ed976a97..3459a04a3d61c 100644
5264 +--- a/include/net/busy_poll.h
5265 ++++ b/include/net/busy_poll.h
5266 +@@ -33,7 +33,7 @@ extern unsigned int sysctl_net_busy_poll __read_mostly;
5267 +
5268 + static inline bool net_busy_loop_on(void)
5269 + {
5270 +- return sysctl_net_busy_poll;
5271 ++ return READ_ONCE(sysctl_net_busy_poll);
5272 + }
5273 +
5274 + static inline bool sk_can_busy_loop(const struct sock *sk)
5275 +diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
5276 +index 9f927c44087de..aaa518e777e9e 100644
5277 +--- a/include/net/netfilter/nf_flow_table.h
5278 ++++ b/include/net/netfilter/nf_flow_table.h
5279 +@@ -266,6 +266,7 @@ void flow_offload_refresh(struct nf_flowtable *flow_table,
5280 +
5281 + struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table,
5282 + struct flow_offload_tuple *tuple);
5283 ++void nf_flow_table_gc_run(struct nf_flowtable *flow_table);
5284 + void nf_flow_table_gc_cleanup(struct nf_flowtable *flowtable,
5285 + struct net_device *dev);
5286 + void nf_flow_table_cleanup(struct net_device *dev);
5287 +@@ -302,6 +303,8 @@ void nf_flow_offload_stats(struct nf_flowtable *flowtable,
5288 + struct flow_offload *flow);
5289 +
5290 + void nf_flow_table_offload_flush(struct nf_flowtable *flowtable);
5291 ++void nf_flow_table_offload_flush_cleanup(struct nf_flowtable *flowtable);
5292 ++
5293 + int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
5294 + struct net_device *dev,
5295 + enum flow_block_command cmd);
5296 +diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
5297 +index f56a1071c0052..53746494eb846 100644
5298 +--- a/include/net/netfilter/nf_tables.h
5299 ++++ b/include/net/netfilter/nf_tables.h
5300 +@@ -193,13 +193,18 @@ struct nft_ctx {
5301 + bool report;
5302 + };
5303 +
5304 ++enum nft_data_desc_flags {
5305 ++ NFT_DATA_DESC_SETELEM = (1 << 0),
5306 ++};
5307 ++
5308 + struct nft_data_desc {
5309 + enum nft_data_types type;
5310 ++ unsigned int size;
5311 + unsigned int len;
5312 ++ unsigned int flags;
5313 + };
5314 +
5315 +-int nft_data_init(const struct nft_ctx *ctx,
5316 +- struct nft_data *data, unsigned int size,
5317 ++int nft_data_init(const struct nft_ctx *ctx, struct nft_data *data,
5318 + struct nft_data_desc *desc, const struct nlattr *nla);
5319 + void nft_data_hold(const struct nft_data *data, enum nft_data_types type);
5320 + void nft_data_release(const struct nft_data *data, enum nft_data_types type);
5321 +@@ -1595,6 +1600,7 @@ struct nftables_pernet {
5322 + struct list_head module_list;
5323 + struct list_head notify_list;
5324 + struct mutex commit_mutex;
5325 ++ u64 table_handle;
5326 + unsigned int base_seq;
5327 + u8 validate_state;
5328 + };
5329 +diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
5330 +index 0fa5a6d98a00b..9dfa11d4224d2 100644
5331 +--- a/include/net/netfilter/nf_tables_core.h
5332 ++++ b/include/net/netfilter/nf_tables_core.h
5333 +@@ -40,6 +40,14 @@ struct nft_cmp_fast_expr {
5334 + bool inv;
5335 + };
5336 +
5337 ++struct nft_cmp16_fast_expr {
5338 ++ struct nft_data data;
5339 ++ struct nft_data mask;
5340 ++ u8 sreg;
5341 ++ u8 len;
5342 ++ bool inv;
5343 ++};
5344 ++
5345 + struct nft_immediate_expr {
5346 + struct nft_data data;
5347 + u8 dreg;
5348 +@@ -57,6 +65,7 @@ static inline u32 nft_cmp_fast_mask(unsigned int len)
5349 + }
5350 +
5351 + extern const struct nft_expr_ops nft_cmp_fast_ops;
5352 ++extern const struct nft_expr_ops nft_cmp16_fast_ops;
5353 +
5354 + struct nft_payload {
5355 + enum nft_payload_bases base:8;
5356 +diff --git a/include/net/tcp.h b/include/net/tcp.h
5357 +index 76b0d7f2b967f..d3646645cb9ec 100644
5358 +--- a/include/net/tcp.h
5359 ++++ b/include/net/tcp.h
5360 +@@ -571,6 +571,8 @@ __u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
5361 + #endif
5362 + /* tcp_output.c */
5363 +
5364 ++void tcp_skb_entail(struct sock *sk, struct sk_buff *skb);
5365 ++void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb);
5366 + void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
5367 + int nonagle);
5368 + int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
5369 +diff --git a/kernel/audit_fsnotify.c b/kernel/audit_fsnotify.c
5370 +index 60739d5e3373f..c428312938e95 100644
5371 +--- a/kernel/audit_fsnotify.c
5372 ++++ b/kernel/audit_fsnotify.c
5373 +@@ -102,6 +102,7 @@ struct audit_fsnotify_mark *audit_alloc_mark(struct audit_krule *krule, char *pa
5374 +
5375 + ret = fsnotify_add_inode_mark(&audit_mark->mark, inode, true);
5376 + if (ret < 0) {
5377 ++ audit_mark->path = NULL;
5378 + fsnotify_put_mark(&audit_mark->mark);
5379 + audit_mark = ERR_PTR(ret);
5380 + }
5381 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
5382 +index c8b534a498b38..5c9ebcbf6f5f8 100644
5383 +--- a/kernel/bpf/verifier.c
5384 ++++ b/kernel/bpf/verifier.c
5385 +@@ -6096,8 +6096,7 @@ record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
5386 + struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
5387 + struct bpf_reg_state *regs = cur_regs(env), *reg;
5388 + struct bpf_map *map = meta->map_ptr;
5389 +- struct tnum range;
5390 +- u64 val;
5391 ++ u64 val, max;
5392 + int err;
5393 +
5394 + if (func_id != BPF_FUNC_tail_call)
5395 +@@ -6107,10 +6106,11 @@ record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
5396 + return -EINVAL;
5397 + }
5398 +
5399 +- range = tnum_range(0, map->max_entries - 1);
5400 + reg = &regs[BPF_REG_3];
5401 ++ val = reg->var_off.value;
5402 ++ max = map->max_entries;
5403 +
5404 +- if (!register_is_const(reg) || !tnum_in(range, reg->var_off)) {
5405 ++ if (!(register_is_const(reg) && val < max)) {
5406 + bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
5407 + return 0;
5408 + }
5409 +@@ -6118,8 +6118,6 @@ record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
5410 + err = mark_chain_precision(env, BPF_REG_3);
5411 + if (err)
5412 + return err;
5413 +-
5414 +- val = reg->var_off.value;
5415 + if (bpf_map_key_unseen(aux))
5416 + bpf_map_key_store(aux, val);
5417 + else if (!bpf_map_key_poisoned(aux) &&
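The simplified verifier check above tracks the tail_call key only when the index register holds a known constant strictly below the map size. A userspace restatement with a tnum-like stand-in; the types and names here are hypothetical:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* tnum-like stand-in: mask == 0 means the exact value is known */
struct reg_state {
	uint64_t value;
	uint64_t mask;
};

static bool tail_call_key_tracked(const struct reg_state *reg, uint64_t max_entries)
{
	bool is_const = (reg->mask == 0);

	return is_const && reg->value < max_entries;
}

int main(void)
{
	struct reg_state reg = { .value = 3, .mask = 0 };

	printf("%d\n", tail_call_key_tracked(&reg, 4)); /* 1: constant 3 < 4 */
	reg.value = 4;
	printf("%d\n", tail_call_key_tracked(&reg, 4)); /* 0: key gets poisoned */
	return 0;
}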
5418 +diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
5419 +index e7c3b0e586f20..416dd7db3fb2c 100644
5420 +--- a/kernel/cgroup/cgroup.c
5421 ++++ b/kernel/cgroup/cgroup.c
5422 +@@ -1810,6 +1810,7 @@ int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
5423 +
5424 + if (ss->css_rstat_flush) {
5425 + list_del_rcu(&css->rstat_css_node);
5426 ++ synchronize_rcu();
5427 + list_add_rcu(&css->rstat_css_node,
5428 + &dcgrp->rstat_css_list);
5429 + }
5430 +diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
5431 +index f43d89d92860d..126380696f9c5 100644
5432 +--- a/kernel/sys_ni.c
5433 ++++ b/kernel/sys_ni.c
5434 +@@ -276,6 +276,7 @@ COND_SYSCALL(landlock_restrict_self);
5435 +
5436 + /* mm/fadvise.c */
5437 + COND_SYSCALL(fadvise64_64);
5438 ++COND_SYSCALL_COMPAT(fadvise64_64);
5439 +
5440 + /* mm/, CONFIG_MMU only */
5441 + COND_SYSCALL(swapon);
5442 +diff --git a/lib/ratelimit.c b/lib/ratelimit.c
5443 +index e01a93f46f833..ce945c17980b9 100644
5444 +--- a/lib/ratelimit.c
5445 ++++ b/lib/ratelimit.c
5446 +@@ -26,10 +26,16 @@
5447 + */
5448 + int ___ratelimit(struct ratelimit_state *rs, const char *func)
5449 + {
5450 ++ /* Paired with WRITE_ONCE() in .proc_handler().
5451 ++	 * Changing two values separately could be inconsistent
5452 ++	 * and some messages could be lost. (See: net_ratelimit_state).
5453 ++ */
5454 ++ int interval = READ_ONCE(rs->interval);
5455 ++ int burst = READ_ONCE(rs->burst);
5456 + unsigned long flags;
5457 + int ret;
5458 +
5459 +- if (!rs->interval)
5460 ++ if (!interval)
5461 + return 1;
5462 +
5463 + /*
5464 +@@ -44,7 +50,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
5465 + if (!rs->begin)
5466 + rs->begin = jiffies;
5467 +
5468 +- if (time_is_before_jiffies(rs->begin + rs->interval)) {
5469 ++ if (time_is_before_jiffies(rs->begin + interval)) {
5470 + if (rs->missed) {
5471 + if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
5472 + printk_deferred(KERN_WARNING
5473 +@@ -56,7 +62,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
5474 + rs->begin = jiffies;
5475 + rs->printed = 0;
5476 + }
5477 +- if (rs->burst && rs->burst > rs->printed) {
5478 ++ if (burst && burst > rs->printed) {
5479 + rs->printed++;
5480 + ret = 1;
5481 + } else {
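The two READ_ONCE() loads above snapshot interval and burst once at entry, so a concurrent sysctl write cannot hand one call a mixed pair of old and new values. A minimal standalone sketch of that snapshot pattern, with window-reset logic omitted and C11 atomics standing in for the kernel's READ_ONCE():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct rl {
	_Atomic int interval;
	_Atomic int burst;
	int printed;
};

static bool rl_allow(struct rl *rs)
{
	/* snapshot both tunables once; decisions below use a consistent pair */
	int interval = atomic_load_explicit(&rs->interval, memory_order_relaxed);
	int burst    = atomic_load_explicit(&rs->burst, memory_order_relaxed);

	if (!interval)
		return true; /* ratelimiting disabled */
	if (burst && burst > rs->printed) {
		rs->printed++;
		return true;
	}
	return false;
}

int main(void)
{
	struct rl rs = { 5, 2, 0 };

	for (int i = 0; i < 4; i++)
		printf("%d", rl_allow(&rs)); /* 1100: two messages per window */
	printf("\n");
	return 0;
}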
5482 +diff --git a/mm/backing-dev.c b/mm/backing-dev.c
5483 +index 02c9d5c7276e3..142e118ade87a 100644
5484 +--- a/mm/backing-dev.c
5485 ++++ b/mm/backing-dev.c
5486 +@@ -258,10 +258,10 @@ void wb_wakeup_delayed(struct bdi_writeback *wb)
5487 + unsigned long timeout;
5488 +
5489 + timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
5490 +- spin_lock_bh(&wb->work_lock);
5491 ++ spin_lock_irq(&wb->work_lock);
5492 + if (test_bit(WB_registered, &wb->state))
5493 + queue_delayed_work(bdi_wq, &wb->dwork, timeout);
5494 +- spin_unlock_bh(&wb->work_lock);
5495 ++ spin_unlock_irq(&wb->work_lock);
5496 + }
5497 +
5498 + static void wb_update_bandwidth_workfn(struct work_struct *work)
5499 +@@ -337,12 +337,12 @@ static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);
5500 + static void wb_shutdown(struct bdi_writeback *wb)
5501 + {
5502 + /* Make sure nobody queues further work */
5503 +- spin_lock_bh(&wb->work_lock);
5504 ++ spin_lock_irq(&wb->work_lock);
5505 + if (!test_and_clear_bit(WB_registered, &wb->state)) {
5506 +- spin_unlock_bh(&wb->work_lock);
5507 ++ spin_unlock_irq(&wb->work_lock);
5508 + return;
5509 + }
5510 +- spin_unlock_bh(&wb->work_lock);
5511 ++ spin_unlock_irq(&wb->work_lock);
5512 +
5513 + cgwb_remove_from_bdi_list(wb);
5514 + /*
5515 +diff --git a/mm/bootmem_info.c b/mm/bootmem_info.c
5516 +index f03f42f426f69..8655492159a5f 100644
5517 +--- a/mm/bootmem_info.c
5518 ++++ b/mm/bootmem_info.c
5519 +@@ -12,6 +12,7 @@
5520 + #include <linux/memblock.h>
5521 + #include <linux/bootmem_info.h>
5522 + #include <linux/memory_hotplug.h>
5523 ++#include <linux/kmemleak.h>
5524 +
5525 + void get_page_bootmem(unsigned long info, struct page *page, unsigned long type)
5526 + {
5527 +@@ -34,6 +35,7 @@ void put_page_bootmem(struct page *page)
5528 + ClearPagePrivate(page);
5529 + set_page_private(page, 0);
5530 + INIT_LIST_HEAD(&page->lru);
5531 ++ kmemleak_free_part(page_to_virt(page), PAGE_SIZE);
5532 + free_reserved_page(page);
5533 + }
5534 + }
5535 +diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c
5536 +index 36624990b5777..70a5cb977ed0e 100644
5537 +--- a/mm/damon/dbgfs.c
5538 ++++ b/mm/damon/dbgfs.c
5539 +@@ -376,6 +376,9 @@ static int dbgfs_mk_context(char *name)
5540 + return -ENOENT;
5541 +
5542 + new_dir = debugfs_create_dir(name, root);
5543 ++	/* The check below is required for a potential duplicated name case */
5544 ++ if (IS_ERR(new_dir))
5545 ++ return PTR_ERR(new_dir);
5546 + dbgfs_dirs[dbgfs_nr_ctxs] = new_dir;
5547 +
5548 + new_ctx = dbgfs_new_ctx();
5549 +diff --git a/mm/mmap.c b/mm/mmap.c
5550 +index 031fca1a7c65e..b63336f6984c9 100644
5551 +--- a/mm/mmap.c
5552 ++++ b/mm/mmap.c
5553 +@@ -1684,8 +1684,12 @@ int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
5554 + pgprot_val(vm_pgprot_modify(vm_page_prot, vm_flags)))
5555 + return 0;
5556 +
5557 +- /* Do we need to track softdirty? */
5558 +- if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY))
5559 ++ /*
5560 ++ * Do we need to track softdirty? hugetlb does not support softdirty
5561 ++ * tracking yet.
5562 ++ */
5563 ++ if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY) &&
5564 ++ !is_vm_hugetlb_page(vma))
5565 + return 1;
5566 +
5567 + /* Specialty mapping? */
5568 +diff --git a/mm/page-writeback.c b/mm/page-writeback.c
5569 +index 4812a17b288c5..8ca6617b2a723 100644
5570 +--- a/mm/page-writeback.c
5571 ++++ b/mm/page-writeback.c
5572 +@@ -2755,6 +2755,7 @@ static void wb_inode_writeback_start(struct bdi_writeback *wb)
5573 +
5574 + static void wb_inode_writeback_end(struct bdi_writeback *wb)
5575 + {
5576 ++ unsigned long flags;
5577 + atomic_dec(&wb->writeback_inodes);
5578 + /*
5579 + * Make sure estimate of writeback throughput gets updated after
5580 +@@ -2763,7 +2764,10 @@ static void wb_inode_writeback_end(struct bdi_writeback *wb)
5581 + * that if multiple inodes end writeback at a similar time, they get
5582 + * batched into one bandwidth update.
5583 + */
5584 +- queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL);
5585 ++ spin_lock_irqsave(&wb->work_lock, flags);
5586 ++ if (test_bit(WB_registered, &wb->state))
5587 ++ queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL);
5588 ++ spin_unlock_irqrestore(&wb->work_lock, flags);
5589 + }
5590 +
5591 + int test_clear_page_writeback(struct page *page)
5592 +diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
5593 +index 8602885c8a8e0..a54535cbcf4cf 100644
5594 +--- a/net/8021q/vlan_dev.c
5595 ++++ b/net/8021q/vlan_dev.c
5596 +@@ -250,7 +250,7 @@ bool vlan_dev_inherit_address(struct net_device *dev,
5597 + if (dev->addr_assign_type != NET_ADDR_STOLEN)
5598 + return false;
5599 +
5600 +- ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
5601 ++ eth_hw_addr_set(dev, real_dev->dev_addr);
5602 + call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
5603 + return true;
5604 + }
5605 +@@ -349,7 +349,7 @@ static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
5606 + dev_uc_del(real_dev, dev->dev_addr);
5607 +
5608 + out:
5609 +- ether_addr_copy(dev->dev_addr, addr->sa_data);
5610 ++ eth_hw_addr_set(dev, addr->sa_data);
5611 + return 0;
5612 + }
5613 +
5614 +@@ -586,7 +586,7 @@ static int vlan_dev_init(struct net_device *dev)
5615 + dev->dev_id = real_dev->dev_id;
5616 +
5617 + if (is_zero_ether_addr(dev->dev_addr)) {
5618 +- ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
5619 ++ eth_hw_addr_set(dev, real_dev->dev_addr);
5620 + dev->addr_assign_type = NET_ADDR_STOLEN;
5621 + }
5622 + if (is_zero_ether_addr(dev->broadcast))
5623 +diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
5624 +index a7af4eaff17d3..3d4ea774d7e8f 100644
5625 +--- a/net/bridge/netfilter/ebtable_broute.c
5626 ++++ b/net/bridge/netfilter/ebtable_broute.c
5627 +@@ -36,18 +36,10 @@ static struct ebt_replace_kernel initial_table = {
5628 + .entries = (char *)&initial_chain,
5629 + };
5630 +
5631 +-static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
5632 +-{
5633 +- if (valid_hooks & ~(1 << NF_BR_BROUTING))
5634 +- return -EINVAL;
5635 +- return 0;
5636 +-}
5637 +-
5638 + static const struct ebt_table broute_table = {
5639 + .name = "broute",
5640 + .table = &initial_table,
5641 + .valid_hooks = 1 << NF_BR_BROUTING,
5642 +- .check = check,
5643 + .me = THIS_MODULE,
5644 + };
5645 +
5646 +diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
5647 +index c0b121df4a9af..257d63b5dec16 100644
5648 +--- a/net/bridge/netfilter/ebtable_filter.c
5649 ++++ b/net/bridge/netfilter/ebtable_filter.c
5650 +@@ -43,18 +43,10 @@ static struct ebt_replace_kernel initial_table = {
5651 + .entries = (char *)initial_chains,
5652 + };
5653 +
5654 +-static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
5655 +-{
5656 +- if (valid_hooks & ~FILTER_VALID_HOOKS)
5657 +- return -EINVAL;
5658 +- return 0;
5659 +-}
5660 +-
5661 + static const struct ebt_table frame_filter = {
5662 + .name = "filter",
5663 + .table = &initial_table,
5664 + .valid_hooks = FILTER_VALID_HOOKS,
5665 +- .check = check,
5666 + .me = THIS_MODULE,
5667 + };
5668 +
5669 +diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
5670 +index 4078151c224fb..39179c2cf87d2 100644
5671 +--- a/net/bridge/netfilter/ebtable_nat.c
5672 ++++ b/net/bridge/netfilter/ebtable_nat.c
5673 +@@ -43,18 +43,10 @@ static struct ebt_replace_kernel initial_table = {
5674 + .entries = (char *)initial_chains,
5675 + };
5676 +
5677 +-static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
5678 +-{
5679 +- if (valid_hooks & ~NAT_VALID_HOOKS)
5680 +- return -EINVAL;
5681 +- return 0;
5682 +-}
5683 +-
5684 + static const struct ebt_table frame_nat = {
5685 + .name = "nat",
5686 + .table = &initial_table,
5687 + .valid_hooks = NAT_VALID_HOOKS,
5688 +- .check = check,
5689 + .me = THIS_MODULE,
5690 + };
5691 +
5692 +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
5693 +index ba045f35114dd..8905fe2fe023d 100644
5694 +--- a/net/bridge/netfilter/ebtables.c
5695 ++++ b/net/bridge/netfilter/ebtables.c
5696 +@@ -1040,8 +1040,7 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
5697 + goto free_iterate;
5698 + }
5699 +
5700 +- /* the table doesn't like it */
5701 +- if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
5702 ++ if (repl->valid_hooks != t->valid_hooks)
5703 + goto free_unlock;
5704 +
5705 + if (repl->num_counters && repl->num_counters != t->private->nentries) {
5706 +@@ -1231,11 +1230,6 @@ int ebt_register_table(struct net *net, const struct ebt_table *input_table,
5707 + if (ret != 0)
5708 + goto free_chainstack;
5709 +
5710 +- if (table->check && table->check(newinfo, table->valid_hooks)) {
5711 +- ret = -EINVAL;
5712 +- goto free_chainstack;
5713 +- }
5714 +-
5715 + table->private = newinfo;
5716 + rwlock_init(&table->lock);
5717 + mutex_lock(&ebt_mutex);
5718 +diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
5719 +index d2745c54737e3..910ca41cb9e67 100644
5720 +--- a/net/core/bpf_sk_storage.c
5721 ++++ b/net/core/bpf_sk_storage.c
5722 +@@ -305,11 +305,12 @@ BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
5723 + static int bpf_sk_storage_charge(struct bpf_local_storage_map *smap,
5724 + void *owner, u32 size)
5725 + {
5726 ++ int optmem_max = READ_ONCE(sysctl_optmem_max);
5727 + struct sock *sk = (struct sock *)owner;
5728 +
5729 + /* same check as in sock_kmalloc() */
5730 +- if (size <= sysctl_optmem_max &&
5731 +- atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
5732 ++ if (size <= optmem_max &&
5733 ++ atomic_read(&sk->sk_omem_alloc) + size < optmem_max) {
5734 + atomic_add(size, &sk->sk_omem_alloc);
5735 + return 0;
5736 + }
5737 +diff --git a/net/core/dev.c b/net/core/dev.c
5738 +index 12b1811cb488b..276cca563325e 100644
5739 +--- a/net/core/dev.c
5740 ++++ b/net/core/dev.c
5741 +@@ -4589,7 +4589,7 @@ static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
5742 + struct softnet_data *sd;
5743 + unsigned int old_flow, new_flow;
5744 +
5745 +- if (qlen < (netdev_max_backlog >> 1))
5746 ++ if (qlen < (READ_ONCE(netdev_max_backlog) >> 1))
5747 + return false;
5748 +
5749 + sd = this_cpu_ptr(&softnet_data);
5750 +@@ -4637,7 +4637,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
5751 + if (!netif_running(skb->dev))
5752 + goto drop;
5753 + qlen = skb_queue_len(&sd->input_pkt_queue);
5754 +- if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
5755 ++ if (qlen <= READ_ONCE(netdev_max_backlog) && !skb_flow_limit(skb, qlen)) {
5756 + if (qlen) {
5757 + enqueue:
5758 + __skb_queue_tail(&sd->input_pkt_queue, skb);
5759 +@@ -4893,7 +4893,7 @@ static int netif_rx_internal(struct sk_buff *skb)
5760 + {
5761 + int ret;
5762 +
5763 +- net_timestamp_check(netdev_tstamp_prequeue, skb);
5764 ++ net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
5765 +
5766 + trace_netif_rx(skb);
5767 +
5768 +@@ -5253,7 +5253,7 @@ static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
5769 + int ret = NET_RX_DROP;
5770 + __be16 type;
5771 +
5772 +- net_timestamp_check(!netdev_tstamp_prequeue, skb);
5773 ++ net_timestamp_check(!READ_ONCE(netdev_tstamp_prequeue), skb);
5774 +
5775 + trace_netif_receive_skb(skb);
5776 +
5777 +@@ -5634,7 +5634,7 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
5778 + {
5779 + int ret;
5780 +
5781 +- net_timestamp_check(netdev_tstamp_prequeue, skb);
5782 ++ net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
5783 +
5784 + if (skb_defer_rx_timestamp(skb))
5785 + return NET_RX_SUCCESS;
5786 +@@ -5664,7 +5664,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
5787 +
5788 + INIT_LIST_HEAD(&sublist);
5789 + list_for_each_entry_safe(skb, next, head, list) {
5790 +- net_timestamp_check(netdev_tstamp_prequeue, skb);
5791 ++ net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
5792 + skb_list_del_init(skb);
5793 + if (!skb_defer_rx_timestamp(skb))
5794 + list_add_tail(&skb->list, &sublist);
5795 +@@ -6437,7 +6437,7 @@ static int process_backlog(struct napi_struct *napi, int quota)
5796 + net_rps_action_and_irq_enable(sd);
5797 + }
5798 +
5799 +- napi->weight = dev_rx_weight;
5800 ++ napi->weight = READ_ONCE(dev_rx_weight);
5801 + while (again) {
5802 + struct sk_buff *skb;
5803 +
5804 +@@ -7137,8 +7137,8 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
5805 + {
5806 + struct softnet_data *sd = this_cpu_ptr(&softnet_data);
5807 + unsigned long time_limit = jiffies +
5808 +- usecs_to_jiffies(netdev_budget_usecs);
5809 +- int budget = netdev_budget;
5810 ++ usecs_to_jiffies(READ_ONCE(netdev_budget_usecs));
5811 ++ int budget = READ_ONCE(netdev_budget);
5812 + LIST_HEAD(list);
5813 + LIST_HEAD(repoll);
5814 +
5815 +diff --git a/net/core/filter.c b/net/core/filter.c
5816 +index ac64395611ae3..fb5b9dbf3bc08 100644
5817 +--- a/net/core/filter.c
5818 ++++ b/net/core/filter.c
5819 +@@ -1213,10 +1213,11 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
5820 + static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
5821 + {
5822 + u32 filter_size = bpf_prog_size(fp->prog->len);
5823 ++ int optmem_max = READ_ONCE(sysctl_optmem_max);
5824 +
5825 + /* same check as in sock_kmalloc() */
5826 +- if (filter_size <= sysctl_optmem_max &&
5827 +- atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
5828 ++ if (filter_size <= optmem_max &&
5829 ++ atomic_read(&sk->sk_omem_alloc) + filter_size < optmem_max) {
5830 + atomic_add(filter_size, &sk->sk_omem_alloc);
5831 + return true;
5832 + }
5833 +@@ -1548,7 +1549,7 @@ int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
5834 + if (IS_ERR(prog))
5835 + return PTR_ERR(prog);
5836 +
5837 +- if (bpf_prog_size(prog->len) > sysctl_optmem_max)
5838 ++ if (bpf_prog_size(prog->len) > READ_ONCE(sysctl_optmem_max))
5839 + err = -ENOMEM;
5840 + else
5841 + err = reuseport_attach_prog(sk, prog);
5842 +@@ -1615,7 +1616,7 @@ int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
5843 + }
5844 + } else {
5845 + /* BPF_PROG_TYPE_SOCKET_FILTER */
5846 +- if (bpf_prog_size(prog->len) > sysctl_optmem_max) {
5847 ++ if (bpf_prog_size(prog->len) > READ_ONCE(sysctl_optmem_max)) {
5848 + err = -ENOMEM;
5849 + goto err_prog_put;
5850 + }
5851 +@@ -4744,14 +4745,14 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname,
5852 + /* Only some socketops are supported */
5853 + switch (optname) {
5854 + case SO_RCVBUF:
5855 +- val = min_t(u32, val, sysctl_rmem_max);
5856 ++ val = min_t(u32, val, READ_ONCE(sysctl_rmem_max));
5857 + val = min_t(int, val, INT_MAX / 2);
5858 + sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
5859 + WRITE_ONCE(sk->sk_rcvbuf,
5860 + max_t(int, val * 2, SOCK_MIN_RCVBUF));
5861 + break;
5862 + case SO_SNDBUF:
5863 +- val = min_t(u32, val, sysctl_wmem_max);
5864 ++ val = min_t(u32, val, READ_ONCE(sysctl_wmem_max));
5865 + val = min_t(int, val, INT_MAX / 2);
5866 + sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
5867 + WRITE_ONCE(sk->sk_sndbuf,
5868 +diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c
5869 +index 6eb2e5ec2c506..2f66f3f295630 100644
5870 +--- a/net/core/gro_cells.c
5871 ++++ b/net/core/gro_cells.c
5872 +@@ -26,7 +26,7 @@ int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
5873 +
5874 + cell = this_cpu_ptr(gcells->cells);
5875 +
5876 +- if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
5877 ++ if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(netdev_max_backlog)) {
5878 + drop:
5879 + atomic_long_inc(&dev->rx_dropped);
5880 + kfree_skb(skb);
5881 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
5882 +index 5ebef94e14dc6..563848242ad33 100644
5883 +--- a/net/core/skbuff.c
5884 ++++ b/net/core/skbuff.c
5885 +@@ -4892,7 +4892,7 @@ static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
5886 + {
5887 + bool ret;
5888 +
5889 +- if (likely(sysctl_tstamp_allow_data || tsonly))
5890 ++ if (likely(READ_ONCE(sysctl_tstamp_allow_data) || tsonly))
5891 + return true;
5892 +
5893 + read_lock_bh(&sk->sk_callback_lock);
5894 +diff --git a/net/core/sock.c b/net/core/sock.c
5895 +index deaed1b206823..9bcffe1d5332a 100644
5896 +--- a/net/core/sock.c
5897 ++++ b/net/core/sock.c
5898 +@@ -1014,7 +1014,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
5899 + * play 'guess the biggest size' games. RCVBUF/SNDBUF
5900 + * are treated in BSD as hints
5901 + */
5902 +- val = min_t(u32, val, sysctl_wmem_max);
5903 ++ val = min_t(u32, val, READ_ONCE(sysctl_wmem_max));
5904 + set_sndbuf:
5905 + /* Ensure val * 2 fits into an int, to prevent max_t()
5906 + * from treating it as a negative value.
5907 +@@ -1046,7 +1046,7 @@ set_sndbuf:
5908 + * play 'guess the biggest size' games. RCVBUF/SNDBUF
5909 + * are treated in BSD as hints
5910 + */
5911 +- __sock_set_rcvbuf(sk, min_t(u32, val, sysctl_rmem_max));
5912 ++ __sock_set_rcvbuf(sk, min_t(u32, val, READ_ONCE(sysctl_rmem_max)));
5913 + break;
5914 +
5915 + case SO_RCVBUFFORCE:
5916 +@@ -2368,7 +2368,7 @@ struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
5917 +
5918 + /* small safe race: SKB_TRUESIZE may differ from final skb->truesize */
5919 + if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) >
5920 +- sysctl_optmem_max)
5921 ++ READ_ONCE(sysctl_optmem_max))
5922 + return NULL;
5923 +
5924 + skb = alloc_skb(size, priority);
5925 +@@ -2386,8 +2386,10 @@ struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
5926 + */
5927 + void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
5928 + {
5929 +- if ((unsigned int)size <= sysctl_optmem_max &&
5930 +- atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
5931 ++ int optmem_max = READ_ONCE(sysctl_optmem_max);
5932 ++
5933 ++ if ((unsigned int)size <= optmem_max &&
5934 ++ atomic_read(&sk->sk_omem_alloc) + size < optmem_max) {
5935 + void *mem;
5936 + /* First do the add, to avoid the race if kmalloc
5937 + * might sleep.
5938 +@@ -3124,8 +3126,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
5939 + timer_setup(&sk->sk_timer, NULL, 0);
5940 +
5941 + sk->sk_allocation = GFP_KERNEL;
5942 +- sk->sk_rcvbuf = sysctl_rmem_default;
5943 +- sk->sk_sndbuf = sysctl_wmem_default;
5944 ++ sk->sk_rcvbuf = READ_ONCE(sysctl_rmem_default);
5945 ++ sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default);
5946 + sk->sk_state = TCP_CLOSE;
5947 + sk_set_socket(sk, sock);
5948 +
5949 +@@ -3180,7 +3182,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
5950 +
5951 + #ifdef CONFIG_NET_RX_BUSY_POLL
5952 + sk->sk_napi_id = 0;
5953 +- sk->sk_ll_usec = sysctl_net_busy_read;
5954 ++ sk->sk_ll_usec = READ_ONCE(sysctl_net_busy_read);
5955 + #endif
5956 +
5957 + sk->sk_max_pacing_rate = ~0UL;
5958 +diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
5959 +index 5f88526ad61cc..ed20cbdd19315 100644
5960 +--- a/net/core/sysctl_net_core.c
5961 ++++ b/net/core/sysctl_net_core.c
5962 +@@ -236,14 +236,17 @@ static int set_default_qdisc(struct ctl_table *table, int write,
5963 + static int proc_do_dev_weight(struct ctl_table *table, int write,
5964 + void *buffer, size_t *lenp, loff_t *ppos)
5965 + {
5966 +- int ret;
5967 ++ static DEFINE_MUTEX(dev_weight_mutex);
5968 ++ int ret, weight;
5969 +
5970 ++ mutex_lock(&dev_weight_mutex);
5971 + ret = proc_dointvec(table, write, buffer, lenp, ppos);
5972 +- if (ret != 0)
5973 +- return ret;
5974 +-
5975 +- dev_rx_weight = weight_p * dev_weight_rx_bias;
5976 +- dev_tx_weight = weight_p * dev_weight_tx_bias;
5977 ++ if (!ret && write) {
5978 ++ weight = READ_ONCE(weight_p);
5979 ++ WRITE_ONCE(dev_rx_weight, weight * dev_weight_rx_bias);
5980 ++ WRITE_ONCE(dev_tx_weight, weight * dev_weight_tx_bias);
5981 ++ }
5982 ++ mutex_unlock(&dev_weight_mutex);
5983 +
5984 + return ret;
5985 + }
5986 +diff --git a/net/dsa/slave.c b/net/dsa/slave.c
5987 +index a2bf2d8ac65b7..11ec9e689589b 100644
5988 +--- a/net/dsa/slave.c
5989 ++++ b/net/dsa/slave.c
5990 +@@ -174,7 +174,7 @@ static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
5991 + dev_uc_del(master, dev->dev_addr);
5992 +
5993 + out:
5994 +- ether_addr_copy(dev->dev_addr, addr->sa_data);
5995 ++ eth_hw_addr_set(dev, addr->sa_data);
5996 +
5997 + return 0;
5998 + }
5999 +@@ -1954,7 +1954,7 @@ int dsa_slave_create(struct dsa_port *port)
6000 +
6001 + slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
6002 + if (!is_zero_ether_addr(port->mac))
6003 +- ether_addr_copy(slave_dev->dev_addr, port->mac);
6004 ++ eth_hw_addr_set(slave_dev, port->mac);
6005 + else
6006 + eth_hw_addr_inherit(slave_dev, master);
6007 + slave_dev->priv_flags |= IFF_NO_QUEUE;
6008 +diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
6009 +index ea7b96e296ef0..a1045c3d71b4f 100644
6010 +--- a/net/hsr/hsr_device.c
6011 ++++ b/net/hsr/hsr_device.c
6012 +@@ -493,7 +493,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
6013 + INIT_LIST_HEAD(&hsr->self_node_db);
6014 + spin_lock_init(&hsr->list_lock);
6015 +
6016 +- ether_addr_copy(hsr_dev->dev_addr, slave[0]->dev_addr);
6017 ++ eth_hw_addr_set(hsr_dev, slave[0]->dev_addr);
6018 +
6019 + /* initialize protocol specific functions */
6020 + if (protocol_version == PRP_V1) {
6021 +diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
6022 +index f7e284f23b1f3..b099c31501509 100644
6023 +--- a/net/hsr/hsr_main.c
6024 ++++ b/net/hsr/hsr_main.c
6025 +@@ -75,7 +75,7 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
6026 + master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
6027 +
6028 + if (port->type == HSR_PT_SLAVE_A) {
6029 +- ether_addr_copy(master->dev->dev_addr, dev->dev_addr);
6030 ++ eth_hw_addr_set(master->dev, dev->dev_addr);
6031 + call_netdevice_notifiers(NETDEV_CHANGEADDR,
6032 + master->dev);
6033 + }
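The DSA and HSR hunks swap open-coded ether_addr_copy() into dev->dev_addr for eth_hw_addr_set(), part of the tree-wide move to route every MAC-address write through one helper so that netdev->dev_addr can eventually become const. A sketch of the helper's shape, assuming the usual 6-byte Ethernet address; the real helper defers to the core's dev_addr bookkeeping rather than a bare memcpy():

	#include <string.h>

	#define ETH_ALEN 6

	struct net_device {
		unsigned char dev_addr[ETH_ALEN];
		/* ... */
	};

	/* funnel every MAC write through one function instead of
	 * copying into dev_addr directly; the kernel version goes via
	 * dev_addr_mod() so the core can track address changes */
	static inline void eth_hw_addr_set(struct net_device *dev,
					   const void *addr)
	{
		memcpy(dev->dev_addr, addr, ETH_ALEN);
	}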
6034 +diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
6035 +index 4744c7839de53..9ac41ffdc6344 100644
6036 +--- a/net/ipv4/devinet.c
6037 ++++ b/net/ipv4/devinet.c
6038 +@@ -2673,23 +2673,27 @@ static __net_init int devinet_init_net(struct net *net)
6039 + #endif
6040 +
6041 + if (!net_eq(net, &init_net)) {
6042 +- if (IS_ENABLED(CONFIG_SYSCTL) &&
6043 +- sysctl_devconf_inherit_init_net == 3) {
6044 ++ switch (net_inherit_devconf()) {
6045 ++ case 3:
6046 + /* copy from the current netns */
6047 + memcpy(all, current->nsproxy->net_ns->ipv4.devconf_all,
6048 + sizeof(ipv4_devconf));
6049 + memcpy(dflt,
6050 + current->nsproxy->net_ns->ipv4.devconf_dflt,
6051 + sizeof(ipv4_devconf_dflt));
6052 +- } else if (!IS_ENABLED(CONFIG_SYSCTL) ||
6053 +- sysctl_devconf_inherit_init_net != 2) {
6054 +- /* inherit == 0 or 1: copy from init_net */
6055 ++ break;
6056 ++ case 0:
6057 ++ case 1:
6058 ++ /* copy from init_net */
6059 + memcpy(all, init_net.ipv4.devconf_all,
6060 + sizeof(ipv4_devconf));
6061 + memcpy(dflt, init_net.ipv4.devconf_dflt,
6062 + sizeof(ipv4_devconf_dflt));
6063 ++ break;
6064 ++ case 2:
6065 ++ /* use compiled values */
6066 ++ break;
6067 + }
6068 +- /* else inherit == 2: use compiled values */
6069 + }
6070 +
6071 + #ifdef CONFIG_SYSCTL
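Here, and in the matching addrconf.c hunk below, the open-coded IS_ENABLED(CONFIG_SYSCTL) checks around sysctl_devconf_inherit_init_net collapse into one helper, net_inherit_devconf(). Its definition is not visible in this excerpt; presumably it reads the sysctl once and degrades to 0 ("copy from init_net") when sysctl support is compiled out, along these lines:

	/* sketch of the presumed helper, introduced elsewhere in this
	 * series: one READ_ONCE of the tunable, and a fixed 0 when
	 * sysctls are compiled out */
	static inline int net_inherit_devconf(void)
	{
	#if IS_ENABLED(CONFIG_SYSCTL)
		return READ_ONCE(sysctl_devconf_inherit_init_net);
	#else
		return 0;
	#endif
	}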
6072 +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
6073 +index 131066d0319a2..7aff0179b3c2d 100644
6074 +--- a/net/ipv4/ip_output.c
6075 ++++ b/net/ipv4/ip_output.c
6076 +@@ -1712,7 +1712,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
6077 +
6078 + sk->sk_protocol = ip_hdr(skb)->protocol;
6079 + sk->sk_bound_dev_if = arg->bound_dev_if;
6080 +- sk->sk_sndbuf = sysctl_wmem_default;
6081 ++ sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default);
6082 + ipc.sockc.mark = fl4.flowi4_mark;
6083 + err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
6084 + len, 0, &ipc, &rt, MSG_DONTWAIT);
6085 +diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
6086 +index 38f296afb663d..1e2af5f8822df 100644
6087 +--- a/net/ipv4/ip_sockglue.c
6088 ++++ b/net/ipv4/ip_sockglue.c
6089 +@@ -772,7 +772,7 @@ static int ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval, int optlen)
6090 +
6091 + if (optlen < GROUP_FILTER_SIZE(0))
6092 + return -EINVAL;
6093 +- if (optlen > sysctl_optmem_max)
6094 ++ if (optlen > READ_ONCE(sysctl_optmem_max))
6095 + return -ENOBUFS;
6096 +
6097 + gsf = memdup_sockptr(optval, optlen);
6098 +@@ -808,7 +808,7 @@ static int compat_ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval,
6099 +
6100 + if (optlen < size0)
6101 + return -EINVAL;
6102 +- if (optlen > sysctl_optmem_max - 4)
6103 ++ if (optlen > READ_ONCE(sysctl_optmem_max) - 4)
6104 + return -ENOBUFS;
6105 +
6106 + p = kmalloc(optlen + 4, GFP_KERNEL);
6107 +@@ -1231,7 +1231,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, int optname,
6108 +
6109 + if (optlen < IP_MSFILTER_SIZE(0))
6110 + goto e_inval;
6111 +- if (optlen > sysctl_optmem_max) {
6112 ++ if (optlen > READ_ONCE(sysctl_optmem_max)) {
6113 + err = -ENOBUFS;
6114 + break;
6115 + }
6116 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
6117 +index 2097eeaf30a67..0ebef2a5950cd 100644
6118 +--- a/net/ipv4/tcp.c
6119 ++++ b/net/ipv4/tcp.c
6120 +@@ -644,7 +644,7 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
6121 + }
6122 + EXPORT_SYMBOL(tcp_ioctl);
6123 +
6124 +-static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
6125 ++void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
6126 + {
6127 + TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
6128 + tp->pushed_seq = tp->write_seq;
6129 +@@ -655,7 +655,7 @@ static inline bool forced_push(const struct tcp_sock *tp)
6130 + return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
6131 + }
6132 +
6133 +-static void skb_entail(struct sock *sk, struct sk_buff *skb)
6134 ++void tcp_skb_entail(struct sock *sk, struct sk_buff *skb)
6135 + {
6136 + struct tcp_sock *tp = tcp_sk(sk);
6137 + struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
6138 +@@ -982,7 +982,7 @@ new_segment:
6139 + #ifdef CONFIG_TLS_DEVICE
6140 + skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
6141 + #endif
6142 +- skb_entail(sk, skb);
6143 ++ tcp_skb_entail(sk, skb);
6144 + copy = size_goal;
6145 + }
6146 +
6147 +@@ -991,7 +991,7 @@ new_segment:
6148 +
6149 + i = skb_shinfo(skb)->nr_frags;
6150 + can_coalesce = skb_can_coalesce(skb, i, page, offset);
6151 +- if (!can_coalesce && i >= sysctl_max_skb_frags) {
6152 ++ if (!can_coalesce && i >= READ_ONCE(sysctl_max_skb_frags)) {
6153 + tcp_mark_push(tp, skb);
6154 + goto new_segment;
6155 + }
6156 +@@ -1312,7 +1312,7 @@ new_segment:
6157 + process_backlog++;
6158 + skb->ip_summed = CHECKSUM_PARTIAL;
6159 +
6160 +- skb_entail(sk, skb);
6161 ++ tcp_skb_entail(sk, skb);
6162 + copy = size_goal;
6163 +
6164 + /* All packets are restored as if they have
6165 +@@ -1344,7 +1344,7 @@ new_segment:
6166 +
6167 + if (!skb_can_coalesce(skb, i, pfrag->page,
6168 + pfrag->offset)) {
6169 +- if (i >= sysctl_max_skb_frags) {
6170 ++ if (i >= READ_ONCE(sysctl_max_skb_frags)) {
6171 + tcp_mark_push(tp, skb);
6172 + goto new_segment;
6173 + }
6174 +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
6175 +index 40c9da4bd03e4..ed2e1836c0c05 100644
6176 +--- a/net/ipv4/tcp_output.c
6177 ++++ b/net/ipv4/tcp_output.c
6178 +@@ -239,7 +239,7 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
6179 + if (wscale_ok) {
6180 + /* Set window scaling on max possible window */
6181 + space = max_t(u32, space, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
6182 +- space = max_t(u32, space, sysctl_rmem_max);
6183 ++ space = max_t(u32, space, READ_ONCE(sysctl_rmem_max));
6184 + space = min_t(u32, space, *window_clamp);
6185 + *rcv_wscale = clamp_t(int, ilog2(space) - 15,
6186 + 0, TCP_MAX_WSCALE);
6187 +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
6188 +index 6dcf034835ecd..8800987fdb402 100644
6189 +--- a/net/ipv6/addrconf.c
6190 ++++ b/net/ipv6/addrconf.c
6191 +@@ -7128,9 +7128,8 @@ static int __net_init addrconf_init_net(struct net *net)
6192 + if (!dflt)
6193 + goto err_alloc_dflt;
6194 +
6195 +- if (IS_ENABLED(CONFIG_SYSCTL) &&
6196 +- !net_eq(net, &init_net)) {
6197 +- switch (sysctl_devconf_inherit_init_net) {
6198 ++ if (!net_eq(net, &init_net)) {
6199 ++ switch (net_inherit_devconf()) {
6200 + case 1: /* copy from init_net */
6201 + memcpy(all, init_net.ipv6.devconf_all,
6202 + sizeof(ipv6_devconf));
6203 +diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
6204 +index e4bdb09c55867..8a1c78f385084 100644
6205 +--- a/net/ipv6/ipv6_sockglue.c
6206 ++++ b/net/ipv6/ipv6_sockglue.c
6207 +@@ -208,7 +208,7 @@ static int ipv6_set_mcast_msfilter(struct sock *sk, sockptr_t optval,
6208 +
6209 + if (optlen < GROUP_FILTER_SIZE(0))
6210 + return -EINVAL;
6211 +- if (optlen > sysctl_optmem_max)
6212 ++ if (optlen > READ_ONCE(sysctl_optmem_max))
6213 + return -ENOBUFS;
6214 +
6215 + gsf = memdup_sockptr(optval, optlen);
6216 +@@ -242,7 +242,7 @@ static int compat_ipv6_set_mcast_msfilter(struct sock *sk, sockptr_t optval,
6217 +
6218 + if (optlen < size0)
6219 + return -EINVAL;
6220 +- if (optlen > sysctl_optmem_max - 4)
6221 ++ if (optlen > READ_ONCE(sysctl_optmem_max) - 4)
6222 + return -ENOBUFS;
6223 +
6224 + p = kmalloc(optlen + 4, GFP_KERNEL);
6225 +diff --git a/net/key/af_key.c b/net/key/af_key.c
6226 +index d93bde6573593..53cca90191586 100644
6227 +--- a/net/key/af_key.c
6228 ++++ b/net/key/af_key.c
6229 +@@ -1697,9 +1697,12 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad
6230 + pfk->registered |= (1<<hdr->sadb_msg_satype);
6231 + }
6232 +
6233 ++ mutex_lock(&pfkey_mutex);
6234 + xfrm_probe_algs();
6235 +
6236 + supp_skb = compose_sadb_supported(hdr, GFP_KERNEL | __GFP_ZERO);
6237 ++ mutex_unlock(&pfkey_mutex);
6238 ++
6239 + if (!supp_skb) {
6240 + if (hdr->sadb_msg_satype != SADB_SATYPE_UNSPEC)
6241 + pfk->registered &= ~(1<<hdr->sadb_msg_satype);
6242 +diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
6243 +index 7f96e0c42a090..47f359dac247b 100644
6244 +--- a/net/mptcp/protocol.c
6245 ++++ b/net/mptcp/protocol.c
6246 +@@ -1224,6 +1224,7 @@ static struct sk_buff *__mptcp_do_alloc_tx_skb(struct sock *sk, gfp_t gfp)
6247 + if (likely(__mptcp_add_ext(skb, gfp))) {
6248 + skb_reserve(skb, MAX_TCP_HEADER);
6249 + skb->reserved_tailroom = skb->end - skb->tail;
6250 ++ INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
6251 + return skb;
6252 + }
6253 + __kfree_skb(skb);
6254 +@@ -1233,31 +1234,24 @@ static struct sk_buff *__mptcp_do_alloc_tx_skb(struct sock *sk, gfp_t gfp)
6255 + return NULL;
6256 + }
6257 +
6258 +-static bool __mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp)
6259 ++static struct sk_buff *__mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp)
6260 + {
6261 + struct sk_buff *skb;
6262 +
6263 +- if (ssk->sk_tx_skb_cache) {
6264 +- skb = ssk->sk_tx_skb_cache;
6265 +- if (unlikely(!skb_ext_find(skb, SKB_EXT_MPTCP) &&
6266 +- !__mptcp_add_ext(skb, gfp)))
6267 +- return false;
6268 +- return true;
6269 +- }
6270 +-
6271 + skb = __mptcp_do_alloc_tx_skb(sk, gfp);
6272 + if (!skb)
6273 +- return false;
6274 ++ return NULL;
6275 +
6276 + if (likely(sk_wmem_schedule(ssk, skb->truesize))) {
6277 +- ssk->sk_tx_skb_cache = skb;
6278 +- return true;
6279 ++ tcp_skb_entail(ssk, skb);
6280 ++ return skb;
6281 + }
6282 ++ tcp_skb_tsorted_anchor_cleanup(skb);
6283 + kfree_skb(skb);
6284 +- return false;
6285 ++ return NULL;
6286 + }
6287 +
6288 +-static bool mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, bool data_lock_held)
6289 ++static struct sk_buff *mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, bool data_lock_held)
6290 + {
6291 + gfp_t gfp = data_lock_held ? GFP_ATOMIC : sk->sk_allocation;
6292 +
6293 +@@ -1287,23 +1281,29 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
6294 + struct mptcp_sendmsg_info *info)
6295 + {
6296 + u64 data_seq = dfrag->data_seq + info->sent;
6297 ++ int offset = dfrag->offset + info->sent;
6298 + struct mptcp_sock *msk = mptcp_sk(sk);
6299 + bool zero_window_probe = false;
6300 + struct mptcp_ext *mpext = NULL;
6301 +- struct sk_buff *skb, *tail;
6302 +- bool must_collapse = false;
6303 +- int size_bias = 0;
6304 +- int avail_size;
6305 +- size_t ret = 0;
6306 ++ bool can_coalesce = false;
6307 ++ bool reuse_skb = true;
6308 ++ struct sk_buff *skb;
6309 ++ size_t copy;
6310 ++ int i;
6311 +
6312 + pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u",
6313 + msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent);
6314 +
6315 ++ if (WARN_ON_ONCE(info->sent > info->limit ||
6316 ++ info->limit > dfrag->data_len))
6317 ++ return 0;
6318 ++
6319 + /* compute send limit */
6320 + info->mss_now = tcp_send_mss(ssk, &info->size_goal, info->flags);
6321 +- avail_size = info->size_goal;
6322 ++ copy = info->size_goal;
6323 ++
6324 + skb = tcp_write_queue_tail(ssk);
6325 +- if (skb) {
6326 ++ if (skb && copy > skb->len) {
6327 + /* Limit the write to the size available in the
6328 + * current skb, if any, so that we create at most a new skb.
6329 + * Explicitly tells TCP internals to avoid collapsing on later
6330 +@@ -1316,62 +1316,80 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
6331 + goto alloc_skb;
6332 + }
6333 +
6334 +- must_collapse = (info->size_goal > skb->len) &&
6335 +- (skb_shinfo(skb)->nr_frags < sysctl_max_skb_frags);
6336 +- if (must_collapse) {
6337 +- size_bias = skb->len;
6338 +- avail_size = info->size_goal - skb->len;
6339 ++ i = skb_shinfo(skb)->nr_frags;
6340 ++ can_coalesce = skb_can_coalesce(skb, i, dfrag->page, offset);
6341 ++ if (!can_coalesce && i >= READ_ONCE(sysctl_max_skb_frags)) {
6342 ++ tcp_mark_push(tcp_sk(ssk), skb);
6343 ++ goto alloc_skb;
6344 + }
6345 +- }
6346 +
6347 ++ copy -= skb->len;
6348 ++ } else {
6349 + alloc_skb:
6350 +- if (!must_collapse &&
6351 +- !mptcp_alloc_tx_skb(sk, ssk, info->data_lock_held))
6352 +- return 0;
6353 ++ skb = mptcp_alloc_tx_skb(sk, ssk, info->data_lock_held);
6354 ++ if (!skb)
6355 ++ return -ENOMEM;
6356 ++
6357 ++ i = skb_shinfo(skb)->nr_frags;
6358 ++ reuse_skb = false;
6359 ++ mpext = skb_ext_find(skb, SKB_EXT_MPTCP);
6360 ++ }
6361 +
6362 + /* Zero window and all data acked? Probe. */
6363 +- avail_size = mptcp_check_allowed_size(msk, data_seq, avail_size);
6364 +- if (avail_size == 0) {
6365 ++ copy = mptcp_check_allowed_size(msk, data_seq, copy);
6366 ++ if (copy == 0) {
6367 + u64 snd_una = READ_ONCE(msk->snd_una);
6368 +
6369 +- if (skb || snd_una != msk->snd_nxt)
6370 ++ if (snd_una != msk->snd_nxt) {
6371 ++ tcp_remove_empty_skb(ssk, tcp_write_queue_tail(ssk));
6372 + return 0;
6373 ++ }
6374 ++
6375 + zero_window_probe = true;
6376 + data_seq = snd_una - 1;
6377 +- avail_size = 1;
6378 +- }
6379 ++ copy = 1;
6380 +
6381 +- if (WARN_ON_ONCE(info->sent > info->limit ||
6382 +- info->limit > dfrag->data_len))
6383 +- return 0;
6384 ++ /* all mptcp-level data is acked, no skbs should be present into the
6385 ++ * ssk write queue
6386 ++ */
6387 ++ WARN_ON_ONCE(reuse_skb);
6388 ++ }
6389 +
6390 +- ret = info->limit - info->sent;
6391 +- tail = tcp_build_frag(ssk, avail_size + size_bias, info->flags,
6392 +- dfrag->page, dfrag->offset + info->sent, &ret);
6393 +- if (!tail) {
6394 +- tcp_remove_empty_skb(sk, tcp_write_queue_tail(ssk));
6395 ++ copy = min_t(size_t, copy, info->limit - info->sent);
6396 ++ if (!sk_wmem_schedule(ssk, copy)) {
6397 ++ tcp_remove_empty_skb(ssk, tcp_write_queue_tail(ssk));
6398 + return -ENOMEM;
6399 + }
6400 +
6401 +- /* if the tail skb is still the cached one, collapsing really happened.
6402 +- */
6403 +- if (skb == tail) {
6404 +- TCP_SKB_CB(tail)->tcp_flags &= ~TCPHDR_PSH;
6405 +- mpext->data_len += ret;
6406 ++ if (can_coalesce) {
6407 ++ skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
6408 ++ } else {
6409 ++ get_page(dfrag->page);
6410 ++ skb_fill_page_desc(skb, i, dfrag->page, offset, copy);
6411 ++ }
6412 ++
6413 ++ skb->len += copy;
6414 ++ skb->data_len += copy;
6415 ++ skb->truesize += copy;
6416 ++ sk_wmem_queued_add(ssk, copy);
6417 ++ sk_mem_charge(ssk, copy);
6418 ++ skb->ip_summed = CHECKSUM_PARTIAL;
6419 ++ WRITE_ONCE(tcp_sk(ssk)->write_seq, tcp_sk(ssk)->write_seq + copy);
6420 ++ TCP_SKB_CB(skb)->end_seq += copy;
6421 ++ tcp_skb_pcount_set(skb, 0);
6422 ++
6423 ++ /* on skb reuse we just need to update the DSS len */
6424 ++ if (reuse_skb) {
6425 ++ TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
6426 ++ mpext->data_len += copy;
6427 + WARN_ON_ONCE(zero_window_probe);
6428 + goto out;
6429 + }
6430 +
6431 +- mpext = skb_ext_find(tail, SKB_EXT_MPTCP);
6432 +- if (WARN_ON_ONCE(!mpext)) {
6433 +- /* should never reach here, stream corrupted */
6434 +- return -EINVAL;
6435 +- }
6436 +-
6437 + memset(mpext, 0, sizeof(*mpext));
6438 + mpext->data_seq = data_seq;
6439 + mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq;
6440 +- mpext->data_len = ret;
6441 ++ mpext->data_len = copy;
6442 + mpext->use_map = 1;
6443 + mpext->dsn64 = 1;
6444 +
6445 +@@ -1380,18 +1398,18 @@ alloc_skb:
6446 + mpext->dsn64);
6447 +
6448 + if (zero_window_probe) {
6449 +- mptcp_subflow_ctx(ssk)->rel_write_seq += ret;
6450 ++ mptcp_subflow_ctx(ssk)->rel_write_seq += copy;
6451 + mpext->frozen = 1;
6452 + if (READ_ONCE(msk->csum_enabled))
6453 +- mptcp_update_data_checksum(tail, ret);
6454 ++ mptcp_update_data_checksum(skb, copy);
6455 + tcp_push_pending_frames(ssk);
6456 + return 0;
6457 + }
6458 + out:
6459 + if (READ_ONCE(msk->csum_enabled))
6460 +- mptcp_update_data_checksum(tail, ret);
6461 +- mptcp_subflow_ctx(ssk)->rel_write_seq += ret;
6462 +- return ret;
6463 ++ mptcp_update_data_checksum(skb, copy);
6464 ++ mptcp_subflow_ctx(ssk)->rel_write_seq += copy;
6465 ++ return copy;
6466 + }
6467 +
6468 + #define MPTCP_SEND_BURST_SIZE ((1 << 16) - \
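The rewritten mptcp_sendmsg_frag() drops the sk_tx_skb_cache-based collapsing in favour of the strategy tcp_sendmsg_locked() already uses, via the tcp_mark_push() and tcp_skb_entail() helpers un-static'ed in the net/ipv4/tcp.c hunks earlier in this patch: extend the tail skb's last page fragment when the new data is contiguous with it, otherwise append a fresh fragment, and only push and allocate a new skb once the fragment slots run out. A simplified, self-contained sketch of that decision, with hypothetical struct names standing in for the skb internals:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	#define MAX_FRAGS 17  /* stand-in for the sysctl_max_skb_frags cap */

	/* hypothetical, simplified stand-ins for skb fragment state */
	struct frag { const char *page; size_t off, size; };
	struct buf  { int nr_frags; struct frag frags[MAX_FRAGS]; };

	/* can the new chunk simply extend the last fragment? */
	static bool can_coalesce(const struct buf *b, const char *page,
				 size_t off)
	{
		const struct frag *last;

		if (!b->nr_frags)
			return false;
		last = &b->frags[b->nr_frags - 1];
		return last->page == page && last->off + last->size == off;
	}

	/* returns false when a fresh buffer must be allocated instead */
	static bool add_chunk(struct buf *b, const char *page,
			      size_t off, size_t len)
	{
		if (can_coalesce(b, page, off)) {
			b->frags[b->nr_frags - 1].size += len; /* grow */
			return true;
		}
		if (b->nr_frags >= MAX_FRAGS)
			return false;                /* push + new skb */
		b->frags[b->nr_frags++] = (struct frag){ page, off, len };
		return true;
	}

	int main(void)
	{
		static const char page[4096];
		struct buf b = { 0 };

		add_chunk(&b, page, 0, 100);
		add_chunk(&b, page, 100, 50);   /* contiguous: coalesces */
		printf("%d fragment(s)\n", b.nr_frags);   /* 1 */
		return 0;
	}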
6469 +diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
6470 +index 9d43277b8b4fe..a56fd0b5a430a 100644
6471 +--- a/net/netfilter/ipvs/ip_vs_sync.c
6472 ++++ b/net/netfilter/ipvs/ip_vs_sync.c
6473 +@@ -1280,12 +1280,12 @@ static void set_sock_size(struct sock *sk, int mode, int val)
6474 + lock_sock(sk);
6475 + if (mode) {
6476 + val = clamp_t(int, val, (SOCK_MIN_SNDBUF + 1) / 2,
6477 +- sysctl_wmem_max);
6478 ++ READ_ONCE(sysctl_wmem_max));
6479 + sk->sk_sndbuf = val * 2;
6480 + sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
6481 + } else {
6482 + val = clamp_t(int, val, (SOCK_MIN_RCVBUF + 1) / 2,
6483 +- sysctl_rmem_max);
6484 ++ READ_ONCE(sysctl_rmem_max));
6485 + sk->sk_rcvbuf = val * 2;
6486 + sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
6487 + }
6488 +diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
6489 +index 9fb407084c506..4f61eb1282834 100644
6490 +--- a/net/netfilter/nf_flow_table_core.c
6491 ++++ b/net/netfilter/nf_flow_table_core.c
6492 +@@ -436,12 +436,17 @@ static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
6493 + }
6494 + }
6495 +
6496 ++void nf_flow_table_gc_run(struct nf_flowtable *flow_table)
6497 ++{
6498 ++ nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
6499 ++}
6500 ++
6501 + static void nf_flow_offload_work_gc(struct work_struct *work)
6502 + {
6503 + struct nf_flowtable *flow_table;
6504 +
6505 + flow_table = container_of(work, struct nf_flowtable, gc_work.work);
6506 +- nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
6507 ++ nf_flow_table_gc_run(flow_table);
6508 + queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
6509 + }
6510 +
6511 +@@ -599,11 +604,11 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
6512 + mutex_unlock(&flowtable_lock);
6513 +
6514 + cancel_delayed_work_sync(&flow_table->gc_work);
6515 +- nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
6516 +- nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
6517 + nf_flow_table_offload_flush(flow_table);
6518 +- if (nf_flowtable_hw_offload(flow_table))
6519 +- nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
6520 ++ /* ... no more pending work after this stage ... */
6521 ++ nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
6522 ++ nf_flow_table_gc_run(flow_table);
6523 ++ nf_flow_table_offload_flush_cleanup(flow_table);
6524 + rhashtable_destroy(&flow_table->rhashtable);
6525 + }
6526 + EXPORT_SYMBOL_GPL(nf_flow_table_free);
6527 +diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
6528 +index b561e0a44a45f..c4559fae8acd5 100644
6529 +--- a/net/netfilter/nf_flow_table_offload.c
6530 ++++ b/net/netfilter/nf_flow_table_offload.c
6531 +@@ -1050,6 +1050,14 @@ void nf_flow_offload_stats(struct nf_flowtable *flowtable,
6532 + flow_offload_queue_work(offload);
6533 + }
6534 +
6535 ++void nf_flow_table_offload_flush_cleanup(struct nf_flowtable *flowtable)
6536 ++{
6537 ++ if (nf_flowtable_hw_offload(flowtable)) {
6538 ++ flush_workqueue(nf_flow_offload_del_wq);
6539 ++ nf_flow_table_gc_run(flowtable);
6540 ++ }
6541 ++}
6542 ++
6543 + void nf_flow_table_offload_flush(struct nf_flowtable *flowtable)
6544 + {
6545 + if (nf_flowtable_hw_offload(flowtable)) {
6546 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
6547 +index 2f22a172a27e1..d8ca55d6be409 100644
6548 +--- a/net/netfilter/nf_tables_api.c
6549 ++++ b/net/netfilter/nf_tables_api.c
6550 +@@ -32,7 +32,6 @@ static LIST_HEAD(nf_tables_objects);
6551 + static LIST_HEAD(nf_tables_flowtables);
6552 + static LIST_HEAD(nf_tables_destroy_list);
6553 + static DEFINE_SPINLOCK(nf_tables_destroy_list_lock);
6554 +-static u64 table_handle;
6555 +
6556 + enum {
6557 + NFT_VALIDATE_SKIP = 0,
6558 +@@ -1156,7 +1155,7 @@ static int nf_tables_newtable(struct sk_buff *skb, const struct nfnl_info *info,
6559 + INIT_LIST_HEAD(&table->flowtables);
6560 + table->family = family;
6561 + table->flags = flags;
6562 +- table->handle = ++table_handle;
6563 ++ table->handle = ++nft_net->table_handle;
6564 + if (table->flags & NFT_TABLE_F_OWNER)
6565 + table->nlpid = NETLINK_CB(skb).portid;
6566 +
6567 +@@ -2102,9 +2101,9 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
6568 + struct netlink_ext_ack *extack)
6569 + {
6570 + const struct nlattr * const *nla = ctx->nla;
6571 ++ struct nft_stats __percpu *stats = NULL;
6572 + struct nft_table *table = ctx->table;
6573 + struct nft_base_chain *basechain;
6574 +- struct nft_stats __percpu *stats;
6575 + struct net *net = ctx->net;
6576 + char name[NFT_NAME_MAXLEN];
6577 + struct nft_trans *trans;
6578 +@@ -2141,7 +2140,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
6579 + return PTR_ERR(stats);
6580 + }
6581 + rcu_assign_pointer(basechain->stats, stats);
6582 +- static_branch_inc(&nft_counters_enabled);
6583 + }
6584 +
6585 + err = nft_basechain_init(basechain, family, &hook, flags);
6586 +@@ -2224,6 +2222,9 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
6587 + goto err_unregister_hook;
6588 + }
6589 +
6590 ++ if (stats)
6591 ++ static_branch_inc(&nft_counters_enabled);
6592 ++
6593 + table->use++;
6594 +
6595 + return 0;
6596 +@@ -2479,6 +2480,9 @@ static int nf_tables_newchain(struct sk_buff *skb, const struct nfnl_info *info,
6597 + nft_ctx_init(&ctx, net, skb, info->nlh, family, table, chain, nla);
6598 +
6599 + if (chain != NULL) {
6600 ++ if (chain->flags & NFT_CHAIN_BINDING)
6601 ++ return -EINVAL;
6602 ++
6603 + if (info->nlh->nlmsg_flags & NLM_F_EXCL) {
6604 + NL_SET_BAD_ATTR(extack, attr);
6605 + return -EEXIST;
6606 +@@ -5116,19 +5120,13 @@ static int nft_setelem_parse_flags(const struct nft_set *set,
6607 + static int nft_setelem_parse_key(struct nft_ctx *ctx, struct nft_set *set,
6608 + struct nft_data *key, struct nlattr *attr)
6609 + {
6610 +- struct nft_data_desc desc;
6611 +- int err;
6612 +-
6613 +- err = nft_data_init(ctx, key, NFT_DATA_VALUE_MAXLEN, &desc, attr);
6614 +- if (err < 0)
6615 +- return err;
6616 +-
6617 +- if (desc.type != NFT_DATA_VALUE || desc.len != set->klen) {
6618 +- nft_data_release(key, desc.type);
6619 +- return -EINVAL;
6620 +- }
6621 ++ struct nft_data_desc desc = {
6622 ++ .type = NFT_DATA_VALUE,
6623 ++ .size = NFT_DATA_VALUE_MAXLEN,
6624 ++ .len = set->klen,
6625 ++ };
6626 +
6627 +- return 0;
6628 ++ return nft_data_init(ctx, key, &desc, attr);
6629 + }
6630 +
6631 + static int nft_setelem_parse_data(struct nft_ctx *ctx, struct nft_set *set,
6632 +@@ -5137,24 +5135,18 @@ static int nft_setelem_parse_data(struct nft_ctx *ctx, struct nft_set *set,
6633 + struct nlattr *attr)
6634 + {
6635 + u32 dtype;
6636 +- int err;
6637 +-
6638 +- err = nft_data_init(ctx, data, NFT_DATA_VALUE_MAXLEN, desc, attr);
6639 +- if (err < 0)
6640 +- return err;
6641 +
6642 + if (set->dtype == NFT_DATA_VERDICT)
6643 + dtype = NFT_DATA_VERDICT;
6644 + else
6645 + dtype = NFT_DATA_VALUE;
6646 +
6647 +- if (dtype != desc->type ||
6648 +- set->dlen != desc->len) {
6649 +- nft_data_release(data, desc->type);
6650 +- return -EINVAL;
6651 +- }
6652 ++ desc->type = dtype;
6653 ++ desc->size = NFT_DATA_VALUE_MAXLEN;
6654 ++ desc->len = set->dlen;
6655 ++ desc->flags = NFT_DATA_DESC_SETELEM;
6656 +
6657 +- return 0;
6658 ++ return nft_data_init(ctx, data, desc, attr);
6659 + }
6660 +
6661 + static void *nft_setelem_catchall_get(const struct net *net,
6662 +@@ -9513,6 +9505,11 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
6663 + return PTR_ERR(chain);
6664 + if (nft_is_base_chain(chain))
6665 + return -EOPNOTSUPP;
6666 ++ if (nft_chain_is_bound(chain))
6667 ++ return -EINVAL;
6668 ++ if (desc->flags & NFT_DATA_DESC_SETELEM &&
6669 ++ chain->flags & NFT_CHAIN_BINDING)
6670 ++ return -EINVAL;
6671 +
6672 + chain->use++;
6673 + data->verdict.chain = chain;
6674 +@@ -9520,7 +9517,7 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
6675 + }
6676 +
6677 + desc->len = sizeof(data->verdict);
6678 +- desc->type = NFT_DATA_VERDICT;
6679 ++
6680 + return 0;
6681 + }
6682 +
6683 +@@ -9573,20 +9570,25 @@ nla_put_failure:
6684 + }
6685 +
6686 + static int nft_value_init(const struct nft_ctx *ctx,
6687 +- struct nft_data *data, unsigned int size,
6688 +- struct nft_data_desc *desc, const struct nlattr *nla)
6689 ++ struct nft_data *data, struct nft_data_desc *desc,
6690 ++ const struct nlattr *nla)
6691 + {
6692 + unsigned int len;
6693 +
6694 + len = nla_len(nla);
6695 + if (len == 0)
6696 + return -EINVAL;
6697 +- if (len > size)
6698 ++ if (len > desc->size)
6699 + return -EOVERFLOW;
6700 ++ if (desc->len) {
6701 ++ if (len != desc->len)
6702 ++ return -EINVAL;
6703 ++ } else {
6704 ++ desc->len = len;
6705 ++ }
6706 +
6707 + nla_memcpy(data->data, nla, len);
6708 +- desc->type = NFT_DATA_VALUE;
6709 +- desc->len = len;
6710 ++
6711 + return 0;
6712 + }
6713 +
6714 +@@ -9606,7 +9608,6 @@ static const struct nla_policy nft_data_policy[NFTA_DATA_MAX + 1] = {
6715 + *
6716 + * @ctx: context of the expression using the data
6717 + * @data: destination struct nft_data
6718 +- * @size: maximum data length
6719 + * @desc: data description
6720 + * @nla: netlink attribute containing data
6721 + *
6722 +@@ -9616,24 +9617,35 @@ static const struct nla_policy nft_data_policy[NFTA_DATA_MAX + 1] = {
6723 + * The caller can indicate that it only wants to accept data of type
6724 + * NFT_DATA_VALUE by passing NULL for the ctx argument.
6725 + */
6726 +-int nft_data_init(const struct nft_ctx *ctx,
6727 +- struct nft_data *data, unsigned int size,
6728 ++int nft_data_init(const struct nft_ctx *ctx, struct nft_data *data,
6729 + struct nft_data_desc *desc, const struct nlattr *nla)
6730 + {
6731 + struct nlattr *tb[NFTA_DATA_MAX + 1];
6732 + int err;
6733 +
6734 ++ if (WARN_ON_ONCE(!desc->size))
6735 ++ return -EINVAL;
6736 ++
6737 + err = nla_parse_nested_deprecated(tb, NFTA_DATA_MAX, nla,
6738 + nft_data_policy, NULL);
6739 + if (err < 0)
6740 + return err;
6741 +
6742 +- if (tb[NFTA_DATA_VALUE])
6743 +- return nft_value_init(ctx, data, size, desc,
6744 +- tb[NFTA_DATA_VALUE]);
6745 +- if (tb[NFTA_DATA_VERDICT] && ctx != NULL)
6746 +- return nft_verdict_init(ctx, data, desc, tb[NFTA_DATA_VERDICT]);
6747 +- return -EINVAL;
6748 ++ if (tb[NFTA_DATA_VALUE]) {
6749 ++ if (desc->type != NFT_DATA_VALUE)
6750 ++ return -EINVAL;
6751 ++
6752 ++ err = nft_value_init(ctx, data, desc, tb[NFTA_DATA_VALUE]);
6753 ++ } else if (tb[NFTA_DATA_VERDICT] && ctx != NULL) {
6754 ++ if (desc->type != NFT_DATA_VERDICT)
6755 ++ return -EINVAL;
6756 ++
6757 ++ err = nft_verdict_init(ctx, data, desc, tb[NFTA_DATA_VERDICT]);
6758 ++ } else {
6759 ++ err = -EINVAL;
6760 ++ }
6761 ++
6762 ++ return err;
6763 + }
6764 + EXPORT_SYMBOL_GPL(nft_data_init);
6765 +
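The recurring change in the nf_tables_api.c hunks is a new calling convention for nft_data_init(): instead of parsing first and making every caller validate desc.type and desc.len afterwards (and remember to nft_data_release() on a mismatch), callers now declare the expected type, maximum size, and optionally the exact length up front, and nft_data_init() refuses anything else before the data escapes. Condensed from the setelem hunks above:

	/* before: parse first, validate (and release) afterwards */
	struct nft_data_desc desc;
	err = nft_data_init(ctx, key, NFT_DATA_VALUE_MAXLEN, &desc, attr);
	if (err < 0)
		return err;
	if (desc.type != NFT_DATA_VALUE || desc.len != set->klen) {
		nft_data_release(key, desc.type);
		return -EINVAL;
	}

	/* after: state the contract up front; nft_data_init() enforces
	 * it, so no caller can forget the release on mismatch */
	struct nft_data_desc desc = {
		.type = NFT_DATA_VALUE,
		.size = NFT_DATA_VALUE_MAXLEN,
		.len  = set->klen,   /* 0 would mean "any length <= size" */
	};
	return nft_data_init(ctx, key, &desc, attr);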
6766 +diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
6767 +index d4d8f613af512..2ab4216d2a903 100644
6768 +--- a/net/netfilter/nf_tables_core.c
6769 ++++ b/net/netfilter/nf_tables_core.c
6770 +@@ -67,6 +67,50 @@ static void nft_cmp_fast_eval(const struct nft_expr *expr,
6771 + regs->verdict.code = NFT_BREAK;
6772 + }
6773 +
6774 ++static void nft_cmp16_fast_eval(const struct nft_expr *expr,
6775 ++ struct nft_regs *regs)
6776 ++{
6777 ++ const struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr);
6778 ++ const u64 *reg_data = (const u64 *)&regs->data[priv->sreg];
6779 ++ const u64 *mask = (const u64 *)&priv->mask;
6780 ++ const u64 *data = (const u64 *)&priv->data;
6781 ++
6782 ++ if (((reg_data[0] & mask[0]) == data[0] &&
6783 ++ ((reg_data[1] & mask[1]) == data[1])) ^ priv->inv)
6784 ++ return;
6785 ++ regs->verdict.code = NFT_BREAK;
6786 ++}
6787 ++
6788 ++static noinline void __nft_trace_verdict(struct nft_traceinfo *info,
6789 ++ const struct nft_chain *chain,
6790 ++ const struct nft_regs *regs)
6791 ++{
6792 ++ enum nft_trace_types type;
6793 ++
6794 ++ switch (regs->verdict.code) {
6795 ++ case NFT_CONTINUE:
6796 ++ case NFT_RETURN:
6797 ++ type = NFT_TRACETYPE_RETURN;
6798 ++ break;
6799 ++ default:
6800 ++ type = NFT_TRACETYPE_RULE;
6801 ++ break;
6802 ++ }
6803 ++
6804 ++ __nft_trace_packet(info, chain, type);
6805 ++}
6806 ++
6807 ++static inline void nft_trace_verdict(struct nft_traceinfo *info,
6808 ++ const struct nft_chain *chain,
6809 ++ const struct nft_rule *rule,
6810 ++ const struct nft_regs *regs)
6811 ++{
6812 ++ if (static_branch_unlikely(&nft_trace_enabled)) {
6813 ++ info->rule = rule;
6814 ++ __nft_trace_verdict(info, chain, regs);
6815 ++ }
6816 ++}
6817 ++
6818 + static bool nft_payload_fast_eval(const struct nft_expr *expr,
6819 + struct nft_regs *regs,
6820 + const struct nft_pktinfo *pkt)
6821 +@@ -185,6 +229,8 @@ next_rule:
6822 + nft_rule_for_each_expr(expr, last, rule) {
6823 + if (expr->ops == &nft_cmp_fast_ops)
6824 + nft_cmp_fast_eval(expr, &regs);
6825 ++ else if (expr->ops == &nft_cmp16_fast_ops)
6826 ++ nft_cmp16_fast_eval(expr, &regs);
6827 + else if (expr->ops == &nft_bitwise_fast_ops)
6828 + nft_bitwise_fast_eval(expr, &regs);
6829 + else if (expr->ops != &nft_payload_fast_ops ||
6830 +@@ -207,13 +253,13 @@ next_rule:
6831 + break;
6832 + }
6833 +
6834 ++ nft_trace_verdict(&info, chain, rule, &regs);
6835 ++
6836 + switch (regs.verdict.code & NF_VERDICT_MASK) {
6837 + case NF_ACCEPT:
6838 + case NF_DROP:
6839 + case NF_QUEUE:
6840 + case NF_STOLEN:
6841 +- nft_trace_packet(&info, chain, rule,
6842 +- NFT_TRACETYPE_RULE);
6843 + return regs.verdict.code;
6844 + }
6845 +
6846 +@@ -226,15 +272,10 @@ next_rule:
6847 + stackptr++;
6848 + fallthrough;
6849 + case NFT_GOTO:
6850 +- nft_trace_packet(&info, chain, rule,
6851 +- NFT_TRACETYPE_RULE);
6852 +-
6853 + chain = regs.verdict.chain;
6854 + goto do_chain;
6855 + case NFT_CONTINUE:
6856 + case NFT_RETURN:
6857 +- nft_trace_packet(&info, chain, rule,
6858 +- NFT_TRACETYPE_RETURN);
6859 + break;
6860 + default:
6861 + WARN_ON(1);
6862 +diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c
6863 +index 47b0dba95054f..d6ab7aa14adc2 100644
6864 +--- a/net/netfilter/nft_bitwise.c
6865 ++++ b/net/netfilter/nft_bitwise.c
6866 +@@ -93,7 +93,16 @@ static const struct nla_policy nft_bitwise_policy[NFTA_BITWISE_MAX + 1] = {
6867 + static int nft_bitwise_init_bool(struct nft_bitwise *priv,
6868 + const struct nlattr *const tb[])
6869 + {
6870 +- struct nft_data_desc mask, xor;
6871 ++ struct nft_data_desc mask = {
6872 ++ .type = NFT_DATA_VALUE,
6873 ++ .size = sizeof(priv->mask),
6874 ++ .len = priv->len,
6875 ++ };
6876 ++ struct nft_data_desc xor = {
6877 ++ .type = NFT_DATA_VALUE,
6878 ++ .size = sizeof(priv->xor),
6879 ++ .len = priv->len,
6880 ++ };
6881 + int err;
6882 +
6883 + if (tb[NFTA_BITWISE_DATA])
6884 +@@ -103,36 +112,30 @@ static int nft_bitwise_init_bool(struct nft_bitwise *priv,
6885 + !tb[NFTA_BITWISE_XOR])
6886 + return -EINVAL;
6887 +
6888 +- err = nft_data_init(NULL, &priv->mask, sizeof(priv->mask), &mask,
6889 +- tb[NFTA_BITWISE_MASK]);
6890 ++ err = nft_data_init(NULL, &priv->mask, &mask, tb[NFTA_BITWISE_MASK]);
6891 + if (err < 0)
6892 + return err;
6893 +- if (mask.type != NFT_DATA_VALUE || mask.len != priv->len) {
6894 +- err = -EINVAL;
6895 +- goto err1;
6896 +- }
6897 +
6898 +- err = nft_data_init(NULL, &priv->xor, sizeof(priv->xor), &xor,
6899 +- tb[NFTA_BITWISE_XOR]);
6900 ++ err = nft_data_init(NULL, &priv->xor, &xor, tb[NFTA_BITWISE_XOR]);
6901 + if (err < 0)
6902 +- goto err1;
6903 +- if (xor.type != NFT_DATA_VALUE || xor.len != priv->len) {
6904 +- err = -EINVAL;
6905 +- goto err2;
6906 +- }
6907 ++ goto err_xor_err;
6908 +
6909 + return 0;
6910 +-err2:
6911 +- nft_data_release(&priv->xor, xor.type);
6912 +-err1:
6913 ++
6914 ++err_xor_err:
6915 + nft_data_release(&priv->mask, mask.type);
6916 ++
6917 + return err;
6918 + }
6919 +
6920 + static int nft_bitwise_init_shift(struct nft_bitwise *priv,
6921 + const struct nlattr *const tb[])
6922 + {
6923 +- struct nft_data_desc d;
6924 ++ struct nft_data_desc desc = {
6925 ++ .type = NFT_DATA_VALUE,
6926 ++ .size = sizeof(priv->data),
6927 ++ .len = sizeof(u32),
6928 ++ };
6929 + int err;
6930 +
6931 + if (tb[NFTA_BITWISE_MASK] ||
6932 +@@ -142,13 +145,12 @@ static int nft_bitwise_init_shift(struct nft_bitwise *priv,
6933 + if (!tb[NFTA_BITWISE_DATA])
6934 + return -EINVAL;
6935 +
6936 +- err = nft_data_init(NULL, &priv->data, sizeof(priv->data), &d,
6937 +- tb[NFTA_BITWISE_DATA]);
6938 ++ err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_BITWISE_DATA]);
6939 + if (err < 0)
6940 + return err;
6941 +- if (d.type != NFT_DATA_VALUE || d.len != sizeof(u32) ||
6942 +- priv->data.data[0] >= BITS_PER_TYPE(u32)) {
6943 +- nft_data_release(&priv->data, d.type);
6944 ++
6945 ++ if (priv->data.data[0] >= BITS_PER_TYPE(u32)) {
6946 ++ nft_data_release(&priv->data, desc.type);
6947 + return -EINVAL;
6948 + }
6949 +
6950 +@@ -290,22 +292,21 @@ static const struct nft_expr_ops nft_bitwise_ops = {
6951 + static int
6952 + nft_bitwise_extract_u32_data(const struct nlattr * const tb, u32 *out)
6953 + {
6954 +- struct nft_data_desc desc;
6955 + struct nft_data data;
6956 +- int err = 0;
6957 ++ struct nft_data_desc desc = {
6958 ++ .type = NFT_DATA_VALUE,
6959 ++ .size = sizeof(data),
6960 ++ .len = sizeof(u32),
6961 ++ };
6962 ++ int err;
6963 +
6964 +- err = nft_data_init(NULL, &data, sizeof(data), &desc, tb);
6965 ++ err = nft_data_init(NULL, &data, &desc, tb);
6966 + if (err < 0)
6967 + return err;
6968 +
6969 +- if (desc.type != NFT_DATA_VALUE || desc.len != sizeof(u32)) {
6970 +- err = -EINVAL;
6971 +- goto err;
6972 +- }
6973 + *out = data.data[0];
6974 +-err:
6975 +- nft_data_release(&data, desc.type);
6976 +- return err;
6977 ++
6978 ++ return 0;
6979 + }
6980 +
6981 + static int nft_bitwise_fast_init(const struct nft_ctx *ctx,
6982 +diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
6983 +index 47b6d05f1ae69..461763a571f20 100644
6984 +--- a/net/netfilter/nft_cmp.c
6985 ++++ b/net/netfilter/nft_cmp.c
6986 +@@ -73,20 +73,16 @@ static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
6987 + const struct nlattr * const tb[])
6988 + {
6989 + struct nft_cmp_expr *priv = nft_expr_priv(expr);
6990 +- struct nft_data_desc desc;
6991 ++ struct nft_data_desc desc = {
6992 ++ .type = NFT_DATA_VALUE,
6993 ++ .size = sizeof(priv->data),
6994 ++ };
6995 + int err;
6996 +
6997 +- err = nft_data_init(NULL, &priv->data, sizeof(priv->data), &desc,
6998 +- tb[NFTA_CMP_DATA]);
6999 ++ err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_CMP_DATA]);
7000 + if (err < 0)
7001 + return err;
7002 +
7003 +- if (desc.type != NFT_DATA_VALUE) {
7004 +- err = -EINVAL;
7005 +- nft_data_release(&priv->data, desc.type);
7006 +- return err;
7007 +- }
7008 +-
7009 + err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
7010 + if (err < 0)
7011 + return err;
7012 +@@ -201,12 +197,14 @@ static int nft_cmp_fast_init(const struct nft_ctx *ctx,
7013 + const struct nlattr * const tb[])
7014 + {
7015 + struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
7016 +- struct nft_data_desc desc;
7017 + struct nft_data data;
7018 ++ struct nft_data_desc desc = {
7019 ++ .type = NFT_DATA_VALUE,
7020 ++ .size = sizeof(data),
7021 ++ };
7022 + int err;
7023 +
7024 +- err = nft_data_init(NULL, &data, sizeof(data), &desc,
7025 +- tb[NFTA_CMP_DATA]);
7026 ++ err = nft_data_init(NULL, &data, &desc, tb[NFTA_CMP_DATA]);
7027 + if (err < 0)
7028 + return err;
7029 +
7030 +@@ -272,12 +270,108 @@ const struct nft_expr_ops nft_cmp_fast_ops = {
7031 + .offload = nft_cmp_fast_offload,
7032 + };
7033 +
7034 ++static u32 nft_cmp_mask(u32 bitlen)
7035 ++{
7036 ++ return (__force u32)cpu_to_le32(~0U >> (sizeof(u32) * BITS_PER_BYTE - bitlen));
7037 ++}
7038 ++
7039 ++static void nft_cmp16_fast_mask(struct nft_data *data, unsigned int bitlen)
7040 ++{
7041 ++ int len = bitlen / BITS_PER_BYTE;
7042 ++ int i, words = len / sizeof(u32);
7043 ++
7044 ++ for (i = 0; i < words; i++) {
7045 ++ data->data[i] = 0xffffffff;
7046 ++ bitlen -= sizeof(u32) * BITS_PER_BYTE;
7047 ++ }
7048 ++
7049 ++ if (len % sizeof(u32))
7050 ++ data->data[i++] = nft_cmp_mask(bitlen);
7051 ++
7052 ++ for (; i < 4; i++)
7053 ++ data->data[i] = 0;
7054 ++}
7055 ++
7056 ++static int nft_cmp16_fast_init(const struct nft_ctx *ctx,
7057 ++ const struct nft_expr *expr,
7058 ++ const struct nlattr * const tb[])
7059 ++{
7060 ++ struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr);
7061 ++ struct nft_data_desc desc = {
7062 ++ .type = NFT_DATA_VALUE,
7063 ++ .size = sizeof(priv->data),
7064 ++ };
7065 ++ int err;
7066 ++
7067 ++ err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_CMP_DATA]);
7068 ++ if (err < 0)
7069 ++ return err;
7070 ++
7071 ++ err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
7072 ++ if (err < 0)
7073 ++ return err;
7074 ++
7075 ++ nft_cmp16_fast_mask(&priv->mask, desc.len * BITS_PER_BYTE);
7076 ++ priv->inv = ntohl(nla_get_be32(tb[NFTA_CMP_OP])) != NFT_CMP_EQ;
7077 ++ priv->len = desc.len;
7078 ++
7079 ++ return 0;
7080 ++}
7081 ++
7082 ++static int nft_cmp16_fast_offload(struct nft_offload_ctx *ctx,
7083 ++ struct nft_flow_rule *flow,
7084 ++ const struct nft_expr *expr)
7085 ++{
7086 ++ const struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr);
7087 ++ struct nft_cmp_expr cmp = {
7088 ++ .data = priv->data,
7089 ++ .sreg = priv->sreg,
7090 ++ .len = priv->len,
7091 ++ .op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ,
7092 ++ };
7093 ++
7094 ++ return __nft_cmp_offload(ctx, flow, &cmp);
7095 ++}
7096 ++
7097 ++static int nft_cmp16_fast_dump(struct sk_buff *skb, const struct nft_expr *expr)
7098 ++{
7099 ++ const struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr);
7100 ++ enum nft_cmp_ops op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ;
7101 ++
7102 ++ if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
7103 ++ goto nla_put_failure;
7104 ++ if (nla_put_be32(skb, NFTA_CMP_OP, htonl(op)))
7105 ++ goto nla_put_failure;
7106 ++
7107 ++ if (nft_data_dump(skb, NFTA_CMP_DATA, &priv->data,
7108 ++ NFT_DATA_VALUE, priv->len) < 0)
7109 ++ goto nla_put_failure;
7110 ++ return 0;
7111 ++
7112 ++nla_put_failure:
7113 ++ return -1;
7114 ++}
7115 ++
7116 ++
7117 ++const struct nft_expr_ops nft_cmp16_fast_ops = {
7118 ++ .type = &nft_cmp_type,
7119 ++ .size = NFT_EXPR_SIZE(sizeof(struct nft_cmp16_fast_expr)),
7120 ++ .eval = NULL, /* inlined */
7121 ++ .init = nft_cmp16_fast_init,
7122 ++ .dump = nft_cmp16_fast_dump,
7123 ++ .offload = nft_cmp16_fast_offload,
7124 ++};
7125 ++
7126 + static const struct nft_expr_ops *
7127 + nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
7128 + {
7129 +- struct nft_data_desc desc;
7130 + struct nft_data data;
7131 ++ struct nft_data_desc desc = {
7132 ++ .type = NFT_DATA_VALUE,
7133 ++ .size = sizeof(data),
7134 ++ };
7135 + enum nft_cmp_ops op;
7136 ++ u8 sreg;
7137 + int err;
7138 +
7139 + if (tb[NFTA_CMP_SREG] == NULL ||
7140 +@@ -298,21 +392,21 @@ nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
7141 + return ERR_PTR(-EINVAL);
7142 + }
7143 +
7144 +- err = nft_data_init(NULL, &data, sizeof(data), &desc,
7145 +- tb[NFTA_CMP_DATA]);
7146 ++ err = nft_data_init(NULL, &data, &desc, tb[NFTA_CMP_DATA]);
7147 + if (err < 0)
7148 + return ERR_PTR(err);
7149 +
7150 +- if (desc.type != NFT_DATA_VALUE)
7151 +- goto err1;
7152 +-
7153 +- if (desc.len <= sizeof(u32) && (op == NFT_CMP_EQ || op == NFT_CMP_NEQ))
7154 +- return &nft_cmp_fast_ops;
7155 ++ sreg = ntohl(nla_get_be32(tb[NFTA_CMP_SREG]));
7156 +
7157 ++ if (op == NFT_CMP_EQ || op == NFT_CMP_NEQ) {
7158 ++ if (desc.len <= sizeof(u32))
7159 ++ return &nft_cmp_fast_ops;
7160 ++ else if (desc.len <= sizeof(data) &&
7161 ++ ((sreg >= NFT_REG_1 && sreg <= NFT_REG_4) ||
7162 ++ (sreg >= NFT_REG32_00 && sreg <= NFT_REG32_12 && sreg % 2 == 0)))
7163 ++ return &nft_cmp16_fast_ops;
7164 ++ }
7165 + return &nft_cmp_ops;
7166 +-err1:
7167 +- nft_data_release(&data, desc.type);
7168 +- return ERR_PTR(-EINVAL);
7169 + }
7170 +
7171 + struct nft_expr_type nft_cmp_type __read_mostly = {
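nft_cmp16_fast_eval() above compares up to 16 bytes as two masked 64-bit loads, so nft_cmp16_fast_init() must expand the compare length into a byte-granular prefix mask across the four 32-bit data registers. A standalone re-creation of that mask construction, assuming a little-endian host so that cpu_to_le32() is the identity:

	#include <stdio.h>
	#include <stdint.h>

	static uint32_t cmp_mask(unsigned int bitlen)
	{
		return ~0U >> (32 - bitlen);
	}

	static void cmp16_mask(uint32_t data[4], unsigned int bitlen)
	{
		unsigned int len = bitlen / 8, i, words = len / 4;

		for (i = 0; i < words; i++) {
			data[i] = 0xffffffff;   /* fully compared words */
			bitlen -= 32;
		}
		if (len % 4)                    /* partial trailing word */
			data[i++] = cmp_mask(bitlen);
		for (; i < 4; i++)
			data[i] = 0;            /* bytes never compared */
	}

	int main(void)
	{
		uint32_t m[4];

		cmp16_mask(m, 6 * 8);  /* 6-byte operand, e.g. a MAC */
		printf("%08x %08x %08x %08x\n", m[0], m[1], m[2], m[3]);
		/* prints: ffffffff 0000ffff 00000000 00000000 */
		return 0;
	}

As a byte sequence on a little-endian host that is ff ff ff ff ff ff 00 00 ...: the first six bytes participate in the compare, the remaining ten are ignored.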
7172 +diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
7173 +index d0f67d325bdfd..fcdbc5ed3f367 100644
7174 +--- a/net/netfilter/nft_immediate.c
7175 ++++ b/net/netfilter/nft_immediate.c
7176 +@@ -29,20 +29,36 @@ static const struct nla_policy nft_immediate_policy[NFTA_IMMEDIATE_MAX + 1] = {
7177 + [NFTA_IMMEDIATE_DATA] = { .type = NLA_NESTED },
7178 + };
7179 +
7180 ++static enum nft_data_types nft_reg_to_type(const struct nlattr *nla)
7181 ++{
7182 ++ enum nft_data_types type;
7183 ++ u8 reg;
7184 ++
7185 ++ reg = ntohl(nla_get_be32(nla));
7186 ++ if (reg == NFT_REG_VERDICT)
7187 ++ type = NFT_DATA_VERDICT;
7188 ++ else
7189 ++ type = NFT_DATA_VALUE;
7190 ++
7191 ++ return type;
7192 ++}
7193 ++
7194 + static int nft_immediate_init(const struct nft_ctx *ctx,
7195 + const struct nft_expr *expr,
7196 + const struct nlattr * const tb[])
7197 + {
7198 + struct nft_immediate_expr *priv = nft_expr_priv(expr);
7199 +- struct nft_data_desc desc;
7200 ++ struct nft_data_desc desc = {
7201 ++ .size = sizeof(priv->data),
7202 ++ };
7203 + int err;
7204 +
7205 + if (tb[NFTA_IMMEDIATE_DREG] == NULL ||
7206 + tb[NFTA_IMMEDIATE_DATA] == NULL)
7207 + return -EINVAL;
7208 +
7209 +- err = nft_data_init(ctx, &priv->data, sizeof(priv->data), &desc,
7210 +- tb[NFTA_IMMEDIATE_DATA]);
7211 ++ desc.type = nft_reg_to_type(tb[NFTA_IMMEDIATE_DREG]);
7212 ++ err = nft_data_init(ctx, &priv->data, &desc, tb[NFTA_IMMEDIATE_DATA]);
7213 + if (err < 0)
7214 + return err;
7215 +
7216 +diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
7217 +index d82677e83400b..720dc9fba6d4f 100644
7218 +--- a/net/netfilter/nft_osf.c
7219 ++++ b/net/netfilter/nft_osf.c
7220 +@@ -115,9 +115,21 @@ static int nft_osf_validate(const struct nft_ctx *ctx,
7221 + const struct nft_expr *expr,
7222 + const struct nft_data **data)
7223 + {
7224 +- return nft_chain_validate_hooks(ctx->chain, (1 << NF_INET_LOCAL_IN) |
7225 +- (1 << NF_INET_PRE_ROUTING) |
7226 +- (1 << NF_INET_FORWARD));
7227 ++ unsigned int hooks;
7228 ++
7229 ++ switch (ctx->family) {
7230 ++ case NFPROTO_IPV4:
7231 ++ case NFPROTO_IPV6:
7232 ++ case NFPROTO_INET:
7233 ++ hooks = (1 << NF_INET_LOCAL_IN) |
7234 ++ (1 << NF_INET_PRE_ROUTING) |
7235 ++ (1 << NF_INET_FORWARD);
7236 ++ break;
7237 ++ default:
7238 ++ return -EOPNOTSUPP;
7239 ++ }
7240 ++
7241 ++ return nft_chain_validate_hooks(ctx->chain, hooks);
7242 + }
7243 +
7244 + static struct nft_expr_type nft_osf_type;
7245 +diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
7246 +index b46e01365bd96..da652c21368e1 100644
7247 +--- a/net/netfilter/nft_payload.c
7248 ++++ b/net/netfilter/nft_payload.c
7249 +@@ -712,17 +712,23 @@ static int nft_payload_set_init(const struct nft_ctx *ctx,
7250 + const struct nlattr * const tb[])
7251 + {
7252 + struct nft_payload_set *priv = nft_expr_priv(expr);
7253 ++ u32 csum_offset, csum_type = NFT_PAYLOAD_CSUM_NONE;
7254 ++ int err;
7255 +
7256 + priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
7257 + priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
7258 + priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
7259 +
7260 + if (tb[NFTA_PAYLOAD_CSUM_TYPE])
7261 +- priv->csum_type =
7262 +- ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
7263 +- if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
7264 +- priv->csum_offset =
7265 +- ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
7266 ++ csum_type = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
7267 ++ if (tb[NFTA_PAYLOAD_CSUM_OFFSET]) {
7268 ++ err = nft_parse_u32_check(tb[NFTA_PAYLOAD_CSUM_OFFSET], U8_MAX,
7269 ++ &csum_offset);
7270 ++ if (err < 0)
7271 ++ return err;
7272 ++
7273 ++ priv->csum_offset = csum_offset;
7274 ++ }
7275 + if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
7276 + u32 flags;
7277 +
7278 +@@ -733,7 +739,7 @@ static int nft_payload_set_init(const struct nft_ctx *ctx,
7279 + priv->csum_flags = flags;
7280 + }
7281 +
7282 +- switch (priv->csum_type) {
7283 ++ switch (csum_type) {
7284 + case NFT_PAYLOAD_CSUM_NONE:
7285 + case NFT_PAYLOAD_CSUM_INET:
7286 + break;
7287 +@@ -747,6 +753,7 @@ static int nft_payload_set_init(const struct nft_ctx *ctx,
7288 + default:
7289 + return -EOPNOTSUPP;
7290 + }
7291 ++ priv->csum_type = csum_type;
7292 +
7293 + return nft_parse_register_load(tb[NFTA_PAYLOAD_SREG], &priv->sreg,
7294 + priv->len);
7295 +@@ -785,6 +792,7 @@ nft_payload_select_ops(const struct nft_ctx *ctx,
7296 + {
7297 + enum nft_payload_bases base;
7298 + unsigned int offset, len;
7299 ++ int err;
7300 +
7301 + if (tb[NFTA_PAYLOAD_BASE] == NULL ||
7302 + tb[NFTA_PAYLOAD_OFFSET] == NULL ||
7303 +@@ -811,8 +819,13 @@ nft_payload_select_ops(const struct nft_ctx *ctx,
7304 + if (tb[NFTA_PAYLOAD_DREG] == NULL)
7305 + return ERR_PTR(-EINVAL);
7306 +
7307 +- offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
7308 +- len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
7309 ++ err = nft_parse_u32_check(tb[NFTA_PAYLOAD_OFFSET], U8_MAX, &offset);
7310 ++ if (err < 0)
7311 ++ return ERR_PTR(err);
7312 ++
7313 ++ err = nft_parse_u32_check(tb[NFTA_PAYLOAD_LEN], U8_MAX, &len);
7314 ++ if (err < 0)
7315 ++ return ERR_PTR(err);
7316 +
7317 + if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
7318 + base != NFT_PAYLOAD_LL_HEADER && base != NFT_PAYLOAD_INNER_HEADER)
7319 +diff --git a/net/netfilter/nft_range.c b/net/netfilter/nft_range.c
7320 +index e4a1c44d7f513..e6bbe32c323df 100644
7321 +--- a/net/netfilter/nft_range.c
7322 ++++ b/net/netfilter/nft_range.c
7323 +@@ -51,7 +51,14 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr
7324 + const struct nlattr * const tb[])
7325 + {
7326 + struct nft_range_expr *priv = nft_expr_priv(expr);
7327 +- struct nft_data_desc desc_from, desc_to;
7328 ++ struct nft_data_desc desc_from = {
7329 ++ .type = NFT_DATA_VALUE,
7330 ++ .size = sizeof(priv->data_from),
7331 ++ };
7332 ++ struct nft_data_desc desc_to = {
7333 ++ .type = NFT_DATA_VALUE,
7334 ++ .size = sizeof(priv->data_to),
7335 ++ };
7336 + int err;
7337 + u32 op;
7338 +
7339 +@@ -61,26 +68,16 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr
7340 + !tb[NFTA_RANGE_TO_DATA])
7341 + return -EINVAL;
7342 +
7343 +- err = nft_data_init(NULL, &priv->data_from, sizeof(priv->data_from),
7344 +- &desc_from, tb[NFTA_RANGE_FROM_DATA]);
7345 ++ err = nft_data_init(NULL, &priv->data_from, &desc_from,
7346 ++ tb[NFTA_RANGE_FROM_DATA]);
7347 + if (err < 0)
7348 + return err;
7349 +
7350 +- if (desc_from.type != NFT_DATA_VALUE) {
7351 +- err = -EINVAL;
7352 +- goto err1;
7353 +- }
7354 +-
7355 +- err = nft_data_init(NULL, &priv->data_to, sizeof(priv->data_to),
7356 +- &desc_to, tb[NFTA_RANGE_TO_DATA]);
7357 ++ err = nft_data_init(NULL, &priv->data_to, &desc_to,
7358 ++ tb[NFTA_RANGE_TO_DATA]);
7359 + if (err < 0)
7360 + goto err1;
7361 +
7362 +- if (desc_to.type != NFT_DATA_VALUE) {
7363 +- err = -EINVAL;
7364 +- goto err2;
7365 +- }
7366 +-
7367 + if (desc_from.len != desc_to.len) {
7368 + err = -EINVAL;
7369 + goto err2;
7370 +diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c
7371 +index 3b27926d5382c..2ee50996da8cc 100644
7372 +--- a/net/netfilter/nft_tunnel.c
7373 ++++ b/net/netfilter/nft_tunnel.c
7374 +@@ -133,6 +133,7 @@ static const struct nft_expr_ops nft_tunnel_get_ops = {
7375 +
7376 + static struct nft_expr_type nft_tunnel_type __read_mostly = {
7377 + .name = "tunnel",
7378 ++ .family = NFPROTO_NETDEV,
7379 + .ops = &nft_tunnel_get_ops,
7380 + .policy = nft_tunnel_policy,
7381 + .maxattr = NFTA_TUNNEL_MAX,
7382 +diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c
7383 +index 11c45c8c6c164..036d92c0ad794 100644
7384 +--- a/net/rose/rose_loopback.c
7385 ++++ b/net/rose/rose_loopback.c
7386 +@@ -96,7 +96,8 @@ static void rose_loopback_timer(struct timer_list *unused)
7387 + }
7388 +
7389 + if (frametype == ROSE_CALL_REQUEST) {
7390 +- if (!rose_loopback_neigh->dev) {
7391 ++ if (!rose_loopback_neigh->dev &&
7392 ++ !rose_loopback_neigh->loopback) {
7393 + kfree_skb(skb);
7394 + continue;
7395 + }
7396 +diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
7397 +index 25c9a2cbf048c..d674d90e70313 100644
7398 +--- a/net/rxrpc/call_object.c
7399 ++++ b/net/rxrpc/call_object.c
7400 +@@ -285,8 +285,10 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
7401 + _enter("%p,%lx", rx, p->user_call_ID);
7402 +
7403 + limiter = rxrpc_get_call_slot(p, gfp);
7404 +- if (!limiter)
7405 ++ if (!limiter) {
7406 ++ release_sock(&rx->sk);
7407 + return ERR_PTR(-ERESTARTSYS);
7408 ++ }
7409 +
7410 + call = rxrpc_alloc_client_call(rx, srx, gfp, debug_id);
7411 + if (IS_ERR(call)) {
7412 +diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
7413 +index 1d38e279e2efa..3c3a626459deb 100644
7414 +--- a/net/rxrpc/sendmsg.c
7415 ++++ b/net/rxrpc/sendmsg.c
7416 +@@ -51,10 +51,7 @@ static int rxrpc_wait_for_tx_window_intr(struct rxrpc_sock *rx,
7417 + return sock_intr_errno(*timeo);
7418 +
7419 + trace_rxrpc_transmit(call, rxrpc_transmit_wait);
7420 +- mutex_unlock(&call->user_mutex);
7421 + *timeo = schedule_timeout(*timeo);
7422 +- if (mutex_lock_interruptible(&call->user_mutex) < 0)
7423 +- return sock_intr_errno(*timeo);
7424 + }
7425 + }
7426 +
7427 +@@ -290,37 +287,48 @@ out:
7428 + static int rxrpc_send_data(struct rxrpc_sock *rx,
7429 + struct rxrpc_call *call,
7430 + struct msghdr *msg, size_t len,
7431 +- rxrpc_notify_end_tx_t notify_end_tx)
7432 ++ rxrpc_notify_end_tx_t notify_end_tx,
7433 ++ bool *_dropped_lock)
7434 + {
7435 + struct rxrpc_skb_priv *sp;
7436 + struct sk_buff *skb;
7437 + struct sock *sk = &rx->sk;
7438 ++ enum rxrpc_call_state state;
7439 + long timeo;
7440 +- bool more;
7441 +- int ret, copied;
7442 ++ bool more = msg->msg_flags & MSG_MORE;
7443 ++ int ret, copied = 0;
7444 +
7445 + timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
7446 +
7447 + /* this should be in poll */
7448 + sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
7449 +
7450 ++reload:
7451 ++ ret = -EPIPE;
7452 + if (sk->sk_shutdown & SEND_SHUTDOWN)
7453 +- return -EPIPE;
7454 +-
7455 +- more = msg->msg_flags & MSG_MORE;
7456 +-
7457 ++ goto maybe_error;
7458 ++ state = READ_ONCE(call->state);
7459 ++ ret = -ESHUTDOWN;
7460 ++ if (state >= RXRPC_CALL_COMPLETE)
7461 ++ goto maybe_error;
7462 ++ ret = -EPROTO;
7463 ++ if (state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
7464 ++ state != RXRPC_CALL_SERVER_ACK_REQUEST &&
7465 ++ state != RXRPC_CALL_SERVER_SEND_REPLY)
7466 ++ goto maybe_error;
7467 ++
7468 ++ ret = -EMSGSIZE;
7469 + if (call->tx_total_len != -1) {
7470 +- if (len > call->tx_total_len)
7471 +- return -EMSGSIZE;
7472 +- if (!more && len != call->tx_total_len)
7473 +- return -EMSGSIZE;
7474 ++ if (len - copied > call->tx_total_len)
7475 ++ goto maybe_error;
7476 ++ if (!more && len - copied != call->tx_total_len)
7477 ++ goto maybe_error;
7478 + }
7479 +
7480 + skb = call->tx_pending;
7481 + call->tx_pending = NULL;
7482 + rxrpc_see_skb(skb, rxrpc_skb_seen);
7483 +
7484 +- copied = 0;
7485 + do {
7486 + /* Check to see if there's a ping ACK to reply to. */
7487 + if (call->ackr_reason == RXRPC_ACK_PING_RESPONSE)
7488 +@@ -331,16 +339,8 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
7489 +
7490 + _debug("alloc");
7491 +
7492 +- if (!rxrpc_check_tx_space(call, NULL)) {
7493 +- ret = -EAGAIN;
7494 +- if (msg->msg_flags & MSG_DONTWAIT)
7495 +- goto maybe_error;
7496 +- ret = rxrpc_wait_for_tx_window(rx, call,
7497 +- &timeo,
7498 +- msg->msg_flags & MSG_WAITALL);
7499 +- if (ret < 0)
7500 +- goto maybe_error;
7501 +- }
7502 ++ if (!rxrpc_check_tx_space(call, NULL))
7503 ++ goto wait_for_space;
7504 +
7505 + /* Work out the maximum size of a packet. Assume that
7506 + * the security header is going to be in the padded
7507 +@@ -468,6 +468,27 @@ maybe_error:
7508 + efault:
7509 + ret = -EFAULT;
7510 + goto out;
7511 ++
7512 ++wait_for_space:
7513 ++ ret = -EAGAIN;
7514 ++ if (msg->msg_flags & MSG_DONTWAIT)
7515 ++ goto maybe_error;
7516 ++ mutex_unlock(&call->user_mutex);
7517 ++ *_dropped_lock = true;
7518 ++ ret = rxrpc_wait_for_tx_window(rx, call, &timeo,
7519 ++ msg->msg_flags & MSG_WAITALL);
7520 ++ if (ret < 0)
7521 ++ goto maybe_error;
7522 ++ if (call->interruptibility == RXRPC_INTERRUPTIBLE) {
7523 ++ if (mutex_lock_interruptible(&call->user_mutex) < 0) {
7524 ++ ret = sock_intr_errno(timeo);
7525 ++ goto maybe_error;
7526 ++ }
7527 ++ } else {
7528 ++ mutex_lock(&call->user_mutex);
7529 ++ }
7530 ++ *_dropped_lock = false;
7531 ++ goto reload;
7532 + }
7533 +
7534 + /*
7535 +@@ -629,6 +650,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
7536 + enum rxrpc_call_state state;
7537 + struct rxrpc_call *call;
7538 + unsigned long now, j;
7539 ++ bool dropped_lock = false;
7540 + int ret;
7541 +
7542 + struct rxrpc_send_params p = {
7543 +@@ -737,21 +759,13 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
7544 + ret = rxrpc_send_abort_packet(call);
7545 + } else if (p.command != RXRPC_CMD_SEND_DATA) {
7546 + ret = -EINVAL;
7547 +- } else if (rxrpc_is_client_call(call) &&
7548 +- state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
7549 +- /* request phase complete for this client call */
7550 +- ret = -EPROTO;
7551 +- } else if (rxrpc_is_service_call(call) &&
7552 +- state != RXRPC_CALL_SERVER_ACK_REQUEST &&
7553 +- state != RXRPC_CALL_SERVER_SEND_REPLY) {
7554 +- /* Reply phase not begun or not complete for service call. */
7555 +- ret = -EPROTO;
7556 + } else {
7557 +- ret = rxrpc_send_data(rx, call, msg, len, NULL);
7558 ++ ret = rxrpc_send_data(rx, call, msg, len, NULL, &dropped_lock);
7559 + }
7560 +
7561 + out_put_unlock:
7562 +- mutex_unlock(&call->user_mutex);
7563 ++ if (!dropped_lock)
7564 ++ mutex_unlock(&call->user_mutex);
7565 + error_put:
7566 + rxrpc_put_call(call, rxrpc_call_put);
7567 + _leave(" = %d", ret);
7568 +@@ -779,6 +793,7 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
7569 + struct msghdr *msg, size_t len,
7570 + rxrpc_notify_end_tx_t notify_end_tx)
7571 + {
7572 ++ bool dropped_lock = false;
7573 + int ret;
7574 +
7575 + _enter("{%d,%s},", call->debug_id, rxrpc_call_states[call->state]);
7576 +@@ -796,7 +811,7 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
7577 + case RXRPC_CALL_SERVER_ACK_REQUEST:
7578 + case RXRPC_CALL_SERVER_SEND_REPLY:
7579 + ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len,
7580 +- notify_end_tx);
7581 ++ notify_end_tx, &dropped_lock);
7582 + break;
7583 + case RXRPC_CALL_COMPLETE:
7584 + read_lock_bh(&call->state_lock);
7585 +@@ -810,7 +825,8 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
7586 + break;
7587 + }
7588 +
7589 +- mutex_unlock(&call->user_mutex);
7590 ++ if (!dropped_lock)
7591 ++ mutex_unlock(&call->user_mutex);
7592 + _leave(" = %d", ret);
7593 + return ret;
7594 + }
7595 +diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
7596 +index 30c29a9a2efd2..250d87d993cb7 100644
7597 +--- a/net/sched/sch_generic.c
7598 ++++ b/net/sched/sch_generic.c
7599 +@@ -409,7 +409,7 @@ static inline bool qdisc_restart(struct Qdisc *q, int *packets)
7600 +
7601 + void __qdisc_run(struct Qdisc *q)
7602 + {
7603 +- int quota = dev_tx_weight;
7604 ++ int quota = READ_ONCE(dev_tx_weight);
7605 + int packets;
7606 +
7607 + while (qdisc_restart(q, &packets)) {
7608 +diff --git a/net/socket.c b/net/socket.c
7609 +index 5053eb0100e48..73666b878f2ce 100644
7610 +--- a/net/socket.c
7611 ++++ b/net/socket.c
7612 +@@ -1721,7 +1721,7 @@ int __sys_listen(int fd, int backlog)
7613 +
7614 + sock = sockfd_lookup_light(fd, &err, &fput_needed);
7615 + if (sock) {
7616 +- somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn;
7617 ++ somaxconn = READ_ONCE(sock_net(sock->sk)->core.sysctl_somaxconn);
7618 + if ((unsigned int)backlog > somaxconn)
7619 + backlog = somaxconn;
7620 +
7621 +diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
7622 +index 6a035e9339d20..ca2a494d727b2 100644
7623 +--- a/net/sunrpc/clnt.c
7624 ++++ b/net/sunrpc/clnt.c
7625 +@@ -1881,7 +1881,7 @@ call_encode(struct rpc_task *task)
7626 + break;
7627 + case -EKEYEXPIRED:
7628 + if (!task->tk_cred_retry) {
7629 +- rpc_exit(task, task->tk_status);
7630 ++ rpc_call_rpcerror(task, task->tk_status);
7631 + } else {
7632 + task->tk_action = call_refresh;
7633 + task->tk_cred_retry--;
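
In call_encode(), when the credential is expired and no retries remain, the task used to finish through rpc_exit(); the fix routes it through rpc_call_rpcerror() instead, which, as far as the surrounding sunrpc code suggests, also latches the error in the task's persistent RPC status rather than only in tk_status, which later state-machine steps can overwrite. A toy illustration of that latching idea; these are not the actual sunrpc structures:

#include <errno.h>
#include <stdio.h>

struct task {
	int status;      /* transient, later steps may overwrite it */
	int rpc_status;  /* latched terminal error, survives to the caller */
	int cred_retries;
};

/* Illustrative stand-in for the EKEYEXPIRED branch: retry while we
 * can, otherwise latch the error so it cannot be lost later. */
static void handle_key_expired(struct task *t)
{
	if (t->cred_retries > 0) {
		t->cred_retries--; /* go back to the refresh step */
		return;
	}
	t->rpc_status = t->status; /* terminal: latch, then exit the task */
}

int main(void)
{
	struct task t = { .status = -EKEYEXPIRED, .rpc_status = 0,
			  .cred_retries = 0 };

	handle_key_expired(&t);
	printf("rpc_status = %d\n", t.rpc_status);
	return 0;
}
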
7634 +diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c
7635 +index 1f08ebf7d80c5..24ca49ecebea3 100644
7636 +--- a/net/xfrm/espintcp.c
7637 ++++ b/net/xfrm/espintcp.c
7638 +@@ -170,7 +170,7 @@ int espintcp_queue_out(struct sock *sk, struct sk_buff *skb)
7639 + {
7640 + struct espintcp_ctx *ctx = espintcp_getctx(sk);
7641 +
7642 +- if (skb_queue_len(&ctx->out_queue) >= netdev_max_backlog)
7643 ++ if (skb_queue_len(&ctx->out_queue) >= READ_ONCE(netdev_max_backlog))
7644 + return -ENOBUFS;
7645 +
7646 + __skb_queue_tail(&ctx->out_queue, skb);
7647 +diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
7648 +index 3df0861d4390f..5f34bc378fdcf 100644
7649 +--- a/net/xfrm/xfrm_input.c
7650 ++++ b/net/xfrm/xfrm_input.c
7651 +@@ -782,7 +782,7 @@ int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
7652 +
7653 + trans = this_cpu_ptr(&xfrm_trans_tasklet);
7654 +
7655 +- if (skb_queue_len(&trans->queue) >= netdev_max_backlog)
7656 ++ if (skb_queue_len(&trans->queue) >= READ_ONCE(netdev_max_backlog))
7657 + return -ENOBUFS;
7658 +
7659 + BUILD_BUG_ON(sizeof(struct xfrm_trans_cb) > sizeof(skb->cb));
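
The espintcp and xfrm_input hunks annotate the same tunable, netdev_max_backlog, whose admission checks run with no lock held against the sysctl writer. For such annotations to be complete, the writer side needs a matching WRITE_ONCE(); whether the corresponding sysctl handler already has one is outside these hunks. A sketch of the reader/writer pairing, with the macros again simplified to volatile accesses and the names illustrative:

#define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

int max_backlog = 1000;

/* "sysctl handler" side: one non-torn store */
void set_max_backlog(int v)
{
	WRITE_ONCE(max_backlog, v);
}

/* datapath side: admission check against the current limit */
int queue_has_room(int qlen)
{
	return qlen < READ_ONCE(max_backlog);
}

int main(void)
{
	set_max_backlog(2000);
	return queue_has_room(1500) ? 0 : 1;
}
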
7660 +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
7661 +index fb198f9490a0f..ba58b963f4827 100644
7662 +--- a/net/xfrm/xfrm_policy.c
7663 ++++ b/net/xfrm/xfrm_policy.c
7664 +@@ -3162,7 +3162,7 @@ ok:
7665 + return dst;
7666 +
7667 + nopol:
7668 +- if (!(dst_orig->dev->flags & IFF_LOOPBACK) &&
7669 ++ if ((!dst_orig->dev || !(dst_orig->dev->flags & IFF_LOOPBACK)) &&
7670 + net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
7671 + err = -EPERM;
7672 + goto error;
7673 +@@ -3600,6 +3600,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
7674 + if (pols[1]) {
7675 + if (IS_ERR(pols[1])) {
7676 + XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
7677 ++ xfrm_pol_put(pols[0]);
7678 + return 0;
7679 + }
7680 + pols[1]->curlft.use_time = ktime_get_real_seconds();
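
Two independent fixes land in xfrm_policy.c. The nopol path now checks dst_orig->dev for NULL before testing IFF_LOOPBACK, presumably covering routes that carry no device. And in __xfrm_policy_check(), the early return taken when the second policy lookup yields an error pointer used to leak the reference already held on pols[0]; the added xfrm_pol_put() releases it. A toy refcounting sketch of that leak shape, with error pointers simplified to NULL:

#include <stdlib.h>

struct pol { int refcnt; };

static struct pol *pol_get(void)
{
	struct pol *p = malloc(sizeof(*p));

	if (p)
		p->refcnt = 1;
	return p;
}

static void pol_put(struct pol *p)
{
	if (p && --p->refcnt == 0)
		free(p);
}

static int check(int second_lookup_fails)
{
	struct pol *pols[2];

	pols[0] = pol_get(); /* reference taken */
	if (!pols[0])
		return 0;
	pols[1] = second_lookup_fails ? NULL : pol_get();
	if (!pols[1]) {          /* lookup error path */
		pol_put(pols[0]); /* the fix: drop what we already hold */
		return 0;
	}
	/* ... use both policies ... */
	pol_put(pols[1]);
	pol_put(pols[0]);
	return 1;
}

int main(void)
{
	check(1);
	check(0);
	return 0;
}
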
7681 +diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
7682 +index b1a04a22166f7..15132b080614c 100644
7683 +--- a/net/xfrm/xfrm_state.c
7684 ++++ b/net/xfrm/xfrm_state.c
7685 +@@ -1591,6 +1591,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
7686 + x->replay = orig->replay;
7687 + x->preplay = orig->preplay;
7688 + x->mapping_maxage = orig->mapping_maxage;
7689 ++ x->lastused = orig->lastused;
7690 + x->new_mapping = 0;
7691 + x->new_mapping_sport = 0;
7692 +
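
xfrm_state_clone() copies the original state field by field, and lastused had been missed, so a clone (as created during XFRM migration, for instance) started life with a zero last-use time. Field-by-field cloning makes this an easy class of bug to reintroduce whenever a field is added; a defensive alternative is to copy the whole struct and then reset only the per-instance fields, as sketched below with toy types, not the kernel's:

struct state {
	long lastused; /* usage timestamp: must be carried over */
	int replay;
	int refcnt;    /* per-instance: must NOT be carried over */
};

static void clone_state(struct state *dst, const struct state *src)
{
	*dst = *src;     /* whole-struct copy: nothing can be forgotten... */
	dst->refcnt = 1; /* ...but per-instance fields need a fresh start */
}

int main(void)
{
	struct state orig = { .lastused = 12345, .replay = 7, .refcnt = 3 };
	struct state copy;

	clone_state(&copy, &orig);
	return copy.lastused == orig.lastused ? 0 : 1;
}
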
7693 +diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
7694 +index 1afc670474206..35e1f2a52435e 100644
7695 +--- a/tools/perf/Makefile.config
7696 ++++ b/tools/perf/Makefile.config
7697 +@@ -263,7 +263,7 @@ endif
7698 + # defined. get-executable-or-default fails with an error if the first argument is supplied but
7699 + # doesn't exist.
7700 + override PYTHON_CONFIG := $(call get-executable-or-default,PYTHON_CONFIG,$(PYTHON_AUTO))
7701 +-override PYTHON := $(call get-executable-or-default,PYTHON,$(subst -config,,$(PYTHON_AUTO)))
7702 ++override PYTHON := $(call get-executable-or-default,PYTHON,$(subst -config,,$(PYTHON_CONFIG)))
7703 +
7704 + grep-libs = $(filter -l%,$(1))
7705 + strip-libs = $(filter-out -l%,$(1))
7706 +diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
7707 +index 1a194edb54520..abf88a1ad455c 100644
7708 +--- a/tools/perf/builtin-stat.c
7709 ++++ b/tools/perf/builtin-stat.c
7710 +@@ -807,6 +807,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
7711 + return -1;
7712 +
7713 + evlist__for_each_entry(evsel_list, counter) {
7714 ++ counter->reset_group = false;
7715 + if (bpf_counter__load(counter, &target))
7716 + return -1;
7717 + if (!evsel__is_bpf(counter))
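
Finally, perf stat clears counter->reset_group before loading BPF counters: the evsel list is reused across repeated runs (perf stat -r N), and a flag left set by an earlier run, for instance after a weak group had to be broken, would otherwise leak into the next one and keep the counter from being set up again. A tiny sketch of the underlying rule, resetting per-run state on objects that outlive a single run; the types here are illustrative:

struct counter {
	int reset_group; /* set when a weak group had to be broken */
};

static void run_once(struct counter *counters, int n)
{
	/* The counters outlive a single run, so per-run flags must be
	 * cleared up front rather than trusted to still be zero. */
	for (int i = 0; i < n; i++)
		counters[i].reset_group = 0;

	/* ... open, enable and read the counters for this run ... */
}

int main(void)
{
	struct counter counters[4] = { { .reset_group = 1 } };

	run_once(counters, 4); /* stale flag from a previous run is gone */
	return counters[0].reset_group;
}
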