
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Thu, 11 Aug 2022 12:35:42
Message-Id: 1660221318.b9d80258c889404645a24e42c8b16c4be14eff1c.mpagano@gentoo
commit: b9d80258c889404645a24e42c8b16c4be14eff1c
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Aug 11 12:35:18 2022 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Aug 11 12:35:18 2022 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b9d80258

Linux patch 5.4.210

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1209_linux-5.4.210.patch | 950 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 954 insertions(+)

diff --git a/0000_README b/0000_README
index b02651d3..af9d9a39 100644
--- a/0000_README
+++ b/0000_README
@@ -879,6 +879,10 @@ Patch: 1208_linux-5.4.209.patch
From: http://www.kernel.org
Desc: Linux 5.4.209

+Patch: 1209_linux-5.4.210.patch
+From: http://www.kernel.org
+Desc: Linux 5.4.210
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1209_linux-5.4.210.patch b/1209_linux-5.4.210.patch
new file mode 100644
index 00000000..518f8030
--- /dev/null
+++ b/1209_linux-5.4.210.patch
@@ -0,0 +1,950 @@
+diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst
+index 6bd97cd50d625..7e061ed449aaa 100644
+--- a/Documentation/admin-guide/hw-vuln/spectre.rst
++++ b/Documentation/admin-guide/hw-vuln/spectre.rst
+@@ -422,6 +422,14 @@ The possible values in this file are:
+ 'RSB filling' Protection of RSB on context switch enabled
+ ============= ===========================================
+
++ - EIBRS Post-barrier Return Stack Buffer (PBRSB) protection status:
++
++ =========================== =======================================================
++ 'PBRSB-eIBRS: SW sequence' CPU is affected and protection of RSB on VMEXIT enabled
++ 'PBRSB-eIBRS: Vulnerable' CPU is vulnerable
++ 'PBRSB-eIBRS: Not affected' CPU is not affected by PBRSB
++ =========================== =======================================================
++
+ Full mitigation might require a microcode update from the CPU
+ vendor. When the necessary microcode is not available, the kernel will
+ report vulnerability.
+diff --git a/Makefile b/Makefile
+index 7093e3b03b9f7..74abb7e389f33 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 209
++SUBLEVEL = 210
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 8c28a2365a92b..a3e32bc938562 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -286,6 +286,7 @@
+ #define X86_FEATURE_CQM_MBM_LOCAL (11*32+ 3) /* LLC Local MBM monitoring */
+ #define X86_FEATURE_FENCE_SWAPGS_USER (11*32+ 4) /* "" LFENCE in user entry SWAPGS path */
+ #define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */
++#define X86_FEATURE_RSB_VMEXIT_LITE (11*32+ 6) /* "" Fill RSB on VM exit when EIBRS is enabled */
+
+ /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
+ #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */
+@@ -406,5 +407,6 @@
+ #define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
+ #define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
+ #define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
++#define X86_BUG_EIBRS_PBRSB X86_BUG(26) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
+
+ #endif /* _ASM_X86_CPUFEATURES_H */
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index c56042916a7c3..cef4eba03ff36 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -129,6 +129,10 @@
+ * bit available to control VERW
+ * behavior.
+ */
++#define ARCH_CAP_PBRSB_NO BIT(24) /*
++ * Not susceptible to Post-Barrier
++ * Return Stack Buffer Predictions.
++ */
+
+ #define MSR_IA32_FLUSH_CMD 0x0000010b
+ #define L1D_FLUSH BIT(0) /*
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index ece2b2c6d020d..1e5df3ccdd5cb 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -61,7 +61,16 @@
+ 774: \
+ dec reg; \
+ jnz 771b; \
+- add $(BITS_PER_LONG/8) * nr, sp;
++ add $(BITS_PER_LONG/8) * nr, sp; \
++ /* barrier for jnz misprediction */ \
++ lfence;
++
++#define __ISSUE_UNBALANCED_RET_GUARD(sp) \
++ call 881f; \
++ int3; \
++881: \
++ add $(BITS_PER_LONG/8), sp; \
++ lfence;
+
+ #ifdef __ASSEMBLY__
+
+@@ -130,6 +139,14 @@
+ #else
+ call *\reg
+ #endif
++.endm
++
++.macro ISSUE_UNBALANCED_RET_GUARD ftr:req
++ ANNOTATE_NOSPEC_ALTERNATIVE
++ ALTERNATIVE "jmp .Lskip_pbrsb_\@", \
++ __stringify(__ISSUE_UNBALANCED_RET_GUARD(%_ASM_SP)) \
++ \ftr
++.Lskip_pbrsb_\@:
+ .endm
+
+ /*
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 09d02b1f6f71f..57efa90f3fbd0 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1043,6 +1043,49 @@ static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
+ return SPECTRE_V2_RETPOLINE;
+ }
+
++static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
++{
++ /*
++ * Similar to context switches, there are two types of RSB attacks
++ * after VM exit:
++ *
++ * 1) RSB underflow
++ *
++ * 2) Poisoned RSB entry
++ *
++ * When retpoline is enabled, both are mitigated by filling/clearing
++ * the RSB.
++ *
++ * When IBRS is enabled, while #1 would be mitigated by the IBRS branch
++ * prediction isolation protections, RSB still needs to be cleared
++ * because of #2. Note that SMEP provides no protection here, unlike
++ * user-space-poisoned RSB entries.
++ *
++ * eIBRS should protect against RSB poisoning, but if the EIBRS_PBRSB
++ * bug is present then a LITE version of RSB protection is required,
++ * just a single call needs to retire before a RET is executed.
++ */
++ switch (mode) {
++ case SPECTRE_V2_NONE:
++ /* These modes already fill RSB at vmexit */
++ case SPECTRE_V2_LFENCE:
++ case SPECTRE_V2_RETPOLINE:
++ case SPECTRE_V2_EIBRS_RETPOLINE:
++ return;
++
++ case SPECTRE_V2_EIBRS_LFENCE:
++ case SPECTRE_V2_EIBRS:
++ if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
++ setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
++ pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
++ }
++ return;
++ }
++
++ pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit");
++ dump_stack();
++}
++
+ static void __init spectre_v2_select_mitigation(void)
+ {
+ enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
+@@ -1135,6 +1178,8 @@ static void __init spectre_v2_select_mitigation(void)
+ setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
+ pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
+
++ spectre_v2_determine_rsb_fill_type_at_vmexit(mode);
++
+ /*
+ * Retpoline means the kernel is safe because it has no indirect
+ * branches. Enhanced IBRS protects firmware too, so, enable restricted
+@@ -1879,6 +1924,19 @@ static char *ibpb_state(void)
+ return "";
+ }
+
++static char *pbrsb_eibrs_state(void)
++{
++ if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
++ if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) ||
++ boot_cpu_has(X86_FEATURE_RETPOLINE))
++ return ", PBRSB-eIBRS: SW sequence";
++ else
++ return ", PBRSB-eIBRS: Vulnerable";
++ } else {
++ return ", PBRSB-eIBRS: Not affected";
++ }
++}
++
+ static ssize_t spectre_v2_show_state(char *buf)
+ {
+ if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
+@@ -1891,12 +1949,13 @@ static ssize_t spectre_v2_show_state(char *buf)
+ spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
+ return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
+
+- return sprintf(buf, "%s%s%s%s%s%s\n",
++ return sprintf(buf, "%s%s%s%s%s%s%s\n",
+ spectre_v2_strings[spectre_v2_enabled],
+ ibpb_state(),
+ boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+ stibp_state(),
+ boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
++ pbrsb_eibrs_state(),
+ spectre_v2_module_string());
+ }
+
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 305f30e45f3d3..b926b7244d42d 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1025,6 +1025,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
+ #define NO_SWAPGS BIT(6)
+ #define NO_ITLB_MULTIHIT BIT(7)
+ #define NO_SPECTRE_V2 BIT(8)
++#define NO_EIBRS_PBRSB BIT(9)
+
+ #define VULNWL(_vendor, _family, _model, _whitelist) \
+ { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
+@@ -1065,7 +1066,7 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
+
+ VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+- VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
++ VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
+
+ /*
+ * Technically, swapgs isn't serializing on AMD (despite it previously
+@@ -1075,7 +1076,9 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
+ * good enough for our purposes.
+ */
+
+- VULNWL_INTEL(ATOM_TREMONT_D, NO_ITLB_MULTIHIT),
++ VULNWL_INTEL(ATOM_TREMONT, NO_EIBRS_PBRSB),
++ VULNWL_INTEL(ATOM_TREMONT_L, NO_EIBRS_PBRSB),
++ VULNWL_INTEL(ATOM_TREMONT_D, NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
+
+ /* AMD Family 0xf - 0x12 */
+ VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+@@ -1236,6 +1239,11 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ !arch_cap_mmio_immune(ia32_cap))
+ setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
+
++ if (cpu_has(c, X86_FEATURE_IBRS_ENHANCED) &&
++ !cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
++ !(ia32_cap & ARCH_CAP_PBRSB_NO))
++ setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
++
+ if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
+ return;
+
+diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
+index ca4252f81bf81..946d9205c3b6d 100644
+--- a/arch/x86/kvm/vmx/vmenter.S
++++ b/arch/x86/kvm/vmx/vmenter.S
+@@ -92,6 +92,7 @@ ENTRY(vmx_vmexit)
+ pop %_ASM_AX
+ .Lvmexit_skip_rsb:
+ #endif
++ ISSUE_UNBALANCED_RET_GUARD X86_FEATURE_RSB_VMEXIT_LITE
+ ret
+ ENDPROC(vmx_vmexit)
+
+diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c
+index 76b7539a37a93..a06f35528c9a7 100644
+--- a/drivers/acpi/apei/bert.c
++++ b/drivers/acpi/apei/bert.c
+@@ -29,16 +29,26 @@
+
+ #undef pr_fmt
+ #define pr_fmt(fmt) "BERT: " fmt
++
++#define ACPI_BERT_PRINT_MAX_RECORDS 5
+ #define ACPI_BERT_PRINT_MAX_LEN 1024
+
+ static int bert_disable;
+
++/*
++ * Print "all" the error records in the BERT table, but avoid huge spam to
++ * the console if the BIOS included oversize records, or too many records.
++ * Skipping some records here does not lose anything because the full
++ * data is available to user tools in:
++ * /sys/firmware/acpi/tables/data/BERT
++ */
+ static void __init bert_print_all(struct acpi_bert_region *region,
+ unsigned int region_len)
+ {
+ struct acpi_hest_generic_status *estatus =
+ (struct acpi_hest_generic_status *)region;
+ int remain = region_len;
++ int printed = 0, skipped = 0;
+ u32 estatus_len;
+
+ while (remain >= sizeof(struct acpi_bert_region)) {
+@@ -46,24 +56,26 @@ static void __init bert_print_all(struct acpi_bert_region *region,
+ if (remain < estatus_len) {
+ pr_err(FW_BUG "Truncated status block (length: %u).\n",
+ estatus_len);
+- return;
++ break;
+ }
+
+ /* No more error records. */
+ if (!estatus->block_status)
+- return;
++ break;
+
+ if (cper_estatus_check(estatus)) {
+ pr_err(FW_BUG "Invalid error record.\n");
+- return;
++ break;
+ }
+
+- pr_info_once("Error records from previous boot:\n");
+- if (region_len < ACPI_BERT_PRINT_MAX_LEN)
++ if (estatus_len < ACPI_BERT_PRINT_MAX_LEN &&
++ printed < ACPI_BERT_PRINT_MAX_RECORDS) {
++ pr_info_once("Error records from previous boot:\n");
+ cper_estatus_print(KERN_INFO HW_ERR, estatus);
+- else
+- pr_info_once("Max print length exceeded, table data is available at:\n"
+- "/sys/firmware/acpi/tables/data/BERT");
++ printed++;
++ } else {
++ skipped++;
++ }
+
+ /*
+ * Because the boot error source is "one-time polled" type,
+@@ -75,6 +87,9 @@ static void __init bert_print_all(struct acpi_bert_region *region,
+ estatus = (void *)estatus + estatus_len;
+ remain -= estatus_len;
+ }
++
++ if (skipped)
++ pr_info(HW_ERR "Skipped %d error records\n", skipped);
+ }
+
+ static int __init setup_bert_disable(char *str)
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index de4142723ff48..3b972ca536896 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -387,7 +387,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ .callback = video_detect_force_native,
+ .ident = "Clevo NL5xRU",
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
+ },
+ },
+@@ -395,59 +394,75 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ .callback = video_detect_force_native,
+ .ident = "Clevo NL5xRU",
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
+- DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
++ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
++ DMI_MATCH(DMI_BOARD_NAME, "AURA1501"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "Clevo NL5xRU",
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+- DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
++ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
++ DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+- .ident = "Clevo NL5xRU",
++ .ident = "Clevo NL5xNU",
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+- DMI_MATCH(DMI_BOARD_NAME, "AURA1501"),
++ DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
+ },
+ },
++ /*
++ * The TongFang PF5PU1G, PF4NU1F, PF5NU1G, and PF5LUXG/TUXEDO BA15 Gen10,
++ * Pulse 14/15 Gen1, and Pulse 15 Gen2 have the same problem as the Clevo
++ * NL5xRU and NL5xNU/TUXEDO Aura 15 Gen1 and Gen2. See the description
++ * above.
++ */
+ {
+ .callback = video_detect_force_native,
+- .ident = "Clevo NL5xRU",
++ .ident = "TongFang PF5PU1G",
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+- DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"),
++ DMI_MATCH(DMI_BOARD_NAME, "PF5PU1G"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+- .ident = "Clevo NL5xNU",
++ .ident = "TongFang PF4NU1F",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_NAME, "PF4NU1F"),
++ },
++ },
++ {
++ .callback = video_detect_force_native,
++ .ident = "TongFang PF4NU1F",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+- DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
++ DMI_MATCH(DMI_BOARD_NAME, "PULSE1401"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+- .ident = "Clevo NL5xNU",
++ .ident = "TongFang PF5NU1G",
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
+- DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
++ DMI_MATCH(DMI_BOARD_NAME, "PF5NU1G"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+- .ident = "Clevo NL5xNU",
++ .ident = "TongFang PF5NU1G",
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+- DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
++ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
++ DMI_MATCH(DMI_BOARD_NAME, "PULSE1501"),
++ },
++ },
++ {
++ .callback = video_detect_force_native,
++ .ident = "TongFang PF5LUXG",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_NAME, "PF5LUXG"),
+ },
+ },
+-
+ /*
+ * Desktops which falsely report a backlight and which our heuristics
+ * for this do not catch.
479 +index e49d1f287a175..c37d5fce86f79 100644
480 +--- a/drivers/macintosh/adb.c
481 ++++ b/drivers/macintosh/adb.c
482 +@@ -647,7 +647,7 @@ do_adb_query(struct adb_request *req)
483 +
484 + switch(req->data[1]) {
485 + case ADB_QUERY_GETDEVINFO:
486 +- if (req->nbytes < 3)
487 ++ if (req->nbytes < 3 || req->data[2] >= 16)
488 + break;
489 + mutex_lock(&adb_handler_mutex);
490 + req->reply[0] = adb_handler[req->data[2]].original_address;
+diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
+index 639dc8d45e603..d56837c04a81a 100644
+--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
++++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
+@@ -460,19 +460,14 @@ int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ }
+ EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
+
+-int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+- struct v4l2_buffer *buf)
++static void v4l2_m2m_adjust_mem_offset(struct vb2_queue *vq,
++ struct v4l2_buffer *buf)
+ {
+- struct vb2_queue *vq;
+- int ret = 0;
+- unsigned int i;
+-
+- vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
+- ret = vb2_querybuf(vq, buf);
+-
+ /* Adjust MMAP memory offsets for the CAPTURE queue */
+ if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
+ if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
++ unsigned int i;
++
+ for (i = 0; i < buf->length; ++i)
+ buf->m.planes[i].m.mem_offset
+ += DST_QUEUE_OFF_BASE;
+@@ -480,8 +475,23 @@ int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ buf->m.offset += DST_QUEUE_OFF_BASE;
+ }
+ }
++}
+
+- return ret;
++int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
++ struct v4l2_buffer *buf)
++{
++ struct vb2_queue *vq;
++ int ret;
++
++ vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
++ ret = vb2_querybuf(vq, buf);
++ if (ret)
++ return ret;
++
++ /* Adjust MMAP memory offsets for the CAPTURE queue */
++ v4l2_m2m_adjust_mem_offset(vq, buf);
++
++ return 0;
+ }
+ EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
+
+@@ -500,10 +510,16 @@ int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ return -EPERM;
+ }
+ ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf);
+- if (!ret && !(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
++ if (ret)
++ return ret;
++
++ /* Adjust MMAP memory offsets for the CAPTURE queue */
++ v4l2_m2m_adjust_mem_offset(vq, buf);
++
++ if (!(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
+ v4l2_m2m_try_schedule(m2m_ctx);
+
+- return ret;
++ return 0;
+ }
+ EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);
+
+@@ -511,9 +527,17 @@ int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_buffer *buf)
+ {
+ struct vb2_queue *vq;
++ int ret;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
+- return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
++ ret = vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
++ if (ret)
++ return ret;
++
++ /* Adjust MMAP memory offsets for the CAPTURE queue */
++ v4l2_m2m_adjust_mem_offset(vq, buf);
++
++ return 0;
+ }
+ EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
+
+@@ -522,9 +546,17 @@ int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ {
+ struct video_device *vdev = video_devdata(file);
+ struct vb2_queue *vq;
++ int ret;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
+- return vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
++ ret = vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
++ if (ret)
++ return ret;
++
++ /* Adjust MMAP memory offsets for the CAPTURE queue */
++ v4l2_m2m_adjust_mem_offset(vq, buf);
++
++ return 0;
+ }
+ EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);
+
+diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
+index 68d0c181ec7bb..1f38da5da6e45 100644
+--- a/drivers/thermal/of-thermal.c
++++ b/drivers/thermal/of-thermal.c
+@@ -91,7 +91,7 @@ static int of_thermal_get_temp(struct thermal_zone_device *tz,
+ {
+ struct __thermal_zone *data = tz->devdata;
+
+- if (!data->ops->get_temp)
++ if (!data->ops || !data->ops->get_temp)
+ return -EINVAL;
+
+ return data->ops->get_temp(data->sensor_data, temp);
+@@ -188,6 +188,9 @@ static int of_thermal_set_emul_temp(struct thermal_zone_device *tz,
+ {
+ struct __thermal_zone *data = tz->devdata;
+
++ if (!data->ops || !data->ops->set_emul_temp)
++ return -EINVAL;
++
+ return data->ops->set_emul_temp(data->sensor_data, temp);
+ }
+
+@@ -196,7 +199,7 @@ static int of_thermal_get_trend(struct thermal_zone_device *tz, int trip,
+ {
+ struct __thermal_zone *data = tz->devdata;
+
+- if (!data->ops->get_trend)
++ if (!data->ops || !data->ops->get_trend)
+ return -EINVAL;
+
+ return data->ops->get_trend(data->sensor_data, trip, trend);
+@@ -336,7 +339,7 @@ static int of_thermal_set_trip_temp(struct thermal_zone_device *tz, int trip,
+ if (trip >= data->ntrips || trip < 0)
+ return -EDOM;
+
+- if (data->ops->set_trip_temp) {
++ if (data->ops && data->ops->set_trip_temp) {
+ int ret;
+
+ ret = data->ops->set_trip_temp(data->sensor_data, trip, temp);
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 34262d83dce11..f705d3752fe0d 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -5083,6 +5083,7 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
+ coerce_reg_to_size(dst_reg, 4);
+ }
+
++ __update_reg_bounds(dst_reg);
+ __reg_deduce_bounds(dst_reg);
+ __reg_bound_offset(dst_reg);
+ return 0;
+diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
+index 4133c721af6ed..59f924e92c284 100644
+--- a/tools/arch/x86/include/asm/cpufeatures.h
++++ b/tools/arch/x86/include/asm/cpufeatures.h
+@@ -284,6 +284,7 @@
+ #define X86_FEATURE_CQM_MBM_LOCAL (11*32+ 3) /* LLC Local MBM monitoring */
+ #define X86_FEATURE_FENCE_SWAPGS_USER (11*32+ 4) /* "" LFENCE in user entry SWAPGS path */
+ #define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */
++#define X86_FEATURE_RSB_VMEXIT_LITE (11*32+ 6) /* "" Fill RSB on VM-Exit when EIBRS is enabled */
+
+ /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
+ #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */
+diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
+index 0bfad86ec960a..cb0631098f918 100644
+--- a/tools/include/uapi/linux/bpf.h
++++ b/tools/include/uapi/linux/bpf.h
+@@ -3068,7 +3068,8 @@ struct bpf_sock {
+ __u32 src_ip4;
+ __u32 src_ip6[4];
+ __u32 src_port; /* host byte order */
+- __u32 dst_port; /* network byte order */
++ __be16 dst_port; /* network byte order */
++ __u16 :16; /* zero padding */
+ __u32 dst_ip4;
+ __u32 dst_ip6[4];
+ __u32 state;
+diff --git a/tools/testing/selftests/bpf/test_align.c b/tools/testing/selftests/bpf/test_align.c
+index 0262f7b374f9c..4b9a26caa2c2e 100644
+--- a/tools/testing/selftests/bpf/test_align.c
++++ b/tools/testing/selftests/bpf/test_align.c
+@@ -359,15 +359,15 @@ static struct bpf_align_test tests[] = {
+ * is still (4n), fixed offset is not changed.
+ * Also, we create a new reg->id.
+ */
+- {29, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc))"},
++ {29, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"},
+ /* At the time the word size load is performed from R5,
+ * its total fixed offset is NET_IP_ALIGN + reg->off (18)
+ * which is 20. Then the variable offset is (4n), so
+ * the total offset is 4-byte aligned and meets the
+ * load's requirements.
+ */
+- {33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"},
+- {33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"},
++ {33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"},
++ {33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"},
+ },
+ },
+ {
+@@ -410,15 +410,15 @@ static struct bpf_align_test tests[] = {
+ /* Adding 14 makes R6 be (4n+2) */
+ {9, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
+ /* Packet pointer has (4n+2) offset */
+- {11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
+- {13, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
++ {11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
++ {13, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
+ /* At the time the word size load is performed from R5,
+ * its total fixed offset is NET_IP_ALIGN + reg->off (0)
+ * which is 2. Then the variable offset is (4n+2), so
+ * the total offset is 4-byte aligned and meets the
+ * load's requirements.
+ */
+- {15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
++ {15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
+ /* Newly read value in R6 was shifted left by 2, so has
+ * known alignment of 4.
+ */
+@@ -426,15 +426,15 @@ static struct bpf_align_test tests[] = {
+ /* Added (4n) to packet pointer's (4n+2) var_off, giving
+ * another (4n+2).
+ */
+- {19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
+- {21, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
++ {19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
++ {21, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
+ /* At the time the word size load is performed from R5,
+ * its total fixed offset is NET_IP_ALIGN + reg->off (0)
+ * which is 2. Then the variable offset is (4n+2), so
+ * the total offset is 4-byte aligned and meets the
+ * load's requirements.
+ */
+- {23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
++ {23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
+ },
+ },
+ {
+@@ -469,16 +469,16 @@ static struct bpf_align_test tests[] = {
+ .matches = {
+ {4, "R5_w=pkt_end(id=0,off=0,imm=0)"},
+ /* (ptr - ptr) << 2 == unknown, (4n) */
+- {6, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc))"},
++ {6, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc)"},
+ /* (4n) + 14 == (4n+2). We blow our bounds, because
+ * the add could overflow.
+ */
+- {7, "R5_w=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"},
++ {7, "R5_w=inv(id=0,smin_value=-9223372036854775806,smax_value=9223372036854775806,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
+ /* Checked s>=0 */
+- {9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
++ {9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
+ /* packet pointer + nonnegative (4n+2) */
+- {11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
+- {13, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
++ {11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
++ {13, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
+ /* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
+ * We checked the bounds, but it might have been able
+ * to overflow if the packet pointer started in the
+@@ -486,7 +486,7 @@ static struct bpf_align_test tests[] = {
+ * So we did not get a 'range' on R6, and the access
+ * attempt will fail.
+ */
+- {15, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
++ {15, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
+ }
+ },
+ {
+@@ -528,7 +528,7 @@ static struct bpf_align_test tests[] = {
+ /* New unknown value in R7 is (4n) */
+ {11, "R7_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ /* Subtracting it from R6 blows our unsigned bounds */
+- {12, "R6=inv(id=0,smin_value=-1006,smax_value=1034,var_off=(0x2; 0xfffffffffffffffc))"},
++ {12, "R6=inv(id=0,smin_value=-1006,smax_value=1034,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
+ /* Checked s>= 0 */
+ {14, "R6=inv(id=0,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
+ /* At the time the word size load is performed from R5,
+@@ -537,7 +537,8 @@ static struct bpf_align_test tests[] = {
+ * the total offset is 4-byte aligned and meets the
+ * load's requirements.
+ */
+- {20, "R5=pkt(id=1,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
++ {20, "R5=pkt(id=1,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc)"},
++
+ },
+ },
+ {
+@@ -579,18 +580,18 @@ static struct bpf_align_test tests[] = {
+ /* Adding 14 makes R6 be (4n+2) */
+ {11, "R6_w=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"},
+ /* Subtracting from packet pointer overflows ubounds */
+- {13, "R5_w=pkt(id=1,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c))"},
++ {13, "R5_w=pkt(id=1,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c)"},
+ /* New unknown value in R7 is (4n), >= 76 */
+ {15, "R7_w=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"},
+ /* Adding it to packet pointer gives nice bounds again */
+- {16, "R5_w=pkt(id=2,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"},
++ {16, "R5_w=pkt(id=2,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"},
+ /* At the time the word size load is performed from R5,
+ * its total fixed offset is NET_IP_ALIGN + reg->off (0)
+ * which is 2. Then the variable offset is (4n+2), so
+ * the total offset is 4-byte aligned and meets the
+ * load's requirements.
+ */
+- {20, "R5=pkt(id=2,off=0,r=4,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"},
++ {20, "R5=pkt(id=2,off=0,r=4,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"},
+ },
+ },
+ };
+diff --git a/tools/testing/selftests/bpf/verifier/bounds.c b/tools/testing/selftests/bpf/verifier/bounds.c
+index 92c02e4a1b626..313b345eddcc3 100644
+--- a/tools/testing/selftests/bpf/verifier/bounds.c
++++ b/tools/testing/selftests/bpf/verifier/bounds.c
+@@ -411,16 +411,14 @@
+ BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 31),
+ /* r1 = 0xffff'fffe (NOT 0!) */
+ BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 2),
+- /* computes OOB pointer */
++ /* error on computing OOB pointer */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+- /* OOB access */
+- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+- .errstr = "R0 invalid mem access",
++ .errstr = "math between map_value pointer and 4294967294 is not allowed",
+ .result = REJECT,
+ },
+ {
+diff --git a/tools/testing/selftests/bpf/verifier/sock.c b/tools/testing/selftests/bpf/verifier/sock.c
+index 9ed192e14f5fe..b2ce50bb935b8 100644
+--- a/tools/testing/selftests/bpf/verifier/sock.c
++++ b/tools/testing/selftests/bpf/verifier/sock.c
+@@ -121,7 +121,25 @@
+ .result = ACCEPT,
+ },
+ {
+- "sk_fullsock(skb->sk): sk->dst_port [narrow load]",
++ "sk_fullsock(skb->sk): sk->dst_port [word load] (backward compatibility)",
++ .insns = {
++ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
++ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
++ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port)),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
++ .result = ACCEPT,
++},
++{
++ "sk_fullsock(skb->sk): sk->dst_port [half load]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+@@ -139,7 +157,64 @@
+ .result = ACCEPT,
+ },
+ {
+- "sk_fullsock(skb->sk): sk->dst_port [load 2nd byte]",
++ "sk_fullsock(skb->sk): sk->dst_port [half load] (invalid)",
++ .insns = {
++ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
++ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
++ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 2),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
++ .result = REJECT,
++ .errstr = "invalid sock access",
++},
++{
++ "sk_fullsock(skb->sk): sk->dst_port [byte load]",
++ .insns = {
++ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
++ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
++ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_0, offsetof(struct bpf_sock, dst_port)),
++ BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 1),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
++ .result = ACCEPT,
++},
++{
++ "sk_fullsock(skb->sk): sk->dst_port [byte load] (invalid)",
++ .insns = {
++ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
++ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
++ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 2),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
++ .result = REJECT,
++ .errstr = "invalid sock access",
++},
++{
++ "sk_fullsock(skb->sk): past sk->dst_port [half load] (invalid)",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+@@ -149,7 +224,7 @@
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 1),
++ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_0, offsetofend(struct bpf_sock, dst_port)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+diff --git a/tools/testing/selftests/kvm/lib/aarch64/ucall.c b/tools/testing/selftests/kvm/lib/aarch64/ucall.c
+index 6cd91970fbad3..3b2a426070c44 100644
+--- a/tools/testing/selftests/kvm/lib/aarch64/ucall.c
++++ b/tools/testing/selftests/kvm/lib/aarch64/ucall.c
+@@ -73,20 +73,19 @@ void ucall_uninit(struct kvm_vm *vm)
+
+ void ucall(uint64_t cmd, int nargs, ...)
+ {
+- struct ucall uc = {
+- .cmd = cmd,
+- };
++ struct ucall uc = {};
+ va_list va;
+ int i;
+
++ WRITE_ONCE(uc.cmd, cmd);
+ nargs = nargs <= UCALL_MAX_ARGS ? nargs : UCALL_MAX_ARGS;
+
+ va_start(va, nargs);
+ for (i = 0; i < nargs; ++i)
+- uc.args[i] = va_arg(va, uint64_t);
++ WRITE_ONCE(uc.args[i], va_arg(va, uint64_t));
+ va_end(va);
+
+- *ucall_exit_mmio_addr = (vm_vaddr_t)&uc;
++ WRITE_ONCE(*ucall_exit_mmio_addr, (vm_vaddr_t)&uc);
+ }
+
+ uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 287444e52ccf8..4b445dddb7985 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -3329,8 +3329,11 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
+ kvm_put_kvm(kvm);
+ mutex_lock(&kvm->lock);
+ list_del(&dev->vm_node);
++ if (ops->release)
++ ops->release(dev);
+ mutex_unlock(&kvm->lock);
+- ops->destroy(dev);
++ if (ops->destroy)
++ ops->destroy(dev);
+ return ret;
+ }
+
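
For anyone who wants to sanity-check the new PBRSB reporting after booting a kernel with this patch applied, below is a minimal userspace C sketch. It is not part of the patch; it assumes only what the spectre.rst hunk above documents, namely that the PBRSB-eIBRS status strings are appended to the existing /sys/devices/system/cpu/vulnerabilities/spectre_v2 line. The file name pbrsb_check.c is just a placeholder.

/*
 * pbrsb_check.c - hedged sketch, not part of the 5.4.210 patch.
 * Build: cc -o pbrsb_check pbrsb_check.c
 * Reads the spectre_v2 vulnerability line and reports the PBRSB-eIBRS
 * status using the three strings documented in the spectre.rst hunk above.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *path = "/sys/devices/system/cpu/vulnerabilities/spectre_v2";
	char buf[512];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (!fgets(buf, sizeof(buf), f)) {
		fprintf(stderr, "could not read %s\n", path);
		fclose(f);
		return 1;
	}
	fclose(f);

	/* The substrings below come from pbrsb_eibrs_state() in the patch. */
	if (strstr(buf, "PBRSB-eIBRS: SW sequence"))
		puts("PBRSB mitigated: unbalanced-RET guard retired on VMEXIT");
	else if (strstr(buf, "PBRSB-eIBRS: Vulnerable"))
		puts("CPU affected by PBRSB; RSB not protected on VMEXIT");
	else if (strstr(buf, "PBRSB-eIBRS: Not affected"))
		puts("CPU not affected by PBRSB");
	else
		puts("No PBRSB-eIBRS field; kernel likely predates this patch");
	return 0;
}

The sysfs path and the general spectre_v2 line format are long-standing kernel ABI; only the three PBRSB-eIBRS substrings are new with 5.4.210, so on older kernels the sketch falls through to the last branch.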