Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Wed, 30 Jun 2021 14:24:31
Message-Id: 1625063057.aad8672f9a4d60a64e628aaf62f88040cda97b4e.mpagano@gentoo
1 commit: aad8672f9a4d60a64e628aaf62f88040cda97b4e
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Jun 30 14:24:17 2021 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Jun 30 14:24:17 2021 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=aad8672f
7
8 Linux patch 5.4.129
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1128_linux-5.4.129.patch | 2952 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 2956 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index cb07352..bedf8ea 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -555,6 +555,10 @@ Patch: 1127_linux-5.4.128.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.4.128
23
24 +Patch: 1128_linux-5.4.129.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.4.129
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1128_linux-5.4.129.patch b/1128_linux-5.4.129.patch
33 new file mode 100644
34 index 0000000..ff82b9f
35 --- /dev/null
36 +++ b/1128_linux-5.4.129.patch
37 @@ -0,0 +1,2952 @@
38 +diff --git a/Makefile b/Makefile
39 +index 5db87d8031f1e..802520ad08cca 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 5
45 + PATCHLEVEL = 4
46 +-SUBLEVEL = 128
47 ++SUBLEVEL = 129
48 + EXTRAVERSION =
49 + NAME = Kleptomaniac Octopus
50 +
51 +diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
52 +index 924285d0bccd9..43d6a6085d862 100644
53 +--- a/arch/arm/kernel/setup.c
54 ++++ b/arch/arm/kernel/setup.c
55 +@@ -544,9 +544,11 @@ void notrace cpu_init(void)
56 + * In Thumb-2, msr with an immediate value is not allowed.
57 + */
58 + #ifdef CONFIG_THUMB2_KERNEL
59 +-#define PLC "r"
60 ++#define PLC_l "l"
61 ++#define PLC_r "r"
62 + #else
63 +-#define PLC "I"
64 ++#define PLC_l "I"
65 ++#define PLC_r "I"
66 + #endif
67 +
68 + /*
69 +@@ -568,15 +570,15 @@ void notrace cpu_init(void)
70 + "msr cpsr_c, %9"
71 + :
72 + : "r" (stk),
73 +- PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
74 ++ PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
75 + "I" (offsetof(struct stack, irq[0])),
76 +- PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
77 ++ PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
78 + "I" (offsetof(struct stack, abt[0])),
79 +- PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
80 ++ PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE),
81 + "I" (offsetof(struct stack, und[0])),
82 +- PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
83 ++ PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
84 + "I" (offsetof(struct stack, fiq[0])),
85 +- PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
86 ++ PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
87 + : "r14");
88 + #endif
89 + }
90 +diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
91 +index cd8f3cdabfd07..d227cf87c48f3 100644
92 +--- a/arch/arm64/Makefile
93 ++++ b/arch/arm64/Makefile
94 +@@ -10,7 +10,7 @@
95 + #
96 + # Copyright (C) 1995-2001 by Russell King
97 +
98 +-LDFLAGS_vmlinux :=--no-undefined -X -z norelro
99 ++LDFLAGS_vmlinux :=--no-undefined -X
100 + CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
101 + GZFLAGS :=-9
102 +
103 +@@ -82,17 +82,21 @@ CHECKFLAGS += -D__AARCH64EB__
104 + AS += -EB
105 + # Prefer the baremetal ELF build target, but not all toolchains include
106 + # it so fall back to the standard linux version if needed.
107 +-KBUILD_LDFLAGS += -EB $(call ld-option, -maarch64elfb, -maarch64linuxb)
108 ++KBUILD_LDFLAGS += -EB $(call ld-option, -maarch64elfb, -maarch64linuxb -z norelro)
109 + UTS_MACHINE := aarch64_be
110 + else
111 + KBUILD_CPPFLAGS += -mlittle-endian
112 + CHECKFLAGS += -D__AARCH64EL__
113 + AS += -EL
114 + # Same as above, prefer ELF but fall back to linux target if needed.
115 +-KBUILD_LDFLAGS += -EL $(call ld-option, -maarch64elf, -maarch64linux)
116 ++KBUILD_LDFLAGS += -EL $(call ld-option, -maarch64elf, -maarch64linux -z norelro)
117 + UTS_MACHINE := aarch64
118 + endif
119 +
120 ++ifeq ($(CONFIG_LD_IS_LLD), y)
121 ++KBUILD_LDFLAGS += -z norelro
122 ++endif
123 ++
124 + CHECKFLAGS += -D__aarch64__
125 +
126 + ifeq ($(CONFIG_ARM64_MODULE_PLTS),y)
127 +diff --git a/arch/mips/generic/board-boston.its.S b/arch/mips/generic/board-boston.its.S
128 +index a7f51f97b9102..c45ad27594218 100644
129 +--- a/arch/mips/generic/board-boston.its.S
130 ++++ b/arch/mips/generic/board-boston.its.S
131 +@@ -1,22 +1,22 @@
132 + / {
133 + images {
134 +- fdt@boston {
135 ++ fdt-boston {
136 + description = "img,boston Device Tree";
137 + data = /incbin/("boot/dts/img/boston.dtb");
138 + type = "flat_dt";
139 + arch = "mips";
140 + compression = "none";
141 +- hash@0 {
142 ++ hash {
143 + algo = "sha1";
144 + };
145 + };
146 + };
147 +
148 + configurations {
149 +- conf@boston {
150 ++ conf-boston {
151 + description = "Boston Linux kernel";
152 +- kernel = "kernel@0";
153 +- fdt = "fdt@boston";
154 ++ kernel = "kernel";
155 ++ fdt = "fdt-boston";
156 + };
157 + };
158 + };
159 +diff --git a/arch/mips/generic/board-ni169445.its.S b/arch/mips/generic/board-ni169445.its.S
160 +index e4cb4f95a8cc1..0a2e8f7a8526f 100644
161 +--- a/arch/mips/generic/board-ni169445.its.S
162 ++++ b/arch/mips/generic/board-ni169445.its.S
163 +@@ -1,22 +1,22 @@
164 + / {
165 + images {
166 +- fdt@ni169445 {
167 ++ fdt-ni169445 {
168 + description = "NI 169445 device tree";
169 + data = /incbin/("boot/dts/ni/169445.dtb");
170 + type = "flat_dt";
171 + arch = "mips";
172 + compression = "none";
173 +- hash@0 {
174 ++ hash {
175 + algo = "sha1";
176 + };
177 + };
178 + };
179 +
180 + configurations {
181 +- conf@ni169445 {
182 ++ conf-ni169445 {
183 + description = "NI 169445 Linux Kernel";
184 +- kernel = "kernel@0";
185 +- fdt = "fdt@ni169445";
186 ++ kernel = "kernel";
187 ++ fdt = "fdt-ni169445";
188 + };
189 + };
190 + };
191 +diff --git a/arch/mips/generic/board-ocelot.its.S b/arch/mips/generic/board-ocelot.its.S
192 +index 3da23988149a6..8c7e3a1b68d3d 100644
193 +--- a/arch/mips/generic/board-ocelot.its.S
194 ++++ b/arch/mips/generic/board-ocelot.its.S
195 +@@ -1,40 +1,40 @@
196 + /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
197 + / {
198 + images {
199 +- fdt@ocelot_pcb123 {
200 ++ fdt-ocelot_pcb123 {
201 + description = "MSCC Ocelot PCB123 Device Tree";
202 + data = /incbin/("boot/dts/mscc/ocelot_pcb123.dtb");
203 + type = "flat_dt";
204 + arch = "mips";
205 + compression = "none";
206 +- hash@0 {
207 ++ hash {
208 + algo = "sha1";
209 + };
210 + };
211 +
212 +- fdt@ocelot_pcb120 {
213 ++ fdt-ocelot_pcb120 {
214 + description = "MSCC Ocelot PCB120 Device Tree";
215 + data = /incbin/("boot/dts/mscc/ocelot_pcb120.dtb");
216 + type = "flat_dt";
217 + arch = "mips";
218 + compression = "none";
219 +- hash@0 {
220 ++ hash {
221 + algo = "sha1";
222 + };
223 + };
224 + };
225 +
226 + configurations {
227 +- conf@ocelot_pcb123 {
228 ++ conf-ocelot_pcb123 {
229 + description = "Ocelot Linux kernel";
230 +- kernel = "kernel@0";
231 +- fdt = "fdt@ocelot_pcb123";
232 ++ kernel = "kernel";
233 ++ fdt = "fdt-ocelot_pcb123";
234 + };
235 +
236 +- conf@ocelot_pcb120 {
237 ++ conf-ocelot_pcb120 {
238 + description = "Ocelot Linux kernel";
239 +- kernel = "kernel@0";
240 +- fdt = "fdt@ocelot_pcb120";
241 ++ kernel = "kernel";
242 ++ fdt = "fdt-ocelot_pcb120";
243 + };
244 + };
245 + };
246 +diff --git a/arch/mips/generic/board-xilfpga.its.S b/arch/mips/generic/board-xilfpga.its.S
247 +index a2e773d3f14f4..08c1e900eb4ed 100644
248 +--- a/arch/mips/generic/board-xilfpga.its.S
249 ++++ b/arch/mips/generic/board-xilfpga.its.S
250 +@@ -1,22 +1,22 @@
251 + / {
252 + images {
253 +- fdt@xilfpga {
254 ++ fdt-xilfpga {
255 + description = "MIPSfpga (xilfpga) Device Tree";
256 + data = /incbin/("boot/dts/xilfpga/nexys4ddr.dtb");
257 + type = "flat_dt";
258 + arch = "mips";
259 + compression = "none";
260 +- hash@0 {
261 ++ hash {
262 + algo = "sha1";
263 + };
264 + };
265 + };
266 +
267 + configurations {
268 +- conf@xilfpga {
269 ++ conf-xilfpga {
270 + description = "MIPSfpga Linux kernel";
271 +- kernel = "kernel@0";
272 +- fdt = "fdt@xilfpga";
273 ++ kernel = "kernel";
274 ++ fdt = "fdt-xilfpga";
275 + };
276 + };
277 + };
278 +diff --git a/arch/mips/generic/vmlinux.its.S b/arch/mips/generic/vmlinux.its.S
279 +index 1a08438fd8930..3e254676540f4 100644
280 +--- a/arch/mips/generic/vmlinux.its.S
281 ++++ b/arch/mips/generic/vmlinux.its.S
282 +@@ -6,7 +6,7 @@
283 + #address-cells = <ADDR_CELLS>;
284 +
285 + images {
286 +- kernel@0 {
287 ++ kernel {
288 + description = KERNEL_NAME;
289 + data = /incbin/(VMLINUX_BINARY);
290 + type = "kernel";
291 +@@ -15,18 +15,18 @@
292 + compression = VMLINUX_COMPRESSION;
293 + load = /bits/ ADDR_BITS <VMLINUX_LOAD_ADDRESS>;
294 + entry = /bits/ ADDR_BITS <VMLINUX_ENTRY_ADDRESS>;
295 +- hash@0 {
296 ++ hash {
297 + algo = "sha1";
298 + };
299 + };
300 + };
301 +
302 + configurations {
303 +- default = "conf@default";
304 ++ default = "conf-default";
305 +
306 +- conf@default {
307 ++ conf-default {
308 + description = "Generic Linux kernel";
309 +- kernel = "kernel@0";
310 ++ kernel = "kernel";
311 + };
312 + };
313 + };
314 +diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
315 +index 0c67a5a94de30..76959a7d88c82 100644
316 +--- a/arch/x86/pci/fixup.c
317 ++++ b/arch/x86/pci/fixup.c
318 +@@ -779,4 +779,48 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
319 + DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
320 + DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
321 +
322 ++#define RS690_LOWER_TOP_OF_DRAM2 0x30
323 ++#define RS690_LOWER_TOP_OF_DRAM2_VALID 0x1
324 ++#define RS690_UPPER_TOP_OF_DRAM2 0x31
325 ++#define RS690_HTIU_NB_INDEX 0xA8
326 ++#define RS690_HTIU_NB_INDEX_WR_ENABLE 0x100
327 ++#define RS690_HTIU_NB_DATA 0xAC
328 ++
329 ++/*
330 ++ * Some BIOS implementations support RAM above 4GB, but do not configure the
331 ++ * PCI host to respond to bus master accesses for these addresses. These
332 ++ * implementations set the TOP_OF_DRAM_SLOT1 register correctly, so PCI DMA
333 ++ * works as expected for addresses below 4GB.
334 ++ *
335 ++ * Reference: "AMD RS690 ASIC Family Register Reference Guide" (pg. 2-57)
336 ++ * https://www.amd.com/system/files/TechDocs/43372_rs690_rrg_3.00o.pdf
337 ++ */
338 ++static void rs690_fix_64bit_dma(struct pci_dev *pdev)
339 ++{
340 ++ u32 val = 0;
341 ++ phys_addr_t top_of_dram = __pa(high_memory - 1) + 1;
342 ++
343 ++ if (top_of_dram <= (1ULL << 32))
344 ++ return;
345 ++
346 ++ pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
347 ++ RS690_LOWER_TOP_OF_DRAM2);
348 ++ pci_read_config_dword(pdev, RS690_HTIU_NB_DATA, &val);
349 ++
350 ++ if (val)
351 ++ return;
352 ++
353 ++ pci_info(pdev, "Adjusting top of DRAM to %pa for 64-bit DMA support\n", &top_of_dram);
354 ++
355 ++ pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
356 ++ RS690_UPPER_TOP_OF_DRAM2 | RS690_HTIU_NB_INDEX_WR_ENABLE);
357 ++ pci_write_config_dword(pdev, RS690_HTIU_NB_DATA, top_of_dram >> 32);
358 ++
359 ++ pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
360 ++ RS690_LOWER_TOP_OF_DRAM2 | RS690_HTIU_NB_INDEX_WR_ENABLE);
361 ++ pci_write_config_dword(pdev, RS690_HTIU_NB_DATA,
362 ++ top_of_dram | RS690_LOWER_TOP_OF_DRAM2_VALID);
363 ++}
364 ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7910, rs690_fix_64bit_dma);
365 ++
366 + #endif
367 +diff --git a/certs/Kconfig b/certs/Kconfig
368 +index c94e93d8bccf0..76e469b56a773 100644
369 +--- a/certs/Kconfig
370 ++++ b/certs/Kconfig
371 +@@ -83,4 +83,13 @@ config SYSTEM_BLACKLIST_HASH_LIST
372 + wrapper to incorporate the list into the kernel. Each <hash> should
373 + be a string of hex digits.
374 +
375 ++config SYSTEM_REVOCATION_LIST
376 ++ bool "Provide system-wide ring of revocation certificates"
377 ++ depends on SYSTEM_BLACKLIST_KEYRING
378 ++ depends on PKCS7_MESSAGE_PARSER=y
379 ++ help
380 ++ If set, this allows revocation certificates to be stored in the
381 ++ blacklist keyring and implements a hook whereby a PKCS#7 message can
382 ++ be checked to see if it matches such a certificate.
383 ++
384 + endmenu
385 +diff --git a/certs/Makefile b/certs/Makefile
386 +index f4c25b67aad90..f4b90bad8690a 100644
387 +--- a/certs/Makefile
388 ++++ b/certs/Makefile
389 +@@ -3,7 +3,7 @@
390 + # Makefile for the linux kernel signature checking certificates.
391 + #
392 +
393 +-obj-$(CONFIG_SYSTEM_TRUSTED_KEYRING) += system_keyring.o system_certificates.o
394 ++obj-$(CONFIG_SYSTEM_TRUSTED_KEYRING) += system_keyring.o system_certificates.o common.o
395 + obj-$(CONFIG_SYSTEM_BLACKLIST_KEYRING) += blacklist.o
396 + ifneq ($(CONFIG_SYSTEM_BLACKLIST_HASH_LIST),"")
397 + obj-$(CONFIG_SYSTEM_BLACKLIST_KEYRING) += blacklist_hashes.o
398 +diff --git a/certs/blacklist.c b/certs/blacklist.c
399 +index 025a41de28fda..59b2f106b2940 100644
400 +--- a/certs/blacklist.c
401 ++++ b/certs/blacklist.c
402 +@@ -135,6 +135,58 @@ int is_hash_blacklisted(const u8 *hash, size_t hash_len, const char *type)
403 + }
404 + EXPORT_SYMBOL_GPL(is_hash_blacklisted);
405 +
406 ++int is_binary_blacklisted(const u8 *hash, size_t hash_len)
407 ++{
408 ++ if (is_hash_blacklisted(hash, hash_len, "bin") == -EKEYREJECTED)
409 ++ return -EPERM;
410 ++
411 ++ return 0;
412 ++}
413 ++EXPORT_SYMBOL_GPL(is_binary_blacklisted);
414 ++
415 ++#ifdef CONFIG_SYSTEM_REVOCATION_LIST
416 ++/**
417 ++ * add_key_to_revocation_list - Add a revocation certificate to the blacklist
418 ++ * @data: The data blob containing the certificate
419 ++ * @size: The size of data blob
420 ++ */
421 ++int add_key_to_revocation_list(const char *data, size_t size)
422 ++{
423 ++ key_ref_t key;
424 ++
425 ++ key = key_create_or_update(make_key_ref(blacklist_keyring, true),
426 ++ "asymmetric",
427 ++ NULL,
428 ++ data,
429 ++ size,
430 ++ ((KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW),
431 ++ KEY_ALLOC_NOT_IN_QUOTA | KEY_ALLOC_BUILT_IN);
432 ++
433 ++ if (IS_ERR(key)) {
434 ++ pr_err("Problem with revocation key (%ld)\n", PTR_ERR(key));
435 ++ return PTR_ERR(key);
436 ++ }
437 ++
438 ++ return 0;
439 ++}
440 ++
441 ++/**
442 ++ * is_key_on_revocation_list - Determine if the key for a PKCS#7 message is revoked
443 ++ * @pkcs7: The PKCS#7 message to check
444 ++ */
445 ++int is_key_on_revocation_list(struct pkcs7_message *pkcs7)
446 ++{
447 ++ int ret;
448 ++
449 ++ ret = pkcs7_validate_trust(pkcs7, blacklist_keyring);
450 ++
451 ++ if (ret == 0)
452 ++ return -EKEYREJECTED;
453 ++
454 ++ return -ENOKEY;
455 ++}
456 ++#endif
457 ++
458 + /*
459 + * Initialise the blacklist
460 + */
461 +diff --git a/certs/blacklist.h b/certs/blacklist.h
462 +index 1efd6fa0dc608..51b320cf85749 100644
463 +--- a/certs/blacklist.h
464 ++++ b/certs/blacklist.h
465 +@@ -1,3 +1,5 @@
466 + #include <linux/kernel.h>
467 ++#include <linux/errno.h>
468 ++#include <crypto/pkcs7.h>
469 +
470 + extern const char __initconst *const blacklist_hashes[];
471 +diff --git a/certs/common.c b/certs/common.c
472 +new file mode 100644
473 +index 0000000000000..16a220887a53e
474 +--- /dev/null
475 ++++ b/certs/common.c
476 +@@ -0,0 +1,57 @@
477 ++// SPDX-License-Identifier: GPL-2.0-or-later
478 ++
479 ++#include <linux/kernel.h>
480 ++#include <linux/key.h>
481 ++#include "common.h"
482 ++
483 ++int load_certificate_list(const u8 cert_list[],
484 ++ const unsigned long list_size,
485 ++ const struct key *keyring)
486 ++{
487 ++ key_ref_t key;
488 ++ const u8 *p, *end;
489 ++ size_t plen;
490 ++
491 ++ p = cert_list;
492 ++ end = p + list_size;
493 ++ while (p < end) {
494 ++ /* Each cert begins with an ASN.1 SEQUENCE tag and must be more
495 ++ * than 256 bytes in size.
496 ++ */
497 ++ if (end - p < 4)
498 ++ goto dodgy_cert;
499 ++ if (p[0] != 0x30 &&
500 ++ p[1] != 0x82)
501 ++ goto dodgy_cert;
502 ++ plen = (p[2] << 8) | p[3];
503 ++ plen += 4;
504 ++ if (plen > end - p)
505 ++ goto dodgy_cert;
506 ++
507 ++ key = key_create_or_update(make_key_ref(keyring, 1),
508 ++ "asymmetric",
509 ++ NULL,
510 ++ p,
511 ++ plen,
512 ++ ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
513 ++ KEY_USR_VIEW | KEY_USR_READ),
514 ++ KEY_ALLOC_NOT_IN_QUOTA |
515 ++ KEY_ALLOC_BUILT_IN |
516 ++ KEY_ALLOC_BYPASS_RESTRICTION);
517 ++ if (IS_ERR(key)) {
518 ++ pr_err("Problem loading in-kernel X.509 certificate (%ld)\n",
519 ++ PTR_ERR(key));
520 ++ } else {
521 ++ pr_notice("Loaded X.509 cert '%s'\n",
522 ++ key_ref_to_ptr(key)->description);
523 ++ key_ref_put(key);
524 ++ }
525 ++ p += plen;
526 ++ }
527 ++
528 ++ return 0;
529 ++
530 ++dodgy_cert:
531 ++ pr_err("Problem parsing in-kernel X.509 certificate list\n");
532 ++ return 0;
533 ++}
534 +diff --git a/certs/common.h b/certs/common.h
535 +new file mode 100644
536 +index 0000000000000..abdb5795936b7
537 +--- /dev/null
538 ++++ b/certs/common.h
539 +@@ -0,0 +1,9 @@
540 ++/* SPDX-License-Identifier: GPL-2.0-or-later */
541 ++
542 ++#ifndef _CERT_COMMON_H
543 ++#define _CERT_COMMON_H
544 ++
545 ++int load_certificate_list(const u8 cert_list[], const unsigned long list_size,
546 ++ const struct key *keyring);
547 ++
548 ++#endif
549 +diff --git a/certs/system_keyring.c b/certs/system_keyring.c
550 +index 798291177186c..a44a8915c94cf 100644
551 +--- a/certs/system_keyring.c
552 ++++ b/certs/system_keyring.c
553 +@@ -15,6 +15,7 @@
554 + #include <keys/asymmetric-type.h>
555 + #include <keys/system_keyring.h>
556 + #include <crypto/pkcs7.h>
557 ++#include "common.h"
558 +
559 + static struct key *builtin_trusted_keys;
560 + #ifdef CONFIG_SECONDARY_TRUSTED_KEYRING
561 +@@ -136,54 +137,10 @@ device_initcall(system_trusted_keyring_init);
562 + */
563 + static __init int load_system_certificate_list(void)
564 + {
565 +- key_ref_t key;
566 +- const u8 *p, *end;
567 +- size_t plen;
568 +-
569 + pr_notice("Loading compiled-in X.509 certificates\n");
570 +
571 +- p = system_certificate_list;
572 +- end = p + system_certificate_list_size;
573 +- while (p < end) {
574 +- /* Each cert begins with an ASN.1 SEQUENCE tag and must be more
575 +- * than 256 bytes in size.
576 +- */
577 +- if (end - p < 4)
578 +- goto dodgy_cert;
579 +- if (p[0] != 0x30 &&
580 +- p[1] != 0x82)
581 +- goto dodgy_cert;
582 +- plen = (p[2] << 8) | p[3];
583 +- plen += 4;
584 +- if (plen > end - p)
585 +- goto dodgy_cert;
586 +-
587 +- key = key_create_or_update(make_key_ref(builtin_trusted_keys, 1),
588 +- "asymmetric",
589 +- NULL,
590 +- p,
591 +- plen,
592 +- ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
593 +- KEY_USR_VIEW | KEY_USR_READ),
594 +- KEY_ALLOC_NOT_IN_QUOTA |
595 +- KEY_ALLOC_BUILT_IN |
596 +- KEY_ALLOC_BYPASS_RESTRICTION);
597 +- if (IS_ERR(key)) {
598 +- pr_err("Problem loading in-kernel X.509 certificate (%ld)\n",
599 +- PTR_ERR(key));
600 +- } else {
601 +- pr_notice("Loaded X.509 cert '%s'\n",
602 +- key_ref_to_ptr(key)->description);
603 +- key_ref_put(key);
604 +- }
605 +- p += plen;
606 +- }
607 +-
608 +- return 0;
609 +-
610 +-dodgy_cert:
611 +- pr_err("Problem parsing in-kernel X.509 certificate list\n");
612 +- return 0;
613 ++ return load_certificate_list(system_certificate_list, system_certificate_list_size,
614 ++ builtin_trusted_keys);
615 + }
616 + late_initcall(load_system_certificate_list);
617 +
618 +@@ -241,6 +198,12 @@ int verify_pkcs7_message_sig(const void *data, size_t len,
619 + pr_devel("PKCS#7 platform keyring is not available\n");
620 + goto error;
621 + }
622 ++
623 ++ ret = is_key_on_revocation_list(pkcs7);
624 ++ if (ret != -ENOKEY) {
625 ++ pr_devel("PKCS#7 platform key is on revocation list\n");
626 ++ goto error;
627 ++ }
628 + }
629 + ret = pkcs7_validate_trust(pkcs7, trusted_keys);
630 + if (ret < 0) {
631 +diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
632 +index f40051d6aecbc..9c0ea13ca7883 100644
633 +--- a/drivers/dma/mediatek/mtk-uart-apdma.c
634 ++++ b/drivers/dma/mediatek/mtk-uart-apdma.c
635 +@@ -131,10 +131,7 @@ static unsigned int mtk_uart_apdma_read(struct mtk_chan *c, unsigned int reg)
636 +
637 + static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd)
638 + {
639 +- struct dma_chan *chan = vd->tx.chan;
640 +- struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
641 +-
642 +- kfree(c->desc);
643 ++ kfree(container_of(vd, struct mtk_uart_apdma_desc, vd));
644 + }
645 +
646 + static void mtk_uart_apdma_start_tx(struct mtk_chan *c)
647 +@@ -207,14 +204,9 @@ static void mtk_uart_apdma_start_rx(struct mtk_chan *c)
648 +
649 + static void mtk_uart_apdma_tx_handler(struct mtk_chan *c)
650 + {
651 +- struct mtk_uart_apdma_desc *d = c->desc;
652 +-
653 + mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);
654 + mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
655 + mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
656 +-
657 +- list_del(&d->vd.node);
658 +- vchan_cookie_complete(&d->vd);
659 + }
660 +
661 + static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
662 +@@ -245,9 +237,17 @@ static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
663 +
664 + c->rx_status = d->avail_len - cnt;
665 + mtk_uart_apdma_write(c, VFF_RPT, wg);
666 ++}
667 +
668 +- list_del(&d->vd.node);
669 +- vchan_cookie_complete(&d->vd);
670 ++static void mtk_uart_apdma_chan_complete_handler(struct mtk_chan *c)
671 ++{
672 ++ struct mtk_uart_apdma_desc *d = c->desc;
673 ++
674 ++ if (d) {
675 ++ list_del(&d->vd.node);
676 ++ vchan_cookie_complete(&d->vd);
677 ++ c->desc = NULL;
678 ++ }
679 + }
680 +
681 + static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
682 +@@ -261,6 +261,7 @@ static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
683 + mtk_uart_apdma_rx_handler(c);
684 + else if (c->dir == DMA_MEM_TO_DEV)
685 + mtk_uart_apdma_tx_handler(c);
686 ++ mtk_uart_apdma_chan_complete_handler(c);
687 + spin_unlock_irqrestore(&c->vc.lock, flags);
688 +
689 + return IRQ_HANDLED;
690 +@@ -348,7 +349,7 @@ static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg
691 + return NULL;
692 +
693 + /* Now allocate and setup the descriptor */
694 +- d = kzalloc(sizeof(*d), GFP_ATOMIC);
695 ++ d = kzalloc(sizeof(*d), GFP_NOWAIT);
696 + if (!d)
697 + return NULL;
698 +
699 +@@ -366,7 +367,7 @@ static void mtk_uart_apdma_issue_pending(struct dma_chan *chan)
700 + unsigned long flags;
701 +
702 + spin_lock_irqsave(&c->vc.lock, flags);
703 +- if (vchan_issue_pending(&c->vc)) {
704 ++ if (vchan_issue_pending(&c->vc) && !c->desc) {
705 + vd = vchan_next_desc(&c->vc);
706 + c->desc = to_mtk_uart_apdma_desc(&vd->tx);
707 +
708 +diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
709 +index 3993ab65c62cd..89eb9ea258149 100644
710 +--- a/drivers/dma/sh/rcar-dmac.c
711 ++++ b/drivers/dma/sh/rcar-dmac.c
712 +@@ -1855,7 +1855,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
713 +
714 + /* Enable runtime PM and initialize the device. */
715 + pm_runtime_enable(&pdev->dev);
716 +- ret = pm_runtime_get_sync(&pdev->dev);
717 ++ ret = pm_runtime_resume_and_get(&pdev->dev);
718 + if (ret < 0) {
719 + dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
720 + return ret;
721 +diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
722 +index d47749a35863f..84009c5e0f330 100644
723 +--- a/drivers/dma/xilinx/zynqmp_dma.c
724 ++++ b/drivers/dma/xilinx/zynqmp_dma.c
725 +@@ -467,7 +467,7 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
726 + struct zynqmp_dma_desc_sw *desc;
727 + int i, ret;
728 +
729 +- ret = pm_runtime_get_sync(chan->dev);
730 ++ ret = pm_runtime_resume_and_get(chan->dev);
731 + if (ret < 0)
732 + return ret;
733 +
734 +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
735 +index 9964ec0035ede..1d8739a4fbcad 100644
736 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
737 ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
738 +@@ -3416,12 +3416,8 @@ static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
739 + if (ring->use_doorbell) {
740 + WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
741 + (adev->doorbell_index.kiq * 2) << 2);
742 +- /* If GC has entered CGPG, ringing doorbell > first page doesn't
743 +- * wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to workaround
744 +- * this issue.
745 +- */
746 + WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
747 +- (adev->doorbell.size - 4));
748 ++ (adev->doorbell_index.userqueue_end * 2) << 2);
749 + }
750 +
751 + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
752 +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
753 +index 354da41f52def..06cdc22b5501d 100644
754 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
755 ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
756 +@@ -3593,12 +3593,8 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
757 + if (ring->use_doorbell) {
758 + WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
759 + (adev->doorbell_index.kiq * 2) << 2);
760 +- /* If GC has entered CGPG, ringing doorbell > first page doesn't
761 +- * wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to workaround
762 +- * this issue.
763 +- */
764 + WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
765 +- (adev->doorbell.size - 4));
766 ++ (adev->doorbell_index.userqueue_end * 2) << 2);
767 + }
768 +
769 + WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
770 +diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
771 +index bae6a3eccee0b..f9ee562f72d33 100644
772 +--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
773 ++++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
774 +@@ -112,7 +112,22 @@ int nouveau_gem_prime_pin(struct drm_gem_object *obj)
775 + if (ret)
776 + return -EINVAL;
777 +
778 +- return 0;
779 ++ ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
780 ++ if (ret)
781 ++ goto error;
782 ++
783 ++ if (nvbo->bo.moving)
784 ++ ret = dma_fence_wait(nvbo->bo.moving, true);
785 ++
786 ++ ttm_bo_unreserve(&nvbo->bo);
787 ++ if (ret)
788 ++ goto error;
789 ++
790 ++ return ret;
791 ++
792 ++error:
793 ++ nouveau_bo_unpin(nvbo);
794 ++ return ret;
795 + }
796 +
797 + void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
798 +diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
799 +index b906e8fbd5f3a..7bc33a80934c4 100644
800 +--- a/drivers/gpu/drm/radeon/radeon_prime.c
801 ++++ b/drivers/gpu/drm/radeon/radeon_prime.c
802 +@@ -94,9 +94,19 @@ int radeon_gem_prime_pin(struct drm_gem_object *obj)
803 +
804 + /* pin buffer into GTT */
805 + ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
806 +- if (likely(ret == 0))
807 +- bo->prime_shared_count++;
808 +-
809 ++ if (unlikely(ret))
810 ++ goto error;
811 ++
812 ++ if (bo->tbo.moving) {
813 ++ ret = dma_fence_wait(bo->tbo.moving, false);
814 ++ if (unlikely(ret)) {
815 ++ radeon_bo_unpin(bo);
816 ++ goto error;
817 ++ }
818 ++ }
819 ++
820 ++ bo->prime_shared_count++;
821 ++error:
822 + radeon_bo_unreserve(bo);
823 + return ret;
824 + }
825 +diff --git a/drivers/i2c/busses/i2c-robotfuzz-osif.c b/drivers/i2c/busses/i2c-robotfuzz-osif.c
826 +index a39f7d0927973..66dfa211e736b 100644
827 +--- a/drivers/i2c/busses/i2c-robotfuzz-osif.c
828 ++++ b/drivers/i2c/busses/i2c-robotfuzz-osif.c
829 +@@ -83,7 +83,7 @@ static int osif_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
830 + }
831 + }
832 +
833 +- ret = osif_usb_read(adapter, OSIFI2C_STOP, 0, 0, NULL, 0);
834 ++ ret = osif_usb_write(adapter, OSIFI2C_STOP, 0, 0, NULL, 0);
835 + if (ret) {
836 + dev_err(&adapter->dev, "failure sending STOP\n");
837 + return -EREMOTEIO;
838 +@@ -153,7 +153,7 @@ static int osif_probe(struct usb_interface *interface,
839 + * Set bus frequency. The frequency is:
840 + * 120,000,000 / ( 16 + 2 * div * 4^prescale).
841 + * Using dev = 52, prescale = 0 give 100KHz */
842 +- ret = osif_usb_read(&priv->adapter, OSIFI2C_SET_BIT_RATE, 52, 0,
843 ++ ret = osif_usb_write(&priv->adapter, OSIFI2C_SET_BIT_RATE, 52, 0,
844 + NULL, 0);
845 + if (ret) {
846 + dev_err(&interface->dev, "failure sending bit rate");
847 +diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
848 +index 545c3f2f8a06c..a3e3b274f0ea3 100644
849 +--- a/drivers/mmc/host/meson-gx-mmc.c
850 ++++ b/drivers/mmc/host/meson-gx-mmc.c
851 +@@ -166,6 +166,7 @@ struct meson_host {
852 +
853 + unsigned int bounce_buf_size;
854 + void *bounce_buf;
855 ++ void __iomem *bounce_iomem_buf;
856 + dma_addr_t bounce_dma_addr;
857 + struct sd_emmc_desc *descs;
858 + dma_addr_t descs_dma_addr;
859 +@@ -737,6 +738,47 @@ static void meson_mmc_desc_chain_transfer(struct mmc_host *mmc, u32 cmd_cfg)
860 + writel(start, host->regs + SD_EMMC_START);
861 + }
862 +
863 ++/* local sg copy to buffer version with _to/fromio usage for dram_access_quirk */
864 ++static void meson_mmc_copy_buffer(struct meson_host *host, struct mmc_data *data,
865 ++ size_t buflen, bool to_buffer)
866 ++{
867 ++ unsigned int sg_flags = SG_MITER_ATOMIC;
868 ++ struct scatterlist *sgl = data->sg;
869 ++ unsigned int nents = data->sg_len;
870 ++ struct sg_mapping_iter miter;
871 ++ unsigned int offset = 0;
872 ++
873 ++ if (to_buffer)
874 ++ sg_flags |= SG_MITER_FROM_SG;
875 ++ else
876 ++ sg_flags |= SG_MITER_TO_SG;
877 ++
878 ++ sg_miter_start(&miter, sgl, nents, sg_flags);
879 ++
880 ++ while ((offset < buflen) && sg_miter_next(&miter)) {
881 ++ unsigned int len;
882 ++
883 ++ len = min(miter.length, buflen - offset);
884 ++
885 ++ /* When dram_access_quirk, the bounce buffer is a iomem mapping */
886 ++ if (host->dram_access_quirk) {
887 ++ if (to_buffer)
888 ++ memcpy_toio(host->bounce_iomem_buf + offset, miter.addr, len);
889 ++ else
890 ++ memcpy_fromio(miter.addr, host->bounce_iomem_buf + offset, len);
891 ++ } else {
892 ++ if (to_buffer)
893 ++ memcpy(host->bounce_buf + offset, miter.addr, len);
894 ++ else
895 ++ memcpy(miter.addr, host->bounce_buf + offset, len);
896 ++ }
897 ++
898 ++ offset += len;
899 ++ }
900 ++
901 ++ sg_miter_stop(&miter);
902 ++}
903 ++
904 + static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
905 + {
906 + struct meson_host *host = mmc_priv(mmc);
907 +@@ -780,8 +822,7 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
908 + if (data->flags & MMC_DATA_WRITE) {
909 + cmd_cfg |= CMD_CFG_DATA_WR;
910 + WARN_ON(xfer_bytes > host->bounce_buf_size);
911 +- sg_copy_to_buffer(data->sg, data->sg_len,
912 +- host->bounce_buf, xfer_bytes);
913 ++ meson_mmc_copy_buffer(host, data, xfer_bytes, true);
914 + dma_wmb();
915 + }
916 +
917 +@@ -950,8 +991,7 @@ static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
918 + if (meson_mmc_bounce_buf_read(data)) {
919 + xfer_bytes = data->blksz * data->blocks;
920 + WARN_ON(xfer_bytes > host->bounce_buf_size);
921 +- sg_copy_from_buffer(data->sg, data->sg_len,
922 +- host->bounce_buf, xfer_bytes);
923 ++ meson_mmc_copy_buffer(host, data, xfer_bytes, false);
924 + }
925 +
926 + next_cmd = meson_mmc_get_next_command(cmd);
927 +@@ -1179,7 +1219,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
928 + * instead of the DDR memory
929 + */
930 + host->bounce_buf_size = SD_EMMC_SRAM_DATA_BUF_LEN;
931 +- host->bounce_buf = host->regs + SD_EMMC_SRAM_DATA_BUF_OFF;
932 ++ host->bounce_iomem_buf = host->regs + SD_EMMC_SRAM_DATA_BUF_OFF;
933 + host->bounce_dma_addr = res->start + SD_EMMC_SRAM_DATA_BUF_OFF;
934 + } else {
935 + /* data bounce buffer */
936 +diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
937 +index 0f2bee59a82b0..0bc7f6518fb32 100644
938 +--- a/drivers/net/caif/caif_serial.c
939 ++++ b/drivers/net/caif/caif_serial.c
940 +@@ -351,6 +351,7 @@ static int ldisc_open(struct tty_struct *tty)
941 + rtnl_lock();
942 + result = register_netdevice(dev);
943 + if (result) {
944 ++ tty_kref_put(tty);
945 + rtnl_unlock();
946 + free_netdev(dev);
947 + return -ENODEV;
948 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
949 +index 5c6a276f69ac4..426b8098c50ee 100644
950 +--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
951 ++++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
952 +@@ -1293,9 +1293,11 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
953 + p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_STATIC;
954 +
955 + p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled;
956 ++ BUILD_BUG_ON(sizeof(dcbx_info->operational.params) !=
957 ++ sizeof(p_hwfn->p_dcbx_info->set.config.params));
958 + memcpy(&p_hwfn->p_dcbx_info->set.config.params,
959 + &dcbx_info->operational.params,
960 +- sizeof(struct qed_dcbx_admin_params));
961 ++ sizeof(p_hwfn->p_dcbx_info->set.config.params));
962 + p_hwfn->p_dcbx_info->set.config.valid = true;
963 +
964 + memcpy(params, &p_hwfn->p_dcbx_info->set, sizeof(struct qed_dcbx_set));
965 +diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
966 +index 8ff178fc2670c..661202e854121 100644
967 +--- a/drivers/net/ethernet/realtek/r8169_main.c
968 ++++ b/drivers/net/ethernet/realtek/r8169_main.c
969 +@@ -1801,7 +1801,7 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
970 + {
971 + switch(stringset) {
972 + case ETH_SS_STATS:
973 +- memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
974 ++ memcpy(data, rtl8169_gstrings, sizeof(rtl8169_gstrings));
975 + break;
976 + }
977 + }
978 +diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
979 +index a042f4607b0d0..931a44fe7afe8 100644
980 +--- a/drivers/net/ethernet/renesas/sh_eth.c
981 ++++ b/drivers/net/ethernet/renesas/sh_eth.c
982 +@@ -2322,7 +2322,7 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
983 + {
984 + switch (stringset) {
985 + case ETH_SS_STATS:
986 +- memcpy(data, *sh_eth_gstrings_stats,
987 ++ memcpy(data, sh_eth_gstrings_stats,
988 + sizeof(sh_eth_gstrings_stats));
989 + break;
990 + }
991 +diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
992 +index 9b55fbdc3a7c6..9a7af7dda70dc 100644
993 +--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
994 ++++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
995 +@@ -770,12 +770,15 @@ static void temac_start_xmit_done(struct net_device *ndev)
996 + stat = be32_to_cpu(cur_p->app0);
997 +
998 + while (stat & STS_CTRL_APP0_CMPLT) {
999 ++ /* Make sure that the other fields are read after bd is
1000 ++ * released by dma
1001 ++ */
1002 ++ rmb();
1003 + dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
1004 + be32_to_cpu(cur_p->len), DMA_TO_DEVICE);
1005 + skb = (struct sk_buff *)ptr_from_txbd(cur_p);
1006 + if (skb)
1007 + dev_consume_skb_irq(skb);
1008 +- cur_p->app0 = 0;
1009 + cur_p->app1 = 0;
1010 + cur_p->app2 = 0;
1011 + cur_p->app3 = 0;
1012 +@@ -784,6 +787,12 @@ static void temac_start_xmit_done(struct net_device *ndev)
1013 + ndev->stats.tx_packets++;
1014 + ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);
1015 +
1016 ++ /* app0 must be visible last, as it is used to flag
1017 ++ * availability of the bd
1018 ++ */
1019 ++ smp_mb();
1020 ++ cur_p->app0 = 0;
1021 ++
1022 + lp->tx_bd_ci++;
1023 + if (lp->tx_bd_ci >= TX_BD_NUM)
1024 + lp->tx_bd_ci = 0;
1025 +@@ -810,6 +819,9 @@ static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
1026 + if (cur_p->app0)
1027 + return NETDEV_TX_BUSY;
1028 +
1029 ++ /* Make sure to read next bd app0 after this one */
1030 ++ rmb();
1031 ++
1032 + tail++;
1033 + if (tail >= TX_BD_NUM)
1034 + tail = 0;
1035 +@@ -927,6 +939,11 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1036 + wmb();
1037 + lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
1038 +
1039 ++ if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
1040 ++ netdev_info(ndev, "%s -> netif_stop_queue\n", __func__);
1041 ++ netif_stop_queue(ndev);
1042 ++ }
1043 ++
1044 + return NETDEV_TX_OK;
1045 + }
1046 +
1047 +diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
1048 +index 31a5595133628..87c0cdbf262ae 100644
1049 +--- a/drivers/net/phy/dp83867.c
1050 ++++ b/drivers/net/phy/dp83867.c
1051 +@@ -468,16 +468,12 @@ static int dp83867_phy_reset(struct phy_device *phydev)
1052 + {
1053 + int err;
1054 +
1055 +- err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESET);
1056 ++ err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESTART);
1057 + if (err < 0)
1058 + return err;
1059 +
1060 + usleep_range(10, 20);
1061 +
1062 +- /* After reset FORCE_LINK_GOOD bit is set. Although the
1063 +- * default value should be unset. Disable FORCE_LINK_GOOD
1064 +- * for the phy to work properly.
1065 +- */
1066 + return phy_modify(phydev, MII_DP83867_PHYCTRL,
1067 + DP83867_PHYCR_FORCE_LINK_GOOD, 0);
1068 + }
1069 +diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
1070 +index f6d643ecaf39b..24d1246330375 100644
1071 +--- a/drivers/net/usb/r8152.c
1072 ++++ b/drivers/net/usb/r8152.c
1073 +@@ -5065,7 +5065,7 @@ static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1074 + {
1075 + switch (stringset) {
1076 + case ETH_SS_STATS:
1077 +- memcpy(data, *rtl8152_gstrings, sizeof(rtl8152_gstrings));
1078 ++ memcpy(data, rtl8152_gstrings, sizeof(rtl8152_gstrings));
1079 + break;
1080 + }
1081 + }
1082 +diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
1083 +index c48c68090d762..1033513d3d9de 100644
1084 +--- a/drivers/net/wireless/mac80211_hwsim.c
1085 ++++ b/drivers/net/wireless/mac80211_hwsim.c
1086 +@@ -1458,8 +1458,13 @@ static int mac80211_hwsim_start(struct ieee80211_hw *hw)
1087 + static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
1088 + {
1089 + struct mac80211_hwsim_data *data = hw->priv;
1090 ++
1091 + data->started = false;
1092 + hrtimer_cancel(&data->beacon_timer);
1093 ++
1094 ++ while (!skb_queue_empty(&data->pending))
1095 ++ ieee80211_free_txskb(hw, skb_dequeue(&data->pending));
1096 ++
1097 + wiphy_dbg(hw->wiphy, "%s\n", __func__);
1098 + }
1099 +
1100 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
1101 +index 34a06e89e176a..3c3bc9f584983 100644
1102 +--- a/drivers/pci/pci.c
1103 ++++ b/drivers/pci/pci.c
1104 +@@ -1666,11 +1666,21 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
1105 + int err;
1106 + int i, bars = 0;
1107 +
1108 +- if (atomic_inc_return(&dev->enable_cnt) > 1) {
1109 +- pci_update_current_state(dev, dev->current_state);
1110 +- return 0; /* already enabled */
1111 ++ /*
1112 ++ * Power state could be unknown at this point, either due to a fresh
1113 ++ * boot or a device removal call. So get the current power state
1114 ++ * so that things like MSI message writing will behave as expected
1115 ++ * (e.g. if the device really is in D0 at enable time).
1116 ++ */
1117 ++ if (dev->pm_cap) {
1118 ++ u16 pmcsr;
1119 ++ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1120 ++ dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
1121 + }
1122 +
1123 ++ if (atomic_inc_return(&dev->enable_cnt) > 1)
1124 ++ return 0; /* already enabled */
1125 ++
1126 + bridge = pci_upstream_bridge(dev);
1127 + if (bridge)
1128 + pci_enable_bridge(bridge);
1129 +diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
1130 +index 2d5e0435af0a4..bac1d040bacab 100644
1131 +--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
1132 ++++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
1133 +@@ -1153,7 +1153,7 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl,
1134 + struct resource res;
1135 + struct reset_control *rstc;
1136 + int npins = STM32_GPIO_PINS_PER_BANK;
1137 +- int bank_nr, err;
1138 ++ int bank_nr, err, i = 0;
1139 +
1140 + rstc = of_reset_control_get_exclusive(np, NULL);
1141 + if (!IS_ERR(rstc))
1142 +@@ -1182,9 +1182,14 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl,
1143 +
1144 + of_property_read_string(np, "st,bank-name", &bank->gpio_chip.label);
1145 +
1146 +- if (!of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args)) {
1147 ++ if (!of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, i, &args)) {
1148 + bank_nr = args.args[1] / STM32_GPIO_PINS_PER_BANK;
1149 + bank->gpio_chip.base = args.args[1];
1150 ++
1151 ++ npins = args.args[2];
1152 ++ while (!of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3,
1153 ++ ++i, &args))
1154 ++ npins += args.args[2];
1155 + } else {
1156 + bank_nr = pctl->nbanks;
1157 + bank->gpio_chip.base = bank_nr * STM32_GPIO_PINS_PER_BANK;
1158 +diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
1159 +index efd9e908e2248..36a44a837031d 100644
1160 +--- a/drivers/spi/spi-nxp-fspi.c
1161 ++++ b/drivers/spi/spi-nxp-fspi.c
1162 +@@ -975,12 +975,6 @@ static int nxp_fspi_probe(struct platform_device *pdev)
1163 + goto err_put_ctrl;
1164 + }
1165 +
1166 +- /* Clear potential interrupts */
1167 +- reg = fspi_readl(f, f->iobase + FSPI_INTR);
1168 +- if (reg)
1169 +- fspi_writel(f, reg, f->iobase + FSPI_INTR);
1170 +-
1171 +-
1172 + /* find the resources - controller memory mapped space */
1173 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fspi_mmap");
1174 + f->ahb_addr = devm_ioremap_resource(dev, res);
1175 +@@ -1012,6 +1006,11 @@ static int nxp_fspi_probe(struct platform_device *pdev)
1176 + goto err_put_ctrl;
1177 + }
1178 +
1179 ++ /* Clear potential interrupts */
1180 ++ reg = fspi_readl(f, f->iobase + FSPI_INTR);
1181 ++ if (reg)
1182 ++ fspi_writel(f, reg, f->iobase + FSPI_INTR);
1183 ++
1184 + /* find the irq */
1185 + ret = platform_get_irq(pdev, 0);
1186 + if (ret < 0)
1187 +diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c
1188 +index e60be7bb55b0b..c6c8a33c81d5e 100644
1189 +--- a/fs/nilfs2/sysfs.c
1190 ++++ b/fs/nilfs2/sysfs.c
1191 +@@ -1054,6 +1054,7 @@ void nilfs_sysfs_delete_device_group(struct the_nilfs *nilfs)
1192 + nilfs_sysfs_delete_superblock_group(nilfs);
1193 + nilfs_sysfs_delete_segctor_group(nilfs);
1194 + kobject_del(&nilfs->ns_dev_kobj);
1195 ++ kobject_put(&nilfs->ns_dev_kobj);
1196 + kfree(nilfs->ns_dev_subgroups);
1197 + }
1198 +
1199 +diff --git a/include/keys/system_keyring.h b/include/keys/system_keyring.h
1200 +index c1a96fdf598bc..875e002a41804 100644
1201 +--- a/include/keys/system_keyring.h
1202 ++++ b/include/keys/system_keyring.h
1203 +@@ -31,16 +31,37 @@ extern int restrict_link_by_builtin_and_secondary_trusted(
1204 + #define restrict_link_by_builtin_and_secondary_trusted restrict_link_by_builtin_trusted
1205 + #endif
1206 +
1207 ++extern struct pkcs7_message *pkcs7;
1208 + #ifdef CONFIG_SYSTEM_BLACKLIST_KEYRING
1209 + extern int mark_hash_blacklisted(const char *hash);
1210 + extern int is_hash_blacklisted(const u8 *hash, size_t hash_len,
1211 + const char *type);
1212 ++extern int is_binary_blacklisted(const u8 *hash, size_t hash_len);
1213 + #else
1214 + static inline int is_hash_blacklisted(const u8 *hash, size_t hash_len,
1215 + const char *type)
1216 + {
1217 + return 0;
1218 + }
1219 ++
1220 ++static inline int is_binary_blacklisted(const u8 *hash, size_t hash_len)
1221 ++{
1222 ++ return 0;
1223 ++}
1224 ++#endif
1225 ++
1226 ++#ifdef CONFIG_SYSTEM_REVOCATION_LIST
1227 ++extern int add_key_to_revocation_list(const char *data, size_t size);
1228 ++extern int is_key_on_revocation_list(struct pkcs7_message *pkcs7);
1229 ++#else
1230 ++static inline int add_key_to_revocation_list(const char *data, size_t size)
1231 ++{
1232 ++ return 0;
1233 ++}
1234 ++static inline int is_key_on_revocation_list(struct pkcs7_message *pkcs7)
1235 ++{
1236 ++ return -ENOKEY;
1237 ++}
1238 + #endif
1239 +
1240 + #ifdef CONFIG_IMA_BLACKLIST_KEYRING
1241 +diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
1242 +index d8b86fd391134..d2dbe462efeef 100644
1243 +--- a/include/linux/huge_mm.h
1244 ++++ b/include/linux/huge_mm.h
1245 +@@ -259,6 +259,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
1246 + extern vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
1247 +
1248 + extern struct page *huge_zero_page;
1249 ++extern unsigned long huge_zero_pfn;
1250 +
1251 + static inline bool is_huge_zero_page(struct page *page)
1252 + {
1253 +@@ -267,7 +268,7 @@ static inline bool is_huge_zero_page(struct page *page)
1254 +
1255 + static inline bool is_huge_zero_pmd(pmd_t pmd)
1256 + {
1257 +- return is_huge_zero_page(pmd_page(pmd));
1258 ++ return READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd) && pmd_present(pmd);
1259 + }
1260 +
1261 + static inline bool is_huge_zero_pud(pud_t pud)
1262 +@@ -398,6 +399,11 @@ static inline bool is_huge_zero_page(struct page *page)
1263 + return false;
1264 + }
1265 +
1266 ++static inline bool is_huge_zero_pmd(pmd_t pmd)
1267 ++{
1268 ++ return false;
1269 ++}
1270 ++
1271 + static inline bool is_huge_zero_pud(pud_t pud)
1272 + {
1273 + return false;
1274 +diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
1275 +index fc717aeb2b3de..a0513c444446d 100644
1276 +--- a/include/linux/hugetlb.h
1277 ++++ b/include/linux/hugetlb.h
1278 +@@ -469,17 +469,6 @@ static inline int hstate_index(struct hstate *h)
1279 + return h - hstates;
1280 + }
1281 +
1282 +-pgoff_t __basepage_index(struct page *page);
1283 +-
1284 +-/* Return page->index in PAGE_SIZE units */
1285 +-static inline pgoff_t basepage_index(struct page *page)
1286 +-{
1287 +- if (!PageCompound(page))
1288 +- return page->index;
1289 +-
1290 +- return __basepage_index(page);
1291 +-}
1292 +-
1293 + extern int dissolve_free_huge_page(struct page *page);
1294 + extern int dissolve_free_huge_pages(unsigned long start_pfn,
1295 + unsigned long end_pfn);
1296 +@@ -695,11 +684,6 @@ static inline int hstate_index(struct hstate *h)
1297 + return 0;
1298 + }
1299 +
1300 +-static inline pgoff_t basepage_index(struct page *page)
1301 +-{
1302 +- return page->index;
1303 +-}
1304 +-
1305 + static inline int dissolve_free_huge_page(struct page *page)
1306 + {
1307 + return 0;
1308 +diff --git a/include/linux/mm.h b/include/linux/mm.h
1309 +index 5565d11f95429..a7d626b4cad1c 100644
1310 +--- a/include/linux/mm.h
1311 ++++ b/include/linux/mm.h
1312 +@@ -1459,6 +1459,7 @@ struct zap_details {
1313 + struct address_space *check_mapping; /* Check page->mapping if set */
1314 + pgoff_t first_index; /* Lowest page->index to unmap */
1315 + pgoff_t last_index; /* Highest page->index to unmap */
1316 ++ struct page *single_page; /* Locked page to be unmapped */
1317 + };
1318 +
1319 + struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
1320 +@@ -1505,6 +1506,7 @@ extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
1321 + extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
1322 + unsigned long address, unsigned int fault_flags,
1323 + bool *unlocked);
1324 ++void unmap_mapping_page(struct page *page);
1325 + void unmap_mapping_pages(struct address_space *mapping,
1326 + pgoff_t start, pgoff_t nr, bool even_cows);
1327 + void unmap_mapping_range(struct address_space *mapping,
1328 +@@ -1525,6 +1527,7 @@ static inline int fixup_user_fault(struct task_struct *tsk,
1329 + BUG();
1330 + return -EFAULT;
1331 + }
1332 ++static inline void unmap_mapping_page(struct page *page) { }
1333 + static inline void unmap_mapping_pages(struct address_space *mapping,
1334 + pgoff_t start, pgoff_t nr, bool even_cows) { }
1335 + static inline void unmap_mapping_range(struct address_space *mapping,
1336 +diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
1337 +index 2ad72d2c8cc52..5d0767cb424aa 100644
1338 +--- a/include/linux/mmdebug.h
1339 ++++ b/include/linux/mmdebug.h
1340 +@@ -37,6 +37,18 @@ void dump_mm(const struct mm_struct *mm);
1341 + BUG(); \
1342 + } \
1343 + } while (0)
1344 ++#define VM_WARN_ON_ONCE_PAGE(cond, page) ({ \
1345 ++ static bool __section(".data.once") __warned; \
1346 ++ int __ret_warn_once = !!(cond); \
1347 ++ \
1348 ++ if (unlikely(__ret_warn_once && !__warned)) { \
1349 ++ dump_page(page, "VM_WARN_ON_ONCE_PAGE(" __stringify(cond)")");\
1350 ++ __warned = true; \
1351 ++ WARN_ON(1); \
1352 ++ } \
1353 ++ unlikely(__ret_warn_once); \
1354 ++})
1355 ++
1356 + #define VM_WARN_ON(cond) (void)WARN_ON(cond)
1357 + #define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond)
1358 + #define VM_WARN_ONCE(cond, format...) (void)WARN_ONCE(cond, format)
1359 +@@ -48,6 +60,7 @@ void dump_mm(const struct mm_struct *mm);
1360 + #define VM_BUG_ON_MM(cond, mm) VM_BUG_ON(cond)
1361 + #define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond)
1362 + #define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
1363 ++#define VM_WARN_ON_ONCE_PAGE(cond, page) BUILD_BUG_ON_INVALID(cond)
1364 + #define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
1365 + #define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond)
1366 + #endif
1367 +diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
1368 +index 37a4d9e32cd3f..8543b1aaa5299 100644
1369 +--- a/include/linux/pagemap.h
1370 ++++ b/include/linux/pagemap.h
1371 +@@ -397,7 +397,7 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
1372 + }
1373 +
1374 + /*
1375 +- * Get index of the page with in radix-tree
1376 ++ * Get index of the page within radix-tree (but not for hugetlb pages).
1377 + * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
1378 + */
1379 + static inline pgoff_t page_to_index(struct page *page)
1380 +@@ -416,15 +416,16 @@ static inline pgoff_t page_to_index(struct page *page)
1381 + return pgoff;
1382 + }
1383 +
1384 ++extern pgoff_t hugetlb_basepage_index(struct page *page);
1385 ++
1386 + /*
1387 +- * Get the offset in PAGE_SIZE.
1388 +- * (TODO: hugepage should have ->index in PAGE_SIZE)
1389 ++ * Get the offset in PAGE_SIZE (even for hugetlb pages).
1390 ++ * (TODO: hugetlb pages should have ->index in PAGE_SIZE)
1391 + */
1392 + static inline pgoff_t page_to_pgoff(struct page *page)
1393 + {
1394 +- if (unlikely(PageHeadHuge(page)))
1395 +- return page->index << compound_order(page);
1396 +-
1397 ++ if (unlikely(PageHuge(page)))
1398 ++ return hugetlb_basepage_index(page);
1399 + return page_to_index(page);
1400 + }
1401 +
1402 +diff --git a/include/linux/rmap.h b/include/linux/rmap.h
1403 +index d7d6d4eb17949..91ccae9467164 100644
1404 +--- a/include/linux/rmap.h
1405 ++++ b/include/linux/rmap.h
1406 +@@ -98,7 +98,8 @@ enum ttu_flags {
1407 + * do a final flush if necessary */
1408 + TTU_RMAP_LOCKED = 0x80, /* do not grab rmap lock:
1409 + * caller holds it */
1410 +- TTU_SPLIT_FREEZE = 0x100, /* freeze pte under splitting thp */
1411 ++ TTU_SPLIT_FREEZE = 0x100, /* freeze pte under splitting thp */
1412 ++ TTU_SYNC = 0x200, /* avoid racy checks with PVMW_SYNC */
1413 + };
1414 +
1415 + #ifdef CONFIG_MMU
1416 +diff --git a/include/net/sock.h b/include/net/sock.h
1417 +index a0728f24ecc53..d3dd89b6e2cba 100644
1418 +--- a/include/net/sock.h
1419 ++++ b/include/net/sock.h
1420 +@@ -1860,7 +1860,8 @@ static inline u32 net_tx_rndhash(void)
1421 +
1422 + static inline void sk_set_txhash(struct sock *sk)
1423 + {
1424 +- sk->sk_txhash = net_tx_rndhash();
1425 ++ /* This pairs with READ_ONCE() in skb_set_hash_from_sk() */
1426 ++ WRITE_ONCE(sk->sk_txhash, net_tx_rndhash());
1427 + }
1428 +
1429 + static inline void sk_rethink_txhash(struct sock *sk)
1430 +@@ -2125,9 +2126,12 @@ static inline void sock_poll_wait(struct file *filp, struct socket *sock,
1431 +
1432 + static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
1433 + {
1434 +- if (sk->sk_txhash) {
1435 ++ /* This pairs with WRITE_ONCE() in sk_set_txhash() */
1436 ++ u32 txhash = READ_ONCE(sk->sk_txhash);
1437 ++
1438 ++ if (txhash) {
1439 + skb->l4_hash = 1;
1440 +- skb->hash = sk->sk_txhash;
1441 ++ skb->hash = txhash;
1442 + }
1443 + }
1444 +
1445 +diff --git a/init/Kconfig b/init/Kconfig
1446 +index 4f9fd78e2200b..f23e90d9935f5 100644
1447 +--- a/init/Kconfig
1448 ++++ b/init/Kconfig
1449 +@@ -20,6 +20,9 @@ config GCC_VERSION
1450 + config CC_IS_CLANG
1451 + def_bool $(success,$(CC) --version | head -n 1 | grep -q clang)
1452 +
1453 ++config LD_IS_LLD
1454 ++ def_bool $(success,$(LD) -v | head -n 1 | grep -q LLD)
1455 ++
1456 + config CLANG_VERSION
1457 + int
1458 + default $(shell,$(srctree)/scripts/clang-version.sh $(CC))
1459 +diff --git a/kernel/futex.c b/kernel/futex.c
1460 +index 375e7e98e301f..f82879ae6577c 100644
1461 +--- a/kernel/futex.c
1462 ++++ b/kernel/futex.c
1463 +@@ -737,7 +737,7 @@ again:
1464 +
1465 + key->both.offset |= FUT_OFF_INODE; /* inode-based key */
1466 + key->shared.i_seq = get_inode_sequence_number(inode);
1467 +- key->shared.pgoff = basepage_index(tail);
1468 ++ key->shared.pgoff = page_to_pgoff(tail);
1469 + rcu_read_unlock();
1470 + }
1471 +
1472 +diff --git a/kernel/kthread.c b/kernel/kthread.c
1473 +index 1d4c98a19043f..2eb8d7550324b 100644
1474 +--- a/kernel/kthread.c
1475 ++++ b/kernel/kthread.c
1476 +@@ -1020,8 +1020,38 @@ void kthread_flush_work(struct kthread_work *work)
1477 + EXPORT_SYMBOL_GPL(kthread_flush_work);
1478 +
1479 + /*
1480 +- * This function removes the work from the worker queue. Also it makes sure
1481 +- * that it won't get queued later via the delayed work's timer.
1482 ++ * Make sure that the timer is neither set nor running and could
1483 ++ * not manipulate the work list_head any longer.
1484 ++ *
1485 ++ * The function is called under worker->lock. The lock is temporary
1486 ++ * released but the timer can't be set again in the meantime.
1487 ++ */
1488 ++static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
1489 ++ unsigned long *flags)
1490 ++{
1491 ++ struct kthread_delayed_work *dwork =
1492 ++ container_of(work, struct kthread_delayed_work, work);
1493 ++ struct kthread_worker *worker = work->worker;
1494 ++
1495 ++ /*
1496 ++ * del_timer_sync() must be called to make sure that the timer
1497 ++ * callback is not running. The lock must be temporary released
1498 ++ * to avoid a deadlock with the callback. In the meantime,
1499 ++ * any queuing is blocked by setting the canceling counter.
1500 ++ */
1501 ++ work->canceling++;
1502 ++ raw_spin_unlock_irqrestore(&worker->lock, *flags);
1503 ++ del_timer_sync(&dwork->timer);
1504 ++ raw_spin_lock_irqsave(&worker->lock, *flags);
1505 ++ work->canceling--;
1506 ++}
1507 ++
1508 ++/*
1509 ++ * This function removes the work from the worker queue.
1510 ++ *
1511 ++ * It is called under worker->lock. The caller must make sure that
1512 ++ * the timer used by delayed work is not running, e.g. by calling
1513 ++ * kthread_cancel_delayed_work_timer().
1514 + *
1515 + * The work might still be in use when this function finishes. See the
1516 + * current_work proceed by the worker.
1517 +@@ -1029,28 +1059,8 @@ EXPORT_SYMBOL_GPL(kthread_flush_work);
1518 + * Return: %true if @work was pending and successfully canceled,
1519 + * %false if @work was not pending
1520 + */
1521 +-static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
1522 +- unsigned long *flags)
1523 ++static bool __kthread_cancel_work(struct kthread_work *work)
1524 + {
1525 +- /* Try to cancel the timer if exists. */
1526 +- if (is_dwork) {
1527 +- struct kthread_delayed_work *dwork =
1528 +- container_of(work, struct kthread_delayed_work, work);
1529 +- struct kthread_worker *worker = work->worker;
1530 +-
1531 +- /*
1532 +- * del_timer_sync() must be called to make sure that the timer
1533 +- * callback is not running. The lock must be temporary released
1534 +- * to avoid a deadlock with the callback. In the meantime,
1535 +- * any queuing is blocked by setting the canceling counter.
1536 +- */
1537 +- work->canceling++;
1538 +- raw_spin_unlock_irqrestore(&worker->lock, *flags);
1539 +- del_timer_sync(&dwork->timer);
1540 +- raw_spin_lock_irqsave(&worker->lock, *flags);
1541 +- work->canceling--;
1542 +- }
1543 +-
1544 + /*
1545 + * Try to remove the work from a worker list. It might either
1546 + * be from worker->work_list or from worker->delayed_work_list.
1547 +@@ -1103,11 +1113,23 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
1548 + /* Work must not be used with >1 worker, see kthread_queue_work() */
1549 + WARN_ON_ONCE(work->worker != worker);
1550 +
1551 +- /* Do not fight with another command that is canceling this work. */
1552 ++ /*
1553 ++ * Temporary cancel the work but do not fight with another command
1554 ++ * that is canceling the work as well.
1555 ++ *
1556 ++ * It is a bit tricky because of possible races with another
1557 ++ * mod_delayed_work() and cancel_delayed_work() callers.
1558 ++ *
1559 ++ * The timer must be canceled first because worker->lock is released
1560 ++ * when doing so. But the work can be removed from the queue (list)
1561 ++ * only when it can be queued again so that the return value can
1562 ++ * be used for reference counting.
1563 ++ */
1564 ++ kthread_cancel_delayed_work_timer(work, &flags);
1565 + if (work->canceling)
1566 + goto out;
1567 ++ ret = __kthread_cancel_work(work);
1568 +
1569 +- ret = __kthread_cancel_work(work, true, &flags);
1570 + fast_queue:
1571 + __kthread_queue_delayed_work(worker, dwork, delay);
1572 + out:
1573 +@@ -1129,7 +1151,10 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
1574 + /* Work must not be used with >1 worker, see kthread_queue_work(). */
1575 + WARN_ON_ONCE(work->worker != worker);
1576 +
1577 +- ret = __kthread_cancel_work(work, is_dwork, &flags);
1578 ++ if (is_dwork)
1579 ++ kthread_cancel_delayed_work_timer(work, &flags);
1580 ++
1581 ++ ret = __kthread_cancel_work(work);
1582 +
1583 + if (worker->current_work != work)
1584 + goto out_fast;
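These kthread hunks split the old __kthread_cancel_work() so that the timer-cancel step, which has to drop worker->lock around del_timer_sync(), is separate from the dequeue step that must run with the lock held. A rough pthreads analog of the lock-drop pattern, with hypothetical names (illustrative only, not the kernel code):

#include <pthread.h>

struct work {
	pthread_mutex_t *lock;   /* worker->lock analog */
	int canceling;           /* queuing is refused while nonzero */
};

/* stands in for del_timer_sync(); must not be called under *lock */
void timer_sync_cancel(struct work *w);

void cancel_work_timer(struct work *w)
{
	/*
	 * Block re-queuing first, then drop the lock so the timer
	 * callback (which takes the same lock) cannot deadlock us.
	 */
	w->canceling++;
	pthread_mutex_unlock(w->lock);
	timer_sync_cancel(w);
	pthread_mutex_lock(w->lock);
	w->canceling--;
}

The canceling counter is what keeps the unlocked window safe: while it is nonzero, the queuing paths refuse to re-arm the timer even though the lock is briefly free.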
1585 +diff --git a/kernel/module.c b/kernel/module.c
1586 +index 88a6a9e04f8dc..59d487b8d8dad 100644
1587 +--- a/kernel/module.c
1588 ++++ b/kernel/module.c
1589 +@@ -268,9 +268,18 @@ static void module_assert_mutex_or_preempt(void)
1590 + #endif
1591 + }
1592 +
1593 ++#ifdef CONFIG_MODULE_SIG
1594 + static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
1595 + module_param(sig_enforce, bool_enable_only, 0644);
1596 +
1597 ++void set_module_sig_enforced(void)
1598 ++{
1599 ++ sig_enforce = true;
1600 ++}
1601 ++#else
1602 ++#define sig_enforce false
1603 ++#endif
1604 ++
1605 + /*
1606 + * Export sig_enforce kernel cmdline parameter to allow other subsystems rely
1607 + * on that instead of directly to CONFIG_MODULE_SIG_FORCE config.
1608 +@@ -281,11 +290,6 @@ bool is_module_sig_enforced(void)
1609 + }
1610 + EXPORT_SYMBOL(is_module_sig_enforced);
1611 +
1612 +-void set_module_sig_enforced(void)
1613 +-{
1614 +- sig_enforce = true;
1615 +-}
1616 +-
1617 + /* Block module loading/unloading? */
1618 + int modules_disabled = 0;
1619 + core_param(nomodule, modules_disabled, bint, 0);
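Moving sig_enforce under CONFIG_MODULE_SIG gives it a compile-time constant fallback, so is_module_sig_enforced() still links in both configurations while the setter exists only when it can matter. A minimal sketch of the same #ifdef pattern, with a hypothetical option name:

#include <stdbool.h>

#ifdef CONFIG_FEATURE_SIG                      /* hypothetical option */
static bool feature_enforced;                  /* real, writable state */
void set_feature_enforced(void) { feature_enforced = true; }
#else
#define feature_enforced false                 /* constant-folds away */
#endif

bool is_feature_enforced(void)
{
	return feature_enforced;               /* compiles either way */
}

With the macro fallback, the compiler can delete every branch guarded by the flag when the feature is configured out.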
1620 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
1621 +index 7bbf419bb86d6..87a07aa61be0d 100644
1622 +--- a/mm/huge_memory.c
1623 ++++ b/mm/huge_memory.c
1624 +@@ -61,6 +61,7 @@ static struct shrinker deferred_split_shrinker;
1625 +
1626 + static atomic_t huge_zero_refcount;
1627 + struct page *huge_zero_page __read_mostly;
1628 ++unsigned long huge_zero_pfn __read_mostly = ~0UL;
1629 +
1630 + bool transparent_hugepage_enabled(struct vm_area_struct *vma)
1631 + {
1632 +@@ -97,6 +98,7 @@ retry:
1633 + __free_pages(zero_page, compound_order(zero_page));
1634 + goto retry;
1635 + }
1636 ++ WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));
1637 +
1638 + /* We take additional reference here. It will be put back by shrinker */
1639 + atomic_set(&huge_zero_refcount, 2);
1640 +@@ -146,6 +148,7 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
1641 + if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
1642 + struct page *zero_page = xchg(&huge_zero_page, NULL);
1643 + BUG_ON(zero_page == NULL);
1644 ++ WRITE_ONCE(huge_zero_pfn, ~0UL);
1645 + __free_pages(zero_page, compound_order(zero_page));
1646 + return HPAGE_PMD_NR;
1647 + }
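The shrinker hunk resets huge_zero_pfn to ~0UL before the page is freed, so a lockless reader comparing pfns can never match a page that is being torn down. A flattened sketch of the reader side (the helper name and READ_ONCE stand-in are illustrative; this is presumably paired with an is_huge_zero_pmd() update elsewhere in the patch):

static unsigned long zero_pfn = ~0UL;            /* ~0UL means "no page" */

#define READ_ONCE_SKETCH(x) (*(volatile __typeof__(x) *)&(x))

static int pmd_is_huge_zero(unsigned long pfn, int present)
{
	/* single untorn load; a concurrently freed page never matches,
	 * because the shrinker resets zero_pfn before freeing */
	return READ_ONCE_SKETCH(zero_pfn) == pfn && present;
}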
1648 +@@ -2155,7 +2158,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
1649 + count_vm_event(THP_SPLIT_PMD);
1650 +
1651 + if (!vma_is_anonymous(vma)) {
1652 +- _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
1653 ++ old_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
1654 + /*
1655 + * We are going to unmap this huge page. So
1656 + * just go ahead and zap it
1657 +@@ -2164,16 +2167,25 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
1658 + zap_deposited_table(mm, pmd);
1659 + if (vma_is_dax(vma))
1660 + return;
1661 +- page = pmd_page(_pmd);
1662 +- if (!PageDirty(page) && pmd_dirty(_pmd))
1663 +- set_page_dirty(page);
1664 +- if (!PageReferenced(page) && pmd_young(_pmd))
1665 +- SetPageReferenced(page);
1666 +- page_remove_rmap(page, true);
1667 +- put_page(page);
1668 ++ if (unlikely(is_pmd_migration_entry(old_pmd))) {
1669 ++ swp_entry_t entry;
1670 ++
1671 ++ entry = pmd_to_swp_entry(old_pmd);
1672 ++ page = migration_entry_to_page(entry);
1673 ++ } else {
1674 ++ page = pmd_page(old_pmd);
1675 ++ if (!PageDirty(page) && pmd_dirty(old_pmd))
1676 ++ set_page_dirty(page);
1677 ++ if (!PageReferenced(page) && pmd_young(old_pmd))
1678 ++ SetPageReferenced(page);
1679 ++ page_remove_rmap(page, true);
1680 ++ put_page(page);
1681 ++ }
1682 + add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
1683 + return;
1684 +- } else if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
1685 ++ }
1686 ++
1687 ++ if (is_huge_zero_pmd(*pmd)) {
1688 + /*
1689 + * FIXME: Do we want to invalidate secondary mmu by calling
1690 + * mmu_notifier_invalidate_range() see comments below inside
1691 +@@ -2449,16 +2461,16 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
1692 + static void unmap_page(struct page *page)
1693 + {
1694 + enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
1695 +- TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
1696 +- bool unmap_success;
1697 ++ TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | TTU_SYNC;
1698 +
1699 + VM_BUG_ON_PAGE(!PageHead(page), page);
1700 +
1701 + if (PageAnon(page))
1702 + ttu_flags |= TTU_SPLIT_FREEZE;
1703 +
1704 +- unmap_success = try_to_unmap(page, ttu_flags);
1705 +- VM_BUG_ON_PAGE(!unmap_success, page);
1706 ++ try_to_unmap(page, ttu_flags);
1707 ++
1708 ++ VM_WARN_ON_ONCE_PAGE(page_mapped(page), page);
1709 + }
1710 +
1711 + static void remap_page(struct page *page)
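unmap_page() stops asserting that the unmap fully succeeded: with concurrent zaps the mapcount can be transiently stale, so the hunk requests a synchronous walk (TTU_SYNC) and downgrades the check to a warn-once on page_mapped(). A standalone sketch of the warn-once shape (GNU C statement expression, in the style of the kernel's WARN_ON_ONCE; names here are made up):

#include <stdio.h>

/* report the first violation and keep running; the VM_BUG_ON this
 * replaces would have brought the whole kernel down */
#define WARN_ONCE_SKETCH(cond, msg) ({                        \
	static int __warned;                                  \
	int __rc = !!(cond);                                  \
	if (__rc && !__warned) {                              \
		__warned = 1;                                 \
		fprintf(stderr, "warn once: %s\n", msg);      \
	}                                                     \
	__rc;                                                 \
})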
1712 +@@ -2737,7 +2749,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
1713 + struct deferred_split *ds_queue = get_deferred_split_queue(page);
1714 + struct anon_vma *anon_vma = NULL;
1715 + struct address_space *mapping = NULL;
1716 +- int count, mapcount, extra_pins, ret;
1717 ++ int extra_pins, ret;
1718 + bool mlocked;
1719 + unsigned long flags;
1720 + pgoff_t end;
1721 +@@ -2799,7 +2811,6 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
1722 +
1723 + mlocked = PageMlocked(page);
1724 + unmap_page(head);
1725 +- VM_BUG_ON_PAGE(compound_mapcount(head), head);
1726 +
1727 + /* Make sure the page is not on per-CPU pagevec as it takes pin */
1728 + if (mlocked)
1729 +@@ -2822,9 +2833,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
1730 +
1731 + /* Prevent deferred_split_scan() touching ->_refcount */
1732 + spin_lock(&ds_queue->split_queue_lock);
1733 +- count = page_count(head);
1734 +- mapcount = total_mapcount(head);
1735 +- if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) {
1736 ++ if (page_ref_freeze(head, 1 + extra_pins)) {
1737 + if (!list_empty(page_deferred_list(head))) {
1738 + ds_queue->split_queue_len--;
1739 + list_del(page_deferred_list(head));
1740 +@@ -2845,16 +2854,9 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
1741 + } else
1742 + ret = 0;
1743 + } else {
1744 +- if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
1745 +- pr_alert("total_mapcount: %u, page_count(): %u\n",
1746 +- mapcount, count);
1747 +- if (PageTail(page))
1748 +- dump_page(head, NULL);
1749 +- dump_page(page, "total_mapcount(head) > 0");
1750 +- BUG();
1751 +- }
1752 + spin_unlock(&ds_queue->split_queue_lock);
1753 +-fail: if (mapping)
1754 ++fail:
1755 ++ if (mapping)
1756 + xa_unlock(&mapping->i_pages);
1757 + spin_unlock_irqrestore(&pgdata->lru_lock, flags);
1758 + remap_page(head);
1759 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
1760 +index fe15e7d8220ab..95a32749af4da 100644
1761 +--- a/mm/hugetlb.c
1762 ++++ b/mm/hugetlb.c
1763 +@@ -1461,15 +1461,12 @@ int PageHeadHuge(struct page *page_head)
1764 + return get_compound_page_dtor(page_head) == free_huge_page;
1765 + }
1766 +
1767 +-pgoff_t __basepage_index(struct page *page)
1768 ++pgoff_t hugetlb_basepage_index(struct page *page)
1769 + {
1770 + struct page *page_head = compound_head(page);
1771 + pgoff_t index = page_index(page_head);
1772 + unsigned long compound_idx;
1773 +
1774 +- if (!PageHuge(page_head))
1775 +- return page_index(page);
1776 +-
1777 + if (compound_order(page_head) >= MAX_ORDER)
1778 + compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1779 + else
1780 +diff --git a/mm/internal.h b/mm/internal.h
1781 +index 7dd7fbb577a9a..cf382549dd702 100644
1782 +--- a/mm/internal.h
1783 ++++ b/mm/internal.h
1784 +@@ -339,27 +339,52 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
1785 + extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
1786 +
1787 + /*
1788 +- * At what user virtual address is page expected in @vma?
1789 ++ * At what user virtual address is page expected in vma?
1790 ++ * Returns -EFAULT if all of the page is outside the range of vma.
1791 ++ * If page is a compound head, the entire compound page is considered.
1792 + */
1793 + static inline unsigned long
1794 +-__vma_address(struct page *page, struct vm_area_struct *vma)
1795 ++vma_address(struct page *page, struct vm_area_struct *vma)
1796 + {
1797 +- pgoff_t pgoff = page_to_pgoff(page);
1798 +- return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1799 ++ pgoff_t pgoff;
1800 ++ unsigned long address;
1801 ++
1802 ++ VM_BUG_ON_PAGE(PageKsm(page), page); /* KSM page->index unusable */
1803 ++ pgoff = page_to_pgoff(page);
1804 ++ if (pgoff >= vma->vm_pgoff) {
1805 ++ address = vma->vm_start +
1806 ++ ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1807 ++ /* Check for address beyond vma (or wrapped through 0?) */
1808 ++ if (address < vma->vm_start || address >= vma->vm_end)
1809 ++ address = -EFAULT;
1810 ++ } else if (PageHead(page) &&
1811 ++ pgoff + compound_nr(page) - 1 >= vma->vm_pgoff) {
1812 ++ /* Test above avoids possibility of wrap to 0 on 32-bit */
1813 ++ address = vma->vm_start;
1814 ++ } else {
1815 ++ address = -EFAULT;
1816 ++ }
1817 ++ return address;
1818 + }
1819 +
1820 ++/*
1821 ++ * Then at what user virtual address will none of the page be found in vma?
1822 ++ * Assumes that vma_address() already returned a good starting address.
1823 ++ * If page is a compound head, the entire compound page is considered.
1824 ++ */
1825 + static inline unsigned long
1826 +-vma_address(struct page *page, struct vm_area_struct *vma)
1827 ++vma_address_end(struct page *page, struct vm_area_struct *vma)
1828 + {
1829 +- unsigned long start, end;
1830 +-
1831 +- start = __vma_address(page, vma);
1832 +- end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);
1833 +-
1834 +- /* page should be within @vma mapping range */
1835 +- VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma);
1836 +-
1837 +- return max(start, vma->vm_start);
1838 ++ pgoff_t pgoff;
1839 ++ unsigned long address;
1840 ++
1841 ++ VM_BUG_ON_PAGE(PageKsm(page), page); /* KSM page->index unusable */
1842 ++ pgoff = page_to_pgoff(page) + compound_nr(page);
1843 ++ address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1844 ++ /* Check for address beyond vma (or wrapped through 0?) */
1845 ++ if (address < vma->vm_start || address > vma->vm_end)
1846 ++ address = vma->vm_end;
1847 ++ return address;
1848 + }
1849 +
1850 + static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
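vma_address() and vma_address_end() now bound the walk purely by pgoff arithmetic and return -EFAULT instead of asserting. Reduced to standalone C, the arithmetic is roughly this (sketch; pgoffs and sizes in pages, 4K pages assumed, -1UL plays the role of -EFAULT):

#define PAGE_SHIFT_SKETCH 12UL   /* assumed 4K pages */

/* first user address at which the page maps into the vma, or -1UL */
unsigned long vma_addr(unsigned long pg_pgoff, unsigned long pg_pages,
		       unsigned long vm_start, unsigned long vm_end,
		       unsigned long vm_pgoff)
{
	if (pg_pgoff >= vm_pgoff) {
		unsigned long a = vm_start +
			((pg_pgoff - vm_pgoff) << PAGE_SHIFT_SKETCH);
		/* beyond the vma, or wrapped through 0 on 32-bit */
		return (a < vm_start || a >= vm_end) ? -1UL : a;
	}
	/* a compound head may start before the vma yet still overlap it */
	if (pg_pages > 1 && pg_pgoff + pg_pages - 1 >= vm_pgoff)
		return vm_start;
	return -1UL;
}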
1851 +diff --git a/mm/memory.c b/mm/memory.c
1852 +index 13a575ce2ec8f..4bb7c6a364c81 100644
1853 +--- a/mm/memory.c
1854 ++++ b/mm/memory.c
1855 +@@ -1165,7 +1165,18 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1856 + else if (zap_huge_pmd(tlb, vma, pmd, addr))
1857 + goto next;
1858 + /* fall through */
1859 ++ } else if (details && details->single_page &&
1860 ++ PageTransCompound(details->single_page) &&
1861 ++ next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
1862 ++ spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
1863 ++ /*
1864 ++ * Take and drop THP pmd lock so that we cannot return
1865 ++ * prematurely, while zap_huge_pmd() has cleared *pmd,
1866 ++ * but not yet decremented compound_mapcount().
1867 ++ */
1868 ++ spin_unlock(ptl);
1869 + }
1870 ++
1871 + /*
1872 + * Here there can be other concurrent MADV_DONTNEED or
1873 + * trans huge page faults running, and if the pmd is
1874 +@@ -2769,6 +2780,36 @@ static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
1875 + }
1876 + }
1877 +
1878 ++/**
1879 ++ * unmap_mapping_page() - Unmap single page from processes.
1880 ++ * @page: The locked page to be unmapped.
1881 ++ *
1882 ++ * Unmap this page from any userspace process which still has it mmaped.
1883 ++ * Typically, for efficiency, the range of nearby pages has already been
1884 ++ * unmapped by unmap_mapping_pages() or unmap_mapping_range(). But once
1885 ++ * truncation or invalidation holds the lock on a page, it may find that
1886 ++ * the page has been remapped again; it then uses unmap_mapping_page()
1887 ++ * to finally unmap it.

1888 ++ */
1889 ++void unmap_mapping_page(struct page *page)
1890 ++{
1891 ++ struct address_space *mapping = page->mapping;
1892 ++ struct zap_details details = { };
1893 ++
1894 ++ VM_BUG_ON(!PageLocked(page));
1895 ++ VM_BUG_ON(PageTail(page));
1896 ++
1897 ++ details.check_mapping = mapping;
1898 ++ details.first_index = page->index;
1899 ++ details.last_index = page->index + hpage_nr_pages(page) - 1;
1900 ++ details.single_page = page;
1901 ++
1902 ++ i_mmap_lock_write(mapping);
1903 ++ if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
1904 ++ unmap_mapping_range_tree(&mapping->i_mmap, &details);
1905 ++ i_mmap_unlock_write(mapping);
1906 ++}
1907 ++
1908 + /**
1909 + * unmap_mapping_pages() - Unmap pages from processes.
1910 + * @mapping: The address space containing pages to be unmapped.
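Both the new branch in zap_pmd_range() above and the PVMW_SYNC branch added to the page walker further down lean on the same idiom: take and immediately drop a lock that a racing thread holds across its critical section, purely to wait for that section to finish. A runnable pthreads analog (illustrative only):

#include <pthread.h>

/*
 * Wait for whoever holds 'lock' to leave their critical section.
 * Nothing is read or written under the lock; acquiring it *is* the
 * synchronization, exactly like the pmd_lock()/spin_unlock() pair
 * added above.
 */
void wait_for_critical_section(pthread_mutex_t *lock)
{
	pthread_mutex_lock(lock);
	pthread_mutex_unlock(lock);
}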
1911 +diff --git a/mm/migrate.c b/mm/migrate.c
1912 +index 00bbe57c1ce22..5092ef2aa8a1f 100644
1913 +--- a/mm/migrate.c
1914 ++++ b/mm/migrate.c
1915 +@@ -321,6 +321,7 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
1916 + goto out;
1917 +
1918 + page = migration_entry_to_page(entry);
1919 ++ page = compound_head(page);
1920 +
1921 + /*
1922 + * Once page cache replacement of page migration started, page_count
1923 +diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
1924 +index eff4b4520c8d5..029f5598251c2 100644
1925 +--- a/mm/page_vma_mapped.c
1926 ++++ b/mm/page_vma_mapped.c
1927 +@@ -111,6 +111,13 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
1928 + return pfn_in_hpage(pvmw->page, pfn);
1929 + }
1930 +
1931 ++static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
1932 ++{
1933 ++ pvmw->address = (pvmw->address + size) & ~(size - 1);
1934 ++ if (!pvmw->address)
1935 ++ pvmw->address = ULONG_MAX;
1936 ++}
1937 ++
1938 + /**
1939 + * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
1940 + * @pvmw->address
1941 +@@ -139,6 +146,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
1942 + {
1943 + struct mm_struct *mm = pvmw->vma->vm_mm;
1944 + struct page *page = pvmw->page;
1945 ++ unsigned long end;
1946 + pgd_t *pgd;
1947 + p4d_t *p4d;
1948 + pud_t *pud;
1949 +@@ -148,10 +156,11 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
1950 + if (pvmw->pmd && !pvmw->pte)
1951 + return not_found(pvmw);
1952 +
1953 +- if (pvmw->pte)
1954 +- goto next_pte;
1955 ++ if (unlikely(PageHuge(page))) {
1956 ++ /* The only possible mapping was handled on last iteration */
1957 ++ if (pvmw->pte)
1958 ++ return not_found(pvmw);
1959 +
1960 +- if (unlikely(PageHuge(pvmw->page))) {
1961 + /* when pud is not present, pte will be NULL */
1962 + pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
1963 + if (!pvmw->pte)
1964 +@@ -163,78 +172,108 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
1965 + return not_found(pvmw);
1966 + return true;
1967 + }
1968 +-restart:
1969 +- pgd = pgd_offset(mm, pvmw->address);
1970 +- if (!pgd_present(*pgd))
1971 +- return false;
1972 +- p4d = p4d_offset(pgd, pvmw->address);
1973 +- if (!p4d_present(*p4d))
1974 +- return false;
1975 +- pud = pud_offset(p4d, pvmw->address);
1976 +- if (!pud_present(*pud))
1977 +- return false;
1978 +- pvmw->pmd = pmd_offset(pud, pvmw->address);
1979 ++
1980 + /*
1981 +- * Make sure the pmd value isn't cached in a register by the
1982 +- * compiler and used as a stale value after we've observed a
1983 +- * subsequent update.
1984 ++ * Seeking to the next pte only makes sense for THP.
1985 ++ * But more important than that optimization is to filter out
1986 ++ * any PageKsm page, whose page->index misleads vma_address()
1987 ++ * and vma_address_end() into disaster.
1988 + */
1989 +- pmde = READ_ONCE(*pvmw->pmd);
1990 +- if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
1991 +- pvmw->ptl = pmd_lock(mm, pvmw->pmd);
1992 +- if (likely(pmd_trans_huge(*pvmw->pmd))) {
1993 +- if (pvmw->flags & PVMW_MIGRATION)
1994 +- return not_found(pvmw);
1995 +- if (pmd_page(*pvmw->pmd) != page)
1996 +- return not_found(pvmw);
1997 +- return true;
1998 +- } else if (!pmd_present(*pvmw->pmd)) {
1999 +- if (thp_migration_supported()) {
2000 +- if (!(pvmw->flags & PVMW_MIGRATION))
2001 ++ end = PageTransCompound(page) ?
2002 ++ vma_address_end(page, pvmw->vma) :
2003 ++ pvmw->address + PAGE_SIZE;
2004 ++ if (pvmw->pte)
2005 ++ goto next_pte;
2006 ++restart:
2007 ++ do {
2008 ++ pgd = pgd_offset(mm, pvmw->address);
2009 ++ if (!pgd_present(*pgd)) {
2010 ++ step_forward(pvmw, PGDIR_SIZE);
2011 ++ continue;
2012 ++ }
2013 ++ p4d = p4d_offset(pgd, pvmw->address);
2014 ++ if (!p4d_present(*p4d)) {
2015 ++ step_forward(pvmw, P4D_SIZE);
2016 ++ continue;
2017 ++ }
2018 ++ pud = pud_offset(p4d, pvmw->address);
2019 ++ if (!pud_present(*pud)) {
2020 ++ step_forward(pvmw, PUD_SIZE);
2021 ++ continue;
2022 ++ }
2023 ++
2024 ++ pvmw->pmd = pmd_offset(pud, pvmw->address);
2025 ++ /*
2026 ++ * Make sure the pmd value isn't cached in a register by the
2027 ++ * compiler and used as a stale value after we've observed a
2028 ++ * subsequent update.
2029 ++ */
2030 ++ pmde = READ_ONCE(*pvmw->pmd);
2031 ++
2032 ++ if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
2033 ++ pvmw->ptl = pmd_lock(mm, pvmw->pmd);
2034 ++ pmde = *pvmw->pmd;
2035 ++ if (likely(pmd_trans_huge(pmde))) {
2036 ++ if (pvmw->flags & PVMW_MIGRATION)
2037 + return not_found(pvmw);
2038 +- if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
2039 +- swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);
2040 ++ if (pmd_page(pmde) != page)
2041 ++ return not_found(pvmw);
2042 ++ return true;
2043 ++ }
2044 ++ if (!pmd_present(pmde)) {
2045 ++ swp_entry_t entry;
2046 +
2047 +- if (migration_entry_to_page(entry) != page)
2048 +- return not_found(pvmw);
2049 +- return true;
2050 +- }
2051 ++ if (!thp_migration_supported() ||
2052 ++ !(pvmw->flags & PVMW_MIGRATION))
2053 ++ return not_found(pvmw);
2054 ++ entry = pmd_to_swp_entry(pmde);
2055 ++ if (!is_migration_entry(entry) ||
2056 ++ migration_entry_to_page(entry) != page)
2057 ++ return not_found(pvmw);
2058 ++ return true;
2059 + }
2060 +- return not_found(pvmw);
2061 +- } else {
2062 + /* THP pmd was split under us: handle on pte level */
2063 + spin_unlock(pvmw->ptl);
2064 + pvmw->ptl = NULL;
2065 ++ } else if (!pmd_present(pmde)) {
2066 ++ /*
2067 ++ * If PVMW_SYNC, take and drop THP pmd lock so that we
2068 ++ * cannot return prematurely, while zap_huge_pmd() has
2069 ++ * cleared *pmd but not decremented compound_mapcount().
2070 ++ */
2071 ++ if ((pvmw->flags & PVMW_SYNC) &&
2072 ++ PageTransCompound(page)) {
2073 ++ spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
2074 ++
2075 ++ spin_unlock(ptl);
2076 ++ }
2077 ++ step_forward(pvmw, PMD_SIZE);
2078 ++ continue;
2079 + }
2080 +- } else if (!pmd_present(pmde)) {
2081 +- return false;
2082 +- }
2083 +- if (!map_pte(pvmw))
2084 +- goto next_pte;
2085 +- while (1) {
2086 ++ if (!map_pte(pvmw))
2087 ++ goto next_pte;
2088 ++this_pte:
2089 + if (check_pte(pvmw))
2090 + return true;
2091 + next_pte:
2092 +- /* Seek to next pte only makes sense for THP */
2093 +- if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
2094 +- return not_found(pvmw);
2095 + do {
2096 + pvmw->address += PAGE_SIZE;
2097 +- if (pvmw->address >= pvmw->vma->vm_end ||
2098 +- pvmw->address >=
2099 +- __vma_address(pvmw->page, pvmw->vma) +
2100 +- hpage_nr_pages(pvmw->page) * PAGE_SIZE)
2101 ++ if (pvmw->address >= end)
2102 + return not_found(pvmw);
2103 + /* Did we cross page table boundary? */
2104 +- if (pvmw->address % PMD_SIZE == 0) {
2105 +- pte_unmap(pvmw->pte);
2106 ++ if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
2107 + if (pvmw->ptl) {
2108 + spin_unlock(pvmw->ptl);
2109 + pvmw->ptl = NULL;
2110 + }
2111 ++ pte_unmap(pvmw->pte);
2112 ++ pvmw->pte = NULL;
2113 + goto restart;
2114 +- } else {
2115 +- pvmw->pte++;
2116 ++ }
2117 ++ pvmw->pte++;
2118 ++ if ((pvmw->flags & PVMW_SYNC) && !pvmw->ptl) {
2119 ++ pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
2120 ++ spin_lock(pvmw->ptl);
2121 + }
2122 + } while (pte_none(*pvmw->pte));
2123 +
2124 +@@ -242,7 +281,10 @@ next_pte:
2125 + pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
2126 + spin_lock(pvmw->ptl);
2127 + }
2128 +- }
2129 ++ goto this_pte;
2130 ++ } while (pvmw->address < end);
2131 ++
2132 ++ return false;
2133 + }
2134 +
2135 + /**
2136 +@@ -261,14 +303,10 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
2137 + .vma = vma,
2138 + .flags = PVMW_SYNC,
2139 + };
2140 +- unsigned long start, end;
2141 +-
2142 +- start = __vma_address(page, vma);
2143 +- end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);
2144 +
2145 +- if (unlikely(end < vma->vm_start || start >= vma->vm_end))
2146 ++ pvmw.address = vma_address(page, vma);
2147 ++ if (pvmw.address == -EFAULT)
2148 + return 0;
2149 +- pvmw.address = max(start, vma->vm_start);
2150 + if (!page_vma_mapped_walk(&pvmw))
2151 + return 0;
2152 + page_vma_mapped_walk_done(&pvmw);
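The rewritten walk replaces the ad-hoc bounds checks with a precomputed end address and a step_forward() helper that jumps to the next pgd/p4d/pud/pmd boundary, saturating instead of wrapping to zero when the step crosses the top of the address space. A standalone sketch of the helper plus a quick self-check:

#include <assert.h>
#include <limits.h>

/* align addr up to the next 'size' boundary; saturate on wraparound */
static unsigned long step_fwd(unsigned long addr, unsigned long size)
{
	addr = (addr + size) & ~(size - 1);
	return addr ? addr : ULONG_MAX;
}

int main(void)
{
	assert(step_fwd(0x1234, 0x1000) == 0x2000);
	/* stepping past the top of the address space must not wrap to 0 */
	assert(step_fwd(ULONG_MAX & ~0xfffUL, 0x1000) == ULONG_MAX);
	return 0;
}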
2153 +diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
2154 +index 532c29276fcee..49e8a4fbc2051 100644
2155 +--- a/mm/pgtable-generic.c
2156 ++++ b/mm/pgtable-generic.c
2157 +@@ -126,8 +126,8 @@ pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
2158 + {
2159 + pmd_t pmd;
2160 + VM_BUG_ON(address & ~HPAGE_PMD_MASK);
2161 +- VM_BUG_ON((pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
2162 +- !pmd_devmap(*pmdp)) || !pmd_present(*pmdp));
2163 ++ VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
2164 ++ !pmd_devmap(*pmdp));
2165 + pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
2166 + flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
2167 + return pmd;
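The old assertion here was broken by construction: OR-ing in !pmd_present(*pmdp) made it fire for every non-present pmd, including the migration entries this series now lets reach pmdp_huge_clear_flush(). The fix asserts only that a *present* pmd is huge or devmap. A runnable truth-table sketch:

#include <assert.h>
#include <stdbool.h>

/* old check: trips on any non-present pmd (e.g. a migration entry) */
static bool old_bug(bool present, bool huge, bool devmap)
{
	return (present && !huge && !devmap) || !present;
}

/* new check: non-present pmds pass; present ones must be huge/devmap */
static bool new_bug(bool present, bool huge, bool devmap)
{
	return present && !huge && !devmap;
}

int main(void)
{
	assert(old_bug(false, false, false));    /* migration entry: crash */
	assert(!new_bug(false, false, false));   /* migration entry: OK */
	assert(new_bug(true, false, false) == old_bug(true, false, false));
	return 0;
}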
2168 +diff --git a/mm/rmap.c b/mm/rmap.c
2169 +index 0c7b2a9400d4a..45f2106852e84 100644
2170 +--- a/mm/rmap.c
2171 ++++ b/mm/rmap.c
2172 +@@ -687,7 +687,6 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
2173 + */
2174 + unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
2175 + {
2176 +- unsigned long address;
2177 + if (PageAnon(page)) {
2178 + struct anon_vma *page__anon_vma = page_anon_vma(page);
2179 + /*
2180 +@@ -697,15 +696,13 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
2181 + if (!vma->anon_vma || !page__anon_vma ||
2182 + vma->anon_vma->root != page__anon_vma->root)
2183 + return -EFAULT;
2184 +- } else if (page->mapping) {
2185 +- if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
2186 +- return -EFAULT;
2187 +- } else
2188 ++ } else if (!vma->vm_file) {
2189 + return -EFAULT;
2190 +- address = __vma_address(page, vma);
2191 +- if (unlikely(address < vma->vm_start || address >= vma->vm_end))
2192 ++ } else if (vma->vm_file->f_mapping != compound_head(page)->mapping) {
2193 + return -EFAULT;
2194 +- return address;
2195 ++ }
2196 ++
2197 ++ return vma_address(page, vma);
2198 + }
2199 +
2200 + pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
2201 +@@ -899,7 +896,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
2202 + */
2203 + mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
2204 + 0, vma, vma->vm_mm, address,
2205 +- min(vma->vm_end, address + page_size(page)));
2206 ++ vma_address_end(page, vma));
2207 + mmu_notifier_invalidate_range_start(&range);
2208 +
2209 + while (page_vma_mapped_walk(&pvmw)) {
2210 +@@ -1353,6 +1350,15 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
2211 + struct mmu_notifier_range range;
2212 + enum ttu_flags flags = (enum ttu_flags)arg;
2213 +
2214 ++ /*
2215 ++ * When racing against e.g. zap_pte_range() on another cpu,
2216 ++ * in between its ptep_get_and_clear_full() and page_remove_rmap(),
2217 ++ * try_to_unmap() may return false when it is about to become true,
2218 ++ * if page table locking is skipped: use TTU_SYNC to wait for that.
2219 ++ */
2220 ++ if (flags & TTU_SYNC)
2221 ++ pvmw.flags = PVMW_SYNC;
2222 ++
2223 + /* munlock has nothing to gain from examining un-locked vmas */
2224 + if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
2225 + return true;
2226 +@@ -1374,9 +1380,10 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
2227 + * Note that the page can not be free in this function as call of
2228 + * try_to_unmap() must hold a reference on the page.
2229 + */
2230 ++ range.end = PageKsm(page) ?
2231 ++ address + PAGE_SIZE : vma_address_end(page, vma);
2232 + mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
2233 +- address,
2234 +- min(vma->vm_end, address + page_size(page)));
2235 ++ address, range.end);
2236 + if (PageHuge(page)) {
2237 + /*
2238 + * If sharing is possible, start and end will be adjusted
2239 +@@ -1690,9 +1697,9 @@ static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
2240 + return is_vma_temporary_stack(vma);
2241 + }
2242 +
2243 +-static int page_mapcount_is_zero(struct page *page)
2244 ++static int page_not_mapped(struct page *page)
2245 + {
2246 +- return !total_mapcount(page);
2247 ++ return !page_mapped(page);
2248 + }
2249 +
2250 + /**
2251 +@@ -1710,7 +1717,7 @@ bool try_to_unmap(struct page *page, enum ttu_flags flags)
2252 + struct rmap_walk_control rwc = {
2253 + .rmap_one = try_to_unmap_one,
2254 + .arg = (void *)flags,
2255 +- .done = page_mapcount_is_zero,
2256 ++ .done = page_not_mapped,
2257 + .anon_lock = page_lock_anon_vma_read,
2258 + };
2259 +
2260 +@@ -1731,14 +1738,15 @@ bool try_to_unmap(struct page *page, enum ttu_flags flags)
2261 + else
2262 + rmap_walk(page, &rwc);
2263 +
2264 +- return !page_mapcount(page) ? true : false;
2265 ++ /*
2266 ++ * When racing against e.g. zap_pte_range() on another cpu,
2267 ++ * in between its ptep_get_and_clear_full() and page_remove_rmap(),
2268 ++ * try_to_unmap() may return false when it is about to become true,
2269 ++ * if page table locking is skipped: use TTU_SYNC to wait for that.
2270 ++ */
2271 ++ return !page_mapcount(page);
2272 + }
2273 +
2274 +-static int page_not_mapped(struct page *page)
2275 +-{
2276 +- return !page_mapped(page);
2277 +-};
2278 +-
2279 + /**
2280 + * try_to_munlock - try to munlock a page
2281 + * @page: the page to be munlocked
2282 +@@ -1833,6 +1841,7 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
2283 + struct vm_area_struct *vma = avc->vma;
2284 + unsigned long address = vma_address(page, vma);
2285 +
2286 ++ VM_BUG_ON_VMA(address == -EFAULT, vma);
2287 + cond_resched();
2288 +
2289 + if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
2290 +@@ -1887,6 +1896,7 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
2291 + pgoff_start, pgoff_end) {
2292 + unsigned long address = vma_address(page, vma);
2293 +
2294 ++ VM_BUG_ON_VMA(address == -EFAULT, vma);
2295 + cond_resched();
2296 +
2297 + if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
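try_to_unmap() now reports page_mapcount() honestly instead of pretending its rmap walk saw everything; callers that must not race (the THP split path above) pass TTU_SYNC, which the first hunk turns into PVMW_SYNC so every pte is examined under its pte lock and a racing zap is waited out rather than miscounted. The flag plumbing, reduced to a sketch with illustrative values:

enum ttu_flags_sketch  { TTU_SYNC_SKETCH  = 1 << 0 };
enum pvmw_flags_sketch { PVMW_SYNC_SKETCH = 1 << 0 };

/*
 * sketch: try_to_unmap_one() forwards the caller's sync request to
 * the page-table walker; only the sync bit crosses the layer boundary.
 */
static unsigned int walk_flags(unsigned int ttu)
{
	return (ttu & TTU_SYNC_SKETCH) ? PVMW_SYNC_SKETCH : 0;
}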
2298 +diff --git a/mm/truncate.c b/mm/truncate.c
2299 +index dd9ebc1da3566..4d5add7d8ab6d 100644
2300 +--- a/mm/truncate.c
2301 ++++ b/mm/truncate.c
2302 +@@ -173,13 +173,10 @@ void do_invalidatepage(struct page *page, unsigned int offset,
2303 + * its lock, b) when a concurrent invalidate_mapping_pages got there first and
2304 + * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
2305 + */
2306 +-static void
2307 +-truncate_cleanup_page(struct address_space *mapping, struct page *page)
2308 ++static void truncate_cleanup_page(struct page *page)
2309 + {
2310 +- if (page_mapped(page)) {
2311 +- pgoff_t nr = PageTransHuge(page) ? HPAGE_PMD_NR : 1;
2312 +- unmap_mapping_pages(mapping, page->index, nr, false);
2313 +- }
2314 ++ if (page_mapped(page))
2315 ++ unmap_mapping_page(page);
2316 +
2317 + if (page_has_private(page))
2318 + do_invalidatepage(page, 0, PAGE_SIZE);
2319 +@@ -224,7 +221,7 @@ int truncate_inode_page(struct address_space *mapping, struct page *page)
2320 + if (page->mapping != mapping)
2321 + return -EIO;
2322 +
2323 +- truncate_cleanup_page(mapping, page);
2324 ++ truncate_cleanup_page(page);
2325 + delete_from_page_cache(page);
2326 + return 0;
2327 + }
2328 +@@ -362,7 +359,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
2329 + pagevec_add(&locked_pvec, page);
2330 + }
2331 + for (i = 0; i < pagevec_count(&locked_pvec); i++)
2332 +- truncate_cleanup_page(mapping, locked_pvec.pages[i]);
2333 ++ truncate_cleanup_page(locked_pvec.pages[i]);
2334 + delete_from_page_cache_batch(mapping, &locked_pvec);
2335 + for (i = 0; i < pagevec_count(&locked_pvec); i++)
2336 + unlock_page(locked_pvec.pages[i]);
2337 +@@ -715,6 +712,16 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
2338 + continue;
2339 + }
2340 +
2341 ++ if (!did_range_unmap && page_mapped(page)) {
2342 ++ /*
2343 ++ * If page is mapped, before taking its lock,
2344 ++ * zap the rest of the file in one hit.
2345 ++ */
2346 ++ unmap_mapping_pages(mapping, index,
2347 ++ (1 + end - index), false);
2348 ++ did_range_unmap = 1;
2349 ++ }
2350 ++
2351 + lock_page(page);
2352 + WARN_ON(page_to_index(page) != index);
2353 + if (page->mapping != mapping) {
2354 +@@ -722,23 +729,11 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
2355 + continue;
2356 + }
2357 + wait_on_page_writeback(page);
2358 +- if (page_mapped(page)) {
2359 +- if (!did_range_unmap) {
2360 +- /*
2361 +- * Zap the rest of the file in one hit.
2362 +- */
2363 +- unmap_mapping_pages(mapping, index,
2364 +- (1 + end - index), false);
2365 +- did_range_unmap = 1;
2366 +- } else {
2367 +- /*
2368 +- * Just zap this page
2369 +- */
2370 +- unmap_mapping_pages(mapping, index,
2371 +- 1, false);
2372 +- }
2373 +- }
2374 ++
2375 ++ if (page_mapped(page))
2376 ++ unmap_mapping_page(page);
2377 + BUG_ON(page_mapped(page));
2378 ++
2379 + ret2 = do_launder_page(mapping, page);
2380 + if (ret2 == 0) {
2381 + if (!invalidate_complete_page2(mapping, page))
2382 +diff --git a/net/core/ethtool.c b/net/core/ethtool.c
2383 +index 76506975d59a5..cbd1885f24592 100644
2384 +--- a/net/core/ethtool.c
2385 ++++ b/net/core/ethtool.c
2386 +@@ -1508,7 +1508,7 @@ static int ethtool_get_any_eeprom(struct net_device *dev, void __user *useraddr,
2387 + if (eeprom.offset + eeprom.len > total_len)
2388 + return -EINVAL;
2389 +
2390 +- data = kmalloc(PAGE_SIZE, GFP_USER);
2391 ++ data = kzalloc(PAGE_SIZE, GFP_USER);
2392 + if (!data)
2393 + return -ENOMEM;
2394 +
2395 +@@ -1573,7 +1573,7 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
2396 + if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
2397 + return -EINVAL;
2398 +
2399 +- data = kmalloc(PAGE_SIZE, GFP_USER);
2400 ++ data = kzalloc(PAGE_SIZE, GFP_USER);
2401 + if (!data)
2402 + return -ENOMEM;
2403 +
2404 +@@ -1764,7 +1764,7 @@ static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
2405 + return -EFAULT;
2406 +
2407 + test.len = test_len;
2408 +- data = kmalloc_array(test_len, sizeof(u64), GFP_USER);
2409 ++ data = kcalloc(test_len, sizeof(u64), GFP_USER);
2410 + if (!data)
2411 + return -ENOMEM;
2412 +
2413 +@@ -2295,7 +2295,7 @@ static int ethtool_get_tunable(struct net_device *dev, void __user *useraddr)
2414 + ret = ethtool_tunable_valid(&tuna);
2415 + if (ret)
2416 + return ret;
2417 +- data = kmalloc(tuna.len, GFP_USER);
2418 ++ data = kzalloc(tuna.len, GFP_USER);
2419 + if (!data)
2420 + return -ENOMEM;
2421 + ret = ops->get_tunable(dev, &tuna, data);
2422 +@@ -2481,7 +2481,7 @@ static int get_phy_tunable(struct net_device *dev, void __user *useraddr)
2423 + ret = ethtool_phy_tunable_valid(&tuna);
2424 + if (ret)
2425 + return ret;
2426 +- data = kmalloc(tuna.len, GFP_USER);
2427 ++ data = kzalloc(tuna.len, GFP_USER);
2428 + if (!data)
2429 + return -ENOMEM;
2430 + mutex_lock(&phydev->lock);
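All five ethtool hunks are the same hardening: these buffers are copied back to userspace, and drivers may fill them only partially, so zeroing at allocation keeps stale heap bytes from leaking. A userspace analog of the bug class (hypothetical function, calloc standing in for kzalloc):

#include <stdlib.h>
#include <string.h>

/* sketch: why zeroing matters when the consumer sees the whole buffer */
char *make_reply(const char *payload, size_t payload_len, size_t buf_len)
{
	char *buf = calloc(1, buf_len);      /* kzalloc analog */
	/* with malloc() here, bytes past payload_len would hold whatever
	 * the allocator last stored there: an information leak once the
	 * full buf_len is copied out to the peer */
	if (buf)
		memcpy(buf, payload,
		       payload_len < buf_len ? payload_len : buf_len);
	return buf;                          /* caller copies buf_len out */
}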
2431 +diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
2432 +index a27d034c85ccb..603a3495afa62 100644
2433 +--- a/net/ipv4/devinet.c
2434 ++++ b/net/ipv4/devinet.c
2435 +@@ -1989,7 +1989,7 @@ static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla)
2436 + return -EAFNOSUPPORT;
2437 +
2438 + if (nla_parse_nested_deprecated(tb, IFLA_INET_MAX, nla, NULL, NULL) < 0)
2439 +- BUG();
2440 ++ return -EINVAL;
2441 +
2442 + if (tb[IFLA_INET_CONF]) {
2443 + nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
2444 +diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
2445 +index df6fbefe44d4b..1c3d5d3702a10 100644
2446 +--- a/net/ipv4/ping.c
2447 ++++ b/net/ipv4/ping.c
2448 +@@ -963,6 +963,7 @@ bool ping_rcv(struct sk_buff *skb)
2449 + struct sock *sk;
2450 + struct net *net = dev_net(skb->dev);
2451 + struct icmphdr *icmph = icmp_hdr(skb);
2452 ++ bool rc = false;
2453 +
2454 + /* We assume the packet has already been checked by icmp_rcv */
2455 +
2456 +@@ -977,14 +978,15 @@ bool ping_rcv(struct sk_buff *skb)
2457 + struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2458 +
2459 + pr_debug("rcv on socket %p\n", sk);
2460 +- if (skb2)
2461 +- ping_queue_rcv_skb(sk, skb2);
2462 ++ if (skb2 && !ping_queue_rcv_skb(sk, skb2))
2463 ++ rc = true;
2464 + sock_put(sk);
2465 +- return true;
2466 + }
2467 +- pr_debug("no socket, dropping\n");
2468 +
2469 +- return false;
2470 ++ if (!rc)
2471 ++ pr_debug("no socket, dropping\n");
2472 ++
2473 ++ return rc;
2474 + }
2475 + EXPORT_SYMBOL_GPL(ping_rcv);
2476 +
2477 +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
2478 +index 52feab2baeee5..366c3792b8604 100644
2479 +--- a/net/ipv6/addrconf.c
2480 ++++ b/net/ipv6/addrconf.c
2481 +@@ -5761,7 +5761,7 @@ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
2482 + return -EAFNOSUPPORT;
2483 +
2484 + if (nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0)
2485 +- BUG();
2486 ++ return -EINVAL;
2487 +
2488 + if (tb[IFLA_INET6_TOKEN]) {
2489 + err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]));
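Both address-family handlers (here and in devinet.c above) used to BUG() if the attribute failed to re-parse, turning malformed netlink input into a machine halt; returning -EINVAL fails just the request. The shape of the fix, with stand-in types (nla_parse and nlattr_tb here are stubs, not the kernel API):

#include <errno.h>

struct nlattr;                       /* opaque, as in the kernel */
struct nlattr_tb;                    /* hypothetical parsed table */
int nla_parse(struct nlattr_tb *tb, const struct nlattr *nla);

/* sketch: fail the request instead of halting the machine */
int set_link_af(struct nlattr_tb *tb, const struct nlattr *nla)
{
	if (nla_parse(tb, nla) < 0)
		return -EINVAL;      /* was: BUG() */
	/* ... apply per-family settings ... */
	return 0;
}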
2490 +diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
2491 +index a7933279a80b7..e574fbf6745a4 100644
2492 +--- a/net/mac80211/ieee80211_i.h
2493 ++++ b/net/mac80211/ieee80211_i.h
2494 +@@ -1420,7 +1420,7 @@ ieee80211_get_sband(struct ieee80211_sub_if_data *sdata)
2495 + rcu_read_lock();
2496 + chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
2497 +
2498 +- if (WARN_ON_ONCE(!chanctx_conf)) {
2499 ++ if (!chanctx_conf) {
2500 + rcu_read_unlock();
2501 + return NULL;
2502 + }
2503 +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
2504 +index 3d7a5c5e586a6..670d84e54db73 100644
2505 +--- a/net/mac80211/rx.c
2506 ++++ b/net/mac80211/rx.c
2507 +@@ -2200,17 +2200,15 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
2508 + sc = le16_to_cpu(hdr->seq_ctrl);
2509 + frag = sc & IEEE80211_SCTL_FRAG;
2510 +
2511 +- if (is_multicast_ether_addr(hdr->addr1)) {
2512 +- I802_DEBUG_INC(rx->local->dot11MulticastReceivedFrameCount);
2513 +- goto out_no_led;
2514 +- }
2515 +-
2516 + if (rx->sta)
2517 + cache = &rx->sta->frags;
2518 +
2519 + if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
2520 + goto out;
2521 +
2522 ++ if (is_multicast_ether_addr(hdr->addr1))
2523 ++ return RX_DROP_MONITOR;
2524 ++
2525 + I802_DEBUG_INC(rx->local->rx_handlers_fragments);
2526 +
2527 + if (skb_linearize(rx->skb))
2528 +@@ -2336,7 +2334,6 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
2529 +
2530 + out:
2531 + ieee80211_led_rx(rx->local);
2532 +- out_no_led:
2533 + if (rx->sta)
2534 + rx->sta->rx_stats.packets++;
2535 + return RX_CONTINUE;
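Reordering these checks keeps unfragmented frames (frag == 0, no morefrags bit) on the fast path, but a *fragmented* group-addressed frame is now dropped outright: 802.11 never validly fragments multicast, so accepting such frames only widened the defragmentation attack surface. Control flow, reduced to a sketch:

#include <stdbool.h>

/* sketch of the reordered ieee80211_rx_h_defragment() entry checks */
int defrag_rx(bool morefrags, unsigned int frag, bool multicast)
{
	if (!morefrags && frag == 0)
		return 0;    /* fast path: frame is not fragmented */
	if (multicast)
		return -1;   /* RX_DROP_MONITOR: never valid */
	/* ... reassemble unicast fragments via the per-sta cache ... */
	return 0;
}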
2536 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
2537 +index fbc2d4dfddf0e..0ffbf3d17911a 100644
2538 +--- a/net/packet/af_packet.c
2539 ++++ b/net/packet/af_packet.c
2540 +@@ -2656,7 +2656,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2541 + }
2542 + if (likely(saddr == NULL)) {
2543 + dev = packet_cached_dev_get(po);
2544 +- proto = po->num;
2545 ++ proto = READ_ONCE(po->num);
2546 + } else {
2547 + err = -EINVAL;
2548 + if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2549 +@@ -2869,7 +2869,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2550 +
2551 + if (likely(saddr == NULL)) {
2552 + dev = packet_cached_dev_get(po);
2553 +- proto = po->num;
2554 ++ proto = READ_ONCE(po->num);
2555 + } else {
2556 + err = -EINVAL;
2557 + if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2558 +@@ -3141,7 +3141,7 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
2559 + /* prevents packet_notifier() from calling
2560 + * register_prot_hook()
2561 + */
2562 +- po->num = 0;
2563 ++ WRITE_ONCE(po->num, 0);
2564 + __unregister_prot_hook(sk, true);
2565 + rcu_read_lock();
2566 + dev_curr = po->prot_hook.dev;
2567 +@@ -3151,17 +3151,17 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
2568 + }
2569 +
2570 + BUG_ON(po->running);
2571 +- po->num = proto;
2572 ++ WRITE_ONCE(po->num, proto);
2573 + po->prot_hook.type = proto;
2574 +
2575 + if (unlikely(unlisted)) {
2576 + dev_put(dev);
2577 + po->prot_hook.dev = NULL;
2578 +- po->ifindex = -1;
2579 ++ WRITE_ONCE(po->ifindex, -1);
2580 + packet_cached_dev_reset(po);
2581 + } else {
2582 + po->prot_hook.dev = dev;
2583 +- po->ifindex = dev ? dev->ifindex : 0;
2584 ++ WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0);
2585 + packet_cached_dev_assign(po, dev);
2586 + }
2587 + }
2588 +@@ -3475,7 +3475,7 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
2589 + uaddr->sa_family = AF_PACKET;
2590 + memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
2591 + rcu_read_lock();
2592 +- dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
2593 ++ dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex));
2594 + if (dev)
2595 + strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
2596 + rcu_read_unlock();
2597 +@@ -3490,16 +3490,18 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
2598 + struct sock *sk = sock->sk;
2599 + struct packet_sock *po = pkt_sk(sk);
2600 + DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
2601 ++ int ifindex;
2602 +
2603 + if (peer)
2604 + return -EOPNOTSUPP;
2605 +
2606 ++ ifindex = READ_ONCE(po->ifindex);
2607 + sll->sll_family = AF_PACKET;
2608 +- sll->sll_ifindex = po->ifindex;
2609 +- sll->sll_protocol = po->num;
2610 ++ sll->sll_ifindex = ifindex;
2611 ++ sll->sll_protocol = READ_ONCE(po->num);
2612 + sll->sll_pkttype = 0;
2613 + rcu_read_lock();
2614 +- dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
2615 ++ dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
2616 + if (dev) {
2617 + sll->sll_hatype = dev->type;
2618 + sll->sll_halen = dev->addr_len;
2619 +@@ -4099,7 +4101,7 @@ static int packet_notifier(struct notifier_block *this,
2620 + }
2621 + if (msg == NETDEV_UNREGISTER) {
2622 + packet_cached_dev_reset(po);
2623 +- po->ifindex = -1;
2624 ++ WRITE_ONCE(po->ifindex, -1);
2625 + if (po->prot_hook.dev)
2626 + dev_put(po->prot_hook.dev);
2627 + po->prot_hook.dev = NULL;
2628 +@@ -4405,7 +4407,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
2629 + was_running = po->running;
2630 + num = po->num;
2631 + if (was_running) {
2632 +- po->num = 0;
2633 ++ WRITE_ONCE(po->num, 0);
2634 + __unregister_prot_hook(sk, false);
2635 + }
2636 + spin_unlock(&po->bind_lock);
2637 +@@ -4440,7 +4442,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
2638 +
2639 + spin_lock(&po->bind_lock);
2640 + if (was_running) {
2641 +- po->num = num;
2642 ++ WRITE_ONCE(po->num, num);
2643 + register_prot_hook(sk);
2644 + }
2645 + spin_unlock(&po->bind_lock);
2646 +@@ -4613,8 +4615,8 @@ static int packet_seq_show(struct seq_file *seq, void *v)
2647 + s,
2648 + refcount_read(&s->sk_refcnt),
2649 + s->sk_type,
2650 +- ntohs(po->num),
2651 +- po->ifindex,
2652 ++ ntohs(READ_ONCE(po->num)),
2653 ++ READ_ONCE(po->ifindex),
2654 + po->running,
2655 + atomic_read(&s->sk_rmem_alloc),
2656 + from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
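po->num and po->ifindex are written under po->bind_lock but read locklessly from the send paths, getname and procfs; the READ_ONCE/WRITE_ONCE pairs add no ordering, they just guarantee each access is a single untorn load or store (and document the data race for KCSAN). The C11 equivalent is a relaxed atomic:

#include <stdatomic.h>

static _Atomic unsigned short proto;   /* po->num analog */

/* writer side, under the bind lock in the real code */
void bind_proto(unsigned short num)
{
	atomic_store_explicit(&proto, num, memory_order_relaxed);
}

/* lockless reader: guaranteed untorn, no ordering implied */
unsigned short snapshot_proto(void)
{
	return atomic_load_explicit(&proto, memory_order_relaxed);
}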
2657 +diff --git a/net/wireless/util.c b/net/wireless/util.c
2658 +index 4eae6ad328514..f0247eab5bc94 100644
2659 +--- a/net/wireless/util.c
2660 ++++ b/net/wireless/util.c
2661 +@@ -1006,6 +1006,9 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
2662 + case NL80211_IFTYPE_MESH_POINT:
2663 + /* mesh should be handled? */
2664 + break;
2665 ++ case NL80211_IFTYPE_OCB:
2666 ++ cfg80211_leave_ocb(rdev, dev);
2667 ++ break;
2668 + default:
2669 + break;
2670 + }
2671 +diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
2672 +index f9b19524da112..1e9baa5c4fc6e 100644
2673 +--- a/scripts/recordmcount.h
2674 ++++ b/scripts/recordmcount.h
2675 +@@ -192,15 +192,20 @@ static unsigned int get_symindex(Elf_Sym const *sym, Elf32_Word const *symtab,
2676 + Elf32_Word const *symtab_shndx)
2677 + {
2678 + unsigned long offset;
2679 ++ unsigned short shndx = w2(sym->st_shndx);
2680 + int index;
2681 +
2682 +- if (sym->st_shndx != SHN_XINDEX)
2683 +- return w2(sym->st_shndx);
2684 ++ if (shndx > SHN_UNDEF && shndx < SHN_LORESERVE)
2685 ++ return shndx;
2686 +
2687 +- offset = (unsigned long)sym - (unsigned long)symtab;
2688 +- index = offset / sizeof(*sym);
2689 ++ if (shndx == SHN_XINDEX) {
2690 ++ offset = (unsigned long)sym - (unsigned long)symtab;
2691 ++ index = offset / sizeof(*sym);
2692 +
2693 +- return w(symtab_shndx[index]);
2694 ++ return w(symtab_shndx[index]);
2695 ++ }
2696 ++
2697 ++ return 0;
2698 + }
2699 +
2700 + static unsigned int get_shnum(Elf_Ehdr const *ehdr, Elf_Shdr const *shdr0)
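st_shndx is only a real section index when it lies strictly between SHN_UNDEF and SHN_LORESERVE; reserved values (SHN_ABS, SHN_COMMON, ...) must not be used as table indices, and only SHN_XINDEX redirects to the SHT_SYMTAB_SHNDX look-aside table. A standalone version of the corrected lookup (32-bit variant for brevity):

#include <elf.h>

/* sketch: resolve a symbol's section index, honoring reserved values */
unsigned int sym_shndx(const Elf32_Sym *sym,
		       const Elf32_Sym *symtab,
		       const Elf32_Word *symtab_shndx)
{
	unsigned short shndx = sym->st_shndx;

	if (shndx > SHN_UNDEF && shndx < SHN_LORESERVE)
		return shndx;                      /* ordinary index */
	if (shndx == SHN_XINDEX)                   /* escape: look aside */
		return symtab_shndx[sym - symtab];
	return 0;        /* SHN_UNDEF or reserved (SHN_ABS, SHN_COMMON) */
}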
2701 +diff --git a/security/integrity/Makefile b/security/integrity/Makefile
2702 +index 35e6ca7737346..351c9662994b5 100644
2703 +--- a/security/integrity/Makefile
2704 ++++ b/security/integrity/Makefile
2705 +@@ -11,7 +11,8 @@ integrity-$(CONFIG_INTEGRITY_SIGNATURE) += digsig.o
2706 + integrity-$(CONFIG_INTEGRITY_ASYMMETRIC_KEYS) += digsig_asymmetric.o
2707 + integrity-$(CONFIG_INTEGRITY_PLATFORM_KEYRING) += platform_certs/platform_keyring.o
2708 + integrity-$(CONFIG_LOAD_UEFI_KEYS) += platform_certs/efi_parser.o \
2709 +- platform_certs/load_uefi.o
2710 ++ platform_certs/load_uefi.o \
2711 ++ platform_certs/keyring_handler.o
2712 + integrity-$(CONFIG_LOAD_IPL_KEYS) += platform_certs/load_ipl_s390.o
2713 +
2714 + obj-$(CONFIG_IMA) += ima/
2715 +diff --git a/security/integrity/platform_certs/keyring_handler.c b/security/integrity/platform_certs/keyring_handler.c
2716 +new file mode 100644
2717 +index 0000000000000..5604bd57c9907
2718 +--- /dev/null
2719 ++++ b/security/integrity/platform_certs/keyring_handler.c
2720 +@@ -0,0 +1,91 @@
2721 ++// SPDX-License-Identifier: GPL-2.0
2722 ++
2723 ++#include <linux/kernel.h>
2724 ++#include <linux/sched.h>
2725 ++#include <linux/cred.h>
2726 ++#include <linux/err.h>
2727 ++#include <linux/efi.h>
2728 ++#include <linux/slab.h>
2729 ++#include <keys/asymmetric-type.h>
2730 ++#include <keys/system_keyring.h>
2731 ++#include "../integrity.h"
2732 ++
2733 ++static efi_guid_t efi_cert_x509_guid __initdata = EFI_CERT_X509_GUID;
2734 ++static efi_guid_t efi_cert_x509_sha256_guid __initdata =
2735 ++ EFI_CERT_X509_SHA256_GUID;
2736 ++static efi_guid_t efi_cert_sha256_guid __initdata = EFI_CERT_SHA256_GUID;
2737 ++
2738 ++/*
2739 ++ * Blacklist a hash.
2740 ++ */
2741 ++static __init void uefi_blacklist_hash(const char *source, const void *data,
2742 ++ size_t len, const char *type,
2743 ++ size_t type_len)
2744 ++{
2745 ++ char *hash, *p;
2746 ++
2747 ++ hash = kmalloc(type_len + len * 2 + 1, GFP_KERNEL);
2748 ++ if (!hash)
2749 ++ return;
2750 ++ p = memcpy(hash, type, type_len);
2751 ++ p += type_len;
2752 ++ bin2hex(p, data, len);
2753 ++ p += len * 2;
2754 ++ *p = 0;
2755 ++
2756 ++ mark_hash_blacklisted(hash);
2757 ++ kfree(hash);
2758 ++}
2759 ++
2760 ++/*
2761 ++ * Blacklist an X509 TBS hash.
2762 ++ */
2763 ++static __init void uefi_blacklist_x509_tbs(const char *source,
2764 ++ const void *data, size_t len)
2765 ++{
2766 ++ uefi_blacklist_hash(source, data, len, "tbs:", 4);
2767 ++}
2768 ++
2769 ++/*
2770 ++ * Blacklist the hash of an executable.
2771 ++ */
2772 ++static __init void uefi_blacklist_binary(const char *source,
2773 ++ const void *data, size_t len)
2774 ++{
2775 ++ uefi_blacklist_hash(source, data, len, "bin:", 4);
2776 ++}
2777 ++
2778 ++/*
2779 ++ * Add an X509 cert to the revocation list.
2780 ++ */
2781 ++static __init void uefi_revocation_list_x509(const char *source,
2782 ++ const void *data, size_t len)
2783 ++{
2784 ++ add_key_to_revocation_list(data, len);
2785 ++}
2786 ++
2787 ++/*
2788 ++ * Return the appropriate handler for particular signature list types found in
2789 ++ * the UEFI db and MokListRT tables.
2790 ++ */
2791 ++__init efi_element_handler_t get_handler_for_db(const efi_guid_t *sig_type)
2792 ++{
2793 ++ if (efi_guidcmp(*sig_type, efi_cert_x509_guid) == 0)
2794 ++ return add_to_platform_keyring;
2795 ++ return 0;
2796 ++}
2797 ++
2798 ++/*
2799 ++ * Return the appropriate handler for particular signature list types found in
2800 ++ * the UEFI dbx and MokListXRT tables.
2801 ++ */
2802 ++__init efi_element_handler_t get_handler_for_dbx(const efi_guid_t *sig_type)
2803 ++{
2804 ++ if (efi_guidcmp(*sig_type, efi_cert_x509_sha256_guid) == 0)
2805 ++ return uefi_blacklist_x509_tbs;
2806 ++ if (efi_guidcmp(*sig_type, efi_cert_sha256_guid) == 0)
2807 ++ return uefi_blacklist_binary;
2808 ++ if (efi_guidcmp(*sig_type, efi_cert_x509_guid) == 0)
2809 ++ return uefi_revocation_list_x509;
2810 ++ return 0;
2811 ++}
2812 +diff --git a/security/integrity/platform_certs/keyring_handler.h b/security/integrity/platform_certs/keyring_handler.h
2813 +new file mode 100644
2814 +index 0000000000000..2462bfa08fe34
2815 +--- /dev/null
2816 ++++ b/security/integrity/platform_certs/keyring_handler.h
2817 +@@ -0,0 +1,32 @@
2818 ++/* SPDX-License-Identifier: GPL-2.0 */
2819 ++
2820 ++#ifndef PLATFORM_CERTS_INTERNAL_H
2821 ++#define PLATFORM_CERTS_INTERNAL_H
2822 ++
2823 ++#include <linux/efi.h>
2824 ++
2825 ++void blacklist_hash(const char *source, const void *data,
2826 ++ size_t len, const char *type,
2827 ++ size_t type_len);
2828 ++
2829 ++/*
2830 ++ * Blacklist an X509 TBS hash.
2831 ++ */
2832 ++void blacklist_x509_tbs(const char *source, const void *data, size_t len);
2833 ++
2834 ++/*
2835 ++ * Blacklist the hash of an executable.
2836 ++ */
2837 ++void blacklist_binary(const char *source, const void *data, size_t len);
2838 ++
2839 ++/*
2840 ++ * Return the handler for particular signature list types found in the db.
2841 ++ */
2842 ++efi_element_handler_t get_handler_for_db(const efi_guid_t *sig_type);
2843 ++
2844 ++/*
2845 ++ * Return the handler for particular signature list types found in the dbx.
2846 ++ */
2847 ++efi_element_handler_t get_handler_for_dbx(const efi_guid_t *sig_type);
2848 ++
2849 ++#endif
2850 +diff --git a/security/integrity/platform_certs/load_uefi.c b/security/integrity/platform_certs/load_uefi.c
2851 +index 020fc7a11ef0e..aa874d84e413e 100644
2852 +--- a/security/integrity/platform_certs/load_uefi.c
2853 ++++ b/security/integrity/platform_certs/load_uefi.c
2854 +@@ -9,6 +9,7 @@
2855 + #include <keys/asymmetric-type.h>
2856 + #include <keys/system_keyring.h>
2857 + #include "../integrity.h"
2858 ++#include "keyring_handler.h"
2859 +
2860 + static efi_guid_t efi_cert_x509_guid __initdata = EFI_CERT_X509_GUID;
2861 + static efi_guid_t efi_cert_x509_sha256_guid __initdata =
2862 +@@ -69,72 +70,6 @@ static __init void *get_cert_list(efi_char16_t *name, efi_guid_t *guid,
2863 + return db;
2864 + }
2865 +
2866 +-/*
2867 +- * Blacklist a hash.
2868 +- */
2869 +-static __init void uefi_blacklist_hash(const char *source, const void *data,
2870 +- size_t len, const char *type,
2871 +- size_t type_len)
2872 +-{
2873 +- char *hash, *p;
2874 +-
2875 +- hash = kmalloc(type_len + len * 2 + 1, GFP_KERNEL);
2876 +- if (!hash)
2877 +- return;
2878 +- p = memcpy(hash, type, type_len);
2879 +- p += type_len;
2880 +- bin2hex(p, data, len);
2881 +- p += len * 2;
2882 +- *p = 0;
2883 +-
2884 +- mark_hash_blacklisted(hash);
2885 +- kfree(hash);
2886 +-}
2887 +-
2888 +-/*
2889 +- * Blacklist an X509 TBS hash.
2890 +- */
2891 +-static __init void uefi_blacklist_x509_tbs(const char *source,
2892 +- const void *data, size_t len)
2893 +-{
2894 +- uefi_blacklist_hash(source, data, len, "tbs:", 4);
2895 +-}
2896 +-
2897 +-/*
2898 +- * Blacklist the hash of an executable.
2899 +- */
2900 +-static __init void uefi_blacklist_binary(const char *source,
2901 +- const void *data, size_t len)
2902 +-{
2903 +- uefi_blacklist_hash(source, data, len, "bin:", 4);
2904 +-}
2905 +-
2906 +-/*
2907 +- * Return the appropriate handler for particular signature list types found in
2908 +- * the UEFI db and MokListRT tables.
2909 +- */
2910 +-static __init efi_element_handler_t get_handler_for_db(const efi_guid_t *
2911 +- sig_type)
2912 +-{
2913 +- if (efi_guidcmp(*sig_type, efi_cert_x509_guid) == 0)
2914 +- return add_to_platform_keyring;
2915 +- return 0;
2916 +-}
2917 +-
2918 +-/*
2919 +- * Return the appropriate handler for particular signature list types found in
2920 +- * the UEFI dbx and MokListXRT tables.
2921 +- */
2922 +-static __init efi_element_handler_t get_handler_for_dbx(const efi_guid_t *
2923 +- sig_type)
2924 +-{
2925 +- if (efi_guidcmp(*sig_type, efi_cert_x509_sha256_guid) == 0)
2926 +- return uefi_blacklist_x509_tbs;
2927 +- if (efi_guidcmp(*sig_type, efi_cert_sha256_guid) == 0)
2928 +- return uefi_blacklist_binary;
2929 +- return 0;
2930 +-}
2931 +-
2932 + /*
2933 + * Load the certs contained in the UEFI databases into the platform trusted
2934 + * keyring and the UEFI blacklisted X.509 cert SHA256 hashes into the blacklist
2935 +diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
2936 +index 41cf45416060f..38de88e5ffbb2 100644
2937 +--- a/tools/testing/selftests/kvm/lib/kvm_util.c
2938 ++++ b/tools/testing/selftests/kvm/lib/kvm_util.c
2939 +@@ -54,7 +54,7 @@ int kvm_check_cap(long cap)
2940 + exit(KSFT_SKIP);
2941 +
2942 + ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap);
2943 +- TEST_ASSERT(ret != -1, "KVM_CHECK_EXTENSION IOCTL failed,\n"
2944 ++ TEST_ASSERT(ret >= 0, "KVM_CHECK_EXTENSION IOCTL failed,\n"
2945 + " rc: %i errno: %i", ret, errno);
2946 +
2947 + close(kvm_fd);
2948 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
2949 +index f83fa0aeeb451..b2287e7d3ba4a 100644
2950 +--- a/virt/kvm/kvm_main.c
2951 ++++ b/virt/kvm/kvm_main.c
2952 +@@ -1593,6 +1593,13 @@ static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
2953 + return true;
2954 + }
2955 +
2956 ++static int kvm_try_get_pfn(kvm_pfn_t pfn)
2957 ++{
2958 ++ if (kvm_is_reserved_pfn(pfn))
2959 ++ return 1;
2960 ++ return get_page_unless_zero(pfn_to_page(pfn));
2961 ++}
2962 ++
2963 + static int hva_to_pfn_remapped(struct vm_area_struct *vma,
2964 + unsigned long addr, bool *async,
2965 + bool write_fault, bool *writable,
2966 +@@ -1642,13 +1649,21 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
2967 + * Whoever called remap_pfn_range is also going to call e.g.
2968 + * unmap_mapping_range before the underlying pages are freed,
2969 + * causing a call to our MMU notifier.
2970 ++ *
2971 ++ * Certain IO or PFNMAP mappings can be backed with valid
2972 ++ * struct pages, but be allocated without refcounting e.g.,
2973 ++ * tail pages of non-compound higher order allocations, which
2974 ++ * would then underflow the refcount when the caller does the
2975 ++ * required put_page. Don't allow those pages here.
2976 + */
2977 +- kvm_get_pfn(pfn);
2978 ++ if (!kvm_try_get_pfn(pfn))
2979 ++ r = -EFAULT;
2980 +
2981 + out:
2982 + pte_unmap_unlock(ptep, ptl);
2983 + *p_pfn = pfn;
2984 +- return 0;
2985 ++
2986 ++ return r;
2987 + }
2988 +
2989 + /*
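The old kvm_get_pfn() in hva_to_pfn_remapped() bumped the refcount unconditionally, which later underflows on put_page() for PFNMAP pages that have a struct page but were never refcounted, such as tail pages of non-compound higher-order driver allocations. get_page_unless_zero() only takes a reference if one already exists; its core is an atomic increment-unless-zero, sketched here with C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>

/* sketch of get_page_unless_zero(): take a reference only if the
 * object is still live (refcount > 0); never resurrect a freed one */
bool ref_get_unless_zero(_Atomic int *refcount)
{
	int old = atomic_load_explicit(refcount, memory_order_relaxed);

	while (old != 0) {
		if (atomic_compare_exchange_weak(refcount, &old, old + 1))
			return true;   /* reference taken */
	}
	return false;                  /* caller sees -EFAULT, as above */
}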