
From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Tue, 23 Feb 2021 14:31:34
Message-Id: 1614090664.8bc0f0a9365ed33be2d79d652aa3d13faf58ff7f.alicef@gentoo
1 commit: 8bc0f0a9365ed33be2d79d652aa3d13faf58ff7f
2 Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
3 AuthorDate: Tue Feb 23 14:30:48 2021 +0000
4 Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
5 CommitDate: Tue Feb 23 14:31:04 2021 +0000
6 URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=8bc0f0a9365ed33be2d79d652aa3d13faf58ff7f
7
8 Linux patch 4.19.177
9
10 Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>
11
12 0000_README | 4 +
13 1176_linux-4.19.177.patch | 1506 +++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 1510 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 4101b37..310ffed 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -743,6 +743,10 @@ Patch: 1175_linux-4.19.176.patch
21 From: https://www.kernel.org
22 Desc: Linux 4.19.176
23
24 +Patch: 1176_linux-4.19.177.patch
25 +From: https://www.kernel.org
26 +Desc: Linux 4.19.177
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1176_linux-4.19.177.patch b/1176_linux-4.19.177.patch
33 new file mode 100644
34 index 0000000..6692875
35 --- /dev/null
36 +++ b/1176_linux-4.19.177.patch
37 @@ -0,0 +1,1506 @@
38 +diff --git a/Makefile b/Makefile
39 +index 6bebe3b22b452..74b9258e7d2ce 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 4
45 + PATCHLEVEL = 19
46 +-SUBLEVEL = 176
47 ++SUBLEVEL = 177
48 + EXTRAVERSION =
49 + NAME = "People's Front"
50 +
51 +diff --git a/arch/arm/boot/dts/lpc32xx.dtsi b/arch/arm/boot/dts/lpc32xx.dtsi
52 +index 9ad3df11db0d8..abef034987a27 100644
53 +--- a/arch/arm/boot/dts/lpc32xx.dtsi
54 ++++ b/arch/arm/boot/dts/lpc32xx.dtsi
55 +@@ -323,9 +323,6 @@
56 +
57 + clocks = <&xtal_32k>, <&xtal>;
58 + clock-names = "xtal_32k", "xtal";
59 +-
60 +- assigned-clocks = <&clk LPC32XX_CLK_HCLK_PLL>;
61 +- assigned-clock-rates = <208000000>;
62 + };
63 + };
64 +
65 +diff --git a/arch/arm/include/asm/kexec-internal.h b/arch/arm/include/asm/kexec-internal.h
66 +new file mode 100644
67 +index 0000000000000..ecc2322db7aa1
68 +--- /dev/null
69 ++++ b/arch/arm/include/asm/kexec-internal.h
70 +@@ -0,0 +1,12 @@
71 ++/* SPDX-License-Identifier: GPL-2.0 */
72 ++#ifndef _ARM_KEXEC_INTERNAL_H
73 ++#define _ARM_KEXEC_INTERNAL_H
74 ++
75 ++struct kexec_relocate_data {
76 ++ unsigned long kexec_start_address;
77 ++ unsigned long kexec_indirection_page;
78 ++ unsigned long kexec_mach_type;
79 ++ unsigned long kexec_r2;
80 ++};
81 ++
82 ++#endif
83 +diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
84 +index 3968d6c22455b..ae85f67a63520 100644
85 +--- a/arch/arm/kernel/asm-offsets.c
86 ++++ b/arch/arm/kernel/asm-offsets.c
87 +@@ -18,6 +18,7 @@
88 + #include <linux/kvm_host.h>
89 + #endif
90 + #include <asm/cacheflush.h>
91 ++#include <asm/kexec-internal.h>
92 + #include <asm/glue-df.h>
93 + #include <asm/glue-pf.h>
94 + #include <asm/mach/arch.h>
95 +@@ -189,5 +190,9 @@ int main(void)
96 + DEFINE(MPU_RGN_PRBAR, offsetof(struct mpu_rgn, prbar));
97 + DEFINE(MPU_RGN_PRLAR, offsetof(struct mpu_rgn, prlar));
98 + #endif
99 ++ DEFINE(KEXEC_START_ADDR, offsetof(struct kexec_relocate_data, kexec_start_address));
100 ++ DEFINE(KEXEC_INDIR_PAGE, offsetof(struct kexec_relocate_data, kexec_indirection_page));
101 ++ DEFINE(KEXEC_MACH_TYPE, offsetof(struct kexec_relocate_data, kexec_mach_type));
102 ++ DEFINE(KEXEC_R2, offsetof(struct kexec_relocate_data, kexec_r2));
103 + return 0;
104 + }
105 +diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
106 +index 76300f3813e89..734adeb42df87 100644
107 +--- a/arch/arm/kernel/machine_kexec.c
108 ++++ b/arch/arm/kernel/machine_kexec.c
109 +@@ -15,6 +15,7 @@
110 + #include <asm/pgalloc.h>
111 + #include <asm/mmu_context.h>
112 + #include <asm/cacheflush.h>
113 ++#include <asm/kexec-internal.h>
114 + #include <asm/fncpy.h>
115 + #include <asm/mach-types.h>
116 + #include <asm/smp_plat.h>
117 +@@ -24,11 +25,6 @@
118 + extern void relocate_new_kernel(void);
119 + extern const unsigned int relocate_new_kernel_size;
120 +
121 +-extern unsigned long kexec_start_address;
122 +-extern unsigned long kexec_indirection_page;
123 +-extern unsigned long kexec_mach_type;
124 +-extern unsigned long kexec_boot_atags;
125 +-
126 + static atomic_t waiting_for_crash_ipi;
127 +
128 + /*
129 +@@ -161,6 +157,7 @@ void (*kexec_reinit)(void);
130 + void machine_kexec(struct kimage *image)
131 + {
132 + unsigned long page_list, reboot_entry_phys;
133 ++ struct kexec_relocate_data *data;
134 + void (*reboot_entry)(void);
135 + void *reboot_code_buffer;
136 +
137 +@@ -176,18 +173,17 @@ void machine_kexec(struct kimage *image)
138 +
139 + reboot_code_buffer = page_address(image->control_code_page);
140 +
141 +- /* Prepare parameters for reboot_code_buffer*/
142 +- set_kernel_text_rw();
143 +- kexec_start_address = image->start;
144 +- kexec_indirection_page = page_list;
145 +- kexec_mach_type = machine_arch_type;
146 +- kexec_boot_atags = image->arch.kernel_r2;
147 +-
148 + /* copy our kernel relocation code to the control code page */
149 + reboot_entry = fncpy(reboot_code_buffer,
150 + &relocate_new_kernel,
151 + relocate_new_kernel_size);
152 +
153 ++ data = reboot_code_buffer + relocate_new_kernel_size;
154 ++ data->kexec_start_address = image->start;
155 ++ data->kexec_indirection_page = page_list;
156 ++ data->kexec_mach_type = machine_arch_type;
157 ++ data->kexec_r2 = image->arch.kernel_r2;
158 ++
159 + /* get the identity mapping physical address for the reboot code */
160 + reboot_entry_phys = virt_to_idmap(reboot_entry);
161 +
162 +diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S
163 +index 7eaa2ae7aff58..5e15b5912cb05 100644
164 +--- a/arch/arm/kernel/relocate_kernel.S
165 ++++ b/arch/arm/kernel/relocate_kernel.S
166 +@@ -5,14 +5,16 @@
167 +
168 + #include <linux/linkage.h>
169 + #include <asm/assembler.h>
170 ++#include <asm/asm-offsets.h>
171 + #include <asm/kexec.h>
172 +
173 + .align 3 /* not needed for this code, but keeps fncpy() happy */
174 +
175 + ENTRY(relocate_new_kernel)
176 +
177 +- ldr r0,kexec_indirection_page
178 +- ldr r1,kexec_start_address
179 ++ adr r7, relocate_new_kernel_end
180 ++ ldr r0, [r7, #KEXEC_INDIR_PAGE]
181 ++ ldr r1, [r7, #KEXEC_START_ADDR]
182 +
183 + /*
184 + * If there is no indirection page (we are doing crashdumps)
185 +@@ -57,34 +59,16 @@ ENTRY(relocate_new_kernel)
186 +
187 + 2:
188 + /* Jump to relocated kernel */
189 +- mov lr,r1
190 +- mov r0,#0
191 +- ldr r1,kexec_mach_type
192 +- ldr r2,kexec_boot_atags
193 +- ARM( ret lr )
194 +- THUMB( bx lr )
195 +-
196 +- .align
197 +-
198 +- .globl kexec_start_address
199 +-kexec_start_address:
200 +- .long 0x0
201 +-
202 +- .globl kexec_indirection_page
203 +-kexec_indirection_page:
204 +- .long 0x0
205 +-
206 +- .globl kexec_mach_type
207 +-kexec_mach_type:
208 +- .long 0x0
209 +-
210 +- /* phy addr of the atags for the new kernel */
211 +- .globl kexec_boot_atags
212 +-kexec_boot_atags:
213 +- .long 0x0
214 ++ mov lr, r1
215 ++ mov r0, #0
216 ++ ldr r1, [r7, #KEXEC_MACH_TYPE]
217 ++ ldr r2, [r7, #KEXEC_R2]
218 ++ ARM( ret lr )
219 ++ THUMB( bx lr )
220 +
221 + ENDPROC(relocate_new_kernel)
222 +
223 ++ .align 3
224 + relocate_new_kernel_end:
225 +
226 + .globl relocate_new_kernel_size
227 +diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
228 +index b908382b69ff5..1c01358b9b6db 100644
229 +--- a/arch/arm/kernel/signal.c
230 ++++ b/arch/arm/kernel/signal.c
231 +@@ -697,18 +697,20 @@ struct page *get_signal_page(void)
232 +
233 + addr = page_address(page);
234 +
235 ++ /* Poison the entire page */
236 ++ memset32(addr, __opcode_to_mem_arm(0xe7fddef1),
237 ++ PAGE_SIZE / sizeof(u32));
238 ++
239 + /* Give the signal return code some randomness */
240 + offset = 0x200 + (get_random_int() & 0x7fc);
241 + signal_return_offset = offset;
242 +
243 +- /*
244 +- * Copy signal return handlers into the vector page, and
245 +- * set sigreturn to be a pointer to these.
246 +- */
247 ++ /* Copy signal return handlers into the page */
248 + memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
249 +
250 +- ptr = (unsigned long)addr + offset;
251 +- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
252 ++ /* Flush out all instructions in this page */
253 ++ ptr = (unsigned long)addr;
254 ++ flush_icache_range(ptr, ptr + PAGE_SIZE);
255 +
256 + return page;
257 + }
258 +diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
259 +index 8aa901e20ca8e..dd946c77e8015 100644
260 +--- a/arch/arm/xen/enlighten.c
261 ++++ b/arch/arm/xen/enlighten.c
262 +@@ -404,8 +404,6 @@ static int __init xen_guest_init(void)
263 + return -ENOMEM;
264 + }
265 + gnttab_init();
266 +- if (!xen_initial_domain())
267 +- xenbus_probe();
268 +
269 + /*
270 + * Making sure board specific code will not set up ops for
271 +diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
272 +index 0641ba54ab62a..ce538c51fa3fb 100644
273 +--- a/arch/arm/xen/p2m.c
274 ++++ b/arch/arm/xen/p2m.c
275 +@@ -93,8 +93,10 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
276 + for (i = 0; i < count; i++) {
277 + if (map_ops[i].status)
278 + continue;
279 +- set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT,
280 +- map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT);
281 ++ if (unlikely(!set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT,
282 ++ map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT))) {
283 ++ return -ENOMEM;
284 ++ }
285 + }
286 +
287 + return 0;
288 +diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
289 +index f4ee7c4f83b8b..b1c1a88a1c20c 100644
290 +--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
291 ++++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
292 +@@ -198,6 +198,7 @@
293 + reg = <0x0 0xf8000000 0x0 0x2000000>,
294 + <0x0 0xfd000000 0x0 0x1000000>;
295 + reg-names = "axi-base", "apb-base";
296 ++ device_type = "pci";
297 + #address-cells = <3>;
298 + #size-cells = <2>;
299 + #interrupt-cells = <1>;
300 +@@ -216,7 +217,6 @@
301 + <0 0 0 2 &pcie0_intc 1>,
302 + <0 0 0 3 &pcie0_intc 2>,
303 + <0 0 0 4 &pcie0_intc 3>;
304 +- linux,pci-domain = <0>;
305 + max-link-speed = <1>;
306 + msi-map = <0x0 &its 0x0 0x1000>;
307 + phys = <&pcie_phy 0>, <&pcie_phy 1>,
308 +diff --git a/arch/h8300/kernel/asm-offsets.c b/arch/h8300/kernel/asm-offsets.c
309 +index 85e60509f0a83..d4b53af657c84 100644
310 +--- a/arch/h8300/kernel/asm-offsets.c
311 ++++ b/arch/h8300/kernel/asm-offsets.c
312 +@@ -63,6 +63,9 @@ int main(void)
313 + OFFSET(TI_FLAGS, thread_info, flags);
314 + OFFSET(TI_CPU, thread_info, cpu);
315 + OFFSET(TI_PRE, thread_info, preempt_count);
316 ++#ifdef CONFIG_PREEMPTION
317 ++ DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
318 ++#endif
319 +
320 + return 0;
321 + }
322 +diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
323 +index 06cfbb3aacbb0..abc147aeff8b0 100644
324 +--- a/arch/riscv/include/asm/page.h
325 ++++ b/arch/riscv/include/asm/page.h
326 +@@ -115,7 +115,10 @@ extern unsigned long min_low_pfn;
327 +
328 + #endif /* __ASSEMBLY__ */
329 +
330 +-#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr)))
331 ++#define virt_addr_valid(vaddr) ({ \
332 ++ unsigned long _addr = (unsigned long)vaddr; \
333 ++ (unsigned long)(_addr) >= PAGE_OFFSET && pfn_valid(virt_to_pfn(_addr)); \
334 ++})
335 +
336 + #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
337 + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
338 +diff --git a/arch/x86/Makefile b/arch/x86/Makefile
339 +index 0303a243b634e..75200b421f29b 100644
340 +--- a/arch/x86/Makefile
341 ++++ b/arch/x86/Makefile
342 +@@ -61,6 +61,9 @@ endif
343 + KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow
344 + KBUILD_CFLAGS += $(call cc-option,-mno-avx,)
345 +
346 ++# Intel CET isn't enabled in the kernel
347 ++KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
348 ++
349 + ifeq ($(CONFIG_X86_32),y)
350 + BITS := 32
351 + UTS_MACHINE := i386
352 +@@ -132,9 +135,6 @@ else
353 + KBUILD_CFLAGS += -mno-red-zone
354 + KBUILD_CFLAGS += -mcmodel=kernel
355 +
356 +- # Intel CET isn't enabled in the kernel
357 +- KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
358 +-
359 + # -funit-at-a-time shrinks the kernel .text considerably
360 + # unfortunately it makes reading oopses harder.
361 + KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time)
362 +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
363 +index b34d11f22213f..8cb9277aa6ff2 100644
364 +--- a/arch/x86/kvm/svm.c
365 ++++ b/arch/x86/kvm/svm.c
366 +@@ -7097,7 +7097,6 @@ static int svm_register_enc_region(struct kvm *kvm,
367 + region->uaddr = range->addr;
368 + region->size = range->size;
369 +
370 +- mutex_lock(&kvm->lock);
371 + list_add_tail(&region->list, &sev->regions_list);
372 + mutex_unlock(&kvm->lock);
373 +
374 +diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
375 +index 159a897151d64..e8ef994c7243c 100644
376 +--- a/arch/x86/xen/p2m.c
377 ++++ b/arch/x86/xen/p2m.c
378 +@@ -708,7 +708,8 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
379 + unsigned long mfn, pfn;
380 +
381 + /* Do not add to override if the map failed. */
382 +- if (map_ops[i].status)
383 ++ if (map_ops[i].status != GNTST_okay ||
384 ++ (kmap_ops && kmap_ops[i].status != GNTST_okay))
385 + continue;
386 +
387 + if (map_ops[i].flags & GNTMAP_contains_pte) {
388 +@@ -746,17 +747,15 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
389 + unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i]));
390 + unsigned long pfn = page_to_pfn(pages[i]);
391 +
392 +- if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) {
393 ++ if (mfn != INVALID_P2M_ENTRY && (mfn & FOREIGN_FRAME_BIT))
394 ++ set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
395 ++ else
396 + ret = -EINVAL;
397 +- goto out;
398 +- }
399 +-
400 +- set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
401 + }
402 + if (kunmap_ops)
403 + ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
404 +- kunmap_ops, count);
405 +-out:
406 ++ kunmap_ops, count) ?: ret;
407 ++
408 + return ret;
409 + }
410 + EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);
411 +diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
412 +index b7ad8ac6bb41e..5198ed1b36690 100644
413 +--- a/block/bfq-iosched.c
414 ++++ b/block/bfq-iosched.c
415 +@@ -5280,13 +5280,13 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
416 + * limit 'something'.
417 + */
418 + /* no more than 50% of tags for async I/O */
419 +- bfqd->word_depths[0][0] = max(bt->sb.depth >> 1, 1U);
420 ++ bfqd->word_depths[0][0] = max((1U << bt->sb.shift) >> 1, 1U);
421 + /*
422 + * no more than 75% of tags for sync writes (25% extra tags
423 + * w.r.t. async I/O, to prevent async I/O from starving sync
424 + * writes)
425 + */
426 +- bfqd->word_depths[0][1] = max((bt->sb.depth * 3) >> 2, 1U);
427 ++ bfqd->word_depths[0][1] = max(((1U << bt->sb.shift) * 3) >> 2, 1U);
428 +
429 + /*
430 + * In-word depths in case some bfq_queue is being weight-
431 +@@ -5296,9 +5296,9 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
432 + * shortage.
433 + */
434 + /* no more than ~18% of tags for async I/O */
435 +- bfqd->word_depths[1][0] = max((bt->sb.depth * 3) >> 4, 1U);
436 ++ bfqd->word_depths[1][0] = max(((1U << bt->sb.shift) * 3) >> 4, 1U);
437 + /* no more than ~37% of tags for sync writes (~20% extra tags) */
438 +- bfqd->word_depths[1][1] = max((bt->sb.depth * 6) >> 4, 1U);
439 ++ bfqd->word_depths[1][1] = max(((1U << bt->sb.shift) * 6) >> 4, 1U);
440 +
441 + for (i = 0; i < 2; i++)
442 + for (j = 0; j < 2; j++)
443 +diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
444 +index b18f0162cb9c4..208f3eea3641f 100644
445 +--- a/drivers/block/xen-blkback/blkback.c
446 ++++ b/drivers/block/xen-blkback/blkback.c
447 +@@ -850,8 +850,11 @@ again:
448 + pages[i]->page = persistent_gnt->page;
449 + pages[i]->persistent_gnt = persistent_gnt;
450 + } else {
451 +- if (get_free_page(ring, &pages[i]->page))
452 +- goto out_of_memory;
453 ++ if (get_free_page(ring, &pages[i]->page)) {
454 ++ put_free_pages(ring, pages_to_gnt, segs_to_map);
455 ++ ret = -ENOMEM;
456 ++ goto out;
457 ++ }
458 + addr = vaddr(pages[i]->page);
459 + pages_to_gnt[segs_to_map] = pages[i]->page;
460 + pages[i]->persistent_gnt = NULL;
461 +@@ -867,10 +870,8 @@ again:
462 + break;
463 + }
464 +
465 +- if (segs_to_map) {
466 ++ if (segs_to_map)
467 + ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
468 +- BUG_ON(ret);
469 +- }
470 +
471 + /*
472 + * Now swizzle the MFN in our domain with the MFN from the other domain
473 +@@ -885,7 +886,7 @@ again:
474 + pr_debug("invalid buffer -- could not remap it\n");
475 + put_free_pages(ring, &pages[seg_idx]->page, 1);
476 + pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
477 +- ret |= 1;
478 ++ ret |= !ret;
479 + goto next;
480 + }
481 + pages[seg_idx]->handle = map[new_map_idx].handle;
482 +@@ -937,17 +938,18 @@ next:
483 + }
484 + segs_to_map = 0;
485 + last_map = map_until;
486 +- if (map_until != num)
487 ++ if (!ret && map_until != num)
488 + goto again;
489 +
490 +- return ret;
491 +-
492 +-out_of_memory:
493 +- pr_alert("%s: out of memory\n", __func__);
494 +- put_free_pages(ring, pages_to_gnt, segs_to_map);
495 +- for (i = last_map; i < num; i++)
496 ++out:
497 ++ for (i = last_map; i < num; i++) {
498 ++ /* Don't zap current batch's valid persistent grants. */
499 ++ if(i >= last_map + segs_to_map)
500 ++ pages[i]->persistent_gnt = NULL;
501 + pages[i]->handle = BLKBACK_INVALID_HANDLE;
502 +- return -ENOMEM;
503 ++ }
504 ++
505 ++ return ret;
506 + }
507 +
508 + static int xen_blkbk_map_seg(struct pending_req *pending_req)
509 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
510 +index 3b07a316680c2..62a2f0491117d 100644
511 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
512 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
513 +@@ -668,8 +668,8 @@ static void emulated_link_detect(struct dc_link *link)
514 + link->type = dc_connection_none;
515 + prev_sink = link->local_sink;
516 +
517 +- if (prev_sink != NULL)
518 +- dc_sink_retain(prev_sink);
519 ++ if (prev_sink)
520 ++ dc_sink_release(prev_sink);
521 +
522 + switch (link->connector_signal) {
523 + case SIGNAL_TYPE_HDMI_TYPE_A: {
524 +@@ -4732,14 +4732,14 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
525 +
526 + ret = PTR_ERR_OR_ZERO(conn_state);
527 + if (ret)
528 +- goto err;
529 ++ goto out;
530 +
531 + /* Attach crtc to drm_atomic_state*/
532 + crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
533 +
534 + ret = PTR_ERR_OR_ZERO(crtc_state);
535 + if (ret)
536 +- goto err;
537 ++ goto out;
538 +
539 + /* force a restore */
540 + crtc_state->mode_changed = true;
541 +@@ -4749,17 +4749,15 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
542 +
543 + ret = PTR_ERR_OR_ZERO(plane_state);
544 + if (ret)
545 +- goto err;
546 +-
547 ++ goto out;
548 +
549 + /* Call commit internally with the state we just constructed */
550 + ret = drm_atomic_commit(state);
551 +- if (!ret)
552 +- return 0;
553 +
554 +-err:
555 +- DRM_ERROR("Restoring old state failed with %i\n", ret);
556 ++out:
557 + drm_atomic_state_put(state);
558 ++ if (ret)
559 ++ DRM_ERROR("Restoring old state failed with %i\n", ret);
560 +
561 + return ret;
562 + }
563 +diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
564 +index eb7e533b0dd47..6feafebf85feb 100644
565 +--- a/drivers/i2c/busses/i2c-stm32f7.c
566 ++++ b/drivers/i2c/busses/i2c-stm32f7.c
567 +@@ -49,6 +49,8 @@
568 + #define STM32F7_I2C_CR1_RXDMAEN BIT(15)
569 + #define STM32F7_I2C_CR1_TXDMAEN BIT(14)
570 + #define STM32F7_I2C_CR1_ANFOFF BIT(12)
571 ++#define STM32F7_I2C_CR1_DNF_MASK GENMASK(11, 8)
572 ++#define STM32F7_I2C_CR1_DNF(n) (((n) & 0xf) << 8)
573 + #define STM32F7_I2C_CR1_ERRIE BIT(7)
574 + #define STM32F7_I2C_CR1_TCIE BIT(6)
575 + #define STM32F7_I2C_CR1_STOPIE BIT(5)
576 +@@ -147,7 +149,7 @@
577 + #define STM32F7_I2C_MAX_SLAVE 0x2
578 +
579 + #define STM32F7_I2C_DNF_DEFAULT 0
580 +-#define STM32F7_I2C_DNF_MAX 16
581 ++#define STM32F7_I2C_DNF_MAX 15
582 +
583 + #define STM32F7_I2C_ANALOG_FILTER_ENABLE 1
584 + #define STM32F7_I2C_ANALOG_FILTER_DELAY_MIN 50 /* ns */
585 +@@ -645,6 +647,13 @@ static void stm32f7_i2c_hw_config(struct stm32f7_i2c_dev *i2c_dev)
586 + else
587 + stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
588 + STM32F7_I2C_CR1_ANFOFF);
589 ++
590 ++ /* Program the Digital Filter */
591 ++ stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1,
592 ++ STM32F7_I2C_CR1_DNF_MASK);
593 ++ stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
594 ++ STM32F7_I2C_CR1_DNF(i2c_dev->setup.dnf));
595 ++
596 + stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
597 + STM32F7_I2C_CR1_PE);
598 + }
599 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
600 +index d575dd9a329d9..16ab000454f91 100644
601 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
602 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
603 +@@ -5182,12 +5182,19 @@ void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
604 +
605 + void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
606 + {
607 ++ struct hnae3_handle *handle = &vport->nic;
608 + struct hclge_dev *hdev = vport->back;
609 + int reset_try_times = 0;
610 + int reset_status;
611 + u16 queue_gid;
612 + int ret;
613 +
614 ++ if (queue_id >= handle->kinfo.num_tqps) {
615 ++ dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
616 ++ queue_id);
617 ++ return;
618 ++ }
619 ++
620 + queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
621 +
622 + ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
623 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
624 +index 37c0bc699cd9c..cc1895a32b9d3 100644
625 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
626 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
627 +@@ -314,7 +314,12 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
628 +
629 + priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
630 + } else if (!qopt->enable) {
631 +- return stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_DCB);
632 ++ ret = stmmac_dma_qmode(priv, priv->ioaddr, queue,
633 ++ MTL_QUEUE_DCB);
634 ++ if (ret)
635 ++ return ret;
636 ++
637 ++ priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
638 + }
639 +
640 + /* Port Transmit Rate and Speed Divider */
641 +diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
642 +index cc6840377bc27..8ce6a167dd2cc 100644
643 +--- a/drivers/net/wireless/mediatek/mt76/dma.c
644 ++++ b/drivers/net/wireless/mediatek/mt76/dma.c
645 +@@ -393,15 +393,17 @@ static void
646 + mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
647 + int len, bool more)
648 + {
649 +- struct page *page = virt_to_head_page(data);
650 +- int offset = data - page_address(page);
651 + struct sk_buff *skb = q->rx_head;
652 + struct skb_shared_info *shinfo = skb_shinfo(skb);
653 +
654 + if (shinfo->nr_frags < ARRAY_SIZE(shinfo->frags)) {
655 +- offset += q->buf_offset;
656 ++ struct page *page = virt_to_head_page(data);
657 ++ int offset = data - page_address(page) + q->buf_offset;
658 ++
659 + skb_add_rx_frag(skb, shinfo->nr_frags, page, offset, len,
660 + q->buf_size);
661 ++ } else {
662 ++ skb_free_frag(data);
663 + }
664 +
665 + if (more)
666 +diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
667 +index f228298c3bd08..b29a1b279fff4 100644
668 +--- a/drivers/net/xen-netback/netback.c
669 ++++ b/drivers/net/xen-netback/netback.c
670 +@@ -1326,13 +1326,11 @@ int xenvif_tx_action(struct xenvif_queue *queue, int budget)
671 + return 0;
672 +
673 + gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
674 +- if (nr_mops != 0) {
675 ++ if (nr_mops != 0)
676 + ret = gnttab_map_refs(queue->tx_map_ops,
677 + NULL,
678 + queue->pages_to_map,
679 + nr_mops);
680 +- BUG_ON(ret);
681 +- }
682 +
683 + work_done = xenvif_tx_submit(queue);
684 +
685 +diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
686 +index 9b62f65b630e4..48e2006f96ce6 100644
687 +--- a/drivers/net/xen-netback/rx.c
688 ++++ b/drivers/net/xen-netback/rx.c
689 +@@ -38,10 +38,15 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
690 + RING_IDX prod, cons;
691 + struct sk_buff *skb;
692 + int needed;
693 ++ unsigned long flags;
694 ++
695 ++ spin_lock_irqsave(&queue->rx_queue.lock, flags);
696 +
697 + skb = skb_peek(&queue->rx_queue);
698 +- if (!skb)
699 ++ if (!skb) {
700 ++ spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
701 + return false;
702 ++ }
703 +
704 + needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
705 + if (skb_is_gso(skb))
706 +@@ -49,6 +54,8 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
707 + if (skb->sw_hash)
708 + needed++;
709 +
710 ++ spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
711 ++
712 + do {
713 + prod = queue->rx.sring->req_prod;
714 + cons = queue->rx.req_cons;
715 +diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
716 +index 952544ca0d84d..93fadd4abf14d 100644
717 +--- a/drivers/platform/x86/hp-wmi.c
718 ++++ b/drivers/platform/x86/hp-wmi.c
719 +@@ -45,6 +45,10 @@ MODULE_LICENSE("GPL");
720 + MODULE_ALIAS("wmi:95F24279-4D7B-4334-9387-ACCDC67EF61C");
721 + MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
722 +
723 ++static int enable_tablet_mode_sw = -1;
724 ++module_param(enable_tablet_mode_sw, int, 0444);
725 ++MODULE_PARM_DESC(enable_tablet_mode_sw, "Enable SW_TABLET_MODE reporting (-1=auto, 0=no, 1=yes)");
726 ++
727 + #define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C"
728 + #define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4"
729 +
730 +@@ -656,10 +660,12 @@ static int __init hp_wmi_input_setup(void)
731 + }
732 +
733 + /* Tablet mode */
734 +- val = hp_wmi_hw_state(HPWMI_TABLET_MASK);
735 +- if (!(val < 0)) {
736 +- __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
737 +- input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
738 ++ if (enable_tablet_mode_sw > 0) {
739 ++ val = hp_wmi_hw_state(HPWMI_TABLET_MASK);
740 ++ if (val >= 0) {
741 ++ __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
742 ++ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
743 ++ }
744 + }
745 +
746 + err = sparse_keymap_setup(hp_wmi_input_dev, hp_wmi_keymap, NULL);
747 +diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
748 +index 0ccd06f11f123..de31362940973 100644
749 +--- a/drivers/scsi/qla2xxx/qla_tmpl.c
750 ++++ b/drivers/scsi/qla2xxx/qla_tmpl.c
751 +@@ -940,7 +940,8 @@ qla27xx_template_checksum(void *p, ulong size)
752 + static inline int
753 + qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp)
754 + {
755 +- return qla27xx_template_checksum(tmp, tmp->template_size) == 0;
756 ++ return qla27xx_template_checksum(tmp,
757 ++ le32_to_cpu(tmp->template_size)) == 0;
758 + }
759 +
760 + static inline int
761 +@@ -956,7 +957,7 @@ qla27xx_execute_fwdt_template(struct scsi_qla_host *vha)
762 + ulong len;
763 +
764 + if (qla27xx_fwdt_template_valid(tmp)) {
765 +- len = tmp->template_size;
766 ++ len = le32_to_cpu(tmp->template_size);
767 + tmp = memcpy(vha->hw->fw_dump, tmp, len);
768 + ql27xx_edit_template(vha, tmp);
769 + qla27xx_walk_template(vha, tmp, tmp, &len);
770 +@@ -972,7 +973,7 @@ qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha)
771 + ulong len = 0;
772 +
773 + if (qla27xx_fwdt_template_valid(tmp)) {
774 +- len = tmp->template_size;
775 ++ len = le32_to_cpu(tmp->template_size);
776 + qla27xx_walk_template(vha, tmp, NULL, &len);
777 + }
778 +
779 +@@ -984,7 +985,7 @@ qla27xx_fwdt_template_size(void *p)
780 + {
781 + struct qla27xx_fwdt_template *tmp = p;
782 +
783 +- return tmp->template_size;
784 ++ return le32_to_cpu(tmp->template_size);
785 + }
786 +
787 + ulong
788 +diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h
789 +index 141c1c5e73f42..2d3e1a8349b3b 100644
790 +--- a/drivers/scsi/qla2xxx/qla_tmpl.h
791 ++++ b/drivers/scsi/qla2xxx/qla_tmpl.h
792 +@@ -13,7 +13,7 @@
793 + struct __packed qla27xx_fwdt_template {
794 + uint32_t template_type;
795 + uint32_t entry_offset;
796 +- uint32_t template_size;
797 ++ __le32 template_size;
798 + uint32_t reserved_1;
799 +
800 + uint32_t entry_count;
801 +diff --git a/drivers/usb/dwc3/ulpi.c b/drivers/usb/dwc3/ulpi.c
802 +index bb8271531da70..ffe3440abb74c 100644
803 +--- a/drivers/usb/dwc3/ulpi.c
804 ++++ b/drivers/usb/dwc3/ulpi.c
805 +@@ -7,6 +7,8 @@
806 + * Author: Heikki Krogerus <heikki.krogerus@×××××××××××.com>
807 + */
808 +
809 ++#include <linux/delay.h>
810 ++#include <linux/time64.h>
811 + #include <linux/ulpi/regs.h>
812 +
813 + #include "core.h"
814 +@@ -17,12 +19,22 @@
815 + DWC3_GUSB2PHYACC_ADDR(ULPI_ACCESS_EXTENDED) | \
816 + DWC3_GUSB2PHYACC_EXTEND_ADDR(a) : DWC3_GUSB2PHYACC_ADDR(a))
817 +
818 +-static int dwc3_ulpi_busyloop(struct dwc3 *dwc)
819 ++#define DWC3_ULPI_BASE_DELAY DIV_ROUND_UP(NSEC_PER_SEC, 60000000L)
820 ++
821 ++static int dwc3_ulpi_busyloop(struct dwc3 *dwc, u8 addr, bool read)
822 + {
823 +- unsigned count = 1000;
824 ++ unsigned long ns = 5L * DWC3_ULPI_BASE_DELAY;
825 ++ unsigned int count = 1000;
826 + u32 reg;
827 +
828 ++ if (addr >= ULPI_EXT_VENDOR_SPECIFIC)
829 ++ ns += DWC3_ULPI_BASE_DELAY;
830 ++
831 ++ if (read)
832 ++ ns += DWC3_ULPI_BASE_DELAY;
833 ++
834 + while (count--) {
835 ++ ndelay(ns);
836 + reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYACC(0));
837 + if (reg & DWC3_GUSB2PHYACC_DONE)
838 + return 0;
839 +@@ -47,7 +59,7 @@ static int dwc3_ulpi_read(struct device *dev, u8 addr)
840 + reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr);
841 + dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
842 +
843 +- ret = dwc3_ulpi_busyloop(dwc);
844 ++ ret = dwc3_ulpi_busyloop(dwc, addr, true);
845 + if (ret)
846 + return ret;
847 +
848 +@@ -71,7 +83,7 @@ static int dwc3_ulpi_write(struct device *dev, u8 addr, u8 val)
849 + reg |= DWC3_GUSB2PHYACC_WRITE | val;
850 + dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
851 +
852 +- return dwc3_ulpi_busyloop(dwc);
853 ++ return dwc3_ulpi_busyloop(dwc, addr, false);
854 + }
855 +
856 + static const struct ulpi_ops dwc3_ulpi_ops = {
857 +diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
858 +index 3cfbec482efb9..e519063e421e2 100644
859 +--- a/drivers/xen/gntdev.c
860 ++++ b/drivers/xen/gntdev.c
861 +@@ -323,44 +323,47 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map)
862 + * to the kernel linear addresses of the struct pages.
863 + * These ptes are completely different from the user ptes dealt
864 + * with find_grant_ptes.
865 ++ * Note that GNTMAP_device_map isn't needed here: The
866 ++ * dev_bus_addr output field gets consumed only from ->map_ops,
867 ++ * and by not requesting it when mapping we also avoid needing
868 ++ * to mirror dev_bus_addr into ->unmap_ops (and holding an extra
869 ++ * reference to the page in the hypervisor).
870 + */
871 ++ unsigned int flags = (map->flags & ~GNTMAP_device_map) |
872 ++ GNTMAP_host_map;
873 ++
874 + for (i = 0; i < map->count; i++) {
875 + unsigned long address = (unsigned long)
876 + pfn_to_kaddr(page_to_pfn(map->pages[i]));
877 + BUG_ON(PageHighMem(map->pages[i]));
878 +
879 +- gnttab_set_map_op(&map->kmap_ops[i], address,
880 +- map->flags | GNTMAP_host_map,
881 ++ gnttab_set_map_op(&map->kmap_ops[i], address, flags,
882 + map->grants[i].ref,
883 + map->grants[i].domid);
884 + gnttab_set_unmap_op(&map->kunmap_ops[i], address,
885 +- map->flags | GNTMAP_host_map, -1);
886 ++ flags, -1);
887 + }
888 + }
889 +
890 + pr_debug("map %d+%d\n", map->index, map->count);
891 + err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
892 + map->pages, map->count);
893 +- if (err)
894 +- return err;
895 +
896 + for (i = 0; i < map->count; i++) {
897 +- if (map->map_ops[i].status) {
898 ++ if (map->map_ops[i].status == GNTST_okay)
899 ++ map->unmap_ops[i].handle = map->map_ops[i].handle;
900 ++ else if (!err)
901 + err = -EINVAL;
902 +- continue;
903 +- }
904 +
905 +- map->unmap_ops[i].handle = map->map_ops[i].handle;
906 +- if (use_ptemod)
907 +- map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
908 +-#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
909 +- else if (map->dma_vaddr) {
910 +- unsigned long bfn;
911 ++ if (map->flags & GNTMAP_device_map)
912 ++ map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr;
913 +
914 +- bfn = pfn_to_bfn(page_to_pfn(map->pages[i]));
915 +- map->unmap_ops[i].dev_bus_addr = __pfn_to_phys(bfn);
916 ++ if (use_ptemod) {
917 ++ if (map->kmap_ops[i].status == GNTST_okay)
918 ++ map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
919 ++ else if (!err)
920 ++ err = -EINVAL;
921 + }
922 +-#endif
923 + }
924 + return err;
925 + }
926 +diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
927 +index 1abc0a55b8d9c..614d067ffe126 100644
928 +--- a/drivers/xen/xen-scsiback.c
929 ++++ b/drivers/xen/xen-scsiback.c
930 +@@ -422,12 +422,12 @@ static int scsiback_gnttab_data_map_batch(struct gnttab_map_grant_ref *map,
931 + return 0;
932 +
933 + err = gnttab_map_refs(map, NULL, pg, cnt);
934 +- BUG_ON(err);
935 + for (i = 0; i < cnt; i++) {
936 + if (unlikely(map[i].status != GNTST_okay)) {
937 + pr_err("invalid buffer -- could not remap it\n");
938 + map[i].handle = SCSIBACK_INVALID_HANDLE;
939 +- err = -ENOMEM;
940 ++ if (!err)
941 ++ err = -ENOMEM;
942 + } else {
943 + get_page(pg[i]);
944 + }
945 +diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h
946 +index a9bb5f91082d3..88516a8a9f932 100644
947 +--- a/drivers/xen/xenbus/xenbus.h
948 ++++ b/drivers/xen/xenbus/xenbus.h
949 +@@ -115,7 +115,6 @@ int xenbus_probe_node(struct xen_bus_type *bus,
950 + const char *type,
951 + const char *nodename);
952 + int xenbus_probe_devices(struct xen_bus_type *bus);
953 +-void xenbus_probe(void);
954 +
955 + void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
956 +
957 +diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
958 +index 786494bb7f20b..652894d619677 100644
959 +--- a/drivers/xen/xenbus/xenbus_probe.c
960 ++++ b/drivers/xen/xenbus/xenbus_probe.c
961 +@@ -683,7 +683,7 @@ void unregister_xenstore_notifier(struct notifier_block *nb)
962 + }
963 + EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
964 +
965 +-void xenbus_probe(void)
966 ++static void xenbus_probe(void)
967 + {
968 + xenstored_ready = 1;
969 +
970 +diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
971 +index 6eb0b882ad231..e164f489d01d9 100644
972 +--- a/fs/overlayfs/copy_up.c
973 ++++ b/fs/overlayfs/copy_up.c
974 +@@ -79,6 +79,14 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
975 +
976 + if (ovl_is_private_xattr(name))
977 + continue;
978 ++
979 ++ error = security_inode_copy_up_xattr(name);
980 ++ if (error < 0 && error != -EOPNOTSUPP)
981 ++ break;
982 ++ if (error == 1) {
983 ++ error = 0;
984 ++ continue; /* Discard */
985 ++ }
986 + retry:
987 + size = vfs_getxattr(old, name, value, value_size);
988 + if (size == -ERANGE)
989 +@@ -102,13 +110,6 @@ retry:
990 + goto retry;
991 + }
992 +
993 +- error = security_inode_copy_up_xattr(name);
994 +- if (error < 0 && error != -EOPNOTSUPP)
995 +- break;
996 +- if (error == 1) {
997 +- error = 0;
998 +- continue; /* Discard */
999 +- }
1000 + error = vfs_setxattr(new, name, value, size, 0);
1001 + if (error)
1002 + break;
1003 +diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
1004 +index 8b3c284ce92ea..08e60a6df77c3 100644
1005 +--- a/fs/overlayfs/inode.c
1006 ++++ b/fs/overlayfs/inode.c
1007 +@@ -340,7 +340,9 @@ int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name,
1008 + goto out;
1009 +
1010 + if (!value && !upperdentry) {
1011 ++ old_cred = ovl_override_creds(dentry->d_sb);
1012 + err = vfs_getxattr(realdentry, name, NULL, 0);
1013 ++ revert_creds(old_cred);
1014 + if (err < 0)
1015 + goto out_drop_write;
1016 + }
1017 +diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
1018 +index 246623406db9f..f0dc432a3ceb3 100644
1019 +--- a/fs/overlayfs/super.c
1020 ++++ b/fs/overlayfs/super.c
1021 +@@ -82,7 +82,7 @@ static void ovl_dentry_release(struct dentry *dentry)
1022 + static struct dentry *ovl_d_real(struct dentry *dentry,
1023 + const struct inode *inode)
1024 + {
1025 +- struct dentry *real;
1026 ++ struct dentry *real = NULL, *lower;
1027 +
1028 + /* It's an overlay file */
1029 + if (inode && d_inode(dentry) == inode)
1030 +@@ -101,9 +101,10 @@ static struct dentry *ovl_d_real(struct dentry *dentry,
1031 + if (real && !inode && ovl_has_upperdata(d_inode(dentry)))
1032 + return real;
1033 +
1034 +- real = ovl_dentry_lowerdata(dentry);
1035 +- if (!real)
1036 ++ lower = ovl_dentry_lowerdata(dentry);
1037 ++ if (!lower)
1038 + goto bug;
1039 ++ real = lower;
1040 +
1041 + /* Handle recursion */
1042 + real = d_real(real, inode);
1043 +@@ -111,8 +112,10 @@ static struct dentry *ovl_d_real(struct dentry *dentry,
1044 + if (!inode || inode == d_inode(real))
1045 + return real;
1046 + bug:
1047 +- WARN(1, "ovl_d_real(%pd4, %s:%lu): real dentry not found\n", dentry,
1048 +- inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0);
1049 ++ WARN(1, "%s(%pd4, %s:%lu): real dentry (%p/%lu) not found\n",
1050 ++ __func__, dentry, inode ? inode->i_sb->s_id : "NULL",
1051 ++ inode ? inode->i_ino : 0, real,
1052 ++ real && d_inode(real) ? d_inode(real)->i_ino : 0);
1053 + return dentry;
1054 + }
1055 +
1056 +diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
1057 +index f65a924a75abd..e71c97c3c25ef 100644
1058 +--- a/include/asm-generic/vmlinux.lds.h
1059 ++++ b/include/asm-generic/vmlinux.lds.h
1060 +@@ -363,7 +363,7 @@
1061 + } \
1062 + \
1063 + /* Built-in firmware blobs */ \
1064 +- .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
1065 ++ .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) { \
1066 + __start_builtin_fw = .; \
1067 + KEEP(*(.builtin_fw)) \
1068 + __end_builtin_fw = .; \
1069 +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
1070 +index 4e14926433edb..9770e08c7e029 100644
1071 +--- a/include/linux/netdevice.h
1072 ++++ b/include/linux/netdevice.h
1073 +@@ -3966,6 +3966,7 @@ static inline void netif_tx_disable(struct net_device *dev)
1074 +
1075 + local_bh_disable();
1076 + cpu = smp_processor_id();
1077 ++ spin_lock(&dev->tx_global_lock);
1078 + for (i = 0; i < dev->num_tx_queues; i++) {
1079 + struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1080 +
1081 +@@ -3973,6 +3974,7 @@ static inline void netif_tx_disable(struct net_device *dev)
1082 + netif_tx_stop_queue(txq);
1083 + __netif_tx_unlock(txq);
1084 + }
1085 ++ spin_unlock(&dev->tx_global_lock);
1086 + local_bh_enable();
1087 + }
1088 +
1089 +diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
1090 +index 9bc5bc07d4d3f..a9978350b45b0 100644
1091 +--- a/include/xen/grant_table.h
1092 ++++ b/include/xen/grant_table.h
1093 +@@ -157,6 +157,7 @@ gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
1094 + map->flags = flags;
1095 + map->ref = ref;
1096 + map->dom = domid;
1097 ++ map->status = 1; /* arbitrary positive value */
1098 + }
1099 +
1100 + static inline void
1101 +diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
1102 +index fe9a9fa2ebc45..14d47ed4114fd 100644
1103 +--- a/include/xen/xenbus.h
1104 ++++ b/include/xen/xenbus.h
1105 +@@ -187,8 +187,6 @@ void xs_suspend_cancel(void);
1106 +
1107 + struct work_struct;
1108 +
1109 +-void xenbus_probe(void);
1110 +-
1111 + #define XENBUS_IS_ERR_READ(str) ({ \
1112 + if (!IS_ERR(str) && strlen(str) == 0) { \
1113 + kfree(str); \
1114 +diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
1115 +index 55fff5e6d9831..a47d623f59fe7 100644
1116 +--- a/kernel/bpf/stackmap.c
1117 ++++ b/kernel/bpf/stackmap.c
1118 +@@ -114,6 +114,8 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
1119 +
1120 + /* hash table size must be power of 2 */
1121 + n_buckets = roundup_pow_of_two(attr->max_entries);
1122 ++ if (!n_buckets)
1123 ++ return ERR_PTR(-E2BIG);
1124 +
1125 + cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
1126 + if (cost >= U32_MAX - PAGE_SIZE)
1127 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
1128 +index d6f1e305bb3db..88a4f9e2d06c7 100644
1129 +--- a/kernel/trace/trace.c
1130 ++++ b/kernel/trace/trace.c
1131 +@@ -2292,7 +2292,7 @@ trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1132 + (entry = this_cpu_read(trace_buffered_event))) {
1133 + /* Try to use the per cpu buffer first */
1134 + val = this_cpu_inc_return(trace_buffered_event_cnt);
1135 +- if (val == 1) {
1136 ++ if ((len < (PAGE_SIZE - sizeof(*entry))) && val == 1) {
1137 + trace_event_setup(entry, type, flags, pc);
1138 + entry->array[0] = len;
1139 + return entry;
1140 +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
1141 +index 0fc06a7da87fb..ea43be6b9cc3c 100644
1142 +--- a/kernel/trace/trace_events.c
1143 ++++ b/kernel/trace/trace_events.c
1144 +@@ -1113,7 +1113,8 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
1145 + mutex_lock(&event_mutex);
1146 + list_for_each_entry(file, &tr->events, list) {
1147 + call = file->event_call;
1148 +- if (!trace_event_name(call) || !call->class || !call->class->reg)
1149 ++ if ((call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) ||
1150 ++ !trace_event_name(call) || !call->class || !call->class->reg)
1151 + continue;
1152 +
1153 + if (system && strcmp(call->class->system, system->name) != 0)
1154 +diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
1155 +index ad1da6b2fb607..1dceda3c0e759 100644
1156 +--- a/net/netfilter/nf_conntrack_core.c
1157 ++++ b/net/netfilter/nf_conntrack_core.c
1158 +@@ -1063,7 +1063,8 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
1159 + * Let nf_ct_resolve_clash() deal with this later.
1160 + */
1161 + if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
1162 +- &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple))
1163 ++ &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple) &&
1164 ++ nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL))
1165 + continue;
1166 +
1167 + NF_CT_STAT_INC_ATOMIC(net, found);
1168 +diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
1169 +index 890799c16aa41..b3957fe7eced2 100644
1170 +--- a/net/netfilter/nf_flow_table_core.c
1171 ++++ b/net/netfilter/nf_flow_table_core.c
1172 +@@ -360,7 +360,7 @@ static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
1173 + return -1;
1174 +
1175 + tcph = (void *)(skb_network_header(skb) + thoff);
1176 +- inet_proto_csum_replace2(&tcph->check, skb, port, new_port, true);
1177 ++ inet_proto_csum_replace2(&tcph->check, skb, port, new_port, false);
1178 +
1179 + return 0;
1180 + }
1181 +@@ -377,7 +377,7 @@ static int nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
1182 + udph = (void *)(skb_network_header(skb) + thoff);
1183 + if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
1184 + inet_proto_csum_replace2(&udph->check, skb, port,
1185 +- new_port, true);
1186 ++ new_port, false);
1187 + if (!udph->check)
1188 + udph->check = CSUM_MANGLED_0;
1189 + }
1190 +diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
1191 +index 570144507df11..cb58bc7ae30d3 100644
1192 +--- a/net/netfilter/xt_recent.c
1193 ++++ b/net/netfilter/xt_recent.c
1194 +@@ -155,7 +155,8 @@ static void recent_entry_remove(struct recent_table *t, struct recent_entry *e)
1195 + /*
1196 + * Drop entries with timestamps older then 'time'.
1197 + */
1198 +-static void recent_entry_reap(struct recent_table *t, unsigned long time)
1199 ++static void recent_entry_reap(struct recent_table *t, unsigned long time,
1200 ++ struct recent_entry *working, bool update)
1201 + {
1202 + struct recent_entry *e;
1203 +
1204 +@@ -164,6 +165,12 @@ static void recent_entry_reap(struct recent_table *t, unsigned long time)
1205 + */
1206 + e = list_entry(t->lru_list.next, struct recent_entry, lru_list);
1207 +
1208 ++ /*
1209 ++ * Do not reap the entry which are going to be updated.
1210 ++ */
1211 ++ if (e == working && update)
1212 ++ return;
1213 ++
1214 + /*
1215 + * The last time stamp is the most recent.
1216 + */
1217 +@@ -306,7 +313,8 @@ recent_mt(const struct sk_buff *skb, struct xt_action_param *par)
1218 +
1219 + /* info->seconds must be non-zero */
1220 + if (info->check_set & XT_RECENT_REAP)
1221 +- recent_entry_reap(t, time);
1222 ++ recent_entry_reap(t, time, e,
1223 ++ info->check_set & XT_RECENT_UPDATE && ret);
1224 + }
1225 +
1226 + if (info->check_set & XT_RECENT_SET ||
1227 +diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
1228 +index a05c5cb3429c0..69cf9cbbb05f6 100644
1229 +--- a/net/qrtr/qrtr.c
1230 ++++ b/net/qrtr/qrtr.c
1231 +@@ -194,7 +194,7 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
1232 + hdr->src_port_id = cpu_to_le32(from->sq_port);
1233 + if (to->sq_port == QRTR_PORT_CTRL) {
1234 + hdr->dst_node_id = cpu_to_le32(node->nid);
1235 +- hdr->dst_port_id = cpu_to_le32(QRTR_NODE_BCAST);
1236 ++ hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL);
1237 + } else {
1238 + hdr->dst_node_id = cpu_to_le32(to->sq_node);
1239 + hdr->dst_port_id = cpu_to_le32(to->sq_port);
1240 +diff --git a/net/qrtr/tun.c b/net/qrtr/tun.c
1241 +index e35869e81766e..997af345ce374 100644
1242 +--- a/net/qrtr/tun.c
1243 ++++ b/net/qrtr/tun.c
1244 +@@ -80,6 +80,12 @@ static ssize_t qrtr_tun_write_iter(struct kiocb *iocb, struct iov_iter *from)
1245 + ssize_t ret;
1246 + void *kbuf;
1247 +
1248 ++ if (!len)
1249 ++ return -EINVAL;
1250 ++
1251 ++ if (len > KMALLOC_MAX_SIZE)
1252 ++ return -ENOMEM;
1253 ++
1254 + kbuf = kzalloc(len, GFP_KERNEL);
1255 + if (!kbuf)
1256 + return -ENOMEM;
1257 +diff --git a/net/rds/rdma.c b/net/rds/rdma.c
1258 +index e1965d9cbcf82..9882cebfcad60 100644
1259 +--- a/net/rds/rdma.c
1260 ++++ b/net/rds/rdma.c
1261 +@@ -531,6 +531,9 @@ int rds_rdma_extra_size(struct rds_rdma_args *args,
1262 + if (args->nr_local == 0)
1263 + return -EINVAL;
1264 +
1265 ++ if (args->nr_local > UIO_MAXIOV)
1266 ++ return -EMSGSIZE;
1267 ++
1268 + iov->iov = kcalloc(args->nr_local,
1269 + sizeof(struct rds_iovec),
1270 + GFP_KERNEL);
1271 +diff --git a/net/sctp/proc.c b/net/sctp/proc.c
1272 +index a644292f9fafd..84f79ac4b9842 100644
1273 +--- a/net/sctp/proc.c
1274 ++++ b/net/sctp/proc.c
1275 +@@ -230,6 +230,12 @@ static void sctp_transport_seq_stop(struct seq_file *seq, void *v)
1276 + {
1277 + struct sctp_ht_iter *iter = seq->private;
1278 +
1279 ++ if (v && v != SEQ_START_TOKEN) {
1280 ++ struct sctp_transport *transport = v;
1281 ++
1282 ++ sctp_transport_put(transport);
1283 ++ }
1284 ++
1285 + sctp_transport_walk_stop(&iter->hti);
1286 + }
1287 +
1288 +@@ -237,6 +243,12 @@ static void *sctp_transport_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1289 + {
1290 + struct sctp_ht_iter *iter = seq->private;
1291 +
1292 ++ if (v && v != SEQ_START_TOKEN) {
1293 ++ struct sctp_transport *transport = v;
1294 ++
1295 ++ sctp_transport_put(transport);
1296 ++ }
1297 ++
1298 + ++*pos;
1299 +
1300 + return sctp_transport_get_next(seq_file_net(seq), &iter->hti);
1301 +@@ -292,8 +304,6 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
1302 + sk->sk_rcvbuf);
1303 + seq_printf(seq, "\n");
1304 +
1305 +- sctp_transport_put(transport);
1306 +-
1307 + return 0;
1308 + }
1309 +
1310 +@@ -369,8 +379,6 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
1311 + seq_printf(seq, "\n");
1312 + }
1313 +
1314 +- sctp_transport_put(transport);
1315 +-
1316 + return 0;
1317 + }
1318 +
1319 +diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
1320 +index 02374459c4179..4b65db13e1bba 100644
1321 +--- a/net/vmw_vsock/af_vsock.c
1322 ++++ b/net/vmw_vsock/af_vsock.c
1323 +@@ -816,10 +816,12 @@ static int vsock_shutdown(struct socket *sock, int mode)
1324 + */
1325 +
1326 + sk = sock->sk;
1327 ++
1328 ++ lock_sock(sk);
1329 + if (sock->state == SS_UNCONNECTED) {
1330 + err = -ENOTCONN;
1331 + if (sk->sk_type == SOCK_STREAM)
1332 +- return err;
1333 ++ goto out;
1334 + } else {
1335 + sock->state = SS_DISCONNECTING;
1336 + err = 0;
1337 +@@ -828,10 +830,8 @@ static int vsock_shutdown(struct socket *sock, int mode)
1338 + /* Receive and send shutdowns are treated alike. */
1339 + mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN);
1340 + if (mode) {
1341 +- lock_sock(sk);
1342 + sk->sk_shutdown |= mode;
1343 + sk->sk_state_change(sk);
1344 +- release_sock(sk);
1345 +
1346 + if (sk->sk_type == SOCK_STREAM) {
1347 + sock_reset_flag(sk, SOCK_DONE);
1348 +@@ -839,6 +839,8 @@ static int vsock_shutdown(struct socket *sock, int mode)
1349 + }
1350 + }
1351 +
1352 ++out:
1353 ++ release_sock(sk);
1354 + return err;
1355 + }
1356 +
1357 +@@ -1107,7 +1109,6 @@ static void vsock_connect_timeout(struct work_struct *work)
1358 + {
1359 + struct sock *sk;
1360 + struct vsock_sock *vsk;
1361 +- int cancel = 0;
1362 +
1363 + vsk = container_of(work, struct vsock_sock, connect_work.work);
1364 + sk = sk_vsock(vsk);
1365 +@@ -1118,11 +1119,9 @@ static void vsock_connect_timeout(struct work_struct *work)
1366 + sk->sk_state = TCP_CLOSE;
1367 + sk->sk_err = ETIMEDOUT;
1368 + sk->sk_error_report(sk);
1369 +- cancel = 1;
1370 ++ vsock_transport_cancel_pkt(vsk);
1371 + }
1372 + release_sock(sk);
1373 +- if (cancel)
1374 +- vsock_transport_cancel_pkt(vsk);
1375 +
1376 + sock_put(sk);
1377 + }
1378 +diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
1379 +index db6ca51228d2f..2bdf36845a5f3 100644
1380 +--- a/net/vmw_vsock/hyperv_transport.c
1381 ++++ b/net/vmw_vsock/hyperv_transport.c
1382 +@@ -443,14 +443,10 @@ static void hvs_shutdown_lock_held(struct hvsock *hvs, int mode)
1383 +
1384 + static int hvs_shutdown(struct vsock_sock *vsk, int mode)
1385 + {
1386 +- struct sock *sk = sk_vsock(vsk);
1387 +-
1388 + if (!(mode & SEND_SHUTDOWN))
1389 + return 0;
1390 +
1391 +- lock_sock(sk);
1392 + hvs_shutdown_lock_held(vsk->trans, mode);
1393 +- release_sock(sk);
1394 + return 0;
1395 + }
1396 +
1397 +diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
1398 +index 5f8a72d34d313..cbb336f01cf2b 100644
1399 +--- a/net/vmw_vsock/virtio_transport_common.c
1400 ++++ b/net/vmw_vsock/virtio_transport_common.c
1401 +@@ -1033,10 +1033,10 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
1402 +
1403 + vsk = vsock_sk(sk);
1404 +
1405 +- space_available = virtio_transport_space_update(sk, pkt);
1406 +-
1407 + lock_sock(sk);
1408 +
1409 ++ space_available = virtio_transport_space_update(sk, pkt);
1410 ++
1411 + /* Update CID in case it has changed after a transport reset event */
1412 + vsk->local_addr.svm_cid = dst.svm_cid;
1413 +
1414 +diff --git a/security/commoncap.c b/security/commoncap.c
1415 +index f86557a8e43f6..a1dee0ab345a2 100644
1416 +--- a/security/commoncap.c
1417 ++++ b/security/commoncap.c
1418 +@@ -377,10 +377,11 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
1419 + {
1420 + int size, ret;
1421 + kuid_t kroot;
1422 ++ u32 nsmagic, magic;
1423 + uid_t root, mappedroot;
1424 + char *tmpbuf = NULL;
1425 + struct vfs_cap_data *cap;
1426 +- struct vfs_ns_cap_data *nscap;
1427 ++ struct vfs_ns_cap_data *nscap = NULL;
1428 + struct dentry *dentry;
1429 + struct user_namespace *fs_ns;
1430 +
1431 +@@ -402,46 +403,61 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
1432 + fs_ns = inode->i_sb->s_user_ns;
1433 + cap = (struct vfs_cap_data *) tmpbuf;
1434 + if (is_v2header((size_t) ret, cap)) {
1435 +- /* If this is sizeof(vfs_cap_data) then we're ok with the
1436 +- * on-disk value, so return that. */
1437 +- if (alloc)
1438 +- *buffer = tmpbuf;
1439 +- else
1440 +- kfree(tmpbuf);
1441 +- return ret;
1442 +- } else if (!is_v3header((size_t) ret, cap)) {
1443 +- kfree(tmpbuf);
1444 +- return -EINVAL;
1445 ++ root = 0;
1446 ++ } else if (is_v3header((size_t) ret, cap)) {
1447 ++ nscap = (struct vfs_ns_cap_data *) tmpbuf;
1448 ++ root = le32_to_cpu(nscap->rootid);
1449 ++ } else {
1450 ++ size = -EINVAL;
1451 ++ goto out_free;
1452 + }
1453 +
1454 +- nscap = (struct vfs_ns_cap_data *) tmpbuf;
1455 +- root = le32_to_cpu(nscap->rootid);
1456 + kroot = make_kuid(fs_ns, root);
1457 +
1458 + /* If the root kuid maps to a valid uid in current ns, then return
1459 + * this as a nscap. */
1460 + mappedroot = from_kuid(current_user_ns(), kroot);
1461 + if (mappedroot != (uid_t)-1 && mappedroot != (uid_t)0) {
1462 ++ size = sizeof(struct vfs_ns_cap_data);
1463 + if (alloc) {
1464 +- *buffer = tmpbuf;
1465 ++ if (!nscap) {
1466 ++ /* v2 -> v3 conversion */
1467 ++ nscap = kzalloc(size, GFP_ATOMIC);
1468 ++ if (!nscap) {
1469 ++ size = -ENOMEM;
1470 ++ goto out_free;
1471 ++ }
1472 ++ nsmagic = VFS_CAP_REVISION_3;
1473 ++ magic = le32_to_cpu(cap->magic_etc);
1474 ++ if (magic & VFS_CAP_FLAGS_EFFECTIVE)
1475 ++ nsmagic |= VFS_CAP_FLAGS_EFFECTIVE;
1476 ++ memcpy(&nscap->data, &cap->data, sizeof(__le32) * 2 * VFS_CAP_U32);
1477 ++ nscap->magic_etc = cpu_to_le32(nsmagic);
1478 ++ } else {
1479 ++ /* use allocated v3 buffer */
1480 ++ tmpbuf = NULL;
1481 ++ }
1482 + nscap->rootid = cpu_to_le32(mappedroot);
1483 +- } else
1484 +- kfree(tmpbuf);
1485 +- return size;
1486 ++ *buffer = nscap;
1487 ++ }
1488 ++ goto out_free;
1489 + }
1490 +
1491 + if (!rootid_owns_currentns(kroot)) {
1492 +- kfree(tmpbuf);
1493 +- return -EOPNOTSUPP;
1494 ++ size = -EOVERFLOW;
1495 ++ goto out_free;
1496 + }
1497 +
1498 + /* This comes from a parent namespace. Return as a v2 capability */
1499 + size = sizeof(struct vfs_cap_data);
1500 + if (alloc) {
1501 +- *buffer = kmalloc(size, GFP_ATOMIC);
1502 +- if (*buffer) {
1503 +- struct vfs_cap_data *cap = *buffer;
1504 +- __le32 nsmagic, magic;
1505 ++ if (nscap) {
1506 ++ /* v3 -> v2 conversion */
1507 ++ cap = kzalloc(size, GFP_ATOMIC);
1508 ++ if (!cap) {
1509 ++ size = -ENOMEM;
1510 ++ goto out_free;
1511 ++ }
1512 + magic = VFS_CAP_REVISION_2;
1513 + nsmagic = le32_to_cpu(nscap->magic_etc);
1514 + if (nsmagic & VFS_CAP_FLAGS_EFFECTIVE)
1515 +@@ -449,9 +465,12 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
1516 + memcpy(&cap->data, &nscap->data, sizeof(__le32) * 2 * VFS_CAP_U32);
1517 + cap->magic_etc = cpu_to_le32(magic);
1518 + } else {
1519 +- size = -ENOMEM;
1520 ++ /* use unconverted v2 */
1521 ++ tmpbuf = NULL;
1522 + }
1523 ++ *buffer = cap;
1524 + }
1525 ++out_free:
1526 + kfree(tmpbuf);
1527 + return size;
1528 + }
1529 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
1530 +index 9312c7e750ed3..1ecb27b3421a7 100644
1531 +--- a/virt/kvm/kvm_main.c
1532 ++++ b/virt/kvm/kvm_main.c
1533 +@@ -412,9 +412,8 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
1534 + */
1535 + kvm->mmu_notifier_count++;
1536 + need_tlb_flush = kvm_unmap_hva_range(kvm, start, end, blockable);
1537 +- need_tlb_flush |= kvm->tlbs_dirty;
1538 + /* we've to flush the tlb before the pages can be freed */
1539 +- if (need_tlb_flush)
1540 ++ if (need_tlb_flush || kvm->tlbs_dirty)
1541 + kvm_flush_remote_tlbs(kvm);
1542 +
1543 + spin_unlock(&kvm->mmu_lock);