Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Wed, 10 Jan 2018 11:43:26
Message-Id: 1515584589.1e4d06678c3d3c6bef6c60ef5d05ec227fc8d26d.mpagano@gentoo
commit: 1e4d06678c3d3c6bef6c60ef5d05ec227fc8d26d
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jan 10 11:43:09 2018 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jan 10 11:43:09 2018 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1e4d0667

Linux patch 4.14.13

 0000_README | 4 +
 1012_linux-4.14.13.patch | 1518 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1522 insertions(+)

diff --git a/0000_README b/0000_README
index af8a1d1..1abd05e 100644
--- a/0000_README
+++ b/0000_README
@@ -91,6 +91,10 @@ Patch: 1011_linux-4.14.12.patch
 From: http://www.kernel.org
 Desc: Linux 4.14.12
 
+Patch: 1012_linux-4.14.13.patch
+From: http://www.kernel.org
+Desc: Linux 4.14.13
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.
 
diff --git a/1012_linux-4.14.13.patch b/1012_linux-4.14.13.patch
new file mode 100644
index 0000000..faee46a
--- /dev/null
+++ b/1012_linux-4.14.13.patch
@@ -0,0 +1,1518 @@
+diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
+index ad41b3813f0a..ea91cb61a602 100644
+--- a/Documentation/x86/x86_64/mm.txt
++++ b/Documentation/x86/x86_64/mm.txt
+@@ -12,8 +12,9 @@ ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB)
+ ... unused hole ...
+ ffffec0000000000 - fffffbffffffffff (=44 bits) kasan shadow memory (16TB)
+ ... unused hole ...
+-fffffe0000000000 - fffffe7fffffffff (=39 bits) LDT remap for PTI
+-fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping
++ vaddr_end for KASLR
++fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping
++fffffe8000000000 - fffffeffffffffff (=39 bits) LDT remap for PTI
+ ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
+ ... unused hole ...
+ ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
+@@ -37,13 +38,15 @@ ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB)
+ ... unused hole ...
+ ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB)
+ ... unused hole ...
+-fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping
++ vaddr_end for KASLR
++fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping
++... unused hole ...
+ ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
+ ... unused hole ...
+ ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
+ ... unused hole ...
+ ffffffff80000000 - ffffffff9fffffff (=512 MB) kernel text mapping, from phys 0
+-ffffffffa0000000 - [fixmap start] (~1526 MB) module mapping space
++ffffffffa0000000 - fffffffffeffffff (1520 MB) module mapping space
+ [fixmap start] - ffffffffff5fffff kernel-internal fixmap range
+ ffffffffff600000 - ffffffffff600fff (=4 kB) legacy vsyscall ABI
+ ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole
+@@ -67,9 +70,10 @@ memory window (this size is arbitrary, it can be raised later if needed).
+ The mappings are not part of any other kernel PGD and are only available
+ during EFI runtime calls.
+
+-The module mapping space size changes based on the CONFIG requirements for the
+-following fixmap section.
+-
+ Note that if CONFIG_RANDOMIZE_MEMORY is enabled, the direct mapping of all
+ physical memory, vmalloc/ioremap space and virtual memory map are randomized.
+ Their order is preserved but their base will be offset early at boot time.
++
++Be very careful vs. KASLR when changing anything here. The KASLR address
++range must not overlap with anything except the KASAN shadow area, which is
++correct as KASAN disables KASLR.
+diff --git a/Makefile b/Makefile
+index 20f7d4de0f1c..a67c5179052a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 12
++SUBLEVEL = 13
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
+index f35974ee7264..c9173c02081c 100644
+--- a/arch/arc/include/asm/uaccess.h
++++ b/arch/arc/include/asm/uaccess.h
+@@ -668,6 +668,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
+ return 0;
+
+ __asm__ __volatile__(
++ " mov lp_count, %5 \n"
+ " lp 3f \n"
+ "1: ldb.ab %3, [%2, 1] \n"
+ " breq.d %3, 0, 3f \n"
+@@ -684,8 +685,8 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
+ " .word 1b, 4b \n"
+ " .previous \n"
+ : "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
+- : "g"(-EFAULT), "l"(count)
+- : "memory");
++ : "g"(-EFAULT), "r"(count)
++ : "lp_count", "lp_start", "lp_end", "memory");
+
+ return res;
+ }
+diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
+index dd5a08aaa4da..3eb4bfc1fb36 100644
+--- a/arch/parisc/include/asm/ldcw.h
++++ b/arch/parisc/include/asm/ldcw.h
+@@ -12,6 +12,7 @@
+ for the semaphore. */
+
+ #define __PA_LDCW_ALIGNMENT 16
++#define __PA_LDCW_ALIGN_ORDER 4
+ #define __ldcw_align(a) ({ \
+ unsigned long __ret = (unsigned long) &(a)->lock[0]; \
+ __ret = (__ret + __PA_LDCW_ALIGNMENT - 1) \
+@@ -29,6 +30,7 @@
+ ldcd). */
+
+ #define __PA_LDCW_ALIGNMENT 4
++#define __PA_LDCW_ALIGN_ORDER 2
+ #define __ldcw_align(a) (&(a)->slock)
+ #define __LDCW "ldcw,co"
+
+diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
+index f3cecf5117cf..e95207c0565e 100644
+--- a/arch/parisc/kernel/entry.S
++++ b/arch/parisc/kernel/entry.S
+@@ -35,6 +35,7 @@
+ #include <asm/pgtable.h>
+ #include <asm/signal.h>
+ #include <asm/unistd.h>
++#include <asm/ldcw.h>
+ #include <asm/thread_info.h>
+
+ #include <linux/linkage.h>
+@@ -46,6 +47,14 @@
+ #endif
+
+ .import pa_tlb_lock,data
++ .macro load_pa_tlb_lock reg
++#if __PA_LDCW_ALIGNMENT > 4
++ load32 PA(pa_tlb_lock) + __PA_LDCW_ALIGNMENT-1, \reg
++ depi 0,31,__PA_LDCW_ALIGN_ORDER, \reg
++#else
++ load32 PA(pa_tlb_lock), \reg
++#endif
++ .endm
+
+ /* space_to_prot macro creates a prot id from a space id */
+
+@@ -457,7 +466,7 @@
+ .macro tlb_lock spc,ptp,pte,tmp,tmp1,fault
+ #ifdef CONFIG_SMP
+ cmpib,COND(=),n 0,\spc,2f
+- load32 PA(pa_tlb_lock),\tmp
++ load_pa_tlb_lock \tmp
+ 1: LDCW 0(\tmp),\tmp1
+ cmpib,COND(=) 0,\tmp1,1b
+ nop
+@@ -480,7 +489,7 @@
+ /* Release pa_tlb_lock lock. */
+ .macro tlb_unlock1 spc,tmp
+ #ifdef CONFIG_SMP
+- load32 PA(pa_tlb_lock),\tmp
++ load_pa_tlb_lock \tmp
+ tlb_unlock0 \spc,\tmp
+ #endif
+ .endm
+diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
+index adf7187f8951..2d40c4ff3f69 100644
+--- a/arch/parisc/kernel/pacache.S
++++ b/arch/parisc/kernel/pacache.S
+@@ -36,6 +36,7 @@
+ #include <asm/assembly.h>
+ #include <asm/pgtable.h>
+ #include <asm/cache.h>
++#include <asm/ldcw.h>
+ #include <linux/linkage.h>
+
+ .text
+@@ -333,8 +334,12 @@ ENDPROC_CFI(flush_data_cache_local)
+
+ .macro tlb_lock la,flags,tmp
+ #ifdef CONFIG_SMP
+- ldil L%pa_tlb_lock,%r1
+- ldo R%pa_tlb_lock(%r1),\la
++#if __PA_LDCW_ALIGNMENT > 4
++ load32 pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la
++ depi 0,31,__PA_LDCW_ALIGN_ORDER, \la
++#else
++ load32 pa_tlb_lock, \la
++#endif
+ rsm PSW_SM_I,\flags
+ 1: LDCW 0(\la),\tmp
+ cmpib,<>,n 0,\tmp,3f
+diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
+index 30f92391a93e..cad3e8661cd6 100644
+--- a/arch/parisc/kernel/process.c
++++ b/arch/parisc/kernel/process.c
+@@ -39,6 +39,7 @@
+ #include <linux/kernel.h>
+ #include <linux/mm.h>
+ #include <linux/fs.h>
++#include <linux/cpu.h>
+ #include <linux/module.h>
+ #include <linux/personality.h>
+ #include <linux/ptrace.h>
+@@ -183,6 +184,44 @@ int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
+ return 1;
+ }
+
++/*
++ * Idle thread support
++ *
++ * Detect when running on QEMU with SeaBIOS PDC Firmware and let
++ * QEMU idle the host too.
++ */
++
++int running_on_qemu __read_mostly;
++
++void __cpuidle arch_cpu_idle_dead(void)
++{
++ /* nop on real hardware, qemu will offline CPU. */
++ asm volatile("or %%r31,%%r31,%%r31\n":::);
++}
++
++void __cpuidle arch_cpu_idle(void)
++{
++ local_irq_enable();
++
++ /* nop on real hardware, qemu will idle sleep. */
++ asm volatile("or %%r10,%%r10,%%r10\n":::);
++}
++
++static int __init parisc_idle_init(void)
++{
++ const char *marker;
++
++ /* check QEMU/SeaBIOS marker in PAGE0 */
++ marker = (char *) &PAGE0->pad0;
++ running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0);
++
++ if (!running_on_qemu)
++ cpu_idle_poll_ctrl(1);
++
++ return 0;
++}
++arch_initcall(parisc_idle_init);
++
+ /*
+ * Copy architecture-specific thread state
+ */
+diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
+index 4797d08581ce..6e1e39035380 100644
+--- a/arch/powerpc/mm/fault.c
++++ b/arch/powerpc/mm/fault.c
+@@ -145,6 +145,11 @@ static noinline int bad_area(struct pt_regs *regs, unsigned long address)
+ return __bad_area(regs, address, SEGV_MAPERR);
+ }
+
++static noinline int bad_access(struct pt_regs *regs, unsigned long address)
++{
++ return __bad_area(regs, address, SEGV_ACCERR);
++}
++
+ static int do_sigbus(struct pt_regs *regs, unsigned long address,
+ unsigned int fault)
+ {
+@@ -490,7 +495,7 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
+
+ good_area:
+ if (unlikely(access_error(is_write, is_exec, vma)))
+- return bad_area(regs, address);
++ return bad_access(regs, address);
+
+ /*
+ * If for any reason at all we couldn't handle the fault,
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 40d0a1a97889..b87a930c2201 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -794,11 +794,12 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)
+
+ if (kvm->arch.use_cmma) {
+ /*
+- * Get the last slot. They should be sorted by base_gfn, so the
+- * last slot is also the one at the end of the address space.
+- * We have verified above that at least one slot is present.
++ * Get the first slot. They are reverse sorted by base_gfn, so
++ * the first slot is also the one at the end of the address
++ * space. We have verified above that at least one slot is
++ * present.
+ */
+- ms = slots->memslots + slots->used_slots - 1;
++ ms = slots->memslots;
+ /* round up so we only use full longs */
+ ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG);
+ /* allocate enough bytes to store all the bits */
+diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
+index 5b25287f449b..7bd3a59232f0 100644
+--- a/arch/s390/kvm/priv.c
++++ b/arch/s390/kvm/priv.c
+@@ -1009,7 +1009,7 @@ static inline int do_essa(struct kvm_vcpu *vcpu, const int orc)
+ cbrlo[entries] = gfn << PAGE_SHIFT;
+ }
+
+- if (orc) {
++ if (orc && gfn < ms->bitmap_size) {
+ /* increment only if we are really flipping the bit to 1 */
+ if (!test_and_set_bit(gfn, ms->pgste_bitmap))
+ atomic64_inc(&ms->dirty_pages);
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index 8f0aace08b87..8156e47da7ba 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -5,6 +5,7 @@
+
+ #include <asm/cpu_entry_area.h>
+ #include <asm/perf_event.h>
++#include <asm/tlbflush.h>
+ #include <asm/insn.h>
+
+ #include "../perf_event.h"
+@@ -283,20 +284,35 @@ static DEFINE_PER_CPU(void *, insn_buffer);
+
+ static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot)
+ {
++ unsigned long start = (unsigned long)cea;
+ phys_addr_t pa;
+ size_t msz = 0;
+
+ pa = virt_to_phys(addr);
++
++ preempt_disable();
+ for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE)
+ cea_set_pte(cea, pa, prot);
++
++ /*
++ * This is a cross-CPU update of the cpu_entry_area, we must shoot down
++ * all TLB entries for it.
++ */
++ flush_tlb_kernel_range(start, start + size);
++ preempt_enable();
+ }
+
+ static void ds_clear_cea(void *cea, size_t size)
+ {
++ unsigned long start = (unsigned long)cea;
+ size_t msz = 0;
+
++ preempt_disable();
+ for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE)
+ cea_set_pte(cea, 0, PAGE_NONE);
++
++ flush_tlb_kernel_range(start, start + size);
++ preempt_enable();
+ }
+
+ static void *dsalloc_pages(size_t size, gfp_t flags, int cpu)
+diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
+index dbfd0854651f..cf5961ca8677 100644
+--- a/arch/x86/include/asm/alternative.h
++++ b/arch/x86/include/asm/alternative.h
+@@ -140,7 +140,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
+ ".popsection\n" \
+ ".pushsection .altinstr_replacement, \"ax\"\n" \
+ ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
+- ".popsection"
++ ".popsection\n"
+
+ #define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
+ OLDINSTR_2(oldinstr, 1, 2) \
+@@ -151,7 +151,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
+ ".pushsection .altinstr_replacement, \"ax\"\n" \
+ ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
+ ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
+- ".popsection"
++ ".popsection\n"
+
+ /*
+ * Alternative instructions for different CPU types or capabilities.
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 07cdd1715705..21ac898df2d8 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -341,6 +341,6 @@
+ #define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */
+ #define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */
+ #define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */
+-#define X86_BUG_CPU_INSECURE X86_BUG(14) /* CPU is insecure and needs kernel page table isolation */
++#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
+
+ #endif /* _ASM_X86_CPUFEATURES_H */
+diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
+index b97a539bcdee..6b8f73dcbc2c 100644
+--- a/arch/x86/include/asm/pgtable_64_types.h
++++ b/arch/x86/include/asm/pgtable_64_types.h
+@@ -75,7 +75,13 @@ typedef struct { pteval_t pte; } pte_t;
+ #define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
+ #define PGDIR_MASK (~(PGDIR_SIZE - 1))
+
+-/* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */
++/*
++ * See Documentation/x86/x86_64/mm.txt for a description of the memory map.
++ *
++ * Be very careful vs. KASLR when changing anything here. The KASLR address
++ * range must not overlap with anything except the KASAN shadow area, which
++ * is correct as KASAN disables KASLR.
++ */
+ #define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
+
+ #ifdef CONFIG_X86_5LEVEL
+@@ -88,7 +94,7 @@ typedef struct { pteval_t pte; } pte_t;
+ # define VMALLOC_SIZE_TB _AC(32, UL)
+ # define __VMALLOC_BASE _AC(0xffffc90000000000, UL)
+ # define __VMEMMAP_BASE _AC(0xffffea0000000000, UL)
+-# define LDT_PGD_ENTRY _AC(-4, UL)
++# define LDT_PGD_ENTRY _AC(-3, UL)
+ # define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT)
+ #endif
+
+@@ -104,13 +110,13 @@ typedef struct { pteval_t pte; } pte_t;
+
+ #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
+ /* The module sections ends with the start of the fixmap */
+-#define MODULES_END __fix_to_virt(__end_of_fixed_addresses + 1)
++#define MODULES_END _AC(0xffffffffff000000, UL)
+ #define MODULES_LEN (MODULES_END - MODULES_VADDR)
+
+ #define ESPFIX_PGD_ENTRY _AC(-2, UL)
+ #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << P4D_SHIFT)
+
+-#define CPU_ENTRY_AREA_PGD _AC(-3, UL)
++#define CPU_ENTRY_AREA_PGD _AC(-4, UL)
+ #define CPU_ENTRY_AREA_BASE (CPU_ENTRY_AREA_PGD << P4D_SHIFT)
+
+ #define EFI_VA_START ( -4 * (_AC(1, UL) << 30))
+diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
+index 90cb82dbba57..570e8bb1f386 100644
+--- a/arch/x86/kernel/cpu/Makefile
++++ b/arch/x86/kernel/cpu/Makefile
+@@ -22,7 +22,7 @@ obj-y += common.o
+ obj-y += rdrand.o
+ obj-y += match.o
+ obj-y += bugs.o
+-obj-$(CONFIG_CPU_FREQ) += aperfmperf.o
++obj-y += aperfmperf.o
+ obj-y += cpuid-deps.o
+
+ obj-$(CONFIG_PROC_FS) += proc.o
+diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
+index 0ee83321a313..7eba34df54c3 100644
+--- a/arch/x86/kernel/cpu/aperfmperf.c
++++ b/arch/x86/kernel/cpu/aperfmperf.c
+@@ -14,6 +14,8 @@
+ #include <linux/percpu.h>
+ #include <linux/smp.h>
+
++#include "cpu.h"
++
+ struct aperfmperf_sample {
+ unsigned int khz;
+ ktime_t time;
+@@ -24,7 +26,7 @@ struct aperfmperf_sample {
+ static DEFINE_PER_CPU(struct aperfmperf_sample, samples);
+
+ #define APERFMPERF_CACHE_THRESHOLD_MS 10
+-#define APERFMPERF_REFRESH_DELAY_MS 20
++#define APERFMPERF_REFRESH_DELAY_MS 10
+ #define APERFMPERF_STALE_THRESHOLD_MS 1000
+
+ /*
+@@ -38,14 +40,8 @@ static void aperfmperf_snapshot_khz(void *dummy)
+ u64 aperf, aperf_delta;
+ u64 mperf, mperf_delta;
+ struct aperfmperf_sample *s = this_cpu_ptr(&samples);
+- ktime_t now = ktime_get();
+- s64 time_delta = ktime_ms_delta(now, s->time);
+ unsigned long flags;
+
+- /* Don't bother re-computing within the cache threshold time. */
+- if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
+- return;
+-
+ local_irq_save(flags);
+ rdmsrl(MSR_IA32_APERF, aperf);
+ rdmsrl(MSR_IA32_MPERF, mperf);
+@@ -61,31 +57,68 @@ static void aperfmperf_snapshot_khz(void *dummy)
+ if (mperf_delta == 0)
+ return;
+
+- s->time = now;
++ s->time = ktime_get();
+ s->aperf = aperf;
+ s->mperf = mperf;
++ s->khz = div64_u64((cpu_khz * aperf_delta), mperf_delta);
++}
+
+- /* If the previous iteration was too long ago, discard it. */
+- if (time_delta > APERFMPERF_STALE_THRESHOLD_MS)
+- s->khz = 0;
+- else
+- s->khz = div64_u64((cpu_khz * aperf_delta), mperf_delta);
++static bool aperfmperf_snapshot_cpu(int cpu, ktime_t now, bool wait)
++{
++ s64 time_delta = ktime_ms_delta(now, per_cpu(samples.time, cpu));
++
++ /* Don't bother re-computing within the cache threshold time. */
++ if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
++ return true;
++
++ smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, wait);
++
++ /* Return false if the previous iteration was too long ago. */
++ return time_delta <= APERFMPERF_STALE_THRESHOLD_MS;
+ }
+
+-unsigned int arch_freq_get_on_cpu(int cpu)
++unsigned int aperfmperf_get_khz(int cpu)
+ {
+- unsigned int khz;
++ if (!cpu_khz)
++ return 0;
++
++ if (!static_cpu_has(X86_FEATURE_APERFMPERF))
++ return 0;
+
++ aperfmperf_snapshot_cpu(cpu, ktime_get(), true);
++ return per_cpu(samples.khz, cpu);
++}
++
++void arch_freq_prepare_all(void)
++{
++ ktime_t now = ktime_get();
++ bool wait = false;
++ int cpu;
++
++ if (!cpu_khz)
++ return;
++
++ if (!static_cpu_has(X86_FEATURE_APERFMPERF))
++ return;
++
++ for_each_online_cpu(cpu)
++ if (!aperfmperf_snapshot_cpu(cpu, now, false))
++ wait = true;
++
++ if (wait)
++ msleep(APERFMPERF_REFRESH_DELAY_MS);
++}
++
++unsigned int arch_freq_get_on_cpu(int cpu)
++{
+ if (!cpu_khz)
+ return 0;
+
+ if (!static_cpu_has(X86_FEATURE_APERFMPERF))
+ return 0;
+
+- smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1);
+- khz = per_cpu(samples.khz, cpu);
+- if (khz)
+- return khz;
++ if (aperfmperf_snapshot_cpu(cpu, ktime_get(), true))
++ return per_cpu(samples.khz, cpu);
+
+ msleep(APERFMPERF_REFRESH_DELAY_MS);
+ smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1);
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index b1be494ab4e8..2d3bd2215e5b 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -900,7 +900,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
+ setup_force_cpu_cap(X86_FEATURE_ALWAYS);
+
+ if (c->x86_vendor != X86_VENDOR_AMD)
+- setup_force_cpu_bug(X86_BUG_CPU_INSECURE);
++ setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+
+ fpu__init_system(c);
+
+diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
+index f52a370b6c00..e806b11a99af 100644
+--- a/arch/x86/kernel/cpu/cpu.h
++++ b/arch/x86/kernel/cpu/cpu.h
+@@ -47,4 +47,7 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[],
+
+ extern void get_cpu_cap(struct cpuinfo_x86 *c);
+ extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
++
++unsigned int aperfmperf_get_khz(int cpu);
++
+ #endif /* ARCH_X86_CPU_H */
+diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
+index c6daec4bdba5..330b8462d426 100644
+--- a/arch/x86/kernel/cpu/microcode/amd.c
++++ b/arch/x86/kernel/cpu/microcode/amd.c
+@@ -470,6 +470,7 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
+ #define F14H_MPB_MAX_SIZE 1824
+ #define F15H_MPB_MAX_SIZE 4096
+ #define F16H_MPB_MAX_SIZE 3458
++#define F17H_MPB_MAX_SIZE 3200
+
+ switch (family) {
+ case 0x14:
+@@ -481,6 +482,9 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
+ case 0x16:
+ max_size = F16H_MPB_MAX_SIZE;
+ break;
++ case 0x17:
++ max_size = F17H_MPB_MAX_SIZE;
++ break;
+ default:
+ max_size = F1XH_MPB_MAX_SIZE;
+ break;
+diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
+index 6b7e17bf0b71..e7ecedafa1c8 100644
+--- a/arch/x86/kernel/cpu/proc.c
++++ b/arch/x86/kernel/cpu/proc.c
+@@ -5,6 +5,8 @@
+ #include <linux/seq_file.h>
+ #include <linux/cpufreq.h>
+
++#include "cpu.h"
++
+ /*
+ * Get CPU information for use by the procfs.
+ */
+@@ -78,8 +80,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
+ seq_printf(m, "microcode\t: 0x%x\n", c->microcode);
+
+ if (cpu_has(c, X86_FEATURE_TSC)) {
+- unsigned int freq = cpufreq_quick_get(cpu);
++ unsigned int freq = aperfmperf_get_khz(cpu);
+
++ if (!freq)
++ freq = cpufreq_quick_get(cpu);
+ if (!freq)
+ freq = cpu_khz;
+ seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
+diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
+index f56902c1f04b..2a4849e92831 100644
+--- a/arch/x86/mm/dump_pagetables.c
++++ b/arch/x86/mm/dump_pagetables.c
+@@ -61,10 +61,10 @@ enum address_markers_idx {
+ KASAN_SHADOW_START_NR,
+ KASAN_SHADOW_END_NR,
+ #endif
++ CPU_ENTRY_AREA_NR,
+ #if defined(CONFIG_MODIFY_LDT_SYSCALL) && !defined(CONFIG_X86_5LEVEL)
+ LDT_NR,
+ #endif
+- CPU_ENTRY_AREA_NR,
+ #ifdef CONFIG_X86_ESPFIX64
+ ESPFIX_START_NR,
+ #endif
+diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
+index 80259ad8c386..6b462a472a7b 100644
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -870,7 +870,7 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
+ .next_asid = 1,
+ .cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */
+ };
+-EXPORT_SYMBOL_GPL(cpu_tlbstate);
++EXPORT_PER_CPU_SYMBOL(cpu_tlbstate);
+
+ void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
+ {
+diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
+index 879ef930e2c2..aedebd2ebf1e 100644
+--- a/arch/x86/mm/kaslr.c
++++ b/arch/x86/mm/kaslr.c
+@@ -34,25 +34,14 @@
+ #define TB_SHIFT 40
+
+ /*
+- * Virtual address start and end range for randomization. The end changes base
+- * on configuration to have the highest amount of space for randomization.
+- * It increases the possible random position for each randomized region.
++ * Virtual address start and end range for randomization.
+ *
+- * You need to add an if/def entry if you introduce a new memory region
+- * compatible with KASLR. Your entry must be in logical order with memory
+- * layout. For example, ESPFIX is before EFI because its virtual address is
+- * before. You also need to add a BUILD_BUG_ON() in kernel_randomize_memory() to
+- * ensure that this order is correct and won't be changed.
++ * The end address could depend on more configuration options to make the
++ * highest amount of space for randomization available, but that's too hard
++ * to keep straight and caused issues already.
+ */
+ static const unsigned long vaddr_start = __PAGE_OFFSET_BASE;
+-
+-#if defined(CONFIG_X86_ESPFIX64)
+-static const unsigned long vaddr_end = ESPFIX_BASE_ADDR;
+-#elif defined(CONFIG_EFI)
+-static const unsigned long vaddr_end = EFI_VA_END;
+-#else
+-static const unsigned long vaddr_end = __START_KERNEL_map;
+-#endif
++static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE;
+
+ /* Default values */
+ unsigned long page_offset_base = __PAGE_OFFSET_BASE;
+@@ -101,15 +90,12 @@ void __init kernel_randomize_memory(void)
+ unsigned long remain_entropy;
+
+ /*
+- * All these BUILD_BUG_ON checks ensures the memory layout is
+- * consistent with the vaddr_start/vaddr_end variables.
++ * These BUILD_BUG_ON checks ensure the memory layout is consistent
++ * with the vaddr_start/vaddr_end variables. These checks are very
++ * limited....
+ */
+ BUILD_BUG_ON(vaddr_start >= vaddr_end);
+- BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) &&
+- vaddr_end >= EFI_VA_END);
+- BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) ||
+- IS_ENABLED(CONFIG_EFI)) &&
+- vaddr_end >= __START_KERNEL_map);
++ BUILD_BUG_ON(vaddr_end != CPU_ENTRY_AREA_BASE);
+ BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);
+
+ if (!kaslr_memory_enabled())
+diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
+index 2da28ba97508..43d4a4a29037 100644
+--- a/arch/x86/mm/pti.c
++++ b/arch/x86/mm/pti.c
+@@ -56,13 +56,13 @@
+
+ static void __init pti_print_if_insecure(const char *reason)
+ {
+- if (boot_cpu_has_bug(X86_BUG_CPU_INSECURE))
++ if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
+ pr_info("%s\n", reason);
+ }
+
+ static void __init pti_print_if_secure(const char *reason)
+ {
+- if (!boot_cpu_has_bug(X86_BUG_CPU_INSECURE))
++ if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
+ pr_info("%s\n", reason);
+ }
+
+@@ -96,7 +96,7 @@ void __init pti_check_boottime_disable(void)
+ }
+
+ autosel:
+- if (!boot_cpu_has_bug(X86_BUG_CPU_INSECURE))
++ if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
+ return;
+ enable:
+ setup_force_cpu_cap(X86_FEATURE_PTI);
+diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
+index 8a99a2e96537..5b513ccffde4 100644
+--- a/arch/x86/platform/efi/quirks.c
++++ b/arch/x86/platform/efi/quirks.c
+@@ -592,7 +592,18 @@ static int qrk_capsule_setup_info(struct capsule_info *cap_info, void **pkbuff,
+ /*
+ * Update the first page pointer to skip over the CSH header.
+ */
+- cap_info->pages[0] += csh->headersize;
++ cap_info->phys[0] += csh->headersize;
++
++ /*
++ * cap_info->capsule should point at a virtual mapping of the entire
++ * capsule, starting at the capsule header. Our image has the Quark
++ * security header prepended, so we cannot rely on the default vmap()
++ * mapping created by the generic capsule code.
++ * Given that the Quark firmware does not appear to care about the
++ * virtual mapping, let's just point cap_info->capsule at our copy
++ * of the capsule header.
++ */
++ cap_info->capsule = &cap_info->header;
+
+ return 1;
+ }
+diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
+index db1bc3147bc4..600afa99941f 100644
+--- a/crypto/chacha20poly1305.c
++++ b/crypto/chacha20poly1305.c
+@@ -610,6 +610,11 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
+ algt->mask));
+ if (IS_ERR(poly))
+ return PTR_ERR(poly);
++ poly_hash = __crypto_hash_alg_common(poly);
++
++ err = -EINVAL;
++ if (poly_hash->digestsize != POLY1305_DIGEST_SIZE)
++ goto out_put_poly;
+
+ err = -ENOMEM;
+ inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
+@@ -618,7 +623,6 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
+
+ ctx = aead_instance_ctx(inst);
+ ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize;
+- poly_hash = __crypto_hash_alg_common(poly);
+ err = crypto_init_ahash_spawn(&ctx->poly, poly_hash,
+ aead_crypto_instance(inst));
+ if (err)
+diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
+index ee9cfb99fe25..f8ec3d4ba4a8 100644
+--- a/crypto/pcrypt.c
++++ b/crypto/pcrypt.c
+@@ -254,6 +254,14 @@ static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm)
+ crypto_free_aead(ctx->child);
+ }
+
++static void pcrypt_free(struct aead_instance *inst)
++{
++ struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst);
++
++ crypto_drop_aead(&ctx->spawn);
++ kfree(inst);
++}
++
+ static int pcrypt_init_instance(struct crypto_instance *inst,
+ struct crypto_alg *alg)
+ {
+@@ -319,6 +327,8 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
+ inst->alg.encrypt = pcrypt_aead_encrypt;
+ inst->alg.decrypt = pcrypt_aead_decrypt;
+
++ inst->free = pcrypt_free;
++
+ err = aead_register_instance(tmpl, inst);
+ if (err)
+ goto out_drop_aead;
+@@ -349,14 +359,6 @@ static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
+ return -EINVAL;
+ }
+
+-static void pcrypt_free(struct crypto_instance *inst)
+-{
+- struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst);
+-
+- crypto_drop_aead(&ctx->spawn);
+- kfree(inst);
+-}
+-
+ static int pcrypt_cpumask_change_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+ {
+@@ -469,7 +471,6 @@ static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
+ static struct crypto_template pcrypt_tmpl = {
+ .name = "pcrypt",
+ .create = pcrypt_create,
+- .free = pcrypt_free,
+ .module = THIS_MODULE,
+ };
+
+diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
+index 328ca93781cf..1b76d9585902 100644
+--- a/drivers/bus/sunxi-rsb.c
++++ b/drivers/bus/sunxi-rsb.c
+@@ -178,6 +178,7 @@ static struct bus_type sunxi_rsb_bus = {
+ .match = sunxi_rsb_device_match,
+ .probe = sunxi_rsb_device_probe,
+ .remove = sunxi_rsb_device_remove,
++ .uevent = of_device_uevent_modalias,
+ };
+
+ static void sunxi_rsb_dev_release(struct device *dev)
+diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
+index 3e104f5aa0c2..b56b3f711d94 100644
+--- a/drivers/crypto/chelsio/Kconfig
++++ b/drivers/crypto/chelsio/Kconfig
+@@ -5,6 +5,7 @@ config CRYPTO_DEV_CHELSIO
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ select CRYPTO_AUTHENC
++ select CRYPTO_GF128MUL
+ ---help---
+ The Chelsio Crypto Co-processor driver for T6 adapters.
+
+diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
+index a9fd8b9e86cd..699ee5a9a8f9 100644
+--- a/drivers/crypto/n2_core.c
++++ b/drivers/crypto/n2_core.c
+@@ -1625,6 +1625,7 @@ static int queue_cache_init(void)
+ CWQ_ENTRY_SIZE, 0, NULL);
+ if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
+ kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
++ queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
+ return -ENOMEM;
+ }
+ return 0;
+@@ -1634,6 +1635,8 @@ static void queue_cache_destroy(void)
+ {
+ kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
+ kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
++ queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
++ queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
+ }
+
+ static long spu_queue_register_workfn(void *arg)
+diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
+index ec8ac5c4dd84..055e2e8f985a 100644
+--- a/drivers/firmware/efi/capsule-loader.c
++++ b/drivers/firmware/efi/capsule-loader.c
+@@ -20,10 +20,6 @@
+
+ #define NO_FURTHER_WRITE_ACTION -1
+
+-#ifndef phys_to_page
+-#define phys_to_page(x) pfn_to_page((x) >> PAGE_SHIFT)
+-#endif
+-
+ /**
+ * efi_free_all_buff_pages - free all previous allocated buffer pages
+ * @cap_info: pointer to current instance of capsule_info structure
+@@ -35,7 +31,7 @@
+ static void efi_free_all_buff_pages(struct capsule_info *cap_info)
+ {
+ while (cap_info->index > 0)
+- __free_page(phys_to_page(cap_info->pages[--cap_info->index]));
++ __free_page(cap_info->pages[--cap_info->index]);
+
+ cap_info->index = NO_FURTHER_WRITE_ACTION;
+ }
+@@ -71,6 +67,14 @@ int __efi_capsule_setup_info(struct capsule_info *cap_info)
+
+ cap_info->pages = temp_page;
+
++ temp_page = krealloc(cap_info->phys,
++ pages_needed * sizeof(phys_addr_t *),
++ GFP_KERNEL | __GFP_ZERO);
++ if (!temp_page)
++ return -ENOMEM;
++
++ cap_info->phys = temp_page;
++
+ return 0;
+ }
+
+@@ -105,9 +109,24 @@ int __weak efi_capsule_setup_info(struct capsule_info *cap_info, void *kbuff,
+ **/
+ static ssize_t efi_capsule_submit_update(struct capsule_info *cap_info)
+ {
++ bool do_vunmap = false;
+ int ret;
+
+- ret = efi_capsule_update(&cap_info->header, cap_info->pages);
++ /*
++ * cap_info->capsule may have been assigned already by a quirk
++ * handler, so only overwrite it if it is NULL
++ */
++ if (!cap_info->capsule) {
++ cap_info->capsule = vmap(cap_info->pages, cap_info->index,
++ VM_MAP, PAGE_KERNEL);
++ if (!cap_info->capsule)
++ return -ENOMEM;
++ do_vunmap = true;
++ }
++
++ ret = efi_capsule_update(cap_info->capsule, cap_info->phys);
++ if (do_vunmap)
++ vunmap(cap_info->capsule);
+ if (ret) {
+ pr_err("capsule update failed\n");
+ return ret;
+@@ -165,10 +184,12 @@ static ssize_t efi_capsule_write(struct file *file, const char __user *buff,
+ goto failed;
+ }
+
+- cap_info->pages[cap_info->index++] = page_to_phys(page);
++ cap_info->pages[cap_info->index] = page;
++ cap_info->phys[cap_info->index] = page_to_phys(page);
+ cap_info->page_bytes_remain = PAGE_SIZE;
++ cap_info->index++;
+ } else {
+- page = phys_to_page(cap_info->pages[cap_info->index - 1]);
++ page = cap_info->pages[cap_info->index - 1];
+ }
+
+ kbuff = kmap(page);
+@@ -252,6 +273,7 @@ static int efi_capsule_release(struct inode *inode, struct file *file)
+ struct capsule_info *cap_info = file->private_data;
+
+ kfree(cap_info->pages);
++ kfree(cap_info->phys);
+ kfree(file->private_data);
+ file->private_data = NULL;
+ return 0;
+@@ -281,6 +303,13 @@ static int efi_capsule_open(struct inode *inode, struct file *file)
+ return -ENOMEM;
+ }
+
++ cap_info->phys = kzalloc(sizeof(void *), GFP_KERNEL);
++ if (!cap_info->phys) {
++ kfree(cap_info->pages);
++ kfree(cap_info);
++ return -ENOMEM;
++ }
++
+ file->private_data = cap_info;
+
+ return 0;
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index c9bcc6c45012..ce2ed16f2a30 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -6944,6 +6944,7 @@ enum {
+ #define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
+
+ #define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
++#define SKL_SELECT_ALTERNATE_DC_EXIT (1<<30)
+ #define MASK_WAKEMEM (1<<13)
+
+ #define SKL_DFSM _MMIO(0x51000)
+@@ -8475,6 +8476,7 @@ enum skl_power_gate {
+ #define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22)
+ #define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22)
+ #define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe)<<20)
++#define CDCLK_DIVMUX_CD_OVERRIDE (1<<19)
+ #define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3)
+ #define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16)
+ #define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
+diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
+index 1241e5891b29..26a8dcd2c549 100644
+--- a/drivers/gpu/drm/i915/intel_cdclk.c
++++ b/drivers/gpu/drm/i915/intel_cdclk.c
+@@ -859,16 +859,10 @@ static void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv,
+
+ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
+ {
+- int min_cdclk = skl_calc_cdclk(0, vco);
+ u32 val;
+
+ WARN_ON(vco != 8100000 && vco != 8640000);
+
+- /* select the minimum CDCLK before enabling DPLL 0 */
+- val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
+- I915_WRITE(CDCLK_CTL, val);
+- POSTING_READ(CDCLK_CTL);
+-
+ /*
+ * We always enable DPLL0 with the lowest link rate possible, but still
+ * taking into account the VCO required to operate the eDP panel at the
+@@ -922,7 +916,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
+ {
+ int cdclk = cdclk_state->cdclk;
+ int vco = cdclk_state->vco;
+- u32 freq_select, pcu_ack;
++ u32 freq_select, pcu_ack, cdclk_ctl;
+ int ret;
+
+ WARN_ON((cdclk == 24000) != (vco == 0));
+@@ -939,7 +933,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
+ return;
+ }
+
+- /* set CDCLK_CTL */
++ /* Choose frequency for this cdclk */
+ switch (cdclk) {
+ case 450000:
+ case 432000:
+@@ -967,10 +961,33 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
+ dev_priv->cdclk.hw.vco != vco)
+ skl_dpll0_disable(dev_priv);
+
++ cdclk_ctl = I915_READ(CDCLK_CTL);
++
++ if (dev_priv->cdclk.hw.vco != vco) {
++ /* Wa Display #1183: skl,kbl,cfl */
++ cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
++ cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
++ I915_WRITE(CDCLK_CTL, cdclk_ctl);
++ }
++
++ /* Wa Display #1183: skl,kbl,cfl */
++ cdclk_ctl |= CDCLK_DIVMUX_CD_OVERRIDE;
++ I915_WRITE(CDCLK_CTL, cdclk_ctl);
++ POSTING_READ(CDCLK_CTL);
++
+ if (dev_priv->cdclk.hw.vco != vco)
+ skl_dpll0_enable(dev_priv, vco);
+
+- I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk));
++ /* Wa Display #1183: skl,kbl,cfl */
++ cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
++ I915_WRITE(CDCLK_CTL, cdclk_ctl);
++
++ cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
++ I915_WRITE(CDCLK_CTL, cdclk_ctl);
++
++ /* Wa Display #1183: skl,kbl,cfl */
++ cdclk_ctl &= ~CDCLK_DIVMUX_CD_OVERRIDE;
++ I915_WRITE(CDCLK_CTL, cdclk_ctl);
+ POSTING_READ(CDCLK_CTL);
+
+ /* inform PCU of the change */
+diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
+index 49577eba8e7e..51cb5293bf43 100644
+--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
++++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
+@@ -598,6 +598,11 @@ void gen9_enable_dc5(struct drm_i915_private *dev_priv)
+
+ DRM_DEBUG_KMS("Enabling DC5\n");
+
++ /* Wa Display #1183: skl,kbl,cfl */
++ if (IS_GEN9_BC(dev_priv))
++ I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
++ SKL_SELECT_ALTERNATE_DC_EXIT);
++
+ gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
+ }
+
+@@ -625,6 +630,11 @@ void skl_disable_dc6(struct drm_i915_private *dev_priv)
+ {
+ DRM_DEBUG_KMS("Disabling DC6\n");
+
++ /* Wa Display #1183: skl,kbl,cfl */
++ if (IS_GEN9_BC(dev_priv))
++ I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
++ SKL_SELECT_ALTERNATE_DC_EXIT);
++
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+ }
+
+@@ -1786,6 +1796,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
+ GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_MODESET) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
++ BIT_ULL(POWER_DOMAIN_GMBUS) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+ #define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index b84cd978fce2..a4aaa748e987 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1613,7 +1613,7 @@ static int elantech_set_properties(struct elantech_data *etd)
+ case 5:
+ etd->hw_version = 3;
+ break;
+- case 6 ... 14:
++ case 6 ... 15:
+ etd->hw_version = 4;
+ break;
+ default:
+diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
+index e67ba6c40faf..8f7a3c00b6cf 100644
+--- a/drivers/iommu/arm-smmu-v3.c
++++ b/drivers/iommu/arm-smmu-v3.c
+@@ -1611,13 +1611,15 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
+ domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
+ domain->geometry.aperture_end = (1UL << ias) - 1;
+ domain->geometry.force_aperture = true;
+- smmu_domain->pgtbl_ops = pgtbl_ops;
+
+ ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
+- if (ret < 0)
++ if (ret < 0) {
+ free_io_pgtable_ops(pgtbl_ops);
++ return ret;
++ }
+
+- return ret;
++ smmu_domain->pgtbl_ops = pgtbl_ops;
++ return 0;
+ }
+
+ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
+@@ -1644,7 +1646,7 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
+
+ static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
+ {
+- int i;
++ int i, j;
+ struct arm_smmu_master_data *master = fwspec->iommu_priv;
+ struct arm_smmu_device *smmu = master->smmu;
+
+@@ -1652,6 +1654,13 @@ static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
+ u32 sid = fwspec->ids[i];
+ __le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
+
++ /* Bridged PCI devices may end up with duplicated IDs */
++ for (j = 0; j < i; j++)
++ if (fwspec->ids[j] == sid)
++ break;
++ if (j < i)
++ continue;
++
+ arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
+ }
+ }
+diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
+index 85cff68643e0..125b744c9c28 100644
+--- a/drivers/mtd/nand/pxa3xx_nand.c
++++ b/drivers/mtd/nand/pxa3xx_nand.c
+@@ -950,6 +950,7 @@ static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
+
+ switch (command) {
+ case NAND_CMD_READ0:
++ case NAND_CMD_READOOB:
+ case NAND_CMD_PAGEPROG:
+ info->use_ecc = 1;
+ break;
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index 19e4ad2f3f2e..0c4b690cf761 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -87,6 +87,7 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
+
+ spin_lock(&root->inode_lock);
+ node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
++
+ if (node) {
+ if (btrfs_inode->delayed_node) {
+ refcount_inc(&node->refs); /* can be accessed */
+@@ -94,9 +95,30 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
+ spin_unlock(&root->inode_lock);
+ return node;
+ }
+- btrfs_inode->delayed_node = node;
+- /* can be accessed and cached in the inode */
+- refcount_add(2, &node->refs);
++
++ /*
++ * It's possible that we're racing into the middle of removing
++ * this node from the radix tree. In this case, the refcount
++ * was zero and it should never go back to one. Just return
++ * NULL like it was never in the radix at all; our release
++ * function is in the process of removing it.
++ *
++ * Some implementations of refcount_inc refuse to bump the
++ * refcount once it has hit zero. If we don't do this dance
++ * here, refcount_inc() may decide to just WARN_ONCE() instead
++ * of actually bumping the refcount.
++ *
++ * If this node is properly in the radix, we want to bump the
++ * refcount twice, once for the inode and once for this get
++ * operation.
++ */
++ if (refcount_inc_not_zero(&node->refs)) {
++ refcount_inc(&node->refs);
++ btrfs_inode->delayed_node = node;
++ } else {
++ node = NULL;
++ }
++
+ spin_unlock(&root->inode_lock);
+ return node;
+ }
+@@ -254,17 +276,18 @@ static void __btrfs_release_delayed_node(
+ mutex_unlock(&delayed_node->mutex);
+
+ if (refcount_dec_and_test(&delayed_node->refs)) {
+- bool free = false;
+ struct btrfs_root *root = delayed_node->root;
++
+ spin_lock(&root->inode_lock);
+- if (refcount_read(&delayed_node->refs) == 0) {
+- radix_tree_delete(&root->delayed_nodes_tree,
+- delayed_node->inode_id);
+- free = true;
+- }
++ /*
++ * Once our refcount goes to zero, nobody is allowed to bump it
++ * back up. We can delete it now.
++ */
++ ASSERT(refcount_read(&delayed_node->refs) == 0);
++ radix_tree_delete(&root->delayed_nodes_tree,
++ delayed_node->inode_id);
+ spin_unlock(&root->inode_lock);
+- if (free)
+- kmem_cache_free(delayed_node_cache, delayed_node);
++ kmem_cache_free(delayed_node_cache, delayed_node);
+ }
+ }
+
+diff --git a/fs/proc/cpuinfo.c b/fs/proc/cpuinfo.c
+index e0f867cd8553..96f1087e372c 100644
+--- a/fs/proc/cpuinfo.c
++++ b/fs/proc/cpuinfo.c
+@@ -1,12 +1,18 @@
+ // SPDX-License-Identifier: GPL-2.0
++#include <linux/cpufreq.h>
+ #include <linux/fs.h>
+ #include <linux/init.h>
+ #include <linux/proc_fs.h>
+ #include <linux/seq_file.h>
+
++__weak void arch_freq_prepare_all(void)
++{
++}
++
+ extern const struct seq_operations cpuinfo_op;
+ static int cpuinfo_open(struct inode *inode, struct file *file)
+ {
++ arch_freq_prepare_all();
+ return seq_open(file, &cpuinfo_op);
+ }
+
+diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
+index 1c713fd5b3e6..5aa392eae1c3 100644
+--- a/fs/userfaultfd.c
++++ b/fs/userfaultfd.c
+@@ -570,11 +570,14 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
+ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
+ struct userfaultfd_wait_queue *ewq)
+ {
++ struct userfaultfd_ctx *release_new_ctx;
++
+ if (WARN_ON_ONCE(current->flags & PF_EXITING))
+ goto out;
+
+ ewq->ctx = ctx;
+ init_waitqueue_entry(&ewq->wq, current);
++ release_new_ctx = NULL;
+
+ spin_lock(&ctx->event_wqh.lock);
+ /*
+@@ -601,8 +604,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
+ new = (struct userfaultfd_ctx *)
+ (unsigned long)
+ ewq->msg.arg.reserved.reserved1;
+-
+- userfaultfd_ctx_put(new);
++ release_new_ctx = new;
+ }
+ break;
+ }
+@@ -617,6 +619,20 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
+ __set_current_state(TASK_RUNNING);
+ spin_unlock(&ctx->event_wqh.lock);
+
++ if (release_new_ctx) {
++ struct vm_area_struct *vma;
++ struct mm_struct *mm = release_new_ctx->mm;
++
++ /* the various vma->vm_userfaultfd_ctx still points to it */
++ down_write(&mm->mmap_sem);
++ for (vma = mm->mmap; vma; vma = vma->vm_next)
++ if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx)
++ vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
++ up_write(&mm->mmap_sem);
++
++ userfaultfd_ctx_put(release_new_ctx);
++ }
++
+ /*
+ * ctx may go away after this if the userfault pseudo fd is
+ * already released.
+diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
+index 537ff842ff73..cbf85c4c745f 100644
+--- a/include/linux/cpufreq.h
++++ b/include/linux/cpufreq.h
+@@ -917,6 +917,7 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
+ }
+ #endif
+
++extern void arch_freq_prepare_all(void);
+ extern unsigned int arch_freq_get_on_cpu(int cpu);
+
+ /* the following are really really optional */
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index d813f7b04da7..29fdf8029cf6 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -140,11 +140,13 @@ struct efi_boot_memmap {
+
+ struct capsule_info {
+ efi_capsule_header_t header;
++ efi_capsule_header_t *capsule;
+ int reset_type;
+ long index;
+ size_t count;
+ size_t total_size;
+- phys_addr_t *pages;
++ struct page **pages;
++ phys_addr_t *phys;
+ size_t page_bytes_remain;
+ };
+
+diff --git a/include/linux/fscache.h b/include/linux/fscache.h
+index f4ff47d4a893..fe0c349684fa 100644
+--- a/include/linux/fscache.h
++++ b/include/linux/fscache.h
+@@ -755,7 +755,7 @@ bool fscache_maybe_release_page(struct fscache_cookie *cookie,
+ {
+ if (fscache_cookie_valid(cookie) && PageFsCache(page))
+ return __fscache_maybe_release_page(cookie, page, gfp);
+- return false;
++ return true;
+ }
+
+ /**
+diff --git a/kernel/acct.c b/kernel/acct.c
+index 6670fbd3e466..354578d253d5 100644
+--- a/kernel/acct.c
++++ b/kernel/acct.c
+@@ -102,7 +102,7 @@ static int check_free_space(struct bsd_acct_struct *acct)
+ {
+ struct kstatfs sbuf;
+
+- if (time_is_before_jiffies(acct->needcheck))
++ if (time_is_after_jiffies(acct->needcheck))
+ goto out;
+
+ /* May block */
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 8dcd8825b2de..1facff1dbbae 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -78,7 +78,7 @@ static int sig_task_ignored(struct task_struct *t, int sig, bool force)
+ handler = sig_handler(t, sig);
+
+ if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
+- handler == SIG_DFL && !force)
++ handler == SIG_DFL && !(force && sig_kernel_only(sig)))
+ return 1;
+
+ return sig_handler_ignored(handler, sig);
+@@ -94,13 +94,15 @@ static int sig_ignored(struct task_struct *t, int sig, bool force)
+ if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
+ return 0;
+
+- if (!sig_task_ignored(t, sig, force))
+- return 0;
+-
+ /*
+- * Tracers may want to know about even ignored signals.
++ * Tracers may want to know about even ignored signal unless it
++ * is SIGKILL which can't be reported anyway but can be ignored
++ * by SIGNAL_UNKILLABLE task.
+ */
+- return !t->ptrace;
++ if (t->ptrace && sig != SIGKILL)
++ return 0;
++
++ return sig_task_ignored(t, sig, force);
+ }
+
+ /*
+@@ -929,9 +931,9 @@ static void complete_signal(int sig, struct task_struct *p, int group)
+ * then start taking the whole group down immediately.
+ */
+ if (sig_fatal(p, sig) &&
+- !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
++ !(signal->flags & SIGNAL_GROUP_EXIT) &&
+ !sigismember(&t->real_blocked, sig) &&
+- (sig == SIGKILL || !t->ptrace)) {
++ (sig == SIGKILL || !p->ptrace)) {
+ /*
+ * This signal will be fatal to the whole group.
+ */
+diff --git a/mm/mprotect.c b/mm/mprotect.c
+index ec39f730a0bf..58b629bb70de 100644
+--- a/mm/mprotect.c
++++ b/mm/mprotect.c
+@@ -166,7 +166,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
+ next = pmd_addr_end(addr, end);
+ if (!is_swap_pmd(*pmd) && !pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)
+ && pmd_none_or_clear_bad(pmd))
+- continue;
++ goto next;
+
+ /* invoke the mmu notifier if the pmd is populated */
+ if (!mni_start) {
+@@ -188,7 +188,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
+ }
+
+ /* huge pmd was handled */
+- continue;
++ goto next;
+ }
+ }
+ /* fall through, the trans huge pmd just split */
+@@ -196,6 +196,8 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
+ this_pages = change_pte_range(vma, pmd, addr, next, newprot,
+ dirty_accountable, prot_numa);
+ pages += this_pages;
++next:
++ cond_resched();
+ } while (pmd++, addr = next, addr != end);
+
+ if (mni_start)
+diff --git a/mm/sparse.c b/mm/sparse.c
+index 60805abf98af..30e56a100ee8 100644
+--- a/mm/sparse.c
++++ b/mm/sparse.c
+@@ -211,7 +211,7 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
+ if (unlikely(!mem_section)) {
+ unsigned long size, align;
+
+- size = sizeof(struct mem_section) * NR_SECTION_ROOTS;
++ size = sizeof(struct mem_section*) * NR_SECTION_ROOTS;
+ align = 1 << (INTERNODE_CACHE_SHIFT);
+ mem_section = memblock_virt_alloc(size, align);
+ }
+diff --git a/security/apparmor/mount.c b/security/apparmor/mount.c
+index 82a64b58041d..e395137ecff1 100644
+--- a/security/apparmor/mount.c
++++ b/security/apparmor/mount.c
+@@ -330,6 +330,9 @@ static int match_mnt_path_str(struct aa_profile *profile,
+ AA_BUG(!mntpath);
+ AA_BUG(!buffer);
+
++ if (!PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
++ return 0;
++
+ error = aa_path_name(mntpath, path_flags(profile, mntpath), buffer,
+ &mntpnt, &info, profile->disconnected);
+ if (error)
+@@ -381,6 +384,9 @@ static int match_mnt(struct aa_profile *profile, const struct path *path,
+ AA_BUG(!profile);
+ AA_BUG(devpath && !devbuffer);
+
++ if (!PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
++ return 0;
++
+ if (devpath) {
+ error = aa_path_name(devpath, path_flags(profile, devpath),
+ devbuffer, &devname, &info,
+@@ -559,6 +565,9 @@ static int profile_umount(struct aa_profile *profile, struct path *path,
+ AA_BUG(!profile);
+ AA_BUG(!path);
+
++ if (!PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
++ return 0;
++
+ error = aa_path_name(path, path_flags(profile, path), buffer, &name,
+ &info, profile->disconnected);
+ if (error)
+@@ -614,7 +623,8 @@ static struct aa_label *build_pivotroot(struct aa_profile *profile,
+ AA_BUG(!new_path);
+ AA_BUG(!old_path);
+
+- if (profile_unconfined(profile))
++ if (profile_unconfined(profile) ||
++ !PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
+ return aa_get_newest_label(&profile->label);
+
+ error = aa_path_name(old_path, path_flags(profile, old_path),