
From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.8 commit in: /
Date: Fri, 02 Dec 2016 16:23:46
Message-Id: 1480695768.27ab52c49dea953256202d19c96202f5cf703bbe.alicef@gentoo
1 commit: 27ab52c49dea953256202d19c96202f5cf703bbe
2 Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
3 AuthorDate: Fri Dec 2 16:22:48 2016 +0000
4 Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
5 CommitDate: Fri Dec 2 16:22:48 2016 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=27ab52c4
7
8 Linux patch 4.8.12
9
10 0000_README | 4 +
11 1011_linux-4.8.12.patch | 1563 +++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 1567 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index 4aa1baf..cd56013 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -87,6 +87,10 @@ Patch: 1010_linux-4.8.11.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.8.11
21
22 +Patch: 1011_linux-4.8.12.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.8.12
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1011_linux-4.8.12.patch b/1011_linux-4.8.12.patch
31 new file mode 100644
32 index 0000000..9855afb
33 --- /dev/null
34 +++ b/1011_linux-4.8.12.patch
35 @@ -0,0 +1,1563 @@
36 +diff --git a/Makefile b/Makefile
37 +index 2b1bcbacebcd..7b0c92f53169 100644
38 +--- a/Makefile
39 ++++ b/Makefile
40 +@@ -1,6 +1,6 @@
41 + VERSION = 4
42 + PATCHLEVEL = 8
43 +-SUBLEVEL = 11
44 ++SUBLEVEL = 12
45 + EXTRAVERSION =
46 + NAME = Psychotic Stoned Sheep
47 +
48 +diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
49 +index af12c2db9bb8..81c11a62b1fa 100644
50 +--- a/arch/parisc/Kconfig
51 ++++ b/arch/parisc/Kconfig
52 +@@ -33,7 +33,9 @@ config PARISC
53 + select HAVE_ARCH_HASH
54 + select HAVE_ARCH_SECCOMP_FILTER
55 + select HAVE_ARCH_TRACEHOOK
56 +- select HAVE_UNSTABLE_SCHED_CLOCK if (SMP || !64BIT)
57 ++ select GENERIC_SCHED_CLOCK
58 ++ select HAVE_UNSTABLE_SCHED_CLOCK if SMP
59 ++ select GENERIC_CLOCKEVENTS
60 + select ARCH_NO_COHERENT_DMA_MMAP
61 + select CPU_NO_EFFICIENT_FFS
62 +
63 +diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
64 +index 67001277256c..c2259d4a3c33 100644
65 +--- a/arch/parisc/kernel/cache.c
66 ++++ b/arch/parisc/kernel/cache.c
67 +@@ -369,6 +369,7 @@ void __init parisc_setup_cache_timing(void)
68 + {
69 + unsigned long rangetime, alltime;
70 + unsigned long size, start;
71 ++ unsigned long threshold;
72 +
73 + alltime = mfctl(16);
74 + flush_data_cache();
75 +@@ -382,17 +383,12 @@ void __init parisc_setup_cache_timing(void)
76 + printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
77 + alltime, size, rangetime);
78 +
79 +- /* Racy, but if we see an intermediate value, it's ok too... */
80 +- parisc_cache_flush_threshold = size * alltime / rangetime;
81 +-
82 +- parisc_cache_flush_threshold = L1_CACHE_ALIGN(parisc_cache_flush_threshold);
83 +- if (!parisc_cache_flush_threshold)
84 +- parisc_cache_flush_threshold = FLUSH_THRESHOLD;
85 +-
86 +- if (parisc_cache_flush_threshold > cache_info.dc_size)
87 +- parisc_cache_flush_threshold = cache_info.dc_size;
88 +-
89 +- printk(KERN_INFO "Setting cache flush threshold to %lu kB\n",
90 ++ threshold = L1_CACHE_ALIGN(size * alltime / rangetime);
91 ++ if (threshold > cache_info.dc_size)
92 ++ threshold = cache_info.dc_size;
93 ++ if (threshold)
94 ++ parisc_cache_flush_threshold = threshold;
95 ++ printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
96 + parisc_cache_flush_threshold/1024);
97 +
98 + /* calculate TLB flush threshold */
99 +@@ -401,7 +397,7 @@ void __init parisc_setup_cache_timing(void)
100 + flush_tlb_all();
101 + alltime = mfctl(16) - alltime;
102 +
103 +- size = PAGE_SIZE;
104 ++ size = 0;
105 + start = (unsigned long) _text;
106 + rangetime = mfctl(16);
107 + while (start < (unsigned long) _end) {
108 +@@ -414,13 +410,10 @@ void __init parisc_setup_cache_timing(void)
109 + printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n",
110 + alltime, size, rangetime);
111 +
112 +- parisc_tlb_flush_threshold = size * alltime / rangetime;
113 +- parisc_tlb_flush_threshold *= num_online_cpus();
114 +- parisc_tlb_flush_threshold = PAGE_ALIGN(parisc_tlb_flush_threshold);
115 +- if (!parisc_tlb_flush_threshold)
116 +- parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;
117 +-
118 +- printk(KERN_INFO "Setting TLB flush threshold to %lu kB\n",
119 ++ threshold = PAGE_ALIGN(num_online_cpus() * size * alltime / rangetime);
120 ++ if (threshold)
121 ++ parisc_tlb_flush_threshold = threshold;
122 ++ printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
123 + parisc_tlb_flush_threshold/1024);
124 + }
125 +
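
The hunk above derives the break-even flush size as L1_CACHE_ALIGN(size * alltime / rangetime), clamps it to the data cache size, and only commits it when nonzero (the TLB threshold follows the same pattern scaled by num_online_cpus()). A minimal user-space sketch of that heuristic with made-up cycle counts; the constants are stand-ins, not values from the patch:

/* Sketch of the flush-threshold heuristic; assumes a 64-bit host so
 * size * alltime does not overflow unsigned long. */
#include <stdio.h>

#define L1_CACHE_BYTES    64UL
#define L1_CACHE_ALIGN(x) (((x) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1))

int main(void)
{
	unsigned long alltime   = 120000;   /* cycles for a whole-cache flush */
	unsigned long rangetime = 480000;   /* cycles to range-flush `size` bytes */
	unsigned long size      = 2 << 20;  /* bytes flushed in the range test */
	unsigned long dc_size   = 1 << 20;  /* data cache size */
	unsigned long threshold;

	/* Below this many bytes a range flush is cheaper than a full flush. */
	threshold = L1_CACHE_ALIGN(size * alltime / rangetime);
	if (threshold > dc_size)
		threshold = dc_size;
	if (threshold)  /* keep the previous value when the timing was degenerate */
		printf("cache flush threshold: %lu KiB\n", threshold / 1024);
	return 0;
}
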
126 +diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
127 +index b743a80eaba0..675521919229 100644
128 +--- a/arch/parisc/kernel/pacache.S
129 ++++ b/arch/parisc/kernel/pacache.S
130 +@@ -96,7 +96,7 @@ fitmanyloop: /* Loop if LOOP >= 2 */
131 +
132 + fitmanymiddle: /* Loop if LOOP >= 2 */
133 + addib,COND(>) -1, %r31, fitmanymiddle /* Adjusted inner loop decr */
134 +- pitlbe 0(%sr1, %r28)
135 ++ pitlbe %r0(%sr1, %r28)
136 + pitlbe,m %arg1(%sr1, %r28) /* Last pitlbe and addr adjust */
137 + addib,COND(>) -1, %r29, fitmanymiddle /* Middle loop decr */
138 + copy %arg3, %r31 /* Re-init inner loop count */
139 +@@ -139,7 +139,7 @@ fdtmanyloop: /* Loop if LOOP >= 2 */
140 +
141 + fdtmanymiddle: /* Loop if LOOP >= 2 */
142 + addib,COND(>) -1, %r31, fdtmanymiddle /* Adjusted inner loop decr */
143 +- pdtlbe 0(%sr1, %r28)
144 ++ pdtlbe %r0(%sr1, %r28)
145 + pdtlbe,m %arg1(%sr1, %r28) /* Last pdtlbe and addr adjust */
146 + addib,COND(>) -1, %r29, fdtmanymiddle /* Middle loop decr */
147 + copy %arg3, %r31 /* Re-init inner loop count */
148 +@@ -620,12 +620,12 @@ ENTRY(copy_user_page_asm)
149 + /* Purge any old translations */
150 +
151 + #ifdef CONFIG_PA20
152 +- pdtlb,l 0(%r28)
153 +- pdtlb,l 0(%r29)
154 ++ pdtlb,l %r0(%r28)
155 ++ pdtlb,l %r0(%r29)
156 + #else
157 + tlb_lock %r20,%r21,%r22
158 +- pdtlb 0(%r28)
159 +- pdtlb 0(%r29)
160 ++ pdtlb %r0(%r28)
161 ++ pdtlb %r0(%r29)
162 + tlb_unlock %r20,%r21,%r22
163 + #endif
164 +
165 +@@ -768,10 +768,10 @@ ENTRY(clear_user_page_asm)
166 + /* Purge any old translation */
167 +
168 + #ifdef CONFIG_PA20
169 +- pdtlb,l 0(%r28)
170 ++ pdtlb,l %r0(%r28)
171 + #else
172 + tlb_lock %r20,%r21,%r22
173 +- pdtlb 0(%r28)
174 ++ pdtlb %r0(%r28)
175 + tlb_unlock %r20,%r21,%r22
176 + #endif
177 +
178 +@@ -852,10 +852,10 @@ ENTRY(flush_dcache_page_asm)
179 + /* Purge any old translation */
180 +
181 + #ifdef CONFIG_PA20
182 +- pdtlb,l 0(%r28)
183 ++ pdtlb,l %r0(%r28)
184 + #else
185 + tlb_lock %r20,%r21,%r22
186 +- pdtlb 0(%r28)
187 ++ pdtlb %r0(%r28)
188 + tlb_unlock %r20,%r21,%r22
189 + #endif
190 +
191 +@@ -892,10 +892,10 @@ ENTRY(flush_dcache_page_asm)
192 + sync
193 +
194 + #ifdef CONFIG_PA20
195 +- pdtlb,l 0(%r25)
196 ++ pdtlb,l %r0(%r25)
197 + #else
198 + tlb_lock %r20,%r21,%r22
199 +- pdtlb 0(%r25)
200 ++ pdtlb %r0(%r25)
201 + tlb_unlock %r20,%r21,%r22
202 + #endif
203 +
204 +@@ -925,13 +925,18 @@ ENTRY(flush_icache_page_asm)
205 + depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
206 + #endif
207 +
208 +- /* Purge any old translation */
209 ++ /* Purge any old translation. Note that the FIC instruction
210 ++ * may use either the instruction or data TLB. Given that we
211 ++ * have a flat address space, it's not clear which TLB will be
212 ++ * used. So, we purge both entries. */
213 +
214 + #ifdef CONFIG_PA20
215 ++ pdtlb,l %r0(%r28)
216 + pitlb,l %r0(%sr4,%r28)
217 + #else
218 + tlb_lock %r20,%r21,%r22
219 +- pitlb (%sr4,%r28)
220 ++ pdtlb %r0(%r28)
221 ++ pitlb %r0(%sr4,%r28)
222 + tlb_unlock %r20,%r21,%r22
223 + #endif
224 +
225 +@@ -970,10 +975,12 @@ ENTRY(flush_icache_page_asm)
226 + sync
227 +
228 + #ifdef CONFIG_PA20
229 ++ pdtlb,l %r0(%r28)
230 + pitlb,l %r0(%sr4,%r25)
231 + #else
232 + tlb_lock %r20,%r21,%r22
233 +- pitlb (%sr4,%r25)
234 ++ pdtlb %r0(%r28)
235 ++ pitlb %r0(%sr4,%r25)
236 + tlb_unlock %r20,%r21,%r22
237 + #endif
238 +
239 +diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
240 +index 02d9ed0f3949..494ff6e8c88a 100644
241 +--- a/arch/parisc/kernel/pci-dma.c
242 ++++ b/arch/parisc/kernel/pci-dma.c
243 +@@ -95,8 +95,8 @@ static inline int map_pte_uncached(pte_t * pte,
244 +
245 + if (!pte_none(*pte))
246 + printk(KERN_ERR "map_pte_uncached: page already exists\n");
247 +- set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
248 + purge_tlb_start(flags);
249 ++ set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
250 + pdtlb_kernel(orig_vaddr);
251 + purge_tlb_end(flags);
252 + vaddr += PAGE_SIZE;
253 +diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
254 +index 81d6f6391944..2e66a887788e 100644
255 +--- a/arch/parisc/kernel/setup.c
256 ++++ b/arch/parisc/kernel/setup.c
257 +@@ -334,6 +334,10 @@ static int __init parisc_init(void)
258 + /* tell PDC we're Linux. Nevermind failure. */
259 + pdc_stable_write(0x40, &osid, sizeof(osid));
260 +
261 ++ /* start with known state */
262 ++ flush_cache_all_local();
263 ++ flush_tlb_all_local(NULL);
264 ++
265 + processor_init();
266 + #ifdef CONFIG_SMP
267 + pr_info("CPU(s): %d out of %d %s at %d.%06d MHz online\n",
268 +diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
269 +index 9b63b876a13a..325f30d82b64 100644
270 +--- a/arch/parisc/kernel/time.c
271 ++++ b/arch/parisc/kernel/time.c
272 +@@ -14,6 +14,7 @@
273 + #include <linux/module.h>
274 + #include <linux/rtc.h>
275 + #include <linux/sched.h>
276 ++#include <linux/sched_clock.h>
277 + #include <linux/kernel.h>
278 + #include <linux/param.h>
279 + #include <linux/string.h>
280 +@@ -39,18 +40,6 @@
281 +
282 + static unsigned long clocktick __read_mostly; /* timer cycles per tick */
283 +
284 +-#ifndef CONFIG_64BIT
285 +-/*
286 +- * The processor-internal cycle counter (Control Register 16) is used as time
287 +- * source for the sched_clock() function. This register is 64bit wide on a
288 +- * 64-bit kernel and 32bit on a 32-bit kernel. Since sched_clock() always
289 +- * requires a 64bit counter we emulate on the 32-bit kernel the higher 32bits
290 +- * with a per-cpu variable which we increase every time the counter
291 +- * wraps-around (which happens every ~4 secounds).
292 +- */
293 +-static DEFINE_PER_CPU(unsigned long, cr16_high_32_bits);
294 +-#endif
295 +-
296 + /*
297 + * We keep time on PA-RISC Linux by using the Interval Timer which is
298 + * a pair of registers; one is read-only and one is write-only; both
299 +@@ -121,12 +110,6 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
300 + */
301 + mtctl(next_tick, 16);
302 +
303 +-#if !defined(CONFIG_64BIT)
304 +- /* check for overflow on a 32bit kernel (every ~4 seconds). */
305 +- if (unlikely(next_tick < now))
306 +- this_cpu_inc(cr16_high_32_bits);
307 +-#endif
308 +-
309 + /* Skip one clocktick on purpose if we missed next_tick.
310 + * The new CR16 must be "later" than current CR16 otherwise
311 + * itimer would not fire until CR16 wrapped - e.g 4 seconds
312 +@@ -208,7 +191,7 @@ EXPORT_SYMBOL(profile_pc);
313 +
314 + /* clock source code */
315 +
316 +-static cycle_t read_cr16(struct clocksource *cs)
317 ++static cycle_t notrace read_cr16(struct clocksource *cs)
318 + {
319 + return get_cycles();
320 + }
321 +@@ -287,26 +270,9 @@ void read_persistent_clock(struct timespec *ts)
322 + }
323 +
324 +
325 +-/*
326 +- * sched_clock() framework
327 +- */
328 +-
329 +-static u32 cyc2ns_mul __read_mostly;
330 +-static u32 cyc2ns_shift __read_mostly;
331 +-
332 +-u64 sched_clock(void)
333 ++static u64 notrace read_cr16_sched_clock(void)
334 + {
335 +- u64 now;
336 +-
337 +- /* Get current cycle counter (Control Register 16). */
338 +-#ifdef CONFIG_64BIT
339 +- now = mfctl(16);
340 +-#else
341 +- now = mfctl(16) + (((u64) this_cpu_read(cr16_high_32_bits)) << 32);
342 +-#endif
343 +-
344 +- /* return the value in ns (cycles_2_ns) */
345 +- return mul_u64_u32_shr(now, cyc2ns_mul, cyc2ns_shift);
346 ++ return get_cycles();
347 + }
348 +
349 +
350 +@@ -316,17 +282,16 @@ u64 sched_clock(void)
351 +
352 + void __init time_init(void)
353 + {
354 +- unsigned long current_cr16_khz;
355 ++ unsigned long cr16_hz;
356 +
357 +- current_cr16_khz = PAGE0->mem_10msec/10; /* kHz */
358 + clocktick = (100 * PAGE0->mem_10msec) / HZ;
359 +-
360 +- /* calculate mult/shift values for cr16 */
361 +- clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz,
362 +- NSEC_PER_MSEC, 0);
363 +-
364 + start_cpu_itimer(); /* get CPU 0 started */
365 +
366 ++ cr16_hz = 100 * PAGE0->mem_10msec; /* Hz */
367 ++
368 + /* register at clocksource framework */
369 +- clocksource_register_khz(&clocksource_cr16, current_cr16_khz);
370 ++ clocksource_register_hz(&clocksource_cr16, cr16_hz);
371 ++
372 ++ /* register as sched_clock source */
373 ++ sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_hz);
374 + }
375 +diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c
376 +index d80161b633f4..60522d22a428 100644
377 +--- a/arch/powerpc/boot/main.c
378 ++++ b/arch/powerpc/boot/main.c
379 +@@ -217,8 +217,12 @@ void start(void)
380 + console_ops.close();
381 +
382 + kentry = (kernel_entry_t) vmlinux.addr;
383 +- if (ft_addr)
384 +- kentry(ft_addr, 0, NULL);
385 ++ if (ft_addr) {
386 ++ if(platform_ops.kentry)
387 ++ platform_ops.kentry(ft_addr, vmlinux.addr);
388 ++ else
389 ++ kentry(ft_addr, 0, NULL);
390 ++ }
391 + else
392 + kentry((unsigned long)initrd.addr, initrd.size,
393 + loader_info.promptr);
394 +diff --git a/arch/powerpc/boot/opal-calls.S b/arch/powerpc/boot/opal-calls.S
395 +index ff2f1b97bc53..2a99fc9a3ccf 100644
396 +--- a/arch/powerpc/boot/opal-calls.S
397 ++++ b/arch/powerpc/boot/opal-calls.S
398 +@@ -12,6 +12,19 @@
399 +
400 + .text
401 +
402 ++ .globl opal_kentry
403 ++opal_kentry:
404 ++ /* r3 is the fdt ptr */
405 ++ mtctr r4
406 ++ li r4, 0
407 ++ li r5, 0
408 ++ li r6, 0
409 ++ li r7, 0
410 ++ ld r11,opal@got(r2)
411 ++ ld r8,0(r11)
412 ++ ld r9,8(r11)
413 ++ bctr
414 ++
415 + #define OPAL_CALL(name, token) \
416 + .globl name; \
417 + name: \
418 +diff --git a/arch/powerpc/boot/opal.c b/arch/powerpc/boot/opal.c
419 +index 1f37e1c1d6d8..d7b4fd47eb44 100644
420 +--- a/arch/powerpc/boot/opal.c
421 ++++ b/arch/powerpc/boot/opal.c
422 +@@ -23,14 +23,25 @@ struct opal {
423 +
424 + static u32 opal_con_id;
425 +
426 ++/* see opal-wrappers.S */
427 + int64_t opal_console_write(int64_t term_number, u64 *length, const u8 *buffer);
428 + int64_t opal_console_read(int64_t term_number, uint64_t *length, u8 *buffer);
429 + int64_t opal_console_write_buffer_space(uint64_t term_number, uint64_t *length);
430 + int64_t opal_console_flush(uint64_t term_number);
431 + int64_t opal_poll_events(uint64_t *outstanding_event_mask);
432 +
433 ++void opal_kentry(unsigned long fdt_addr, void *vmlinux_addr);
434 ++
435 + static int opal_con_open(void)
436 + {
437 ++ /*
438 ++ * When OPAL loads the boot kernel it stashes the OPAL base and entry
439 ++ * address in r8 and r9 so the kernel can use the OPAL console
440 ++ * before unflattening the devicetree. While executing, the wrapper will
441 ++ * probably trash r8 and r9, so this kentry hook restores them before
442 ++ * entering the decompressed kernel.
443 ++ */
444 ++ platform_ops.kentry = opal_kentry;
445 + return 0;
446 + }
447 +
448 +diff --git a/arch/powerpc/boot/ops.h b/arch/powerpc/boot/ops.h
449 +index e19b64ef977a..deeae6f6ba9c 100644
450 +--- a/arch/powerpc/boot/ops.h
451 ++++ b/arch/powerpc/boot/ops.h
452 +@@ -30,6 +30,7 @@ struct platform_ops {
453 + void * (*realloc)(void *ptr, unsigned long size);
454 + void (*exit)(void);
455 + void * (*vmlinux_alloc)(unsigned long size);
456 ++ void (*kentry)(unsigned long fdt_addr, void *vmlinux_addr);
457 + };
458 + extern struct platform_ops platform_ops;
459 +
460 +diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
461 +index e2fb408f8398..fd10b582fb2d 100644
462 +--- a/arch/powerpc/include/asm/mmu.h
463 ++++ b/arch/powerpc/include/asm/mmu.h
464 +@@ -29,6 +29,12 @@
465 + */
466 +
467 + /*
468 ++ * Kernel read only support.
469 ++ * We added the ppp value 0b110 in ISA 2.04.
470 ++ */
471 ++#define MMU_FTR_KERNEL_RO ASM_CONST(0x00004000)
472 ++
473 ++/*
474 + * We need to clear top 16bits of va (from the remaining 64 bits )in
475 + * tlbie* instructions
476 + */
477 +@@ -103,10 +109,10 @@
478 + #define MMU_FTRS_POWER4 MMU_FTRS_DEFAULT_HPTE_ARCH_V2
479 + #define MMU_FTRS_PPC970 MMU_FTRS_POWER4 | MMU_FTR_TLBIE_CROP_VA
480 + #define MMU_FTRS_POWER5 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
481 +-#define MMU_FTRS_POWER6 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
482 +-#define MMU_FTRS_POWER7 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
483 +-#define MMU_FTRS_POWER8 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
484 +-#define MMU_FTRS_POWER9 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
485 ++#define MMU_FTRS_POWER6 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO
486 ++#define MMU_FTRS_POWER7 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO
487 ++#define MMU_FTRS_POWER8 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO
488 ++#define MMU_FTRS_POWER9 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO
489 + #define MMU_FTRS_CELL MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
490 + MMU_FTR_CI_LARGE_PAGE
491 + #define MMU_FTRS_PA6T MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
492 +diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
493 +index 978dada662ae..52cbf043e960 100644
494 +--- a/arch/powerpc/include/asm/reg.h
495 ++++ b/arch/powerpc/include/asm/reg.h
496 +@@ -355,6 +355,7 @@
497 + #define LPCR_PECE0 ASM_CONST(0x0000000000004000) /* ext. exceptions can cause exit */
498 + #define LPCR_PECE1 ASM_CONST(0x0000000000002000) /* decrementer can cause exit */
499 + #define LPCR_PECE2 ASM_CONST(0x0000000000001000) /* machine check etc can cause exit */
500 ++#define LPCR_PECE_HVEE ASM_CONST(0x0000400000000000) /* P9 Wakeup on HV interrupts */
501 + #define LPCR_MER ASM_CONST(0x0000000000000800) /* Mediated External Exception */
502 + #define LPCR_MER_SH 11
503 + #define LPCR_TC ASM_CONST(0x0000000000000200) /* Translation control */
504 +diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
505 +index 52ff3f025437..37c027ca83b2 100644
506 +--- a/arch/powerpc/kernel/cpu_setup_power.S
507 ++++ b/arch/powerpc/kernel/cpu_setup_power.S
508 +@@ -98,8 +98,8 @@ _GLOBAL(__setup_cpu_power9)
509 + li r0,0
510 + mtspr SPRN_LPID,r0
511 + mfspr r3,SPRN_LPCR
512 +- ori r3, r3, LPCR_PECEDH
513 +- ori r3, r3, LPCR_HVICE
514 ++ LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
515 ++ or r3, r3, r4
516 + bl __init_LPCR
517 + bl __init_HFSCR
518 + bl __init_tlb_power9
519 +@@ -118,8 +118,8 @@ _GLOBAL(__restore_cpu_power9)
520 + li r0,0
521 + mtspr SPRN_LPID,r0
522 + mfspr r3,SPRN_LPCR
523 +- ori r3, r3, LPCR_PECEDH
524 +- ori r3, r3, LPCR_HVICE
525 ++ LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
526 ++ or r3, r3, r4
527 + bl __init_LPCR
528 + bl __init_HFSCR
529 + bl __init_tlb_power9
530 +diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
531 +index 28923b2e2df1..8dff9ce6fbc1 100644
532 +--- a/arch/powerpc/mm/hash_utils_64.c
533 ++++ b/arch/powerpc/mm/hash_utils_64.c
534 +@@ -190,8 +190,12 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
535 + /*
536 + * Kernel read only mapped with ppp bits 0b110
537 + */
538 +- if (!(pteflags & _PAGE_WRITE))
539 +- rflags |= (HPTE_R_PP0 | 0x2);
540 ++ if (!(pteflags & _PAGE_WRITE)) {
541 ++ if (mmu_has_feature(MMU_FTR_KERNEL_RO))
542 ++ rflags |= (HPTE_R_PP0 | 0x2);
543 ++ else
544 ++ rflags |= 0x3;
545 ++ }
546 + } else {
547 + if (pteflags & _PAGE_RWX)
548 + rflags |= 0x2;
549 +diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
550 +index 178989e6d3e3..ea960d660917 100644
551 +--- a/arch/tile/kernel/time.c
552 ++++ b/arch/tile/kernel/time.c
553 +@@ -218,8 +218,8 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
554 + */
555 + unsigned long long sched_clock(void)
556 + {
557 +- return clocksource_cyc2ns(get_cycles(),
558 +- sched_clock_mult, SCHED_CLOCK_SHIFT);
559 ++ return mult_frac(get_cycles(),
560 ++ sched_clock_mult, 1ULL << SCHED_CLOCK_SHIFT);
561 + }
562 +
563 + int setup_profiling_timer(unsigned int multiplier)
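
The tile change above works because clocksource_cyc2ns() computes ((u64)cyc * mult) >> shift, and that intermediate product wraps once get_cycles() grows large, whereas mult_frac() splits the operand into quotient and remainder so every intermediate stays in range. A sketch — the macro shape follows the kernel's mult_frac(), the numbers are illustrative only:

/* Why mult_frac() avoids the cyc2ns overflow. Requires GNU C (typeof). */
#include <stdint.h>
#include <stdio.h>

#define mult_frac(x, numer, denom) ({                       \
	typeof(x) quot = (x) / (denom);                     \
	typeof(x) rem  = (x) % (denom);                     \
	(quot * (numer)) + ((rem * (numer)) / (denom));     \
})

int main(void)
{
	uint64_t cycles = 0x0200000000000000ULL; /* large cycle count */
	uint64_t mult = 1000, shift = 10;        /* hypothetical cyc2ns params */

	/* Naive form: cycles * mult wraps around 2^64 before the shift. */
	printf("naive:     %llu\n",
	       (unsigned long long)((cycles * mult) >> shift));
	/* Split form: exact as long as the final result itself fits. */
	printf("mult_frac: %llu\n",
	       (unsigned long long)mult_frac(cycles, mult, 1ULL << shift));
	return 0;
}
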
564 +diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
565 +index 9b983a474253..8fc714b4f18a 100644
566 +--- a/arch/x86/events/intel/ds.c
567 ++++ b/arch/x86/events/intel/ds.c
568 +@@ -1070,20 +1070,20 @@ static void setup_pebs_sample_data(struct perf_event *event,
569 + }
570 +
571 + /*
572 +- * We use the interrupt regs as a base because the PEBS record
573 +- * does not contain a full regs set, specifically it seems to
574 +- * lack segment descriptors, which get used by things like
575 +- * user_mode().
576 ++ * We use the interrupt regs as a base because the PEBS record does not
577 ++ * contain a full regs set, specifically it seems to lack segment
578 ++ * descriptors, which get used by things like user_mode().
579 + *
580 +- * In the simple case fix up only the IP and BP,SP regs, for
581 +- * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
582 +- * A possible PERF_SAMPLE_REGS will have to transfer all regs.
583 ++ * In the simple case fix up only the IP for PERF_SAMPLE_IP.
584 ++ *
585 ++ * We must however always use BP,SP from iregs for the unwinder to stay
586 ++ * sane; the record BP,SP can point into thin air when the record is
587 ++ * from a previous PMI context or an (I)RET happened between the record
588 ++ * and PMI.
589 + */
590 + *regs = *iregs;
591 + regs->flags = pebs->flags;
592 + set_linear_ip(regs, pebs->ip);
593 +- regs->bp = pebs->bp;
594 +- regs->sp = pebs->sp;
595 +
596 + if (sample_type & PERF_SAMPLE_REGS_INTR) {
597 + regs->ax = pebs->ax;
598 +@@ -1092,10 +1092,21 @@ static void setup_pebs_sample_data(struct perf_event *event,
599 + regs->dx = pebs->dx;
600 + regs->si = pebs->si;
601 + regs->di = pebs->di;
602 +- regs->bp = pebs->bp;
603 +- regs->sp = pebs->sp;
604 +
605 +- regs->flags = pebs->flags;
606 ++ /*
607 ++ * Per the above; only set BP,SP if we don't need callchains.
608 ++ *
609 ++ * XXX: does this make sense?
610 ++ */
611 ++ if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
612 ++ regs->bp = pebs->bp;
613 ++ regs->sp = pebs->sp;
614 ++ }
615 ++
616 ++ /*
617 ++ * Preserve PERF_EFLAGS_VM from set_linear_ip().
618 ++ */
619 ++ regs->flags = pebs->flags | (regs->flags & PERF_EFLAGS_VM);
620 + #ifndef CONFIG_X86_32
621 + regs->r8 = pebs->r8;
622 + regs->r9 = pebs->r9;
623 +diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
624 +index 8c4a47706296..181c238d4df9 100644
625 +--- a/arch/x86/events/perf_event.h
626 ++++ b/arch/x86/events/perf_event.h
627 +@@ -113,7 +113,7 @@ struct debug_store {
628 + * Per register state.
629 + */
630 + struct er_account {
631 +- raw_spinlock_t lock; /* per-core: protect structure */
632 ++ raw_spinlock_t lock; /* per-core: protect structure */
633 + u64 config; /* extra MSR config */
634 + u64 reg; /* extra MSR number */
635 + atomic_t ref; /* reference count */
636 +diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
637 +index 3fc03a09a93b..c289e2f4a6e5 100644
638 +--- a/arch/x86/kernel/fpu/core.c
639 ++++ b/arch/x86/kernel/fpu/core.c
640 +@@ -517,14 +517,14 @@ void fpu__clear(struct fpu *fpu)
641 + {
642 + WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
643 +
644 +- if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) {
645 +- /* FPU state will be reallocated lazily at the first use. */
646 +- fpu__drop(fpu);
647 +- } else {
648 +- if (!fpu->fpstate_active) {
649 +- fpu__activate_curr(fpu);
650 +- user_fpu_begin();
651 +- }
652 ++ fpu__drop(fpu);
653 ++
654 ++ /*
655 ++ * Make sure fpstate is cleared and initialized.
656 ++ */
657 ++ if (static_cpu_has(X86_FEATURE_FPU)) {
658 ++ fpu__activate_curr(fpu);
659 ++ user_fpu_begin();
660 + copy_init_fpstate_to_fpregs();
661 + }
662 + }
663 +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
664 +index cbd7b92585bb..a3ce9d260d68 100644
665 +--- a/arch/x86/kvm/emulate.c
666 ++++ b/arch/x86/kvm/emulate.c
667 +@@ -2105,16 +2105,10 @@ static int em_iret(struct x86_emulate_ctxt *ctxt)
668 + static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
669 + {
670 + int rc;
671 +- unsigned short sel, old_sel;
672 +- struct desc_struct old_desc, new_desc;
673 +- const struct x86_emulate_ops *ops = ctxt->ops;
674 ++ unsigned short sel;
675 ++ struct desc_struct new_desc;
676 + u8 cpl = ctxt->ops->cpl(ctxt);
677 +
678 +- /* Assignment of RIP may only fail in 64-bit mode */
679 +- if (ctxt->mode == X86EMUL_MODE_PROT64)
680 +- ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
681 +- VCPU_SREG_CS);
682 +-
683 + memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
684 +
685 + rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
686 +@@ -2124,12 +2118,10 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
687 + return rc;
688 +
689 + rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
690 +- if (rc != X86EMUL_CONTINUE) {
691 +- WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
692 +- /* assigning eip failed; restore the old cs */
693 +- ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
694 +- return rc;
695 +- }
696 ++ /* Error handling is not implemented. */
697 ++ if (rc != X86EMUL_CONTINUE)
698 ++ return X86EMUL_UNHANDLEABLE;
699 ++
700 + return rc;
701 + }
702 +
703 +@@ -2189,14 +2181,8 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
704 + {
705 + int rc;
706 + unsigned long eip, cs;
707 +- u16 old_cs;
708 + int cpl = ctxt->ops->cpl(ctxt);
709 +- struct desc_struct old_desc, new_desc;
710 +- const struct x86_emulate_ops *ops = ctxt->ops;
711 +-
712 +- if (ctxt->mode == X86EMUL_MODE_PROT64)
713 +- ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
714 +- VCPU_SREG_CS);
715 ++ struct desc_struct new_desc;
716 +
717 + rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
718 + if (rc != X86EMUL_CONTINUE)
719 +@@ -2213,10 +2199,10 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
720 + if (rc != X86EMUL_CONTINUE)
721 + return rc;
722 + rc = assign_eip_far(ctxt, eip, &new_desc);
723 +- if (rc != X86EMUL_CONTINUE) {
724 +- WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
725 +- ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
726 +- }
727 ++ /* Error handling is not implemented. */
728 ++ if (rc != X86EMUL_CONTINUE)
729 ++ return X86EMUL_UNHANDLEABLE;
730 ++
731 + return rc;
732 + }
733 +
734 +diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
735 +index 1a22de70f7f7..6e219e5c07d2 100644
736 +--- a/arch/x86/kvm/ioapic.c
737 ++++ b/arch/x86/kvm/ioapic.c
738 +@@ -94,7 +94,7 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
739 + static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
740 + {
741 + ioapic->rtc_status.pending_eoi = 0;
742 +- bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPUS);
743 ++ bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID);
744 + }
745 +
746 + static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);
747 +diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h
748 +index 7d2692a49657..1cc6e54436db 100644
749 +--- a/arch/x86/kvm/ioapic.h
750 ++++ b/arch/x86/kvm/ioapic.h
751 +@@ -42,13 +42,13 @@ struct kvm_vcpu;
752 +
753 + struct dest_map {
754 + /* vcpu bitmap where IRQ has been sent */
755 +- DECLARE_BITMAP(map, KVM_MAX_VCPUS);
756 ++ DECLARE_BITMAP(map, KVM_MAX_VCPU_ID);
757 +
758 + /*
759 + * Vector sent to a given vcpu, only valid when
760 + * the vcpu's bit in map is set
761 + */
762 +- u8 vectors[KVM_MAX_VCPUS];
763 ++ u8 vectors[KVM_MAX_VCPU_ID];
764 + };
765 +
766 +
767 +diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
768 +index 25810b144b58..e7a112ac51a8 100644
769 +--- a/arch/x86/kvm/irq_comm.c
770 ++++ b/arch/x86/kvm/irq_comm.c
771 +@@ -41,6 +41,15 @@ static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
772 + bool line_status)
773 + {
774 + struct kvm_pic *pic = pic_irqchip(kvm);
775 ++
776 ++ /*
777 ++ * XXX: rejecting pic routes when pic isn't in use would be better,
778 ++ * but the default routing table is installed while kvm->arch.vpic is
779 ++ * NULL and KVM_CREATE_IRQCHIP can race with KVM_IRQ_LINE.
780 ++ */
781 ++ if (!pic)
782 ++ return -1;
783 ++
784 + return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level);
785 + }
786 +
787 +@@ -49,6 +58,10 @@ static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
788 + bool line_status)
789 + {
790 + struct kvm_ioapic *ioapic = kvm->arch.vioapic;
791 ++
792 ++ if (!ioapic)
793 ++ return -1;
794 ++
795 + return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level,
796 + line_status);
797 + }
798 +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
799 +index b62c85229711..d2255e4f9589 100644
800 +--- a/arch/x86/kvm/lapic.c
801 ++++ b/arch/x86/kvm/lapic.c
802 +@@ -138,7 +138,7 @@ static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
803 + *mask = dest_id & 0xff;
804 + return true;
805 + case KVM_APIC_MODE_XAPIC_CLUSTER:
806 +- *cluster = map->xapic_cluster_map[dest_id >> 4];
807 ++ *cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
808 + *mask = dest_id & 0xf;
809 + return true;
810 + default:
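
The lapic.c fix matters because in xAPIC cluster mode the logical destination splits into a cluster number (high nibble) and a member mask (low nibble), and xapic_cluster_map has only 16 slots, so an unmasked guest-supplied dest_id indexes past the array. A small model of the bounded lookup; the array and values here are stand-ins:

/* Bounded xAPIC cluster lookup; dest_id plays the guest-controlled value. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t map[16] = { 0 };   /* one entry per cluster */
	uint32_t dest_id = 0x123;   /* garbage/malicious logical dest */

	/* Unmasked: map[dest_id >> 4] would read slot 0x12, out of bounds. */

	/* Masked, as in the fix: index confined to 0..15. */
	uint16_t cluster = map[(dest_id >> 4) & 0xf];
	uint16_t mask    = dest_id & 0xf;

	printf("cluster idx=%u members=0x%x val=%u\n",
	       (dest_id >> 4) & 0xf, mask, cluster);
	return 0;
}
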
811 +diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
812 +index 832b98f822be..a3a983fd4248 100644
813 +--- a/arch/x86/mm/extable.c
814 ++++ b/arch/x86/mm/extable.c
815 +@@ -135,7 +135,12 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
816 + if (early_recursion_flag > 2)
817 + goto halt_loop;
818 +
819 +- if (regs->cs != __KERNEL_CS)
820 ++ /*
821 ++ * Old CPUs leave the high bits of CS on the stack
822 ++ * undefined. I'm not sure which CPUs do this, but at least
823 ++ * the 486 DX works this way.
824 ++ */
825 ++ if ((regs->cs & 0xFFFF) != __KERNEL_CS)
826 + goto fail;
827 +
828 + /*
829 +diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
830 +index 865f46ea724f..c80765b211cf 100644
831 +--- a/crypto/asymmetric_keys/x509_cert_parser.c
832 ++++ b/crypto/asymmetric_keys/x509_cert_parser.c
833 +@@ -133,7 +133,6 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
834 + return cert;
835 +
836 + error_decode:
837 +- kfree(cert->pub->key);
838 + kfree(ctx);
839 + error_no_ctx:
840 + x509_free_certificate(cert);
841 +diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
842 +index 29f600f2c447..ff64313770bd 100644
843 +--- a/drivers/dax/dax.c
844 ++++ b/drivers/dax/dax.c
845 +@@ -323,8 +323,8 @@ static int check_vma(struct dax_dev *dax_dev, struct vm_area_struct *vma,
846 + if (!dax_dev->alive)
847 + return -ENXIO;
848 +
849 +- /* prevent private / writable mappings from being established */
850 +- if ((vma->vm_flags & (VM_NORESERVE|VM_SHARED|VM_WRITE)) == VM_WRITE) {
851 ++ /* prevent private mappings from being established */
852 ++ if ((vma->vm_flags & VM_SHARED) != VM_SHARED) {
853 + dev_info(dev, "%s: %s: fail, attempted private mapping\n",
854 + current->comm, func);
855 + return -EINVAL;
856 +diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
857 +index 73ae849f5170..76dd42dd7088 100644
858 +--- a/drivers/dax/pmem.c
859 ++++ b/drivers/dax/pmem.c
860 +@@ -77,7 +77,9 @@ static int dax_pmem_probe(struct device *dev)
861 + nsio = to_nd_namespace_io(&ndns->dev);
862 +
863 + /* parse the 'pfn' info block via ->rw_bytes */
864 +- devm_nsio_enable(dev, nsio);
865 ++ rc = devm_nsio_enable(dev, nsio);
866 ++ if (rc)
867 ++ return rc;
868 + altmap = nvdimm_setup_pfn(nd_pfn, &res, &__altmap);
869 + if (IS_ERR(altmap))
870 + return PTR_ERR(altmap);
871 +diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
872 +index 58470f5ced04..8c53748a769d 100644
873 +--- a/drivers/iommu/dmar.c
874 ++++ b/drivers/iommu/dmar.c
875 +@@ -338,7 +338,9 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
876 + struct pci_dev *pdev = to_pci_dev(data);
877 + struct dmar_pci_notify_info *info;
878 +
879 +- /* Only care about add/remove events for physical functions */
880 ++ /* Only care about add/remove events for physical functions.
881 ++ * For VFs we actually do the lookup based on the corresponding
882 ++ * PF in device_to_iommu() anyway. */
883 + if (pdev->is_virtfn)
884 + return NOTIFY_DONE;
885 + if (action != BUS_NOTIFY_ADD_DEVICE &&
886 +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
887 +index 1257b0b80296..7fb538708cec 100644
888 +--- a/drivers/iommu/intel-iommu.c
889 ++++ b/drivers/iommu/intel-iommu.c
890 +@@ -892,7 +892,13 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
891 + return NULL;
892 +
893 + if (dev_is_pci(dev)) {
894 ++ struct pci_dev *pf_pdev;
895 ++
896 + pdev = to_pci_dev(dev);
897 ++ /* VFs aren't listed in scope tables; we need to look up
898 ++ * the PF instead to find the IOMMU. */
899 ++ pf_pdev = pci_physfn(pdev);
900 ++ dev = &pf_pdev->dev;
901 + segment = pci_domain_nr(pdev->bus);
902 + } else if (has_acpi_companion(dev))
903 + dev = &ACPI_COMPANION(dev)->dev;
904 +@@ -905,6 +911,13 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
905 + for_each_active_dev_scope(drhd->devices,
906 + drhd->devices_cnt, i, tmp) {
907 + if (tmp == dev) {
908 ++ /* For a VF use its original BDF# not that of the PF
909 ++ * which we used for the IOMMU lookup. Strictly speaking
910 ++ * we could do this for all PCI devices; we only need to
911 ++ * get the BDF# from the scope table for ACPI matches. */
912 ++ if (pdev->is_virtfn)
913 ++ goto got_pdev;
914 ++
915 + *bus = drhd->devices[i].bus;
916 + *devfn = drhd->devices[i].devfn;
917 + goto out;
918 +diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
919 +index 8ebb3530afa7..cb72e0011310 100644
920 +--- a/drivers/iommu/intel-svm.c
921 ++++ b/drivers/iommu/intel-svm.c
922 +@@ -39,10 +39,18 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
923 + struct page *pages;
924 + int order;
925 +
926 +- order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT;
927 +- if (order < 0)
928 +- order = 0;
929 +-
930 ++ /* Start at 2 because it's defined as 2^(1+PSS) */
931 ++ iommu->pasid_max = 2 << ecap_pss(iommu->ecap);
932 ++
933 ++ /* Eventually I'm promised we will get a multi-level PASID table
934 ++ * and it won't have to be physically contiguous. Until then,
935 ++ * limit the size because 8MiB contiguous allocations can be hard
936 ++ * to come by. The limit of 0x20000, which is 1MiB for each of
937 ++ * the PASID and PASID-state tables, is somewhat arbitrary. */
938 ++ if (iommu->pasid_max > 0x20000)
939 ++ iommu->pasid_max = 0x20000;
940 ++
941 ++ order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max);
942 + pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
943 + if (!pages) {
944 + pr_warn("IOMMU: %s: Failed to allocate PASID table\n",
945 +@@ -53,6 +61,8 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
946 + pr_info("%s: Allocated order %d PASID table.\n", iommu->name, order);
947 +
948 + if (ecap_dis(iommu->ecap)) {
949 ++ /* Just making it explicit... */
950 ++ BUILD_BUG_ON(sizeof(struct pasid_entry) != sizeof(struct pasid_state_entry));
951 + pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
952 + if (pages)
953 + iommu->pasid_state_table = page_address(pages);
954 +@@ -68,11 +78,7 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
955 +
956 + int intel_svm_free_pasid_tables(struct intel_iommu *iommu)
957 + {
958 +- int order;
959 +-
960 +- order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT;
961 +- if (order < 0)
962 +- order = 0;
963 ++ int order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max);
964 +
965 + if (iommu->pasid_table) {
966 + free_pages((unsigned long)iommu->pasid_table, order);
967 +@@ -371,8 +377,8 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
968 + }
969 + svm->iommu = iommu;
970 +
971 +- if (pasid_max > 2 << ecap_pss(iommu->ecap))
972 +- pasid_max = 2 << ecap_pss(iommu->ecap);
973 ++ if (pasid_max > iommu->pasid_max)
974 ++ pasid_max = iommu->pasid_max;
975 +
976 + /* Do not use PASID 0 in caching mode (virtualised IOMMU) */
977 + ret = idr_alloc(&iommu->pasid_idr, svm,
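
The sizing in the intel-svm.c hunks works out as follows: pasid_max = 2 << PSS = 2^(1+PSS), capped at 0x20000 entries, and the allocation order then comes from get_order(entries * entry size). Assuming the 8-byte pasid_entry this file uses, a capped table is exactly 1 MiB. A sketch of the arithmetic, with get_order() re-implemented here for illustration and 4 KiB pages assumed:

#include <stdio.h>

/* Illustrative stand-in for the kernel helper, 4 KiB pages assumed. */
static int get_order(unsigned long size)
{
	int order = 0;
	unsigned long pages = (size + 4095) / 4096;

	while ((1UL << order) < pages)
		order++;
	return order;
}

int main(void)
{
	unsigned int pss = 19;                /* hypothetical ecap PSS field */
	unsigned long pasid_max = 2UL << pss; /* 2^(1+PSS) = 1M PASIDs */

	if (pasid_max > 0x20000)              /* cap at 128K PASIDs */
		pasid_max = 0x20000;

	/* 0x20000 entries * 8 bytes = 1 MiB -> order 8 */
	printf("pasid_max=%lu table=%lu KiB order=%d\n",
	       pasid_max, pasid_max * 8 / 1024, get_order(pasid_max * 8));
	return 0;
}
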
978 +diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c
979 +index 317ef63ee789..8d96a22647b3 100644
980 +--- a/drivers/media/tuners/tuner-xc2028.c
981 ++++ b/drivers/media/tuners/tuner-xc2028.c
982 +@@ -281,6 +281,14 @@ static void free_firmware(struct xc2028_data *priv)
983 + int i;
984 + tuner_dbg("%s called\n", __func__);
985 +
986 ++ /* free allocated f/w string */
987 ++ if (priv->fname != firmware_name)
988 ++ kfree(priv->fname);
989 ++ priv->fname = NULL;
990 ++
991 ++ priv->state = XC2028_NO_FIRMWARE;
992 ++ memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
993 ++
994 + if (!priv->firm)
995 + return;
996 +
997 +@@ -291,9 +299,6 @@ static void free_firmware(struct xc2028_data *priv)
998 +
999 + priv->firm = NULL;
1000 + priv->firm_size = 0;
1001 +- priv->state = XC2028_NO_FIRMWARE;
1002 +-
1003 +- memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
1004 + }
1005 +
1006 + static int load_all_firmwares(struct dvb_frontend *fe,
1007 +@@ -884,9 +889,8 @@ read_not_reliable:
1008 + return 0;
1009 +
1010 + fail:
1011 +- priv->state = XC2028_NO_FIRMWARE;
1012 ++ free_firmware(priv);
1013 +
1014 +- memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
1015 + if (retry_count < 8) {
1016 + msleep(50);
1017 + retry_count++;
1018 +@@ -1332,11 +1336,8 @@ static int xc2028_dvb_release(struct dvb_frontend *fe)
1019 + mutex_lock(&xc2028_list_mutex);
1020 +
1021 + /* only perform final cleanup if this is the last instance */
1022 +- if (hybrid_tuner_report_instance_count(priv) == 1) {
1023 ++ if (hybrid_tuner_report_instance_count(priv) == 1)
1024 + free_firmware(priv);
1025 +- kfree(priv->ctrl.fname);
1026 +- priv->ctrl.fname = NULL;
1027 +- }
1028 +
1029 + if (priv)
1030 + hybrid_tuner_release_state(priv);
1031 +@@ -1399,19 +1400,8 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
1032 +
1033 + /*
1034 + * Copy the config data.
1035 +- * For the firmware name, keep a local copy of the string,
1036 +- * in order to avoid troubles during device release.
1037 + */
1038 +- kfree(priv->ctrl.fname);
1039 +- priv->ctrl.fname = NULL;
1040 + memcpy(&priv->ctrl, p, sizeof(priv->ctrl));
1041 +- if (p->fname) {
1042 +- priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL);
1043 +- if (priv->ctrl.fname == NULL) {
1044 +- rc = -ENOMEM;
1045 +- goto unlock;
1046 +- }
1047 +- }
1048 +
1049 + /*
1050 + * If firmware name changed, frees firmware. As free_firmware will
1051 +@@ -1426,10 +1416,15 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
1052 +
1053 + if (priv->state == XC2028_NO_FIRMWARE) {
1054 + if (!firmware_name[0])
1055 +- priv->fname = priv->ctrl.fname;
1056 ++ priv->fname = kstrdup(p->fname, GFP_KERNEL);
1057 + else
1058 + priv->fname = firmware_name;
1059 +
1060 ++ if (!priv->fname) {
1061 ++ rc = -ENOMEM;
1062 ++ goto unlock;
1063 ++ }
1064 ++
1065 + rc = request_firmware_nowait(THIS_MODULE, 1,
1066 + priv->fname,
1067 + priv->i2c_props.adap->dev.parent,
1068 +diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
1069 +index 239be2fde242..2267601f0ac1 100644
1070 +--- a/drivers/mmc/host/sdhci-of-esdhc.c
1071 ++++ b/drivers/mmc/host/sdhci-of-esdhc.c
1072 +@@ -66,6 +66,20 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
1073 + return ret;
1074 + }
1075 + }
1076 ++ /*
1077 ++ * The DAT[3:0] line signal levels and the CMD line signal level are
1078 ++ * not compatible with the standard SDHC register. The line signal levels
1079 ++ * DAT[7:0] are at bits 31:24 and the command line signal level is at
1080 ++ * bit 23. All other bits are the same as in the standard SDHC
1081 ++ * register.
1082 ++ */
1083 ++ if (spec_reg == SDHCI_PRESENT_STATE) {
1084 ++ ret = value & 0x000fffff;
1085 ++ ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
1086 ++ ret |= (value << 1) & SDHCI_CMD_LVL;
1087 ++ return ret;
1088 ++ }
1089 ++
1090 + ret = value;
1091 + return ret;
1092 + }
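
The remap above keeps bits 19:0 unchanged, shifts the eSDHC DAT levels down by four so DAT[3:0] (bits 27:24) land in the standard SDHCI_DATA_LVL_MASK field (bits 23:20), and shifts the CMD level from bit 23 up to bit 24 (SDHCI_CMD_LVL). A worked example with a made-up raw register value:

#include <stdint.h>
#include <stdio.h>

#define SDHCI_DATA_LVL_MASK 0x00F00000  /* standard DAT[3:0] levels, bits 23:20 */
#define SDHCI_CMD_LVL       0x01000000  /* standard CMD level, bit 24 */

int main(void)
{
	/* eSDHC layout: DAT levels at bits 31:24, CMD level at bit 23. */
	uint32_t value = 0x8F800123; /* DAT[3:0]=0xF (bits 27:24), CMD=1 (bit 23) */
	uint32_t ret;

	ret  = value & 0x000fffff;                 /* keep the standard low bits */
	ret |= (value >> 4) & SDHCI_DATA_LVL_MASK; /* bits 27:24 -> 23:20 */
	ret |= (value << 1) & SDHCI_CMD_LVL;       /* bit 23 -> bit 24 */

	printf("raw=0x%08x fixed=0x%08x\n", value, ret); /* fixed=0x01F00123 */
	return 0;
}
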
1093 +diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
1094 +index 0411c9f36461..1b3bd1c7f4f6 100644
1095 +--- a/drivers/mmc/host/sdhci.h
1096 ++++ b/drivers/mmc/host/sdhci.h
1097 +@@ -73,6 +73,7 @@
1098 + #define SDHCI_DATA_LVL_MASK 0x00F00000
1099 + #define SDHCI_DATA_LVL_SHIFT 20
1100 + #define SDHCI_DATA_0_LVL_MASK 0x00100000
1101 ++#define SDHCI_CMD_LVL 0x01000000
1102 +
1103 + #define SDHCI_HOST_CONTROL 0x28
1104 + #define SDHCI_CTRL_LED 0x01
1105 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
1106 +index 46c0f5ecd99d..58e60298a360 100644
1107 +--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
1108 ++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
1109 +@@ -3894,6 +3894,11 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
1110 + }
1111 + }
1112 +
1113 ++static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd)
1114 ++{
1115 ++ return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16);
1116 ++}
1117 ++
1118 + /**
1119 + * _scsih_flush_running_cmds - completing outstanding commands.
1120 + * @ioc: per adapter object
1121 +@@ -3915,6 +3920,9 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
1122 + if (!scmd)
1123 + continue;
1124 + count++;
1125 ++ if (ata_12_16_cmd(scmd))
1126 ++ scsi_internal_device_unblock(scmd->device,
1127 ++ SDEV_RUNNING);
1128 + mpt3sas_base_free_smid(ioc, smid);
1129 + scsi_dma_unmap(scmd);
1130 + if (ioc->pci_error_recovery)
1131 +@@ -4019,8 +4027,6 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
1132 + SAM_STAT_CHECK_CONDITION;
1133 + }
1134 +
1135 +-
1136 +-
1137 + /**
1138 + * scsih_qcmd - main scsi request entry point
1139 + * @scmd: pointer to scsi command object
1140 +@@ -4047,6 +4053,13 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1141 + if (ioc->logging_level & MPT_DEBUG_SCSI)
1142 + scsi_print_command(scmd);
1143 +
1144 ++ /*
1145 ++ * Lock the device for any subsequent command until the command is
1146 ++ * done.
1147 ++ */
1148 ++ if (ata_12_16_cmd(scmd))
1149 ++ scsi_internal_device_block(scmd->device);
1150 ++
1151 + sas_device_priv_data = scmd->device->hostdata;
1152 + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
1153 + scmd->result = DID_NO_CONNECT << 16;
1154 +@@ -4622,6 +4635,9 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
1155 + if (scmd == NULL)
1156 + return 1;
1157 +
1158 ++ if (ata_12_16_cmd(scmd))
1159 ++ scsi_internal_device_unblock(scmd->device, SDEV_RUNNING);
1160 ++
1161 + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1162 +
1163 + if (mpi_reply == NULL) {
1164 +diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
1165 +index 7a223074df3d..afada655f861 100644
1166 +--- a/drivers/thermal/intel_powerclamp.c
1167 ++++ b/drivers/thermal/intel_powerclamp.c
1168 +@@ -669,9 +669,16 @@ static struct thermal_cooling_device_ops powerclamp_cooling_ops = {
1169 + .set_cur_state = powerclamp_set_cur_state,
1170 + };
1171 +
1172 ++static const struct x86_cpu_id __initconst intel_powerclamp_ids[] = {
1173 ++ { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT },
1174 ++ {}
1175 ++};
1176 ++MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
1177 ++
1178 + static int __init powerclamp_probe(void)
1179 + {
1180 +- if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
1181 ++
1182 ++ if (!x86_match_cpu(intel_powerclamp_ids)) {
1183 + pr_err("CPU does not support MWAIT");
1184 + return -ENODEV;
1185 + }
1186 +diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
1187 +index 69426e644d17..3dbb4a21ab44 100644
1188 +--- a/drivers/usb/chipidea/core.c
1189 ++++ b/drivers/usb/chipidea/core.c
1190 +@@ -914,6 +914,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
1191 + if (!ci)
1192 + return -ENOMEM;
1193 +
1194 ++ spin_lock_init(&ci->lock);
1195 + ci->dev = dev;
1196 + ci->platdata = dev_get_platdata(dev);
1197 + ci->imx28_write_fix = !!(ci->platdata->flags &
1198 +diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
1199 +index b93356834bb5..bced28fa1cbd 100644
1200 +--- a/drivers/usb/chipidea/udc.c
1201 ++++ b/drivers/usb/chipidea/udc.c
1202 +@@ -1895,8 +1895,6 @@ static int udc_start(struct ci_hdrc *ci)
1203 + struct usb_otg_caps *otg_caps = &ci->platdata->ci_otg_caps;
1204 + int retval = 0;
1205 +
1206 +- spin_lock_init(&ci->lock);
1207 +-
1208 + ci->gadget.ops = &usb_gadget_ops;
1209 + ci->gadget.speed = USB_SPEED_UNKNOWN;
1210 + ci->gadget.max_speed = USB_SPEED_HIGH;
1211 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
1212 +index f61477bed3a8..243ac5ebe46a 100644
1213 +--- a/drivers/usb/serial/cp210x.c
1214 ++++ b/drivers/usb/serial/cp210x.c
1215 +@@ -131,6 +131,7 @@ static const struct usb_device_id id_table[] = {
1216 + { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
1217 + { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
1218 + { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
1219 ++ { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */
1220 + { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
1221 + { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
1222 + { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
1223 +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
1224 +index 0ff7f38d7800..6e9fc8bcc285 100644
1225 +--- a/drivers/usb/serial/ftdi_sio.c
1226 ++++ b/drivers/usb/serial/ftdi_sio.c
1227 +@@ -1012,6 +1012,8 @@ static const struct usb_device_id id_table_combined[] = {
1228 + { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
1229 + { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
1230 + { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) },
1231 ++ { USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID),
1232 ++ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
1233 + { } /* Terminating entry */
1234 + };
1235 +
1236 +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
1237 +index 21011c0a4c64..48ee04c94a75 100644
1238 +--- a/drivers/usb/serial/ftdi_sio_ids.h
1239 ++++ b/drivers/usb/serial/ftdi_sio_ids.h
1240 +@@ -596,6 +596,12 @@
1241 + #define STK541_PID 0x2109 /* Zigbee Controller */
1242 +
1243 + /*
1244 ++ * Texas Instruments
1245 ++ */
1246 ++#define TI_VID 0x0451
1247 ++#define TI_CC3200_LAUNCHPAD_PID 0xC32A /* SimpleLink Wi-Fi CC3200 LaunchPad */
1248 ++
1249 ++/*
1250 + * Blackfin gnICE JTAG
1251 + * http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice
1252 + */
1253 +diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
1254 +index ffd086733421..1a59f335b063 100644
1255 +--- a/drivers/usb/storage/transport.c
1256 ++++ b/drivers/usb/storage/transport.c
1257 +@@ -954,10 +954,15 @@ int usb_stor_CB_transport(struct scsi_cmnd *srb, struct us_data *us)
1258 +
1259 + /* COMMAND STAGE */
1260 + /* let's send the command via the control pipe */
1261 ++ /*
1262 ++ * The command is sometimes (e.g. after scsi_eh_prep_cmnd) on the stack.
1263 ++ * The stack may be vmalloc'ed, so no DMA for us; make a copy.
1264 ++ */
1265 ++ memcpy(us->iobuf, srb->cmnd, srb->cmd_len);
1266 + result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe,
1267 + US_CBI_ADSC,
1268 + USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0,
1269 +- us->ifnum, srb->cmnd, srb->cmd_len);
1270 ++ us->ifnum, us->iobuf, srb->cmd_len);
1271 +
1272 + /* check the return code for the command */
1273 + usb_stor_dbg(us, "Call to usb_stor_ctrl_transfer() returned %d\n",
1274 +diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
1275 +index 52a28311e2a4..48efe62e1302 100644
1276 +--- a/fs/nfs/callback.c
1277 ++++ b/fs/nfs/callback.c
1278 +@@ -261,7 +261,7 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv,
1279 + }
1280 +
1281 + ret = -EPROTONOSUPPORT;
1282 +- if (minorversion == 0)
1283 ++ if (!IS_ENABLED(CONFIG_NFS_V4_1) || minorversion == 0)
1284 + ret = nfs4_callback_up_net(serv, net);
1285 + else if (xprt->ops->bc_up)
1286 + ret = xprt->ops->bc_up(serv, net);
1287 +diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
1288 +index 2d9b650047a5..d49e26c6cdc7 100644
1289 +--- a/include/linux/intel-iommu.h
1290 ++++ b/include/linux/intel-iommu.h
1291 +@@ -429,6 +429,7 @@ struct intel_iommu {
1292 + struct page_req_dsc *prq;
1293 + unsigned char prq_name[16]; /* Name for PRQ interrupt */
1294 + struct idr pasid_idr;
1295 ++ u32 pasid_max;
1296 + #endif
1297 + struct q_inval *qi; /* Queued invalidation info */
1298 + u32 *iommu_state; /* Store iommu states between suspend and resume.*/
1299 +diff --git a/kernel/events/core.c b/kernel/events/core.c
1300 +index fc9bb2225291..f8c5f5ec666e 100644
1301 +--- a/kernel/events/core.c
1302 ++++ b/kernel/events/core.c
1303 +@@ -7908,6 +7908,7 @@ restart:
1304 + * if <size> is not specified, the range is treated as a single address.
1305 + */
1306 + enum {
1307 ++ IF_ACT_NONE = -1,
1308 + IF_ACT_FILTER,
1309 + IF_ACT_START,
1310 + IF_ACT_STOP,
1311 +@@ -7931,6 +7932,7 @@ static const match_table_t if_tokens = {
1312 + { IF_SRC_KERNEL, "%u/%u" },
1313 + { IF_SRC_FILEADDR, "%u@%s" },
1314 + { IF_SRC_KERNELADDR, "%u" },
1315 ++ { IF_ACT_NONE, NULL },
1316 + };
1317 +
1318 + /*
1319 +diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c
1320 +index 5464c8744ea9..e24388a863a7 100644
1321 +--- a/lib/mpi/mpi-pow.c
1322 ++++ b/lib/mpi/mpi-pow.c
1323 +@@ -64,8 +64,13 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
1324 + if (!esize) {
1325 + /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0
1326 + * depending on if MOD equals 1. */
1327 +- rp[0] = 1;
1328 + res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1;
1329 ++ if (res->nlimbs) {
1330 ++ if (mpi_resize(res, 1) < 0)
1331 ++ goto enomem;
1332 ++ rp = res->d;
1333 ++ rp[0] = 1;
1334 ++ }
1335 + res->sign = 0;
1336 + goto leave;
1337 + }
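
Before this hunk, the zero-exponent path stored rp[0] = 1 even when res had been allocated with zero limbs, an out-of-bounds write; the fix resizes res before storing and only touches the limb array when one exists. A minimal model of the corrected path — the types and names here only mimic the kernel's MPI ones:

#include <stdio.h>
#include <stdlib.h>

struct mpi { unsigned long *d; int nlimbs; };

static int mpi_resize(struct mpi *m, int limbs)
{
	unsigned long *p = realloc(m->d, limbs * sizeof(*p));

	if (!p)
		return -1;
	m->d = p;
	return 0;
}

/* exp == 0: result is 1 mod MOD, i.e. 1, or 0 when MOD == 1. */
static int powm_zero_exp(struct mpi *res, int mod_is_one)
{
	res->nlimbs = mod_is_one ? 0 : 1;
	if (res->nlimbs) {          /* allocate before storing, as the fix does */
		if (mpi_resize(res, 1) < 0)
			return -1;
		res->d[0] = 1;
	}
	return 0;
}

int main(void)
{
	struct mpi res = { NULL, 0 }; /* freshly allocated, zero limbs */

	if (powm_zero_exp(&res, 0) == 0)
		printf("nlimbs=%d d[0]=%lu\n", res.nlimbs, res.d[0]);
	free(res.d);
	return 0;
}
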
1338 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
1339 +index a2214c64ed3c..7401e996009a 100644
1340 +--- a/mm/page_alloc.c
1341 ++++ b/mm/page_alloc.c
1342 +@@ -3161,6 +3161,16 @@ should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_fla
1343 + if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
1344 + return false;
1345 +
1346 ++#ifdef CONFIG_COMPACTION
1347 ++ /*
1348 ++ * This is a gross workaround to compensate for the lack of a reliable
1349 ++ * compaction operation. We cannot simply go OOM with the current state of
1350 ++ * the compaction code because this can lead to premature OOM declaration.
1351 ++ */
1352 ++ if (order <= PAGE_ALLOC_COSTLY_ORDER)
1353 ++ return true;
1354 ++#endif
1355 ++
1356 + /*
1357 + * There are setups with compaction disabled which would prefer to loop
1358 + * inside the allocator rather than hit the oom killer prematurely.
1359 +diff --git a/net/can/bcm.c b/net/can/bcm.c
1360 +index 8af9d25ff988..436a7537e6a9 100644
1361 +--- a/net/can/bcm.c
1362 ++++ b/net/can/bcm.c
1363 +@@ -77,7 +77,7 @@
1364 + (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
1365 + (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
1366 +
1367 +-#define CAN_BCM_VERSION "20160617"
1368 ++#define CAN_BCM_VERSION "20161123"
1369 +
1370 + MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
1371 + MODULE_LICENSE("Dual BSD/GPL");
1372 +@@ -109,8 +109,9 @@ struct bcm_op {
1373 + u32 count;
1374 + u32 nframes;
1375 + u32 currframe;
1376 +- struct canfd_frame *frames;
1377 +- struct canfd_frame *last_frames;
1378 ++ /* void pointers to arrays of struct can[fd]_frame */
1379 ++ void *frames;
1380 ++ void *last_frames;
1381 + struct canfd_frame sframe;
1382 + struct canfd_frame last_sframe;
1383 + struct sock *sk;
1384 +@@ -681,7 +682,7 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
1385 +
1386 + if (op->flags & RX_FILTER_ID) {
1387 + /* the easiest case */
1388 +- bcm_rx_update_and_send(op, &op->last_frames[0], rxframe);
1389 ++ bcm_rx_update_and_send(op, op->last_frames, rxframe);
1390 + goto rx_starttimer;
1391 + }
1392 +
1393 +@@ -1068,7 +1069,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1394 +
1395 + if (msg_head->nframes) {
1396 + /* update CAN frames content */
1397 +- err = memcpy_from_msg((u8 *)op->frames, msg,
1398 ++ err = memcpy_from_msg(op->frames, msg,
1399 + msg_head->nframes * op->cfsiz);
1400 + if (err < 0)
1401 + return err;
1402 +@@ -1118,7 +1119,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1403 + }
1404 +
1405 + if (msg_head->nframes) {
1406 +- err = memcpy_from_msg((u8 *)op->frames, msg,
1407 ++ err = memcpy_from_msg(op->frames, msg,
1408 + msg_head->nframes * op->cfsiz);
1409 + if (err < 0) {
1410 + if (op->frames != &op->sframe)
1411 +@@ -1163,6 +1164,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1412 + /* check flags */
1413 +
1414 + if (op->flags & RX_RTR_FRAME) {
1415 ++ struct canfd_frame *frame0 = op->frames;
1416 +
1417 + /* no timers in RTR-mode */
1418 + hrtimer_cancel(&op->thrtimer);
1419 +@@ -1174,8 +1176,8 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1420 + * prevent a full-load-loopback-test ... ;-]
1421 + */
1422 + if ((op->flags & TX_CP_CAN_ID) ||
1423 +- (op->frames[0].can_id == op->can_id))
1424 +- op->frames[0].can_id = op->can_id & ~CAN_RTR_FLAG;
1425 ++ (frame0->can_id == op->can_id))
1426 ++ frame0->can_id = op->can_id & ~CAN_RTR_FLAG;
1427 +
1428 + } else {
1429 + if (op->flags & SETTIMER) {
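
The bcm_op change above switches frames/last_frames to void pointers because the stored array holds either struct can_frame or struct canfd_frame elements, so elements must be addressed by byte stride using the per-op frame size (op->cfsiz) rather than by canfd_frame indexing. A sketch of that striding, with the struct shapes abbreviated:

#include <stdio.h>
#include <string.h>

struct can_frame   { unsigned int can_id; unsigned char len; unsigned char data[8]; };
struct canfd_frame { unsigned int can_id; unsigned char len; unsigned char data[64]; };

/* Address element i of an array whose element size is only known at runtime. */
static void *nth_frame(void *frames, unsigned int cfsiz, unsigned int i)
{
	return (unsigned char *)frames + (size_t)i * cfsiz;
}

int main(void)
{
	struct can_frame arr[3];

	memset(arr, 0, sizeof(arr));
	arr[2].can_id = 0x123;

	/* Correct: stride by cfsiz == sizeof(struct can_frame). Indexing the
	 * same memory as canfd_frame[2] would stride far past the element. */
	struct can_frame *cf = nth_frame(arr, sizeof(struct can_frame), 2);

	printf("can_id=0x%x\n", cf->can_id);
	return 0;
}
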
1430 +diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
1431 +index 5550a86f7264..396aac7e6e79 100644
1432 +--- a/net/core/flow_dissector.c
1433 ++++ b/net/core/flow_dissector.c
1434 +@@ -945,4 +945,4 @@ static int __init init_default_flow_dissectors(void)
1435 + return 0;
1436 + }
1437 +
1438 +-late_initcall_sync(init_default_flow_dissectors);
1439 ++core_initcall(init_default_flow_dissectors);
1440 +diff --git a/net/wireless/core.h b/net/wireless/core.h
1441 +index eee91443924d..66f2a1145d7c 100644
1442 +--- a/net/wireless/core.h
1443 ++++ b/net/wireless/core.h
1444 +@@ -71,6 +71,7 @@ struct cfg80211_registered_device {
1445 + struct list_head bss_list;
1446 + struct rb_root bss_tree;
1447 + u32 bss_generation;
1448 ++ u32 bss_entries;
1449 + struct cfg80211_scan_request *scan_req; /* protected by RTNL */
1450 + struct sk_buff *scan_msg;
1451 + struct cfg80211_sched_scan_request __rcu *sched_scan_req;
1452 +diff --git a/net/wireless/scan.c b/net/wireless/scan.c
1453 +index 0358e12be54b..438143a3827d 100644
1454 +--- a/net/wireless/scan.c
1455 ++++ b/net/wireless/scan.c
1456 +@@ -57,6 +57,19 @@
1457 + * also linked into the probe response struct.
1458 + */
1459 +
1460 ++/*
1461 ++ * Limit the number of BSS entries stored in mac80211. Each one is
1462 ++ * a bit over 4k at most, so this limits to roughly 4-5M of memory.
1463 ++ * If somebody wants to really attack this though, they'd likely
1464 ++ * use small beacons, and only one type of frame, limiting each of
1465 ++ * the entries to a much smaller size (in order to generate more
1466 ++ * entries in total, so overhead is bigger.)
1467 ++ */
1468 ++static int bss_entries_limit = 1000;
1469 ++module_param(bss_entries_limit, int, 0644);
1470 ++MODULE_PARM_DESC(bss_entries_limit,
1471 ++ "limit to number of scan BSS entries (per wiphy, default 1000)");
1472 ++
1473 + #define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ)
1474 +
1475 + static void bss_free(struct cfg80211_internal_bss *bss)
1476 +@@ -137,6 +150,10 @@ static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *rdev,
1477 +
1478 + list_del_init(&bss->list);
1479 + rb_erase(&bss->rbn, &rdev->bss_tree);
1480 ++ rdev->bss_entries--;
1481 ++ WARN_ONCE((rdev->bss_entries == 0) ^ list_empty(&rdev->bss_list),
1482 ++ "rdev bss entries[%d]/list[empty:%d] corruption\n",
1483 ++ rdev->bss_entries, list_empty(&rdev->bss_list));
1484 + bss_ref_put(rdev, bss);
1485 + return true;
1486 + }
1487 +@@ -163,6 +180,40 @@ static void __cfg80211_bss_expire(struct cfg80211_registered_device *rdev,
1488 + rdev->bss_generation++;
1489 + }
1490 +
1491 ++static bool cfg80211_bss_expire_oldest(struct cfg80211_registered_device *rdev)
1492 ++{
1493 ++ struct cfg80211_internal_bss *bss, *oldest = NULL;
1494 ++ bool ret;
1495 ++
1496 ++ lockdep_assert_held(&rdev->bss_lock);
1497 ++
1498 ++ list_for_each_entry(bss, &rdev->bss_list, list) {
1499 ++ if (atomic_read(&bss->hold))
1500 ++ continue;
1501 ++
1502 ++ if (!list_empty(&bss->hidden_list) &&
1503 ++ !bss->pub.hidden_beacon_bss)
1504 ++ continue;
1505 ++
1506 ++ if (oldest && time_before(oldest->ts, bss->ts))
1507 ++ continue;
1508 ++ oldest = bss;
1509 ++ }
1510 ++
1511 ++ if (WARN_ON(!oldest))
1512 ++ return false;
1513 ++
1514 ++ /*
1515 ++ * The callers make sure to increase rdev->bss_generation if anything
1516 ++ * gets removed (and a new entry added), so there's no need to also do
1517 ++ * it here.
1518 ++ */
1519 ++
1520 ++ ret = __cfg80211_unlink_bss(rdev, oldest);
1521 ++ WARN_ON(!ret);
1522 ++ return ret;
1523 ++}
1524 ++
1525 + void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
1526 + bool send_message)
1527 + {
1528 +@@ -693,6 +744,7 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
1529 + const u8 *ie;
1530 + int i, ssidlen;
1531 + u8 fold = 0;
1532 ++ u32 n_entries = 0;
1533 +
1534 + ies = rcu_access_pointer(new->pub.beacon_ies);
1535 + if (WARN_ON(!ies))
1536 +@@ -716,6 +768,12 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
1537 + /* This is the bad part ... */
1538 +
1539 + list_for_each_entry(bss, &rdev->bss_list, list) {
1540 ++ /*
1541 ++ * we're iterating all the entries anyway, so take the
1542 ++ * opportunity to validate the list length accounting
1543 ++ */
1544 ++ n_entries++;
1545 ++
1546 + if (!ether_addr_equal(bss->pub.bssid, new->pub.bssid))
1547 + continue;
1548 + if (bss->pub.channel != new->pub.channel)
1549 +@@ -744,6 +802,10 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
1550 + new->pub.beacon_ies);
1551 + }
1552 +
1553 ++ WARN_ONCE(n_entries != rdev->bss_entries,
1554 ++ "rdev bss entries[%d]/list[len:%d] corruption\n",
1555 ++ rdev->bss_entries, n_entries);
1556 ++
1557 + return true;
1558 + }
1559 +
1560 +@@ -898,7 +960,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
1561 + }
1562 + }
1563 +
1564 ++ if (rdev->bss_entries >= bss_entries_limit &&
1565 ++ !cfg80211_bss_expire_oldest(rdev)) {
1566 ++ kfree(new);
1567 ++ goto drop;
1568 ++ }
1569 ++
1570 + list_add_tail(&new->list, &rdev->bss_list);
1571 ++ rdev->bss_entries++;
1572 + rb_insert_bss(rdev, new);
1573 + found = new;
1574 + }
1575 +diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
1576 +index fc3036b34e51..a4d90aa1045a 100644
1577 +--- a/security/apparmor/domain.c
1578 ++++ b/security/apparmor/domain.c
1579 +@@ -621,8 +621,8 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest)
1580 + /* released below */
1581 + cred = get_current_cred();
1582 + cxt = cred_cxt(cred);
1583 +- profile = aa_cred_profile(cred);
1584 +- previous_profile = cxt->previous;
1585 ++ profile = aa_get_newest_profile(aa_cred_profile(cred));
1586 ++ previous_profile = aa_get_newest_profile(cxt->previous);
1587 +
1588 + if (unconfined(profile)) {
1589 + info = "unconfined";
1590 +@@ -718,6 +718,8 @@ audit:
1591 + out:
1592 + aa_put_profile(hat);
1593 + kfree(name);
1594 ++ aa_put_profile(profile);
1595 ++ aa_put_profile(previous_profile);
1596 + put_cred(cred);
1597 +
1598 + return error;