Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Wed, 20 Feb 2019 11:16:06 +0000 (UTC)
Message-Id: 1550661340.e51dfb22b20095d7285f68aad2fbaa0a84faa088.mpagano@gentoo
commit: e51dfb22b20095d7285f68aad2fbaa0a84faa088
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Feb 20 11:15:40 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Feb 20 11:15:40 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e51dfb22

proj/linux-patches: Linux patch 4.9.159

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1158_linux-4.9.159.patch | 2426 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 2430 insertions(+)

diff --git a/0000_README b/0000_README
index 0d0c627..53c6c44 100644
--- a/0000_README
+++ b/0000_README
@@ -675,6 +675,10 @@ Patch: 1157_linux-4.9.158.patch
From: http://www.kernel.org
Desc: Linux 4.9.158

+Patch: 1158_linux-4.9.159.patch
+From: http://www.kernel.org
+Desc: Linux 4.9.159
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1158_linux-4.9.159.patch b/1158_linux-4.9.159.patch
new file mode 100644
index 0000000..fe0e636
--- /dev/null
+++ b/1158_linux-4.9.159.patch
@@ -0,0 +1,2426 @@
+diff --git a/Documentation/devicetree/bindings/eeprom/eeprom.txt b/Documentation/devicetree/bindings/eeprom/eeprom.txt
+index 735bc94444bb..4dcce8ee5cee 100644
+--- a/Documentation/devicetree/bindings/eeprom/eeprom.txt
++++ b/Documentation/devicetree/bindings/eeprom/eeprom.txt
+@@ -6,7 +6,8 @@ Required properties:
+
+ "atmel,24c00", "atmel,24c01", "atmel,24c02", "atmel,24c04",
+ "atmel,24c08", "atmel,24c16", "atmel,24c32", "atmel,24c64",
+- "atmel,24c128", "atmel,24c256", "atmel,24c512", "atmel,24c1024"
++ "atmel,24c128", "atmel,24c256", "atmel,24c512", "atmel,24c1024",
++ "atmel,24c2048"
+
+ "catalyst,24c32"
+
+@@ -17,7 +18,7 @@ Required properties:
+ If there is no specific driver for <manufacturer>, a generic
+ driver based on <type> is selected. Possible types are:
+ "24c00", "24c01", "24c02", "24c04", "24c08", "24c16", "24c32", "24c64",
+- "24c128", "24c256", "24c512", "24c1024", "spd"
++ "24c128", "24c256", "24c512", "24c1024", "24c2048", "spd"
+
+ - reg : the I2C address of the EEPROM
+
+diff --git a/Makefile b/Makefile
+index 2b8434aaeece..a452ead13b1e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 158
++SUBLEVEL = 159
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+
+diff --git a/arch/alpha/include/asm/irq.h b/arch/alpha/include/asm/irq.h
+index 06377400dc09..469642801a68 100644
+--- a/arch/alpha/include/asm/irq.h
++++ b/arch/alpha/include/asm/irq.h
+@@ -55,15 +55,15 @@
+
+ #elif defined(CONFIG_ALPHA_DP264) || \
+ defined(CONFIG_ALPHA_LYNX) || \
+- defined(CONFIG_ALPHA_SHARK) || \
+- defined(CONFIG_ALPHA_EIGER)
++ defined(CONFIG_ALPHA_SHARK)
+ # define NR_IRQS 64
+
+ #elif defined(CONFIG_ALPHA_TITAN)
+ #define NR_IRQS 80
+
+ #elif defined(CONFIG_ALPHA_RAWHIDE) || \
+- defined(CONFIG_ALPHA_TAKARA)
++ defined(CONFIG_ALPHA_TAKARA) || \
++ defined(CONFIG_ALPHA_EIGER)
+ # define NR_IRQS 128
+
+ #elif defined(CONFIG_ALPHA_WILDFIRE)
+diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
+index 83e9eee57a55..f70663127aad 100644
+--- a/arch/alpha/mm/fault.c
++++ b/arch/alpha/mm/fault.c
+@@ -77,7 +77,7 @@ __load_new_mm_context(struct mm_struct *next_mm)
+ /* Macro for exception fixup code to access integer registers. */
+ #define dpf_reg(r) \
+ (((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \
+- (r) <= 18 ? (r)+8 : (r)-10])
++ (r) <= 18 ? (r)+10 : (r)-10])
+
+ asmlinkage void
+ do_page_fault(unsigned long address, unsigned long mmcsr,
+diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts
+index 78492a0bbbab..3c58ec707ea9 100644
+--- a/arch/arm/boot/dts/da850-evm.dts
++++ b/arch/arm/boot/dts/da850-evm.dts
+@@ -156,7 +156,7 @@
+
+ sound {
+ compatible = "simple-audio-card";
+- simple-audio-card,name = "DA850/OMAP-L138 EVM";
++ simple-audio-card,name = "DA850-OMAPL138 EVM";
+ simple-audio-card,widgets =
+ "Line", "Line In",
+ "Line", "Line Out";
+diff --git a/arch/arm/boot/dts/da850-lcdk.dts b/arch/arm/boot/dts/da850-lcdk.dts
+index 7b8ab21fed6c..920e64cdb673 100644
+--- a/arch/arm/boot/dts/da850-lcdk.dts
++++ b/arch/arm/boot/dts/da850-lcdk.dts
+@@ -26,7 +26,7 @@
+
+ sound {
+ compatible = "simple-audio-card";
+- simple-audio-card,name = "DA850/OMAP-L138 LCDK";
++ simple-audio-card,name = "DA850-OMAPL138 LCDK";
+ simple-audio-card,widgets =
+ "Line", "Line In",
+ "Line", "Line Out";
+diff --git a/arch/arm/boot/dts/kirkwood-dnskw.dtsi b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
+index d8fca9db46d0..dddbc0d03da5 100644
+--- a/arch/arm/boot/dts/kirkwood-dnskw.dtsi
++++ b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
+@@ -35,8 +35,8 @@
+ compatible = "gpio-fan";
+ pinctrl-0 = <&pmx_fan_high_speed &pmx_fan_low_speed>;
+ pinctrl-names = "default";
+- gpios = <&gpio1 14 GPIO_ACTIVE_LOW
+- &gpio1 13 GPIO_ACTIVE_LOW>;
++ gpios = <&gpio1 14 GPIO_ACTIVE_HIGH
++ &gpio1 13 GPIO_ACTIVE_HIGH>;
+ gpio-fan,speed-map = <0 0
+ 3000 1
+ 6000 2>;
+diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
+index e616f61f859d..7d727506096f 100644
+--- a/arch/arm/include/asm/assembler.h
++++ b/arch/arm/include/asm/assembler.h
+@@ -465,6 +465,17 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
+ #endif
+ .endm
+
++ .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
++#ifdef CONFIG_CPU_SPECTRE
++ sub \tmp, \limit, #1
++ subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr
++ addhs \tmp, \tmp, #1 @ if (tmp >= 0) {
++ subhss \tmp, \tmp, \size @ tmp = limit - (addr + size) }
++ movlo \addr, #0 @ if (tmp < 0) addr = NULL
++ csdb
++#endif
++ .endm
++
+ .macro uaccess_disable, tmp, isb=1
+ #ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ /*
+diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
+index c55db1e22f0c..b9356dbfded0 100644
+--- a/arch/arm/include/asm/cputype.h
++++ b/arch/arm/include/asm/cputype.h
+@@ -106,6 +106,7 @@
+ #define ARM_CPU_PART_SCORPION 0x510002d0
+
+ extern unsigned int processor_id;
++struct proc_info_list *lookup_processor(u32 midr);
+
+ #ifdef CONFIG_CPU_CP15
+ #define read_cpuid(reg) \
+diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
+index f379f5f849a9..1bfcc3bcfc6d 100644
+--- a/arch/arm/include/asm/proc-fns.h
++++ b/arch/arm/include/asm/proc-fns.h
+@@ -23,7 +23,7 @@ struct mm_struct;
+ /*
+ * Don't change this structure - ASM code relies on it.
+ */
+-extern struct processor {
++struct processor {
+ /* MISC
+ * get data abort address/flags
+ */
+@@ -79,9 +79,13 @@ extern struct processor {
+ unsigned int suspend_size;
+ void (*do_suspend)(void *);
+ void (*do_resume)(void *);
+-} processor;
++};
+
+ #ifndef MULTI_CPU
++static inline void init_proc_vtable(const struct processor *p)
++{
++}
++
+ extern void cpu_proc_init(void);
+ extern void cpu_proc_fin(void);
+ extern int cpu_do_idle(void);
+@@ -98,17 +102,50 @@ extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
+ extern void cpu_do_suspend(void *);
+ extern void cpu_do_resume(void *);
+ #else
+-#define cpu_proc_init processor._proc_init
+-#define cpu_proc_fin processor._proc_fin
+-#define cpu_reset processor.reset
+-#define cpu_do_idle processor._do_idle
+-#define cpu_dcache_clean_area processor.dcache_clean_area
+-#define cpu_set_pte_ext processor.set_pte_ext
+-#define cpu_do_switch_mm processor.switch_mm
+
+-/* These three are private to arch/arm/kernel/suspend.c */
+-#define cpu_do_suspend processor.do_suspend
+-#define cpu_do_resume processor.do_resume
++extern struct processor processor;
++#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
++#include <linux/smp.h>
++/*
++ * This can't be a per-cpu variable because we need to access it before
++ * per-cpu has been initialised. We have a couple of functions that are
++ * called in a pre-emptible context, and so can't use smp_processor_id()
++ * there, hence PROC_TABLE(). We insist in init_proc_vtable() that the
++ * function pointers for these are identical across all CPUs.
++ */
++extern struct processor *cpu_vtable[];
++#define PROC_VTABLE(f) cpu_vtable[smp_processor_id()]->f
++#define PROC_TABLE(f) cpu_vtable[0]->f
++static inline void init_proc_vtable(const struct processor *p)
++{
++ unsigned int cpu = smp_processor_id();
++ *cpu_vtable[cpu] = *p;
++ WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area !=
++ cpu_vtable[0]->dcache_clean_area);
++ WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext !=
++ cpu_vtable[0]->set_pte_ext);
++}
++#else
++#define PROC_VTABLE(f) processor.f
++#define PROC_TABLE(f) processor.f
++static inline void init_proc_vtable(const struct processor *p)
++{
++ processor = *p;
++}
++#endif
++
++#define cpu_proc_init PROC_VTABLE(_proc_init)
++#define cpu_check_bugs PROC_VTABLE(check_bugs)
++#define cpu_proc_fin PROC_VTABLE(_proc_fin)
++#define cpu_reset PROC_VTABLE(reset)
++#define cpu_do_idle PROC_VTABLE(_do_idle)
++#define cpu_dcache_clean_area PROC_TABLE(dcache_clean_area)
++#define cpu_set_pte_ext PROC_TABLE(set_pte_ext)
++#define cpu_do_switch_mm PROC_VTABLE(switch_mm)
++
++/* These two are private to arch/arm/kernel/suspend.c */
++#define cpu_do_suspend PROC_VTABLE(do_suspend)
++#define cpu_do_resume PROC_VTABLE(do_resume)
+ #endif
+
+ extern void cpu_resume(void);
+diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
+index 57d2ad9c75ca..df8420672c7e 100644
+--- a/arch/arm/include/asm/thread_info.h
++++ b/arch/arm/include/asm/thread_info.h
+@@ -124,8 +124,8 @@ extern void vfp_flush_hwstate(struct thread_info *);
+ struct user_vfp;
+ struct user_vfp_exc;
+
+-extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
+- struct user_vfp_exc __user *);
++extern int vfp_preserve_user_clear_hwstate(struct user_vfp *,
++ struct user_vfp_exc *);
+ extern int vfp_restore_user_hwstate(struct user_vfp *,
+ struct user_vfp_exc *);
+ #endif
+diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
+index 7b17460127fd..0f6c6b873bc5 100644
+--- a/arch/arm/include/asm/uaccess.h
++++ b/arch/arm/include/asm/uaccess.h
+@@ -99,6 +99,14 @@ extern int __put_user_bad(void);
+ static inline void set_fs(mm_segment_t fs)
+ {
+ current_thread_info()->addr_limit = fs;
++
++ /*
++ * Prevent a mispredicted conditional call to set_fs from forwarding
++ * the wrong address limit to access_ok under speculation.
++ */
++ dsb(nsh);
++ isb();
++
+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
+ }
+
+@@ -121,6 +129,32 @@ static inline void set_fs(mm_segment_t fs)
+ #define __inttype(x) \
+ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+
++/*
++ * Sanitise a uaccess pointer such that it becomes NULL if addr+size
++ * is above the current addr_limit.
++ */
++#define uaccess_mask_range_ptr(ptr, size) \
++ ((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
++static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
++ size_t size)
++{
++ void __user *safe_ptr = (void __user *)ptr;
++ unsigned long tmp;
++
++ asm volatile(
++ " sub %1, %3, #1\n"
++ " subs %1, %1, %0\n"
++ " addhs %1, %1, #1\n"
++ " subhss %1, %1, %2\n"
++ " movlo %0, #0\n"
++ : "+r" (safe_ptr), "=&r" (tmp)
++ : "r" (size), "r" (current_thread_info()->addr_limit)
++ : "cc");
++
++ csdb();
++ return safe_ptr;
++}
++
+ /*
+ * Single-value transfer routines. They automatically use the right
+ * size if we just have the right pointer type. Note that the functions
+@@ -392,6 +426,14 @@ do { \
+ __pu_err; \
+ })
+
++#ifdef CONFIG_CPU_SPECTRE
++/*
++ * When mitigating Spectre variant 1.1, all accessors need to include
++ * verification of the address space.
++ */
++#define __put_user(x, ptr) put_user(x, ptr)
++
++#else
+ #define __put_user(x, ptr) \
+ ({ \
+ long __pu_err = 0; \
+@@ -399,12 +441,6 @@ do { \
+ __pu_err; \
+ })
+
+-#define __put_user_error(x, ptr, err) \
+-({ \
+- __put_user_switch((x), (ptr), (err), __put_user_nocheck); \
+- (void) 0; \
+-})
+-
+ #define __put_user_nocheck(x, __pu_ptr, __err, __size) \
+ do { \
+ unsigned long __pu_addr = (unsigned long)__pu_ptr; \
+@@ -484,6 +520,7 @@ do { \
+ : "r" (x), "i" (-EFAULT) \
+ : "cc")
+
++#endif /* !CONFIG_CPU_SPECTRE */
+
+ #ifdef CONFIG_MMU
+ extern unsigned long __must_check
+diff --git a/arch/arm/kernel/bugs.c b/arch/arm/kernel/bugs.c
+index 7be511310191..d41d3598e5e5 100644
+--- a/arch/arm/kernel/bugs.c
++++ b/arch/arm/kernel/bugs.c
+@@ -6,8 +6,8 @@
+ void check_other_bugs(void)
+ {
+ #ifdef MULTI_CPU
+- if (processor.check_bugs)
+- processor.check_bugs();
++ if (cpu_check_bugs)
++ cpu_check_bugs();
+ #endif
+ }
+
+diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
+index 8733012d231f..7e662bdd5cb3 100644
+--- a/arch/arm/kernel/head-common.S
++++ b/arch/arm/kernel/head-common.S
+@@ -122,6 +122,9 @@ __mmap_switched_data:
+ .long init_thread_union + THREAD_START_SP @ sp
+ .size __mmap_switched_data, . - __mmap_switched_data
+
++ __FINIT
++ .text
++
+ /*
+ * This provides a C-API version of __lookup_processor_type
+ */
+@@ -133,9 +136,6 @@ ENTRY(lookup_processor_type)
+ ldmfd sp!, {r4 - r6, r9, pc}
+ ENDPROC(lookup_processor_type)
+
+- __FINIT
+- .text
+-
+ /*
+ * Read processor ID register (CP#15, CR0), and look up in the linker-built
+ * supported processor list. Note that we can't use the absolute addresses
+diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
+index f4e54503afa9..4764742db7b0 100644
+--- a/arch/arm/kernel/setup.c
++++ b/arch/arm/kernel/setup.c
+@@ -115,6 +115,11 @@ EXPORT_SYMBOL(elf_hwcap2);
+
+ #ifdef MULTI_CPU
+ struct processor processor __ro_after_init;
++#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
++struct processor *cpu_vtable[NR_CPUS] = {
++ [0] = &processor,
++};
++#endif
+ #endif
+ #ifdef MULTI_TLB
+ struct cpu_tlb_fns cpu_tlb __ro_after_init;
+@@ -667,28 +672,33 @@ static void __init smp_build_mpidr_hash(void)
+ }
+ #endif
+
+-static void __init setup_processor(void)
++/*
++ * locate processor in the list of supported processor types. The linker
++ * builds this table for us from the entries in arch/arm/mm/proc-*.S
++ */
++struct proc_info_list *lookup_processor(u32 midr)
+ {
+- struct proc_info_list *list;
++ struct proc_info_list *list = lookup_processor_type(midr);
+
+- /*
+- * locate processor in the list of supported processor
+- * types. The linker builds this table for us from the
+- * entries in arch/arm/mm/proc-*.S
+- */
+- list = lookup_processor_type(read_cpuid_id());
+ if (!list) {
+- pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
+- read_cpuid_id());
+- while (1);
++ pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
++ smp_processor_id(), midr);
++ while (1)
++ /* can't use cpu_relax() here as it may require MMU setup */;
+ }
+
++ return list;
++}
++
++static void __init setup_processor(void)
++{
++ unsigned int midr = read_cpuid_id();
++ struct proc_info_list *list = lookup_processor(midr);
++
+ cpu_name = list->cpu_name;
+ __cpu_architecture = __get_cpu_architecture();
+
+-#ifdef MULTI_CPU
+- processor = *list->proc;
+-#endif
++ init_proc_vtable(list->proc);
+ #ifdef MULTI_TLB
+ cpu_tlb = *list->tlb;
+ #endif
+@@ -700,7 +710,7 @@ static void __init setup_processor(void)
+ #endif
+
+ pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
+- cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
++ list->cpu_name, midr, midr & 15,
+ proc_arch[cpu_architecture()], get_cr());
+
+ snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
+diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
+index 6bee5c9b1133..0a066f03b5ec 100644
+--- a/arch/arm/kernel/signal.c
++++ b/arch/arm/kernel/signal.c
+@@ -94,17 +94,18 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
+
+ static int preserve_vfp_context(struct vfp_sigframe __user *frame)
+ {
+- const unsigned long magic = VFP_MAGIC;
+- const unsigned long size = VFP_STORAGE_SIZE;
++ struct vfp_sigframe kframe;
+ int err = 0;
+
+- __put_user_error(magic, &frame->magic, err);
+- __put_user_error(size, &frame->size, err);
++ memset(&kframe, 0, sizeof(kframe));
++ kframe.magic = VFP_MAGIC;
++ kframe.size = VFP_STORAGE_SIZE;
+
++ err = vfp_preserve_user_clear_hwstate(&kframe.ufp, &kframe.ufp_exc);
+ if (err)
+- return -EFAULT;
++ return err;
+
+- return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
++ return __copy_to_user(frame, &kframe, sizeof(kframe));
+ }
+
+ static int restore_vfp_context(struct vfp_sigframe __user *auxp)
+@@ -256,30 +257,35 @@ static int
+ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
+ {
+ struct aux_sigframe __user *aux;
++ struct sigcontext context;
+ int err = 0;
+
+- __put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
+- __put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
+- __put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
+- __put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
+- __put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
+- __put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
+- __put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
+- __put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
+- __put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
+- __put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
+- __put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
+- __put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
+- __put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
+- __put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
+- __put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
+- __put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
+- __put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
+-
+- __put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
+- __put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
+- __put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
+- __put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);
++ context = (struct sigcontext) {
++ .arm_r0 = regs->ARM_r0,
++ .arm_r1 = regs->ARM_r1,
++ .arm_r2 = regs->ARM_r2,
++ .arm_r3 = regs->ARM_r3,
++ .arm_r4 = regs->ARM_r4,
++ .arm_r5 = regs->ARM_r5,
++ .arm_r6 = regs->ARM_r6,
++ .arm_r7 = regs->ARM_r7,
++ .arm_r8 = regs->ARM_r8,
++ .arm_r9 = regs->ARM_r9,
++ .arm_r10 = regs->ARM_r10,
++ .arm_fp = regs->ARM_fp,
++ .arm_ip = regs->ARM_ip,
++ .arm_sp = regs->ARM_sp,
++ .arm_lr = regs->ARM_lr,
++ .arm_pc = regs->ARM_pc,
++ .arm_cpsr = regs->ARM_cpsr,
++
++ .trap_no = current->thread.trap_no,
++ .error_code = current->thread.error_code,
++ .fault_address = current->thread.address,
++ .oldmask = set->sig[0],
++ };
++
++ err |= __copy_to_user(&sf->uc.uc_mcontext, &context, sizeof(context));
+
+ err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
+
+@@ -296,7 +302,7 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
+ if (err == 0)
+ err |= preserve_vfp_context(&aux->vfp);
+ #endif
+- __put_user_error(0, &aux->end_magic, err);
++ err |= __put_user(0, &aux->end_magic);
+
+ return err;
+ }
+@@ -428,7 +434,7 @@ setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
+ /*
+ * Set uc.uc_flags to a value which sc.trap_no would never have.
+ */
+- __put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);
++ err = __put_user(0x5ac3c35a, &frame->uc.uc_flags);
+
+ err |= setup_sigframe(frame, regs, set);
+ if (err == 0)
+@@ -448,8 +454,8 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
+
+ err |= copy_siginfo_to_user(&frame->info, &ksig->info);
+
+- __put_user_error(0, &frame->sig.uc.uc_flags, err);
+- __put_user_error(NULL, &frame->sig.uc.uc_link, err);
++ err |= __put_user(0, &frame->sig.uc.uc_flags);
++ err |= __put_user(NULL, &frame->sig.uc.uc_link);
+
+ err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp);
+ err |= setup_sigframe(&frame->sig, regs, set);
+diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
+index 4b129aac7233..8faf869e9fb2 100644
+--- a/arch/arm/kernel/smp.c
++++ b/arch/arm/kernel/smp.c
+@@ -27,6 +27,7 @@
+ #include <linux/completion.h>
+ #include <linux/cpufreq.h>
+ #include <linux/irq_work.h>
++#include <linux/slab.h>
+
+ #include <linux/atomic.h>
+ #include <asm/bugs.h>
+@@ -40,6 +41,7 @@
+ #include <asm/mmu_context.h>
+ #include <asm/pgtable.h>
+ #include <asm/pgalloc.h>
++#include <asm/procinfo.h>
+ #include <asm/processor.h>
+ #include <asm/sections.h>
+ #include <asm/tlbflush.h>
+@@ -100,6 +102,30 @@ static unsigned long get_arch_pgd(pgd_t *pgd)
+ #endif
+ }
+
++#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
++static int secondary_biglittle_prepare(unsigned int cpu)
++{
++ if (!cpu_vtable[cpu])
++ cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);
++
++ return cpu_vtable[cpu] ? 0 : -ENOMEM;
++}
++
++static void secondary_biglittle_init(void)
++{
++ init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
++}
++#else
++static int secondary_biglittle_prepare(unsigned int cpu)
++{
++ return 0;
++}
++
++static void secondary_biglittle_init(void)
++{
++}
++#endif
++
+ int __cpu_up(unsigned int cpu, struct task_struct *idle)
+ {
+ int ret;
+@@ -107,6 +133,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
+ if (!smp_ops.smp_boot_secondary)
+ return -ENOSYS;
+
++ ret = secondary_biglittle_prepare(cpu);
++ if (ret)
++ return ret;
++
+ /*
+ * We need to tell the secondary core where to find
+ * its stack and the page tables.
+@@ -358,6 +388,8 @@ asmlinkage void secondary_start_kernel(void)
+ struct mm_struct *mm = &init_mm;
+ unsigned int cpu;
+
++ secondary_biglittle_init();
++
+ /*
+ * The identity mapping is uncached (strongly ordered), so
+ * switch away from it before attempting any exclusive accesses.
+diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
+index 640748e27035..d844c5c9364b 100644
+--- a/arch/arm/kernel/sys_oabi-compat.c
++++ b/arch/arm/kernel/sys_oabi-compat.c
+@@ -276,6 +276,7 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
+ int maxevents, int timeout)
+ {
+ struct epoll_event *kbuf;
++ struct oabi_epoll_event e;
+ mm_segment_t fs;
+ long ret, err, i;
+
+@@ -294,8 +295,11 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
+ set_fs(fs);
+ err = 0;
+ for (i = 0; i < ret; i++) {
+- __put_user_error(kbuf[i].events, &events->events, err);
+- __put_user_error(kbuf[i].data, &events->data, err);
++ e.events = kbuf[i].events;
++ e.data = kbuf[i].data;
++ err = __copy_to_user(events, &e, sizeof(e));
++ if (err)
++ break;
+ events++;
+ }
+ kfree(kbuf);
+diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
+index a826df3d3814..6709a8d33963 100644
+--- a/arch/arm/lib/copy_from_user.S
++++ b/arch/arm/lib/copy_from_user.S
+@@ -93,11 +93,7 @@ ENTRY(arm_copy_from_user)
+ #ifdef CONFIG_CPU_SPECTRE
+ get_thread_info r3
+ ldr r3, [r3, #TI_ADDR_LIMIT]
+- adds ip, r1, r2 @ ip=addr+size
+- sub r3, r3, #1 @ addr_limit - 1
+- cmpcc ip, r3 @ if (addr+size > addr_limit - 1)
+- movcs r1, #0 @ addr = NULL
+- csdb
++ uaccess_mask_range_ptr r1, r2, r3, ip
+ #endif
+
+ #include "copy_template.S"
+diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
+index caf5019d8161..970abe521197 100644
+--- a/arch/arm/lib/copy_to_user.S
++++ b/arch/arm/lib/copy_to_user.S
+@@ -94,6 +94,11 @@
+
+ ENTRY(__copy_to_user_std)
+ WEAK(arm_copy_to_user)
++#ifdef CONFIG_CPU_SPECTRE
++ get_thread_info r3
++ ldr r3, [r3, #TI_ADDR_LIMIT]
++ uaccess_mask_range_ptr r0, r2, r3, ip
++#endif
+
+ #include "copy_template.S"
+
+@@ -108,4 +113,3 @@ ENDPROC(__copy_to_user_std)
+ rsb r0, r0, r2
+ copy_abort_end
+ .popsection
+-
+diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
+index 6bd1089b07e0..f598d792bace 100644
+--- a/arch/arm/lib/uaccess_with_memcpy.c
++++ b/arch/arm/lib/uaccess_with_memcpy.c
+@@ -152,7 +152,8 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
+ n = __copy_to_user_std(to, from, n);
+ uaccess_restore(ua_flags);
+ } else {
+- n = __copy_to_user_memcpy(to, from, n);
++ n = __copy_to_user_memcpy(uaccess_mask_range_ptr(to, n),
++ from, n);
+ }
+ return n;
+ }
+diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c
+index ed9a01484030..a52fe871adbc 100644
+--- a/arch/arm/mach-integrator/impd1.c
++++ b/arch/arm/mach-integrator/impd1.c
+@@ -394,7 +394,11 @@ static int __ref impd1_probe(struct lm_device *dev)
+ sizeof(*lookup) + 3 * sizeof(struct gpiod_lookup),
+ GFP_KERNEL);
+ chipname = devm_kstrdup(&dev->dev, devname, GFP_KERNEL);
+- mmciname = kasprintf(GFP_KERNEL, "lm%x:00700", dev->id);
++ mmciname = devm_kasprintf(&dev->dev, GFP_KERNEL,
++ "lm%x:00700", dev->id);
++ if (!lookup || !chipname || !mmciname)
++ return -ENOMEM;
++
+ lookup->dev_id = mmciname;
+ /*
+ * Offsets on GPIO block 1:
+diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
+index 7d9176c4a21d..f8bb65032b79 100644
+--- a/arch/arm/mm/proc-macros.S
++++ b/arch/arm/mm/proc-macros.S
+@@ -275,6 +275,13 @@
+ .endm
+
+ .macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0
+/*
++ * If we are building for big.Little with branch predictor hardening,
++ * we need the processor function tables to remain available after boot.
++ */
++#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
++ .section ".rodata"
++#endif
+ .type \name\()_processor_functions, #object
+ .align 2
+ ENTRY(\name\()_processor_functions)
+@@ -310,6 +317,9 @@ ENTRY(\name\()_processor_functions)
+ .endif
+
+ .size \name\()_processor_functions, . - \name\()_processor_functions
++#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
++ .previous
++#endif
+ .endm
+
+ .macro define_cache_functions name:req
+diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
+index 5544b82a2e7a..9a07916af8dd 100644
+--- a/arch/arm/mm/proc-v7-bugs.c
++++ b/arch/arm/mm/proc-v7-bugs.c
+@@ -52,8 +52,6 @@ static void cpu_v7_spectre_init(void)
+ case ARM_CPU_PART_CORTEX_A17:
+ case ARM_CPU_PART_CORTEX_A73:
+ case ARM_CPU_PART_CORTEX_A75:
+- if (processor.switch_mm != cpu_v7_bpiall_switch_mm)
+- goto bl_error;
+ per_cpu(harden_branch_predictor_fn, cpu) =
+ harden_branch_predictor_bpiall;
+ spectre_v2_method = "BPIALL";
+@@ -61,8 +59,6 @@ static void cpu_v7_spectre_init(void)
+
+ case ARM_CPU_PART_CORTEX_A15:
+ case ARM_CPU_PART_BRAHMA_B15:
+- if (processor.switch_mm != cpu_v7_iciallu_switch_mm)
+- goto bl_error;
+ per_cpu(harden_branch_predictor_fn, cpu) =
+ harden_branch_predictor_iciallu;
+ spectre_v2_method = "ICIALLU";
+@@ -88,11 +84,9 @@ static void cpu_v7_spectre_init(void)
+ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+ if ((int)res.a0 != 0)
+ break;
+- if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu)
+- goto bl_error;
+ per_cpu(harden_branch_predictor_fn, cpu) =
+ call_hvc_arch_workaround_1;
+- processor.switch_mm = cpu_v7_hvc_switch_mm;
++ cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
+ spectre_v2_method = "hypervisor";
+ break;
+
+@@ -101,11 +95,9 @@ static void cpu_v7_spectre_init(void)
+ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+ if ((int)res.a0 != 0)
+ break;
+- if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu)
+- goto bl_error;
+ per_cpu(harden_branch_predictor_fn, cpu) =
+ call_smc_arch_workaround_1;
+- processor.switch_mm = cpu_v7_smc_switch_mm;
++ cpu_do_switch_mm = cpu_v7_smc_switch_mm;
+ spectre_v2_method = "firmware";
+ break;
+
+@@ -119,11 +111,6 @@ static void cpu_v7_spectre_init(void)
+ if (spectre_v2_method)
+ pr_info("CPU%u: Spectre v2: using %s workaround\n",
+ smp_processor_id(), spectre_v2_method);
+- return;
+-
+-bl_error:
+- pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n",
+- cpu);
+ }
+ #else
+ static void cpu_v7_spectre_init(void)
+diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
+index 8e5e97989fda..00dd8cf36632 100644
+--- a/arch/arm/vfp/vfpmodule.c
++++ b/arch/arm/vfp/vfpmodule.c
+@@ -554,12 +554,11 @@ void vfp_flush_hwstate(struct thread_info *thread)
+ * Save the current VFP state into the provided structures and prepare
+ * for entry into a new function (signal handler).
+ */
+-int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
+- struct user_vfp_exc __user *ufp_exc)
++int vfp_preserve_user_clear_hwstate(struct user_vfp *ufp,
++ struct user_vfp_exc *ufp_exc)
+ {
+ struct thread_info *thread = current_thread_info();
+ struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
+- int err = 0;
+
+ /* Ensure that the saved hwstate is up-to-date. */
+ vfp_sync_hwstate(thread);
+@@ -568,22 +567,19 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
+ * Copy the floating point registers. There can be unused
+ * registers see asm/hwcap.h for details.
+ */
+- err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs,
+- sizeof(hwstate->fpregs));
++ memcpy(&ufp->fpregs, &hwstate->fpregs, sizeof(hwstate->fpregs));
++
+ /*
+ * Copy the status and control register.
+ */
+- __put_user_error(hwstate->fpscr, &ufp->fpscr, err);
++ ufp->fpscr = hwstate->fpscr;
+
+ /*
+ * Copy the exception registers.
+ */
+- __put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err);
+- __put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
+- __put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
+-
+- if (err)
+- return -EFAULT;
++ ufp_exc->fpexc = hwstate->fpexc;
++ ufp_exc->fpinst = hwstate->fpinst;
++ ufp_exc->fpinst2 = hwstate->fpinst2;
+
+ /* Ensure that VFP is disabled. */
+ vfp_flush_hwstate(thread);
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index cadf99923600..ab04751a12b6 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -2196,6 +2196,19 @@ void perf_check_microcode(void)
+ }
+ EXPORT_SYMBOL_GPL(perf_check_microcode);
+
++static int x86_pmu_check_period(struct perf_event *event, u64 value)
++{
++ if (x86_pmu.check_period && x86_pmu.check_period(event, value))
++ return -EINVAL;
++
++ if (value && x86_pmu.limit_period) {
++ if (x86_pmu.limit_period(event, value) > value)
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
+ static struct pmu pmu = {
+ .pmu_enable = x86_pmu_enable,
+ .pmu_disable = x86_pmu_disable,
+@@ -2220,6 +2233,7 @@ static struct pmu pmu = {
+ .event_idx = x86_pmu_event_idx,
+ .sched_task = x86_pmu_sched_task,
+ .task_ctx_size = sizeof(struct x86_perf_task_context),
++ .check_period = x86_pmu_check_period,
+ };
+
+ void arch_perf_update_userpage(struct perf_event *event,
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index f600ab601e00..f0639c8ebcb6 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -3262,6 +3262,11 @@ static void intel_pmu_sched_task(struct perf_event_context *ctx,
+ intel_pmu_lbr_sched_task(ctx, sched_in);
+ }
+
++static int intel_pmu_check_period(struct perf_event *event, u64 value)
++{
++ return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
++}
++
+ PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
+
+ PMU_FORMAT_ATTR(ldlat, "config1:0-15");
+@@ -3328,6 +3333,8 @@ static __initconst const struct x86_pmu core_pmu = {
+ .cpu_starting = intel_pmu_cpu_starting,
+ .cpu_dying = intel_pmu_cpu_dying,
+ .cpu_dead = intel_pmu_cpu_dead,
++
++ .check_period = intel_pmu_check_period,
+ };
+
+ static __initconst const struct x86_pmu intel_pmu = {
+@@ -3367,6 +3374,8 @@ static __initconst const struct x86_pmu intel_pmu = {
+
+ .guest_get_msrs = intel_guest_get_msrs,
+ .sched_task = intel_pmu_sched_task,
++
++ .check_period = intel_pmu_check_period,
+ };
+
+ static __init void intel_clovertown_quirk(void)
+diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
+index 7ace39c51ff7..5c21680b0a69 100644
+--- a/arch/x86/events/perf_event.h
++++ b/arch/x86/events/perf_event.h
+@@ -626,6 +626,11 @@ struct x86_pmu {
+ * Intel host/guest support (KVM)
+ */
+ struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
++
++ /*
++ * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
++ */
++ int (*check_period) (struct perf_event *event, u64 period);
+ };
+
+ struct x86_perf_task_context {
+@@ -833,7 +838,7 @@ static inline int amd_pmu_init(void)
+
+ #ifdef CONFIG_CPU_SUP_INTEL
+
+-static inline bool intel_pmu_has_bts(struct perf_event *event)
++static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
+ {
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned int hw_event, bts_event;
+@@ -844,7 +849,14 @@ static inline bool intel_pmu_has_bts(struct perf_event *event)
+ hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
+ bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
+
+- return hw_event == bts_event && hwc->sample_period == 1;
++ return hw_event == bts_event && period == 1;
++}
++
++static inline bool intel_pmu_has_bts(struct perf_event *event)
++{
++ struct hw_perf_event *hwc = &event->hw;
++
++ return intel_pmu_has_bts_period(event, hwc->sample_period);
+ }
+
+ int intel_pmu_save_and_restart(struct perf_event *event);
+diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
+index cb26f18d43af..555c002167ad 100644
+--- a/arch/x86/ia32/ia32_aout.c
++++ b/arch/x86/ia32/ia32_aout.c
+@@ -50,7 +50,7 @@ static unsigned long get_dr(int n)
+ /*
+ * fill in the user structure for a core dump..
+ */
+-static void dump_thread32(struct pt_regs *regs, struct user32 *dump)
++static void fill_dump(struct pt_regs *regs, struct user32 *dump)
+ {
+ u32 fs, gs;
+ memset(dump, 0, sizeof(*dump));
+@@ -156,10 +156,12 @@ static int aout_core_dump(struct coredump_params *cprm)
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+ has_dumped = 1;
++
++ fill_dump(cprm->regs, &dump);
++
+ strncpy(dump.u_comm, current->comm, sizeof(current->comm));
+ dump.u_ar0 = offsetof(struct user32, regs);
+ dump.signal = cprm->siginfo->si_signo;
+- dump_thread32(cprm->regs, &dump);
+
+ /*
+ * If the size of the dump file exceeds the rlimit, then see
+diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h
+index e652a7cc6186..3f697a9e3f59 100644
+--- a/arch/x86/include/asm/uv/bios.h
++++ b/arch/x86/include/asm/uv/bios.h
+@@ -48,7 +48,8 @@ enum {
+ BIOS_STATUS_SUCCESS = 0,
+ BIOS_STATUS_UNIMPLEMENTED = -ENOSYS,
+ BIOS_STATUS_EINVAL = -EINVAL,
+- BIOS_STATUS_UNAVAIL = -EBUSY
++ BIOS_STATUS_UNAVAIL = -EBUSY,
++ BIOS_STATUS_ABORT = -EINTR,
+ };
+
+ /* Address map parameters */
+@@ -167,4 +168,9 @@ extern long system_serial_number;
+
+ extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */
+
++/*
++ * EFI runtime lock; cf. firmware/efi/runtime-wrappers.c for details
++ */
++extern struct semaphore __efi_uv_runtime_lock;
++
+ #endif /* _ASM_X86_UV_BIOS_H */
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 91db841101ca..1870fa7387b7 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -2178,7 +2178,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
+ if (!entry_only)
+ j = find_msr(&m->host, msr);
+
+- if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) {
++ if ((i < 0 && m->guest.nr == NR_AUTOLOAD_MSRS) ||
++ (j < 0 && m->host.nr == NR_AUTOLOAD_MSRS)) {
+ printk_once(KERN_WARNING "Not enough msr switch entries. "
+ "Can't add msr %x\n", msr);
+ return;
+diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c
+index 4a6a5a26c582..eb33432f2f24 100644
+--- a/arch/x86/platform/uv/bios_uv.c
++++ b/arch/x86/platform/uv/bios_uv.c
+@@ -29,7 +29,8 @@
+
+ struct uv_systab *uv_systab;
+
+-s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
++static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
++ u64 a4, u64 a5)
+ {
+ struct uv_systab *tab = uv_systab;
+ s64 ret;
+@@ -51,6 +52,19 @@ s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
+
+ return ret;
+ }
++
++s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
++{
++ s64 ret;
++
++ if (down_interruptible(&__efi_uv_runtime_lock))
++ return BIOS_STATUS_ABORT;
++
++ ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
++ up(&__efi_uv_runtime_lock);
++
++ return ret;
++}
+ EXPORT_SYMBOL_GPL(uv_bios_call);
+
+ s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
+@@ -59,10 +73,15 @@ s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
+ unsigned long bios_flags;
+ s64 ret;
+
++ if (down_interruptible(&__efi_uv_runtime_lock))
++ return BIOS_STATUS_ABORT;
++
+ local_irq_save(bios_flags);
+- ret = uv_bios_call(which, a1, a2, a3, a4, a5);
++ ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
+ local_irq_restore(bios_flags);
+
++ up(&__efi_uv_runtime_lock);
++
+ return ret;
+ }
+
+diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
+index 17b518cb787c..0ea065c6725a 100644
+--- a/drivers/acpi/numa.c
++++ b/drivers/acpi/numa.c
+@@ -147,9 +147,9 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
+ {
+ struct acpi_srat_mem_affinity *p =
+ (struct acpi_srat_mem_affinity *)header;
+- pr_debug("SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s%s\n",
+- (unsigned long)p->base_address,
+- (unsigned long)p->length,
++ pr_debug("SRAT Memory (0x%llx length 0x%llx) in proximity domain %d %s%s%s\n",
++ (unsigned long long)p->base_address,
++ (unsigned long long)p->length,
+ p->proximity_domain,
+ (p->flags & ACPI_SRAT_MEM_ENABLED) ?
+ "enabled" : "disabled",
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index d6d91e8afa9e..61fe4bbc6dc0 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -1496,17 +1496,16 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
+ {
+ unsigned int ret_freq = 0;
+
+- if (!cpufreq_driver->get)
++ if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get)
+ return ret_freq;
+
+ ret_freq = cpufreq_driver->get(policy->cpu);
+
+ /*
+- * Updating inactive policies is invalid, so avoid doing that. Also
+- * if fast frequency switching is used with the given policy, the check
++ * If fast frequency switching is used with the given policy, the check
+ * against policy->cur is pointless, so skip it in that case too.
+ */
+- if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
++ if (policy->fast_switch_enabled)
+ return ret_freq;
+
+ if (ret_freq && policy->cur &&
+diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
+index ae54870b2788..dd7f63354ca0 100644
+--- a/drivers/firmware/efi/runtime-wrappers.c
++++ b/drivers/firmware/efi/runtime-wrappers.c
+@@ -49,6 +49,13 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call)
+ local_irq_restore(flags);
+ }
+
++/*
++ * Expose the EFI runtime lock to the UV platform
++ */
++#ifdef CONFIG_X86_UV
++extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock);
++#endif
++
+ /*
+ * According to section 7.1 of the UEFI spec, Runtime Services are not fully
+ * reentrant, and there are particular combinations of calls that need to be
+diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
+index f64f35cdc2ff..fa3f2f039a74 100644
+--- a/drivers/gpu/drm/bridge/tc358767.c
++++ b/drivers/gpu/drm/bridge/tc358767.c
+@@ -96,6 +96,8 @@
+ #define DP0_STARTVAL 0x064c
+ #define DP0_ACTIVEVAL 0x0650
+ #define DP0_SYNCVAL 0x0654
++#define SYNCVAL_HS_POL_ACTIVE_LOW (1 << 15)
++#define SYNCVAL_VS_POL_ACTIVE_LOW (1 << 31)
+ #define DP0_MISC 0x0658
+ #define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */
+ #define BPC_6 (0 << 5)
+@@ -140,6 +142,8 @@
+ #define DP0_LTLOOPCTRL 0x06d8
+ #define DP0_SNKLTCTRL 0x06e4
+
++#define DP1_SRCCTRL 0x07a0
++
+ /* PHY */
+ #define DP_PHY_CTRL 0x0800
+ #define DP_PHY_RST BIT(28) /* DP PHY Global Soft Reset */
+@@ -148,6 +152,7 @@
+ #define PHY_M1_RST BIT(12) /* Reset PHY1 Main Channel */
+ #define PHY_RDY BIT(16) /* PHY Main Channels Ready */
+ #define PHY_M0_RST BIT(8) /* Reset PHY0 Main Channel */
++#define PHY_2LANE BIT(2) /* PHY Enable 2 lanes */
+ #define PHY_A0_EN BIT(1) /* PHY Aux Channel0 Enable */
+ #define PHY_M0_EN BIT(0) /* PHY Main Channel0 Enable */
+
+@@ -538,6 +543,7 @@ static int tc_aux_link_setup(struct tc_data *tc)
+ unsigned long rate;
+ u32 value;
+ int ret;
++ u32 dp_phy_ctrl;
+
+ rate = clk_get_rate(tc->refclk);
+ switch (rate) {
+@@ -562,7 +568,10 @@ static int tc_aux_link_setup(struct tc_data *tc)
+ value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
+ tc_write(SYS_PLLPARAM, value);
+
+- tc_write(DP_PHY_CTRL, BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN);
++ dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN;
++ if (tc->link.base.num_lanes == 2)
++ dp_phy_ctrl |= PHY_2LANE;
++ tc_write(DP_PHY_CTRL, dp_phy_ctrl);
+
+ /*
+ * Initially PLLs are in bypass. Force PLL parameter update,
+@@ -717,7 +726,9 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
+
+ tc_write(DP0_ACTIVEVAL, (mode->vdisplay << 16) | (mode->hdisplay));
+
+- tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0));
++ tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0) |
++ ((mode->flags & DRM_MODE_FLAG_NHSYNC) ? SYNCVAL_HS_POL_ACTIVE_LOW : 0) |
++ ((mode->flags & DRM_MODE_FLAG_NVSYNC) ? SYNCVAL_VS_POL_ACTIVE_LOW : 0));
+
+ tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
+ DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
+@@ -827,12 +838,11 @@ static int tc_main_link_setup(struct tc_data *tc)
+ if (!tc->mode)
+ return -EINVAL;
+
+- /* from excel file - DP0_SrcCtrl */
+- tc_write(DP0_SRCCTRL, DP0_SRCCTRL_SCRMBLDIS | DP0_SRCCTRL_EN810B |
+- DP0_SRCCTRL_LANESKEW | DP0_SRCCTRL_LANES_2 |
+- DP0_SRCCTRL_BW27 | DP0_SRCCTRL_AUTOCORRECT);
+- /* from excel file - DP1_SrcCtrl */
+- tc_write(0x07a0, 0x00003083);
++ tc_write(DP0_SRCCTRL, tc_srcctrl(tc));
++ /* SSCG and BW27 on DP1 must be set to the same as on DP0 */
++ tc_write(DP1_SRCCTRL,
++ (tc->link.spread ? DP0_SRCCTRL_SSCG : 0) |
++ ((tc->link.base.rate != 162000) ? DP0_SRCCTRL_BW27 : 0));
+
+ rate = clk_get_rate(tc->refclk);
+ switch (rate) {
+@@ -853,8 +863,11 @@ static int tc_main_link_setup(struct tc_data *tc)
+ }
+ value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
+ tc_write(SYS_PLLPARAM, value);
+
+ /* Setup Main Link */
+- dp_phy_ctrl = BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN | PHY_M0_EN;
++ dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN | PHY_M0_EN;
++ if (tc->link.base.num_lanes == 2)
++ dp_phy_ctrl |= PHY_2LANE;
+ tc_write(DP_PHY_CTRL, dp_phy_ctrl);
+ msleep(100);
+
+@@ -1109,10 +1122,20 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
+ static int tc_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+ {
++ struct tc_data *tc = connector_to_tc(connector);
++ u32 req, avail;
++ u32 bits_per_pixel = 24;
++
+ /* DPI interface clock limitation: upto 154 MHz */
+ if (mode->clock > 154000)
+ return MODE_CLOCK_HIGH;
+
++ req = mode->clock * bits_per_pixel / 8;
++ avail = tc->link.base.num_lanes * tc->link.base.rate;
++
++ if (req > avail)
++ return MODE_BAD;
++
+ return MODE_OK;
+ }
+
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 7b2030925825..6509031098d5 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -1593,6 +1593,16 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
+ return err;
+ }
+
++static inline bool
++__vma_matches(struct vm_area_struct *vma, struct file *filp,
++ unsigned long addr, unsigned long size)
++{
++ if (vma->vm_file != filp)
++ return false;
++
++ return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size;
++}
++
+ /**
+ * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
+ * it is mapped to.
+@@ -1651,7 +1661,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ return -EINTR;
+ }
+ vma = find_vma(mm, addr);
+- if (vma)
++ if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
+ vma->vm_page_prot =
+ pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ else
+diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
+index b0d445390ee4..d43bc7bd3387 100644
+--- a/drivers/input/misc/bma150.c
++++ b/drivers/input/misc/bma150.c
+@@ -482,13 +482,14 @@ static int bma150_register_input_device(struct bma150_data *bma150)
+ idev->close = bma150_irq_close;
+ input_set_drvdata(idev, bma150);
+
++ bma150->input = idev;
++
+ error = input_register_device(idev);
+ if (error) {
+ input_free_device(idev);
+ return error;
+ }
+
+- bma150->input = idev;
+ return 0;
+ }
+
+@@ -511,15 +512,15 @@ static int bma150_register_polled_device(struct bma150_data *bma150)
+
+ bma150_init_input_device(bma150, ipoll_dev->input);
+
++ bma150->input_polled = ipoll_dev;
++ bma150->input = ipoll_dev->input;
++
+ error = input_register_polled_device(ipoll_dev);
+ if (error) {
+ input_free_polled_device(ipoll_dev);
+ return error;
+ }
+
+- bma150->input_polled = ipoll_dev;
+- bma150->input = ipoll_dev->input;
+-
+ return 0;
+ }
+
+diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
+index 30adc5745cba..25ce9047b682 100644
+--- a/drivers/input/mouse/elan_i2c_core.c
++++ b/drivers/input/mouse/elan_i2c_core.c
+@@ -1240,7 +1240,6 @@ MODULE_DEVICE_TABLE(i2c, elan_id);
+ static const struct acpi_device_id elan_acpi_id[] = {
+ { "ELAN0000", 0 },
+ { "ELAN0100", 0 },
+- { "ELAN0501", 0 },
+ { "ELAN0600", 0 },
+ { "ELAN0602", 0 },
+ { "ELAN0605", 0 },
+@@ -1251,6 +1250,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
+ { "ELAN060C", 0 },
+ { "ELAN0611", 0 },
+ { "ELAN0612", 0 },
++ { "ELAN0617", 0 },
+ { "ELAN0618", 0 },
+ { "ELAN061C", 0 },
+ { "ELAN061D", 0 },
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index c120afd9c46a..38edf8f5bf8a 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1117,6 +1117,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
+ * Asus UX31 0x361f00 20, 15, 0e clickpad
+ * Asus UX32VD 0x361f02 00, 15, 0e clickpad
+ * Avatar AVIU-145A2 0x361f00 ? clickpad
++ * Fujitsu CELSIUS H760 0x570f02 40, 14, 0c 3 hw buttons (**)
++ * Fujitsu CELSIUS H780 0x5d0f02 41, 16, 0d 3 hw buttons (**)
+ * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
+ * Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons
+ * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons
+@@ -1169,6 +1171,13 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
+ },
+ },
++ {
++ /* Fujitsu H780 also has a middle button */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H780"),
++ },
++ },
+ #endif
+ { }
+ };
1418 +diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
1419 +index 914c8a6bf93c..345f4d81ba07 100644
1420 +--- a/drivers/md/dm-thin.c
1421 ++++ b/drivers/md/dm-thin.c
1422 +@@ -257,6 +257,7 @@ struct pool {
1423 +
1424 + spinlock_t lock;
1425 + struct bio_list deferred_flush_bios;
1426 ++ struct bio_list deferred_flush_completions;
1427 + struct list_head prepared_mappings;
1428 + struct list_head prepared_discards;
1429 + struct list_head prepared_discards_pt2;
1430 +@@ -925,6 +926,39 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
1431 + mempool_free(m, m->tc->pool->mapping_pool);
1432 + }
1433 +
1434 ++static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
1435 ++{
1436 ++ struct pool *pool = tc->pool;
1437 ++ unsigned long flags;
1438 ++
1439 ++ /*
1440 ++ * If the bio has the REQ_FUA flag set we must commit the metadata
1441 ++ * before signaling its completion.
1442 ++ */
1443 ++ if (!bio_triggers_commit(tc, bio)) {
1444 ++ bio_endio(bio);
1445 ++ return;
1446 ++ }
1447 ++
1448 ++ /*
1449 ++ * Complete bio with an error if earlier I/O caused changes to the
1450 ++ * metadata that can't be committed, e.g, due to I/O errors on the
1451 ++ * metadata device.
1452 ++ */
1453 ++ if (dm_thin_aborted_changes(tc->td)) {
1454 ++ bio_io_error(bio);
1455 ++ return;
1456 ++ }
1457 ++
1458 ++ /*
1459 ++ * Batch together any bios that trigger commits and then issue a
1460 ++ * single commit for them in process_deferred_bios().
1461 ++ */
1462 ++ spin_lock_irqsave(&pool->lock, flags);
1463 ++ bio_list_add(&pool->deferred_flush_completions, bio);
1464 ++ spin_unlock_irqrestore(&pool->lock, flags);
1465 ++}
1466 ++
1467 + static void process_prepared_mapping(struct dm_thin_new_mapping *m)
1468 + {
1469 + struct thin_c *tc = m->tc;
1470 +@@ -957,7 +991,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
1471 + */
1472 + if (bio) {
1473 + inc_remap_and_issue_cell(tc, m->cell, m->data_block);
1474 +- bio_endio(bio);
1475 ++ complete_overwrite_bio(tc, bio);
1476 + } else {
1477 + inc_all_io_entry(tc->pool, m->cell->holder);
1478 + remap_and_issue(tc, m->cell->holder, m->data_block);
1479 +@@ -2303,7 +2337,7 @@ static void process_deferred_bios(struct pool *pool)
1480 + {
1481 + unsigned long flags;
1482 + struct bio *bio;
1483 +- struct bio_list bios;
1484 ++ struct bio_list bios, bio_completions;
1485 + struct thin_c *tc;
1486 +
1487 + tc = get_first_thin(pool);
1488 +@@ -2314,26 +2348,36 @@ static void process_deferred_bios(struct pool *pool)
1489 + }
1490 +
1491 + /*
1492 +- * If there are any deferred flush bios, we must commit
1493 +- * the metadata before issuing them.
1494 ++ * If there are any deferred flush bios, we must commit the metadata
1495 ++ * before issuing them or signaling their completion.
1496 + */
1497 + bio_list_init(&bios);
1498 ++ bio_list_init(&bio_completions);
1499 ++
1500 + spin_lock_irqsave(&pool->lock, flags);
1501 + bio_list_merge(&bios, &pool->deferred_flush_bios);
1502 + bio_list_init(&pool->deferred_flush_bios);
1503 ++
1504 ++ bio_list_merge(&bio_completions, &pool->deferred_flush_completions);
1505 ++ bio_list_init(&pool->deferred_flush_completions);
1506 + spin_unlock_irqrestore(&pool->lock, flags);
1507 +
1508 +- if (bio_list_empty(&bios) &&
1509 ++ if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
1510 + !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
1511 + return;
1512 +
1513 + if (commit(pool)) {
1514 ++ bio_list_merge(&bios, &bio_completions);
1515 ++
1516 + while ((bio = bio_list_pop(&bios)))
1517 + bio_io_error(bio);
1518 + return;
1519 + }
1520 + pool->last_commit_jiffies = jiffies;
1521 +
1522 ++ while ((bio = bio_list_pop(&bio_completions)))
1523 ++ bio_endio(bio);
1524 ++
1525 + while ((bio = bio_list_pop(&bios)))
1526 + generic_make_request(bio);
1527 + }
1528 +@@ -2968,6 +3012,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
1529 + INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
1530 + spin_lock_init(&pool->lock);
1531 + bio_list_init(&pool->deferred_flush_bios);
1532 ++ bio_list_init(&pool->deferred_flush_completions);
1533 + INIT_LIST_HEAD(&pool->prepared_mappings);
1534 + INIT_LIST_HEAD(&pool->prepared_discards);
1535 + INIT_LIST_HEAD(&pool->prepared_discards_pt2);
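
The dm-thin hunks above batch REQ_FUA overwrite completions so that one metadata commit covers many bios. A minimal userspace sketch of that drain/commit/complete ordering, with toy list types standing in for bio_list and a stubbed commit (all names here are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

struct bio { int id; struct bio *next; };
struct bio_list { struct bio *head; };

static void list_add(struct bio_list *l, struct bio *b)
{
	b->next = l->head;
	l->head = b;
}

static struct bio *list_pop(struct bio_list *l)
{
	struct bio *b = l->head;
	if (b)
		l->head = b->next;
	return b;
}

static bool commit_metadata(void)
{
	return true;	/* stub: pretend the metadata commit succeeded */
}

/* Mirrors process_deferred_bios(): a single commit covers every batched bio. */
static void process_deferred(struct bio_list *flushes, struct bio_list *completions)
{
	struct bio *b;

	if (!commit_metadata()) {
		/* Commit failed: everything that depended on it errors out. */
		while ((b = list_pop(completions)))
			printf("bio %d: -EIO\n", b->id);
		while ((b = list_pop(flushes)))
			printf("bio %d: -EIO\n", b->id);
		return;
	}
	/* Overwrite bios already hit the data device; the commit made the
	 * mapping durable, so they can simply be completed now. */
	while ((b = list_pop(completions)))
		printf("bio %d: bio_endio\n", b->id);
	/* Flush bios still have to be issued downstream. */
	while ((b = list_pop(flushes)))
		printf("bio %d: issue\n", b->id);
}

int main(void)
{
	struct bio_list flushes = { NULL }, completions = { NULL };
	struct bio a = { 1, NULL }, c = { 2, NULL };

	list_add(&completions, &a);
	list_add(&flushes, &c);
	process_deferred(&flushes, &completions);
	return 0;
}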
1536 +diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
1537 +index c4e41c26649e..fac10c0e852c 100644
1538 +--- a/drivers/misc/eeprom/Kconfig
1539 ++++ b/drivers/misc/eeprom/Kconfig
1540 +@@ -12,7 +12,7 @@ config EEPROM_AT24
1541 + ones like at24c64, 24lc02 or fm24c04:
1542 +
1543 + 24c00, 24c01, 24c02, spd (readonly 24c02), 24c04, 24c08,
1544 +- 24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024
1545 ++ 24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024, 24c2048
1546 +
1547 + Unless you like data loss puzzles, always be sure that any chip
1548 + you configure as a 24c32 (32 kbit) or larger is NOT really a
1549 +diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
1550 +index d8a485f1798b..a37b9b6a315a 100644
1551 +--- a/drivers/misc/eeprom/at24.c
1552 ++++ b/drivers/misc/eeprom/at24.c
1553 +@@ -170,6 +170,7 @@ static const struct i2c_device_id at24_ids[] = {
1554 + { "24c256", AT24_DEVICE_MAGIC(262144 / 8, AT24_FLAG_ADDR16) },
1555 + { "24c512", AT24_DEVICE_MAGIC(524288 / 8, AT24_FLAG_ADDR16) },
1556 + { "24c1024", AT24_DEVICE_MAGIC(1048576 / 8, AT24_FLAG_ADDR16) },
1557 ++ { "24c2048", AT24_DEVICE_MAGIC(2097152 / 8, AT24_FLAG_ADDR16) },
1558 + { "at24", 0 },
1559 + { /* END OF LIST */ }
1560 + };
1561 +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1562 +index 4bc2c806eb61..eeeb4c5740bf 100644
1563 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1564 ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1565 +@@ -12979,6 +12979,24 @@ static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
1566 + struct net_device *dev,
1567 + netdev_features_t features)
1568 + {
1569 ++ /*
1570 ++ * A skb with gso_size + header length > 9700 will cause a
1571 ++ * firmware panic. Drop GSO support.
1572 ++ *
1573 ++ * Eventually the upper layer should not pass these packets down.
1574 ++ *
1575 ++ * For speed, if the gso_size is <= 9000, assume there will
1576 ++ * not be 700 bytes of headers and pass it through. Only do a
1577 ++ * full (slow) validation if the gso_size is > 9000.
1578 ++ *
1579 ++	 * (Due to the way GSO_BY_FRAGS works this will also do a full
1580 ++ * validation in that case.)
1581 ++ */
1582 ++ if (unlikely(skb_is_gso(skb) &&
1583 ++ (skb_shinfo(skb)->gso_size > 9000) &&
1584 ++ !skb_gso_validate_mac_len(skb, 9700)))
1585 ++ features &= ~NETIF_F_GSO_MASK;
1586 ++
1587 + features = vlan_features_check(skb, features);
1588 + return vxlan_features_check(skb, features);
1589 + }
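
The 9000/9700 split above is a fast-path trick: gso_size alone is a cheap lower bound on the per-segment size, so only packets that could plausibly exceed the 9700-byte firmware limit pay for the frag walk. A small sketch of the decision, with a simplified validator standing in for skb_gso_validate_mac_len() (illustrative only, not the kernel helper):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for skb_gso_validate_mac_len(); the real helper
 * walks the frag list and handles GSO_BY_FRAGS. */
static bool validate_mac_len(unsigned int gso_size, unsigned int hdr_len,
			     unsigned int limit)
{
	return gso_size + hdr_len <= limit;
}

/* Mirrors the patched bnx2x_features_check() decision. */
static bool keep_gso(unsigned int gso_size, unsigned int hdr_len)
{
	if (gso_size <= 9000)	/* fast path: headers assumed under 700 bytes */
		return true;
	return validate_mac_len(gso_size, hdr_len, 9700);
}

int main(void)
{
	printf("%d\n", keep_gso(8960, 54));	/* 1: fast path, no frag walk */
	printf("%d\n", keep_gso(9200, 400));	/* 1: 9600 <= 9700 after checking */
	printf("%d\n", keep_gso(9600, 200));	/* 0: 9800 > 9700, GSO dropped */
	return 0;
}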
1590 +diff --git a/drivers/net/usb/ch9200.c b/drivers/net/usb/ch9200.c
1591 +index 8a40202c0a17..c4f1c363e24b 100644
1592 +--- a/drivers/net/usb/ch9200.c
1593 ++++ b/drivers/net/usb/ch9200.c
1594 +@@ -254,14 +254,9 @@ static struct sk_buff *ch9200_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
1595 + tx_overhead = 0x40;
1596 +
1597 + len = skb->len;
1598 +- if (skb_headroom(skb) < tx_overhead) {
1599 +- struct sk_buff *skb2;
1600 +-
1601 +- skb2 = skb_copy_expand(skb, tx_overhead, 0, flags);
1602 ++ if (skb_cow_head(skb, tx_overhead)) {
1603 + dev_kfree_skb_any(skb);
1604 +- skb = skb2;
1605 +- if (!skb)
1606 +- return NULL;
1607 ++ return NULL;
1608 + }
1609 +
1610 + __skb_push(skb, tx_overhead);
1611 +diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
1612 +index 66b34ddbe216..72d9e7954b0a 100644
1613 +--- a/drivers/net/usb/kaweth.c
1614 ++++ b/drivers/net/usb/kaweth.c
1615 +@@ -803,18 +803,12 @@ static netdev_tx_t kaweth_start_xmit(struct sk_buff *skb,
1616 + }
1617 +
1618 + /* We now decide whether we can put our special header into the sk_buff */
1619 +- if (skb_cloned(skb) || skb_headroom(skb) < 2) {
1620 +- /* no such luck - we make our own */
1621 +- struct sk_buff *copied_skb;
1622 +- copied_skb = skb_copy_expand(skb, 2, 0, GFP_ATOMIC);
1623 +- dev_kfree_skb_irq(skb);
1624 +- skb = copied_skb;
1625 +- if (!copied_skb) {
1626 +- kaweth->stats.tx_errors++;
1627 +- netif_start_queue(net);
1628 +- spin_unlock_irq(&kaweth->device_lock);
1629 +- return NETDEV_TX_OK;
1630 +- }
1631 ++ if (skb_cow_head(skb, 2)) {
1632 ++ kaweth->stats.tx_errors++;
1633 ++ netif_start_queue(net);
1634 ++ spin_unlock_irq(&kaweth->device_lock);
1635 ++ dev_kfree_skb_any(skb);
1636 ++ return NETDEV_TX_OK;
1637 + }
1638 +
1639 + private_header = (__le16 *)__skb_push(skb, 2);
1640 +diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
1641 +index e29f4c0767eb..e719ecd69d01 100644
1642 +--- a/drivers/net/usb/smsc95xx.c
1643 ++++ b/drivers/net/usb/smsc95xx.c
1644 +@@ -2011,13 +2011,13 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
1645 + /* We do not advertise SG, so skbs should be already linearized */
1646 + BUG_ON(skb_shinfo(skb)->nr_frags);
1647 +
1648 +- if (skb_headroom(skb) < overhead) {
1649 +- struct sk_buff *skb2 = skb_copy_expand(skb,
1650 +- overhead, 0, flags);
1651 ++ /* Make writable and expand header space by overhead if required */
1652 ++ if (skb_cow_head(skb, overhead)) {
1653 ++ /* Must deallocate here as returning NULL to indicate error
1654 ++ * means the skb won't be deallocated in the caller.
1655 ++ */
1656 + dev_kfree_skb_any(skb);
1657 +- skb = skb2;
1658 +- if (!skb)
1659 +- return NULL;
1660 ++ return NULL;
1661 + }
1662 +
1663 + if (csum) {
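
The three USB network drivers above (ch9200, kaweth, smsc95xx) all replace an open-coded copy-and-expand with skb_cow_head(), which both guarantees headroom and unclones a shared skb. The subtle part is ownership: returning NULL or an error from a tx path means the caller will not free the skb, so the driver must free it itself. A toy model of the headroom contract, using plain malloc'd buffers rather than real skbs (names illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy buffer with headroom, loosely modeling an sk_buff. */
struct buf {
	unsigned char *base, *data;
	size_t len, headroom;
};

/* Models the skb_cow_head() contract used above: ensure at least `need`
 * writable bytes in front of data; returns 0 on success. */
static int buf_cow_head(struct buf *b, size_t need)
{
	unsigned char *nbase;

	if (b->headroom >= need)
		return 0;
	nbase = malloc(need + b->len);
	if (!nbase)
		return -1;	/* on failure the caller still owns the buffer */
	memcpy(nbase + need, b->data, b->len);
	free(b->base);
	b->base = nbase;
	b->data = nbase + need;
	b->headroom = need;
	return 0;
}

/* Driver-style tx fixup: push a 2-byte framing header, as kaweth does. */
static int tx_fixup(struct buf *b)
{
	if (buf_cow_head(b, 2))
		return -1;	/* real drivers call dev_kfree_skb_any() here */
	b->data -= 2;
	b->headroom -= 2;
	b->len += 2;
	b->data[0] = 0xAA;	/* illustrative header bytes */
	b->data[1] = 0x55;
	return 0;
}

int main(void)
{
	struct buf b = { 0 };

	b.base = b.data = malloc(4);
	b.len = 4;
	memcpy(b.data, "abcd", 4);
	if (tx_fixup(&b) == 0)
		printf("len=%zu first=%02x\n", b.len, b.data[0]);
	free(b.base);
	return 0;
}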
1664 +diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
1665 +index bedce3453dd3..5aa221487a9c 100644
1666 +--- a/drivers/pinctrl/qcom/pinctrl-msm.c
1667 ++++ b/drivers/pinctrl/qcom/pinctrl-msm.c
1668 +@@ -803,11 +803,24 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
1669 + return ret;
1670 + }
1671 +
1672 +- ret = gpiochip_add_pin_range(&pctrl->chip, dev_name(pctrl->dev), 0, 0, chip->ngpio);
1673 +- if (ret) {
1674 +- dev_err(pctrl->dev, "Failed to add pin range\n");
1675 +- gpiochip_remove(&pctrl->chip);
1676 +- return ret;
1677 ++ /*
1678 ++ * For DeviceTree-supported systems, the gpio core checks the
1679 ++ * pinctrl's device node for the "gpio-ranges" property.
1680 ++ * If it is present, it takes care of adding the pin ranges
1681 ++ * for the driver. In this case the driver can skip ahead.
1682 ++ *
1683 ++ * In order to remain compatible with older, existing DeviceTree
1684 ++ * files which don't set the "gpio-ranges" property or systems that
1685 ++	 * utilize ACPI, the driver has to call gpiochip_add_pin_range().
1686 ++ */
1687 ++ if (!of_property_read_bool(pctrl->dev->of_node, "gpio-ranges")) {
1688 ++ ret = gpiochip_add_pin_range(&pctrl->chip,
1689 ++ dev_name(pctrl->dev), 0, 0, chip->ngpio);
1690 ++ if (ret) {
1691 ++ dev_err(pctrl->dev, "Failed to add pin range\n");
1692 ++ gpiochip_remove(&pctrl->chip);
1693 ++ return ret;
1694 ++ }
1695 + }
1696 +
1697 + ret = gpiochip_irqchip_add(chip,
1698 +diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
1699 +index 85442edf3c49..913ebb6d0d29 100644
1700 +--- a/drivers/scsi/aic94xx/aic94xx_init.c
1701 ++++ b/drivers/scsi/aic94xx/aic94xx_init.c
1702 +@@ -281,7 +281,7 @@ static ssize_t asd_show_dev_rev(struct device *dev,
1703 + return snprintf(buf, PAGE_SIZE, "%s\n",
1704 + asd_dev_rev[asd_ha->revision_id]);
1705 + }
1706 +-static DEVICE_ATTR(revision, S_IRUGO, asd_show_dev_rev, NULL);
1707 ++static DEVICE_ATTR(aic_revision, S_IRUGO, asd_show_dev_rev, NULL);
1708 +
1709 + static ssize_t asd_show_dev_bios_build(struct device *dev,
1710 + struct device_attribute *attr,char *buf)
1711 +@@ -478,7 +478,7 @@ static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
1712 + {
1713 + int err;
1714 +
1715 +-	err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_revision);
1716 ++	err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision);
1717 + if (err)
1718 + return err;
1719 +
1720 +@@ -500,13 +500,13 @@ err_update_bios:
1721 + err_biosb:
1722 + device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
1723 + err_rev:
1724 +-	device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision);
1725 ++	device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision);
1726 + return err;
1727 + }
1728 +
1729 + static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha)
1730 + {
1731 +-	device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision);
1732 ++	device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision);
1733 + device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
1734 + device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn);
1735 + device_remove_file(&asd_ha->pcidev->dev, &dev_attr_update_bios);
1736 +diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
1737 +index 984d6aae7529..0e5435330c07 100644
1738 +--- a/drivers/usb/dwc2/hcd.c
1739 ++++ b/drivers/usb/dwc2/hcd.c
1740 +@@ -5202,7 +5202,6 @@ error3:
1741 + error2:
1742 + usb_put_hcd(hcd);
1743 + error1:
1744 +- kfree(hsotg->core_params);
1745 +
1746 + #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
1747 + kfree(hsotg->last_frame_num_array);
1748 +diff --git a/fs/cifs/file.c b/fs/cifs/file.c
1749 +index a3046b6523c8..8ec296308729 100644
1750 +--- a/fs/cifs/file.c
1751 ++++ b/fs/cifs/file.c
1752 +@@ -1126,6 +1126,10 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
1753 + return -EINVAL;
1754 + }
1755 +
1756 ++ BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1757 ++ PAGE_SIZE);
1758 ++ max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1759 ++ PAGE_SIZE);
1760 + max_num = (max_buf - sizeof(struct smb_hdr)) /
1761 + sizeof(LOCKING_ANDX_RANGE);
1762 + buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1763 +@@ -1462,6 +1466,10 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1764 + if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
1765 + return -EINVAL;
1766 +
1767 ++ BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1768 ++ PAGE_SIZE);
1769 ++ max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1770 ++ PAGE_SIZE);
1771 + max_num = (max_buf - sizeof(struct smb_hdr)) /
1772 + sizeof(LOCKING_ANDX_RANGE);
1773 + buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1774 +diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
1775 +index b7885dc0d9bb..dee5250701de 100644
1776 +--- a/fs/cifs/smb2file.c
1777 ++++ b/fs/cifs/smb2file.c
1778 +@@ -129,6 +129,8 @@ smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1779 + if (max_buf < sizeof(struct smb2_lock_element))
1780 + return -EINVAL;
1781 +
1782 ++ BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
1783 ++ max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
1784 + max_num = max_buf / sizeof(struct smb2_lock_element);
1785 + buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL);
1786 + if (!buf)
1787 +@@ -265,6 +267,8 @@ smb2_push_mandatory_locks(struct cifsFileInfo *cfile)
1788 + return -EINVAL;
1789 + }
1790 +
1791 ++ BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
1792 ++ max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
1793 + max_num = max_buf / sizeof(struct smb2_lock_element);
1794 + buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL);
1795 + if (!buf) {
1796 +diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
1797 +index 78ed8105e64d..ae8ecf821019 100644
1798 +--- a/include/linux/perf_event.h
1799 ++++ b/include/linux/perf_event.h
1800 +@@ -455,6 +455,11 @@ struct pmu {
1801 + * Filter events for PMU-specific reasons.
1802 + */
1803 + int (*filter_match) (struct perf_event *event); /* optional */
1804 ++
1805 ++ /*
1806 ++ * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
1807 ++ */
1808 ++ int (*check_period) (struct perf_event *event, u64 value); /* optional */
1809 + };
1810 +
1811 + /**
1812 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
1813 +index ed329a39d621..f8761774a94f 100644
1814 +--- a/include/linux/skbuff.h
1815 ++++ b/include/linux/skbuff.h
1816 +@@ -3102,6 +3102,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
1817 + void skb_scrub_packet(struct sk_buff *skb, bool xnet);
1818 + unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
1819 + bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu);
1820 ++bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
1821 + struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
1822 + struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
1823 + int skb_ensure_writable(struct sk_buff *skb, int write_len);
1824 +@@ -3880,6 +3881,21 @@ static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
1825 + return hdr_len + skb_gso_transport_seglen(skb);
1826 + }
1827 +
1828 ++/**
1829 ++ * skb_gso_mac_seglen - Return length of individual segments of a gso packet
1830 ++ *
1831 ++ * @skb: GSO skb
1832 ++ *
1833 ++ * skb_gso_mac_seglen is used to determine the real size of the
1834 ++ * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
1835 ++ * headers (TCP/UDP).
1836 ++ */
1837 ++static inline unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
1838 ++{
1839 ++ unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
1840 ++ return hdr_len + skb_gso_transport_seglen(skb);
1841 ++}
1842 ++
1843 + /* Local Checksum Offload.
1844 + * Compute outer checksum based on the assumption that the
1845 + * inner checksum will be offloaded later.
1846 +diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
1847 +index b02af0bf5777..66f6b84df287 100644
1848 +--- a/include/net/netfilter/nf_tables.h
1849 ++++ b/include/net/netfilter/nf_tables.h
1850 +@@ -87,6 +87,35 @@ struct nft_regs {
1851 + };
1852 + };
1853 +
1854 ++/* Store/load a u16 or u8 integer to/from the u32 data register.
1855 ++ *
1856 ++ * Note: when using concatenations, register allocation happens at the
1857 ++ * 32-bit level, so a store instruction must pad the rest of the register
1858 ++ * with zeroes to avoid garbage values.
1859 ++ */
1860 ++
1861 ++static inline void nft_reg_store16(u32 *dreg, u16 val)
1862 ++{
1863 ++ *dreg = 0;
1864 ++ *(u16 *)dreg = val;
1865 ++}
1866 ++
1867 ++static inline void nft_reg_store8(u32 *dreg, u8 val)
1868 ++{
1869 ++ *dreg = 0;
1870 ++ *(u8 *)dreg = val;
1871 ++}
1872 ++
1873 ++static inline u16 nft_reg_load16(u32 *sreg)
1874 ++{
1875 ++ return *(u16 *)sreg;
1876 ++}
1877 ++
1878 ++static inline u8 nft_reg_load8(u32 *sreg)
1879 ++{
1880 ++ return *(u8 *)sreg;
1881 ++}
1882 ++
1883 + static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
1884 + unsigned int len)
1885 + {
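
These helpers exist because nft registers are allocated in 32-bit units: on big-endian systems a plain 32-bit store of a u8/u16 value lands the value in the wrong bytes of the register, and without zeroing the remainder, a concatenation comparing the full 32 bits reads garbage. A userspace rendition of the store/load pair (memcpy replaces the kernel's pointer punning to stay strict-aliasing clean; the port value is just an example):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static inline void nft_reg_store16(uint32_t *dreg, uint16_t val)
{
	*dreg = 0;			/* zero the whole register first */
	memcpy(dreg, &val, sizeof(val));
}

static inline uint16_t nft_reg_load16(const uint32_t *sreg)
{
	uint16_t val;

	memcpy(&val, sreg, sizeof(val));
	return val;
}

int main(void)
{
	uint32_t reg = 0xdeadbeef;	/* stale contents from an earlier rule */

	nft_reg_store16(&reg, 0x1f90);	/* e.g. TCP port 8080 */
	/* The upper bytes are now zero on any endianness, so a full 32-bit
	 * register comparison (as concatenations perform) stays sane. */
	printf("reg=0x%08x load=0x%04x\n",
	       (unsigned)reg, (unsigned)nft_reg_load16(&reg));
	return 0;
}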
1886 +diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
1887 +index 659b1634de61..3d3de5e9f9cc 100644
1888 +--- a/include/uapi/linux/if_ether.h
1889 ++++ b/include/uapi/linux/if_ether.h
1890 +@@ -139,11 +139,18 @@
1891 + * This is an Ethernet frame header.
1892 + */
1893 +
1894 ++/* allow libcs like musl to deactivate this, glibc does not implement this. */
1895 ++#ifndef __UAPI_DEF_ETHHDR
1896 ++#define __UAPI_DEF_ETHHDR 1
1897 ++#endif
1898 ++
1899 ++#if __UAPI_DEF_ETHHDR
1900 + struct ethhdr {
1901 + unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
1902 + unsigned char h_source[ETH_ALEN]; /* source ether addr */
1903 + __be16 h_proto; /* packet type ID field */
1904 + } __attribute__((packed));
1905 ++#endif
1906 +
1907 +
1908 + #endif /* _UAPI_LINUX_IF_ETHER_H */
1909 +diff --git a/kernel/events/core.c b/kernel/events/core.c
1910 +index 1af0bbf20984..17339506f9f8 100644
1911 +--- a/kernel/events/core.c
1912 ++++ b/kernel/events/core.c
1913 +@@ -4600,6 +4600,11 @@ static void __perf_event_period(struct perf_event *event,
1914 + }
1915 + }
1916 +
1917 ++static int perf_event_check_period(struct perf_event *event, u64 value)
1918 ++{
1919 ++ return event->pmu->check_period(event, value);
1920 ++}
1921 ++
1922 + static int perf_event_period(struct perf_event *event, u64 __user *arg)
1923 + {
1924 + u64 value;
1925 +@@ -4616,6 +4621,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
1926 + if (event->attr.freq && value > sysctl_perf_event_sample_rate)
1927 + return -EINVAL;
1928 +
1929 ++ if (perf_event_check_period(event, value))
1930 ++ return -EINVAL;
1931 ++
1932 + event_function_call(event, __perf_event_period, &value);
1933 +
1934 + return 0;
1935 +@@ -8622,6 +8630,11 @@ static int perf_pmu_nop_int(struct pmu *pmu)
1936 + return 0;
1937 + }
1938 +
1939 ++static int perf_event_nop_int(struct perf_event *event, u64 value)
1940 ++{
1941 ++ return 0;
1942 ++}
1943 ++
1944 + static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
1945 +
1946 + static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
1947 +@@ -8944,6 +8957,9 @@ got_cpu_context:
1948 + pmu->pmu_disable = perf_pmu_nop_void;
1949 + }
1950 +
1951 ++ if (!pmu->check_period)
1952 ++ pmu->check_period = perf_event_nop_int;
1953 ++
1954 + if (!pmu->event_idx)
1955 + pmu->event_idx = perf_event_idx_default;
1956 +
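
The perf change above adds an optional check_period() hook so a PMU driver can veto period values its hardware cannot program; the core installs a no-op when the driver leaves the pointer NULL. A cut-down userspace model of that wiring (the 32-bit limit below is a hypothetical driver rule, not something from this patch):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Cut-down model of struct pmu: only the new optional callback. */
struct pmu {
	int (*check_period)(uint64_t value);	/* optional */
};

static int nop_check_period(uint64_t value)
{
	(void)value;
	return 0;	/* what the core installs when the driver has none */
}

/* Hypothetical driver rule: the counter only takes 32-bit periods. */
static int hw_check_period(uint64_t value)
{
	return value > UINT32_MAX ? -EINVAL : 0;
}

static int perf_event_period(struct pmu *pmu, uint64_t value)
{
	if (!pmu->check_period)
		pmu->check_period = nop_check_period;
	if (pmu->check_period(value))
		return -EINVAL;
	printf("period %llu accepted\n", (unsigned long long)value);
	return 0;
}

int main(void)
{
	struct pmu p = { hw_check_period };

	perf_event_period(&p, 4096);		/* accepted */
	if (perf_event_period(&p, 1ULL << 40))	/* rejected by the driver */
		printf("period rejected: -EINVAL\n");
	return 0;
}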
1957 +diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
1958 +index f4b5811ebe23..99becab2c1ce 100644
1959 +--- a/kernel/events/ring_buffer.c
1960 ++++ b/kernel/events/ring_buffer.c
1961 +@@ -700,7 +700,7 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
1962 + size = sizeof(struct ring_buffer);
1963 + size += nr_pages * sizeof(void *);
1964 +
1965 +- if (order_base_2(size) >= MAX_ORDER)
1966 ++ if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
1967 + goto fail;
1968 +
1969 + rb = kzalloc(size, GFP_KERNEL);
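
The one-liner above fixes a units mismatch: order_base_2(size) is an order in bytes, while MAX_ORDER bounds page allocations, so the earlier guard spuriously rejected ordinary ring-buffer headers; offsetting by PAGE_SHIFT compares like with like. A worked example, with typical constants assumed:

#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages, typical */
#define MAX_ORDER  11	/* common Linux default */

/* order_base_2(n): smallest x such that 2^x >= n */
static unsigned int order_base_2(unsigned long n)
{
	unsigned int x = 0;

	while ((1UL << x) < n)
		x++;
	return x;
}

int main(void)
{
	/* A ring_buffer struct for 512 data pages: ~4 KiB of page pointers. */
	unsigned long size = 64 + 512 * sizeof(void *);
	unsigned int order = order_base_2(size);	/* 13 */

	printf("bytes=%lu order=%u old_reject=%d new_reject=%d\n",
	       size, order,
	       order >= MAX_ORDER,		 /* 1: byte order vs page limit */
	       order >= PAGE_SHIFT + MAX_ORDER); /* 0: fits kzalloc's range */
	return 0;
}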
1970 +diff --git a/kernel/signal.c b/kernel/signal.c
1971 +index 798b8f495ae2..c091dcc9f19b 100644
1972 +--- a/kernel/signal.c
1973 ++++ b/kernel/signal.c
1974 +@@ -2241,9 +2241,12 @@ relock:
1975 + }
1976 +
1977 + /* Has this task already been marked for death? */
1978 +- ksig->info.si_signo = signr = SIGKILL;
1979 +- if (signal_group_exit(signal))
1980 ++ if (signal_group_exit(signal)) {
1981 ++ ksig->info.si_signo = signr = SIGKILL;
1982 ++ sigdelset(&current->pending.signal, SIGKILL);
1983 ++ recalc_sigpending();
1984 + goto fatal;
1985 ++ }
1986 +
1987 + for (;;) {
1988 + struct k_sigaction *ka;
1989 +diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
1990 +index f0ab801a6437..c6eee3d9ed00 100644
1991 +--- a/kernel/trace/trace_uprobe.c
1992 ++++ b/kernel/trace/trace_uprobe.c
1993 +@@ -150,7 +150,14 @@ static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
1994 +
1995 + ret = strncpy_from_user(dst, src, maxlen);
1996 + if (ret == maxlen)
1997 +- dst[--ret] = '\0';
1998 ++ dst[ret - 1] = '\0';
1999 ++ else if (ret >= 0)
2000 ++ /*
2001 ++ * Include the terminating null byte. In this case it
2002 ++ * was copied by strncpy_from_user but not accounted
2003 ++ * for in ret.
2004 ++ */
2005 ++ ret++;
2006 +
2007 + if (ret < 0) { /* Failed to fetch string */
2008 + ((u8 *)get_rloc_data(dest))[0] = '\0';
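
The uprobe fix above hinges on strncpy_from_user()'s return convention: it returns the copied length excluding the trailing NUL (or maxlen when the source did not fit), yet the recorded length must include the terminator or back-to-back string arguments get misparsed. A userspace model of the corrected accounting, with a local stand-in for the copy:

#include <stdio.h>
#include <string.h>

/* Userspace stand-in: like strncpy_from_user, returns the length copied
 * *excluding* the NUL, or maxlen if the source did not fit. */
static long copy_string(char *dst, const char *src, long maxlen)
{
	long n = (long)strlen(src);

	if (n >= maxlen) {
		memcpy(dst, src, maxlen);
		return maxlen;
	}
	memcpy(dst, src, n + 1);
	return n;
}

/* The patched accounting: on truncation, terminate in place; otherwise
 * count the terminator that was already copied. */
static long fetch_len(char *dst, const char *src, long maxlen)
{
	long ret = copy_string(dst, src, maxlen);

	if (ret == maxlen)
		dst[ret - 1] = '\0';
	else if (ret >= 0)
		ret++;	/* include the NUL so the next string packs right */
	return ret;
}

int main(void)
{
	char buf[8];

	printf("%ld\n", fetch_len(buf, "abc", sizeof(buf)));	    /* 4 = 3 + NUL */
	printf("%ld\n", fetch_len(buf, "abcdefghij", sizeof(buf))); /* 8, truncated */
	return 0;
}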
2009 +diff --git a/mm/memory.c b/mm/memory.c
2010 +index 35d8217bb046..47248dc0b9e1 100644
2011 +--- a/mm/memory.c
2012 ++++ b/mm/memory.c
2013 +@@ -3329,15 +3329,24 @@ static int do_fault(struct fault_env *fe)
2014 + {
2015 + struct vm_area_struct *vma = fe->vma;
2016 + pgoff_t pgoff = linear_page_index(vma, fe->address);
2017 ++ int ret;
2018 +
2019 + /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
2020 + if (!vma->vm_ops->fault)
2021 +- return VM_FAULT_SIGBUS;
2022 +- if (!(fe->flags & FAULT_FLAG_WRITE))
2023 +- return do_read_fault(fe, pgoff);
2024 +- if (!(vma->vm_flags & VM_SHARED))
2025 +- return do_cow_fault(fe, pgoff);
2026 +- return do_shared_fault(fe, pgoff);
2027 ++ ret = VM_FAULT_SIGBUS;
2028 ++ else if (!(fe->flags & FAULT_FLAG_WRITE))
2029 ++ ret = do_read_fault(fe, pgoff);
2030 ++ else if (!(vma->vm_flags & VM_SHARED))
2031 ++ ret = do_cow_fault(fe, pgoff);
2032 ++ else
2033 ++ ret = do_shared_fault(fe, pgoff);
2034 ++
2035 ++ /* preallocated pagetable is unused: free it */
2036 ++ if (fe->prealloc_pte) {
2037 ++ pte_free(vma->vm_mm, fe->prealloc_pte);
2038 ++ fe->prealloc_pte = 0;
2039 ++ }
2040 ++ return ret;
2041 + }
2042 +
2043 + static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
2044 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
2045 +index dca1fed0d7da..11501165f0df 100644
2046 +--- a/net/core/skbuff.c
2047 ++++ b/net/core/skbuff.c
2048 +@@ -4469,37 +4469,74 @@ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
2049 + EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
2050 +
2051 + /**
2052 +- * skb_gso_validate_mtu - Return in case such skb fits a given MTU
2053 ++ * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS
2054 + *
2055 +- * @skb: GSO skb
2056 +- * @mtu: MTU to validate against
2057 ++ * There are a couple of instances where we have a GSO skb, and we
2058 ++ * want to determine what size it would be after it is segmented.
2059 + *
2060 +- * skb_gso_validate_mtu validates if a given skb will fit a wanted MTU
2061 +- * once split.
2062 ++ * We might want to check:
2063 ++ * - L3+L4+payload size (e.g. IP forwarding)
2064 ++ * - L2+L3+L4+payload size (e.g. sanity check before passing to driver)
2065 ++ *
2066 ++ * This is a helper to do that correctly considering GSO_BY_FRAGS.
2067 ++ *
2068 ++ * @seg_len: The segmented length (from skb_gso_*_seglen). In the
2069 ++ * GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS].
2070 ++ *
2071 ++ * @max_len: The maximum permissible length.
2072 ++ *
2073 ++ * Returns true if the segmented length <= max length.
2074 + */
2075 +-bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu)
2076 +-{
2077 ++static inline bool skb_gso_size_check(const struct sk_buff *skb,
2078 ++ unsigned int seg_len,
2079 ++ unsigned int max_len) {
2080 + const struct skb_shared_info *shinfo = skb_shinfo(skb);
2081 + const struct sk_buff *iter;
2082 +- unsigned int hlen;
2083 +-
2084 +- hlen = skb_gso_network_seglen(skb);
2085 +
2086 + if (shinfo->gso_size != GSO_BY_FRAGS)
2087 +- return hlen <= mtu;
2088 ++ return seg_len <= max_len;
2089 +
2090 + /* Undo this so we can re-use header sizes */
2091 +- hlen -= GSO_BY_FRAGS;
2092 ++ seg_len -= GSO_BY_FRAGS;
2093 +
2094 + skb_walk_frags(skb, iter) {
2095 +- if (hlen + skb_headlen(iter) > mtu)
2096 ++ if (seg_len + skb_headlen(iter) > max_len)
2097 + return false;
2098 + }
2099 +
2100 + return true;
2101 + }
2102 ++
2103 ++/**
2104 ++ * skb_gso_validate_mtu - Return in case such skb fits a given MTU
2105 ++ *
2106 ++ * @skb: GSO skb
2107 ++ * @mtu: MTU to validate against
2108 ++ *
2109 ++ * skb_gso_validate_mtu validates if a given skb will fit a wanted MTU
2110 ++ * once split.
2111 ++ */
2112 ++bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu)
2113 ++{
2114 ++ return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);
2115 ++}
2116 + EXPORT_SYMBOL_GPL(skb_gso_validate_mtu);
2117 +
2118 ++/**
2119 ++ * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length?
2120 ++ *
2121 ++ * @skb: GSO skb
2122 ++ * @len: length to validate against
2123 ++ *
2124 ++ * skb_gso_validate_mac_len validates if a given skb will fit a wanted
2125 ++ * length once split, including L2, L3 and L4 headers and the payload.
2126 ++ */
2127 ++bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len)
2128 ++{
2129 ++ return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len);
2130 ++}
2131 ++EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
2132 ++
2133 + static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
2134 + {
2135 + int mac_len;
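
The refactor above folds the old skb_gso_validate_mtu() walk into skb_gso_size_check(), parameterized by which headers the caller counted, so the same loop now backs both the L3 (MTU) check and the new L2 (MAC length) check. A compressed model, with precomputed per-segment sizes standing in for the skb_walk_frags() loop (sizes are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Model of the shared check: every segment must fit under max_len. */
static bool gso_size_check(const unsigned int *seg_lens, int n,
			   unsigned int max_len)
{
	for (int i = 0; i < n; i++)
		if (seg_lens[i] > max_len)
			return false;
	return true;
}

int main(void)
{
	/* The L3 view (no MAC header) of three segments fits a 1500 MTU... */
	unsigned int l3[] = { 1500, 1500, 820 };
	/* ...but the L2 view of the same segments is 14 bytes bigger each. */
	unsigned int mac[] = { 1514, 1514, 834 };

	printf("validate_mtu:     %d\n", gso_size_check(l3, 3, 1500));	/* 1 */
	printf("validate_mac_len: %d\n", gso_size_check(mac, 3, 1500));	/* 0 */
	return 0;
}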
2136 +diff --git a/net/ipv4/netfilter/nft_masq_ipv4.c b/net/ipv4/netfilter/nft_masq_ipv4.c
2137 +index 51ced81b616c..dc3628a396ec 100644
2138 +--- a/net/ipv4/netfilter/nft_masq_ipv4.c
2139 ++++ b/net/ipv4/netfilter/nft_masq_ipv4.c
2140 +@@ -26,10 +26,10 @@ static void nft_masq_ipv4_eval(const struct nft_expr *expr,
2141 + memset(&range, 0, sizeof(range));
2142 + range.flags = priv->flags;
2143 + if (priv->sreg_proto_min) {
2144 +- range.min_proto.all =
2145 +- *(__be16 *)&regs->data[priv->sreg_proto_min];
2146 +- range.max_proto.all =
2147 +- *(__be16 *)&regs->data[priv->sreg_proto_max];
2148 ++ range.min_proto.all = (__force __be16)nft_reg_load16(
2149 ++ &regs->data[priv->sreg_proto_min]);
2150 ++ range.max_proto.all = (__force __be16)nft_reg_load16(
2151 ++ &regs->data[priv->sreg_proto_max]);
2152 + }
2153 + regs->verdict.code = nf_nat_masquerade_ipv4(pkt->skb, pkt->hook,
2154 + &range, pkt->out);
2155 +diff --git a/net/ipv4/netfilter/nft_redir_ipv4.c b/net/ipv4/netfilter/nft_redir_ipv4.c
2156 +index c09d4381427e..f760524e1353 100644
2157 +--- a/net/ipv4/netfilter/nft_redir_ipv4.c
2158 ++++ b/net/ipv4/netfilter/nft_redir_ipv4.c
2159 +@@ -26,10 +26,10 @@ static void nft_redir_ipv4_eval(const struct nft_expr *expr,
2160 +
2161 + memset(&mr, 0, sizeof(mr));
2162 + if (priv->sreg_proto_min) {
2163 +- mr.range[0].min.all =
2164 +- *(__be16 *)&regs->data[priv->sreg_proto_min];
2165 +- mr.range[0].max.all =
2166 +- *(__be16 *)&regs->data[priv->sreg_proto_max];
2167 ++ mr.range[0].min.all = (__force __be16)nft_reg_load16(
2168 ++ &regs->data[priv->sreg_proto_min]);
2169 ++ mr.range[0].max.all = (__force __be16)nft_reg_load16(
2170 ++ &regs->data[priv->sreg_proto_max]);
2171 + mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
2172 + }
2173 +
2174 +diff --git a/net/ipv6/netfilter/nft_masq_ipv6.c b/net/ipv6/netfilter/nft_masq_ipv6.c
2175 +index 9597ffb74077..b74a420050c4 100644
2176 +--- a/net/ipv6/netfilter/nft_masq_ipv6.c
2177 ++++ b/net/ipv6/netfilter/nft_masq_ipv6.c
2178 +@@ -27,10 +27,10 @@ static void nft_masq_ipv6_eval(const struct nft_expr *expr,
2179 + memset(&range, 0, sizeof(range));
2180 + range.flags = priv->flags;
2181 + if (priv->sreg_proto_min) {
2182 +- range.min_proto.all =
2183 +- *(__be16 *)&regs->data[priv->sreg_proto_min];
2184 +- range.max_proto.all =
2185 +- *(__be16 *)&regs->data[priv->sreg_proto_max];
2186 ++ range.min_proto.all = (__force __be16)nft_reg_load16(
2187 ++ &regs->data[priv->sreg_proto_min]);
2188 ++ range.max_proto.all = (__force __be16)nft_reg_load16(
2189 ++ &regs->data[priv->sreg_proto_max]);
2190 + }
2191 + regs->verdict.code = nf_nat_masquerade_ipv6(pkt->skb, &range, pkt->out);
2192 + }
2193 +diff --git a/net/ipv6/netfilter/nft_redir_ipv6.c b/net/ipv6/netfilter/nft_redir_ipv6.c
2194 +index aca44e89a881..7ef58e493fca 100644
2195 +--- a/net/ipv6/netfilter/nft_redir_ipv6.c
2196 ++++ b/net/ipv6/netfilter/nft_redir_ipv6.c
2197 +@@ -26,10 +26,10 @@ static void nft_redir_ipv6_eval(const struct nft_expr *expr,
2198 +
2199 + memset(&range, 0, sizeof(range));
2200 + if (priv->sreg_proto_min) {
2201 +- range.min_proto.all =
2202 +- *(__be16 *)&regs->data[priv->sreg_proto_min],
2203 +- range.max_proto.all =
2204 +- *(__be16 *)&regs->data[priv->sreg_proto_max],
2205 ++ range.min_proto.all = (__force __be16)nft_reg_load16(
2206 ++ &regs->data[priv->sreg_proto_min]);
2207 ++ range.max_proto.all = (__force __be16)nft_reg_load16(
2208 ++ &regs->data[priv->sreg_proto_max]);
2209 + range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
2210 + }
2211 +
2212 +diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
2213 +index d7b0d171172a..2b9fda71fa8b 100644
2214 +--- a/net/netfilter/nft_ct.c
2215 ++++ b/net/netfilter/nft_ct.c
2216 +@@ -77,7 +77,7 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
2217 +
2218 + switch (priv->key) {
2219 + case NFT_CT_DIRECTION:
2220 +- *dest = CTINFO2DIR(ctinfo);
2221 ++ nft_reg_store8(dest, CTINFO2DIR(ctinfo));
2222 + return;
2223 + case NFT_CT_STATUS:
2224 + *dest = ct->status;
2225 +@@ -129,10 +129,10 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
2226 + return;
2227 + }
2228 + case NFT_CT_L3PROTOCOL:
2229 +- *dest = nf_ct_l3num(ct);
2230 ++ nft_reg_store8(dest, nf_ct_l3num(ct));
2231 + return;
2232 + case NFT_CT_PROTOCOL:
2233 +- *dest = nf_ct_protonum(ct);
2234 ++ nft_reg_store8(dest, nf_ct_protonum(ct));
2235 + return;
2236 + default:
2237 + break;
2238 +@@ -149,10 +149,10 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
2239 + nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16);
2240 + return;
2241 + case NFT_CT_PROTO_SRC:
2242 +- *dest = (__force __u16)tuple->src.u.all;
2243 ++ nft_reg_store16(dest, (__force u16)tuple->src.u.all);
2244 + return;
2245 + case NFT_CT_PROTO_DST:
2246 +- *dest = (__force __u16)tuple->dst.u.all;
2247 ++ nft_reg_store16(dest, (__force u16)tuple->dst.u.all);
2248 + return;
2249 + default:
2250 + break;
2251 +diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
2252 +index 7c3395513ff0..cec8dc0e5e6f 100644
2253 +--- a/net/netfilter/nft_meta.c
2254 ++++ b/net/netfilter/nft_meta.c
2255 +@@ -45,16 +45,15 @@ void nft_meta_get_eval(const struct nft_expr *expr,
2256 + *dest = skb->len;
2257 + break;
2258 + case NFT_META_PROTOCOL:
2259 +- *dest = 0;
2260 +- *(__be16 *)dest = skb->protocol;
2261 ++ nft_reg_store16(dest, (__force u16)skb->protocol);
2262 + break;
2263 + case NFT_META_NFPROTO:
2264 +- *dest = pkt->pf;
2265 ++ nft_reg_store8(dest, pkt->pf);
2266 + break;
2267 + case NFT_META_L4PROTO:
2268 + if (!pkt->tprot_set)
2269 + goto err;
2270 +- *dest = pkt->tprot;
2271 ++ nft_reg_store8(dest, pkt->tprot);
2272 + break;
2273 + case NFT_META_PRIORITY:
2274 + *dest = skb->priority;
2275 +@@ -85,14 +84,12 @@ void nft_meta_get_eval(const struct nft_expr *expr,
2276 + case NFT_META_IIFTYPE:
2277 + if (in == NULL)
2278 + goto err;
2279 +- *dest = 0;
2280 +- *(u16 *)dest = in->type;
2281 ++ nft_reg_store16(dest, in->type);
2282 + break;
2283 + case NFT_META_OIFTYPE:
2284 + if (out == NULL)
2285 + goto err;
2286 +- *dest = 0;
2287 +- *(u16 *)dest = out->type;
2288 ++ nft_reg_store16(dest, out->type);
2289 + break;
2290 + case NFT_META_SKUID:
2291 + sk = skb_to_full_sk(skb);
2292 +@@ -142,22 +139,22 @@ void nft_meta_get_eval(const struct nft_expr *expr,
2293 + #endif
2294 + case NFT_META_PKTTYPE:
2295 + if (skb->pkt_type != PACKET_LOOPBACK) {
2296 +- *dest = skb->pkt_type;
2297 ++ nft_reg_store8(dest, skb->pkt_type);
2298 + break;
2299 + }
2300 +
2301 + switch (pkt->pf) {
2302 + case NFPROTO_IPV4:
2303 + if (ipv4_is_multicast(ip_hdr(skb)->daddr))
2304 +- *dest = PACKET_MULTICAST;
2305 ++ nft_reg_store8(dest, PACKET_MULTICAST);
2306 + else
2307 +- *dest = PACKET_BROADCAST;
2308 ++ nft_reg_store8(dest, PACKET_BROADCAST);
2309 + break;
2310 + case NFPROTO_IPV6:
2311 + if (ipv6_hdr(skb)->daddr.s6_addr[0] == 0xFF)
2312 +- *dest = PACKET_MULTICAST;
2313 ++ nft_reg_store8(dest, PACKET_MULTICAST);
2314 + else
2315 +- *dest = PACKET_BROADCAST;
2316 ++ nft_reg_store8(dest, PACKET_BROADCAST);
2317 + break;
2318 + case NFPROTO_NETDEV:
2319 + switch (skb->protocol) {
2320 +@@ -171,14 +168,14 @@ void nft_meta_get_eval(const struct nft_expr *expr,
2321 + goto err;
2322 +
2323 + if (ipv4_is_multicast(iph->daddr))
2324 +- *dest = PACKET_MULTICAST;
2325 ++ nft_reg_store8(dest, PACKET_MULTICAST);
2326 + else
2327 +- *dest = PACKET_BROADCAST;
2328 ++ nft_reg_store8(dest, PACKET_BROADCAST);
2329 +
2330 + break;
2331 + }
2332 + case htons(ETH_P_IPV6):
2333 +- *dest = PACKET_MULTICAST;
2334 ++ nft_reg_store8(dest, PACKET_MULTICAST);
2335 + break;
2336 + default:
2337 + WARN_ON_ONCE(1);
2338 +@@ -233,7 +230,9 @@ void nft_meta_set_eval(const struct nft_expr *expr,
2339 + {
2340 + const struct nft_meta *meta = nft_expr_priv(expr);
2341 + struct sk_buff *skb = pkt->skb;
2342 +- u32 value = regs->data[meta->sreg];
2343 ++ u32 *sreg = &regs->data[meta->sreg];
2344 ++ u32 value = *sreg;
2345 ++ u8 pkt_type;
2346 +
2347 + switch (meta->key) {
2348 + case NFT_META_MARK:
2349 +@@ -243,9 +242,12 @@ void nft_meta_set_eval(const struct nft_expr *expr,
2350 + skb->priority = value;
2351 + break;
2352 + case NFT_META_PKTTYPE:
2353 +- if (skb->pkt_type != value &&
2354 +- skb_pkt_type_ok(value) && skb_pkt_type_ok(skb->pkt_type))
2355 +- skb->pkt_type = value;
2356 ++ pkt_type = nft_reg_load8(sreg);
2357 ++
2358 ++ if (skb->pkt_type != pkt_type &&
2359 ++ skb_pkt_type_ok(pkt_type) &&
2360 ++ skb_pkt_type_ok(skb->pkt_type))
2361 ++ skb->pkt_type = pkt_type;
2362 + break;
2363 + case NFT_META_NFTRACE:
2364 + skb->nf_trace = !!value;
2365 +diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
2366 +index ee2d71753746..4c48e9bb21e2 100644
2367 +--- a/net/netfilter/nft_nat.c
2368 ++++ b/net/netfilter/nft_nat.c
2369 +@@ -65,10 +65,10 @@ static void nft_nat_eval(const struct nft_expr *expr,
2370 + }
2371 +
2372 + if (priv->sreg_proto_min) {
2373 +- range.min_proto.all =
2374 +- *(__be16 *)&regs->data[priv->sreg_proto_min];
2375 +- range.max_proto.all =
2376 +- *(__be16 *)&regs->data[priv->sreg_proto_max];
2377 ++ range.min_proto.all = (__force __be16)nft_reg_load16(
2378 ++ &regs->data[priv->sreg_proto_min]);
2379 ++ range.max_proto.all = (__force __be16)nft_reg_load16(
2380 ++ &regs->data[priv->sreg_proto_max]);
2381 + range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
2382 + }
2383 +
2384 +diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
2385 +index b3f7980b0f27..d646aa770ac8 100644
2386 +--- a/net/sched/sch_tbf.c
2387 ++++ b/net/sched/sch_tbf.c
2388 +@@ -142,16 +142,6 @@ static u64 psched_ns_t2l(const struct psched_ratecfg *r,
2389 + return len;
2390 + }
2391 +
2392 +-/*
2393 +- * Return length of individual segments of a gso packet,
2394 +- * including all headers (MAC, IP, TCP/UDP)
2395 +- */
2396 +-static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
2397 +-{
2398 +- unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
2399 +- return hdr_len + skb_gso_transport_seglen(skb);
2400 +-}
2401 +-
2402 + /* GSO packet is too big, segment it so that tbf can transmit
2403 + * each segment in time
2404 + */
2405 +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
2406 +index ba9cd75e4c98..447b3a8a83c3 100644
2407 +--- a/sound/pci/hda/patch_conexant.c
2408 ++++ b/sound/pci/hda/patch_conexant.c
2409 +@@ -854,6 +854,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
2410 + SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
2411 + SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
2412 + SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
2413 ++ SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
2414 + SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
2415 + SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
2416 + SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
2417 +diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
2418 +index e6ac7b9b4648..497bad9f2789 100644
2419 +--- a/sound/usb/pcm.c
2420 ++++ b/sound/usb/pcm.c
2421 +@@ -313,6 +313,9 @@ static int search_roland_implicit_fb(struct usb_device *dev, int ifnum,
2422 + return 0;
2423 + }
2424 +
2425 ++/* Setup an implicit feedback endpoint from a quirk. Returns 0 if no quirk
2426 ++ * applies. Returns 1 if a quirk was found.
2427 ++ */
2428 + static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
2429 + struct usb_device *dev,
2430 + struct usb_interface_descriptor *altsd,
2431 +@@ -391,7 +394,7 @@ add_sync_ep:
2432 +
2433 + subs->data_endpoint->sync_master = subs->sync_endpoint;
2434 +
2435 +- return 0;
2436 ++ return 1;
2437 + }
2438 +
2439 + static int set_sync_endpoint(struct snd_usb_substream *subs,
2440 +@@ -430,6 +433,10 @@ static int set_sync_endpoint(struct snd_usb_substream *subs,
2441 + if (err < 0)
2442 + return err;
2443 +
2444 ++ /* endpoint set by quirk */
2445 ++ if (err > 0)
2446 ++ return 0;
2447 ++
2448 + if (altsd->bNumEndpoints < 2)
2449 + return 0;
2450 +
2451 +diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
2452 +index 046a4850e3df..ff32ca1d81ff 100644
2453 +--- a/tools/perf/util/unwind-libdw.c
2454 ++++ b/tools/perf/util/unwind-libdw.c
2455 +@@ -231,7 +231,7 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
2456 +
2457 + err = dwfl_getthread_frames(ui->dwfl, thread->tid, frame_callback, ui);
2458 +
2459 +- if (err && !ui->max_stack)
2460 ++ if (err && ui->max_stack != max_stack)
2461 + err = 0;
2462 +
2463 + /*