From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Wed, 20 Feb 2019 11:17:56
Message-Id: 1550661446.04bedcff79ab8090728a041e9acff364903c4239.mpagano@gentoo
commit: 04bedcff79ab8090728a041e9acff364903c4239
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Feb 20 11:17:26 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Feb 20 11:17:26 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=04bedcff

proj/linux-patches: Linux patch 4.14.102

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1101_linux-4.14.102.patch | 2404 +++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 2408 insertions(+)

diff --git a/0000_README b/0000_README
index 62f0745..439bffe 100644
--- a/0000_README
+++ b/0000_README
@@ -447,6 +447,10 @@ Patch: 1100_4.14.101.patch
From: http://www.kernel.org
Desc: Linux 4.14.101

+Patch: 1101_4.14.102.patch
+From: http://www.kernel.org
+Desc: Linux 4.14.102
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1101_linux-4.14.102.patch b/1101_linux-4.14.102.patch
new file mode 100644
index 0000000..56ab062
--- /dev/null
+++ b/1101_linux-4.14.102.patch
@@ -0,0 +1,2404 @@
+diff --git a/Documentation/devicetree/bindings/eeprom/eeprom.txt b/Documentation/devicetree/bindings/eeprom/eeprom.txt
+index afc04589eadf..3c9a822d576c 100644
+--- a/Documentation/devicetree/bindings/eeprom/eeprom.txt
++++ b/Documentation/devicetree/bindings/eeprom/eeprom.txt
+@@ -6,7 +6,8 @@ Required properties:
+
+ "atmel,24c00", "atmel,24c01", "atmel,24c02", "atmel,24c04",
+ "atmel,24c08", "atmel,24c16", "atmel,24c32", "atmel,24c64",
+- "atmel,24c128", "atmel,24c256", "atmel,24c512", "atmel,24c1024"
++ "atmel,24c128", "atmel,24c256", "atmel,24c512", "atmel,24c1024",
++ "atmel,24c2048"
+
+ "catalyst,24c32"
+
+@@ -23,7 +24,7 @@ Required properties:
+ device with <type> and manufacturer "atmel" should be used.
+ Possible types are:
+ "24c00", "24c01", "24c02", "24c04", "24c08", "24c16", "24c32", "24c64",
+- "24c128", "24c256", "24c512", "24c1024", "spd"
++ "24c128", "24c256", "24c512", "24c1024", "24c2048", "spd"
+
+ - reg : the I2C address of the EEPROM
+
+diff --git a/Makefile b/Makefile
+index d5b20b618517..837059a07bb3 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 101
++SUBLEVEL = 102
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+diff --git a/arch/alpha/include/asm/irq.h b/arch/alpha/include/asm/irq.h
+index 4d17cacd1462..432402c8e47f 100644
+--- a/arch/alpha/include/asm/irq.h
++++ b/arch/alpha/include/asm/irq.h
+@@ -56,15 +56,15 @@
+
+ #elif defined(CONFIG_ALPHA_DP264) || \
+ defined(CONFIG_ALPHA_LYNX) || \
+- defined(CONFIG_ALPHA_SHARK) || \
+- defined(CONFIG_ALPHA_EIGER)
++ defined(CONFIG_ALPHA_SHARK)
+ # define NR_IRQS 64
+
+ #elif defined(CONFIG_ALPHA_TITAN)
+ #define NR_IRQS 80
+
+ #elif defined(CONFIG_ALPHA_RAWHIDE) || \
+- defined(CONFIG_ALPHA_TAKARA)
++ defined(CONFIG_ALPHA_TAKARA) || \
++ defined(CONFIG_ALPHA_EIGER)
+ # define NR_IRQS 128
+
+ #elif defined(CONFIG_ALPHA_WILDFIRE)
+diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
+index cd3c572ee912..e9392302c5da 100644
+--- a/arch/alpha/mm/fault.c
++++ b/arch/alpha/mm/fault.c
+@@ -78,7 +78,7 @@ __load_new_mm_context(struct mm_struct *next_mm)
+ /* Macro for exception fixup code to access integer registers. */
+ #define dpf_reg(r) \
+ (((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \
+- (r) <= 18 ? (r)+8 : (r)-10])
++ (r) <= 18 ? (r)+10 : (r)-10])
+
+ asmlinkage void
+ do_page_fault(unsigned long address, unsigned long mmcsr,
+diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts
+index c75507922f7d..f5902bd1a972 100644
+--- a/arch/arm/boot/dts/da850-evm.dts
++++ b/arch/arm/boot/dts/da850-evm.dts
+@@ -169,7 +169,7 @@
+
+ sound {
+ compatible = "simple-audio-card";
+- simple-audio-card,name = "DA850/OMAP-L138 EVM";
++ simple-audio-card,name = "DA850-OMAPL138 EVM";
+ simple-audio-card,widgets =
+ "Line", "Line In",
+ "Line", "Line Out";
+diff --git a/arch/arm/boot/dts/da850-lcdk.dts b/arch/arm/boot/dts/da850-lcdk.dts
+index a0f0916156e6..c9d4cb212b72 100644
+--- a/arch/arm/boot/dts/da850-lcdk.dts
++++ b/arch/arm/boot/dts/da850-lcdk.dts
+@@ -28,7 +28,7 @@
+
+ sound {
+ compatible = "simple-audio-card";
+- simple-audio-card,name = "DA850/OMAP-L138 LCDK";
++ simple-audio-card,name = "DA850-OMAPL138 LCDK";
+ simple-audio-card,widgets =
+ "Line", "Line In",
+ "Line", "Line Out";
+diff --git a/arch/arm/boot/dts/kirkwood-dnskw.dtsi b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
+index cbaf06f2f78e..eb917462b219 100644
+--- a/arch/arm/boot/dts/kirkwood-dnskw.dtsi
++++ b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
+@@ -36,8 +36,8 @@
+ compatible = "gpio-fan";
+ pinctrl-0 = <&pmx_fan_high_speed &pmx_fan_low_speed>;
+ pinctrl-names = "default";
+- gpios = <&gpio1 14 GPIO_ACTIVE_LOW
+- &gpio1 13 GPIO_ACTIVE_LOW>;
++ gpios = <&gpio1 14 GPIO_ACTIVE_HIGH
++ &gpio1 13 GPIO_ACTIVE_HIGH>;
+ gpio-fan,speed-map = <0 0
+ 3000 1
+ 6000 2>;
+diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
+index b17ee03d280b..88286dd483ff 100644
+--- a/arch/arm/include/asm/assembler.h
++++ b/arch/arm/include/asm/assembler.h
+@@ -467,6 +467,17 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
+ #endif
+ .endm
+
++ .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
++#ifdef CONFIG_CPU_SPECTRE
++ sub \tmp, \limit, #1
++ subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr
++ addhs \tmp, \tmp, #1 @ if (tmp >= 0) {
++ subhss \tmp, \tmp, \size @ tmp = limit - (addr + size) }
++ movlo \addr, #0 @ if (tmp < 0) addr = NULL
++ csdb
++#endif
++ .endm
++
+ .macro uaccess_disable, tmp, isb=1
+ #ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ /*
+diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
+index 3379c2c684c2..25d523185c6a 100644
+--- a/arch/arm/include/asm/cputype.h
++++ b/arch/arm/include/asm/cputype.h
+@@ -107,6 +107,7 @@
+ #define ARM_CPU_PART_SCORPION 0x510002d0
+
+ extern unsigned int processor_id;
++struct proc_info_list *lookup_processor(u32 midr);
+
+ #ifdef CONFIG_CPU_CP15
+ #define read_cpuid(reg) \
+diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
+index e25f4392e1b2..e1b6f280ab08 100644
+--- a/arch/arm/include/asm/proc-fns.h
++++ b/arch/arm/include/asm/proc-fns.h
+@@ -23,7 +23,7 @@ struct mm_struct;
+ /*
+ * Don't change this structure - ASM code relies on it.
+ */
+-extern struct processor {
++struct processor {
+ /* MISC
+ * get data abort address/flags
+ */
+@@ -79,9 +79,13 @@ extern struct processor {
+ unsigned int suspend_size;
+ void (*do_suspend)(void *);
+ void (*do_resume)(void *);
+-} processor;
++};
+
+ #ifndef MULTI_CPU
++static inline void init_proc_vtable(const struct processor *p)
++{
++}
++
+ extern void cpu_proc_init(void);
+ extern void cpu_proc_fin(void);
+ extern int cpu_do_idle(void);
+@@ -98,17 +102,50 @@ extern void cpu_reset(unsigned long addr, bool hvc) __attribute__((noreturn));
+ extern void cpu_do_suspend(void *);
+ extern void cpu_do_resume(void *);
+ #else
+-#define cpu_proc_init processor._proc_init
+-#define cpu_proc_fin processor._proc_fin
+-#define cpu_reset processor.reset
+-#define cpu_do_idle processor._do_idle
+-#define cpu_dcache_clean_area processor.dcache_clean_area
+-#define cpu_set_pte_ext processor.set_pte_ext
+-#define cpu_do_switch_mm processor.switch_mm
+
+-/* These three are private to arch/arm/kernel/suspend.c */
+-#define cpu_do_suspend processor.do_suspend
+-#define cpu_do_resume processor.do_resume
++extern struct processor processor;
++#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
++#include <linux/smp.h>
++/*
++ * This can't be a per-cpu variable because we need to access it before
++ * per-cpu has been initialised. We have a couple of functions that are
++ * called in a pre-emptible context, and so can't use smp_processor_id()
++ * there, hence PROC_TABLE(). We insist in init_proc_vtable() that the
++ * function pointers for these are identical across all CPUs.
++ */
++extern struct processor *cpu_vtable[];
++#define PROC_VTABLE(f) cpu_vtable[smp_processor_id()]->f
++#define PROC_TABLE(f) cpu_vtable[0]->f
++static inline void init_proc_vtable(const struct processor *p)
++{
++ unsigned int cpu = smp_processor_id();
++ *cpu_vtable[cpu] = *p;
++ WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area !=
++ cpu_vtable[0]->dcache_clean_area);
++ WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext !=
++ cpu_vtable[0]->set_pte_ext);
++}
++#else
++#define PROC_VTABLE(f) processor.f
++#define PROC_TABLE(f) processor.f
++static inline void init_proc_vtable(const struct processor *p)
++{
++ processor = *p;
++}
++#endif
++
++#define cpu_proc_init PROC_VTABLE(_proc_init)
++#define cpu_check_bugs PROC_VTABLE(check_bugs)
++#define cpu_proc_fin PROC_VTABLE(_proc_fin)
++#define cpu_reset PROC_VTABLE(reset)
++#define cpu_do_idle PROC_VTABLE(_do_idle)
++#define cpu_dcache_clean_area PROC_TABLE(dcache_clean_area)
++#define cpu_set_pte_ext PROC_TABLE(set_pte_ext)
++#define cpu_do_switch_mm PROC_VTABLE(switch_mm)
++
++/* These two are private to arch/arm/kernel/suspend.c */
++#define cpu_do_suspend PROC_VTABLE(do_suspend)
++#define cpu_do_resume PROC_VTABLE(do_resume)
+ #endif
+
+ extern void cpu_resume(void);
+diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
+index 57d2ad9c75ca..df8420672c7e 100644
+--- a/arch/arm/include/asm/thread_info.h
++++ b/arch/arm/include/asm/thread_info.h
+@@ -124,8 +124,8 @@ extern void vfp_flush_hwstate(struct thread_info *);
+ struct user_vfp;
+ struct user_vfp_exc;
+
+-extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
+- struct user_vfp_exc __user *);
++extern int vfp_preserve_user_clear_hwstate(struct user_vfp *,
++ struct user_vfp_exc *);
+ extern int vfp_restore_user_hwstate(struct user_vfp *,
+ struct user_vfp_exc *);
+ #endif
+diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
+index 4140be431087..a5807b67ca8a 100644
+--- a/arch/arm/include/asm/uaccess.h
++++ b/arch/arm/include/asm/uaccess.h
+@@ -69,6 +69,14 @@ extern int __put_user_bad(void);
+ static inline void set_fs(mm_segment_t fs)
+ {
+ current_thread_info()->addr_limit = fs;
++
++ /*
++ * Prevent a mispredicted conditional call to set_fs from forwarding
++ * the wrong address limit to access_ok under speculation.
++ */
++ dsb(nsh);
++ isb();
++
+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
+ }
+
+@@ -91,6 +99,32 @@ static inline void set_fs(mm_segment_t fs)
+ #define __inttype(x) \
+ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+
++/*
++ * Sanitise a uaccess pointer such that it becomes NULL if addr+size
++ * is above the current addr_limit.
++ */
++#define uaccess_mask_range_ptr(ptr, size) \
++ ((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
++static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
++ size_t size)
++{
++ void __user *safe_ptr = (void __user *)ptr;
++ unsigned long tmp;
++
++ asm volatile(
++ " sub %1, %3, #1\n"
++ " subs %1, %1, %0\n"
++ " addhs %1, %1, #1\n"
++ " subhss %1, %1, %2\n"
++ " movlo %0, #0\n"
++ : "+r" (safe_ptr), "=&r" (tmp)
++ : "r" (size), "r" (current_thread_info()->addr_limit)
++ : "cc");
++
++ csdb();
++ return safe_ptr;
++}
++
+ /*
+ * Single-value transfer routines. They automatically use the right
+ * size if we just have the right pointer type. Note that the functions
+@@ -362,6 +396,14 @@ do { \
+ __pu_err; \
+ })
+
++#ifdef CONFIG_CPU_SPECTRE
++/*
++ * When mitigating Spectre variant 1.1, all accessors need to include
++ * verification of the address space.
++ */
++#define __put_user(x, ptr) put_user(x, ptr)
++
++#else
+ #define __put_user(x, ptr) \
+ ({ \
+ long __pu_err = 0; \
+@@ -369,12 +411,6 @@ do { \
+ __pu_err; \
+ })
+
+-#define __put_user_error(x, ptr, err) \
+-({ \
+- __put_user_switch((x), (ptr), (err), __put_user_nocheck); \
+- (void) 0; \
+-})
+-
+ #define __put_user_nocheck(x, __pu_ptr, __err, __size) \
+ do { \
+ unsigned long __pu_addr = (unsigned long)__pu_ptr; \
+@@ -454,6 +490,7 @@ do { \
+ : "r" (x), "i" (-EFAULT) \
+ : "cc")
+
++#endif /* !CONFIG_CPU_SPECTRE */
+
+ #ifdef CONFIG_MMU
+ extern unsigned long __must_check
+diff --git a/arch/arm/kernel/bugs.c b/arch/arm/kernel/bugs.c
+index 7be511310191..d41d3598e5e5 100644
+--- a/arch/arm/kernel/bugs.c
++++ b/arch/arm/kernel/bugs.c
+@@ -6,8 +6,8 @@
+ void check_other_bugs(void)
+ {
+ #ifdef MULTI_CPU
+- if (processor.check_bugs)
+- processor.check_bugs();
++ if (cpu_check_bugs)
++ cpu_check_bugs();
+ #endif
+ }
+
+diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
+index 8733012d231f..7e662bdd5cb3 100644
+--- a/arch/arm/kernel/head-common.S
++++ b/arch/arm/kernel/head-common.S
+@@ -122,6 +122,9 @@ __mmap_switched_data:
+ .long init_thread_union + THREAD_START_SP @ sp
+ .size __mmap_switched_data, . - __mmap_switched_data
+
++ __FINIT
++ .text
++
+ /*
+ * This provides a C-API version of __lookup_processor_type
+ */
+@@ -133,9 +136,6 @@ ENTRY(lookup_processor_type)
+ ldmfd sp!, {r4 - r6, r9, pc}
+ ENDPROC(lookup_processor_type)
+
+- __FINIT
+- .text
+-
+ /*
+ * Read processor ID register (CP#15, CR0), and look up in the linker-built
+ * supported processor list. Note that we can't use the absolute addresses
+diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
+index 8e9a3e40d949..a6d27284105a 100644
+--- a/arch/arm/kernel/setup.c
++++ b/arch/arm/kernel/setup.c
+@@ -115,6 +115,11 @@ EXPORT_SYMBOL(elf_hwcap2);
+
+ #ifdef MULTI_CPU
+ struct processor processor __ro_after_init;
++#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
++struct processor *cpu_vtable[NR_CPUS] = {
++ [0] = &processor,
++};
++#endif
+ #endif
+ #ifdef MULTI_TLB
+ struct cpu_tlb_fns cpu_tlb __ro_after_init;
+@@ -667,28 +672,33 @@ static void __init smp_build_mpidr_hash(void)
+ }
+ #endif
+
+-static void __init setup_processor(void)
++/*
++ * locate processor in the list of supported processor types. The linker
++ * builds this table for us from the entries in arch/arm/mm/proc-*.S
++ */
++struct proc_info_list *lookup_processor(u32 midr)
+ {
+- struct proc_info_list *list;
++ struct proc_info_list *list = lookup_processor_type(midr);
+
+- /*
+- * locate processor in the list of supported processor
+- * types. The linker builds this table for us from the
+- * entries in arch/arm/mm/proc-*.S
+- */
+- list = lookup_processor_type(read_cpuid_id());
+ if (!list) {
+- pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
+- read_cpuid_id());
+- while (1);
++ pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
++ smp_processor_id(), midr);
++ while (1)
++ /* can't use cpu_relax() here as it may require MMU setup */;
+ }
+
++ return list;
++}
++
++static void __init setup_processor(void)
++{
++ unsigned int midr = read_cpuid_id();
++ struct proc_info_list *list = lookup_processor(midr);
++
+ cpu_name = list->cpu_name;
+ __cpu_architecture = __get_cpu_architecture();
+
+-#ifdef MULTI_CPU
+- processor = *list->proc;
+-#endif
++ init_proc_vtable(list->proc);
+ #ifdef MULTI_TLB
+ cpu_tlb = *list->tlb;
+ #endif
+@@ -700,7 +710,7 @@ static void __init setup_processor(void)
+ #endif
+
+ pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
+- cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
++ list->cpu_name, midr, midr & 15,
+ proc_arch[cpu_architecture()], get_cr());
+
+ snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
+diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
+index cdfe52b15a0a..02e6b6dfffa7 100644
+--- a/arch/arm/kernel/signal.c
++++ b/arch/arm/kernel/signal.c
+@@ -76,8 +76,6 @@ static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
+ kframe->magic = IWMMXT_MAGIC;
+ kframe->size = IWMMXT_STORAGE_SIZE;
+ iwmmxt_task_copy(current_thread_info(), &kframe->storage);
+-
+- err = __copy_to_user(frame, kframe, sizeof(*frame));
+ } else {
+ /*
+ * For bug-compatibility with older kernels, some space
+@@ -85,10 +83,14 @@ static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
+ * Set the magic and size appropriately so that properly
+ * written userspace can skip it reliably:
+ */
+- __put_user_error(DUMMY_MAGIC, &frame->magic, err);
+- __put_user_error(IWMMXT_STORAGE_SIZE, &frame->size, err);
++ *kframe = (struct iwmmxt_sigframe) {
++ .magic = DUMMY_MAGIC,
++ .size = IWMMXT_STORAGE_SIZE,
++ };
+ }
+
++ err = __copy_to_user(frame, kframe, sizeof(*kframe));
++
+ return err;
+ }
+
+@@ -134,17 +136,18 @@ static int restore_iwmmxt_context(char __user **auxp)
+
+ static int preserve_vfp_context(struct vfp_sigframe __user *frame)
+ {
+- const unsigned long magic = VFP_MAGIC;
+- const unsigned long size = VFP_STORAGE_SIZE;
++ struct vfp_sigframe kframe;
+ int err = 0;
+
+- __put_user_error(magic, &frame->magic, err);
+- __put_user_error(size, &frame->size, err);
++ memset(&kframe, 0, sizeof(kframe));
++ kframe.magic = VFP_MAGIC;
++ kframe.size = VFP_STORAGE_SIZE;
+
++ err = vfp_preserve_user_clear_hwstate(&kframe.ufp, &kframe.ufp_exc);
+ if (err)
+- return -EFAULT;
++ return err;
+
+- return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
++ return __copy_to_user(frame, &kframe, sizeof(kframe));
+ }
+
+ static int restore_vfp_context(char __user **auxp)
+@@ -296,30 +299,35 @@ static int
+ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
+ {
+ struct aux_sigframe __user *aux;
++ struct sigcontext context;
+ int err = 0;
+
+- __put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
+- __put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
+- __put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
+- __put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
+- __put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
+- __put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
+- __put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
+- __put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
+- __put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
+- __put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
+- __put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
+- __put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
+- __put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
+- __put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
+- __put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
+- __put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
+- __put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
+-
+- __put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
+- __put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
+- __put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
+- __put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);
++ context = (struct sigcontext) {
++ .arm_r0 = regs->ARM_r0,
++ .arm_r1 = regs->ARM_r1,
++ .arm_r2 = regs->ARM_r2,
++ .arm_r3 = regs->ARM_r3,
++ .arm_r4 = regs->ARM_r4,
++ .arm_r5 = regs->ARM_r5,
++ .arm_r6 = regs->ARM_r6,
++ .arm_r7 = regs->ARM_r7,
++ .arm_r8 = regs->ARM_r8,
++ .arm_r9 = regs->ARM_r9,
++ .arm_r10 = regs->ARM_r10,
++ .arm_fp = regs->ARM_fp,
++ .arm_ip = regs->ARM_ip,
++ .arm_sp = regs->ARM_sp,
++ .arm_lr = regs->ARM_lr,
++ .arm_pc = regs->ARM_pc,
++ .arm_cpsr = regs->ARM_cpsr,
++
++ .trap_no = current->thread.trap_no,
++ .error_code = current->thread.error_code,
++ .fault_address = current->thread.address,
++ .oldmask = set->sig[0],
++ };
++
++ err |= __copy_to_user(&sf->uc.uc_mcontext, &context, sizeof(context));
+
+ err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
+
+@@ -336,7 +344,7 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
+ if (err == 0)
+ err |= preserve_vfp_context(&aux->vfp);
+ #endif
+- __put_user_error(0, &aux->end_magic, err);
++ err |= __put_user(0, &aux->end_magic);
+
+ return err;
+ }
+@@ -468,7 +476,7 @@ setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
+ /*
+ * Set uc.uc_flags to a value which sc.trap_no would never have.
+ */
+- __put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);
++ err = __put_user(0x5ac3c35a, &frame->uc.uc_flags);
+
+ err |= setup_sigframe(frame, regs, set);
+ if (err == 0)
+@@ -488,8 +496,8 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
+
+ err |= copy_siginfo_to_user(&frame->info, &ksig->info);
+
+- __put_user_error(0, &frame->sig.uc.uc_flags, err);
+- __put_user_error(NULL, &frame->sig.uc.uc_link, err);
++ err |= __put_user(0, &frame->sig.uc.uc_flags);
++ err |= __put_user(NULL, &frame->sig.uc.uc_link);
+
+ err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp);
+ err |= setup_sigframe(&frame->sig, regs, set);
+diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
+index 5e31c62127a0..f57333f46242 100644
+--- a/arch/arm/kernel/smp.c
++++ b/arch/arm/kernel/smp.c
+@@ -42,6 +42,7 @@
+ #include <asm/mmu_context.h>
+ #include <asm/pgtable.h>
+ #include <asm/pgalloc.h>
++#include <asm/procinfo.h>
+ #include <asm/processor.h>
+ #include <asm/sections.h>
+ #include <asm/tlbflush.h>
+@@ -102,6 +103,30 @@ static unsigned long get_arch_pgd(pgd_t *pgd)
+ #endif
+ }
+
++#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
++static int secondary_biglittle_prepare(unsigned int cpu)
++{
++ if (!cpu_vtable[cpu])
++ cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);
++
++ return cpu_vtable[cpu] ? 0 : -ENOMEM;
++}
++
++static void secondary_biglittle_init(void)
++{
++ init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
++}
++#else
++static int secondary_biglittle_prepare(unsigned int cpu)
++{
++ return 0;
++}
++
++static void secondary_biglittle_init(void)
++{
++}
++#endif
++
+ int __cpu_up(unsigned int cpu, struct task_struct *idle)
+ {
+ int ret;
+@@ -109,6 +134,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
+ if (!smp_ops.smp_boot_secondary)
+ return -ENOSYS;
+
++ ret = secondary_biglittle_prepare(cpu);
++ if (ret)
++ return ret;
++
+ /*
+ * We need to tell the secondary core where to find
+ * its stack and the page tables.
+@@ -360,6 +389,8 @@ asmlinkage void secondary_start_kernel(void)
+ struct mm_struct *mm = &init_mm;
+ unsigned int cpu;
+
++ secondary_biglittle_init();
++
+ /*
+ * The identity mapping is uncached (strongly ordered), so
+ * switch away from it before attempting any exclusive accesses.
+diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
+index 4abe4909417f..a87684532327 100644
+--- a/arch/arm/kernel/sys_oabi-compat.c
++++ b/arch/arm/kernel/sys_oabi-compat.c
+@@ -277,6 +277,7 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
+ int maxevents, int timeout)
+ {
+ struct epoll_event *kbuf;
++ struct oabi_epoll_event e;
+ mm_segment_t fs;
+ long ret, err, i;
+
+@@ -295,8 +296,11 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
+ set_fs(fs);
+ err = 0;
+ for (i = 0; i < ret; i++) {
+- __put_user_error(kbuf[i].events, &events->events, err);
+- __put_user_error(kbuf[i].data, &events->data, err);
++ e.events = kbuf[i].events;
++ e.data = kbuf[i].data;
++ err = __copy_to_user(events, &e, sizeof(e));
++ if (err)
++ break;
+ events++;
+ }
+ kfree(kbuf);
+diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
+index a826df3d3814..6709a8d33963 100644
+--- a/arch/arm/lib/copy_from_user.S
++++ b/arch/arm/lib/copy_from_user.S
+@@ -93,11 +93,7 @@ ENTRY(arm_copy_from_user)
+ #ifdef CONFIG_CPU_SPECTRE
+ get_thread_info r3
+ ldr r3, [r3, #TI_ADDR_LIMIT]
+- adds ip, r1, r2 @ ip=addr+size
+- sub r3, r3, #1 @ addr_limit - 1
+- cmpcc ip, r3 @ if (addr+size > addr_limit - 1)
+- movcs r1, #0 @ addr = NULL
+- csdb
++ uaccess_mask_range_ptr r1, r2, r3, ip
+ #endif
+
+ #include "copy_template.S"
+diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
+index caf5019d8161..970abe521197 100644
+--- a/arch/arm/lib/copy_to_user.S
++++ b/arch/arm/lib/copy_to_user.S
+@@ -94,6 +94,11 @@
+
+ ENTRY(__copy_to_user_std)
+ WEAK(arm_copy_to_user)
++#ifdef CONFIG_CPU_SPECTRE
++ get_thread_info r3
++ ldr r3, [r3, #TI_ADDR_LIMIT]
++ uaccess_mask_range_ptr r0, r2, r3, ip
++#endif
+
+ #include "copy_template.S"
+
+@@ -108,4 +113,3 @@ ENDPROC(__copy_to_user_std)
+ rsb r0, r0, r2
+ copy_abort_end
+ .popsection
+-
+diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
+index 9b4ed1728616..73dc7360cbdd 100644
+--- a/arch/arm/lib/uaccess_with_memcpy.c
++++ b/arch/arm/lib/uaccess_with_memcpy.c
+@@ -152,7 +152,8 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
+ n = __copy_to_user_std(to, from, n);
+ uaccess_restore(ua_flags);
+ } else {
+- n = __copy_to_user_memcpy(to, from, n);
++ n = __copy_to_user_memcpy(uaccess_mask_range_ptr(to, n),
++ from, n);
+ }
+ return n;
+ }
+diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c
+index a109f6482413..0f916c245a2e 100644
+--- a/arch/arm/mach-integrator/impd1.c
++++ b/arch/arm/mach-integrator/impd1.c
+@@ -393,7 +393,11 @@ static int __ref impd1_probe(struct lm_device *dev)
+ sizeof(*lookup) + 3 * sizeof(struct gpiod_lookup),
+ GFP_KERNEL);
+ chipname = devm_kstrdup(&dev->dev, devname, GFP_KERNEL);
+- mmciname = kasprintf(GFP_KERNEL, "lm%x:00700", dev->id);
++ mmciname = devm_kasprintf(&dev->dev, GFP_KERNEL,
++ "lm%x:00700", dev->id);
++ if (!lookup || !chipname || !mmciname)
++ return -ENOMEM;
++
+ lookup->dev_id = mmciname;
+ /*
+ * Offsets on GPIO block 1:
+diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
+index 81d0efb055c6..5461d589a1e2 100644
+--- a/arch/arm/mm/proc-macros.S
++++ b/arch/arm/mm/proc-macros.S
+@@ -274,6 +274,13 @@
+ .endm
+
+ .macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0
++/*
++ * If we are building for big.Little with branch predictor hardening,
++ * we need the processor function tables to remain available after boot.
++ */
++#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
++ .section ".rodata"
++#endif
+ .type \name\()_processor_functions, #object
+ .align 2
+ ENTRY(\name\()_processor_functions)
+@@ -309,6 +316,9 @@ ENTRY(\name\()_processor_functions)
+ .endif
+
+ .size \name\()_processor_functions, . - \name\()_processor_functions
++#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
++ .previous
++#endif
+ .endm
+
+ .macro define_cache_functions name:req
+diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
+index 5544b82a2e7a..9a07916af8dd 100644
+--- a/arch/arm/mm/proc-v7-bugs.c
++++ b/arch/arm/mm/proc-v7-bugs.c
+@@ -52,8 +52,6 @@ static void cpu_v7_spectre_init(void)
+ case ARM_CPU_PART_CORTEX_A17:
+ case ARM_CPU_PART_CORTEX_A73:
+ case ARM_CPU_PART_CORTEX_A75:
+- if (processor.switch_mm != cpu_v7_bpiall_switch_mm)
+- goto bl_error;
+ per_cpu(harden_branch_predictor_fn, cpu) =
+ harden_branch_predictor_bpiall;
+ spectre_v2_method = "BPIALL";
+
+ case ARM_CPU_PART_CORTEX_A15:
+ case ARM_CPU_PART_BRAHMA_B15:
+- if (processor.switch_mm != cpu_v7_iciallu_switch_mm)
+- goto bl_error;
+ per_cpu(harden_branch_predictor_fn, cpu) =
+ harden_branch_predictor_iciallu;
+ spectre_v2_method = "ICIALLU";
+@@ -88,11 +84,9 @@ static void cpu_v7_spectre_init(void)
+ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+ if ((int)res.a0 != 0)
+ break;
+- if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu)
+- goto bl_error;
+ per_cpu(harden_branch_predictor_fn, cpu) =
+ call_hvc_arch_workaround_1;
+- processor.switch_mm = cpu_v7_hvc_switch_mm;
++ cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
+ spectre_v2_method = "hypervisor";
+ break;
+
+@@ -101,11 +95,9 @@ static void cpu_v7_spectre_init(void)
+ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+ if ((int)res.a0 != 0)
+ break;
+- if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu)
+- goto bl_error;
+ per_cpu(harden_branch_predictor_fn, cpu) =
+ call_smc_arch_workaround_1;
+- processor.switch_mm = cpu_v7_smc_switch_mm;
++ cpu_do_switch_mm = cpu_v7_smc_switch_mm;
+ spectre_v2_method = "firmware";
+ break;
+
+@@ -119,11 +111,6 @@ static void cpu_v7_spectre_init(void)
+ if (spectre_v2_method)
+ pr_info("CPU%u: Spectre v2: using %s workaround\n",
+ smp_processor_id(), spectre_v2_method);
+- return;
+-
+-bl_error:
+- pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n",
+- cpu);
+ }
+ #else
+ static void cpu_v7_spectre_init(void)
+diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
+index 6abcd4af8274..8e11223d32a1 100644
+--- a/arch/arm/vfp/vfpmodule.c
++++ b/arch/arm/vfp/vfpmodule.c
+@@ -554,12 +554,11 @@ void vfp_flush_hwstate(struct thread_info *thread)
+ * Save the current VFP state into the provided structures and prepare
+ * for entry into a new function (signal handler).
+ */
+-int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
+- struct user_vfp_exc __user *ufp_exc)
++int vfp_preserve_user_clear_hwstate(struct user_vfp *ufp,
++ struct user_vfp_exc *ufp_exc)
+ {
+ struct thread_info *thread = current_thread_info();
+ struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
+- int err = 0;
+
+ /* Ensure that the saved hwstate is up-to-date. */
+ vfp_sync_hwstate(thread);
+@@ -568,22 +567,19 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
+ * Copy the floating point registers. There can be unused
+ * registers see asm/hwcap.h for details.
+ */
+- err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs,
+- sizeof(hwstate->fpregs));
++ memcpy(&ufp->fpregs, &hwstate->fpregs, sizeof(hwstate->fpregs));
++
+ /*
+ * Copy the status and control register.
+ */
+- __put_user_error(hwstate->fpscr, &ufp->fpscr, err);
++ ufp->fpscr = hwstate->fpscr;
+
+ /*
+ * Copy the exception registers.
+ */
+- __put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err);
+- __put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
+- __put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
+-
+- if (err)
+- return -EFAULT;
++ ufp_exc->fpexc = hwstate->fpexc;
++ ufp_exc->fpinst = hwstate->fpinst;
++ ufp_exc->fpinst2 = hwstate->fpinst2;
+
+ /* Ensure that VFP is disabled. */
+ vfp_flush_hwstate(thread);
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index 7d12b0d1f359..e14a39598e8a 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -2250,6 +2250,19 @@ void perf_check_microcode(void)
+ x86_pmu.check_microcode();
+ }
+
++static int x86_pmu_check_period(struct perf_event *event, u64 value)
++{
++ if (x86_pmu.check_period && x86_pmu.check_period(event, value))
++ return -EINVAL;
++
++ if (value && x86_pmu.limit_period) {
++ if (x86_pmu.limit_period(event, value) > value)
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
+ static struct pmu pmu = {
+ .pmu_enable = x86_pmu_enable,
+ .pmu_disable = x86_pmu_disable,
+@@ -2274,6 +2287,7 @@ static struct pmu pmu = {
+ .event_idx = x86_pmu_event_idx,
+ .sched_task = x86_pmu_sched_task,
+ .task_ctx_size = sizeof(struct x86_perf_task_context),
++ .check_period = x86_pmu_check_period,
+ };
+
+ void arch_perf_update_userpage(struct perf_event *event,
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 1cb5ff3ee728..9f556c94a0b8 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -3445,6 +3445,11 @@ static void intel_pmu_sched_task(struct perf_event_context *ctx,
+ intel_pmu_lbr_sched_task(ctx, sched_in);
+ }
+
++static int intel_pmu_check_period(struct perf_event *event, u64 value)
++{
++ return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
++}
++
+ PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
+
+ PMU_FORMAT_ATTR(ldlat, "config1:0-15");
+@@ -3525,6 +3530,8 @@ static __initconst const struct x86_pmu core_pmu = {
+ .cpu_starting = intel_pmu_cpu_starting,
+ .cpu_dying = intel_pmu_cpu_dying,
+ .cpu_dead = intel_pmu_cpu_dead,
++
++ .check_period = intel_pmu_check_period,
+ };
+
+ static struct attribute *intel_pmu_attrs[];
+@@ -3568,6 +3575,8 @@ static __initconst const struct x86_pmu intel_pmu = {
+
+ .guest_get_msrs = intel_guest_get_msrs,
+ .sched_task = intel_pmu_sched_task,
++
++ .check_period = intel_pmu_check_period,
+ };
+
+ static __init void intel_clovertown_quirk(void)
+diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
+index 3c51fcaf1e34..fbbc10338987 100644
+--- a/arch/x86/events/perf_event.h
++++ b/arch/x86/events/perf_event.h
+@@ -639,6 +639,11 @@ struct x86_pmu {
+ * Intel host/guest support (KVM)
+ */
+ struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
++
++ /*
++ * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
++ */
++ int (*check_period) (struct perf_event *event, u64 period);
+ };
+
+ struct x86_perf_task_context {
+@@ -848,7 +853,7 @@ static inline int amd_pmu_init(void)
+
+ #ifdef CONFIG_CPU_SUP_INTEL
+
+-static inline bool intel_pmu_has_bts(struct perf_event *event)
++static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
+ {
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned int hw_event, bts_event;
+@@ -859,7 +864,14 @@ static inline bool intel_pmu_has_bts(struct perf_event *event)
+ hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
+ bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
+
+- return hw_event == bts_event && hwc->sample_period == 1;
++ return hw_event == bts_event && period == 1;
++}
++
++static inline bool intel_pmu_has_bts(struct perf_event *event)
++{
++ struct hw_perf_event *hwc = &event->hw;
++
++ return intel_pmu_has_bts_period(event, hwc->sample_period);
+ }
+
+ int intel_pmu_save_and_restart(struct perf_event *event);
+diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
+index 8e02b30cf08e..3ebd77770f98 100644
+--- a/arch/x86/ia32/ia32_aout.c
++++ b/arch/x86/ia32/ia32_aout.c
+@@ -51,7 +51,7 @@ static unsigned long get_dr(int n)
+ /*
+ * fill in the user structure for a core dump..
+ */
+-static void dump_thread32(struct pt_regs *regs, struct user32 *dump)
++static void fill_dump(struct pt_regs *regs, struct user32 *dump)
+ {
+ u32 fs, gs;
+ memset(dump, 0, sizeof(*dump));
+@@ -157,10 +157,12 @@ static int aout_core_dump(struct coredump_params *cprm)
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+ has_dumped = 1;
++
++ fill_dump(cprm->regs, &dump);
++
+ strncpy(dump.u_comm, current->comm, sizeof(current->comm));
+ dump.u_ar0 = offsetof(struct user32, regs);
+ dump.signal = cprm->siginfo->si_signo;
+- dump_thread32(cprm->regs, &dump);
+
+ /*
+ * If the size of the dump file exceeds the rlimit, then see
+diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h
+index e652a7cc6186..3f697a9e3f59 100644
+--- a/arch/x86/include/asm/uv/bios.h
++++ b/arch/x86/include/asm/uv/bios.h
+@@ -48,7 +48,8 @@ enum {
+ BIOS_STATUS_SUCCESS = 0,
+ BIOS_STATUS_UNIMPLEMENTED = -ENOSYS,
+ BIOS_STATUS_EINVAL = -EINVAL,
+- BIOS_STATUS_UNAVAIL = -EBUSY
++ BIOS_STATUS_UNAVAIL = -EBUSY,
++ BIOS_STATUS_ABORT = -EINTR,
+ };
+
+ /* Address map parameters */
+@@ -167,4 +168,9 @@ extern long system_serial_number;
+
+ extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */
+
++/*
++ * EFI runtime lock; cf. firmware/efi/runtime-wrappers.c for details
++ */
++extern struct semaphore __efi_uv_runtime_lock;
++
+ #endif /* _ASM_X86_UV_BIOS_H */
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 1f5de4314291..8e5a977bf50e 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -2230,7 +2230,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
+ if (!entry_only)
+ j = find_msr(&m->host, msr);
+
+- if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) {
++ if ((i < 0 && m->guest.nr == NR_AUTOLOAD_MSRS) ||
++ (j < 0 && m->host.nr == NR_AUTOLOAD_MSRS)) {
+ printk_once(KERN_WARNING "Not enough msr switch entries. "
+ "Can't add msr %x\n", msr);
+ return;
+diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c
+index 4a6a5a26c582..eb33432f2f24 100644
+--- a/arch/x86/platform/uv/bios_uv.c
++++ b/arch/x86/platform/uv/bios_uv.c
+@@ -29,7 +29,8 @@
+
+ struct uv_systab *uv_systab;
+
+-s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
++static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
++ u64 a4, u64 a5)
+ {
+ struct uv_systab *tab = uv_systab;
+ s64 ret;
+@@ -51,6 +52,19 @@ s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
+
+ return ret;
+ }
++
++s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
++{
++ s64 ret;
++
++ if (down_interruptible(&__efi_uv_runtime_lock))
++ return BIOS_STATUS_ABORT;
++
++ ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
++ up(&__efi_uv_runtime_lock);
++
++ return ret;
++}
+ EXPORT_SYMBOL_GPL(uv_bios_call);
+
+ s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
+@@ -59,10 +73,15 @@ s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
+ unsigned long bios_flags;
+ s64 ret;
+
++ if (down_interruptible(&__efi_uv_runtime_lock))
++ return BIOS_STATUS_ABORT;
++
+ local_irq_save(bios_flags);
+- ret = uv_bios_call(which, a1, a2, a3, a4, a5);
++ ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
+ local_irq_restore(bios_flags);
+
++ up(&__efi_uv_runtime_lock);
++
+ return ret;
+ }
+
+diff --git a/block/blk-flush.c b/block/blk-flush.c
+index 4938bec8cfef..6603352879e7 100644
+--- a/block/blk-flush.c
++++ b/block/blk-flush.c
+@@ -402,7 +402,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
+ blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
+ spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
+
+- blk_mq_run_hw_queue(hctx, true);
++ blk_mq_sched_restart(hctx);
+ }
+
+ /**
+diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
+index 8fb74d9011da..a7907b58562a 100644
+--- a/drivers/acpi/numa.c
++++ b/drivers/acpi/numa.c
+@@ -147,9 +147,9 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
+ {
+ struct acpi_srat_mem_affinity *p =
+ (struct acpi_srat_mem_affinity *)header;
+- pr_debug("SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s%s\n",
+- (unsigned long)p->base_address,
+- (unsigned long)p->length,
++ pr_debug("SRAT Memory (0x%llx length 0x%llx) in proximity domain %d %s%s%s\n",
++ (unsigned long long)p->base_address,
++ (unsigned long long)p->length,
+ p->proximity_domain,
+ (p->flags & ACPI_SRAT_MEM_ENABLED) ?
+ "enabled" : "disabled",
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 93754300cb57..66c2790dcc5f 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -1523,17 +1523,16 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
+ {
+ unsigned int ret_freq = 0;
+
+- if (!cpufreq_driver->get)
++ if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get)
+ return ret_freq;
+
+ ret_freq = cpufreq_driver->get(policy->cpu);
+
+ /*
+- * Updating inactive policies is invalid, so avoid doing that. Also
+- * if fast frequency switching is used with the given policy, the check
++ * If fast frequency switching is used with the given policy, the check
+ * against policy->cur is pointless, so skip it in that case too.
+ */
+- if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
++ if (policy->fast_switch_enabled)
+ return ret_freq;
+
+ if (ret_freq && policy->cur &&
+@@ -1562,10 +1561,7 @@ unsigned int cpufreq_get(unsigned int cpu)
+
+ if (policy) {
+ down_read(&policy->rwsem);
+-
+- if (!policy_is_inactive(policy))
+- ret_freq = __cpufreq_get(policy);
+-
++ ret_freq = __cpufreq_get(policy);
+ up_read(&policy->rwsem);
+
+ cpufreq_cpu_put(policy);
+diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
+index ae54870b2788..dd7f63354ca0 100644
+--- a/drivers/firmware/efi/runtime-wrappers.c
++++ b/drivers/firmware/efi/runtime-wrappers.c
+@@ -49,6 +49,13 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call)
+ local_irq_restore(flags);
+ }
+
++/*
++ * Expose the EFI runtime lock to the UV platform
++ */
++#ifdef CONFIG_X86_UV
++extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock);
++#endif
++
+ /*
+ * According to section 7.1 of the UEFI spec, Runtime Services are not fully
+ * reentrant, and there are particular combinations of calls that need to be
+diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
+index 8636e7eeb731..6eebd8ad0c52 100644
+--- a/drivers/gpu/drm/bridge/tc358767.c
++++ b/drivers/gpu/drm/bridge/tc358767.c
+@@ -96,6 +96,8 @@
+ #define DP0_STARTVAL 0x064c
+ #define DP0_ACTIVEVAL 0x0650
+ #define DP0_SYNCVAL 0x0654
++#define SYNCVAL_HS_POL_ACTIVE_LOW (1 << 15)
++#define SYNCVAL_VS_POL_ACTIVE_LOW (1 << 31)
+ #define DP0_MISC 0x0658
+ #define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */
+ #define BPC_6 (0 << 5)
+@@ -140,6 +142,8 @@
+ #define DP0_LTLOOPCTRL 0x06d8
+ #define DP0_SNKLTCTRL 0x06e4
+
++#define DP1_SRCCTRL 0x07a0
++
+ /* PHY */
+ #define DP_PHY_CTRL 0x0800
+ #define DP_PHY_RST BIT(28) /* DP PHY Global Soft Reset */
+@@ -148,6 +152,7 @@
+ #define PHY_M1_RST BIT(12) /* Reset PHY1 Main Channel */
+ #define PHY_RDY BIT(16) /* PHY Main Channels Ready */
+ #define PHY_M0_RST BIT(8) /* Reset PHY0 Main Channel */
++#define PHY_2LANE BIT(2) /* PHY Enable 2 lanes */
+ #define PHY_A0_EN BIT(1) /* PHY Aux Channel0 Enable */
+ #define PHY_M0_EN BIT(0) /* PHY Main Channel0 Enable */
+
+@@ -538,6 +543,7 @@ static int tc_aux_link_setup(struct tc_data *tc)
+ unsigned long rate;
+ u32 value;
+ int ret;
++ u32 dp_phy_ctrl;
+
+ rate = clk_get_rate(tc->refclk);
+ switch (rate) {
+@@ -562,7 +568,10 @@ static int tc_aux_link_setup(struct tc_data *tc)
+ value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
+ tc_write(SYS_PLLPARAM, value);
+
+- tc_write(DP_PHY_CTRL, BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN);
++ dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN;
++ if (tc->link.base.num_lanes == 2)
++ dp_phy_ctrl |= PHY_2LANE;
++ tc_write(DP_PHY_CTRL, dp_phy_ctrl);
+
+ /*
+ * Initially PLLs are in bypass. Force PLL parameter update,
+@@ -717,7 +726,9 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
+
+ tc_write(DP0_ACTIVEVAL, (mode->vdisplay << 16) | (mode->hdisplay));
+
+- tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0));
++ tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0) |
++ ((mode->flags & DRM_MODE_FLAG_NHSYNC) ? SYNCVAL_HS_POL_ACTIVE_LOW : 0) |
++ ((mode->flags & DRM_MODE_FLAG_NVSYNC) ? SYNCVAL_VS_POL_ACTIVE_LOW : 0));
+
+ tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
+ DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
+@@ -827,12 +838,11 @@ static int tc_main_link_setup(struct tc_data *tc)
+ if (!tc->mode)
+ return -EINVAL;
+
+- /* from excel file - DP0_SrcCtrl */
+- tc_write(DP0_SRCCTRL, DP0_SRCCTRL_SCRMBLDIS | DP0_SRCCTRL_EN810B |
+- DP0_SRCCTRL_LANESKEW | DP0_SRCCTRL_LANES_2 |
+- DP0_SRCCTRL_BW27 | DP0_SRCCTRL_AUTOCORRECT);
+- /* from excel file - DP1_SrcCtrl */
+- tc_write(0x07a0, 0x00003083);
++ tc_write(DP0_SRCCTRL, tc_srcctrl(tc));
++ /* SSCG and BW27 on DP1 must be set to the same as on DP0 */
++ tc_write(DP1_SRCCTRL,
++ (tc->link.spread ? DP0_SRCCTRL_SSCG : 0) |
++ ((tc->link.base.rate != 162000) ? DP0_SRCCTRL_BW27 : 0));
+
+ rate = clk_get_rate(tc->refclk);
+ switch (rate) {
+@@ -853,8 +863,11 @@ static int tc_main_link_setup(struct tc_data *tc)
+ }
+ value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
+ tc_write(SYS_PLLPARAM, value);
++
+ /* Setup Main Link */
+- dp_phy_ctrl = BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN | PHY_M0_EN;
++ dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN | PHY_M0_EN;
++ if (tc->link.base.num_lanes == 2)
++ dp_phy_ctrl |= PHY_2LANE;
+ tc_write(DP_PHY_CTRL, dp_phy_ctrl);
+ msleep(100);
+
+@@ -1103,10 +1116,20 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
+ static int tc_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+ {
++ struct tc_data *tc = connector_to_tc(connector);
++ u32 req, avail;
++ u32 bits_per_pixel = 24;
++
+ /* DPI interface clock limitation: upto 154 MHz */
+ if (mode->clock > 154000)
+ return MODE_CLOCK_HIGH;
+
++ req = mode->clock * bits_per_pixel / 8;
++ avail = tc->link.base.num_lanes * tc->link.base.rate;
++
++ if (req > avail)
++ return MODE_BAD;
++
+ return MODE_OK;
+ }
+
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 1f19e6d9a717..5d8a67c65141 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -1633,6 +1633,16 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
+ return 0;
+ }
+
++static inline bool
++__vma_matches(struct vm_area_struct *vma, struct file *filp,
++ unsigned long addr, unsigned long size)
++{
++ if (vma->vm_file != filp)
++ return false;
++
++ return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size;
++}
++
+ /**
+ * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
+ * it is mapped to.
+@@ -1691,7 +1701,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ return -EINTR;
+ }
+ vma = find_vma(mm, addr);
+- if (vma)
++ if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
+ vma->vm_page_prot =
+ pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ else
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
+index 2e7b4e2105ef..62cb376e2c01 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
+@@ -22,6 +22,7 @@
+ #include <engine/falcon.h>
+
+ #include <core/gpuobj.h>
++#include <subdev/mc.h>
+ #include <subdev/timer.h>
+ #include <engine/fifo.h>
+
+@@ -107,8 +108,10 @@ nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend)
+ }
+ }
+
+- nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
+- nvkm_wr32(device, base + 0x014, 0xffffffff);
++ if (nvkm_mc_enabled(device, engine->subdev.index)) {
++ nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
++ nvkm_wr32(device, base + 0x014, 0xffffffff);
++ }
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+index 952a7cb0a59a..692d4d96766a 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+@@ -131,11 +131,12 @@ nvkm_therm_update(struct nvkm_therm *therm, int mode)
+ duty = nvkm_therm_update_linear(therm);
+ break;
+ case NVBIOS_THERM_FAN_OTHER:
+- if (therm->cstate)
++ if (therm->cstate) {
+ duty = therm->cstate;
+- else
++ poll = false;
++ } else {
+ duty = nvkm_therm_update_linear_fallback(therm);
+- poll = false;
++ }
+ break;
+ }
+ immd = false;
+diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
+index 1efcfdf9f8a8..dd9dd4e40827 100644
+--- a/drivers/input/misc/bma150.c
++++ b/drivers/input/misc/bma150.c
+@@ -481,13 +481,14 @@ static int bma150_register_input_device(struct bma150_data *bma150)
+ idev->close = bma150_irq_close;
+ input_set_drvdata(idev, bma150);
+
++ bma150->input = idev;
++
+ error = input_register_device(idev);
+ if (error) {
+ input_free_device(idev);
+ return error;
+ }
+
+- bma150->input = idev;
+ return 0;
+ }
+
+@@ -510,15 +511,15 @@ static int bma150_register_polled_device(struct bma150_data *bma150)
+
+ bma150_init_input_device(bma150, ipoll_dev->input);
+
++ bma150->input_polled = ipoll_dev;
++ bma150->input = ipoll_dev->input;
++
+ error = input_register_polled_device(ipoll_dev);
+ if (error) {
+ input_free_polled_device(ipoll_dev);
+ return error;
+ }
+
+- bma150->input_polled = ipoll_dev;
+- bma150->input = ipoll_dev->input;
+-
+ return 0;
+ }
+
+diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
+index f2bf8fa1ab04..fce70f4ef004 100644
+--- a/drivers/input/mouse/elan_i2c_core.c
++++ b/drivers/input/mouse/elan_i2c_core.c
+@@ -1251,7 +1251,6 @@ MODULE_DEVICE_TABLE(i2c, elan_id);
+ static const struct acpi_device_id elan_acpi_id[] = {
+ { "ELAN0000", 0 },
+ { "ELAN0100", 0 },
+- { "ELAN0501", 0 },
+ { "ELAN0600", 0 },
+ { "ELAN0602", 0 },
+ { "ELAN0605", 0 },
+@@ -1262,6 +1261,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
+ { "ELAN060C", 0 },
+ { "ELAN0611", 0 },
+ { "ELAN0612", 0 },
++ { "ELAN0617", 0 },
+ { "ELAN0618", 0 },
+ { "ELAN061C", 0 },
+ { "ELAN061D", 0 },
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index 84c69e962230..fda33fc3ffcc 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1121,6 +1121,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
+ * Asus UX31 0x361f00 20, 15, 0e clickpad
+ * Asus UX32VD 0x361f02 00, 15, 0e clickpad
+ * Avatar AVIU-145A2 0x361f00 ? clickpad
++ * Fujitsu CELSIUS H760 0x570f02 40, 14, 0c 3 hw buttons (**)
++ * Fujitsu CELSIUS H780 0x5d0f02 41, 16, 0d 3 hw buttons (**)
+ * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
+ * Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons
+ * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons
+@@ -1173,6 +1175,13 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
+ },
+ },
++ {
++ /* Fujitsu H780 also has a middle button */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H780"),
++ },
++ },
+ #endif
+ { }
+ };
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 1f6d8b6be5c7..0d2005e5b24c 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -935,7 +935,7 @@ static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
+ if (IS_ERR(bip))
+ return PTR_ERR(bip);
+
+- tag_len = io->cc->on_disk_tag_size * bio_sectors(bio);
++ tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);
+
+ bip->bip_iter.bi_size = tag_len;
+ bip->bip_iter.bi_sector = io->cc->start + io->sector;
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index 40b624d8255d..18d6a8a10d5d 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -257,6 +257,7 @@ struct pool {
+
+ spinlock_t lock;
+ struct bio_list deferred_flush_bios;
++ struct bio_list deferred_flush_completions;
+ struct list_head prepared_mappings;
+ struct list_head prepared_discards;
+ struct list_head prepared_discards_pt2;
+@@ -950,6 +951,39 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
+ mempool_free(m, m->tc->pool->mapping_pool);
+ }
+
++static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
++{
++ struct pool *pool = tc->pool;
++ unsigned long flags;
++
++ /*
++ * If the bio has the REQ_FUA flag set we must commit the metadata
++ * before signaling its completion.
++ */
++ if (!bio_triggers_commit(tc, bio)) {
++ bio_endio(bio);
++ return;
++ }
++
++ /*
++ * Complete bio with an error if earlier I/O caused changes to the
++ * metadata that can't be committed, e.g, due to I/O errors on the
++ * metadata device.
++ */
++ if (dm_thin_aborted_changes(tc->td)) {
++ bio_io_error(bio);
++ return;
++ }
++
++ /*
++ * Batch together any bios that trigger commits and then issue a
++ * single commit for them in process_deferred_bios().
++ */
++ spin_lock_irqsave(&pool->lock, flags);
++ bio_list_add(&pool->deferred_flush_completions, bio);
++ spin_unlock_irqrestore(&pool->lock, flags);
++}
++
+ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
+ {
+ struct thin_c *tc = m->tc;
+@@ -982,7 +1016,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
+ */
+ if (bio) {
+ inc_remap_and_issue_cell(tc, m->cell, m->data_block);
+- bio_endio(bio);
++ complete_overwrite_bio(tc, bio);
+ } else {
+ inc_all_io_entry(tc->pool, m->cell->holder);
+ remap_and_issue(tc, m->cell->holder, m->data_block);
+@@ -2328,7 +2362,7 @@ static void process_deferred_bios(struct pool *pool)
+ {
+ unsigned long flags;
+ struct bio *bio;
+- struct bio_list bios;
++ struct bio_list bios, bio_completions;
+ struct thin_c *tc;
+
+ tc = get_first_thin(pool);
+@@ -2339,26 +2373,36 @@ static void process_deferred_bios(struct pool *pool)
+ }
+
+ /*
+- * If there are any deferred flush bios, we must commit
+- * the metadata before issuing them.
++ * If there are any deferred flush bios, we must commit the metadata
++ * before issuing them or signaling their completion.
+ */
+ bio_list_init(&bios);
++ bio_list_init(&bio_completions);
++
+ spin_lock_irqsave(&pool->lock, flags);
+ bio_list_merge(&bios, &pool->deferred_flush_bios);
+ bio_list_init(&pool->deferred_flush_bios);
++
++ bio_list_merge(&bio_completions, &pool->deferred_flush_completions);
++ bio_list_init(&pool->deferred_flush_completions);
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+- if (bio_list_empty(&bios) &&
++ if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
+ !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
+ return;
+
+ if (commit(pool)) {
++ bio_list_merge(&bios, &bio_completions);
1617 ++
1618 + while ((bio = bio_list_pop(&bios)))
1619 + bio_io_error(bio);
1620 + return;
1621 + }
1622 + pool->last_commit_jiffies = jiffies;
1623 +
1624 ++ while ((bio = bio_list_pop(&bio_completions)))
1625 ++ bio_endio(bio);
1626 ++
1627 + while ((bio = bio_list_pop(&bios)))
1628 + generic_make_request(bio);
1629 + }
1630 +@@ -2965,6 +3009,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
1631 + INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
1632 + spin_lock_init(&pool->lock);
1633 + bio_list_init(&pool->deferred_flush_bios);
1634 ++ bio_list_init(&pool->deferred_flush_completions);
1635 + INIT_LIST_HEAD(&pool->prepared_mappings);
1636 + INIT_LIST_HEAD(&pool->prepared_discards);
1637 + INIT_LIST_HEAD(&pool->prepared_discards_pt2);
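The dm-thin hunks batch REQ_FUA overwrite completions on a second list so one metadata commit covers the whole batch: both lists are snapshotted under the pool lock, a single commit() runs, and only then are completions signalled and the remaining flush bios resubmitted (or everything errored together if the commit fails). A compressed userspace analogue of that batch-then-commit shape, with plain arrays standing in for bio lists:

    #include <stdio.h>

    #define MAX 8
    struct batch { int items[MAX]; int n; };

    static void push(struct batch *b, int v) { b->items[b->n++] = v; }

    static int commit(void) { return 0; /* one commit for the batch */ }

    static void drain(struct batch *flush, struct batch *completions)
    {
        int i;

        if (commit() != 0) {
            /* on commit failure, everything fails together */
            for (i = 0; i < completions->n; i++)
                printf("error %d\n", completions->items[i]);
            for (i = 0; i < flush->n; i++)
                printf("error %d\n", flush->items[i]);
            return;
        }
        for (i = 0; i < completions->n; i++)   /* endio after commit    */
            printf("complete %d\n", completions->items[i]);
        for (i = 0; i < flush->n; i++)         /* resubmit after commit */
            printf("issue %d\n", flush->items[i]);
    }

    int main(void)
    {
        struct batch flush = { .n = 0 }, done = { .n = 0 };

        push(&flush, 1); push(&done, 2); push(&done, 3);
        drain(&flush, &done);
        return 0;
    }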
1638 +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
1639 +index 205f86f1a6cb..31c4391f6a62 100644
1640 +--- a/drivers/md/raid1.c
1641 ++++ b/drivers/md/raid1.c
1642 +@@ -1854,6 +1854,20 @@ static void end_sync_read(struct bio *bio)
1643 + reschedule_retry(r1_bio);
1644 + }
1645 +
1646 ++static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
1647 ++{
1648 ++ sector_t sync_blocks = 0;
1649 ++ sector_t s = r1_bio->sector;
1650 ++ long sectors_to_go = r1_bio->sectors;
1651 ++
1652 ++ /* make sure these bits don't get cleared. */
1653 ++ do {
1654 ++ bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
1655 ++ s += sync_blocks;
1656 ++ sectors_to_go -= sync_blocks;
1657 ++ } while (sectors_to_go > 0);
1658 ++}
1659 ++
1660 + static void end_sync_write(struct bio *bio)
1661 + {
1662 + int uptodate = !bio->bi_status;
1663 +@@ -1865,16 +1879,7 @@ static void end_sync_write(struct bio *bio)
1664 + struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
1665 +
1666 + if (!uptodate) {
1667 +- sector_t sync_blocks = 0;
1668 +- sector_t s = r1_bio->sector;
1669 +- long sectors_to_go = r1_bio->sectors;
1670 +- /* make sure these bits doesn't get cleared. */
1671 +- do {
1672 +- bitmap_end_sync(mddev->bitmap, s,
1673 +- &sync_blocks, 1);
1674 +- s += sync_blocks;
1675 +- sectors_to_go -= sync_blocks;
1676 +- } while (sectors_to_go > 0);
1677 ++ abort_sync_write(mddev, r1_bio);
1678 + set_bit(WriteErrorSeen, &rdev->flags);
1679 + if (!test_and_set_bit(WantReplacement, &rdev->flags))
1680 + set_bit(MD_RECOVERY_NEEDED, &
1681 +@@ -2164,8 +2169,10 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
1682 + (i == r1_bio->read_disk ||
1683 + !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
1684 + continue;
1685 +- if (test_bit(Faulty, &conf->mirrors[i].rdev->flags))
1686 ++ if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
1687 ++ abort_sync_write(mddev, r1_bio);
1688 + continue;
1689 ++ }
1690 +
1691 + bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
1692 + if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
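abort_sync_write() walks its range in steps whose size the callee reports back through a pointer argument, the usual idiom when the granularity (here, bitmap sync chunks) is not the caller's to choose. A toy version of the loop, assuming a fixed chunk size purely for illustration:

    #include <stdio.h>

    /* the callee picks the step size and reports it back, like
     * bitmap_end_sync() does through &sync_blocks (fixed at 8 here
     * purely for illustration) */
    static void end_chunk(long start, long *chunk)
    {
        (void)start;
        *chunk = 8;
    }

    int main(void)
    {
        long s = 0, sectors_to_go = 20, chunk = 0;

        do {
            end_chunk(s, &chunk);
            printf("handled [%ld, %ld)\n", s, s + chunk);
            s += chunk;
            sectors_to_go -= chunk;
        } while (sectors_to_go > 0);
        return 0;
    }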
1693 +diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
1694 +index de58762097c4..3f93e4564cab 100644
1695 +--- a/drivers/misc/eeprom/Kconfig
1696 ++++ b/drivers/misc/eeprom/Kconfig
1697 +@@ -12,7 +12,7 @@ config EEPROM_AT24
1698 + ones like at24c64, 24lc02 or fm24c04:
1699 +
1700 + 24c00, 24c01, 24c02, spd (readonly 24c02), 24c04, 24c08,
1701 +- 24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024
1702 ++ 24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024, 24c2048
1703 +
1704 + Unless you like data loss puzzles, always be sure that any chip
1705 + you configure as a 24c32 (32 kbit) or larger is NOT really a
1706 +diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
1707 +index ded48a0c77ee..59dcd97ee3de 100644
1708 +--- a/drivers/misc/eeprom/at24.c
1709 ++++ b/drivers/misc/eeprom/at24.c
1710 +@@ -170,6 +170,7 @@ static const struct i2c_device_id at24_ids[] = {
1711 + { "24c256", AT24_DEVICE_MAGIC(262144 / 8, AT24_FLAG_ADDR16) },
1712 + { "24c512", AT24_DEVICE_MAGIC(524288 / 8, AT24_FLAG_ADDR16) },
1713 + { "24c1024", AT24_DEVICE_MAGIC(1048576 / 8, AT24_FLAG_ADDR16) },
1714 ++ { "24c2048", AT24_DEVICE_MAGIC(2097152 / 8, AT24_FLAG_ADDR16) },
1715 + { "at24", 0 },
1716 + { /* END OF LIST */ }
1717 + };
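The new 24c2048 entry follows the table's bits-to-bytes convention: a 24c2048 is a 2 Mbit part, 2097152 bits / 8 = 262144 bytes (256 KiB), and like the other large parts in the table it keeps AT24_FLAG_ADDR16 for two-byte in-chip addressing.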
1718 +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1719 +index 022b06e770d1..41ac9a2bc153 100644
1720 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1721 ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1722 +@@ -12978,6 +12978,24 @@ static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
1723 + struct net_device *dev,
1724 + netdev_features_t features)
1725 + {
1726 ++ /*
1727 ++ * A skb with gso_size + header length > 9700 will cause a
1728 ++ * firmware panic. Drop GSO support.
1729 ++ *
1730 ++ * Eventually the upper layer should not pass these packets down.
1731 ++ *
1732 ++ * For speed, if the gso_size is <= 9000, assume there will
1733 ++ * not be 700 bytes of headers and pass it through. Only do a
1734 ++ * full (slow) validation if the gso_size is > 9000.
1735 ++ *
1736 ++ * (Due to the way SKB_BY_FRAGS works this will also do a full
1737 ++ * validation in that case.)
1738 ++ */
1739 ++ if (unlikely(skb_is_gso(skb) &&
1740 ++ (skb_shinfo(skb)->gso_size > 9000) &&
1741 ++ !skb_gso_validate_mac_len(skb, 9700)))
1742 ++ features &= ~NETIF_F_GSO_MASK;
1743 ++
1744 + features = vlan_features_check(skb, features);
1745 + return vxlan_features_check(skb, features);
1746 + }
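The bnx2x hook is a two-stage guard: a cheap gso_size comparison short-circuits the common case, and only borderline packets pay for the full per-segment length validation before GSO is stripped. A self-contained sketch of that fast-path/slow-path structure, with stub predicates standing in for skb_is_gso() and skb_gso_validate_mac_len():

    #include <stdbool.h>
    #include <stdio.h>

    #define F_GSO  0x1
    #define F_CSUM 0x2

    static bool is_gso(int gso_size) { return gso_size > 0; }
    static bool fits(int gso_size, int hdrs, int limit)
    {
        return gso_size + hdrs <= limit;   /* the "slow" validation */
    }

    static unsigned features_check(int gso_size, int hdrs, unsigned features)
    {
        /* fast path: small gso_size cannot exceed the limit;
         * slow path: validate the full per-segment length */
        if (is_gso(gso_size) && gso_size > 9000 && !fits(gso_size, hdrs, 9700))
            features &= ~F_GSO;
        return features;
    }

    int main(void)
    {
        printf("%#x\n", features_check(9400, 400, F_GSO | F_CSUM)); /* 0x2 */
        printf("%#x\n", features_check(8000, 400, F_GSO | F_CSUM)); /* 0x3 */
        return 0;
    }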
1747 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
1748 +index afb99876fa9e..06355ca832db 100644
1749 +--- a/drivers/nvme/host/pci.c
1750 ++++ b/drivers/nvme/host/pci.c
1751 +@@ -1624,8 +1624,9 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
1752 + struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
1753 + size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size;
1754 +
1755 +- dma_free_coherent(dev->dev, size, dev->host_mem_desc_bufs[i],
1756 +- le64_to_cpu(desc->addr));
1757 ++ dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i],
1758 ++ le64_to_cpu(desc->addr),
1759 ++ DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
1760 + }
1761 +
1762 + kfree(dev->host_mem_desc_bufs);
1763 +@@ -1691,8 +1692,9 @@ out_free_bufs:
1764 + while (--i >= 0) {
1765 + size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size;
1766 +
1767 +- dma_free_coherent(dev->dev, size, bufs[i],
1768 +- le64_to_cpu(descs[i].addr));
1769 ++ dma_free_attrs(dev->dev, size, bufs[i],
1770 ++ le64_to_cpu(descs[i].addr),
1771 ++ DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
1772 + }
1773 +
1774 + kfree(bufs);
1775 +diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
1776 +index 31632c087504..8f0368330a04 100644
1777 +--- a/drivers/pinctrl/qcom/pinctrl-msm.c
1778 ++++ b/drivers/pinctrl/qcom/pinctrl-msm.c
1779 +@@ -839,11 +839,24 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
1780 + return ret;
1781 + }
1782 +
1783 +- ret = gpiochip_add_pin_range(&pctrl->chip, dev_name(pctrl->dev), 0, 0, chip->ngpio);
1784 +- if (ret) {
1785 +- dev_err(pctrl->dev, "Failed to add pin range\n");
1786 +- gpiochip_remove(&pctrl->chip);
1787 +- return ret;
1788 ++ /*
1789 ++ * For DeviceTree-supported systems, the gpio core checks the
1790 ++ * pinctrl's device node for the "gpio-ranges" property.
1791 ++ * If it is present, it takes care of adding the pin ranges
1792 ++ * for the driver. In this case the driver can skip ahead.
1793 ++ *
1794 ++ * In order to remain compatible with older, existing DeviceTree
1795 ++ * files which don't set the "gpio-ranges" property or systems that
1796 ++ * utilize ACPI the driver has to call gpiochip_add_pin_range().
1797 ++ */
1798 ++ if (!of_property_read_bool(pctrl->dev->of_node, "gpio-ranges")) {
1799 ++ ret = gpiochip_add_pin_range(&pctrl->chip,
1800 ++ dev_name(pctrl->dev), 0, 0, chip->ngpio);
1801 ++ if (ret) {
1802 ++ dev_err(pctrl->dev, "Failed to add pin range\n");
1803 ++ gpiochip_remove(&pctrl->chip);
1804 ++ return ret;
1805 ++ }
1806 + }
1807 +
1808 + ret = gpiochip_irqchip_add(chip,
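With this change the gpio core owns the pin ranges whenever the devicetree advertises them, and the driver's gpiochip_add_pin_range() call remains only as a fallback for ACPI systems and older DTs. A hypothetical devicetree fragment carrying the property the core looks for (the compatible string and GPIO count are illustrative, not taken from the patch):

    tlmm: pinctrl@1000000 {
            compatible = "qcom,msm8916-pinctrl";
            gpio-controller;
            #gpio-cells = <2>;
            /* <pinctrl-phandle gpio-offset pin-offset count> */
            gpio-ranges = <&tlmm 0 0 122>;
    };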
1809 +diff --git a/fs/cifs/file.c b/fs/cifs/file.c
1810 +index 1e176e11dbfa..852d7d1dcbbd 100644
1811 +--- a/fs/cifs/file.c
1812 ++++ b/fs/cifs/file.c
1813 +@@ -1128,6 +1128,10 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
1814 + return -EINVAL;
1815 + }
1816 +
1817 ++ BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1818 ++ PAGE_SIZE);
1819 ++ max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1820 ++ PAGE_SIZE);
1821 + max_num = (max_buf - sizeof(struct smb_hdr)) /
1822 + sizeof(LOCKING_ANDX_RANGE);
1823 + buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1824 +@@ -1466,6 +1470,10 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1825 + if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
1826 + return -EINVAL;
1827 +
1828 ++ BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1829 ++ PAGE_SIZE);
1830 ++ max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1831 ++ PAGE_SIZE);
1832 + max_num = (max_buf - sizeof(struct smb_hdr)) /
1833 + sizeof(LOCKING_ANDX_RANGE);
1834 + buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
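max_buf is negotiated with the server and therefore untrusted; clamping it to PAGE_SIZE bounds the kcalloc() below, and the BUILD_BUG_ON proves at compile time that the clamped value still holds at least one lock element. The smb2file.c hunks that follow apply the same treatment. A simplified userspace sketch of clamp-then-divide (sizes are stand-ins):

    #include <stdio.h>

    #define PAGE_SIZE 4096u
    #define HDR_SIZE  32u     /* stand-in for sizeof(struct smb_hdr)    */
    #define ELEM_SIZE 24u     /* stand-in for sizeof(LOCKING_ANDX_RANGE) */

    static unsigned int min_u(unsigned int a, unsigned int b)
    {
        return a < b ? a : b;
    }

    int main(void)
    {
        unsigned int max_buf = 16 * 1024 * 1024; /* untrusted, from server */

        /* compile-time: one element always fits in a clamped buffer */
        _Static_assert(HDR_SIZE + ELEM_SIZE <= PAGE_SIZE, "element too big");

        max_buf = min_u(max_buf - HDR_SIZE, PAGE_SIZE);
        printf("max_num = %u\n", max_buf / ELEM_SIZE); /* bounded alloc */
        return 0;
    }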
1835 +diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
1836 +index 79078533f807..1add404618f0 100644
1837 +--- a/fs/cifs/smb2file.c
1838 ++++ b/fs/cifs/smb2file.c
1839 +@@ -130,6 +130,8 @@ smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1840 + if (max_buf < sizeof(struct smb2_lock_element))
1841 + return -EINVAL;
1842 +
1843 ++ BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
1844 ++ max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
1845 + max_num = max_buf / sizeof(struct smb2_lock_element);
1846 + buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL);
1847 + if (!buf)
1848 +@@ -266,6 +268,8 @@ smb2_push_mandatory_locks(struct cifsFileInfo *cfile)
1849 + return -EINVAL;
1850 + }
1851 +
1852 ++ BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
1853 ++ max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
1854 + max_num = max_buf / sizeof(struct smb2_lock_element);
1855 + buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL);
1856 + if (!buf) {
1857 +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
1858 +index 2b47757c9c68..5e63c459dc61 100644
1859 +--- a/fs/proc/task_mmu.c
1860 ++++ b/fs/proc/task_mmu.c
1861 +@@ -459,7 +459,7 @@ struct mem_size_stats {
1862 + };
1863 +
1864 + static void smaps_account(struct mem_size_stats *mss, struct page *page,
1865 +- bool compound, bool young, bool dirty)
1866 ++ bool compound, bool young, bool dirty, bool locked)
1867 + {
1868 + int i, nr = compound ? 1 << compound_order(page) : 1;
1869 + unsigned long size = nr * PAGE_SIZE;
1870 +@@ -486,24 +486,31 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
1871 + else
1872 + mss->private_clean += size;
1873 + mss->pss += (u64)size << PSS_SHIFT;
1874 ++ if (locked)
1875 ++ mss->pss_locked += (u64)size << PSS_SHIFT;
1876 + return;
1877 + }
1878 +
1879 + for (i = 0; i < nr; i++, page++) {
1880 + int mapcount = page_mapcount(page);
1881 ++ unsigned long pss = (PAGE_SIZE << PSS_SHIFT);
1882 +
1883 + if (mapcount >= 2) {
1884 + if (dirty || PageDirty(page))
1885 + mss->shared_dirty += PAGE_SIZE;
1886 + else
1887 + mss->shared_clean += PAGE_SIZE;
1888 +- mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
1889 ++ mss->pss += pss / mapcount;
1890 ++ if (locked)
1891 ++ mss->pss_locked += pss / mapcount;
1892 + } else {
1893 + if (dirty || PageDirty(page))
1894 + mss->private_dirty += PAGE_SIZE;
1895 + else
1896 + mss->private_clean += PAGE_SIZE;
1897 +- mss->pss += PAGE_SIZE << PSS_SHIFT;
1898 ++ mss->pss += pss;
1899 ++ if (locked)
1900 ++ mss->pss_locked += pss;
1901 + }
1902 + }
1903 + }
1904 +@@ -526,6 +533,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
1905 + {
1906 + struct mem_size_stats *mss = walk->private;
1907 + struct vm_area_struct *vma = walk->vma;
1908 ++ bool locked = !!(vma->vm_flags & VM_LOCKED);
1909 + struct page *page = NULL;
1910 +
1911 + if (pte_present(*pte)) {
1912 +@@ -568,7 +576,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
1913 + if (!page)
1914 + return;
1915 +
1916 +- smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte));
1917 ++ smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked);
1918 + }
1919 +
1920 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1921 +@@ -577,6 +585,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
1922 + {
1923 + struct mem_size_stats *mss = walk->private;
1924 + struct vm_area_struct *vma = walk->vma;
1925 ++ bool locked = !!(vma->vm_flags & VM_LOCKED);
1926 + struct page *page;
1927 +
1928 + /* FOLL_DUMP will return -EFAULT on huge zero page */
1929 +@@ -591,7 +600,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
1930 + /* pass */;
1931 + else
1932 + VM_BUG_ON_PAGE(1, page);
1933 +- smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
1934 ++ smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);
1935 + }
1936 + #else
1937 + static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
1938 +@@ -792,11 +801,8 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
1939 + }
1940 + }
1941 + #endif
1942 +-
1943 + /* mmap_sem is held in m_start */
1944 + walk_page_vma(vma, &smaps_walk);
1945 +- if (vma->vm_flags & VM_LOCKED)
1946 +- mss->pss_locked += mss->pss;
1947 +
1948 + if (!rollup_mode) {
1949 + show_map_vma(m, vma, is_pid);
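PSS charges each mapper 1/mapcount of every shared page, accumulated in fixed point (PSS_SHIFT fraction bits) so the repeated integer divisions do not drop remainders; the patch moves the VM_LOCKED accounting into smaps_account() so pss_locked is built from the same per-page terms instead of a per-VMA copy of pss. Worked numbers, assuming the usual PSS_SHIFT of 12:

    #include <stdio.h>

    #define PAGE_SIZE 4096ull
    #define PSS_SHIFT 12      /* fixed-point fraction bits, as in fs/proc */

    int main(void)
    {
        unsigned long long pss = 0;
        int mapcount = 3;     /* page shared by three processes */

        /* one shared page: charge this process 4096/3 bytes, in
         * fixed point so the fractional part is not lost */
        pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;

        printf("pss (fixed point) = %llu\n", pss);
        printf("pss (bytes)       = %llu\n", pss >> PSS_SHIFT); /* ~1365 */
        return 0;
    }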
1950 +diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
1951 +index 8e22f24ded6a..956d76744c91 100644
1952 +--- a/include/linux/perf_event.h
1953 ++++ b/include/linux/perf_event.h
1954 +@@ -446,6 +446,11 @@ struct pmu {
1955 + * Filter events for PMU-specific reasons.
1956 + */
1957 + int (*filter_match) (struct perf_event *event); /* optional */
1958 ++
1959 ++ /*
1960 ++ * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
1961 ++ */
1962 ++ int (*check_period) (struct perf_event *event, u64 value); /* optional */
1963 + };
1964 +
1965 + /**
1966 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
1967 +index 39c2570ddcf6..50a4a5968f3a 100644
1968 +--- a/include/linux/skbuff.h
1969 ++++ b/include/linux/skbuff.h
1970 +@@ -3317,6 +3317,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
1971 + void skb_scrub_packet(struct sk_buff *skb, bool xnet);
1972 + unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
1973 + bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu);
1974 ++bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
1975 + struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
1976 + struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
1977 + int skb_ensure_writable(struct sk_buff *skb, int write_len);
1978 +@@ -4087,6 +4088,21 @@ static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
1979 + return hdr_len + skb_gso_transport_seglen(skb);
1980 + }
1981 +
1982 ++/**
1983 ++ * skb_gso_mac_seglen - Return length of individual segments of a gso packet
1984 ++ *
1985 ++ * @skb: GSO skb
1986 ++ *
1987 ++ * skb_gso_mac_seglen is used to determine the real size of the
1988 ++ * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
1989 ++ * headers (TCP/UDP).
1990 ++ */
1991 ++static inline unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
1992 ++{
1993 ++ unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
1994 ++ return hdr_len + skb_gso_transport_seglen(skb);
1995 ++}
1996 ++
1997 + /* Local Checksum Offload.
1998 + * Compute outer checksum based on the assumption that the
1999 + * inner checksum will be offloaded later.
2000 +diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
2001 +index 0812cd5408c9..6e692a52936c 100644
2002 +--- a/include/trace/events/sched.h
2003 ++++ b/include/trace/events/sched.h
2004 +@@ -107,6 +107,8 @@ DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
2005 + #ifdef CREATE_TRACE_POINTS
2006 + static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
2007 + {
2008 ++ unsigned int state;
2009 ++
2010 + #ifdef CONFIG_SCHED_DEBUG
2011 + BUG_ON(p != current);
2012 + #endif /* CONFIG_SCHED_DEBUG */
2013 +@@ -118,7 +120,15 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
2014 + if (preempt)
2015 + return TASK_REPORT_MAX;
2016 +
2017 +- return 1 << __get_task_state(p);
2018 ++ /*
2019 ++ * task_state_index() uses fls() and returns a value from 0-8 range.
2020 ++ * Decrement it by 1 (except TASK_RUNNING state i.e 0) before using
2021 ++ * it for left shift operation to get the correct task->state
2022 ++ * mapping.
2023 ++ */
2024 ++ state = __get_task_state(p);
2025 ++
2026 ++ return state ? (1 << (state - 1)) : state;
2027 + }
2028 + #endif /* CREATE_TRACE_POINTS */
2029 +
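__get_task_state() returns an fls()-style index, not a bitmask, so reconstructing the task->state bits needs a shift by index - 1, with 0 (TASK_RUNNING) passed through unchanged. Assuming the 4.14 state values:

    TASK_RUNNING          bits 0x0  ->  index 0  ->  reported 0
    TASK_INTERRUPTIBLE    bits 0x1  ->  index 1  ->  1 << 0 = 0x1
    TASK_UNINTERRUPTIBLE  bits 0x2  ->  index 2  ->  1 << 1 = 0x2

    (the old "1 << index" reported TASK_RUNNING as 0x1 and
     TASK_UNINTERRUPTIBLE as 0x4, each off by one bit position)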
2030 +diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
2031 +index 244e3213ecb0..1d1157edcf40 100644
2032 +--- a/include/uapi/linux/if_ether.h
2033 ++++ b/include/uapi/linux/if_ether.h
2034 +@@ -150,11 +150,18 @@
2035 + * This is an Ethernet frame header.
2036 + */
2037 +
2038 ++/* allow libcs like musl to deactivate this, glibc does not implement this. */
2039 ++#ifndef __UAPI_DEF_ETHHDR
2040 ++#define __UAPI_DEF_ETHHDR 1
2041 ++#endif
2042 ++
2043 ++#if __UAPI_DEF_ETHHDR
2044 + struct ethhdr {
2045 + unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
2046 + unsigned char h_source[ETH_ALEN]; /* source ether addr */
2047 + __be16 h_proto; /* packet type ID field */
2048 + } __attribute__((packed));
2049 ++#endif
2050 +
2051 +
2052 + #endif /* _UAPI_LINUX_IF_ETHER_H */
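The guard is the standard libc-coordination pattern: a libc that ships its own struct ethhdr defines __UAPI_DEF_ETHHDR to 0 before the kernel header is included, and the two definitions stop colliding. Roughly what a libc like musl can now do (a sketch, not musl's actual header):

    /* in the libc's own <netinet/if_ether.h> */
    #define __UAPI_DEF_ETHHDR 0      /* suppress the kernel's definition */

    #include <linux/if_ether.h>      /* still provides ETH_ALEN etc.     */

    struct ethhdr {                  /* libc's own, identical layout     */
            unsigned char  h_dest[ETH_ALEN];
            unsigned char  h_source[ETH_ALEN];
            unsigned short h_proto;  /* __be16 in kernel terms           */
    } __attribute__((packed));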
2053 +diff --git a/kernel/events/core.c b/kernel/events/core.c
2054 +index 991af683ef9e..17d5d41464c6 100644
2055 +--- a/kernel/events/core.c
2056 ++++ b/kernel/events/core.c
2057 +@@ -4738,6 +4738,11 @@ static void __perf_event_period(struct perf_event *event,
2058 + }
2059 + }
2060 +
2061 ++static int perf_event_check_period(struct perf_event *event, u64 value)
2062 ++{
2063 ++ return event->pmu->check_period(event, value);
2064 ++}
2065 ++
2066 + static int perf_event_period(struct perf_event *event, u64 __user *arg)
2067 + {
2068 + u64 value;
2069 +@@ -4754,6 +4759,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
2070 + if (event->attr.freq && value > sysctl_perf_event_sample_rate)
2071 + return -EINVAL;
2072 +
2073 ++ if (perf_event_check_period(event, value))
2074 ++ return -EINVAL;
2075 ++
2076 + event_function_call(event, __perf_event_period, &value);
2077 +
2078 + return 0;
2079 +@@ -8951,6 +8959,11 @@ static int perf_pmu_nop_int(struct pmu *pmu)
2080 + return 0;
2081 + }
2082 +
2083 ++static int perf_event_nop_int(struct perf_event *event, u64 value)
2084 ++{
2085 ++ return 0;
2086 ++}
2087 ++
2088 + static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
2089 +
2090 + static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
2091 +@@ -9251,6 +9264,9 @@ got_cpu_context:
2092 + pmu->pmu_disable = perf_pmu_nop_void;
2093 + }
2094 +
2095 ++ if (!pmu->check_period)
2096 ++ pmu->check_period = perf_event_nop_int;
2097 ++
2098 + if (!pmu->event_idx)
2099 + pmu->event_idx = perf_event_idx_default;
2100 +
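Installing perf_event_nop_int at registration time is the usual optional-ops pattern: fill the slot with a succeed-by-default stub once, and every call site may invoke it unconditionally. A minimal userspace sketch:

    #include <stdint.h>
    #include <stdio.h>

    struct pmu_ops {
        int (*check_period)(uint64_t value);  /* optional */
    };

    static int nop_check(uint64_t value) { (void)value; return 0; }

    static void register_ops(struct pmu_ops *ops)
    {
        if (!ops->check_period)
            ops->check_period = nop_check;    /* fill in the default */
    }

    int main(void)
    {
        struct pmu_ops ops = { 0 };

        register_ops(&ops);
        /* call sites never need a NULL check */
        printf("check: %d\n", ops.check_period(4096));
        return 0;
    }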
2101 +diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
2102 +index 8b311340b241..489dc6b60053 100644
2103 +--- a/kernel/events/ring_buffer.c
2104 ++++ b/kernel/events/ring_buffer.c
2105 +@@ -719,7 +719,7 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
2106 + size = sizeof(struct ring_buffer);
2107 + size += nr_pages * sizeof(void *);
2108 +
2109 +- if (order_base_2(size) >= MAX_ORDER)
2110 ++ if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
2111 + goto fail;
2112 +
2113 + rb = kzalloc(size, GFP_KERNEL);
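order_base_2(size) here is an order in bytes, while MAX_ORDER bounds orders in pages, so the two only become comparable after adding PAGE_SHIFT. With the common PAGE_SHIFT = 12 and MAX_ORDER = 11:

    old check: order_base_2(size) >= 11
               rejects any size above 2^10 bytes (1 KiB)
    new check: order_base_2(size) >= 12 + 11
               rejects only sizes above 2^22 bytes (4 MiB),
               the page allocator's 2^(MAX_ORDER-1)-page limit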
2114 +diff --git a/kernel/futex.c b/kernel/futex.c
2115 +index abe04a2bb5b9..29d708d0b3d1 100644
2116 +--- a/kernel/futex.c
2117 ++++ b/kernel/futex.c
2118 +@@ -1166,11 +1166,65 @@ out_error:
2119 + return ret;
2120 + }
2121 +
2122 ++static int handle_exit_race(u32 __user *uaddr, u32 uval,
2123 ++ struct task_struct *tsk)
2124 ++{
2125 ++ u32 uval2;
2126 ++
2127 ++ /*
2128 ++ * If PF_EXITPIDONE is not yet set, then try again.
2129 ++ */
2130 ++ if (tsk && !(tsk->flags & PF_EXITPIDONE))
2131 ++ return -EAGAIN;
2132 ++
2133 ++ /*
2134 ++ * Reread the user space value to handle the following situation:
2135 ++ *
2136 ++ * CPU0 CPU1
2137 ++ *
2138 ++ * sys_exit() sys_futex()
2139 ++ * do_exit() futex_lock_pi()
2140 ++ * futex_lock_pi_atomic()
2141 ++ * exit_signals(tsk) No waiters:
2142 ++ * tsk->flags |= PF_EXITING; *uaddr == 0x00000PID
2143 ++ * mm_release(tsk) Set waiter bit
2144 ++ * exit_robust_list(tsk) { *uaddr = 0x80000PID;
2145 ++ * Set owner died attach_to_pi_owner() {
2146 ++ * *uaddr = 0xC0000000; tsk = get_task(PID);
2147 ++ * } if (!tsk->flags & PF_EXITING) {
2148 ++ * ... attach();
2149 ++ * tsk->flags |= PF_EXITPIDONE; } else {
2150 ++ * if (!(tsk->flags & PF_EXITPIDONE))
2151 ++ * return -EAGAIN;
2152 ++ * return -ESRCH; <--- FAIL
2153 ++ * }
2154 ++ *
2155 ++ * Returning ESRCH unconditionally is wrong here because the
2156 ++ * user space value has been changed by the exiting task.
2157 ++ *
2158 ++ * The same logic applies to the case where the exiting task is
2159 ++ * already gone.
2160 ++ */
2161 ++ if (get_futex_value_locked(&uval2, uaddr))
2162 ++ return -EFAULT;
2163 ++
2164 ++ /* If the user space value has changed, try again. */
2165 ++ if (uval2 != uval)
2166 ++ return -EAGAIN;
2167 ++
2168 ++ /*
2169 ++ * The exiting task did not have a robust list, the robust list was
2170 ++ * corrupted or the user space value in *uaddr is simply bogus.
2171 ++ * Give up and tell user space.
2172 ++ */
2173 ++ return -ESRCH;
2174 ++}
2175 ++
2176 + /*
2177 + * Lookup the task for the TID provided from user space and attach to
2178 + * it after doing proper sanity checks.
2179 + */
2180 +-static int attach_to_pi_owner(u32 uval, union futex_key *key,
2181 ++static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
2182 + struct futex_pi_state **ps)
2183 + {
2184 + pid_t pid = uval & FUTEX_TID_MASK;
2185 +@@ -1180,12 +1234,15 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
2186 + /*
2187 + * We are the first waiter - try to look up the real owner and attach
2188 + * the new pi_state to it, but bail out when TID = 0 [1]
2189 ++ *
2190 ++ * The !pid check is paranoid. None of the call sites should end up
2191 ++ * with pid == 0, but better safe than sorry. Let the caller retry
2192 + */
2193 + if (!pid)
2194 +- return -ESRCH;
2195 ++ return -EAGAIN;
2196 + p = futex_find_get_task(pid);
2197 + if (!p)
2198 +- return -ESRCH;
2199 ++ return handle_exit_race(uaddr, uval, NULL);
2200 +
2201 + if (unlikely(p->flags & PF_KTHREAD)) {
2202 + put_task_struct(p);
2203 +@@ -1205,7 +1262,7 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
2204 + * set, we know that the task has finished the
2205 + * cleanup:
2206 + */
2207 +- int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
2208 ++ int ret = handle_exit_race(uaddr, uval, p);
2209 +
2210 + raw_spin_unlock_irq(&p->pi_lock);
2211 + put_task_struct(p);
2212 +@@ -1262,7 +1319,7 @@ static int lookup_pi_state(u32 __user *uaddr, u32 uval,
2213 + * We are the first waiter - try to look up the owner based on
2214 + * @uval and attach to it.
2215 + */
2216 +- return attach_to_pi_owner(uval, key, ps);
2217 ++ return attach_to_pi_owner(uaddr, uval, key, ps);
2218 + }
2219 +
2220 + static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
2221 +@@ -1370,7 +1427,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
2222 + * attach to the owner. If that fails, no harm done, we only
2223 + * set the FUTEX_WAITERS bit in the user space variable.
2224 + */
2225 +- return attach_to_pi_owner(uval, key, ps);
2226 ++ return attach_to_pi_owner(uaddr, newval, key, ps);
2227 + }
2228 +
2229 + /**
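handle_exit_race() separates transient from permanent failure by re-reading the futex word: a value that changed since the caller sampled it means -EAGAIN and a clean retry of the whole operation; only a stable, unexplained value earns -ESRCH. The same re-read-and-compare shape in a userspace sketch:

    #include <errno.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static int classify_failure(atomic_uint *word, unsigned int sampled)
    {
        unsigned int now = atomic_load(word);

        if (now != sampled)
            return -EAGAIN;   /* racing update: caller retries cleanly */
        return -ESRCH;        /* stable but bogus: report to user space */
    }

    int main(void)
    {
        atomic_uint word = 42;

        printf("%d\n", classify_failure(&word, 42));  /* -ESRCH  */
        atomic_store(&word, 43);
        printf("%d\n", classify_failure(&word, 42));  /* -EAGAIN */
        return 0;
    }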
2230 +diff --git a/kernel/signal.c b/kernel/signal.c
2231 +index 04b3a621b3cc..619c6160f64f 100644
2232 +--- a/kernel/signal.c
2233 ++++ b/kernel/signal.c
2234 +@@ -2268,9 +2268,12 @@ relock:
2235 + }
2236 +
2237 + /* Has this task already been marked for death? */
2238 +- ksig->info.si_signo = signr = SIGKILL;
2239 +- if (signal_group_exit(signal))
2240 ++ if (signal_group_exit(signal)) {
2241 ++ ksig->info.si_signo = signr = SIGKILL;
2242 ++ sigdelset(&current->pending.signal, SIGKILL);
2243 ++ recalc_sigpending();
2244 + goto fatal;
2245 ++ }
2246 +
2247 + for (;;) {
2248 + struct k_sigaction *ka;
2249 +diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
2250 +index 86718c85d8d3..fdf2ea4d64ec 100644
2251 +--- a/kernel/trace/trace_uprobe.c
2252 ++++ b/kernel/trace/trace_uprobe.c
2253 +@@ -153,7 +153,14 @@ static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
2254 +
2255 + ret = strncpy_from_user(dst, src, maxlen);
2256 + if (ret == maxlen)
2257 +- dst[--ret] = '\0';
2258 ++ dst[ret - 1] = '\0';
2259 ++ else if (ret >= 0)
2260 ++ /*
2261 ++ * Include the terminating null byte. In this case it
2262 ++ * was copied by strncpy_from_user but not accounted
2263 ++ * for in ret.
2264 ++ */
2265 ++ ret++;
2266 +
2267 + if (ret < 0) { /* Failed to fetch string */
2268 + ((u8 *)get_rloc_data(dest))[0] = '\0';
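strncpy_from_user() returns the string length excluding the NUL, so the fixed code adds one when a terminator was actually copied and, on truncation, writes the terminator itself while leaving ret == maxlen. A userspace analogue of that contract and the accounting around it:

    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>

    /* analogue of strncpy_from_user(): copies up to maxlen bytes and
     * returns the length excluding the NUL (== maxlen on truncation) */
    static ssize_t copy_string(char *dst, const char *src, size_t maxlen)
    {
        size_t n = strnlen(src, maxlen);

        memcpy(dst, src, n);
        if (n < maxlen)
            dst[n] = '\0';
        return (ssize_t)n;
    }

    int main(void)
    {
        char buf[8];
        ssize_t ret = copy_string(buf, "hi", sizeof(buf));

        if (ret == (ssize_t)sizeof(buf))
            buf[ret - 1] = '\0';   /* truncated: terminate in place */
        else if (ret >= 0)
            ret++;                 /* count the NUL that was copied */

        printf("stored %zd bytes: \"%s\"\n", ret, buf); /* stored 3 */
        return 0;
    }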
2269 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
2270 +index 873032d1a083..6dbd2c54b2c9 100644
2271 +--- a/net/core/skbuff.c
2272 ++++ b/net/core/skbuff.c
2273 +@@ -4930,37 +4930,74 @@ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
2274 + EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
2275 +
2276 + /**
2277 +- * skb_gso_validate_mtu - Return in case such skb fits a given MTU
2278 ++ * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS
2279 + *
2280 +- * @skb: GSO skb
2281 +- * @mtu: MTU to validate against
2282 ++ * There are a couple of instances where we have a GSO skb, and we
2283 ++ * want to determine what size it would be after it is segmented.
2284 + *
2285 +- * skb_gso_validate_mtu validates if a given skb will fit a wanted MTU
2286 +- * once split.
2287 ++ * We might want to check:
2288 ++ * - L3+L4+payload size (e.g. IP forwarding)
2289 ++ * - L2+L3+L4+payload size (e.g. sanity check before passing to driver)
2290 ++ *
2291 ++ * This is a helper to do that correctly considering GSO_BY_FRAGS.
2292 ++ *
2293 ++ * @seg_len: The segmented length (from skb_gso_*_seglen). In the
2294 ++ * GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS].
2295 ++ *
2296 ++ * @max_len: The maximum permissible length.
2297 ++ *
2298 ++ * Returns true if the segmented length <= max length.
2299 + */
2300 +-bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu)
2301 +-{
2302 ++static inline bool skb_gso_size_check(const struct sk_buff *skb,
2303 ++ unsigned int seg_len,
2304 ++ unsigned int max_len) {
2305 + const struct skb_shared_info *shinfo = skb_shinfo(skb);
2306 + const struct sk_buff *iter;
2307 +- unsigned int hlen;
2308 +-
2309 +- hlen = skb_gso_network_seglen(skb);
2310 +
2311 + if (shinfo->gso_size != GSO_BY_FRAGS)
2312 +- return hlen <= mtu;
2313 ++ return seg_len <= max_len;
2314 +
2315 + /* Undo this so we can re-use header sizes */
2316 +- hlen -= GSO_BY_FRAGS;
2317 ++ seg_len -= GSO_BY_FRAGS;
2318 +
2319 + skb_walk_frags(skb, iter) {
2320 +- if (hlen + skb_headlen(iter) > mtu)
2321 ++ if (seg_len + skb_headlen(iter) > max_len)
2322 + return false;
2323 + }
2324 +
2325 + return true;
2326 + }
2327 ++
2328 ++/**
2329 ++ * skb_gso_validate_mtu - Return in case such skb fits a given MTU
2330 ++ *
2331 ++ * @skb: GSO skb
2332 ++ * @mtu: MTU to validate against
2333 ++ *
2334 ++ * skb_gso_validate_mtu validates if a given skb will fit a wanted MTU
2335 ++ * once split.
2336 ++ */
2337 ++bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu)
2338 ++{
2339 ++ return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);
2340 ++}
2341 + EXPORT_SYMBOL_GPL(skb_gso_validate_mtu);
2342 +
2343 ++/**
2344 ++ * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length?
2345 ++ *
2346 ++ * @skb: GSO skb
2347 ++ * @len: length to validate against
2348 ++ *
2349 ++ * skb_gso_validate_mac_len validates if a given skb will fit a wanted
2350 ++ * length once split, including L2, L3 and L4 headers and the payload.
2351 ++ */
2352 ++bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len)
2353 ++{
2354 ++ return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len);
2355 ++}
2356 ++EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
2357 ++
2358 + static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
2359 + {
2360 + int mac_len;
2361 +diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
2362 +index b36ecb58aa6e..107cc76b6e24 100644
2363 +--- a/net/sched/sch_tbf.c
2364 ++++ b/net/sched/sch_tbf.c
2365 +@@ -142,16 +142,6 @@ static u64 psched_ns_t2l(const struct psched_ratecfg *r,
2366 + return len;
2367 + }
2368 +
2369 +-/*
2370 +- * Return length of individual segments of a gso packet,
2371 +- * including all headers (MAC, IP, TCP/UDP)
2372 +- */
2373 +-static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
2374 +-{
2375 +- unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
2376 +- return hdr_len + skb_gso_transport_seglen(skb);
2377 +-}
2378 +-
2379 + /* GSO packet is too big, segment it so that tbf can transmit
2380 + * each segment in time
2381 + */
2382 +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
2383 +index fb1cec46380d..d14516f31679 100644
2384 +--- a/sound/pci/hda/patch_conexant.c
2385 ++++ b/sound/pci/hda/patch_conexant.c
2386 +@@ -962,6 +962,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
2387 + SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
2388 + SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
2389 + SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
2390 ++ SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
2391 + SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
2392 + SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
2393 + SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
2394 +diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
2395 +index d8a46d46bcd2..b1a1eb1f65aa 100644
2396 +--- a/sound/usb/pcm.c
2397 ++++ b/sound/usb/pcm.c
2398 +@@ -313,6 +313,9 @@ static int search_roland_implicit_fb(struct usb_device *dev, int ifnum,
2399 + return 0;
2400 + }
2401 +
2402 ++/* Setup an implicit feedback endpoint from a quirk. Returns 0 if no quirk
2403 ++ * applies. Returns 1 if a quirk was found.
2404 ++ */
2405 + static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
2406 + struct usb_device *dev,
2407 + struct usb_interface_descriptor *altsd,
2408 +@@ -391,7 +394,7 @@ add_sync_ep:
2409 +
2410 + subs->data_endpoint->sync_master = subs->sync_endpoint;
2411 +
2412 +- return 0;
2413 ++ return 1;
2414 + }
2415 +
2416 + static int set_sync_endpoint(struct snd_usb_substream *subs,
2417 +@@ -430,6 +433,10 @@ static int set_sync_endpoint(struct snd_usb_substream *subs,
2418 + if (err < 0)
2419 + return err;
2420 +
2421 ++ /* endpoint set by quirk */
2422 ++ if (err > 0)
2423 ++ return 0;
2424 ++
2425 + if (altsd->bNumEndpoints < 2)
2426 + return 0;
2427 +
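set_sync_ep_implicit_fb_quirk() now follows the common tri-state convention: negative is an error, 0 means no quirk matched and probing continues, positive means the endpoint was set and the caller stops early. The caller shape, sketched with illustrative names:

    #include <stdio.h>

    /* <0 error, 0 not handled, >0 handled -- the convention the
     * caller in set_sync_endpoint() now relies on */
    static int try_quirk(int id) { return id == 7 ? 1 : 0; }

    static int set_sync_endpoint(int id)
    {
        int err = try_quirk(id);

        if (err < 0)
            return err;      /* propagate failure            */
        if (err > 0)
            return 0;        /* quirk set the endpoint; done */

        printf("fall through to generic probing for %d\n", id);
        return 0;
    }

    int main(void)
    {
        set_sync_endpoint(7);
        set_sync_endpoint(3);
        return 0;
    }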
2428 +diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
2429 +index 30a950c9d407..068d463e5cbf 100644
2430 +--- a/tools/perf/tests/shell/lib/probe_vfs_getname.sh
2431 ++++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
2432 +@@ -13,7 +13,8 @@ add_probe_vfs_getname() {
2433 + local verbose=$1
2434 + if [ $had_vfs_getname -eq 1 ] ; then
2435 + line=$(perf probe -L getname_flags 2>&1 | egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/')
2436 +- perf probe $verbose "vfs_getname=getname_flags:${line} pathname=result->name:string"
2437 ++ perf probe -q "vfs_getname=getname_flags:${line} pathname=result->name:string" || \
2438 ++ perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:string"
2439 + fi
2440 + }
2441 +