
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Mon, 30 Apr 2018 10:29:06
Message-Id: 1525084133.9a220db3d81c396e7413cfea5513377e52254613.mpagano@gentoo
commit: 9a220db3d81c396e7413cfea5513377e52254613
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Apr 30 10:28:53 2018 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Apr 30 10:28:53 2018 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9a220db3

Linux patch 4.9.97

 0000_README | 4 +
 1096_linux-4.9.97.patch | 3632 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3636 insertions(+)

14 diff --git a/0000_README b/0000_README
15 index 0d1f889..efef388 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -427,6 +427,10 @@ Patch: 1095_linux-4.9.96.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.9.96
21
22 +Patch: 1096_linux-4.9.97.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.9.97
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1096_linux-4.9.97.patch b/1096_linux-4.9.97.patch
31 new file mode 100644
32 index 0000000..26c13e0
33 --- /dev/null
34 +++ b/1096_linux-4.9.97.patch
35 @@ -0,0 +1,3632 @@
36 +diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
37 +index 466c039c622b..5f9e51436a99 100644
38 +--- a/Documentation/kernel-parameters.txt
39 ++++ b/Documentation/kernel-parameters.txt
40 +@@ -2640,6 +2640,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
41 +
42 + noalign [KNL,ARM]
43 +
44 ++ noaltinstr [S390] Disables alternative instructions patching
45 ++ (CPU alternatives feature).
46 ++
47 + noapic [SMP,APIC] Tells the kernel to not make use of any
48 + IOAPICs that may be present in the system.
49 +
50 +diff --git a/Makefile b/Makefile
51 +index 50ae573e8951..ee3e943c3bd9 100644
52 +--- a/Makefile
53 ++++ b/Makefile
54 +@@ -1,6 +1,6 @@
55 + VERSION = 4
56 + PATCHLEVEL = 9
57 +-SUBLEVEL = 96
58 ++SUBLEVEL = 97
59 + EXTRAVERSION =
60 + NAME = Roaring Lionus
61 +
62 +diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
63 +index 2d2fd79ced9d..34fbbf8fdeaa 100644
64 +--- a/arch/mips/Kconfig
65 ++++ b/arch/mips/Kconfig
66 +@@ -95,6 +95,7 @@ config MIPS_GENERIC
67 + select PCI_DRIVERS_GENERIC
68 + select PINCTRL
69 + select SMP_UP if SMP
70 ++ select SWAP_IO_SPACE
71 + select SYS_HAS_CPU_MIPS32_R1
72 + select SYS_HAS_CPU_MIPS32_R2
73 + select SYS_HAS_CPU_MIPS32_R6
74 +diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
75 +index 9aa0d04c9dcc..1c4a595e8224 100644
76 +--- a/arch/s390/Kconfig
77 ++++ b/arch/s390/Kconfig
78 +@@ -118,6 +118,7 @@ config S390
79 + select GENERIC_CLOCKEVENTS
80 + select GENERIC_CPU_AUTOPROBE
81 + select GENERIC_CPU_DEVICES if !SMP
82 ++ select GENERIC_CPU_VULNERABILITIES
83 + select GENERIC_FIND_FIRST_BIT
84 + select GENERIC_SMP_IDLE_THREAD
85 + select GENERIC_TIME_VSYSCALL
86 +@@ -704,6 +705,51 @@ config SECCOMP
87 +
88 + If unsure, say Y.
89 +
90 ++config KERNEL_NOBP
91 ++ def_bool n
92 ++ prompt "Enable modified branch prediction for the kernel by default"
93 ++ help
94 ++ If this option is selected the kernel will switch to a modified
95 ++ branch prediction mode if the firmware interface is available.
96 ++ The modified branch prediction mode improves the behaviour in
97 ++ regard to speculative execution.
98 ++
99 ++ With the option enabled the kernel parameter "nobp=0" or "nospec"
100 ++ can be used to run the kernel in the normal branch prediction mode.
101 ++
102 ++ With the option disabled the modified branch prediction mode is
103 ++ enabled with the "nobp=1" kernel parameter.
104 ++
105 ++ If unsure, say N.
106 ++
107 ++config EXPOLINE
108 ++ def_bool n
109 ++ prompt "Avoid speculative indirect branches in the kernel"
110 ++ help
111 ++ Compile the kernel with the expoline compiler options to guard
112 ++ against kernel-to-user data leaks by avoiding speculative indirect
113 ++ branches.
114 ++ Requires a compiler with -mindirect-branch=thunk support for full
115 ++ protection. The kernel may run slower.
116 ++
117 ++ If unsure, say N.
118 ++
119 ++choice
120 ++ prompt "Expoline default"
121 ++ depends on EXPOLINE
122 ++ default EXPOLINE_FULL
123 ++
124 ++config EXPOLINE_OFF
125 ++ bool "spectre_v2=off"
126 ++
127 ++config EXPOLINE_AUTO
128 ++ bool "spectre_v2=auto"
129 ++
130 ++config EXPOLINE_FULL
131 ++ bool "spectre_v2=on"
132 ++
133 ++endchoice
134 ++
135 + endmenu
136 +
137 + menu "Power Management"
138 +@@ -753,6 +799,7 @@ config PFAULT
139 + config SHARED_KERNEL
140 + bool "VM shared kernel support"
141 + depends on !JUMP_LABEL
142 ++ depends on !ALTERNATIVES
143 + help
144 + Select this option, if you want to share the text segment of the
145 + Linux kernel between different VM guests. This reduces memory
146 +diff --git a/arch/s390/Makefile b/arch/s390/Makefile
147 +index 54e00526b8df..bef67c0f63e2 100644
148 +--- a/arch/s390/Makefile
149 ++++ b/arch/s390/Makefile
150 +@@ -79,6 +79,16 @@ ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y)
151 + cflags-$(CONFIG_WARN_DYNAMIC_STACK) += -mwarn-dynamicstack
152 + endif
153 +
154 ++ifdef CONFIG_EXPOLINE
155 ++ ifeq ($(call cc-option-yn,$(CC_FLAGS_MARCH) -mindirect-branch=thunk),y)
156 ++ CC_FLAGS_EXPOLINE := -mindirect-branch=thunk
157 ++ CC_FLAGS_EXPOLINE += -mfunction-return=thunk
158 ++ CC_FLAGS_EXPOLINE += -mindirect-branch-table
159 ++ export CC_FLAGS_EXPOLINE
160 ++ cflags-y += $(CC_FLAGS_EXPOLINE) -DCC_USING_EXPOLINE
161 ++ endif
162 ++endif
163 ++
164 + ifdef CONFIG_FUNCTION_TRACER
165 + # make use of hotpatch feature if the compiler supports it
166 + cc_hotpatch := -mhotpatch=0,3
167 +diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h
168 +new file mode 100644
169 +index 000000000000..a72002056b54
170 +--- /dev/null
171 ++++ b/arch/s390/include/asm/alternative.h
172 +@@ -0,0 +1,149 @@
173 ++#ifndef _ASM_S390_ALTERNATIVE_H
174 ++#define _ASM_S390_ALTERNATIVE_H
175 ++
176 ++#ifndef __ASSEMBLY__
177 ++
178 ++#include <linux/types.h>
179 ++#include <linux/stddef.h>
180 ++#include <linux/stringify.h>
181 ++
182 ++struct alt_instr {
183 ++ s32 instr_offset; /* original instruction */
184 ++ s32 repl_offset; /* offset to replacement instruction */
185 ++ u16 facility; /* facility bit set for replacement */
186 ++ u8 instrlen; /* length of original instruction */
187 ++ u8 replacementlen; /* length of new instruction */
188 ++} __packed;
189 ++
190 ++void apply_alternative_instructions(void);
191 ++void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
192 ++
193 ++/*
194 ++ * |661: |662: |6620 |663:
195 ++ * +-----------+---------------------+
196 ++ * | oldinstr | oldinstr_padding |
197 ++ * | +----------+----------+
198 ++ * | | | |
199 ++ * | | >6 bytes |6/4/2 nops|
200 ++ * | |6 bytes jg----------->
201 ++ * +-----------+---------------------+
202 ++ * ^^ static padding ^^
203 ++ *
204 ++ * .altinstr_replacement section
205 ++ * +---------------------+-----------+
206 ++ * |6641: |6651:
207 ++ * | alternative instr 1 |
208 ++ * +-----------+---------+- - - - - -+
209 ++ * |6642: |6652: |
210 ++ * | alternative instr 2 | padding
211 ++ * +---------------------+- - - - - -+
212 ++ * ^ runtime ^
213 ++ *
214 ++ * .altinstructions section
215 ++ * +---------------------------------+
216 ++ * | alt_instr entries for each |
217 ++ * | alternative instr |
218 ++ * +---------------------------------+
219 ++ */
220 ++
221 ++#define b_altinstr(num) "664"#num
222 ++#define e_altinstr(num) "665"#num
223 ++
224 ++#define e_oldinstr_pad_end "663"
225 ++#define oldinstr_len "662b-661b"
226 ++#define oldinstr_total_len e_oldinstr_pad_end"b-661b"
227 ++#define altinstr_len(num) e_altinstr(num)"b-"b_altinstr(num)"b"
228 ++#define oldinstr_pad_len(num) \
229 ++ "-(((" altinstr_len(num) ")-(" oldinstr_len ")) > 0) * " \
230 ++ "((" altinstr_len(num) ")-(" oldinstr_len "))"
231 ++
232 ++#define INSTR_LEN_SANITY_CHECK(len) \
233 ++ ".if " len " > 254\n" \
234 ++ "\t.error \"cpu alternatives does not support instructions " \
235 ++ "blocks > 254 bytes\"\n" \
236 ++ ".endif\n" \
237 ++ ".if (" len ") %% 2\n" \
238 ++ "\t.error \"cpu alternatives instructions length is odd\"\n" \
239 ++ ".endif\n"
240 ++
241 ++#define OLDINSTR_PADDING(oldinstr, num) \
242 ++ ".if " oldinstr_pad_len(num) " > 6\n" \
243 ++ "\tjg " e_oldinstr_pad_end "f\n" \
244 ++ "6620:\n" \
245 ++ "\t.fill (" oldinstr_pad_len(num) " - (6620b-662b)) / 2, 2, 0x0700\n" \
246 ++ ".else\n" \
247 ++ "\t.fill " oldinstr_pad_len(num) " / 6, 6, 0xc0040000\n" \
248 ++ "\t.fill " oldinstr_pad_len(num) " %% 6 / 4, 4, 0x47000000\n" \
249 ++ "\t.fill " oldinstr_pad_len(num) " %% 6 %% 4 / 2, 2, 0x0700\n" \
250 ++ ".endif\n"
251 ++
252 ++#define OLDINSTR(oldinstr, num) \
253 ++ "661:\n\t" oldinstr "\n662:\n" \
254 ++ OLDINSTR_PADDING(oldinstr, num) \
255 ++ e_oldinstr_pad_end ":\n" \
256 ++ INSTR_LEN_SANITY_CHECK(oldinstr_len)
257 ++
258 ++#define OLDINSTR_2(oldinstr, num1, num2) \
259 ++ "661:\n\t" oldinstr "\n662:\n" \
260 ++ ".if " altinstr_len(num1) " < " altinstr_len(num2) "\n" \
261 ++ OLDINSTR_PADDING(oldinstr, num2) \
262 ++ ".else\n" \
263 ++ OLDINSTR_PADDING(oldinstr, num1) \
264 ++ ".endif\n" \
265 ++ e_oldinstr_pad_end ":\n" \
266 ++ INSTR_LEN_SANITY_CHECK(oldinstr_len)
267 ++
268 ++#define ALTINSTR_ENTRY(facility, num) \
269 ++ "\t.long 661b - .\n" /* old instruction */ \
270 ++ "\t.long " b_altinstr(num)"b - .\n" /* alt instruction */ \
271 ++ "\t.word " __stringify(facility) "\n" /* facility bit */ \
272 ++ "\t.byte " oldinstr_total_len "\n" /* source len */ \
273 ++ "\t.byte " altinstr_len(num) "\n" /* alt instruction len */
274 ++
275 ++#define ALTINSTR_REPLACEMENT(altinstr, num) /* replacement */ \
276 ++ b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n" \
277 ++ INSTR_LEN_SANITY_CHECK(altinstr_len(num))
278 ++
279 ++/* alternative assembly primitive: */
280 ++#define ALTERNATIVE(oldinstr, altinstr, facility) \
281 ++ ".pushsection .altinstr_replacement, \"ax\"\n" \
282 ++ ALTINSTR_REPLACEMENT(altinstr, 1) \
283 ++ ".popsection\n" \
284 ++ OLDINSTR(oldinstr, 1) \
285 ++ ".pushsection .altinstructions,\"a\"\n" \
286 ++ ALTINSTR_ENTRY(facility, 1) \
287 ++ ".popsection\n"
288 ++
289 ++#define ALTERNATIVE_2(oldinstr, altinstr1, facility1, altinstr2, facility2)\
290 ++ ".pushsection .altinstr_replacement, \"ax\"\n" \
291 ++ ALTINSTR_REPLACEMENT(altinstr1, 1) \
292 ++ ALTINSTR_REPLACEMENT(altinstr2, 2) \
293 ++ ".popsection\n" \
294 ++ OLDINSTR_2(oldinstr, 1, 2) \
295 ++ ".pushsection .altinstructions,\"a\"\n" \
296 ++ ALTINSTR_ENTRY(facility1, 1) \
297 ++ ALTINSTR_ENTRY(facility2, 2) \
298 ++ ".popsection\n"
299 ++
300 ++/*
301 ++ * Alternative instructions for different CPU types or capabilities.
302 ++ *
303 ++ * This allows to use optimized instructions even on generic binary
304 ++ * kernels.
305 ++ *
306 ++ * oldinstr is padded with jump and nops at compile time if altinstr is
307 ++ * longer. altinstr is padded with jump and nops at run-time during patching.
308 ++ *
309 ++ * For non barrier like inlines please define new variants
310 ++ * without volatile and memory clobber.
311 ++ */
312 ++#define alternative(oldinstr, altinstr, facility) \
313 ++ asm volatile(ALTERNATIVE(oldinstr, altinstr, facility) : : : "memory")
314 ++
315 ++#define alternative_2(oldinstr, altinstr1, facility1, altinstr2, facility2) \
316 ++ asm volatile(ALTERNATIVE_2(oldinstr, altinstr1, facility1, \
317 ++ altinstr2, facility2) ::: "memory")
318 ++
319 ++#endif /* __ASSEMBLY__ */
320 ++
321 ++#endif /* _ASM_S390_ALTERNATIVE_H */
322 +diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
323 +index 5c8db3ce61c8..03b2e5bf1206 100644
324 +--- a/arch/s390/include/asm/barrier.h
325 ++++ b/arch/s390/include/asm/barrier.h
326 +@@ -48,6 +48,30 @@ do { \
327 + #define __smp_mb__before_atomic() barrier()
328 + #define __smp_mb__after_atomic() barrier()
329 +
330 ++/**
331 ++ * array_index_mask_nospec - generate a mask for array_idx() that is
332 ++ * ~0UL when the bounds check succeeds and 0 otherwise
333 ++ * @index: array element index
334 ++ * @size: number of elements in array
335 ++ */
336 ++#define array_index_mask_nospec array_index_mask_nospec
337 ++static inline unsigned long array_index_mask_nospec(unsigned long index,
338 ++ unsigned long size)
339 ++{
340 ++ unsigned long mask;
341 ++
342 ++ if (__builtin_constant_p(size) && size > 0) {
343 ++ asm(" clgr %2,%1\n"
344 ++ " slbgr %0,%0\n"
345 ++ :"=d" (mask) : "d" (size-1), "d" (index) :"cc");
346 ++ return mask;
347 ++ }
348 ++ asm(" clgr %1,%2\n"
349 ++ " slbgr %0,%0\n"
350 ++ :"=d" (mask) : "d" (size), "d" (index) :"cc");
351 ++ return ~mask;
352 ++}
353 ++
354 + #include <asm-generic/barrier.h>
355 +
356 + #endif /* __ASM_BARRIER_H */
357 +diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
358 +index 09b406db7529..7a8a1457dbb8 100644
359 +--- a/arch/s390/include/asm/facility.h
360 ++++ b/arch/s390/include/asm/facility.h
361 +@@ -17,6 +17,24 @@
362 +
363 + #define MAX_FACILITY_BIT (256*8) /* stfle_fac_list has 256 bytes */
364 +
365 ++static inline void __set_facility(unsigned long nr, void *facilities)
366 ++{
367 ++ unsigned char *ptr = (unsigned char *) facilities;
368 ++
369 ++ if (nr >= MAX_FACILITY_BIT)
370 ++ return;
371 ++ ptr[nr >> 3] |= 0x80 >> (nr & 7);
372 ++}
373 ++
374 ++static inline void __clear_facility(unsigned long nr, void *facilities)
375 ++{
376 ++ unsigned char *ptr = (unsigned char *) facilities;
377 ++
378 ++ if (nr >= MAX_FACILITY_BIT)
379 ++ return;
380 ++ ptr[nr >> 3] &= ~(0x80 >> (nr & 7));
381 ++}
382 ++
383 + static inline int __test_facility(unsigned long nr, void *facilities)
384 + {
385 + unsigned char *ptr;
386 +diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
387 +index a41faf34b034..5792590d0e7c 100644
388 +--- a/arch/s390/include/asm/kvm_host.h
389 ++++ b/arch/s390/include/asm/kvm_host.h
390 +@@ -181,7 +181,8 @@ struct kvm_s390_sie_block {
391 + __u16 ipa; /* 0x0056 */
392 + __u32 ipb; /* 0x0058 */
393 + __u32 scaoh; /* 0x005c */
394 +- __u8 reserved60; /* 0x0060 */
395 ++#define FPF_BPBC 0x20
396 ++ __u8 fpf; /* 0x0060 */
397 + __u8 ecb; /* 0x0061 */
398 + __u8 ecb2; /* 0x0062 */
399 + #define ECB3_AES 0x04
400 +diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
401 +index 7b93b78f423c..ad4e0cee1557 100644
402 +--- a/arch/s390/include/asm/lowcore.h
403 ++++ b/arch/s390/include/asm/lowcore.h
404 +@@ -135,7 +135,9 @@ struct lowcore {
405 + /* Per cpu primary space access list */
406 + __u32 paste[16]; /* 0x0400 */
407 +
408 +- __u8 pad_0x04c0[0x0e00-0x0440]; /* 0x0440 */
409 ++ /* br %r1 trampoline */
410 ++ __u16 br_r1_trampoline; /* 0x0440 */
411 ++ __u8 pad_0x0442[0x0e00-0x0442]; /* 0x0442 */
412 +
413 + /*
414 + * 0xe00 contains the address of the IPL Parameter Information
415 +@@ -150,7 +152,8 @@ struct lowcore {
416 + __u8 pad_0x0e20[0x0f00-0x0e20]; /* 0x0e20 */
417 +
418 + /* Extended facility list */
419 +- __u64 stfle_fac_list[32]; /* 0x0f00 */
420 ++ __u64 stfle_fac_list[16]; /* 0x0f00 */
421 ++ __u64 alt_stfle_fac_list[16]; /* 0x0f80 */
422 + __u8 pad_0x1000[0x11b0-0x1000]; /* 0x1000 */
423 +
424 + /* Pointer to vector register save area */
425 +diff --git a/arch/s390/include/asm/nospec-branch.h b/arch/s390/include/asm/nospec-branch.h
426 +new file mode 100644
427 +index 000000000000..b4bd8c41e9d3
428 +--- /dev/null
429 ++++ b/arch/s390/include/asm/nospec-branch.h
430 +@@ -0,0 +1,17 @@
431 ++/* SPDX-License-Identifier: GPL-2.0 */
432 ++#ifndef _ASM_S390_EXPOLINE_H
433 ++#define _ASM_S390_EXPOLINE_H
434 ++
435 ++#ifndef __ASSEMBLY__
436 ++
437 ++#include <linux/types.h>
438 ++
439 ++extern int nospec_disable;
440 ++
441 ++void nospec_init_branches(void);
442 ++void nospec_auto_detect(void);
443 ++void nospec_revert(s32 *start, s32 *end);
444 ++
445 ++#endif /* __ASSEMBLY__ */
446 ++
447 ++#endif /* _ASM_S390_EXPOLINE_H */
448 +diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
449 +index 6bcbbece082b..d5842126ec70 100644
450 +--- a/arch/s390/include/asm/processor.h
451 ++++ b/arch/s390/include/asm/processor.h
452 +@@ -84,6 +84,7 @@ void cpu_detect_mhz_feature(void);
453 + extern const struct seq_operations cpuinfo_op;
454 + extern int sysctl_ieee_emulation_warnings;
455 + extern void execve_tail(void);
456 ++extern void __bpon(void);
457 +
458 + /*
459 + * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
460 +@@ -359,6 +360,9 @@ extern void memcpy_absolute(void *, void *, size_t);
461 + memcpy_absolute(&(dest), &__tmp, sizeof(__tmp)); \
462 + }
463 +
464 ++extern int s390_isolate_bp(void);
465 ++extern int s390_isolate_bp_guest(void);
466 ++
467 + #endif /* __ASSEMBLY__ */
468 +
469 + #endif /* __ASM_S390_PROCESSOR_H */
470 +diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
471 +index f15c0398c363..84f2ae44b4e9 100644
472 +--- a/arch/s390/include/asm/thread_info.h
473 ++++ b/arch/s390/include/asm/thread_info.h
474 +@@ -79,6 +79,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
475 + #define TIF_SECCOMP 5 /* secure computing */
476 + #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
477 + #define TIF_UPROBE 7 /* breakpointed or single-stepping */
478 ++#define TIF_ISOLATE_BP 8 /* Run process with isolated BP */
479 ++#define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */
480 + #define TIF_31BIT 16 /* 32bit process */
481 + #define TIF_MEMDIE 17 /* is terminating due to OOM killer */
482 + #define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal() */
483 +@@ -94,6 +96,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
484 + #define _TIF_SECCOMP _BITUL(TIF_SECCOMP)
485 + #define _TIF_SYSCALL_TRACEPOINT _BITUL(TIF_SYSCALL_TRACEPOINT)
486 + #define _TIF_UPROBE _BITUL(TIF_UPROBE)
487 ++#define _TIF_ISOLATE_BP _BITUL(TIF_ISOLATE_BP)
488 ++#define _TIF_ISOLATE_BP_GUEST _BITUL(TIF_ISOLATE_BP_GUEST)
489 + #define _TIF_31BIT _BITUL(TIF_31BIT)
490 + #define _TIF_SINGLE_STEP _BITUL(TIF_SINGLE_STEP)
491 +
492 +diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
493 +index a2ffec4139ad..81c02e198527 100644
494 +--- a/arch/s390/include/uapi/asm/kvm.h
495 ++++ b/arch/s390/include/uapi/asm/kvm.h
496 +@@ -197,6 +197,7 @@ struct kvm_guest_debug_arch {
497 + #define KVM_SYNC_VRS (1UL << 6)
498 + #define KVM_SYNC_RICCB (1UL << 7)
499 + #define KVM_SYNC_FPRS (1UL << 8)
500 ++#define KVM_SYNC_BPBC (1UL << 10)
501 + /* definition of registers in kvm_run */
502 + struct kvm_sync_regs {
503 + __u64 prefix; /* prefix register */
504 +@@ -217,7 +218,9 @@ struct kvm_sync_regs {
505 + };
506 + __u8 reserved[512]; /* for future vector expansion */
507 + __u32 fpc; /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */
508 +- __u8 padding[52]; /* riccb needs to be 64byte aligned */
509 ++ __u8 bpbc : 1; /* bp mode */
510 ++ __u8 reserved2 : 7;
511 ++ __u8 padding1[51]; /* riccb needs to be 64byte aligned */
512 + __u8 riccb[64]; /* runtime instrumentation controls block */
513 + };
514 +
515 +diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
516 +index 1f0fe98f6db9..0501cac2ab95 100644
517 +--- a/arch/s390/kernel/Makefile
518 ++++ b/arch/s390/kernel/Makefile
519 +@@ -42,6 +42,7 @@ ifneq ($(CC_FLAGS_MARCH),-march=z900)
520 + CFLAGS_REMOVE_sclp.o += $(CC_FLAGS_MARCH)
521 + CFLAGS_sclp.o += -march=z900
522 + CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
523 ++CFLAGS_REMOVE_als.o += $(CC_FLAGS_EXPOLINE)
524 + CFLAGS_als.o += -march=z900
525 + AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
526 + AFLAGS_head.o += -march=z900
527 +@@ -57,10 +58,13 @@ obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
528 + obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o als.o
529 + obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
530 + obj-y += runtime_instr.o cache.o fpu.o dumpstack.o
531 +-obj-y += entry.o reipl.o relocate_kernel.o
532 ++obj-y += entry.o reipl.o relocate_kernel.o alternative.o
533 ++obj-y += nospec-branch.o
534 +
535 + extra-y += head.o head64.o vmlinux.lds
536 +
537 ++CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE)
538 ++
539 + obj-$(CONFIG_MODULES) += module.o
540 + obj-$(CONFIG_SMP) += smp.o
541 + obj-$(CONFIG_SCHED_TOPOLOGY) += topology.o
542 +diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c
543 +new file mode 100644
544 +index 000000000000..b57b293998dc
545 +--- /dev/null
546 ++++ b/arch/s390/kernel/alternative.c
547 +@@ -0,0 +1,112 @@
548 ++#include <linux/module.h>
549 ++#include <asm/alternative.h>
550 ++#include <asm/facility.h>
551 ++#include <asm/nospec-branch.h>
552 ++
553 ++#define MAX_PATCH_LEN (255 - 1)
554 ++
555 ++static int __initdata_or_module alt_instr_disabled;
556 ++
557 ++static int __init disable_alternative_instructions(char *str)
558 ++{
559 ++ alt_instr_disabled = 1;
560 ++ return 0;
561 ++}
562 ++
563 ++early_param("noaltinstr", disable_alternative_instructions);
564 ++
565 ++struct brcl_insn {
566 ++ u16 opc;
567 ++ s32 disp;
568 ++} __packed;
569 ++
570 ++static u16 __initdata_or_module nop16 = 0x0700;
571 ++static u32 __initdata_or_module nop32 = 0x47000000;
572 ++static struct brcl_insn __initdata_or_module nop48 = {
573 ++ 0xc004, 0
574 ++};
575 ++
576 ++static const void *nops[] __initdata_or_module = {
577 ++ &nop16,
578 ++ &nop32,
579 ++ &nop48
580 ++};
581 ++
582 ++static void __init_or_module add_jump_padding(void *insns, unsigned int len)
583 ++{
584 ++ struct brcl_insn brcl = {
585 ++ 0xc0f4,
586 ++ len / 2
587 ++ };
588 ++
589 ++ memcpy(insns, &brcl, sizeof(brcl));
590 ++ insns += sizeof(brcl);
591 ++ len -= sizeof(brcl);
592 ++
593 ++ while (len > 0) {
594 ++ memcpy(insns, &nop16, 2);
595 ++ insns += 2;
596 ++ len -= 2;
597 ++ }
598 ++}
599 ++
600 ++static void __init_or_module add_padding(void *insns, unsigned int len)
601 ++{
602 ++ if (len > 6)
603 ++ add_jump_padding(insns, len);
604 ++ else if (len >= 2)
605 ++ memcpy(insns, nops[len / 2 - 1], len);
606 ++}
607 ++
608 ++static void __init_or_module __apply_alternatives(struct alt_instr *start,
609 ++ struct alt_instr *end)
610 ++{
611 ++ struct alt_instr *a;
612 ++ u8 *instr, *replacement;
613 ++ u8 insnbuf[MAX_PATCH_LEN];
614 ++
615 ++ /*
616 ++ * The scan order should be from start to end. A later scanned
617 ++ * alternative code can overwrite previously scanned alternative code.
618 ++ */
619 ++ for (a = start; a < end; a++) {
620 ++ int insnbuf_sz = 0;
621 ++
622 ++ instr = (u8 *)&a->instr_offset + a->instr_offset;
623 ++ replacement = (u8 *)&a->repl_offset + a->repl_offset;
624 ++
625 ++ if (!__test_facility(a->facility,
626 ++ S390_lowcore.alt_stfle_fac_list))
627 ++ continue;
628 ++
629 ++ if (unlikely(a->instrlen % 2 || a->replacementlen % 2)) {
630 ++ WARN_ONCE(1, "cpu alternatives instructions length is "
631 ++ "odd, skipping patching\n");
632 ++ continue;
633 ++ }
634 ++
635 ++ memcpy(insnbuf, replacement, a->replacementlen);
636 ++ insnbuf_sz = a->replacementlen;
637 ++
638 ++ if (a->instrlen > a->replacementlen) {
639 ++ add_padding(insnbuf + a->replacementlen,
640 ++ a->instrlen - a->replacementlen);
641 ++ insnbuf_sz += a->instrlen - a->replacementlen;
642 ++ }
643 ++
644 ++ s390_kernel_write(instr, insnbuf, insnbuf_sz);
645 ++ }
646 ++}
647 ++
648 ++void __init_or_module apply_alternatives(struct alt_instr *start,
649 ++ struct alt_instr *end)
650 ++{
651 ++ if (!alt_instr_disabled)
652 ++ __apply_alternatives(start, end);
653 ++}
654 ++
655 ++extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
656 ++void __init apply_alternative_instructions(void)
657 ++{
658 ++ apply_alternatives(__alt_instructions, __alt_instructions_end);
659 ++}
660 +diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
661 +index 62578989c74d..0c7a7d5d95f1 100644
662 +--- a/arch/s390/kernel/early.c
663 ++++ b/arch/s390/kernel/early.c
664 +@@ -299,6 +299,11 @@ static noinline __init void setup_facility_list(void)
665 + {
666 + stfle(S390_lowcore.stfle_fac_list,
667 + ARRAY_SIZE(S390_lowcore.stfle_fac_list));
668 ++ memcpy(S390_lowcore.alt_stfle_fac_list,
669 ++ S390_lowcore.stfle_fac_list,
670 ++ sizeof(S390_lowcore.alt_stfle_fac_list));
671 ++ if (!IS_ENABLED(CONFIG_KERNEL_NOBP))
672 ++ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
673 + }
674 +
675 + static __init void detect_diag9c(void)
676 +diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
677 +index 3bc2825173ef..1996afeb2e81 100644
678 +--- a/arch/s390/kernel/entry.S
679 ++++ b/arch/s390/kernel/entry.S
680 +@@ -105,6 +105,7 @@ _PIF_WORK = (_PIF_PER_TRAP)
681 + j 3f
682 + 1: LAST_BREAK %r14
683 + UPDATE_VTIME %r14,%r15,\timer
684 ++ BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
685 + 2: lg %r15,__LC_ASYNC_STACK # load async stack
686 + 3: la %r11,STACK_FRAME_OVERHEAD(%r15)
687 + .endm
688 +@@ -163,6 +164,130 @@ _PIF_WORK = (_PIF_PER_TRAP)
689 + tm off+\addr, \mask
690 + .endm
691 +
692 ++ .macro BPOFF
693 ++ .pushsection .altinstr_replacement, "ax"
694 ++660: .long 0xb2e8c000
695 ++ .popsection
696 ++661: .long 0x47000000
697 ++ .pushsection .altinstructions, "a"
698 ++ .long 661b - .
699 ++ .long 660b - .
700 ++ .word 82
701 ++ .byte 4
702 ++ .byte 4
703 ++ .popsection
704 ++ .endm
705 ++
706 ++ .macro BPON
707 ++ .pushsection .altinstr_replacement, "ax"
708 ++662: .long 0xb2e8d000
709 ++ .popsection
710 ++663: .long 0x47000000
711 ++ .pushsection .altinstructions, "a"
712 ++ .long 663b - .
713 ++ .long 662b - .
714 ++ .word 82
715 ++ .byte 4
716 ++ .byte 4
717 ++ .popsection
718 ++ .endm
719 ++
720 ++ .macro BPENTER tif_ptr,tif_mask
721 ++ .pushsection .altinstr_replacement, "ax"
722 ++662: .word 0xc004, 0x0000, 0x0000 # 6 byte nop
723 ++ .word 0xc004, 0x0000, 0x0000 # 6 byte nop
724 ++ .popsection
725 ++664: TSTMSK \tif_ptr,\tif_mask
726 ++ jz . + 8
727 ++ .long 0xb2e8d000
728 ++ .pushsection .altinstructions, "a"
729 ++ .long 664b - .
730 ++ .long 662b - .
731 ++ .word 82
732 ++ .byte 12
733 ++ .byte 12
734 ++ .popsection
735 ++ .endm
736 ++
737 ++ .macro BPEXIT tif_ptr,tif_mask
738 ++ TSTMSK \tif_ptr,\tif_mask
739 ++ .pushsection .altinstr_replacement, "ax"
740 ++662: jnz . + 8
741 ++ .long 0xb2e8d000
742 ++ .popsection
743 ++664: jz . + 8
744 ++ .long 0xb2e8c000
745 ++ .pushsection .altinstructions, "a"
746 ++ .long 664b - .
747 ++ .long 662b - .
748 ++ .word 82
749 ++ .byte 8
750 ++ .byte 8
751 ++ .popsection
752 ++ .endm
753 ++
754 ++#ifdef CONFIG_EXPOLINE
755 ++
756 ++ .macro GEN_BR_THUNK name,reg,tmp
757 ++ .section .text.\name,"axG",@progbits,\name,comdat
758 ++ .globl \name
759 ++ .hidden \name
760 ++ .type \name,@function
761 ++\name:
762 ++ .cfi_startproc
763 ++#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
764 ++ exrl 0,0f
765 ++#else
766 ++ larl \tmp,0f
767 ++ ex 0,0(\tmp)
768 ++#endif
769 ++ j .
770 ++0: br \reg
771 ++ .cfi_endproc
772 ++ .endm
773 ++
774 ++ GEN_BR_THUNK __s390x_indirect_jump_r1use_r9,%r9,%r1
775 ++ GEN_BR_THUNK __s390x_indirect_jump_r1use_r14,%r14,%r1
776 ++ GEN_BR_THUNK __s390x_indirect_jump_r11use_r14,%r14,%r11
777 ++
778 ++ .macro BASR_R14_R9
779 ++0: brasl %r14,__s390x_indirect_jump_r1use_r9
780 ++ .pushsection .s390_indirect_branches,"a",@progbits
781 ++ .long 0b-.
782 ++ .popsection
783 ++ .endm
784 ++
785 ++ .macro BR_R1USE_R14
786 ++0: jg __s390x_indirect_jump_r1use_r14
787 ++ .pushsection .s390_indirect_branches,"a",@progbits
788 ++ .long 0b-.
789 ++ .popsection
790 ++ .endm
791 ++
792 ++ .macro BR_R11USE_R14
793 ++0: jg __s390x_indirect_jump_r11use_r14
794 ++ .pushsection .s390_indirect_branches,"a",@progbits
795 ++ .long 0b-.
796 ++ .popsection
797 ++ .endm
798 ++
799 ++#else /* CONFIG_EXPOLINE */
800 ++
801 ++ .macro BASR_R14_R9
802 ++ basr %r14,%r9
803 ++ .endm
804 ++
805 ++ .macro BR_R1USE_R14
806 ++ br %r14
807 ++ .endm
808 ++
809 ++ .macro BR_R11USE_R14
810 ++ br %r14
811 ++ .endm
812 ++
813 ++#endif /* CONFIG_EXPOLINE */
814 ++
815 ++
816 + .section .kprobes.text, "ax"
817 + .Ldummy:
818 + /*
819 +@@ -175,6 +300,11 @@ _PIF_WORK = (_PIF_PER_TRAP)
820 + */
821 + nop 0
822 +
823 ++ENTRY(__bpon)
824 ++ .globl __bpon
825 ++ BPON
826 ++ BR_R1USE_R14
827 ++
828 + /*
829 + * Scheduler resume function, called by switch_to
830 + * gpr2 = (task_struct *) prev
831 +@@ -201,9 +331,9 @@ ENTRY(__switch_to)
832 + mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
833 + lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
834 + TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
835 +- bzr %r14
836 ++ jz 0f
837 + .insn s,0xb2800000,__LC_LPP # set program parameter
838 +- br %r14
839 ++0: BR_R1USE_R14
840 +
841 + .L__critical_start:
842 +
843 +@@ -215,9 +345,11 @@ ENTRY(__switch_to)
844 + */
845 + ENTRY(sie64a)
846 + stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers
847 ++ lg %r12,__LC_CURRENT
848 + stg %r2,__SF_EMPTY(%r15) # save control block pointer
849 + stg %r3,__SF_EMPTY+8(%r15) # save guest register save area
850 + xc __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # reason code = 0
851 ++ mvc __SF_EMPTY+24(8,%r15),__TI_flags(%r12) # copy thread flags
852 + TSTMSK __LC_CPU_FLAGS,_CIF_FPU # load guest fp/vx registers ?
853 + jno .Lsie_load_guest_gprs
854 + brasl %r14,load_fpu_regs # load guest fp/vx regs
855 +@@ -234,7 +366,11 @@ ENTRY(sie64a)
856 + jnz .Lsie_skip
857 + TSTMSK __LC_CPU_FLAGS,_CIF_FPU
858 + jo .Lsie_skip # exit if fp/vx regs changed
859 ++ BPEXIT __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
860 + sie 0(%r14)
861 ++.Lsie_exit:
862 ++ BPOFF
863 ++ BPENTER __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
864 + .Lsie_skip:
865 + ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
866 + lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
867 +@@ -255,9 +391,15 @@ ENTRY(sie64a)
868 + sie_exit:
869 + lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
870 + stmg %r0,%r13,0(%r14) # save guest gprs 0-13
871 ++ xgr %r0,%r0 # clear guest registers to
872 ++ xgr %r1,%r1 # prevent speculative use
873 ++ xgr %r2,%r2
874 ++ xgr %r3,%r3
875 ++ xgr %r4,%r4
876 ++ xgr %r5,%r5
877 + lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
878 + lg %r2,__SF_EMPTY+16(%r15) # return exit reason code
879 +- br %r14
880 ++ BR_R1USE_R14
881 + .Lsie_fault:
882 + lghi %r14,-EFAULT
883 + stg %r14,__SF_EMPTY+16(%r15) # set exit reason code
884 +@@ -280,6 +422,7 @@ ENTRY(system_call)
885 + stpt __LC_SYNC_ENTER_TIMER
886 + .Lsysc_stmg:
887 + stmg %r8,%r15,__LC_SAVE_AREA_SYNC
888 ++ BPOFF
889 + lg %r10,__LC_LAST_BREAK
890 + lg %r12,__LC_THREAD_INFO
891 + lghi %r14,_PIF_SYSCALL
892 +@@ -289,12 +432,15 @@ ENTRY(system_call)
893 + LAST_BREAK %r13
894 + .Lsysc_vtime:
895 + UPDATE_VTIME %r10,%r13,__LC_SYNC_ENTER_TIMER
896 ++ BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
897 + stmg %r0,%r7,__PT_R0(%r11)
898 + mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
899 + mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
900 + mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
901 + stg %r14,__PT_FLAGS(%r11)
902 + .Lsysc_do_svc:
903 ++ # clear user controlled register to prevent speculative use
904 ++ xgr %r0,%r0
905 + lg %r10,__TI_sysc_table(%r12) # address of system call table
906 + llgh %r8,__PT_INT_CODE+2(%r11)
907 + slag %r8,%r8,2 # shift and test for svc 0
908 +@@ -312,7 +458,7 @@ ENTRY(system_call)
909 + lgf %r9,0(%r8,%r10) # get system call add.
910 + TSTMSK __TI_flags(%r12),_TIF_TRACE
911 + jnz .Lsysc_tracesys
912 +- basr %r14,%r9 # call sys_xxxx
913 ++ BASR_R14_R9 # call sys_xxxx
914 + stg %r2,__PT_R2(%r11) # store return value
915 +
916 + .Lsysc_return:
917 +@@ -324,6 +470,7 @@ ENTRY(system_call)
918 + jnz .Lsysc_work # check for work
919 + TSTMSK __LC_CPU_FLAGS,_CIF_WORK
920 + jnz .Lsysc_work
921 ++ BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
922 + .Lsysc_restore:
923 + lg %r14,__LC_VDSO_PER_CPU
924 + lmg %r0,%r10,__PT_R0(%r11)
925 +@@ -451,7 +598,7 @@ ENTRY(system_call)
926 + lmg %r3,%r7,__PT_R3(%r11)
927 + stg %r7,STACK_FRAME_OVERHEAD(%r15)
928 + lg %r2,__PT_ORIG_GPR2(%r11)
929 +- basr %r14,%r9 # call sys_xxx
930 ++ BASR_R14_R9 # call sys_xxx
931 + stg %r2,__PT_R2(%r11) # store return value
932 + .Lsysc_tracenogo:
933 + TSTMSK __TI_flags(%r12),_TIF_TRACE
934 +@@ -475,7 +622,7 @@ ENTRY(ret_from_fork)
935 + lmg %r9,%r10,__PT_R9(%r11) # load gprs
936 + ENTRY(kernel_thread_starter)
937 + la %r2,0(%r10)
938 +- basr %r14,%r9
939 ++ BASR_R14_R9
940 + j .Lsysc_tracenogo
941 +
942 + /*
943 +@@ -484,6 +631,7 @@ ENTRY(kernel_thread_starter)
944 +
945 + ENTRY(pgm_check_handler)
946 + stpt __LC_SYNC_ENTER_TIMER
947 ++ BPOFF
948 + stmg %r8,%r15,__LC_SAVE_AREA_SYNC
949 + lg %r10,__LC_LAST_BREAK
950 + lg %r12,__LC_THREAD_INFO
951 +@@ -508,6 +656,7 @@ ENTRY(pgm_check_handler)
952 + j 3f
953 + 2: LAST_BREAK %r14
954 + UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
955 ++ BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
956 + lg %r15,__LC_KERNEL_STACK
957 + lg %r14,__TI_task(%r12)
958 + aghi %r14,__TASK_thread # pointer to thread_struct
959 +@@ -517,6 +666,15 @@ ENTRY(pgm_check_handler)
960 + mvc __THREAD_trap_tdb(256,%r14),0(%r13)
961 + 3: la %r11,STACK_FRAME_OVERHEAD(%r15)
962 + stmg %r0,%r7,__PT_R0(%r11)
963 ++ # clear user controlled registers to prevent speculative use
964 ++ xgr %r0,%r0
965 ++ xgr %r1,%r1
966 ++ xgr %r2,%r2
967 ++ xgr %r3,%r3
968 ++ xgr %r4,%r4
969 ++ xgr %r5,%r5
970 ++ xgr %r6,%r6
971 ++ xgr %r7,%r7
972 + mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
973 + stmg %r8,%r9,__PT_PSW(%r11)
974 + mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC
975 +@@ -538,9 +696,9 @@ ENTRY(pgm_check_handler)
976 + nill %r10,0x007f
977 + sll %r10,2
978 + je .Lpgm_return
979 +- lgf %r1,0(%r10,%r1) # load address of handler routine
980 ++ lgf %r9,0(%r10,%r1) # load address of handler routine
981 + lgr %r2,%r11 # pass pointer to pt_regs
982 +- basr %r14,%r1 # branch to interrupt-handler
983 ++ BASR_R14_R9 # branch to interrupt-handler
984 + .Lpgm_return:
985 + LOCKDEP_SYS_EXIT
986 + tm __PT_PSW+1(%r11),0x01 # returning to user ?
987 +@@ -573,6 +731,7 @@ ENTRY(pgm_check_handler)
988 + ENTRY(io_int_handler)
989 + STCK __LC_INT_CLOCK
990 + stpt __LC_ASYNC_ENTER_TIMER
991 ++ BPOFF
992 + stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
993 + lg %r10,__LC_LAST_BREAK
994 + lg %r12,__LC_THREAD_INFO
995 +@@ -580,6 +739,16 @@ ENTRY(io_int_handler)
996 + lmg %r8,%r9,__LC_IO_OLD_PSW
997 + SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
998 + stmg %r0,%r7,__PT_R0(%r11)
999 ++ # clear user controlled registers to prevent speculative use
1000 ++ xgr %r0,%r0
1001 ++ xgr %r1,%r1
1002 ++ xgr %r2,%r2
1003 ++ xgr %r3,%r3
1004 ++ xgr %r4,%r4
1005 ++ xgr %r5,%r5
1006 ++ xgr %r6,%r6
1007 ++ xgr %r7,%r7
1008 ++ xgr %r10,%r10
1009 + mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
1010 + stmg %r8,%r9,__PT_PSW(%r11)
1011 + mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
1012 +@@ -614,9 +783,13 @@ ENTRY(io_int_handler)
1013 + lg %r14,__LC_VDSO_PER_CPU
1014 + lmg %r0,%r10,__PT_R0(%r11)
1015 + mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
1016 ++ tm __PT_PSW+1(%r11),0x01 # returning to user ?
1017 ++ jno .Lio_exit_kernel
1018 ++ BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
1019 + .Lio_exit_timer:
1020 + stpt __LC_EXIT_TIMER
1021 + mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
1022 ++.Lio_exit_kernel:
1023 + lmg %r11,%r15,__PT_R11(%r11)
1024 + lpswe __LC_RETURN_PSW
1025 + .Lio_done:
1026 +@@ -748,6 +921,7 @@ ENTRY(io_int_handler)
1027 + ENTRY(ext_int_handler)
1028 + STCK __LC_INT_CLOCK
1029 + stpt __LC_ASYNC_ENTER_TIMER
1030 ++ BPOFF
1031 + stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
1032 + lg %r10,__LC_LAST_BREAK
1033 + lg %r12,__LC_THREAD_INFO
1034 +@@ -755,6 +929,16 @@ ENTRY(ext_int_handler)
1035 + lmg %r8,%r9,__LC_EXT_OLD_PSW
1036 + SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
1037 + stmg %r0,%r7,__PT_R0(%r11)
1038 ++ # clear user controlled registers to prevent speculative use
1039 ++ xgr %r0,%r0
1040 ++ xgr %r1,%r1
1041 ++ xgr %r2,%r2
1042 ++ xgr %r3,%r3
1043 ++ xgr %r4,%r4
1044 ++ xgr %r5,%r5
1045 ++ xgr %r6,%r6
1046 ++ xgr %r7,%r7
1047 ++ xgr %r10,%r10
1048 + mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
1049 + stmg %r8,%r9,__PT_PSW(%r11)
1050 + lghi %r1,__LC_EXT_PARAMS2
1051 +@@ -787,11 +971,12 @@ ENTRY(psw_idle)
1052 + .Lpsw_idle_stcctm:
1053 + #endif
1054 + oi __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
1055 ++ BPON
1056 + STCK __CLOCK_IDLE_ENTER(%r2)
1057 + stpt __TIMER_IDLE_ENTER(%r2)
1058 + .Lpsw_idle_lpsw:
1059 + lpswe __SF_EMPTY(%r15)
1060 +- br %r14
1061 ++ BR_R1USE_R14
1062 + .Lpsw_idle_end:
1063 +
1064 + /*
1065 +@@ -805,7 +990,7 @@ ENTRY(save_fpu_regs)
1066 + lg %r2,__LC_CURRENT
1067 + aghi %r2,__TASK_thread
1068 + TSTMSK __LC_CPU_FLAGS,_CIF_FPU
1069 +- bor %r14
1070 ++ jo .Lsave_fpu_regs_exit
1071 + stfpc __THREAD_FPU_fpc(%r2)
1072 + .Lsave_fpu_regs_fpc_end:
1073 + lg %r3,__THREAD_FPU_regs(%r2)
1074 +@@ -835,7 +1020,8 @@ ENTRY(save_fpu_regs)
1075 + std 15,120(%r3)
1076 + .Lsave_fpu_regs_done:
1077 + oi __LC_CPU_FLAGS+7,_CIF_FPU
1078 +- br %r14
1079 ++.Lsave_fpu_regs_exit:
1080 ++ BR_R1USE_R14
1081 + .Lsave_fpu_regs_end:
1082 + #if IS_ENABLED(CONFIG_KVM)
1083 + EXPORT_SYMBOL(save_fpu_regs)
1084 +@@ -855,7 +1041,7 @@ load_fpu_regs:
1085 + lg %r4,__LC_CURRENT
1086 + aghi %r4,__TASK_thread
1087 + TSTMSK __LC_CPU_FLAGS,_CIF_FPU
1088 +- bnor %r14
1089 ++ jno .Lload_fpu_regs_exit
1090 + lfpc __THREAD_FPU_fpc(%r4)
1091 + TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
1092 + lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area
1093 +@@ -884,7 +1070,8 @@ load_fpu_regs:
1094 + ld 15,120(%r4)
1095 + .Lload_fpu_regs_done:
1096 + ni __LC_CPU_FLAGS+7,255-_CIF_FPU
1097 +- br %r14
1098 ++.Lload_fpu_regs_exit:
1099 ++ BR_R1USE_R14
1100 + .Lload_fpu_regs_end:
1101 +
1102 + .L__critical_end:
1103 +@@ -894,6 +1081,7 @@ load_fpu_regs:
1104 + */
1105 + ENTRY(mcck_int_handler)
1106 + STCK __LC_MCCK_CLOCK
1107 ++ BPOFF
1108 + la %r1,4095 # revalidate r1
1109 + spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
1110 + lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
1111 +@@ -925,6 +1113,16 @@ ENTRY(mcck_int_handler)
1112 + .Lmcck_skip:
1113 + lghi %r14,__LC_GPREGS_SAVE_AREA+64
1114 + stmg %r0,%r7,__PT_R0(%r11)
1115 ++ # clear user controlled registers to prevent speculative use
1116 ++ xgr %r0,%r0
1117 ++ xgr %r1,%r1
1118 ++ xgr %r2,%r2
1119 ++ xgr %r3,%r3
1120 ++ xgr %r4,%r4
1121 ++ xgr %r5,%r5
1122 ++ xgr %r6,%r6
1123 ++ xgr %r7,%r7
1124 ++ xgr %r10,%r10
1125 + mvc __PT_R8(64,%r11),0(%r14)
1126 + stmg %r8,%r9,__PT_PSW(%r11)
1127 + xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
1128 +@@ -950,6 +1148,7 @@ ENTRY(mcck_int_handler)
1129 + mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
1130 + tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
1131 + jno 0f
1132 ++ BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
1133 + stpt __LC_EXIT_TIMER
1134 + mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
1135 + 0: lmg %r11,%r15,__PT_R11(%r11)
1136 +@@ -1045,7 +1244,7 @@ cleanup_critical:
1137 + jl 0f
1138 + clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end
1139 + jl .Lcleanup_load_fpu_regs
1140 +-0: br %r14
1141 ++0: BR_R11USE_R14
1142 +
1143 + .align 8
1144 + .Lcleanup_table:
1145 +@@ -1070,11 +1269,12 @@ cleanup_critical:
1146 + .quad .Lsie_done
1147 +
1148 + .Lcleanup_sie:
1149 ++ BPENTER __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
1150 + lg %r9,__SF_EMPTY(%r15) # get control block pointer
1151 + ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
1152 + lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
1153 + larl %r9,sie_exit # skip forward to sie_exit
1154 +- br %r14
1155 ++ BR_R11USE_R14
1156 + #endif
1157 +
1158 + .Lcleanup_system_call:
1159 +@@ -1116,7 +1316,8 @@ cleanup_critical:
1160 + srag %r9,%r9,23
1161 + jz 0f
1162 + mvc __TI_last_break(8,%r12),16(%r11)
1163 +-0: # set up saved register r11
1164 ++0: BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
1165 ++ # set up saved register r11
1166 + lg %r15,__LC_KERNEL_STACK
1167 + la %r9,STACK_FRAME_OVERHEAD(%r15)
1168 + stg %r9,24(%r11) # r11 pt_regs pointer
1169 +@@ -1131,7 +1332,7 @@ cleanup_critical:
1170 + stg %r15,56(%r11) # r15 stack pointer
1171 + # set new psw address and exit
1172 + larl %r9,.Lsysc_do_svc
1173 +- br %r14
1174 ++ BR_R11USE_R14
1175 + .Lcleanup_system_call_insn:
1176 + .quad system_call
1177 + .quad .Lsysc_stmg
1178 +@@ -1141,7 +1342,7 @@ cleanup_critical:
1179 +
1180 + .Lcleanup_sysc_tif:
1181 + larl %r9,.Lsysc_tif
1182 +- br %r14
1183 ++ BR_R11USE_R14
1184 +
1185 + .Lcleanup_sysc_restore:
1186 + # check if stpt has been executed
1187 +@@ -1158,14 +1359,14 @@ cleanup_critical:
1188 + mvc 0(64,%r11),__PT_R8(%r9)
1189 + lmg %r0,%r7,__PT_R0(%r9)
1190 + 1: lmg %r8,%r9,__LC_RETURN_PSW
1191 +- br %r14
1192 ++ BR_R11USE_R14
1193 + .Lcleanup_sysc_restore_insn:
1194 + .quad .Lsysc_exit_timer
1195 + .quad .Lsysc_done - 4
1196 +
1197 + .Lcleanup_io_tif:
1198 + larl %r9,.Lio_tif
1199 +- br %r14
1200 ++ BR_R11USE_R14
1201 +
1202 + .Lcleanup_io_restore:
1203 + # check if stpt has been executed
1204 +@@ -1179,7 +1380,7 @@ cleanup_critical:
1205 + mvc 0(64,%r11),__PT_R8(%r9)
1206 + lmg %r0,%r7,__PT_R0(%r9)
1207 + 1: lmg %r8,%r9,__LC_RETURN_PSW
1208 +- br %r14
1209 ++ BR_R11USE_R14
1210 + .Lcleanup_io_restore_insn:
1211 + .quad .Lio_exit_timer
1212 + .quad .Lio_done - 4
1213 +@@ -1232,17 +1433,17 @@ cleanup_critical:
1214 + # prepare return psw
1215 + nihh %r8,0xfcfd # clear irq & wait state bits
1216 + lg %r9,48(%r11) # return from psw_idle
1217 +- br %r14
1218 ++ BR_R11USE_R14
1219 + .Lcleanup_idle_insn:
1220 + .quad .Lpsw_idle_lpsw
1221 +
1222 + .Lcleanup_save_fpu_regs:
1223 + larl %r9,save_fpu_regs
1224 +- br %r14
1225 ++ BR_R11USE_R14
1226 +
1227 + .Lcleanup_load_fpu_regs:
1228 + larl %r9,load_fpu_regs
1229 +- br %r14
1230 ++ BR_R11USE_R14
1231 +
1232 + /*
1233 + * Integer constants
1234 +@@ -1258,7 +1459,6 @@ cleanup_critical:
1235 + .Lsie_critical_length:
1236 + .quad .Lsie_done - .Lsie_gmap
1237 + #endif
1238 +-
1239 + .section .rodata, "a"
1240 + #define SYSCALL(esame,emu) .long esame
1241 + .globl sys_call_table
1242 +diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
1243 +index 39127b691b78..df49f2a1a7e5 100644
1244 +--- a/arch/s390/kernel/ipl.c
1245 ++++ b/arch/s390/kernel/ipl.c
1246 +@@ -563,6 +563,7 @@ static struct kset *ipl_kset;
1247 +
1248 + static void __ipl_run(void *unused)
1249 + {
1250 ++ __bpon();
1251 + diag308(DIAG308_LOAD_CLEAR, NULL);
1252 + if (MACHINE_IS_VM)
1253 + __cpcmd("IPL", NULL, 0, NULL);
1254 +diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
1255 +index fbc07891f9e7..64ccfdf96b32 100644
1256 +--- a/arch/s390/kernel/module.c
1257 ++++ b/arch/s390/kernel/module.c
1258 +@@ -31,6 +31,9 @@
1259 + #include <linux/kernel.h>
1260 + #include <linux/moduleloader.h>
1261 + #include <linux/bug.h>
1262 ++#include <asm/alternative.h>
1263 ++#include <asm/nospec-branch.h>
1264 ++#include <asm/facility.h>
1265 +
1266 + #if 0
1267 + #define DEBUGP printk
1268 +@@ -167,7 +170,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
1269 + me->arch.got_offset = me->core_layout.size;
1270 + me->core_layout.size += me->arch.got_size;
1271 + me->arch.plt_offset = me->core_layout.size;
1272 +- me->core_layout.size += me->arch.plt_size;
1273 ++ if (me->arch.plt_size) {
1274 ++ if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable)
1275 ++ me->arch.plt_size += PLT_ENTRY_SIZE;
1276 ++ me->core_layout.size += me->arch.plt_size;
1277 ++ }
1278 + return 0;
1279 + }
1280 +
1281 +@@ -321,9 +328,20 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
1282 + unsigned int *ip;
1283 + ip = me->core_layout.base + me->arch.plt_offset +
1284 + info->plt_offset;
1285 +- ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
1286 +- ip[1] = 0x100a0004;
1287 +- ip[2] = 0x07f10000;
1288 ++ ip[0] = 0x0d10e310; /* basr 1,0 */
1289 ++ ip[1] = 0x100a0004; /* lg 1,10(1) */
1290 ++ if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) {
1291 ++ unsigned int *ij;
1292 ++ ij = me->core_layout.base +
1293 ++ me->arch.plt_offset +
1294 ++ me->arch.plt_size - PLT_ENTRY_SIZE;
1295 ++ ip[2] = 0xa7f40000 + /* j __jump_r1 */
1296 ++ (unsigned int)(u16)
1297 ++ (((unsigned long) ij - 8 -
1298 ++ (unsigned long) ip) / 2);
1299 ++ } else {
1300 ++ ip[2] = 0x07f10000; /* br %r1 */
1301 ++ }
1302 + ip[3] = (unsigned int) (val >> 32);
1303 + ip[4] = (unsigned int) val;
1304 + info->plt_initialized = 1;
1305 +@@ -428,6 +446,45 @@ int module_finalize(const Elf_Ehdr *hdr,
1306 + const Elf_Shdr *sechdrs,
1307 + struct module *me)
1308 + {
1309 ++ const Elf_Shdr *s;
1310 ++ char *secstrings, *secname;
1311 ++ void *aseg;
1312 ++
1313 ++ if (IS_ENABLED(CONFIG_EXPOLINE) &&
1314 ++ !nospec_disable && me->arch.plt_size) {
1315 ++ unsigned int *ij;
1316 ++
1317 ++ ij = me->core_layout.base + me->arch.plt_offset +
1318 ++ me->arch.plt_size - PLT_ENTRY_SIZE;
1319 ++ if (test_facility(35)) {
1320 ++ ij[0] = 0xc6000000; /* exrl %r0,.+10 */
1321 ++ ij[1] = 0x0005a7f4; /* j . */
1322 ++ ij[2] = 0x000007f1; /* br %r1 */
1323 ++ } else {
1324 ++ ij[0] = 0x44000000 | (unsigned int)
1325 ++ offsetof(struct lowcore, br_r1_trampoline);
1326 ++ ij[1] = 0xa7f40000; /* j . */
1327 ++ }
1328 ++ }
1329 ++
1330 ++ secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
1331 ++ for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
1332 ++ aseg = (void *) s->sh_addr;
1333 ++ secname = secstrings + s->sh_name;
1334 ++
1335 ++ if (!strcmp(".altinstructions", secname))
1336 ++ /* patch .altinstructions */
1337 ++ apply_alternatives(aseg, aseg + s->sh_size);
1338 ++
1339 ++ if (IS_ENABLED(CONFIG_EXPOLINE) &&
1340 ++ (!strncmp(".s390_indirect", secname, 14)))
1341 ++ nospec_revert(aseg, aseg + s->sh_size);
1342 ++
1343 ++ if (IS_ENABLED(CONFIG_EXPOLINE) &&
1344 ++ (!strncmp(".s390_return", secname, 12)))
1345 ++ nospec_revert(aseg, aseg + s->sh_size);
1346 ++ }
1347 ++
1348 + jump_label_apply_nops(me);
1349 + return 0;
1350 + }
1351 +diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
1352 +new file mode 100644
1353 +index 000000000000..9f3b5b382743
1354 +--- /dev/null
1355 ++++ b/arch/s390/kernel/nospec-branch.c
1356 +@@ -0,0 +1,169 @@
1357 ++// SPDX-License-Identifier: GPL-2.0
1358 ++#include <linux/module.h>
1359 ++#include <linux/device.h>
1360 ++#include <asm/facility.h>
1361 ++#include <asm/nospec-branch.h>
1362 ++
1363 ++static int __init nobp_setup_early(char *str)
1364 ++{
1365 ++ bool enabled;
1366 ++ int rc;
1367 ++
1368 ++ rc = kstrtobool(str, &enabled);
1369 ++ if (rc)
1370 ++ return rc;
1371 ++ if (enabled && test_facility(82)) {
1372 ++ /*
1373 ++ * The user explicitely requested nobp=1, enable it and
1374 ++ * disable the expoline support.
1375 ++ */
1376 ++ __set_facility(82, S390_lowcore.alt_stfle_fac_list);
1377 ++ if (IS_ENABLED(CONFIG_EXPOLINE))
1378 ++ nospec_disable = 1;
1379 ++ } else {
1380 ++ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
1381 ++ }
1382 ++ return 0;
1383 ++}
1384 ++early_param("nobp", nobp_setup_early);
1385 ++
1386 ++static int __init nospec_setup_early(char *str)
1387 ++{
1388 ++ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
1389 ++ return 0;
1390 ++}
1391 ++early_param("nospec", nospec_setup_early);
1392 ++
1393 ++static int __init nospec_report(void)
1394 ++{
1395 ++ if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
1396 ++ pr_info("Spectre V2 mitigation: execute trampolines.\n");
1397 ++ if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
1398 ++ pr_info("Spectre V2 mitigation: limited branch prediction.\n");
1399 ++ return 0;
1400 ++}
1401 ++arch_initcall(nospec_report);
1402 ++
1403 ++#ifdef CONFIG_SYSFS
1404 ++ssize_t cpu_show_spectre_v1(struct device *dev,
1405 ++ struct device_attribute *attr, char *buf)
1406 ++{
1407 ++ return sprintf(buf, "Mitigation: __user pointer sanitization\n");
1408 ++}
1409 ++
1410 ++ssize_t cpu_show_spectre_v2(struct device *dev,
1411 ++ struct device_attribute *attr, char *buf)
1412 ++{
1413 ++ if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
1414 ++ return sprintf(buf, "Mitigation: execute trampolines\n");
1415 ++ if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
1416 ++ return sprintf(buf, "Mitigation: limited branch prediction.\n");
1417 ++ return sprintf(buf, "Vulnerable\n");
1418 ++}
1419 ++#endif
1420 ++
1421 ++#ifdef CONFIG_EXPOLINE
1422 ++
1423 ++int nospec_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF);
1424 ++
1425 ++static int __init nospectre_v2_setup_early(char *str)
1426 ++{
1427 ++ nospec_disable = 1;
1428 ++ return 0;
1429 ++}
1430 ++early_param("nospectre_v2", nospectre_v2_setup_early);
1431 ++
1432 ++void __init nospec_auto_detect(void)
1433 ++{
1434 ++ if (IS_ENABLED(CC_USING_EXPOLINE)) {
1435 ++ /*
1436 ++ * The kernel has been compiled with expolines.
1437 ++ * Keep expolines enabled and disable nobp.
1438 ++ */
1439 ++ nospec_disable = 0;
1440 ++ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
1441 ++ }
1442 ++ /*
1443 ++ * If the kernel has not been compiled with expolines the
1444 ++ * nobp setting decides what is done, this depends on the
1445 ++ * CONFIG_KERNEL_NP option and the nobp/nospec parameters.
1446 ++ */
1447 ++}
1448 ++
1449 ++static int __init spectre_v2_setup_early(char *str)
1450 ++{
1451 ++ if (str && !strncmp(str, "on", 2)) {
1452 ++ nospec_disable = 0;
1453 ++ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
1454 ++ }
1455 ++ if (str && !strncmp(str, "off", 3))
1456 ++ nospec_disable = 1;
1457 ++ if (str && !strncmp(str, "auto", 4))
1458 ++ nospec_auto_detect();
1459 ++ return 0;
1460 ++}
1461 ++early_param("spectre_v2", spectre_v2_setup_early);
1462 ++
1463 ++static void __init_or_module __nospec_revert(s32 *start, s32 *end)
1464 ++{
1465 ++ enum { BRCL_EXPOLINE, BRASL_EXPOLINE } type;
1466 ++ u8 *instr, *thunk, *br;
1467 ++ u8 insnbuf[6];
1468 ++ s32 *epo;
1469 ++
1470 ++ /* Second part of the instruction replace is always a nop */
1471 ++ memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x00, 0x00 }, 4);
1472 ++ for (epo = start; epo < end; epo++) {
1473 ++ instr = (u8 *) epo + *epo;
1474 ++ if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04)
1475 ++ type = BRCL_EXPOLINE; /* brcl instruction */
1476 ++ else if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x05)
1477 ++ type = BRASL_EXPOLINE; /* brasl instruction */
1478 ++ else
1479 ++ continue;
1480 ++ thunk = instr + (*(int *)(instr + 2)) * 2;
1481 ++ if (thunk[0] == 0xc6 && thunk[1] == 0x00)
1482 ++ /* exrl %r0,<target-br> */
1483 ++ br = thunk + (*(int *)(thunk + 2)) * 2;
1484 ++ else if (thunk[0] == 0xc0 && (thunk[1] & 0x0f) == 0x00 &&
1485 ++ thunk[6] == 0x44 && thunk[7] == 0x00 &&
1486 ++ (thunk[8] & 0x0f) == 0x00 && thunk[9] == 0x00 &&
1487 ++ (thunk[1] & 0xf0) == (thunk[8] & 0xf0))
1488 ++ /* larl %rx,<target br> + ex %r0,0(%rx) */
1489 ++ br = thunk + (*(int *)(thunk + 2)) * 2;
1490 ++ else
1491 ++ continue;
1492 ++ if (br[0] != 0x07 || (br[1] & 0xf0) != 0xf0)
1493 ++ continue;
1494 ++ switch (type) {
1495 ++ case BRCL_EXPOLINE:
1496 ++ /* brcl to thunk, replace with br + nop */
1497 ++ insnbuf[0] = br[0];
1498 ++ insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
1499 ++ break;
1500 ++ case BRASL_EXPOLINE:
1501 ++ /* brasl to thunk, replace with basr + nop */
1502 ++ insnbuf[0] = 0x0d;
1503 ++ insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
1504 ++ break;
1505 ++ }
1506 ++
1507 ++ s390_kernel_write(instr, insnbuf, 6);
1508 ++ }
1509 ++}
1510 ++
1511 ++void __init_or_module nospec_revert(s32 *start, s32 *end)
1512 ++{
1513 ++ if (nospec_disable)
1514 ++ __nospec_revert(start, end);
1515 ++}
1516 ++
1517 ++extern s32 __nospec_call_start[], __nospec_call_end[];
1518 ++extern s32 __nospec_return_start[], __nospec_return_end[];
1519 ++void __init nospec_init_branches(void)
1520 ++{
1521 ++ nospec_revert(__nospec_call_start, __nospec_call_end);
1522 ++ nospec_revert(__nospec_return_start, __nospec_return_end);
1523 ++}
1524 ++
1525 ++#endif /* CONFIG_EXPOLINE */
1526 +diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
1527 +index 81d0808085e6..d856263fd768 100644
1528 +--- a/arch/s390/kernel/processor.c
1529 ++++ b/arch/s390/kernel/processor.c
1530 +@@ -179,3 +179,21 @@ const struct seq_operations cpuinfo_op = {
1531 + .stop = c_stop,
1532 + .show = show_cpuinfo,
1533 + };
1534 ++
1535 ++int s390_isolate_bp(void)
1536 ++{
1537 ++ if (!test_facility(82))
1538 ++ return -EOPNOTSUPP;
1539 ++ set_thread_flag(TIF_ISOLATE_BP);
1540 ++ return 0;
1541 ++}
1542 ++EXPORT_SYMBOL(s390_isolate_bp);
1543 ++
1544 ++int s390_isolate_bp_guest(void)
1545 ++{
1546 ++ if (!test_facility(82))
1547 ++ return -EOPNOTSUPP;
1548 ++ set_thread_flag(TIF_ISOLATE_BP_GUEST);
1549 ++ return 0;
1550 ++}
1551 ++EXPORT_SYMBOL(s390_isolate_bp_guest);
1552 +diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
1553 +index e974e53ab597..feb9d97a9d14 100644
1554 +--- a/arch/s390/kernel/setup.c
1555 ++++ b/arch/s390/kernel/setup.c
1556 +@@ -63,6 +63,8 @@
1557 + #include <asm/sclp.h>
1558 + #include <asm/sysinfo.h>
1559 + #include <asm/numa.h>
1560 ++#include <asm/alternative.h>
1561 ++#include <asm/nospec-branch.h>
1562 + #include "entry.h"
1563 +
1564 + /*
1565 +@@ -335,7 +337,9 @@ static void __init setup_lowcore(void)
1566 + lc->machine_flags = S390_lowcore.machine_flags;
1567 + lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
1568 + memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
1569 +- MAX_FACILITY_BIT/8);
1570 ++ sizeof(lc->stfle_fac_list));
1571 ++ memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
1572 ++ sizeof(lc->alt_stfle_fac_list));
1573 + if (MACHINE_HAS_VX)
1574 + lc->vector_save_area_addr =
1575 + (unsigned long) &lc->vector_save_area;
1576 +@@ -372,6 +376,7 @@ static void __init setup_lowcore(void)
1577 + #ifdef CONFIG_SMP
1578 + lc->spinlock_lockval = arch_spin_lockval(0);
1579 + #endif
1580 ++ lc->br_r1_trampoline = 0x07f1; /* br %r1 */
1581 +
1582 + set_prefix((u32)(unsigned long) lc);
1583 + lowcore_ptr[0] = lc;
1584 +@@ -871,6 +876,9 @@ void __init setup_arch(char **cmdline_p)
1585 + init_mm.end_data = (unsigned long) &_edata;
1586 + init_mm.brk = (unsigned long) &_end;
1587 +
1588 ++ if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
1589 ++ nospec_auto_detect();
1590 ++
1591 + parse_early_param();
1592 + #ifdef CONFIG_CRASH_DUMP
1593 + /* Deactivate elfcorehdr= kernel parameter */
1594 +@@ -931,6 +939,10 @@ void __init setup_arch(char **cmdline_p)
1595 + conmode_default();
1596 + set_preferred_console();
1597 +
1598 ++ apply_alternative_instructions();
1599 ++ if (IS_ENABLED(CONFIG_EXPOLINE))
1600 ++ nospec_init_branches();
1601 ++
1602 + /* Setup zfcpdump support */
1603 + setup_zfcpdump();
1604 +
1605 +diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
1606 +index 35531fe1c5ea..0a31110f41f6 100644
1607 +--- a/arch/s390/kernel/smp.c
1608 ++++ b/arch/s390/kernel/smp.c
1609 +@@ -205,6 +205,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
1610 + lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
1611 + lc->cpu_nr = cpu;
1612 + lc->spinlock_lockval = arch_spin_lockval(cpu);
1613 ++ lc->br_r1_trampoline = 0x07f1; /* br %r1 */
1614 + if (MACHINE_HAS_VX)
1615 + lc->vector_save_area_addr =
1616 + (unsigned long) &lc->vector_save_area;
1617 +@@ -253,7 +254,9 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
1618 + __ctl_store(lc->cregs_save_area, 0, 15);
1619 + save_access_regs((unsigned int *) lc->access_regs_save_area);
1620 + memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
1621 +- MAX_FACILITY_BIT/8);
1622 ++ sizeof(lc->stfle_fac_list));
1623 ++ memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
1624 ++ sizeof(lc->alt_stfle_fac_list));
1625 + }
1626 +
1627 + static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
1628 +@@ -302,6 +305,7 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
1629 + mem_assign_absolute(lc->restart_fn, (unsigned long) func);
1630 + mem_assign_absolute(lc->restart_data, (unsigned long) data);
1631 + mem_assign_absolute(lc->restart_source, source_cpu);
1632 ++ __bpon();
1633 + asm volatile(
1634 + "0: sigp 0,%0,%2 # sigp restart to target cpu\n"
1635 + " brc 2,0b # busy, try again\n"
1636 +@@ -875,6 +879,7 @@ void __cpu_die(unsigned int cpu)
1637 + void __noreturn cpu_die(void)
1638 + {
1639 + idle_task_exit();
1640 ++ __bpon();
1641 + pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
1642 + for (;;) ;
1643 + }
1644 +diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
1645 +index 66956c09d5bf..3d04dfdabc9f 100644
1646 +--- a/arch/s390/kernel/uprobes.c
1647 ++++ b/arch/s390/kernel/uprobes.c
1648 +@@ -147,6 +147,15 @@ unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
1649 + return orig;
1650 + }
1651 +
1652 ++bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
1653 ++ struct pt_regs *regs)
1654 ++{
1655 ++ if (ctx == RP_CHECK_CHAIN_CALL)
1656 ++ return user_stack_pointer(regs) <= ret->stack;
1657 ++ else
1658 ++ return user_stack_pointer(regs) < ret->stack;
1659 ++}
1660 ++
1661 + /* Instruction Emulation */
1662 +
1663 + static void adjust_psw_addr(psw_t *psw, unsigned long len)
1664 +diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
1665 +index 115bda280d50..dd96b467946b 100644
1666 +--- a/arch/s390/kernel/vmlinux.lds.S
1667 ++++ b/arch/s390/kernel/vmlinux.lds.S
1668 +@@ -99,6 +99,43 @@ SECTIONS
1669 + EXIT_DATA
1670 + }
1671 +
1672 ++ /*
1673 ++ * struct alt_inst entries. From the header (alternative.h):
1674 ++ * "Alternative instructions for different CPU types or capabilities"
1675 ++ * Think locking instructions on spinlocks.
1676 ++ * Note, that it is a part of __init region.
1677 ++ */
1678 ++ . = ALIGN(8);
1679 ++ .altinstructions : {
1680 ++ __alt_instructions = .;
1681 ++ *(.altinstructions)
1682 ++ __alt_instructions_end = .;
1683 ++ }
1684 ++
1685 ++ /*
1686 ++ * And here are the replacement instructions. The linker sticks
1687 ++ * them as binary blobs. The .altinstructions has enough data to
1688 ++ * get the address and the length of them to patch the kernel safely.
1689 ++ * Note, that it is a part of __init region.
1690 ++ */
1691 ++ .altinstr_replacement : {
1692 ++ *(.altinstr_replacement)
1693 ++ }
1694 ++
1695 ++ /*
1696 ++ * Table with the patch locations to undo expolines
1697 ++ */
1698 ++ .nospec_call_table : {
1699 ++ __nospec_call_start = . ;
1700 ++ *(.s390_indirect*)
1701 ++ __nospec_call_end = . ;
1702 ++ }
1703 ++ .nospec_return_table : {
1704 ++ __nospec_return_start = . ;
1705 ++ *(.s390_return*)
1706 ++ __nospec_return_end = . ;
1707 ++ }
1708 ++
1709 + /* early.c uses stsi, which requires page aligned data. */
1710 + . = ALIGN(PAGE_SIZE);
1711 + INIT_DATA_SECTION(0x100)
1712 +diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
1713 +index a70ff09b4982..2032ab81b2d7 100644
1714 +--- a/arch/s390/kvm/kvm-s390.c
1715 ++++ b/arch/s390/kvm/kvm-s390.c
1716 +@@ -401,6 +401,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
1717 + case KVM_CAP_S390_RI:
1718 + r = test_facility(64);
1719 + break;
1720 ++ case KVM_CAP_S390_BPB:
1721 ++ r = test_facility(82);
1722 ++ break;
1723 + default:
1724 + r = 0;
1725 + }
1726 +@@ -1713,6 +1716,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1727 + kvm_s390_set_prefix(vcpu, 0);
1728 + if (test_kvm_facility(vcpu->kvm, 64))
1729 + vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
1730 ++ if (test_kvm_facility(vcpu->kvm, 82))
1731 ++ vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
1732 + /* fprs can be synchronized via vrs, even if the guest has no vx. With
1733 + * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
1734 + */
1735 +@@ -1829,7 +1834,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1736 + if (test_fp_ctl(current->thread.fpu.fpc))
1737 + /* User space provided an invalid FPC, let's clear it */
1738 + current->thread.fpu.fpc = 0;
1739 +-
1740 + save_access_regs(vcpu->arch.host_acrs);
1741 + restore_access_regs(vcpu->run->s.regs.acrs);
1742 + gmap_enable(vcpu->arch.enabled_gmap);
1743 +@@ -1877,6 +1881,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
1744 + current->thread.fpu.fpc = 0;
1745 + vcpu->arch.sie_block->gbea = 1;
1746 + vcpu->arch.sie_block->pp = 0;
1747 ++ vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
1748 + vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1749 + kvm_clear_async_pf_completion_queue(vcpu);
1750 + if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
1751 +@@ -2744,6 +2749,11 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1752 + if (riccb->valid)
1753 + vcpu->arch.sie_block->ecb3 |= 0x01;
1754 + }
1755 ++ if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
1756 ++ test_kvm_facility(vcpu->kvm, 82)) {
1757 ++ vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
1758 ++ vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
1759 ++ }
1760 +
1761 + kvm_run->kvm_dirty_regs = 0;
1762 + }
1763 +@@ -2762,6 +2772,7 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1764 + kvm_run->s.regs.pft = vcpu->arch.pfault_token;
1765 + kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
1766 + kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
1767 ++ kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
1768 + }
1769 +
1770 + int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1771 +diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
1772 +index d8673e243f13..ced6c9b8f04d 100644
1773 +--- a/arch/s390/kvm/vsie.c
1774 ++++ b/arch/s390/kvm/vsie.c
1775 +@@ -217,6 +217,12 @@ static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1776 + memcpy(scb_o->gcr, scb_s->gcr, 128);
1777 + scb_o->pp = scb_s->pp;
1778 +
1779 ++ /* branch prediction */
1780 ++ if (test_kvm_facility(vcpu->kvm, 82)) {
1781 ++ scb_o->fpf &= ~FPF_BPBC;
1782 ++ scb_o->fpf |= scb_s->fpf & FPF_BPBC;
1783 ++ }
1784 ++
1785 + /* interrupt intercept */
1786 + switch (scb_s->icptcode) {
1787 + case ICPT_PROGI:
1788 +@@ -259,6 +265,7 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1789 + scb_s->ecb3 = 0;
1790 + scb_s->ecd = 0;
1791 + scb_s->fac = 0;
1792 ++ scb_s->fpf = 0;
1793 +
1794 + rc = prepare_cpuflags(vcpu, vsie_page);
1795 + if (rc)
1796 +@@ -316,6 +323,9 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1797 + prefix_unmapped(vsie_page);
1798 + scb_s->ecb |= scb_o->ecb & 0x10U;
1799 + }
1800 ++ /* branch prediction */
1801 ++ if (test_kvm_facility(vcpu->kvm, 82))
1802 ++ scb_s->fpf |= scb_o->fpf & FPF_BPBC;
1803 + /* SIMD */
1804 + if (test_kvm_facility(vcpu->kvm, 129)) {
1805 + scb_s->eca |= scb_o->eca & 0x00020000U;
1806 +@@ -754,6 +764,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1807 + {
1808 + struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
1809 + struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
1810 ++ int guest_bp_isolation;
1811 + int rc;
1812 +
1813 + handle_last_fault(vcpu, vsie_page);
1814 +@@ -764,6 +775,20 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1815 + s390_handle_mcck();
1816 +
1817 + srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
1818 ++
1819 ++ /* save current guest state of bp isolation override */
1820 ++ guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST);
1821 ++
1822 ++ /*
1823 ++ * The guest is running with BPBC, so we have to force it on for our
1824 ++ * nested guest. This is done by enabling BPBC globally, so the BPBC
1825 ++ * control in the SCB (which the nested guest can modify) is simply
1826 ++ * ignored.
1827 ++ */
1828 ++ if (test_kvm_facility(vcpu->kvm, 82) &&
1829 ++ vcpu->arch.sie_block->fpf & FPF_BPBC)
1830 ++ set_thread_flag(TIF_ISOLATE_BP_GUEST);
1831 ++
1832 + local_irq_disable();
1833 + guest_enter_irqoff();
1834 + local_irq_enable();
1835 +@@ -773,6 +798,11 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1836 + local_irq_disable();
1837 + guest_exit_irqoff();
1838 + local_irq_enable();
1839 ++
1840 ++ /* restore guest state for bp isolation override */
1841 ++ if (!guest_bp_isolation)
1842 ++ clear_thread_flag(TIF_ISOLATE_BP_GUEST);
1843 ++
1844 + vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1845 +
1846 + if (rc > 0)
1847 +diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
1848 +index bbfb03eccb7f..da6a287a11e4 100644
1849 +--- a/arch/x86/kernel/tsc.c
1850 ++++ b/arch/x86/kernel/tsc.c
1851 +@@ -409,7 +409,7 @@ static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
1852 + hpet2 -= hpet1;
1853 + tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
1854 + do_div(tmp, 1000000);
1855 +- do_div(deltatsc, tmp);
1856 ++ deltatsc = div64_u64(deltatsc, tmp);
1857 +
1858 + return (unsigned long) deltatsc;
1859 + }
1860 +diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
1861 +index 94e04c9de12b..667dc5c86fef 100644
1862 +--- a/drivers/acpi/acpi_video.c
1863 ++++ b/drivers/acpi/acpi_video.c
1864 +@@ -2069,6 +2069,25 @@ static int __init intel_opregion_present(void)
1865 + return opregion;
1866 + }
1867 +
1868 ++static bool dmi_is_desktop(void)
1869 ++{
1870 ++ const char *chassis_type;
1871 ++
1872 ++ chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
1873 ++ if (!chassis_type)
1874 ++ return false;
1875 ++
1876 ++ if (!strcmp(chassis_type, "3") || /* 3: Desktop */
1877 ++ !strcmp(chassis_type, "4") || /* 4: Low Profile Desktop */
1878 ++ !strcmp(chassis_type, "5") || /* 5: Pizza Box */
1879 ++ !strcmp(chassis_type, "6") || /* 6: Mini Tower */
1880 ++ !strcmp(chassis_type, "7") || /* 7: Tower */
1881 ++ !strcmp(chassis_type, "11")) /* 11: Main Server Chassis */
1882 ++ return true;
1883 ++
1884 ++ return false;
1885 ++}
1886 ++
1887 + int acpi_video_register(void)
1888 + {
1889 + int ret = 0;
1890 +@@ -2089,8 +2108,12 @@ int acpi_video_register(void)
1891 + * win8 ready (where we also prefer the native backlight driver, so
1892 + * normally the acpi_video code should not register there anyways).
1893 + */
1894 +- if (only_lcd == -1)
1895 +- only_lcd = acpi_osi_is_win8();
1896 ++ if (only_lcd == -1) {
1897 ++ if (dmi_is_desktop() && acpi_osi_is_win8())
1898 ++ only_lcd = true;
1899 ++ else
1900 ++ only_lcd = false;
1901 ++ }
1902 +
1903 + dmi_check_system(video_dmi_table);
1904 +
1905 +diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
1906 +index 5d475b3a0b2e..128ebd439221 100644
1907 +--- a/drivers/cdrom/cdrom.c
1908 ++++ b/drivers/cdrom/cdrom.c
1909 +@@ -2368,7 +2368,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
1910 + if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT)
1911 + return media_changed(cdi, 1);
1912 +
1913 +- if ((unsigned int)arg >= cdi->capacity)
1914 ++ if (arg >= cdi->capacity)
1915 + return -EINVAL;
1916 +
1917 + info = kmalloc(sizeof(*info), GFP_KERNEL);
1918 +diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
1919 +index a7b2a751f6fe..cdb53586c8fe 100644
1920 +--- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c
1921 ++++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
1922 +@@ -322,19 +322,44 @@ int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
1923 + {
1924 + uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE;
1925 + ssize_t ret;
1926 ++ int retry;
1927 +
1928 + if (type < DRM_DP_DUAL_MODE_TYPE2_DVI)
1929 + return 0;
1930 +
1931 +- ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
1932 +- &tmds_oen, sizeof(tmds_oen));
1933 +- if (ret) {
1934 +- DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n",
1935 +- enable ? "enable" : "disable");
1936 +- return ret;
1937 ++ /*
1938 ++ * LSPCON adapters in low-power state may ignore the first write, so
1939 ++ * read back and verify the written value a few times.
1940 ++ */
1941 ++ for (retry = 0; retry < 3; retry++) {
1942 ++ uint8_t tmp;
1943 ++
1944 ++ ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
1945 ++ &tmds_oen, sizeof(tmds_oen));
1946 ++ if (ret) {
1947 ++ DRM_DEBUG_KMS("Failed to %s TMDS output buffers (%d attempts)\n",
1948 ++ enable ? "enable" : "disable",
1949 ++ retry + 1);
1950 ++ return ret;
1951 ++ }
1952 ++
1953 ++ ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN,
1954 ++ &tmp, sizeof(tmp));
1955 ++ if (ret) {
1956 ++ DRM_DEBUG_KMS("I2C read failed during TMDS output buffer %s (%d attempts)\n",
1957 ++ enable ? "enabling" : "disabling",
1958 ++ retry + 1);
1959 ++ return ret;
1960 ++ }
1961 ++
1962 ++ if (tmp == tmds_oen)
1963 ++ return 0;
1964 + }
1965 +
1966 +- return 0;
1967 ++ DRM_DEBUG_KMS("I2C write value mismatch during TMDS output buffer %s\n",
1968 ++ enable ? "enabling" : "disabling");
1969 ++
1970 ++ return -EIO;
1971 + }
1972 + EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output);
1973 +
1974 +diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
1975 +index 36a665f0e5c9..e23748cca0c0 100644
1976 +--- a/drivers/gpu/drm/i915/i915_drv.h
1977 ++++ b/drivers/gpu/drm/i915/i915_drv.h
1978 +@@ -3681,7 +3681,11 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
1979 + struct intel_display_error_state *error);
1980 +
1981 + int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
1982 +-int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
1983 ++int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv, u32 mbox,
1984 ++ u32 val, int timeout_us);
1985 ++#define sandybridge_pcode_write(dev_priv, mbox, val) \
1986 ++ sandybridge_pcode_write_timeout(dev_priv, mbox, val, 500)
1987 ++
1988 + int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
1989 + u32 reply_mask, u32 reply, int timeout_base_ms);
1990 +
1991 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
1992 +index ce32303b3013..c185625d67f2 100644
1993 +--- a/drivers/gpu/drm/i915/intel_display.c
1994 ++++ b/drivers/gpu/drm/i915/intel_display.c
1995 +@@ -6012,8 +6012,8 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
1996 +
1997 + /* Inform power controller of upcoming frequency change */
1998 + mutex_lock(&dev_priv->rps.hw_lock);
1999 +- ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
2000 +- 0x80000000);
2001 ++ ret = sandybridge_pcode_write_timeout(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
2002 ++ 0x80000000, 2000);
2003 + mutex_unlock(&dev_priv->rps.hw_lock);
2004 +
2005 + if (ret) {
2006 +@@ -6044,8 +6044,9 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
2007 + I915_WRITE(CDCLK_CTL, val);
2008 +
2009 + mutex_lock(&dev_priv->rps.hw_lock);
2010 +- ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
2011 +- DIV_ROUND_UP(cdclk, 25000));
2012 ++ ret = sandybridge_pcode_write_timeout(dev_priv,
2013 ++ HSW_PCODE_DE_WRITE_FREQ_REQ,
2014 ++ DIV_ROUND_UP(cdclk, 25000), 2000);
2015 + mutex_unlock(&dev_priv->rps.hw_lock);
2016 +
2017 + if (ret) {
2018 +diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
2019 +index 49de4760cc16..05427d292457 100644
2020 +--- a/drivers/gpu/drm/i915/intel_pm.c
2021 ++++ b/drivers/gpu/drm/i915/intel_pm.c
2022 +@@ -7913,8 +7913,8 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
2023 + return 0;
2024 + }
2025 +
2026 +-int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
2027 +- u32 mbox, u32 val)
2028 ++int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv,
2029 ++ u32 mbox, u32 val, int timeout_us)
2030 + {
2031 + int status;
2032 +
2033 +@@ -7935,7 +7935,7 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
2034 +
2035 + if (intel_wait_for_register_fw(dev_priv,
2036 + GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
2037 +- 500)) {
2038 ++ timeout_us)) {
2039 + DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
2040 + return -ETIMEDOUT;
2041 + }
2042 +diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
2043 +index ec9023bd935b..d53e805d392f 100644
2044 +--- a/drivers/gpu/drm/vc4/vc4_bo.c
2045 ++++ b/drivers/gpu/drm/vc4/vc4_bo.c
2046 +@@ -80,6 +80,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
2047 + struct vc4_dev *vc4 = to_vc4_dev(obj->dev);
2048 +
2049 + if (bo->validated_shader) {
2050 ++ kfree(bo->validated_shader->uniform_addr_offsets);
2051 + kfree(bo->validated_shader->texture_samples);
2052 + kfree(bo->validated_shader);
2053 + bo->validated_shader = NULL;
2054 +@@ -328,6 +329,7 @@ void vc4_free_object(struct drm_gem_object *gem_bo)
2055 + }
2056 +
2057 + if (bo->validated_shader) {
2058 ++ kfree(bo->validated_shader->uniform_addr_offsets);
2059 + kfree(bo->validated_shader->texture_samples);
2060 + kfree(bo->validated_shader);
2061 + bo->validated_shader = NULL;
2062 +diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
2063 +index 917321ce832f..19a5bde8e490 100644
2064 +--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
2065 ++++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
2066 +@@ -874,6 +874,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
2067 + fail:
2068 + kfree(validation_state.branch_targets);
2069 + if (validated_shader) {
2070 ++ kfree(validated_shader->uniform_addr_offsets);
2071 + kfree(validated_shader->texture_samples);
2072 + kfree(validated_shader);
2073 + }
2074 +diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
2075 +index e6fe21a6135b..b32bf7eac3c8 100644
2076 +--- a/drivers/i2c/busses/i2c-i801.c
2077 ++++ b/drivers/i2c/busses/i2c-i801.c
2078 +@@ -243,6 +243,7 @@ struct i801_priv {
2079 + struct i2c_adapter adapter;
2080 + unsigned long smba;
2081 + unsigned char original_hstcfg;
2082 ++ unsigned char original_slvcmd;
2083 + struct pci_dev *pci_dev;
2084 + unsigned int features;
2085 +
2086 +@@ -962,13 +963,24 @@ static int i801_enable_host_notify(struct i2c_adapter *adapter)
2087 + if (!priv->host_notify)
2088 + return -ENOMEM;
2089 +
2090 +- outb_p(SMBSLVCMD_HST_NTFY_INTREN, SMBSLVCMD(priv));
2091 ++ if (!(SMBSLVCMD_HST_NTFY_INTREN & priv->original_slvcmd))
2092 ++ outb_p(SMBSLVCMD_HST_NTFY_INTREN | priv->original_slvcmd,
2093 ++ SMBSLVCMD(priv));
2094 ++
2095 + /* clear Host Notify bit to allow a new notification */
2096 + outb_p(SMBSLVSTS_HST_NTFY_STS, SMBSLVSTS(priv));
2097 +
2098 + return 0;
2099 + }
2100 +
2101 ++static void i801_disable_host_notify(struct i801_priv *priv)
2102 ++{
2103 ++ if (!(priv->features & FEATURE_HOST_NOTIFY))
2104 ++ return;
2105 ++
2106 ++ outb_p(priv->original_slvcmd, SMBSLVCMD(priv));
2107 ++}
2108 ++
2109 + static const struct i2c_algorithm smbus_algorithm = {
2110 + .smbus_xfer = i801_access,
2111 + .functionality = i801_func,
2112 +@@ -1589,6 +1601,10 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
2113 + outb_p(inb_p(SMBAUXCTL(priv)) &
2114 + ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
2115 +
2116 ++ /* Remember original Host Notify setting */
2117 ++ if (priv->features & FEATURE_HOST_NOTIFY)
2118 ++ priv->original_slvcmd = inb_p(SMBSLVCMD(priv));
2119 ++
2120 + /* Default timeout in interrupt mode: 200 ms */
2121 + priv->adapter.timeout = HZ / 5;
2122 +
2123 +@@ -1666,6 +1682,7 @@ static void i801_remove(struct pci_dev *dev)
2124 + pm_runtime_forbid(&dev->dev);
2125 + pm_runtime_get_noresume(&dev->dev);
2126 +
2127 ++ i801_disable_host_notify(priv);
2128 + i801_del_mux(priv);
2129 + i2c_del_adapter(&priv->adapter);
2130 + i801_acpi_remove(priv);
2131 +@@ -1679,6 +1696,15 @@ static void i801_remove(struct pci_dev *dev)
2132 + */
2133 + }
2134 +
2135 ++static void i801_shutdown(struct pci_dev *dev)
2136 ++{
2137 ++ struct i801_priv *priv = pci_get_drvdata(dev);
2138 ++
2139 ++ /* Restore config registers to avoid hard hang on some systems */
2140 ++ i801_disable_host_notify(priv);
2141 ++ pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
2142 ++}
2143 ++
2144 + #ifdef CONFIG_PM
2145 + static int i801_suspend(struct device *dev)
2146 + {
2147 +@@ -1711,6 +1737,7 @@ static struct pci_driver i801_driver = {
2148 + .id_table = i801_ids,
2149 + .probe = i801_probe,
2150 + .remove = i801_remove,
2151 ++ .shutdown = i801_shutdown,
2152 + .driver = {
2153 + .pm = &i801_pm_ops,
2154 + },
2155 +diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
2156 +index 403df3591d29..5b8909d1b55e 100644
2157 +--- a/drivers/infiniband/hw/mlx5/qp.c
2158 ++++ b/drivers/infiniband/hw/mlx5/qp.c
2159 +@@ -2848,7 +2848,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
2160 + * If we moved a kernel QP to RESET, clean up all old CQ
2161 + * entries and reinitialize the QP.
2162 + */
2163 +- if (new_state == IB_QPS_RESET && !ibqp->uobject) {
2164 ++ if (new_state == IB_QPS_RESET &&
2165 ++ !ibqp->uobject && ibqp->qp_type != IB_QPT_XRC_TGT) {
2166 + mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
2167 + ibqp->srq ? to_msrq(ibqp->srq) : NULL);
2168 + if (send_cq != recv_cq)
2169 +diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
2170 +index 930424e55439..251d64ca41ce 100644
2171 +--- a/drivers/input/misc/drv260x.c
2172 ++++ b/drivers/input/misc/drv260x.c
2173 +@@ -521,7 +521,7 @@ static int drv260x_probe(struct i2c_client *client,
2174 + if (!haptics)
2175 + return -ENOMEM;
2176 +
2177 +- haptics->rated_voltage = DRV260X_DEF_OD_CLAMP_VOLT;
2178 ++ haptics->overdrive_voltage = DRV260X_DEF_OD_CLAMP_VOLT;
2179 + haptics->rated_voltage = DRV260X_DEF_RATED_VOLT;
2180 +
2181 + if (pdata) {
2182 +diff --git a/drivers/media/usb/stkwebcam/stk-sensor.c b/drivers/media/usb/stkwebcam/stk-sensor.c
2183 +index e546b014d7ad..2dcc8d0be9e7 100644
2184 +--- a/drivers/media/usb/stkwebcam/stk-sensor.c
2185 ++++ b/drivers/media/usb/stkwebcam/stk-sensor.c
2186 +@@ -228,7 +228,7 @@
2187 + static int stk_sensor_outb(struct stk_camera *dev, u8 reg, u8 val)
2188 + {
2189 + int i = 0;
2190 +- int tmpval = 0;
2191 ++ u8 tmpval = 0;
2192 +
2193 + if (stk_camera_write_reg(dev, STK_IIC_TX_INDEX, reg))
2194 + return 1;
2195 +@@ -253,7 +253,7 @@ static int stk_sensor_outb(struct stk_camera *dev, u8 reg, u8 val)
2196 + static int stk_sensor_inb(struct stk_camera *dev, u8 reg, u8 *val)
2197 + {
2198 + int i = 0;
2199 +- int tmpval = 0;
2200 ++ u8 tmpval = 0;
2201 +
2202 + if (stk_camera_write_reg(dev, STK_IIC_RX_INDEX, reg))
2203 + return 1;
2204 +@@ -274,7 +274,7 @@ static int stk_sensor_inb(struct stk_camera *dev, u8 reg, u8 *val)
2205 + if (stk_camera_read_reg(dev, STK_IIC_RX_VALUE, &tmpval))
2206 + return 1;
2207 +
2208 +- *val = (u8) tmpval;
2209 ++ *val = tmpval;
2210 + return 0;
2211 + }
2212 +
2213 +diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
2214 +index 22a9aae16291..1c48f2f1e14a 100644
2215 +--- a/drivers/media/usb/stkwebcam/stk-webcam.c
2216 ++++ b/drivers/media/usb/stkwebcam/stk-webcam.c
2217 +@@ -144,7 +144,7 @@ int stk_camera_write_reg(struct stk_camera *dev, u16 index, u8 value)
2218 + return 0;
2219 + }
2220 +
2221 +-int stk_camera_read_reg(struct stk_camera *dev, u16 index, int *value)
2222 ++int stk_camera_read_reg(struct stk_camera *dev, u16 index, u8 *value)
2223 + {
2224 + struct usb_device *udev = dev->udev;
2225 + unsigned char *buf;
2226 +@@ -163,7 +163,7 @@ int stk_camera_read_reg(struct stk_camera *dev, u16 index, int *value)
2227 + sizeof(u8),
2228 + 500);
2229 + if (ret >= 0)
2230 +- memcpy(value, buf, sizeof(u8));
2231 ++ *value = *buf;
2232 +
2233 + kfree(buf);
2234 + return ret;
2235 +@@ -171,9 +171,10 @@ int stk_camera_read_reg(struct stk_camera *dev, u16 index, int *value)
2236 +
2237 + static int stk_start_stream(struct stk_camera *dev)
2238 + {
2239 +- int value;
2240 ++ u8 value;
2241 + int i, ret;
2242 +- int value_116, value_117;
2243 ++ u8 value_116, value_117;
2244 ++
2245 +
2246 + if (!is_present(dev))
2247 + return -ENODEV;
2248 +@@ -213,7 +214,7 @@ static int stk_start_stream(struct stk_camera *dev)
2249 +
2250 + static int stk_stop_stream(struct stk_camera *dev)
2251 + {
2252 +- int value;
2253 ++ u8 value;
2254 + int i;
2255 + if (is_present(dev)) {
2256 + stk_camera_read_reg(dev, 0x0100, &value);
2257 +diff --git a/drivers/media/usb/stkwebcam/stk-webcam.h b/drivers/media/usb/stkwebcam/stk-webcam.h
2258 +index 9bbfa3d9bfdd..92bb48e3c74e 100644
2259 +--- a/drivers/media/usb/stkwebcam/stk-webcam.h
2260 ++++ b/drivers/media/usb/stkwebcam/stk-webcam.h
2261 +@@ -129,7 +129,7 @@ struct stk_camera {
2262 + #define vdev_to_camera(d) container_of(d, struct stk_camera, vdev)
2263 +
2264 + int stk_camera_write_reg(struct stk_camera *, u16, u8);
2265 +-int stk_camera_read_reg(struct stk_camera *, u16, int *);
2266 ++int stk_camera_read_reg(struct stk_camera *, u16, u8 *);
2267 +
2268 + int stk_sensor_init(struct stk_camera *);
2269 + int stk_sensor_configure(struct stk_camera *);
2270 +diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
2271 +index 7ee1667acde4..00dff9b5a6c4 100644
2272 +--- a/drivers/message/fusion/mptsas.c
2273 ++++ b/drivers/message/fusion/mptsas.c
2274 +@@ -1994,6 +1994,7 @@ static struct scsi_host_template mptsas_driver_template = {
2275 + .cmd_per_lun = 7,
2276 + .use_clustering = ENABLE_CLUSTERING,
2277 + .shost_attrs = mptscsih_host_attrs,
2278 ++ .no_write_same = 1,
2279 + };
2280 +
2281 + static int mptsas_get_linkerrors(struct sas_phy *phy)
2282 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
2283 +index 513457a2a7bf..13a015b8052b 100644
2284 +--- a/drivers/net/bonding/bond_main.c
2285 ++++ b/drivers/net/bonding/bond_main.c
2286 +@@ -1654,8 +1654,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
2287 + } /* switch(bond_mode) */
2288 +
2289 + #ifdef CONFIG_NET_POLL_CONTROLLER
2290 +- slave_dev->npinfo = bond->dev->npinfo;
2291 +- if (slave_dev->npinfo) {
2292 ++ if (bond->dev->npinfo) {
2293 + if (slave_enable_netpoll(new_slave)) {
2294 + netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
2295 + res = -EBUSY;
2296 +diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
2297 +index 552de9c490c6..de336897a28a 100644
2298 +--- a/drivers/net/ethernet/ti/cpsw.c
2299 ++++ b/drivers/net/ethernet/ti/cpsw.c
2300 +@@ -124,7 +124,7 @@ do { \
2301 +
2302 + #define RX_PRIORITY_MAPPING 0x76543210
2303 + #define TX_PRIORITY_MAPPING 0x33221100
2304 +-#define CPDMA_TX_PRIORITY_MAP 0x01234567
2305 ++#define CPDMA_TX_PRIORITY_MAP 0x76543210
2306 +
2307 + #define CPSW_VLAN_AWARE BIT(1)
2308 + #define CPSW_ALE_VLAN_AWARE 1
2309 +diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
2310 +index dc36c2ec1d10..fa2c7bd638be 100644
2311 +--- a/drivers/net/ppp/pppoe.c
2312 ++++ b/drivers/net/ppp/pppoe.c
2313 +@@ -620,6 +620,10 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
2314 + lock_sock(sk);
2315 +
2316 + error = -EINVAL;
2317 ++
2318 ++ if (sockaddr_len != sizeof(struct sockaddr_pppox))
2319 ++ goto end;
2320 ++
2321 + if (sp->sa_protocol != PX_PROTO_OE)
2322 + goto end;
2323 +
2324 +diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
2325 +index 8673ef3c9cdc..36963685d42a 100644
2326 +--- a/drivers/net/team/team.c
2327 ++++ b/drivers/net/team/team.c
2328 +@@ -261,6 +261,17 @@ static void __team_option_inst_mark_removed_port(struct team *team,
2329 + }
2330 + }
2331 +
2332 ++static bool __team_option_inst_tmp_find(const struct list_head *opts,
2333 ++ const struct team_option_inst *needle)
2334 ++{
2335 ++ struct team_option_inst *opt_inst;
2336 ++
2337 ++ list_for_each_entry(opt_inst, opts, tmp_list)
2338 ++ if (opt_inst == needle)
2339 ++ return true;
2340 ++ return false;
2341 ++}
2342 ++
2343 + static int __team_options_register(struct team *team,
2344 + const struct team_option *option,
2345 + size_t option_count)
2346 +@@ -1067,14 +1078,11 @@ static void team_port_leave(struct team *team, struct team_port *port)
2347 + }
2348 +
2349 + #ifdef CONFIG_NET_POLL_CONTROLLER
2350 +-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
2351 ++static int __team_port_enable_netpoll(struct team_port *port)
2352 + {
2353 + struct netpoll *np;
2354 + int err;
2355 +
2356 +- if (!team->dev->npinfo)
2357 +- return 0;
2358 +-
2359 + np = kzalloc(sizeof(*np), GFP_KERNEL);
2360 + if (!np)
2361 + return -ENOMEM;
2362 +@@ -1088,6 +1096,14 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port)
2363 + return err;
2364 + }
2365 +
2366 ++static int team_port_enable_netpoll(struct team_port *port)
2367 ++{
2368 ++ if (!port->team->dev->npinfo)
2369 ++ return 0;
2370 ++
2371 ++ return __team_port_enable_netpoll(port);
2372 ++}
2373 ++
2374 + static void team_port_disable_netpoll(struct team_port *port)
2375 + {
2376 + struct netpoll *np = port->np;
2377 +@@ -1102,7 +1118,7 @@ static void team_port_disable_netpoll(struct team_port *port)
2378 + kfree(np);
2379 + }
2380 + #else
2381 +-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
2382 ++static int team_port_enable_netpoll(struct team_port *port)
2383 + {
2384 + return 0;
2385 + }
2386 +@@ -1210,7 +1226,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
2387 + goto err_vids_add;
2388 + }
2389 +
2390 +- err = team_port_enable_netpoll(team, port);
2391 ++ err = team_port_enable_netpoll(port);
2392 + if (err) {
2393 + netdev_err(dev, "Failed to enable netpoll on device %s\n",
2394 + portname);
2395 +@@ -1908,7 +1924,7 @@ static int team_netpoll_setup(struct net_device *dev,
2396 +
2397 + mutex_lock(&team->lock);
2398 + list_for_each_entry(port, &team->port_list, list) {
2399 +- err = team_port_enable_netpoll(team, port);
2400 ++ err = __team_port_enable_netpoll(port);
2401 + if (err) {
2402 + __team_netpoll_cleanup(team);
2403 + break;
2404 +@@ -2569,6 +2585,14 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2405 + if (err)
2406 + goto team_put;
2407 + opt_inst->changed = true;
2408 ++
2409 ++ /* dumb/evil user-space can send us duplicate opt,
2410 ++ * keep only the last one
2411 ++ */
2412 ++ if (__team_option_inst_tmp_find(&opt_inst_list,
2413 ++ opt_inst))
2414 ++ continue;
2415 ++
2416 + list_add(&opt_inst->tmp_list, &opt_inst_list);
2417 + }
2418 + if (!opt_found) {
2419 +diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
2420 +index 4fb468666b19..99424c87b464 100644
2421 +--- a/drivers/net/usb/cdc_ether.c
2422 ++++ b/drivers/net/usb/cdc_ether.c
2423 +@@ -530,6 +530,7 @@ static const struct driver_info wwan_info = {
2424 + #define REALTEK_VENDOR_ID 0x0bda
2425 + #define SAMSUNG_VENDOR_ID 0x04e8
2426 + #define LENOVO_VENDOR_ID 0x17ef
2427 ++#define LINKSYS_VENDOR_ID 0x13b1
2428 + #define NVIDIA_VENDOR_ID 0x0955
2429 + #define HP_VENDOR_ID 0x03f0
2430 +
2431 +@@ -719,6 +720,15 @@ static const struct usb_device_id products[] = {
2432 + .driver_info = 0,
2433 + },
2434 +
2435 ++#if IS_ENABLED(CONFIG_USB_RTL8152)
2436 ++/* Linksys USB3GIGV1 Ethernet Adapter */
2437 ++{
2438 ++ USB_DEVICE_AND_INTERFACE_INFO(LINKSYS_VENDOR_ID, 0x0041, USB_CLASS_COMM,
2439 ++ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
2440 ++ .driver_info = 0,
2441 ++},
2442 ++#endif
2443 ++
2444 + /* Lenovo Thinkpad USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
2445 + {
2446 + USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7205, USB_CLASS_COMM,
2447 +diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
2448 +index b2d7c7e32250..3cdfa2465e3f 100644
2449 +--- a/drivers/net/usb/r8152.c
2450 ++++ b/drivers/net/usb/r8152.c
2451 +@@ -519,6 +519,7 @@ enum rtl8152_flags {
2452 + #define VENDOR_ID_REALTEK 0x0bda
2453 + #define VENDOR_ID_SAMSUNG 0x04e8
2454 + #define VENDOR_ID_LENOVO 0x17ef
2455 ++#define VENDOR_ID_LINKSYS 0x13b1
2456 + #define VENDOR_ID_NVIDIA 0x0955
2457 +
2458 + #define MCU_TYPE_PLA 0x0100
2459 +@@ -4506,6 +4507,7 @@ static struct usb_device_id rtl8152_table[] = {
2460 + {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
2461 + {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
2462 + {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
2463 ++ {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
2464 + {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
2465 + {}
2466 + };
2467 +diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
2468 +index a497bf31953d..5aa5df24f4dc 100644
2469 +--- a/drivers/net/wireless/ath/ath10k/mac.c
2470 ++++ b/drivers/net/wireless/ath/ath10k/mac.c
2471 +@@ -5819,9 +5819,8 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
2472 + sta->addr, smps, err);
2473 + }
2474 +
2475 +- if (changed & IEEE80211_RC_SUPP_RATES_CHANGED ||
2476 +- changed & IEEE80211_RC_NSS_CHANGED) {
2477 +- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n",
2478 ++ if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
2479 ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
2480 + sta->addr);
2481 +
2482 + err = ath10k_station_assoc(ar, arvif->vif, sta, true);
2483 +diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
2484 +index a35f78be8dec..acef4ec928c1 100644
2485 +--- a/drivers/net/wireless/ath/ath9k/hw.c
2486 ++++ b/drivers/net/wireless/ath/ath9k/hw.c
2487 +@@ -1603,6 +1603,10 @@ bool ath9k_hw_check_alive(struct ath_hw *ah)
2488 + int count = 50;
2489 + u32 reg, last_val;
2490 +
2491 ++ /* Check if chip failed to wake up */
2492 ++ if (REG_READ(ah, AR_CFG) == 0xdeadbeef)
2493 ++ return false;
2494 ++
2495 + if (AR_SREV_9300(ah))
2496 + return !ath9k_hw_detect_mac_hang(ah);
2497 +
2498 +diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
2499 +index 4182c3775a72..2681b5339810 100644
2500 +--- a/drivers/net/wireless/mac80211_hwsim.c
2501 ++++ b/drivers/net/wireless/mac80211_hwsim.c
2502 +@@ -3346,8 +3346,11 @@ static void __net_exit hwsim_exit_net(struct net *net)
2503 + continue;
2504 +
2505 + list_del(&data->list);
2506 +- INIT_WORK(&data->destroy_work, destroy_radio);
2507 +- schedule_work(&data->destroy_work);
2508 ++ spin_unlock_bh(&hwsim_radio_lock);
2509 ++ mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy),
2510 ++ NULL);
2511 ++ spin_lock_bh(&hwsim_radio_lock);
2512 ++
2513 + }
2514 + spin_unlock_bh(&hwsim_radio_lock);
2515 + }
2516 +diff --git a/drivers/of/base.c b/drivers/of/base.c
2517 +index a0bccb54a9bd..466b285cef3e 100644
2518 +--- a/drivers/of/base.c
2519 ++++ b/drivers/of/base.c
2520 +@@ -2109,7 +2109,7 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
2521 + continue;
2522 +
2523 + /* Allocate an alias_prop with enough space for the stem */
2524 +- ap = dt_alloc(sizeof(*ap) + len + 1, 4);
2525 ++ ap = dt_alloc(sizeof(*ap) + len + 1, __alignof__(*ap));
2526 + if (!ap)
2527 + continue;
2528 + memset(ap, 0, sizeof(*ap) + len + 1);
2529 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
2530 +index a87c8e1aef68..9c13aeeeb973 100644
2531 +--- a/drivers/pci/pci.c
2532 ++++ b/drivers/pci/pci.c
2533 +@@ -3756,27 +3756,49 @@ int pci_wait_for_pending_transaction(struct pci_dev *dev)
2534 + }
2535 + EXPORT_SYMBOL(pci_wait_for_pending_transaction);
2536 +
2537 +-/*
2538 +- * We should only need to wait 100ms after FLR, but some devices take longer.
2539 +- * Wait for up to 1000ms for config space to return something other than -1.
2540 +- * Intel IGD requires this when an LCD panel is attached. We read the 2nd
2541 +- * dword because VFs don't implement the 1st dword.
2542 +- */
2543 + static void pci_flr_wait(struct pci_dev *dev)
2544 + {
2545 +- int i = 0;
2546 ++ int delay = 1, timeout = 60000;
2547 + u32 id;
2548 +
2549 +- do {
2550 +- msleep(100);
2551 ++ /*
2552 ++ * Per PCIe r3.1, sec 6.6.2, a device must complete an FLR within
2553 ++ * 100ms, but may silently discard requests while the FLR is in
2554 ++ * progress. Wait 100ms before trying to access the device.
2555 ++ */
2556 ++ msleep(100);
2557 ++
2558 ++ /*
2559 ++ * After 100ms, the device should not silently discard config
2560 ++ * requests, but it may still indicate that it needs more time by
2561 ++ * responding to them with CRS completions. The Root Port will
2562 ++ * generally synthesize ~0 data to complete the read (except when
2563 ++ * CRS SV is enabled and the read was for the Vendor ID; in that
2564 ++ * case it synthesizes 0x0001 data).
2565 ++ *
2566 ++ * Wait for the device to return a non-CRS completion. Read the
2567 ++ * Command register instead of Vendor ID so we don't have to