Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Sun, 29 Apr 2018 11:48:06
Message-Id: 1525002456.8df3794a0a9edcd8fce5a3d06d0fc8d1908dbd09.mpagano@gentoo
1 commit: 8df3794a0a9edcd8fce5a3d06d0fc8d1908dbd09
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Sun Apr 29 11:47:36 2018 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Sun Apr 29 11:47:36 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=8df3794a
7
8 Linux patch 4.4.130
9
10 0000_README | 4 +
11 1129_linux-4.4.130.patch | 2794 ++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 2798 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index c2625c8..33599fb 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -559,6 +559,10 @@ Patch: 1128_linux-4.4.129.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.4.129
21
22 +Patch: 1129_linux-4.4.130.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.4.130
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1129_linux-4.4.130.patch b/1129_linux-4.4.130.patch
31 new file mode 100644
32 index 0000000..e29e411
33 --- /dev/null
34 +++ b/1129_linux-4.4.130.patch
35 @@ -0,0 +1,2794 @@
36 +diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
37 +index f53ef1ac3122..4df6bd7d01ed 100644
38 +--- a/Documentation/kernel-parameters.txt
39 ++++ b/Documentation/kernel-parameters.txt
40 +@@ -2402,6 +2402,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
41 +
42 + noalign [KNL,ARM]
43 +
44 ++ noaltinstr [S390] Disables alternative instructions patching
45 ++ (CPU alternatives feature).
46 ++
47 + noapic [SMP,APIC] Tells the kernel to not make use of any
48 + IOAPICs that may be present in the system.
49 +
50 +diff --git a/Makefile b/Makefile
51 +index 096d7e867b6c..151477d4d5e5 100644
52 +--- a/Makefile
53 ++++ b/Makefile
54 +@@ -1,6 +1,6 @@
55 + VERSION = 4
56 + PATCHLEVEL = 4
57 +-SUBLEVEL = 129
58 ++SUBLEVEL = 130
59 + EXTRAVERSION =
60 + NAME = Blurry Fish Butt
61 +
62 +diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
63 +index 2ee95ece0498..9bdaeb38a768 100644
64 +--- a/arch/s390/Kconfig
65 ++++ b/arch/s390/Kconfig
66 +@@ -111,6 +111,7 @@ config S390
67 + select GENERIC_CLOCKEVENTS
68 + select GENERIC_CPU_AUTOPROBE
69 + select GENERIC_CPU_DEVICES if !SMP
70 ++ select GENERIC_CPU_VULNERABILITIES
71 + select GENERIC_FIND_FIRST_BIT
72 + select GENERIC_SMP_IDLE_THREAD
73 + select GENERIC_TIME_VSYSCALL
74 +@@ -705,6 +706,51 @@ config SECCOMP
75 +
76 + If unsure, say Y.
77 +
78 ++config KERNEL_NOBP
79 ++ def_bool n
80 ++ prompt "Enable modified branch prediction for the kernel by default"
81 ++ help
82 ++ If this option is selected the kernel will switch to a modified
83 ++ branch prediction mode if the firmware interface is available.
84 ++ The modified branch prediction mode improves the behaviour in
85 ++ regard to speculative execution.
86 ++
87 ++ With the option enabled the kernel parameter "nobp=0" or "nospec"
88 ++ can be used to run the kernel in the normal branch prediction mode.
89 ++
90 ++ With the option disabled the modified branch prediction mode is
91 ++ enabled with the "nobp=1" kernel parameter.
92 ++
93 ++ If unsure, say N.
94 ++
95 ++config EXPOLINE
96 ++ def_bool n
97 ++ prompt "Avoid speculative indirect branches in the kernel"
98 ++ help
99 ++ Compile the kernel with the expoline compiler options to guard
100 ++ against kernel-to-user data leaks by avoiding speculative indirect
101 ++ branches.
102 ++ Requires a compiler with -mindirect-branch=thunk support for full
103 ++ protection. The kernel may run slower.
104 ++
105 ++ If unsure, say N.
106 ++
107 ++choice
108 ++ prompt "Expoline default"
109 ++ depends on EXPOLINE
110 ++ default EXPOLINE_FULL
111 ++
112 ++config EXPOLINE_OFF
113 ++ bool "spectre_v2=off"
114 ++
115 ++config EXPOLINE_AUTO
116 ++ bool "spectre_v2=auto"
117 ++
118 ++config EXPOLINE_FULL
119 ++ bool "spectre_v2=on"
120 ++
121 ++endchoice
122 ++
123 + endmenu
124 +
125 + menu "Power Management"
126 +@@ -754,6 +800,7 @@ config PFAULT
127 + config SHARED_KERNEL
128 + bool "VM shared kernel support"
129 + depends on !JUMP_LABEL
130 ++ depends on !ALTERNATIVES
131 + help
132 + Select this option, if you want to share the text segment of the
133 + Linux kernel between different VM guests. This reduces memory
134 +diff --git a/arch/s390/Makefile b/arch/s390/Makefile
135 +index e8d4423e4f85..d924f9b6dc73 100644
136 +--- a/arch/s390/Makefile
137 ++++ b/arch/s390/Makefile
138 +@@ -77,6 +77,16 @@ ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y)
139 + cflags-$(CONFIG_WARN_DYNAMIC_STACK) += -mwarn-dynamicstack
140 + endif
141 +
142 ++ifdef CONFIG_EXPOLINE
143 ++ ifeq ($(call cc-option-yn,$(CC_FLAGS_MARCH) -mindirect-branch=thunk),y)
144 ++ CC_FLAGS_EXPOLINE := -mindirect-branch=thunk
145 ++ CC_FLAGS_EXPOLINE += -mfunction-return=thunk
146 ++ CC_FLAGS_EXPOLINE += -mindirect-branch-table
147 ++ export CC_FLAGS_EXPOLINE
148 ++ cflags-y += $(CC_FLAGS_EXPOLINE) -DCC_USING_EXPOLINE
149 ++ endif
150 ++endif
151 ++
152 + ifdef CONFIG_FUNCTION_TRACER
153 + # make use of hotpatch feature if the compiler supports it
154 + cc_hotpatch := -mhotpatch=0,3
155 +diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h
156 +new file mode 100644
157 +index 000000000000..a72002056b54
158 +--- /dev/null
159 ++++ b/arch/s390/include/asm/alternative.h
160 +@@ -0,0 +1,149 @@
161 ++#ifndef _ASM_S390_ALTERNATIVE_H
162 ++#define _ASM_S390_ALTERNATIVE_H
163 ++
164 ++#ifndef __ASSEMBLY__
165 ++
166 ++#include <linux/types.h>
167 ++#include <linux/stddef.h>
168 ++#include <linux/stringify.h>
169 ++
170 ++struct alt_instr {
171 ++ s32 instr_offset; /* original instruction */
172 ++ s32 repl_offset; /* offset to replacement instruction */
173 ++ u16 facility; /* facility bit set for replacement */
174 ++ u8 instrlen; /* length of original instruction */
175 ++ u8 replacementlen; /* length of new instruction */
176 ++} __packed;
177 ++
178 ++void apply_alternative_instructions(void);
179 ++void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
180 ++
181 ++/*
182 ++ * |661: |662: |6620 |663:
183 ++ * +-----------+---------------------+
184 ++ * | oldinstr | oldinstr_padding |
185 ++ * | +----------+----------+
186 ++ * | | | |
187 ++ * | | >6 bytes |6/4/2 nops|
188 ++ * | |6 bytes jg----------->
189 ++ * +-----------+---------------------+
190 ++ * ^^ static padding ^^
191 ++ *
192 ++ * .altinstr_replacement section
193 ++ * +---------------------+-----------+
194 ++ * |6641: |6651:
195 ++ * | alternative instr 1 |
196 ++ * +-----------+---------+- - - - - -+
197 ++ * |6642: |6652: |
198 ++ * | alternative instr 2 | padding
199 ++ * +---------------------+- - - - - -+
200 ++ * ^ runtime ^
201 ++ *
202 ++ * .altinstructions section
203 ++ * +---------------------------------+
204 ++ * | alt_instr entries for each |
205 ++ * | alternative instr |
206 ++ * +---------------------------------+
207 ++ */
208 ++
209 ++#define b_altinstr(num) "664"#num
210 ++#define e_altinstr(num) "665"#num
211 ++
212 ++#define e_oldinstr_pad_end "663"
213 ++#define oldinstr_len "662b-661b"
214 ++#define oldinstr_total_len e_oldinstr_pad_end"b-661b"
215 ++#define altinstr_len(num) e_altinstr(num)"b-"b_altinstr(num)"b"
216 ++#define oldinstr_pad_len(num) \
217 ++ "-(((" altinstr_len(num) ")-(" oldinstr_len ")) > 0) * " \
218 ++ "((" altinstr_len(num) ")-(" oldinstr_len "))"
219 ++
220 ++#define INSTR_LEN_SANITY_CHECK(len) \
221 ++ ".if " len " > 254\n" \
222 ++ "\t.error \"cpu alternatives does not support instructions " \
223 ++ "blocks > 254 bytes\"\n" \
224 ++ ".endif\n" \
225 ++ ".if (" len ") %% 2\n" \
226 ++ "\t.error \"cpu alternatives instructions length is odd\"\n" \
227 ++ ".endif\n"
228 ++
229 ++#define OLDINSTR_PADDING(oldinstr, num) \
230 ++ ".if " oldinstr_pad_len(num) " > 6\n" \
231 ++ "\tjg " e_oldinstr_pad_end "f\n" \
232 ++ "6620:\n" \
233 ++ "\t.fill (" oldinstr_pad_len(num) " - (6620b-662b)) / 2, 2, 0x0700\n" \
234 ++ ".else\n" \
235 ++ "\t.fill " oldinstr_pad_len(num) " / 6, 6, 0xc0040000\n" \
236 ++ "\t.fill " oldinstr_pad_len(num) " %% 6 / 4, 4, 0x47000000\n" \
237 ++ "\t.fill " oldinstr_pad_len(num) " %% 6 %% 4 / 2, 2, 0x0700\n" \
238 ++ ".endif\n"
239 ++
240 ++#define OLDINSTR(oldinstr, num) \
241 ++ "661:\n\t" oldinstr "\n662:\n" \
242 ++ OLDINSTR_PADDING(oldinstr, num) \
243 ++ e_oldinstr_pad_end ":\n" \
244 ++ INSTR_LEN_SANITY_CHECK(oldinstr_len)
245 ++
246 ++#define OLDINSTR_2(oldinstr, num1, num2) \
247 ++ "661:\n\t" oldinstr "\n662:\n" \
248 ++ ".if " altinstr_len(num1) " < " altinstr_len(num2) "\n" \
249 ++ OLDINSTR_PADDING(oldinstr, num2) \
250 ++ ".else\n" \
251 ++ OLDINSTR_PADDING(oldinstr, num1) \
252 ++ ".endif\n" \
253 ++ e_oldinstr_pad_end ":\n" \
254 ++ INSTR_LEN_SANITY_CHECK(oldinstr_len)
255 ++
256 ++#define ALTINSTR_ENTRY(facility, num) \
257 ++ "\t.long 661b - .\n" /* old instruction */ \
258 ++ "\t.long " b_altinstr(num)"b - .\n" /* alt instruction */ \
259 ++ "\t.word " __stringify(facility) "\n" /* facility bit */ \
260 ++ "\t.byte " oldinstr_total_len "\n" /* source len */ \
261 ++ "\t.byte " altinstr_len(num) "\n" /* alt instruction len */
262 ++
263 ++#define ALTINSTR_REPLACEMENT(altinstr, num) /* replacement */ \
264 ++ b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n" \
265 ++ INSTR_LEN_SANITY_CHECK(altinstr_len(num))
266 ++
267 ++/* alternative assembly primitive: */
268 ++#define ALTERNATIVE(oldinstr, altinstr, facility) \
269 ++ ".pushsection .altinstr_replacement, \"ax\"\n" \
270 ++ ALTINSTR_REPLACEMENT(altinstr, 1) \
271 ++ ".popsection\n" \
272 ++ OLDINSTR(oldinstr, 1) \
273 ++ ".pushsection .altinstructions,\"a\"\n" \
274 ++ ALTINSTR_ENTRY(facility, 1) \
275 ++ ".popsection\n"
276 ++
277 ++#define ALTERNATIVE_2(oldinstr, altinstr1, facility1, altinstr2, facility2)\
278 ++ ".pushsection .altinstr_replacement, \"ax\"\n" \
279 ++ ALTINSTR_REPLACEMENT(altinstr1, 1) \
280 ++ ALTINSTR_REPLACEMENT(altinstr2, 2) \
281 ++ ".popsection\n" \
282 ++ OLDINSTR_2(oldinstr, 1, 2) \
283 ++ ".pushsection .altinstructions,\"a\"\n" \
284 ++ ALTINSTR_ENTRY(facility1, 1) \
285 ++ ALTINSTR_ENTRY(facility2, 2) \
286 ++ ".popsection\n"
287 ++
288 ++/*
289 ++ * Alternative instructions for different CPU types or capabilities.
290 ++ *
291 ++ * This allows to use optimized instructions even on generic binary
292 ++ * kernels.
293 ++ *
294 ++ * oldinstr is padded with jump and nops at compile time if altinstr is
295 ++ * longer. altinstr is padded with jump and nops at run-time during patching.
296 ++ *
297 ++ * For non barrier like inlines please define new variants
298 ++ * without volatile and memory clobber.
299 ++ */
300 ++#define alternative(oldinstr, altinstr, facility) \
301 ++ asm volatile(ALTERNATIVE(oldinstr, altinstr, facility) : : : "memory")
302 ++
303 ++#define alternative_2(oldinstr, altinstr1, facility1, altinstr2, facility2) \
304 ++ asm volatile(ALTERNATIVE_2(oldinstr, altinstr1, facility1, \
305 ++ altinstr2, facility2) ::: "memory")
306 ++
307 ++#endif /* __ASSEMBLY__ */
308 ++
309 ++#endif /* _ASM_S390_ALTERNATIVE_H */
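For illustration (not part of the patch itself), a minimal sketch of how the alternative() helper defined above could be used from C code. The instruction encodings mirror the BPON macro added to entry.S later in this patch; the wrapper function name is hypothetical:

    /* Hypothetical: issue the 4-byte "branch prediction on" instruction
     * (0xb2e8d000) only when facility bit 82 is installed; machines
     * without the facility keep the original 4-byte nop (0x47000000).
     * Both sides are 4 bytes, so no padding is generated. */
    static inline void example_bpon(void)
    {
            alternative(".long 0x47000000", ".long 0xb2e8d000", 82);
    }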
310 +diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
311 +index d68e11e0df5e..e903b28e7358 100644
312 +--- a/arch/s390/include/asm/barrier.h
313 ++++ b/arch/s390/include/asm/barrier.h
314 +@@ -53,4 +53,28 @@ do { \
315 + ___p1; \
316 + })
317 +
318 ++/**
319 ++ * array_index_mask_nospec - generate a mask for array_idx() that is
320 ++ * ~0UL when the bounds check succeeds and 0 otherwise
321 ++ * @index: array element index
322 ++ * @size: number of elements in array
323 ++ */
324 ++#define array_index_mask_nospec array_index_mask_nospec
325 ++static inline unsigned long array_index_mask_nospec(unsigned long index,
326 ++ unsigned long size)
327 ++{
328 ++ unsigned long mask;
329 ++
330 ++ if (__builtin_constant_p(size) && size > 0) {
331 ++ asm(" clgr %2,%1\n"
332 ++ " slbgr %0,%0\n"
333 ++ :"=d" (mask) : "d" (size-1), "d" (index) :"cc");
334 ++ return mask;
335 ++ }
336 ++ asm(" clgr %1,%2\n"
337 ++ " slbgr %0,%0\n"
338 ++ :"=d" (mask) : "d" (size), "d" (index) :"cc");
339 ++ return ~mask;
340 ++}
341 ++
342 + #endif /* __ASM_BARRIER_H */
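For illustration (not part of the patch), a minimal sketch of how the new array_index_mask_nospec() helper is meant to be combined with an index, mirroring the generic array_index_nospec() pattern; the function name is hypothetical:

    /* Hypothetical caller: "idx" comes from user space, "size" is the
     * number of table entries.  The mask is ~0UL when idx < size and 0
     * otherwise, so the AND clamps a mispredicted index to slot 0. */
    static inline unsigned long example_clamp_index(unsigned long idx,
                                                    unsigned long size)
    {
            return idx & array_index_mask_nospec(idx, size);
    }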
343 +diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
344 +index 0aa6a7ed95a3..155fcc7bcba6 100644
345 +--- a/arch/s390/include/asm/facility.h
346 ++++ b/arch/s390/include/asm/facility.h
347 +@@ -13,6 +13,24 @@
348 +
349 + #define MAX_FACILITY_BIT (256*8) /* stfle_fac_list has 256 bytes */
350 +
351 ++static inline void __set_facility(unsigned long nr, void *facilities)
352 ++{
353 ++ unsigned char *ptr = (unsigned char *) facilities;
354 ++
355 ++ if (nr >= MAX_FACILITY_BIT)
356 ++ return;
357 ++ ptr[nr >> 3] |= 0x80 >> (nr & 7);
358 ++}
359 ++
360 ++static inline void __clear_facility(unsigned long nr, void *facilities)
361 ++{
362 ++ unsigned char *ptr = (unsigned char *) facilities;
363 ++
364 ++ if (nr >= MAX_FACILITY_BIT)
365 ++ return;
366 ++ ptr[nr >> 3] &= ~(0x80 >> (nr & 7));
367 ++}
368 ++
369 + static inline int __test_facility(unsigned long nr, void *facilities)
370 + {
371 + unsigned char *ptr;
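A short worked example (illustration only) of the bit arithmetic behind __set_facility() and __clear_facility(): the facility list is a bit string numbered from the most significant bit of byte 0, so byte nr >> 3 holds the bit and 0x80 >> (nr & 7) selects it. For facility bit 82, used throughout this patch:

    /*   82 >> 3   == 10    -> byte 10 of the facility list
     *   82 & 7    == 2
     *   0x80 >> 2 == 0x20  -> the bit within that byte
     * __set_facility(82, list) ORs 0x20 into list[10];
     * __clear_facility(82, list) ANDs it back out. */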
372 +diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
373 +index e9a983f40a24..7d9c5917da2b 100644
374 +--- a/arch/s390/include/asm/kvm_host.h
375 ++++ b/arch/s390/include/asm/kvm_host.h
376 +@@ -136,7 +136,8 @@ struct kvm_s390_sie_block {
377 + __u16 ipa; /* 0x0056 */
378 + __u32 ipb; /* 0x0058 */
379 + __u32 scaoh; /* 0x005c */
380 +- __u8 reserved60; /* 0x0060 */
381 ++#define FPF_BPBC 0x20
382 ++ __u8 fpf; /* 0x0060 */
383 + __u8 ecb; /* 0x0061 */
384 + __u8 ecb2; /* 0x0062 */
385 + #define ECB3_AES 0x04
386 +diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
387 +index afe1cfebf1a4..8520c23e419b 100644
388 +--- a/arch/s390/include/asm/lowcore.h
389 ++++ b/arch/s390/include/asm/lowcore.h
390 +@@ -155,7 +155,9 @@ struct _lowcore {
391 + /* Per cpu primary space access list */
392 + __u32 paste[16]; /* 0x0400 */
393 +
394 +- __u8 pad_0x04c0[0x0e00-0x0440]; /* 0x0440 */
395 ++ /* br %r1 trampoline */
396 ++ __u16 br_r1_trampoline; /* 0x0440 */
397 ++ __u8 pad_0x0442[0x0e00-0x0442]; /* 0x0442 */
398 +
399 + /*
400 + * 0xe00 contains the address of the IPL Parameter Information
401 +@@ -170,7 +172,8 @@ struct _lowcore {
402 + __u8 pad_0x0e20[0x0f00-0x0e20]; /* 0x0e20 */
403 +
404 + /* Extended facility list */
405 +- __u64 stfle_fac_list[32]; /* 0x0f00 */
406 ++ __u64 stfle_fac_list[16]; /* 0x0f00 */
407 ++ __u64 alt_stfle_fac_list[16]; /* 0x0f80 */
408 + __u8 pad_0x1000[0x11b0-0x1000]; /* 0x1000 */
409 +
410 + /* Pointer to vector register save area */
411 +diff --git a/arch/s390/include/asm/nospec-branch.h b/arch/s390/include/asm/nospec-branch.h
412 +new file mode 100644
413 +index 000000000000..b4bd8c41e9d3
414 +--- /dev/null
415 ++++ b/arch/s390/include/asm/nospec-branch.h
416 +@@ -0,0 +1,17 @@
417 ++/* SPDX-License-Identifier: GPL-2.0 */
418 ++#ifndef _ASM_S390_EXPOLINE_H
419 ++#define _ASM_S390_EXPOLINE_H
420 ++
421 ++#ifndef __ASSEMBLY__
422 ++
423 ++#include <linux/types.h>
424 ++
425 ++extern int nospec_disable;
426 ++
427 ++void nospec_init_branches(void);
428 ++void nospec_auto_detect(void);
429 ++void nospec_revert(s32 *start, s32 *end);
430 ++
431 ++#endif /* __ASSEMBLY__ */
432 ++
433 ++#endif /* _ASM_S390_EXPOLINE_H */
434 +diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
435 +index c61ed7890cef..f915a0f1b0fc 100644
436 +--- a/arch/s390/include/asm/processor.h
437 ++++ b/arch/s390/include/asm/processor.h
438 +@@ -69,6 +69,7 @@ extern void s390_adjust_jiffies(void);
439 + extern const struct seq_operations cpuinfo_op;
440 + extern int sysctl_ieee_emulation_warnings;
441 + extern void execve_tail(void);
442 ++extern void __bpon(void);
443 +
444 + /*
445 + * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
446 +@@ -315,6 +316,9 @@ extern void memcpy_absolute(void *, void *, size_t);
447 + memcpy_absolute(&(dest), &__tmp, sizeof(__tmp)); \
448 + }
449 +
450 ++extern int s390_isolate_bp(void);
451 ++extern int s390_isolate_bp_guest(void);
452 ++
453 + #endif /* __ASSEMBLY__ */
454 +
455 + #endif /* __ASM_S390_PROCESSOR_H */
456 +diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
457 +index 692b9247c019..b2504163c8fa 100644
458 +--- a/arch/s390/include/asm/thread_info.h
459 ++++ b/arch/s390/include/asm/thread_info.h
460 +@@ -78,6 +78,8 @@ void arch_release_task_struct(struct task_struct *tsk);
461 + #define TIF_SECCOMP 5 /* secure computing */
462 + #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
463 + #define TIF_UPROBE 7 /* breakpointed or single-stepping */
464 ++#define TIF_ISOLATE_BP 8 /* Run process with isolated BP */
465 ++#define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */
466 + #define TIF_31BIT 16 /* 32bit process */
467 + #define TIF_MEMDIE 17 /* is terminating due to OOM killer */
468 + #define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal() */
469 +@@ -93,6 +95,8 @@ void arch_release_task_struct(struct task_struct *tsk);
470 + #define _TIF_SECCOMP _BITUL(TIF_SECCOMP)
471 + #define _TIF_SYSCALL_TRACEPOINT _BITUL(TIF_SYSCALL_TRACEPOINT)
472 + #define _TIF_UPROBE _BITUL(TIF_UPROBE)
473 ++#define _TIF_ISOLATE_BP _BITUL(TIF_ISOLATE_BP)
474 ++#define _TIF_ISOLATE_BP_GUEST _BITUL(TIF_ISOLATE_BP_GUEST)
475 + #define _TIF_31BIT _BITUL(TIF_31BIT)
476 + #define _TIF_SINGLE_STEP _BITUL(TIF_SINGLE_STEP)
477 +
478 +diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
479 +index ef1a5fcc6c66..beb508a9e72c 100644
480 +--- a/arch/s390/include/uapi/asm/kvm.h
481 ++++ b/arch/s390/include/uapi/asm/kvm.h
482 +@@ -151,6 +151,7 @@ struct kvm_guest_debug_arch {
483 + #define KVM_SYNC_ARCH0 (1UL << 4)
484 + #define KVM_SYNC_PFAULT (1UL << 5)
485 + #define KVM_SYNC_VRS (1UL << 6)
486 ++#define KVM_SYNC_BPBC (1UL << 10)
487 + /* definition of registers in kvm_run */
488 + struct kvm_sync_regs {
489 + __u64 prefix; /* prefix register */
490 +@@ -168,6 +169,8 @@ struct kvm_sync_regs {
491 + __u64 vrs[32][2]; /* vector registers */
492 + __u8 reserved[512]; /* for future vector expansion */
493 + __u32 fpc; /* only valid with vector registers */
494 ++ __u8 bpbc : 1; /* bp mode */
495 ++ __u8 reserved2 : 7;
496 + };
497 +
498 + #define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1)
499 +diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
500 +index dc167a23b920..8ccfbf22ecbb 100644
501 +--- a/arch/s390/kernel/Makefile
502 ++++ b/arch/s390/kernel/Makefile
503 +@@ -44,10 +44,13 @@ obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
504 + obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o
505 + obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
506 + obj-y += runtime_instr.o cache.o dumpstack.o
507 +-obj-y += entry.o reipl.o relocate_kernel.o
508 ++obj-y += entry.o reipl.o relocate_kernel.o alternative.o
509 ++obj-y += nospec-branch.o
510 +
511 + extra-y += head.o head64.o vmlinux.lds
512 +
513 ++CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE)
514 ++
515 + obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
516 + obj-$(CONFIG_SMP) += smp.o
517 + obj-$(CONFIG_SCHED_BOOK) += topology.o
518 +diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c
519 +new file mode 100644
520 +index 000000000000..b57b293998dc
521 +--- /dev/null
522 ++++ b/arch/s390/kernel/alternative.c
523 +@@ -0,0 +1,112 @@
524 ++#include <linux/module.h>
525 ++#include <asm/alternative.h>
526 ++#include <asm/facility.h>
527 ++#include <asm/nospec-branch.h>
528 ++
529 ++#define MAX_PATCH_LEN (255 - 1)
530 ++
531 ++static int __initdata_or_module alt_instr_disabled;
532 ++
533 ++static int __init disable_alternative_instructions(char *str)
534 ++{
535 ++ alt_instr_disabled = 1;
536 ++ return 0;
537 ++}
538 ++
539 ++early_param("noaltinstr", disable_alternative_instructions);
540 ++
541 ++struct brcl_insn {
542 ++ u16 opc;
543 ++ s32 disp;
544 ++} __packed;
545 ++
546 ++static u16 __initdata_or_module nop16 = 0x0700;
547 ++static u32 __initdata_or_module nop32 = 0x47000000;
548 ++static struct brcl_insn __initdata_or_module nop48 = {
549 ++ 0xc004, 0
550 ++};
551 ++
552 ++static const void *nops[] __initdata_or_module = {
553 ++ &nop16,
554 ++ &nop32,
555 ++ &nop48
556 ++};
557 ++
558 ++static void __init_or_module add_jump_padding(void *insns, unsigned int len)
559 ++{
560 ++ struct brcl_insn brcl = {
561 ++ 0xc0f4,
562 ++ len / 2
563 ++ };
564 ++
565 ++ memcpy(insns, &brcl, sizeof(brcl));
566 ++ insns += sizeof(brcl);
567 ++ len -= sizeof(brcl);
568 ++
569 ++ while (len > 0) {
570 ++ memcpy(insns, &nop16, 2);
571 ++ insns += 2;
572 ++ len -= 2;
573 ++ }
574 ++}
575 ++
576 ++static void __init_or_module add_padding(void *insns, unsigned int len)
577 ++{
578 ++ if (len > 6)
579 ++ add_jump_padding(insns, len);
580 ++ else if (len >= 2)
581 ++ memcpy(insns, nops[len / 2 - 1], len);
582 ++}
583 ++
584 ++static void __init_or_module __apply_alternatives(struct alt_instr *start,
585 ++ struct alt_instr *end)
586 ++{
587 ++ struct alt_instr *a;
588 ++ u8 *instr, *replacement;
589 ++ u8 insnbuf[MAX_PATCH_LEN];
590 ++
591 ++ /*
592 ++ * The scan order should be from start to end. A later scanned
593 ++ * alternative code can overwrite previously scanned alternative code.
594 ++ */
595 ++ for (a = start; a < end; a++) {
596 ++ int insnbuf_sz = 0;
597 ++
598 ++ instr = (u8 *)&a->instr_offset + a->instr_offset;
599 ++ replacement = (u8 *)&a->repl_offset + a->repl_offset;
600 ++
601 ++ if (!__test_facility(a->facility,
602 ++ S390_lowcore.alt_stfle_fac_list))
603 ++ continue;
604 ++
605 ++ if (unlikely(a->instrlen % 2 || a->replacementlen % 2)) {
606 ++ WARN_ONCE(1, "cpu alternatives instructions length is "
607 ++ "odd, skipping patching\n");
608 ++ continue;
609 ++ }
610 ++
611 ++ memcpy(insnbuf, replacement, a->replacementlen);
612 ++ insnbuf_sz = a->replacementlen;
613 ++
614 ++ if (a->instrlen > a->replacementlen) {
615 ++ add_padding(insnbuf + a->replacementlen,
616 ++ a->instrlen - a->replacementlen);
617 ++ insnbuf_sz += a->instrlen - a->replacementlen;
618 ++ }
619 ++
620 ++ s390_kernel_write(instr, insnbuf, insnbuf_sz);
621 ++ }
622 ++}
623 ++
624 ++void __init_or_module apply_alternatives(struct alt_instr *start,
625 ++ struct alt_instr *end)
626 ++{
627 ++ if (!alt_instr_disabled)
628 ++ __apply_alternatives(start, end);
629 ++}
630 ++
631 ++extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
632 ++void __init apply_alternative_instructions(void)
633 ++{
634 ++ apply_alternatives(__alt_instructions, __alt_instructions_end);
635 ++}
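To make the padding logic above concrete, a small worked example with assumed lengths (the numbers are hypothetical, not taken from any real alt_instr entry):

    /* Assume instrlen == 10 and replacementlen == 6.  __apply_alternatives()
     * copies the 6 replacement bytes into insnbuf, add_padding(insnbuf + 6, 4)
     * appends the 4-byte nop 0x47000000 (nops[4 / 2 - 1] == &nop32), and the
     * resulting 10 bytes are written over the original code with
     * s390_kernel_write().  A gap larger than 6 bytes would instead get a
     * brcl that jumps over the remaining 2-byte nops (add_jump_padding). */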
636 +diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
637 +index ee7b8e7ca4f8..8eccead675d4 100644
638 +--- a/arch/s390/kernel/early.c
639 ++++ b/arch/s390/kernel/early.c
640 +@@ -279,6 +279,11 @@ static noinline __init void setup_facility_list(void)
641 + {
642 + stfle(S390_lowcore.stfle_fac_list,
643 + ARRAY_SIZE(S390_lowcore.stfle_fac_list));
644 ++ memcpy(S390_lowcore.alt_stfle_fac_list,
645 ++ S390_lowcore.stfle_fac_list,
646 ++ sizeof(S390_lowcore.alt_stfle_fac_list));
647 ++ if (!IS_ENABLED(CONFIG_KERNEL_NOBP))
648 ++ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
649 + }
650 +
651 + static __init void detect_diag9c(void)
652 +diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
653 +index 4612ed7ec2e5..c63730326215 100644
654 +--- a/arch/s390/kernel/entry.S
655 ++++ b/arch/s390/kernel/entry.S
656 +@@ -104,6 +104,7 @@ _PIF_WORK = (_PIF_PER_TRAP)
657 + j 3f
658 + 1: LAST_BREAK %r14
659 + UPDATE_VTIME %r14,%r15,\timer
660 ++ BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
661 + 2: lg %r15,__LC_ASYNC_STACK # load async stack
662 + 3: la %r11,STACK_FRAME_OVERHEAD(%r15)
663 + .endm
664 +@@ -162,8 +163,137 @@ _PIF_WORK = (_PIF_PER_TRAP)
665 + tm off+\addr, \mask
666 + .endm
667 +
668 ++ .macro BPOFF
669 ++ .pushsection .altinstr_replacement, "ax"
670 ++660: .long 0xb2e8c000
671 ++ .popsection
672 ++661: .long 0x47000000
673 ++ .pushsection .altinstructions, "a"
674 ++ .long 661b - .
675 ++ .long 660b - .
676 ++ .word 82
677 ++ .byte 4
678 ++ .byte 4
679 ++ .popsection
680 ++ .endm
681 ++
682 ++ .macro BPON
683 ++ .pushsection .altinstr_replacement, "ax"
684 ++662: .long 0xb2e8d000
685 ++ .popsection
686 ++663: .long 0x47000000
687 ++ .pushsection .altinstructions, "a"
688 ++ .long 663b - .
689 ++ .long 662b - .
690 ++ .word 82
691 ++ .byte 4
692 ++ .byte 4
693 ++ .popsection
694 ++ .endm
695 ++
696 ++ .macro BPENTER tif_ptr,tif_mask
697 ++ .pushsection .altinstr_replacement, "ax"
698 ++662: .word 0xc004, 0x0000, 0x0000 # 6 byte nop
699 ++ .word 0xc004, 0x0000, 0x0000 # 6 byte nop
700 ++ .popsection
701 ++664: TSTMSK \tif_ptr,\tif_mask
702 ++ jz . + 8
703 ++ .long 0xb2e8d000
704 ++ .pushsection .altinstructions, "a"
705 ++ .long 664b - .
706 ++ .long 662b - .
707 ++ .word 82
708 ++ .byte 12
709 ++ .byte 12
710 ++ .popsection
711 ++ .endm
712 ++
713 ++ .macro BPEXIT tif_ptr,tif_mask
714 ++ TSTMSK \tif_ptr,\tif_mask
715 ++ .pushsection .altinstr_replacement, "ax"
716 ++662: jnz . + 8
717 ++ .long 0xb2e8d000
718 ++ .popsection
719 ++664: jz . + 8
720 ++ .long 0xb2e8c000
721 ++ .pushsection .altinstructions, "a"
722 ++ .long 664b - .
723 ++ .long 662b - .
724 ++ .word 82
725 ++ .byte 8
726 ++ .byte 8
727 ++ .popsection
728 ++ .endm
729 ++
730 ++#ifdef CONFIG_EXPOLINE
731 ++
732 ++ .macro GEN_BR_THUNK name,reg,tmp
733 ++ .section .text.\name,"axG",@progbits,\name,comdat
734 ++ .globl \name
735 ++ .hidden \name
736 ++ .type \name,@function
737 ++\name:
738 ++ .cfi_startproc
739 ++#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
740 ++ exrl 0,0f
741 ++#else
742 ++ larl \tmp,0f
743 ++ ex 0,0(\tmp)
744 ++#endif
745 ++ j .
746 ++0: br \reg
747 ++ .cfi_endproc
748 ++ .endm
749 ++
750 ++ GEN_BR_THUNK __s390x_indirect_jump_r1use_r9,%r9,%r1
751 ++ GEN_BR_THUNK __s390x_indirect_jump_r1use_r14,%r14,%r1
752 ++ GEN_BR_THUNK __s390x_indirect_jump_r11use_r14,%r14,%r11
753 ++
754 ++ .macro BASR_R14_R9
755 ++0: brasl %r14,__s390x_indirect_jump_r1use_r9
756 ++ .pushsection .s390_indirect_branches,"a",@progbits
757 ++ .long 0b-.
758 ++ .popsection
759 ++ .endm
760 ++
761 ++ .macro BR_R1USE_R14
762 ++0: jg __s390x_indirect_jump_r1use_r14
763 ++ .pushsection .s390_indirect_branches,"a",@progbits
764 ++ .long 0b-.
765 ++ .popsection
766 ++ .endm
767 ++
768 ++ .macro BR_R11USE_R14
769 ++0: jg __s390x_indirect_jump_r11use_r14
770 ++ .pushsection .s390_indirect_branches,"a",@progbits
771 ++ .long 0b-.
772 ++ .popsection
773 ++ .endm
774 ++
775 ++#else /* CONFIG_EXPOLINE */
776 ++
777 ++ .macro BASR_R14_R9
778 ++ basr %r14,%r9
779 ++ .endm
780 ++
781 ++ .macro BR_R1USE_R14
782 ++ br %r14
783 ++ .endm
784 ++
785 ++ .macro BR_R11USE_R14
786 ++ br %r14
787 ++ .endm
788 ++
789 ++#endif /* CONFIG_EXPOLINE */
790 ++
791 ++
792 + .section .kprobes.text, "ax"
793 +
794 ++ENTRY(__bpon)
795 ++ .globl __bpon
796 ++ BPON
797 ++ BR_R1USE_R14
798 ++
799 + /*
800 + * Scheduler resume function, called by switch_to
801 + * gpr2 = (task_struct *) prev
802 +@@ -190,9 +320,9 @@ ENTRY(__switch_to)
803 + mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
804 + lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
805 + TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
806 +- bzr %r14
807 ++ jz 0f
808 + .insn s,0xb2800000,__LC_LPP # set program parameter
809 +- br %r14
810 ++0: BR_R1USE_R14
811 +
812 + .L__critical_start:
813 +
814 +@@ -204,9 +334,11 @@ ENTRY(__switch_to)
815 + */
816 + ENTRY(sie64a)
817 + stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers
818 ++ lg %r12,__LC_CURRENT
819 + stg %r2,__SF_EMPTY(%r15) # save control block pointer
820 + stg %r3,__SF_EMPTY+8(%r15) # save guest register save area
821 + xc __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # reason code = 0
822 ++ mvc __SF_EMPTY+24(8,%r15),__TI_flags(%r12) # copy thread flags
823 + TSTMSK __LC_CPU_FLAGS,_CIF_FPU # load guest fp/vx registers ?
824 + jno .Lsie_load_guest_gprs
825 + brasl %r14,load_fpu_regs # load guest fp/vx regs
826 +@@ -223,7 +355,11 @@ ENTRY(sie64a)
827 + jnz .Lsie_skip
828 + TSTMSK __LC_CPU_FLAGS,_CIF_FPU
829 + jo .Lsie_skip # exit if fp/vx regs changed
830 ++ BPEXIT __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
831 + sie 0(%r14)
832 ++.Lsie_exit:
833 ++ BPOFF
834 ++ BPENTER __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
835 + .Lsie_skip:
836 + ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
837 + lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
838 +@@ -244,9 +380,15 @@ ENTRY(sie64a)
839 + sie_exit:
840 + lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
841 + stmg %r0,%r13,0(%r14) # save guest gprs 0-13
842 ++ xgr %r0,%r0 # clear guest registers to
843 ++ xgr %r1,%r1 # prevent speculative use
844 ++ xgr %r2,%r2
845 ++ xgr %r3,%r3
846 ++ xgr %r4,%r4
847 ++ xgr %r5,%r5
848 + lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
849 + lg %r2,__SF_EMPTY+16(%r15) # return exit reason code
850 +- br %r14
851 ++ BR_R1USE_R14
852 + .Lsie_fault:
853 + lghi %r14,-EFAULT
854 + stg %r14,__SF_EMPTY+16(%r15) # set exit reason code
855 +@@ -267,6 +409,7 @@ ENTRY(system_call)
856 + stpt __LC_SYNC_ENTER_TIMER
857 + .Lsysc_stmg:
858 + stmg %r8,%r15,__LC_SAVE_AREA_SYNC
859 ++ BPOFF
860 + lg %r10,__LC_LAST_BREAK
861 + lg %r12,__LC_THREAD_INFO
862 + lghi %r14,_PIF_SYSCALL
863 +@@ -276,12 +419,15 @@ ENTRY(system_call)
864 + LAST_BREAK %r13
865 + .Lsysc_vtime:
866 + UPDATE_VTIME %r10,%r13,__LC_SYNC_ENTER_TIMER
867 ++ BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
868 + stmg %r0,%r7,__PT_R0(%r11)
869 + mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
870 + mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
871 + mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
872 + stg %r14,__PT_FLAGS(%r11)
873 + .Lsysc_do_svc:
874 ++ # clear user controlled register to prevent speculative use
875 ++ xgr %r0,%r0
876 + lg %r10,__TI_sysc_table(%r12) # address of system call table
877 + llgh %r8,__PT_INT_CODE+2(%r11)
878 + slag %r8,%r8,2 # shift and test for svc 0
879 +@@ -299,7 +445,7 @@ ENTRY(system_call)
880 + lgf %r9,0(%r8,%r10) # get system call add.
881 + TSTMSK __TI_flags(%r12),_TIF_TRACE
882 + jnz .Lsysc_tracesys
883 +- basr %r14,%r9 # call sys_xxxx
884 ++ BASR_R14_R9 # call sys_xxxx
885 + stg %r2,__PT_R2(%r11) # store return value
886 +
887 + .Lsysc_return:
888 +@@ -311,6 +457,7 @@ ENTRY(system_call)
889 + jnz .Lsysc_work # check for work
890 + TSTMSK __LC_CPU_FLAGS,_CIF_WORK
891 + jnz .Lsysc_work
892 ++ BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
893 + .Lsysc_restore:
894 + lg %r14,__LC_VDSO_PER_CPU
895 + lmg %r0,%r10,__PT_R0(%r11)
896 +@@ -438,7 +585,7 @@ ENTRY(system_call)
897 + lmg %r3,%r7,__PT_R3(%r11)
898 + stg %r7,STACK_FRAME_OVERHEAD(%r15)
899 + lg %r2,__PT_ORIG_GPR2(%r11)
900 +- basr %r14,%r9 # call sys_xxx
901 ++ BASR_R14_R9 # call sys_xxx
902 + stg %r2,__PT_R2(%r11) # store return value
903 + .Lsysc_tracenogo:
904 + TSTMSK __TI_flags(%r12),_TIF_TRACE
905 +@@ -462,7 +609,7 @@ ENTRY(ret_from_fork)
906 + lmg %r9,%r10,__PT_R9(%r11) # load gprs
907 + ENTRY(kernel_thread_starter)
908 + la %r2,0(%r10)
909 +- basr %r14,%r9
910 ++ BASR_R14_R9
911 + j .Lsysc_tracenogo
912 +
913 + /*
914 +@@ -471,6 +618,7 @@ ENTRY(kernel_thread_starter)
915 +
916 + ENTRY(pgm_check_handler)
917 + stpt __LC_SYNC_ENTER_TIMER
918 ++ BPOFF
919 + stmg %r8,%r15,__LC_SAVE_AREA_SYNC
920 + lg %r10,__LC_LAST_BREAK
921 + lg %r12,__LC_THREAD_INFO
922 +@@ -495,6 +643,7 @@ ENTRY(pgm_check_handler)
923 + j 3f
924 + 2: LAST_BREAK %r14
925 + UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
926 ++ BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
927 + lg %r15,__LC_KERNEL_STACK
928 + lg %r14,__TI_task(%r12)
929 + aghi %r14,__TASK_thread # pointer to thread_struct
930 +@@ -504,6 +653,15 @@ ENTRY(pgm_check_handler)
931 + mvc __THREAD_trap_tdb(256,%r14),0(%r13)
932 + 3: la %r11,STACK_FRAME_OVERHEAD(%r15)
933 + stmg %r0,%r7,__PT_R0(%r11)
934 ++ # clear user controlled registers to prevent speculative use
935 ++ xgr %r0,%r0
936 ++ xgr %r1,%r1
937 ++ xgr %r2,%r2
938 ++ xgr %r3,%r3
939 ++ xgr %r4,%r4
940 ++ xgr %r5,%r5
941 ++ xgr %r6,%r6
942 ++ xgr %r7,%r7
943 + mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
944 + stmg %r8,%r9,__PT_PSW(%r11)
945 + mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC
946 +@@ -525,9 +683,9 @@ ENTRY(pgm_check_handler)
947 + nill %r10,0x007f
948 + sll %r10,2
949 + je .Lpgm_return
950 +- lgf %r1,0(%r10,%r1) # load address of handler routine
951 ++ lgf %r9,0(%r10,%r1) # load address of handler routine
952 + lgr %r2,%r11 # pass pointer to pt_regs
953 +- basr %r14,%r1 # branch to interrupt-handler
954 ++ BASR_R14_R9 # branch to interrupt-handler
955 + .Lpgm_return:
956 + LOCKDEP_SYS_EXIT
957 + tm __PT_PSW+1(%r11),0x01 # returning to user ?
958 +@@ -560,6 +718,7 @@ ENTRY(pgm_check_handler)
959 + ENTRY(io_int_handler)
960 + STCK __LC_INT_CLOCK
961 + stpt __LC_ASYNC_ENTER_TIMER
962 ++ BPOFF
963 + stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
964 + lg %r10,__LC_LAST_BREAK
965 + lg %r12,__LC_THREAD_INFO
966 +@@ -567,6 +726,16 @@ ENTRY(io_int_handler)
967 + lmg %r8,%r9,__LC_IO_OLD_PSW
968 + SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
969 + stmg %r0,%r7,__PT_R0(%r11)
970 ++ # clear user controlled registers to prevent speculative use
971 ++ xgr %r0,%r0
972 ++ xgr %r1,%r1
973 ++ xgr %r2,%r2
974 ++ xgr %r3,%r3
975 ++ xgr %r4,%r4
976 ++ xgr %r5,%r5
977 ++ xgr %r6,%r6
978 ++ xgr %r7,%r7
979 ++ xgr %r10,%r10
980 + mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
981 + stmg %r8,%r9,__PT_PSW(%r11)
982 + mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
983 +@@ -601,9 +770,13 @@ ENTRY(io_int_handler)
984 + lg %r14,__LC_VDSO_PER_CPU
985 + lmg %r0,%r10,__PT_R0(%r11)
986 + mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
987 ++ tm __PT_PSW+1(%r11),0x01 # returning to user ?
988 ++ jno .Lio_exit_kernel
989 ++ BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
990 + .Lio_exit_timer:
991 + stpt __LC_EXIT_TIMER
992 + mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
993 ++.Lio_exit_kernel:
994 + lmg %r11,%r15,__PT_R11(%r11)
995 + lpswe __LC_RETURN_PSW
996 + .Lio_done:
997 +@@ -735,6 +908,7 @@ ENTRY(io_int_handler)
998 + ENTRY(ext_int_handler)
999 + STCK __LC_INT_CLOCK
1000 + stpt __LC_ASYNC_ENTER_TIMER
1001 ++ BPOFF
1002 + stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
1003 + lg %r10,__LC_LAST_BREAK
1004 + lg %r12,__LC_THREAD_INFO
1005 +@@ -742,6 +916,16 @@ ENTRY(ext_int_handler)
1006 + lmg %r8,%r9,__LC_EXT_OLD_PSW
1007 + SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
1008 + stmg %r0,%r7,__PT_R0(%r11)
1009 ++ # clear user controlled registers to prevent speculative use
1010 ++ xgr %r0,%r0
1011 ++ xgr %r1,%r1
1012 ++ xgr %r2,%r2
1013 ++ xgr %r3,%r3
1014 ++ xgr %r4,%r4
1015 ++ xgr %r5,%r5
1016 ++ xgr %r6,%r6
1017 ++ xgr %r7,%r7
1018 ++ xgr %r10,%r10
1019 + mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
1020 + stmg %r8,%r9,__PT_PSW(%r11)
1021 + lghi %r1,__LC_EXT_PARAMS2
1022 +@@ -773,11 +957,12 @@ ENTRY(psw_idle)
1023 + .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
1024 + .Lpsw_idle_stcctm:
1025 + #endif
1026 ++ BPON
1027 + STCK __CLOCK_IDLE_ENTER(%r2)
1028 + stpt __TIMER_IDLE_ENTER(%r2)
1029 + .Lpsw_idle_lpsw:
1030 + lpswe __SF_EMPTY(%r15)
1031 +- br %r14
1032 ++ BR_R1USE_R14
1033 + .Lpsw_idle_end:
1034 +
1035 + /*
1036 +@@ -791,7 +976,7 @@ ENTRY(save_fpu_regs)
1037 + lg %r2,__LC_CURRENT
1038 + aghi %r2,__TASK_thread
1039 + TSTMSK __LC_CPU_FLAGS,_CIF_FPU
1040 +- bor %r14
1041 ++ jo .Lsave_fpu_regs_exit
1042 + stfpc __THREAD_FPU_fpc(%r2)
1043 + .Lsave_fpu_regs_fpc_end:
1044 + lg %r3,__THREAD_FPU_regs(%r2)
1045 +@@ -821,7 +1006,8 @@ ENTRY(save_fpu_regs)
1046 + std 15,120(%r3)
1047 + .Lsave_fpu_regs_done:
1048 + oi __LC_CPU_FLAGS+7,_CIF_FPU
1049 +- br %r14
1050 ++.Lsave_fpu_regs_exit:
1051 ++ BR_R1USE_R14
1052 + .Lsave_fpu_regs_end:
1053 +
1054 + /*
1055 +@@ -838,7 +1024,7 @@ load_fpu_regs:
1056 + lg %r4,__LC_CURRENT
1057 + aghi %r4,__TASK_thread
1058 + TSTMSK __LC_CPU_FLAGS,_CIF_FPU
1059 +- bnor %r14
1060 ++ jno .Lload_fpu_regs_exit
1061 + lfpc __THREAD_FPU_fpc(%r4)
1062 + TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
1063 + lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area
1064 +@@ -867,7 +1053,8 @@ load_fpu_regs:
1065 + ld 15,120(%r4)
1066 + .Lload_fpu_regs_done:
1067 + ni __LC_CPU_FLAGS+7,255-_CIF_FPU
1068 +- br %r14
1069 ++.Lload_fpu_regs_exit:
1070 ++ BR_R1USE_R14
1071 + .Lload_fpu_regs_end:
1072 +
1073 + .L__critical_end:
1074 +@@ -877,6 +1064,7 @@ load_fpu_regs:
1075 + */
1076 + ENTRY(mcck_int_handler)
1077 + STCK __LC_MCCK_CLOCK
1078 ++ BPOFF
1079 + la %r1,4095 # revalidate r1
1080 + spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
1081 + lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
1082 +@@ -908,6 +1096,16 @@ ENTRY(mcck_int_handler)
1083 + .Lmcck_skip:
1084 + lghi %r14,__LC_GPREGS_SAVE_AREA+64
1085 + stmg %r0,%r7,__PT_R0(%r11)
1086 ++ # clear user controlled registers to prevent speculative use
1087 ++ xgr %r0,%r0
1088 ++ xgr %r1,%r1
1089 ++ xgr %r2,%r2
1090 ++ xgr %r3,%r3
1091 ++ xgr %r4,%r4
1092 ++ xgr %r5,%r5
1093 ++ xgr %r6,%r6
1094 ++ xgr %r7,%r7
1095 ++ xgr %r10,%r10
1096 + mvc __PT_R8(64,%r11),0(%r14)
1097 + stmg %r8,%r9,__PT_PSW(%r11)
1098 + xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
1099 +@@ -933,6 +1131,7 @@ ENTRY(mcck_int_handler)
1100 + mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
1101 + tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
1102 + jno 0f
1103 ++ BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
1104 + stpt __LC_EXIT_TIMER
1105 + mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
1106 + 0: lmg %r11,%r15,__PT_R11(%r11)
1107 +@@ -1028,7 +1227,7 @@ cleanup_critical:
1108 + jl 0f
1109 + clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end
1110 + jl .Lcleanup_load_fpu_regs
1111 +-0: br %r14
1112 ++0: BR_R11USE_R14
1113 +
1114 + .align 8
1115 + .Lcleanup_table:
1116 +@@ -1053,11 +1252,12 @@ cleanup_critical:
1117 + .quad .Lsie_done
1118 +
1119 + .Lcleanup_sie:
1120 ++ BPENTER __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
1121 + lg %r9,__SF_EMPTY(%r15) # get control block pointer
1122 + ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
1123 + lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
1124 + larl %r9,sie_exit # skip forward to sie_exit
1125 +- br %r14
1126 ++ BR_R11USE_R14
1127 + #endif
1128 +
1129 + .Lcleanup_system_call:
1130 +@@ -1099,7 +1299,8 @@ cleanup_critical:
1131 + srag %r9,%r9,23
1132 + jz 0f
1133 + mvc __TI_last_break(8,%r12),16(%r11)
1134 +-0: # set up saved register r11
1135 ++0: BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
1136 ++ # set up saved register r11
1137 + lg %r15,__LC_KERNEL_STACK
1138 + la %r9,STACK_FRAME_OVERHEAD(%r15)
1139 + stg %r9,24(%r11) # r11 pt_regs pointer
1140 +@@ -1114,7 +1315,7 @@ cleanup_critical:
1141 + stg %r15,56(%r11) # r15 stack pointer
1142 + # set new psw address and exit
1143 + larl %r9,.Lsysc_do_svc
1144 +- br %r14
1145 ++ BR_R11USE_R14
1146 + .Lcleanup_system_call_insn:
1147 + .quad system_call
1148 + .quad .Lsysc_stmg
1149 +@@ -1124,7 +1325,7 @@ cleanup_critical:
1150 +
1151 + .Lcleanup_sysc_tif:
1152 + larl %r9,.Lsysc_tif
1153 +- br %r14
1154 ++ BR_R11USE_R14
1155 +
1156 + .Lcleanup_sysc_restore:
1157 + # check if stpt has been executed
1158 +@@ -1141,14 +1342,14 @@ cleanup_critical:
1159 + mvc 0(64,%r11),__PT_R8(%r9)
1160 + lmg %r0,%r7,__PT_R0(%r9)
1161 + 1: lmg %r8,%r9,__LC_RETURN_PSW
1162 +- br %r14
1163 ++ BR_R11USE_R14
1164 + .Lcleanup_sysc_restore_insn:
1165 + .quad .Lsysc_exit_timer
1166 + .quad .Lsysc_done - 4
1167 +
1168 + .Lcleanup_io_tif:
1169 + larl %r9,.Lio_tif
1170 +- br %r14
1171 ++ BR_R11USE_R14
1172 +
1173 + .Lcleanup_io_restore:
1174 + # check if stpt has been executed
1175 +@@ -1162,7 +1363,7 @@ cleanup_critical:
1176 + mvc 0(64,%r11),__PT_R8(%r9)
1177 + lmg %r0,%r7,__PT_R0(%r9)
1178 + 1: lmg %r8,%r9,__LC_RETURN_PSW
1179 +- br %r14
1180 ++ BR_R11USE_R14
1181 + .Lcleanup_io_restore_insn:
1182 + .quad .Lio_exit_timer
1183 + .quad .Lio_done - 4
1184 +@@ -1214,17 +1415,17 @@ cleanup_critical:
1185 + # prepare return psw
1186 + nihh %r8,0xfcfd # clear irq & wait state bits
1187 + lg %r9,48(%r11) # return from psw_idle
1188 +- br %r14
1189 ++ BR_R11USE_R14
1190 + .Lcleanup_idle_insn:
1191 + .quad .Lpsw_idle_lpsw
1192 +
1193 + .Lcleanup_save_fpu_regs:
1194 + larl %r9,save_fpu_regs
1195 +- br %r14
1196 ++ BR_R11USE_R14
1197 +
1198 + .Lcleanup_load_fpu_regs:
1199 + larl %r9,load_fpu_regs
1200 +- br %r14
1201 ++ BR_R11USE_R14
1202 +
1203 + /*
1204 + * Integer constants
1205 +@@ -1240,7 +1441,6 @@ cleanup_critical:
1206 + .Lsie_critical_length:
1207 + .quad .Lsie_done - .Lsie_gmap
1208 + #endif
1209 +-
1210 + .section .rodata, "a"
1211 + #define SYSCALL(esame,emu) .long esame
1212 + .globl sys_call_table
1213 +diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
1214 +index e73979236659..837bb301023f 100644
1215 +--- a/arch/s390/kernel/ipl.c
1216 ++++ b/arch/s390/kernel/ipl.c
1217 +@@ -563,6 +563,7 @@ static struct kset *ipl_kset;
1218 +
1219 + static void __ipl_run(void *unused)
1220 + {
1221 ++ __bpon();
1222 + diag308(DIAG308_IPL, NULL);
1223 + if (MACHINE_IS_VM)
1224 + __cpcmd("IPL", NULL, 0, NULL);
1225 +diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
1226 +index 0c1a679314dd..9bd1933848b8 100644
1227 +--- a/arch/s390/kernel/module.c
1228 ++++ b/arch/s390/kernel/module.c
1229 +@@ -31,6 +31,9 @@
1230 + #include <linux/kernel.h>
1231 + #include <linux/moduleloader.h>
1232 + #include <linux/bug.h>
1233 ++#include <asm/alternative.h>
1234 ++#include <asm/nospec-branch.h>
1235 ++#include <asm/facility.h>
1236 +
1237 + #if 0
1238 + #define DEBUGP printk
1239 +@@ -163,7 +166,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
1240 + me->arch.got_offset = me->core_size;
1241 + me->core_size += me->arch.got_size;
1242 + me->arch.plt_offset = me->core_size;
1243 +- me->core_size += me->arch.plt_size;
1244 ++ if (me->arch.plt_size) {
1245 ++ if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable)
1246 ++ me->arch.plt_size += PLT_ENTRY_SIZE;
1247 ++ me->core_size += me->arch.plt_size;
1248 ++ }
1249 + return 0;
1250 + }
1251 +
1252 +@@ -317,9 +324,20 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
1253 + unsigned int *ip;
1254 + ip = me->module_core + me->arch.plt_offset +
1255 + info->plt_offset;
1256 +- ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
1257 +- ip[1] = 0x100a0004;
1258 +- ip[2] = 0x07f10000;
1259 ++ ip[0] = 0x0d10e310; /* basr 1,0 */
1260 ++ ip[1] = 0x100a0004; /* lg 1,10(1) */
1261 ++ if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) {
1262 ++ unsigned int *ij;
1263 ++ ij = me->module_core +
1264 ++ me->arch.plt_offset +
1265 ++ me->arch.plt_size - PLT_ENTRY_SIZE;
1266 ++ ip[2] = 0xa7f40000 + /* j __jump_r1 */
1267 ++ (unsigned int)(u16)
1268 ++ (((unsigned long) ij - 8 -
1269 ++ (unsigned long) ip) / 2);
1270 ++ } else {
1271 ++ ip[2] = 0x07f10000; /* br %r1 */
1272 ++ }
1273 + ip[3] = (unsigned int) (val >> 32);
1274 + ip[4] = (unsigned int) val;
1275 + info->plt_initialized = 1;
1276 +@@ -424,6 +442,45 @@ int module_finalize(const Elf_Ehdr *hdr,
1277 + const Elf_Shdr *sechdrs,
1278 + struct module *me)
1279 + {
1280 ++ const Elf_Shdr *s;
1281 ++ char *secstrings, *secname;
1282 ++ void *aseg;
1283 ++
1284 ++ if (IS_ENABLED(CONFIG_EXPOLINE) &&
1285 ++ !nospec_disable && me->arch.plt_size) {
1286 ++ unsigned int *ij;
1287 ++
1288 ++ ij = me->module_core + me->arch.plt_offset +
1289 ++ me->arch.plt_size - PLT_ENTRY_SIZE;
1290 ++ if (test_facility(35)) {
1291 ++ ij[0] = 0xc6000000; /* exrl %r0,.+10 */
1292 ++ ij[1] = 0x0005a7f4; /* j . */
1293 ++ ij[2] = 0x000007f1; /* br %r1 */
1294 ++ } else {
1295 ++ ij[0] = 0x44000000 | (unsigned int)
1296 ++ offsetof(struct _lowcore, br_r1_trampoline);
1297 ++ ij[1] = 0xa7f40000; /* j . */
1298 ++ }
1299 ++ }
1300 ++
1301 ++ secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
1302 ++ for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
1303 ++ aseg = (void *) s->sh_addr;
1304 ++ secname = secstrings + s->sh_name;
1305 ++
1306 ++ if (!strcmp(".altinstructions", secname))
1307 ++ /* patch .altinstructions */
1308 ++ apply_alternatives(aseg, aseg + s->sh_size);
1309 ++
1310 ++ if (IS_ENABLED(CONFIG_EXPOLINE) &&
1311 ++ (!strncmp(".s390_indirect", secname, 14)))
1312 ++ nospec_revert(aseg, aseg + s->sh_size);
1313 ++
1314 ++ if (IS_ENABLED(CONFIG_EXPOLINE) &&
1315 ++ (!strncmp(".s390_return", secname, 12)))
1316 ++ nospec_revert(aseg, aseg + s->sh_size);
1317 ++ }
1318 ++
1319 + jump_label_apply_nops(me);
1320 + vfree(me->arch.syminfo);
1321 + me->arch.syminfo = NULL;
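A brief worked note (illustration only) on the displacement computed for the expoline PLT entry above:

    /* ip[2] holds a "j" (brc 15) instruction that sits 8 bytes into the
     * PLT entry.  Its 16-bit displacement counts halfwords relative to
     * the instruction address, so
     *     ((unsigned long) ij - 8 - (unsigned long) ip) / 2
     * is the distance from &ip[2] to the shared __jump_r1 thunk ij,
     * expressed in halfwords. */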
1322 +diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
1323 +new file mode 100644
1324 +index 000000000000..9f3b5b382743
1325 +--- /dev/null
1326 ++++ b/arch/s390/kernel/nospec-branch.c
1327 +@@ -0,0 +1,169 @@
1328 ++// SPDX-License-Identifier: GPL-2.0
1329 ++#include <linux/module.h>
1330 ++#include <linux/device.h>
1331 ++#include <asm/facility.h>
1332 ++#include <asm/nospec-branch.h>
1333 ++
1334 ++static int __init nobp_setup_early(char *str)
1335 ++{
1336 ++ bool enabled;
1337 ++ int rc;
1338 ++
1339 ++ rc = kstrtobool(str, &enabled);
1340 ++ if (rc)
1341 ++ return rc;
1342 ++ if (enabled && test_facility(82)) {
1343 ++ /*
1344 ++ * The user explicitly requested nobp=1, enable it and
1345 ++ * disable the expoline support.
1346 ++ */
1347 ++ __set_facility(82, S390_lowcore.alt_stfle_fac_list);
1348 ++ if (IS_ENABLED(CONFIG_EXPOLINE))
1349 ++ nospec_disable = 1;
1350 ++ } else {
1351 ++ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
1352 ++ }
1353 ++ return 0;
1354 ++}
1355 ++early_param("nobp", nobp_setup_early);
1356 ++
1357 ++static int __init nospec_setup_early(char *str)
1358 ++{
1359 ++ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
1360 ++ return 0;
1361 ++}
1362 ++early_param("nospec", nospec_setup_early);
1363 ++
1364 ++static int __init nospec_report(void)
1365 ++{
1366 ++ if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
1367 ++ pr_info("Spectre V2 mitigation: execute trampolines.\n");
1368 ++ if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
1369 ++ pr_info("Spectre V2 mitigation: limited branch prediction.\n");
1370 ++ return 0;
1371 ++}
1372 ++arch_initcall(nospec_report);
1373 ++
1374 ++#ifdef CONFIG_SYSFS
1375 ++ssize_t cpu_show_spectre_v1(struct device *dev,
1376 ++ struct device_attribute *attr, char *buf)
1377 ++{
1378 ++ return sprintf(buf, "Mitigation: __user pointer sanitization\n");
1379 ++}
1380 ++
1381 ++ssize_t cpu_show_spectre_v2(struct device *dev,
1382 ++ struct device_attribute *attr, char *buf)
1383 ++{
1384 ++ if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
1385 ++ return sprintf(buf, "Mitigation: execute trampolines\n");
1386 ++ if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
1387 ++ return sprintf(buf, "Mitigation: limited branch prediction.\n");
1388 ++ return sprintf(buf, "Vulnerable\n");
1389 ++}
1390 ++#endif
1391 ++
1392 ++#ifdef CONFIG_EXPOLINE
1393 ++
1394 ++int nospec_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF);
1395 ++
1396 ++static int __init nospectre_v2_setup_early(char *str)
1397 ++{
1398 ++ nospec_disable = 1;
1399 ++ return 0;
1400 ++}
1401 ++early_param("nospectre_v2", nospectre_v2_setup_early);
1402 ++
1403 ++void __init nospec_auto_detect(void)
1404 ++{
1405 ++ if (IS_ENABLED(CC_USING_EXPOLINE)) {
1406 ++ /*
1407 ++ * The kernel has been compiled with expolines.
1408 ++ * Keep expolines enabled and disable nobp.
1409 ++ */
1410 ++ nospec_disable = 0;
1411 ++ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
1412 ++ }
1413 ++ /*
1414 ++ * If the kernel has not been compiled with expolines the
1415 ++ * nobp setting decides what is done, this depends on the
1416 ++ * CONFIG_KERNEL_NOBP option and the nobp/nospec parameters.
1417 ++ */
1418 ++}
1419 ++
1420 ++static int __init spectre_v2_setup_early(char *str)
1421 ++{
1422 ++ if (str && !strncmp(str, "on", 2)) {
1423 ++ nospec_disable = 0;
1424 ++ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
1425 ++ }
1426 ++ if (str && !strncmp(str, "off", 3))
1427 ++ nospec_disable = 1;
1428 ++ if (str && !strncmp(str, "auto", 4))
1429 ++ nospec_auto_detect();
1430 ++ return 0;
1431 ++}
1432 ++early_param("spectre_v2", spectre_v2_setup_early);
1433 ++
1434 ++static void __init_or_module __nospec_revert(s32 *start, s32 *end)
1435 ++{
1436 ++ enum { BRCL_EXPOLINE, BRASL_EXPOLINE } type;
1437 ++ u8 *instr, *thunk, *br;
1438 ++ u8 insnbuf[6];
1439 ++ s32 *epo;
1440 ++
1441 ++ /* Second part of the instruction replace is always a nop */
1442 ++ memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x00, 0x00 }, 4);
1443 ++ for (epo = start; epo < end; epo++) {
1444 ++ instr = (u8 *) epo + *epo;
1445 ++ if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04)
1446 ++ type = BRCL_EXPOLINE; /* brcl instruction */
1447 ++ else if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x05)
1448 ++ type = BRASL_EXPOLINE; /* brasl instruction */
1449 ++ else
1450 ++ continue;
1451 ++ thunk = instr + (*(int *)(instr + 2)) * 2;
1452 ++ if (thunk[0] == 0xc6 && thunk[1] == 0x00)
1453 ++ /* exrl %r0,<target-br> */
1454 ++ br = thunk + (*(int *)(thunk + 2)) * 2;
1455 ++ else if (thunk[0] == 0xc0 && (thunk[1] & 0x0f) == 0x00 &&
1456 ++ thunk[6] == 0x44 && thunk[7] == 0x00 &&
1457 ++ (thunk[8] & 0x0f) == 0x00 && thunk[9] == 0x00 &&
1458 ++ (thunk[1] & 0xf0) == (thunk[8] & 0xf0))
1459 ++ /* larl %rx,<target br> + ex %r0,0(%rx) */
1460 ++ br = thunk + (*(int *)(thunk + 2)) * 2;
1461 ++ else
1462 ++ continue;
1463 ++ if (br[0] != 0x07 || (br[1] & 0xf0) != 0xf0)
1464 ++ continue;
1465 ++ switch (type) {
1466 ++ case BRCL_EXPOLINE:
1467 ++ /* brcl to thunk, replace with br + nop */
1468 ++ insnbuf[0] = br[0];
1469 ++ insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
1470 ++ break;
1471 ++ case BRASL_EXPOLINE:
1472 ++ /* brasl to thunk, replace with basr + nop */
1473 ++ insnbuf[0] = 0x0d;
1474 ++ insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
1475 ++ break;
1476 ++ }
1477 ++
1478 ++ s390_kernel_write(instr, insnbuf, 6);
1479 ++ }
1480 ++}
1481 ++
1482 ++void __init_or_module nospec_revert(s32 *start, s32 *end)
1483 ++{
1484 ++ if (nospec_disable)
1485 ++ __nospec_revert(start, end);
1486 ++}
1487 ++
1488 ++extern s32 __nospec_call_start[], __nospec_call_end[];
1489 ++extern s32 __nospec_return_start[], __nospec_return_end[];
1490 ++void __init nospec_init_branches(void)
1491 ++{
1492 ++ nospec_revert(__nospec_call_start, __nospec_call_end);
1493 ++ nospec_revert(__nospec_return_start, __nospec_return_end);
1494 ++}
1495 ++
1496 ++#endif /* CONFIG_EXPOLINE */
1497 +diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
1498 +index 7ce00e7a709a..ab236bd970bb 100644
1499 +--- a/arch/s390/kernel/processor.c
1500 ++++ b/arch/s390/kernel/processor.c
1501 +@@ -13,6 +13,7 @@
1502 + #include <linux/cpu.h>
1503 + #include <asm/diag.h>
1504 + #include <asm/elf.h>
1505 ++#include <asm/facility.h>
1506 + #include <asm/lowcore.h>
1507 + #include <asm/param.h>
1508 + #include <asm/smp.h>
1509 +@@ -113,3 +114,20 @@ const struct seq_operations cpuinfo_op = {
1510 + .show = show_cpuinfo,
1511 + };
1512 +
1513 ++int s390_isolate_bp(void)
1514 ++{
1515 ++ if (!test_facility(82))
1516 ++ return -EOPNOTSUPP;
1517 ++ set_thread_flag(TIF_ISOLATE_BP);
1518 ++ return 0;
1519 ++}
1520 ++EXPORT_SYMBOL(s390_isolate_bp);
1521 ++
1522 ++int s390_isolate_bp_guest(void)
1523 ++{
1524 ++ if (!test_facility(82))
1525 ++ return -EOPNOTSUPP;
1526 ++ set_thread_flag(TIF_ISOLATE_BP_GUEST);
1527 ++ return 0;
1528 ++}
1529 ++EXPORT_SYMBOL(s390_isolate_bp_guest);
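For illustration (not part of the patch), a hedged sketch of how a caller might use the exported helpers; the wrapper function and message are hypothetical:

    /* Hypothetical module code: request isolated branch prediction for
     * the current task.  -EOPNOTSUPP simply means facility 82 is not
     * installed, i.e. the machine has no branch prediction control. */
    static int example_enable_bp_isolation(void)
    {
            int rc = s390_isolate_bp();

            if (rc)
                    pr_info("branch prediction isolation unavailable (%d)\n", rc);
            return rc;
    }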
1530 +diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
1531 +index d097d71685df..e7a43a30e3ff 100644
1532 +--- a/arch/s390/kernel/setup.c
1533 ++++ b/arch/s390/kernel/setup.c
1534 +@@ -63,6 +63,8 @@
1535 + #include <asm/sclp.h>
1536 + #include <asm/sysinfo.h>
1537 + #include <asm/numa.h>
1538 ++#include <asm/alternative.h>
1539 ++#include <asm/nospec-branch.h>
1540 + #include "entry.h"
1541 +
1542 + /*
1543 +@@ -333,7 +335,9 @@ static void __init setup_lowcore(void)
1544 + lc->machine_flags = S390_lowcore.machine_flags;
1545 + lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
1546 + memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
1547 +- MAX_FACILITY_BIT/8);
1548 ++ sizeof(lc->stfle_fac_list));
1549 ++ memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
1550 ++ sizeof(lc->alt_stfle_fac_list));
1551 + if (MACHINE_HAS_VX)
1552 + lc->vector_save_area_addr =
1553 + (unsigned long) &lc->vector_save_area;
1554 +@@ -370,6 +374,7 @@ static void __init setup_lowcore(void)
1555 + #ifdef CONFIG_SMP
1556 + lc->spinlock_lockval = arch_spin_lockval(0);
1557 + #endif
1558 ++ lc->br_r1_trampoline = 0x07f1; /* br %r1 */
1559 +
1560 + set_prefix((u32)(unsigned long) lc);
1561 + lowcore_ptr[0] = lc;
1562 +@@ -841,6 +846,9 @@ void __init setup_arch(char **cmdline_p)
1563 + init_mm.end_data = (unsigned long) &_edata;
1564 + init_mm.brk = (unsigned long) &_end;
1565 +
1566 ++ if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
1567 ++ nospec_auto_detect();
1568 ++
1569 + parse_early_param();
1570 + os_info_init();
1571 + setup_ipl();
1572 +@@ -893,6 +901,10 @@ void __init setup_arch(char **cmdline_p)
1573 + conmode_default();
1574 + set_preferred_console();
1575 +
1576 ++ apply_alternative_instructions();
1577 ++ if (IS_ENABLED(CONFIG_EXPOLINE))
1578 ++ nospec_init_branches();
1579 ++
1580 + /* Setup zfcpdump support */
1581 + setup_zfcpdump();
1582 +
1583 +diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
1584 +index 9062df575afe..77f4f334a465 100644
1585 +--- a/arch/s390/kernel/smp.c
1586 ++++ b/arch/s390/kernel/smp.c
1587 +@@ -200,6 +200,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
1588 + lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
1589 + lc->cpu_nr = cpu;
1590 + lc->spinlock_lockval = arch_spin_lockval(cpu);
1591 ++ lc->br_r1_trampoline = 0x07f1; /* br %r1 */
1592 + if (MACHINE_HAS_VX)
1593 + lc->vector_save_area_addr =
1594 + (unsigned long) &lc->vector_save_area;
1595 +@@ -250,7 +251,9 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
1596 + __ctl_store(lc->cregs_save_area, 0, 15);
1597 + save_access_regs((unsigned int *) lc->access_regs_save_area);
1598 + memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
1599 +- MAX_FACILITY_BIT/8);
1600 ++ sizeof(lc->stfle_fac_list));
1601 ++ memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
1602 ++ sizeof(lc->alt_stfle_fac_list));
1603 + }
1604 +
1605 + static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
1606 +@@ -299,6 +302,7 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
1607 + mem_assign_absolute(lc->restart_fn, (unsigned long) func);
1608 + mem_assign_absolute(lc->restart_data, (unsigned long) data);
1609 + mem_assign_absolute(lc->restart_source, source_cpu);
1610 ++ __bpon();
1611 + asm volatile(
1612 + "0: sigp 0,%0,%2 # sigp restart to target cpu\n"
1613 + " brc 2,0b # busy, try again\n"
1614 +@@ -888,6 +892,7 @@ void __cpu_die(unsigned int cpu)
1615 + void __noreturn cpu_die(void)
1616 + {
1617 + idle_task_exit();
1618 ++ __bpon();
1619 + pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
1620 + for (;;) ;
1621 + }
1622 +diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
1623 +index 66956c09d5bf..3d04dfdabc9f 100644
1624 +--- a/arch/s390/kernel/uprobes.c
1625 ++++ b/arch/s390/kernel/uprobes.c
1626 +@@ -147,6 +147,15 @@ unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
1627 + return orig;
1628 + }
1629 +
1630 ++bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
1631 ++ struct pt_regs *regs)
1632 ++{
1633 ++ if (ctx == RP_CHECK_CHAIN_CALL)
1634 ++ return user_stack_pointer(regs) <= ret->stack;
1635 ++ else
1636 ++ return user_stack_pointer(regs) < ret->stack;
1637 ++}
1638 ++
1639 + /* Instruction Emulation */
1640 +
1641 + static void adjust_psw_addr(psw_t *psw, unsigned long len)
1642 +diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
1643 +index 6c553f6e791a..a4ae08e416e6 100644
1644 +--- a/arch/s390/kernel/vmlinux.lds.S
1645 ++++ b/arch/s390/kernel/vmlinux.lds.S
1646 +@@ -78,6 +78,43 @@ SECTIONS
1647 + EXIT_DATA
1648 + }
1649 +
1650 ++ /*
1651 ++ * struct alt_inst entries. From the header (alternative.h):
1652 ++ * "Alternative instructions for different CPU types or capabilities"
1653 ++ * Think locking instructions on spinlocks.
1654 ++ * Note, that it is a part of __init region.
1655 ++ */
1656 ++ . = ALIGN(8);
1657 ++ .altinstructions : {
1658 ++ __alt_instructions = .;
1659 ++ *(.altinstructions)
1660 ++ __alt_instructions_end = .;
1661 ++ }
1662 ++
1663 ++ /*
1664 ++ * And here are the replacement instructions. The linker sticks
1665 ++ * them as binary blobs. The .altinstructions has enough data to
1666 ++ * get the address and the length of them to patch the kernel safely.
1667 ++ * Note, that it is a part of __init region.
1668 ++ */
1669 ++ .altinstr_replacement : {
1670 ++ *(.altinstr_replacement)
1671 ++ }
1672 ++
1673 ++ /*
1674 ++ * Table with the patch locations to undo expolines
1675 ++ */
1676 ++ .nospec_call_table : {
1677 ++ __nospec_call_start = . ;
1678 ++ *(.s390_indirect*)
1679 ++ __nospec_call_end = . ;
1680 ++ }
1681 ++ .nospec_return_table : {
1682 ++ __nospec_return_start = . ;
1683 ++ *(.s390_return*)
1684 ++ __nospec_return_end = . ;
1685 ++ }
1686 ++
1687 + /* early.c uses stsi, which requires page aligned data. */
1688 + . = ALIGN(PAGE_SIZE);
1689 + INIT_DATA_SECTION(0x100)
1690 +diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
1691 +index 23e3f5d77a24..b011140e6b06 100644
1692 +--- a/arch/s390/kvm/kvm-s390.c
1693 ++++ b/arch/s390/kvm/kvm-s390.c
1694 +@@ -257,6 +257,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
1695 + case KVM_CAP_S390_VECTOR_REGISTERS:
1696 + r = MACHINE_HAS_VX;
1697 + break;
1698 ++ case KVM_CAP_S390_BPB:
1699 ++ r = test_facility(82);
1700 ++ break;
1701 + default:
1702 + r = 0;
1703 + }
1704 +@@ -1264,6 +1267,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1705 + KVM_SYNC_PFAULT;
1706 + if (test_kvm_facility(vcpu->kvm, 129))
1707 + vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
1708 ++ if (test_kvm_facility(vcpu->kvm, 82))
1709 ++ vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
1710 +
1711 + if (kvm_is_ucontrol(vcpu->kvm))
1712 + return __kvm_ucontrol_vcpu_init(vcpu);
1713 +@@ -1327,6 +1332,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
1714 + current->thread.fpu.fpc = 0;
1715 + vcpu->arch.sie_block->gbea = 1;
1716 + vcpu->arch.sie_block->pp = 0;
1717 ++ vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
1718 + vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1719 + kvm_clear_async_pf_completion_queue(vcpu);
1720 + if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
1721 +@@ -2145,6 +2151,11 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1722 + if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1723 + kvm_clear_async_pf_completion_queue(vcpu);
1724 + }
1725 ++ if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
1726 ++ test_kvm_facility(vcpu->kvm, 82)) {
1727 ++ vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
1728 ++ vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
1729 ++ }
1730 + kvm_run->kvm_dirty_regs = 0;
1731 + }
1732 +
1733 +@@ -2162,6 +2173,7 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1734 + kvm_run->s.regs.pft = vcpu->arch.pfault_token;
1735 + kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
1736 + kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
1737 ++ kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
1738 + }
1739 +
1740 + int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1741 +diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
1742 +index eb02087650d2..c42d4a3d9494 100644
1743 +--- a/arch/x86/kernel/tsc.c
1744 ++++ b/arch/x86/kernel/tsc.c
1745 +@@ -408,7 +408,7 @@ static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
1746 + hpet2 -= hpet1;
1747 + tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
1748 + do_div(tmp, 1000000);
1749 +- do_div(deltatsc, tmp);
1750 ++ deltatsc = div64_u64(deltatsc, tmp);
1751 +
1752 + return (unsigned long) deltatsc;
1753 + }
1754 +diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
1755 +index c206ccda899b..b5f245d2875c 100644
1756 +--- a/drivers/cdrom/cdrom.c
1757 ++++ b/drivers/cdrom/cdrom.c
1758 +@@ -2358,7 +2358,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
1759 + if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT)
1760 + return media_changed(cdi, 1);
1761 +
1762 +- if ((unsigned int)arg >= cdi->capacity)
1763 ++ if (arg >= cdi->capacity)
1764 + return -EINVAL;
1765 +
1766 + info = kmalloc(sizeof(*info), GFP_KERNEL);
1767 +diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
1768 +index 930424e55439..251d64ca41ce 100644
1769 +--- a/drivers/input/misc/drv260x.c
1770 ++++ b/drivers/input/misc/drv260x.c
1771 +@@ -521,7 +521,7 @@ static int drv260x_probe(struct i2c_client *client,
1772 + if (!haptics)
1773 + return -ENOMEM;
1774 +
1775 +- haptics->rated_voltage = DRV260X_DEF_OD_CLAMP_VOLT;
1776 ++ haptics->overdrive_voltage = DRV260X_DEF_OD_CLAMP_VOLT;
1777 + haptics->rated_voltage = DRV260X_DEF_RATED_VOLT;
1778 +
1779 + if (pdata) {
1780 +diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
1781 +index 7ebccfa8072a..cb790b68920f 100644
1782 +--- a/drivers/message/fusion/mptsas.c
1783 ++++ b/drivers/message/fusion/mptsas.c
1784 +@@ -1994,6 +1994,7 @@ static struct scsi_host_template mptsas_driver_template = {
1785 + .cmd_per_lun = 7,
1786 + .use_clustering = ENABLE_CLUSTERING,
1787 + .shost_attrs = mptscsih_host_attrs,
1788 ++ .no_write_same = 1,
1789 + };
1790 +
1791 + static int mptsas_get_linkerrors(struct sas_phy *phy)
1792 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
1793 +index 278d12888cab..339118f3c718 100644
1794 +--- a/drivers/net/bonding/bond_main.c
1795 ++++ b/drivers/net/bonding/bond_main.c
1796 +@@ -1614,8 +1614,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1797 + } /* switch(bond_mode) */
1798 +
1799 + #ifdef CONFIG_NET_POLL_CONTROLLER
1800 +- slave_dev->npinfo = bond->dev->npinfo;
1801 +- if (slave_dev->npinfo) {
1802 ++ if (bond->dev->npinfo) {
1803 + if (slave_enable_netpoll(new_slave)) {
1804 + netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
1805 + res = -EBUSY;
1806 +diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
1807 +index b7b859c3a0c7..583d50f80b24 100644
1808 +--- a/drivers/net/ppp/pppoe.c
1809 ++++ b/drivers/net/ppp/pppoe.c
1810 +@@ -638,6 +638,10 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
1811 + lock_sock(sk);
1812 +
1813 + error = -EINVAL;
1814 ++
1815 ++ if (sockaddr_len != sizeof(struct sockaddr_pppox))
1816 ++ goto end;
1817 ++
1818 + if (sp->sa_protocol != PX_PROTO_OE)
1819 + goto end;
1820 +
1821 +diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
1822 +index 9bca36e1fefd..e74709e4b5dd 100644
1823 +--- a/drivers/net/team/team.c
1824 ++++ b/drivers/net/team/team.c
1825 +@@ -247,6 +247,17 @@ static void __team_option_inst_mark_removed_port(struct team *team,
1826 + }
1827 + }
1828 +
1829 ++static bool __team_option_inst_tmp_find(const struct list_head *opts,
1830 ++ const struct team_option_inst *needle)
1831 ++{
1832 ++ struct team_option_inst *opt_inst;
1833 ++
1834 ++ list_for_each_entry(opt_inst, opts, tmp_list)
1835 ++ if (opt_inst == needle)
1836 ++ return true;
1837 ++ return false;
1838 ++}
1839 ++
1840 + static int __team_options_register(struct team *team,
1841 + const struct team_option *option,
1842 + size_t option_count)
1843 +@@ -1039,14 +1050,11 @@ static void team_port_leave(struct team *team, struct team_port *port)
1844 + }
1845 +
1846 + #ifdef CONFIG_NET_POLL_CONTROLLER
1847 +-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
1848 ++static int __team_port_enable_netpoll(struct team_port *port)
1849 + {
1850 + struct netpoll *np;
1851 + int err;
1852 +
1853 +- if (!team->dev->npinfo)
1854 +- return 0;
1855 +-
1856 + np = kzalloc(sizeof(*np), GFP_KERNEL);
1857 + if (!np)
1858 + return -ENOMEM;
1859 +@@ -1060,6 +1068,14 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port)
1860 + return err;
1861 + }
1862 +
1863 ++static int team_port_enable_netpoll(struct team_port *port)
1864 ++{
1865 ++ if (!port->team->dev->npinfo)
1866 ++ return 0;
1867 ++
1868 ++ return __team_port_enable_netpoll(port);
1869 ++}
1870 ++
1871 + static void team_port_disable_netpoll(struct team_port *port)
1872 + {
1873 + struct netpoll *np = port->np;
1874 +@@ -1074,7 +1090,7 @@ static void team_port_disable_netpoll(struct team_port *port)
1875 + kfree(np);
1876 + }
1877 + #else
1878 +-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
1879 ++static int team_port_enable_netpoll(struct team_port *port)
1880 + {
1881 + return 0;
1882 + }
1883 +@@ -1181,7 +1197,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
1884 + goto err_vids_add;
1885 + }
1886 +
1887 +- err = team_port_enable_netpoll(team, port);
1888 ++ err = team_port_enable_netpoll(port);
1889 + if (err) {
1890 + netdev_err(dev, "Failed to enable netpoll on device %s\n",
1891 + portname);
1892 +@@ -1889,7 +1905,7 @@ static int team_netpoll_setup(struct net_device *dev,
1893 +
1894 + mutex_lock(&team->lock);
1895 + list_for_each_entry(port, &team->port_list, list) {
1896 +- err = team_port_enable_netpoll(team, port);
1897 ++ err = __team_port_enable_netpoll(port);
1898 + if (err) {
1899 + __team_netpoll_cleanup(team);
1900 + break;
1901 +@@ -2544,6 +2560,14 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
1902 + if (err)
1903 + goto team_put;
1904 + opt_inst->changed = true;
1905 ++
1906 ++ /* dumb/evil user-space can send us duplicate opt,
1907 ++ * keep only the last one
1908 ++ */
1909 ++ if (__team_option_inst_tmp_find(&opt_inst_list,
1910 ++ opt_inst))
1911 ++ continue;
1912 ++
1913 + list_add(&opt_inst->tmp_list, &opt_inst_list);
1914 + }
1915 + if (!opt_found) {
1916 +diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
1917 +index 6578127db847..f71abe50ea6f 100644
1918 +--- a/drivers/net/usb/cdc_ether.c
1919 ++++ b/drivers/net/usb/cdc_ether.c
1920 +@@ -461,6 +461,7 @@ static const struct driver_info wwan_info = {
1921 + #define REALTEK_VENDOR_ID 0x0bda
1922 + #define SAMSUNG_VENDOR_ID 0x04e8
1923 + #define LENOVO_VENDOR_ID 0x17ef
1924 ++#define LINKSYS_VENDOR_ID 0x13b1
1925 + #define NVIDIA_VENDOR_ID 0x0955
1926 + #define HP_VENDOR_ID 0x03f0
1927 +
1928 +@@ -650,6 +651,15 @@ static const struct usb_device_id products[] = {
1929 + .driver_info = 0,
1930 + },
1931 +
1932 ++#if IS_ENABLED(CONFIG_USB_RTL8152)
1933 ++/* Linksys USB3GIGV1 Ethernet Adapter */
1934 ++{
1935 ++ USB_DEVICE_AND_INTERFACE_INFO(LINKSYS_VENDOR_ID, 0x0041, USB_CLASS_COMM,
1936 ++ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
1937 ++ .driver_info = 0,
1938 ++},
1939 ++#endif
1940 ++
1941 + /* Lenovo Thinkpad USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
1942 + {
1943 + USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7205, USB_CLASS_COMM,
1944 +diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
1945 +index 89950f5cea71..b2c1a435357f 100644
1946 +--- a/drivers/net/usb/r8152.c
1947 ++++ b/drivers/net/usb/r8152.c
1948 +@@ -506,6 +506,7 @@ enum rtl8152_flags {
1949 + #define VENDOR_ID_REALTEK 0x0bda
1950 + #define VENDOR_ID_SAMSUNG 0x04e8
1951 + #define VENDOR_ID_LENOVO 0x17ef
1952 ++#define VENDOR_ID_LINKSYS 0x13b1
1953 + #define VENDOR_ID_NVIDIA 0x0955
1954 +
1955 + #define MCU_TYPE_PLA 0x0100
1956 +@@ -4376,6 +4377,7 @@ static struct usb_device_id rtl8152_table[] = {
1957 + {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
1958 + {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
1959 + {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
1960 ++ {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
1961 + {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
1962 + {}
1963 + };
1964 +diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
1965 +index 09e14ce85dd0..0c8efdff4843 100644
1966 +--- a/drivers/net/wireless/ath/ath10k/mac.c
1967 ++++ b/drivers/net/wireless/ath/ath10k/mac.c
1968 +@@ -5285,9 +5285,8 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
1969 + sta->addr, smps, err);
1970 + }
1971 +
1972 +- if (changed & IEEE80211_RC_SUPP_RATES_CHANGED ||
1973 +- changed & IEEE80211_RC_NSS_CHANGED) {
1974 +- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n",
1975 ++ if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
1976 ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
1977 + sta->addr);
1978 +
1979 + err = ath10k_station_assoc(ar, arvif->vif, sta, true);
1980 +diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
1981 +index 41382f89abe1..4435c7bbb625 100644
1982 +--- a/drivers/net/wireless/ath/ath9k/hw.c
1983 ++++ b/drivers/net/wireless/ath/ath9k/hw.c
1984 +@@ -1595,6 +1595,10 @@ bool ath9k_hw_check_alive(struct ath_hw *ah)
1985 + int count = 50;
1986 + u32 reg, last_val;
1987 +
1988 ++ /* Check if chip failed to wake up */
1989 ++ if (REG_READ(ah, AR_CFG) == 0xdeadbeef)
1990 ++ return false;
1991 ++
1992 + if (AR_SREV_9300(ah))
1993 + return !ath9k_hw_detect_mac_hang(ah);
1994 +
1995 +diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
1996 +index 6fa9364d1c07..835f1054976b 100644
1997 +--- a/drivers/s390/char/Makefile
1998 ++++ b/drivers/s390/char/Makefile
1999 +@@ -2,6 +2,8 @@
2000 + # S/390 character devices
2001 + #
2002 +
2003 ++CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_EXPOLINE)
2004 ++
2005 + obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
2006 + sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
2007 + sclp_early.o
2008 +diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
2009 +index 1e16331891a9..f9d6a9f00640 100644
2010 +--- a/drivers/s390/cio/chsc.c
2011 ++++ b/drivers/s390/cio/chsc.c
2012 +@@ -451,6 +451,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
2013 +
2014 + static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
2015 + {
2016 ++ struct channel_path *chp;
2017 + struct chp_link link;
2018 + struct chp_id chpid;
2019 + int status;
2020 +@@ -463,10 +464,17 @@ static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
2021 + chpid.id = sei_area->rsid;
2022 + /* allocate a new channel path structure, if needed */
2023 + status = chp_get_status(chpid);
2024 +- if (status < 0)
2025 +- chp_new(chpid);
2026 +- else if (!status)
2027 ++ if (!status)
2028 + return;
2029 ++
2030 ++ if (status < 0) {
2031 ++ chp_new(chpid);
2032 ++ } else {
2033 ++ chp = chpid_to_chp(chpid);
2034 ++ mutex_lock(&chp->lock);
2035 ++ chp_update_desc(chp);
2036 ++ mutex_unlock(&chp->lock);
2037 ++ }
2038 + memset(&link, 0, sizeof(struct chp_link));
2039 + link.chpid = chpid;
2040 + if ((sei_area->vf & 0xc0) != 0) {
2041 +diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
2042 +index d4c3e5512dd5..b69dfc706440 100644
2043 +--- a/drivers/staging/android/ion/ion_system_heap.c
2044 ++++ b/drivers/staging/android/ion/ion_system_heap.c
2045 +@@ -27,7 +27,7 @@
2046 + #include "ion_priv.h"
2047 +
2048 + static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
2049 +- __GFP_NORETRY) & ~__GFP_DIRECT_RECLAIM;
2050 ++ __GFP_NORETRY) & ~__GFP_RECLAIM;
2051 + static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
2052 + static const unsigned int orders[] = {8, 4, 0};
2053 + static const int num_orders = ARRAY_SIZE(orders);
2054 +diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
2055 +index 49a0d6b027c1..76dacd5307b9 100644
2056 +--- a/fs/cifs/dir.c
2057 ++++ b/fs/cifs/dir.c
2058 +@@ -673,6 +673,9 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
2059 + goto mknod_out;
2060 + }
2061 +
2062 ++ if (!S_ISCHR(mode) && !S_ISBLK(mode))
2063 ++ goto mknod_out;
2064 ++
2065 + if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
2066 + goto mknod_out;
2067 +
2068 +@@ -681,10 +684,8 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
2069 +
2070 + buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
2071 + if (buf == NULL) {
2072 +- kfree(full_path);
2073 + rc = -ENOMEM;
2074 +- free_xid(xid);
2075 +- return rc;
2076 ++ goto mknod_out;
2077 + }
2078 +
2079 + if (backup_cred(cifs_sb))
2080 +@@ -731,7 +732,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
2081 + pdev->minor = cpu_to_le64(MINOR(device_number));
2082 + rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
2083 + &bytes_written, iov, 1);
2084 +- } /* else if (S_ISFIFO) */
2085 ++ }
2086 + tcon->ses->server->ops->close(xid, tcon, &fid);
2087 + d_drop(direntry);
2088 +
2089 +diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
2090 +index 4759df4eb8ce..9398d1b70545 100644
2091 +--- a/fs/jbd2/journal.c
2092 ++++ b/fs/jbd2/journal.c
2093 +@@ -275,11 +275,11 @@ loop:
2094 + goto loop;
2095 +
2096 + end_loop:
2097 +- write_unlock(&journal->j_state_lock);
2098 + del_timer_sync(&journal->j_commit_timer);
2099 + journal->j_task = NULL;
2100 + wake_up(&journal->j_wait_done_commit);
2101 + jbd_debug(1, "Journal thread exiting.\n");
2102 ++ write_unlock(&journal->j_state_lock);
2103 + return 0;
2104 + }
2105 +
2106 +diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
2107 +index 19db03dbbd00..dd676ba758ee 100644
2108 +--- a/include/linux/if_vlan.h
2109 ++++ b/include/linux/if_vlan.h
2110 +@@ -585,7 +585,7 @@ static inline bool skb_vlan_tagged(const struct sk_buff *skb)
2111 + * Returns true if the skb is tagged with multiple vlan headers, regardless
2112 + * of whether it is hardware accelerated or not.
2113 + */
2114 +-static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
2115 ++static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
2116 + {
2117 + __be16 protocol = skb->protocol;
2118 +
2119 +@@ -596,6 +596,9 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
2120 + protocol != htons(ETH_P_8021AD)))
2121 + return false;
2122 +
2123 ++ if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
2124 ++ return false;
2125 ++
2126 + veh = (struct vlan_ethhdr *)skb->data;
2127 + protocol = veh->h_vlan_encapsulated_proto;
2128 + }
2129 +@@ -613,7 +616,7 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
2130 + *
2131 + * Returns features without unsafe ones if the skb has multiple tags.
2132 + */
2133 +-static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
2134 ++static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
2135 + netdev_features_t features)
2136 + {
2137 + if (skb_vlan_tagged_multi(skb)) {
2138 +diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
2139 +index fe994d2e5286..ea985aa7a6c5 100644
2140 +--- a/include/net/llc_conn.h
2141 ++++ b/include/net/llc_conn.h
2142 +@@ -97,6 +97,7 @@ static __inline__ char llc_backlog_type(struct sk_buff *skb)
2143 +
2144 + struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
2145 + struct proto *prot, int kern);
2146 ++void llc_sk_stop_all_timers(struct sock *sk, bool sync);
2147 + void llc_sk_free(struct sock *sk);
2148 +
2149 + void llc_sk_reset(struct sock *sk);
2150 +diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
2151 +index 03f3618612aa..376d0ab5b9f2 100644
2152 +--- a/include/uapi/linux/kvm.h
2153 ++++ b/include/uapi/linux/kvm.h
2154 +@@ -831,6 +831,7 @@ struct kvm_ppc_smmu_info {
2155 + #define KVM_CAP_GUEST_DEBUG_HW_WPS 120
2156 + #define KVM_CAP_SPLIT_IRQCHIP 121
2157 + #define KVM_CAP_IOEVENTFD_ANY_LENGTH 122
2158 ++#define KVM_CAP_S390_BPB 152
2159 +
2160 + #ifdef KVM_CAP_IRQ_ROUTING
2161 +
2162 +diff --git a/kernel/events/core.c b/kernel/events/core.c
2163 +index 835ac4d9f349..6aeb0ef4fe70 100644
2164 +--- a/kernel/events/core.c
2165 ++++ b/kernel/events/core.c
2166 +@@ -8133,9 +8133,9 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
2167 + * __u16 sample size limit.
2168 + */
2169 + if (attr->sample_stack_user >= USHRT_MAX)
2170 +- ret = -EINVAL;
2171 ++ return -EINVAL;
2172 + else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
2173 +- ret = -EINVAL;
2174 ++ return -EINVAL;
2175 + }
2176 +
2177 + if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
2178 +diff --git a/net/core/dev.c b/net/core/dev.c
2179 +index dc63c37d5301..3bcbf931a910 100644
2180 +--- a/net/core/dev.c
2181 ++++ b/net/core/dev.c
2182 +@@ -2706,7 +2706,7 @@ netdev_features_t passthru_features_check(struct sk_buff *skb,
2183 + }
2184 + EXPORT_SYMBOL(passthru_features_check);
2185 +
2186 +-static netdev_features_t dflt_features_check(const struct sk_buff *skb,
2187 ++static netdev_features_t dflt_features_check(struct sk_buff *skb,
2188 + struct net_device *dev,
2189 + netdev_features_t features)
2190 + {
2191 +diff --git a/net/core/neighbour.c b/net/core/neighbour.c
2192 +index 33432e64804c..f60b93627876 100644
2193 +--- a/net/core/neighbour.c
2194 ++++ b/net/core/neighbour.c
2195 +@@ -54,7 +54,8 @@ do { \
2196 + static void neigh_timer_handler(unsigned long arg);
2197 + static void __neigh_notify(struct neighbour *n, int type, int flags);
2198 + static void neigh_update_notify(struct neighbour *neigh);
2199 +-static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
2200 ++static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
2201 ++ struct net_device *dev);
2202 +
2203 + #ifdef CONFIG_PROC_FS
2204 + static const struct file_operations neigh_stat_seq_fops;
2205 +@@ -254,8 +255,7 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
2206 + {
2207 + write_lock_bh(&tbl->lock);
2208 + neigh_flush_dev(tbl, dev);
2209 +- pneigh_ifdown(tbl, dev);
2210 +- write_unlock_bh(&tbl->lock);
2211 ++ pneigh_ifdown_and_unlock(tbl, dev);
2212 +
2213 + del_timer_sync(&tbl->proxy_timer);
2214 + pneigh_queue_purge(&tbl->proxy_queue);
2215 +@@ -645,9 +645,10 @@ int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
2216 + return -ENOENT;
2217 + }
2218 +
2219 +-static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
2220 ++static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
2221 ++ struct net_device *dev)
2222 + {
2223 +- struct pneigh_entry *n, **np;
2224 ++ struct pneigh_entry *n, **np, *freelist = NULL;
2225 + u32 h;
2226 +
2227 + for (h = 0; h <= PNEIGH_HASHMASK; h++) {
2228 +@@ -655,16 +656,23 @@ static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
2229 + while ((n = *np) != NULL) {
2230 + if (!dev || n->dev == dev) {
2231 + *np = n->next;
2232 +- if (tbl->pdestructor)
2233 +- tbl->pdestructor(n);
2234 +- if (n->dev)
2235 +- dev_put(n->dev);
2236 +- kfree(n);
2237 ++ n->next = freelist;
2238 ++ freelist = n;
2239 + continue;
2240 + }
2241 + np = &n->next;
2242 + }
2243 + }
2244 ++ write_unlock_bh(&tbl->lock);
2245 ++ while ((n = freelist)) {
2246 ++ freelist = n->next;
2247 ++ n->next = NULL;
2248 ++ if (tbl->pdestructor)
2249 ++ tbl->pdestructor(n);
2250 ++ if (n->dev)
2251 ++ dev_put(n->dev);
2252 ++ kfree(n);
2253 ++ }
2254 + return -ENOENT;
2255 + }
2256 +
2257 +@@ -2280,12 +2288,16 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2258 +
2259 + err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL);
2260 + if (!err) {
2261 +- if (tb[NDA_IFINDEX])
2262 ++ if (tb[NDA_IFINDEX]) {
2263 ++ if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
2264 ++ return -EINVAL;
2265 + filter_idx = nla_get_u32(tb[NDA_IFINDEX]);
2266 +-
2267 +- if (tb[NDA_MASTER])
2268 ++ }
2269 ++ if (tb[NDA_MASTER]) {
2270 ++ if (nla_len(tb[NDA_MASTER]) != sizeof(u32))
2271 ++ return -EINVAL;
2272 + filter_master_idx = nla_get_u32(tb[NDA_MASTER]);
2273 +-
2274 ++ }
2275 + if (filter_idx || filter_master_idx)
2276 + flags |= NLM_F_DUMP_FILTERED;
2277 + }
2278 +diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
2279 +index 6abc5012200b..e26df2764e83 100644
2280 +--- a/net/dns_resolver/dns_key.c
2281 ++++ b/net/dns_resolver/dns_key.c
2282 +@@ -25,6 +25,7 @@
2283 + #include <linux/moduleparam.h>
2284 + #include <linux/slab.h>
2285 + #include <linux/string.h>
2286 ++#include <linux/ratelimit.h>
2287 + #include <linux/kernel.h>
2288 + #include <linux/keyctl.h>
2289 + #include <linux/err.h>
2290 +@@ -91,9 +92,9 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
2291 +
2292 + next_opt = memchr(opt, '#', end - opt) ?: end;
2293 + opt_len = next_opt - opt;
2294 +- if (!opt_len) {
2295 +- printk(KERN_WARNING
2296 +- "Empty option to dns_resolver key\n");
2297 ++ if (opt_len <= 0 || opt_len > 128) {
2298 ++ pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n",
2299 ++ opt_len);
2300 + return -EINVAL;
2301 + }
2302 +
2303 +@@ -127,10 +128,8 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
2304 + }
2305 +
2306 + bad_option_value:
2307 +- printk(KERN_WARNING
2308 +- "Option '%*.*s' to dns_resolver key:"
2309 +- " bad/missing value\n",
2310 +- opt_nlen, opt_nlen, opt);
2311 ++ pr_warn_ratelimited("Option '%*.*s' to dns_resolver key: bad/missing value\n",
2312 ++ opt_nlen, opt_nlen, opt);
2313 + return -EINVAL;
2314 + } while (opt = next_opt + 1, opt < end);
2315 + }
2316 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
2317 +index 23d77ff1da59..82d2b55c953a 100644
2318 +--- a/net/ipv4/tcp.c
2319 ++++ b/net/ipv4/tcp.c
2320 +@@ -2589,8 +2589,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2321 +
2322 + #ifdef CONFIG_TCP_MD5SIG
2323 + case TCP_MD5SIG:
2324 +- /* Read the IP->Key mappings from userspace */
2325 +- err = tp->af_specific->md5_parse(sk, optval, optlen);
2326 ++ if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
2327 ++ err = tp->af_specific->md5_parse(sk, optval, optlen);
2328 ++ else
2329 ++ err = -EINVAL;
2330 + break;
2331 + #endif
2332 + case TCP_USER_TIMEOUT:
2333 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
2334 +index 96115d1e0d90..ed018760502e 100644
2335 +--- a/net/ipv4/tcp_input.c
2336 ++++ b/net/ipv4/tcp_input.c
2337 +@@ -3869,11 +3869,8 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)
2338 + int length = (th->doff << 2) - sizeof(*th);
2339 + const u8 *ptr = (const u8 *)(th + 1);
2340 +
2341 +- /* If the TCP option is too short, we can short cut */
2342 +- if (length < TCPOLEN_MD5SIG)
2343 +- return NULL;
2344 +-
2345 +- while (length > 0) {
2346 ++ /* If not enough data remaining, we can short cut */
2347 ++ while (length >= TCPOLEN_MD5SIG) {
2348 + int opcode = *ptr++;
2349 + int opsize;
2350 +
2351 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
2352 +index 99920fcea97c..2f6d8f57fdd4 100644
2353 +--- a/net/ipv6/route.c
2354 ++++ b/net/ipv6/route.c
2355 +@@ -2711,6 +2711,7 @@ void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
2356 +
2357 + static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
2358 + [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
2359 ++ [RTA_PREFSRC] = { .len = sizeof(struct in6_addr) },
2360 + [RTA_OIF] = { .type = NLA_U32 },
2361 + [RTA_IIF] = { .type = NLA_U32 },
2362 + [RTA_PRIORITY] = { .type = NLA_U32 },
2363 +@@ -2719,6 +2720,7 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
2364 + [RTA_PREF] = { .type = NLA_U8 },
2365 + [RTA_ENCAP_TYPE] = { .type = NLA_U16 },
2366 + [RTA_ENCAP] = { .type = NLA_NESTED },
2367 ++ [RTA_TABLE] = { .type = NLA_U32 },
2368 + };
2369 +
2370 + static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2371 +diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
2372 +index 67f2e72723b2..2764c4bd072c 100644
2373 +--- a/net/l2tp/l2tp_ppp.c
2374 ++++ b/net/l2tp/l2tp_ppp.c
2375 +@@ -606,6 +606,13 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
2376 + lock_sock(sk);
2377 +
2378 + error = -EINVAL;
2379 ++
2380 ++ if (sockaddr_len != sizeof(struct sockaddr_pppol2tp) &&
2381 ++ sockaddr_len != sizeof(struct sockaddr_pppol2tpv3) &&
2382 ++ sockaddr_len != sizeof(struct sockaddr_pppol2tpin6) &&
2383 ++ sockaddr_len != sizeof(struct sockaddr_pppol2tpv3in6))
2384 ++ goto end;
2385 ++
2386 + if (sp->sa_protocol != PX_PROTO_OL2TP)
2387 + goto end;
2388 +
2389 +diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
2390 +index 1e698768aca8..09f2f3471ad6 100644
2391 +--- a/net/llc/af_llc.c
2392 ++++ b/net/llc/af_llc.c
2393 +@@ -197,9 +197,19 @@ static int llc_ui_release(struct socket *sock)
2394 + llc->laddr.lsap, llc->daddr.lsap);
2395 + if (!llc_send_disc(sk))
2396 + llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
2397 +- if (!sock_flag(sk, SOCK_ZAPPED))
2398 ++ if (!sock_flag(sk, SOCK_ZAPPED)) {
2399 ++ struct llc_sap *sap = llc->sap;
2400 ++
2401 ++ /* Hold this for release_sock(), so that llc_backlog_rcv()
2402 ++ * could still use it.
2403 ++ */
2404 ++ llc_sap_hold(sap);
2405 + llc_sap_remove_socket(llc->sap, sk);
2406 +- release_sock(sk);
2407 ++ release_sock(sk);
2408 ++ llc_sap_put(sap);
2409 ++ } else {
2410 ++ release_sock(sk);
2411 ++ }
2412 + if (llc->dev)
2413 + dev_put(llc->dev);
2414 + sock_put(sk);
2415 +diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
2416 +index ea225bd2672c..f8d4ab8ca1a5 100644
2417 +--- a/net/llc/llc_c_ac.c
2418 ++++ b/net/llc/llc_c_ac.c
2419 +@@ -1096,14 +1096,7 @@ int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb)
2420 +
2421 + int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb)
2422 + {
2423 +- struct llc_sock *llc = llc_sk(sk);
2424 +-
2425 +- del_timer(&llc->pf_cycle_timer.timer);
2426 +- del_timer(&llc->ack_timer.timer);
2427 +- del_timer(&llc->rej_sent_timer.timer);
2428 +- del_timer(&llc->busy_state_timer.timer);
2429 +- llc->ack_must_be_send = 0;
2430 +- llc->ack_pf = 0;
2431 ++ llc_sk_stop_all_timers(sk, false);
2432 + return 0;
2433 + }
2434 +
2435 +diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
2436 +index 8bc5a1bd2d45..d861b74ad068 100644
2437 +--- a/net/llc/llc_conn.c
2438 ++++ b/net/llc/llc_conn.c
2439 +@@ -951,6 +951,26 @@ out:
2440 + return sk;
2441 + }
2442 +
2443 ++void llc_sk_stop_all_timers(struct sock *sk, bool sync)
2444 ++{
2445 ++ struct llc_sock *llc = llc_sk(sk);
2446 ++
2447 ++ if (sync) {
2448 ++ del_timer_sync(&llc->pf_cycle_timer.timer);
2449 ++ del_timer_sync(&llc->ack_timer.timer);
2450 ++ del_timer_sync(&llc->rej_sent_timer.timer);
2451 ++ del_timer_sync(&llc->busy_state_timer.timer);
2452 ++ } else {
2453 ++ del_timer(&llc->pf_cycle_timer.timer);
2454 ++ del_timer(&llc->ack_timer.timer);
2455 ++ del_timer(&llc->rej_sent_timer.timer);
2456 ++ del_timer(&llc->busy_state_timer.timer);
2457 ++ }
2458 ++
2459 ++ llc->ack_must_be_send = 0;
2460 ++ llc->ack_pf = 0;
2461 ++}
2462 ++
2463 + /**
2464 + * llc_sk_free - Frees a LLC socket
2465 + * @sk - socket to free
2466 +@@ -963,7 +983,7 @@ void llc_sk_free(struct sock *sk)
2467 +
2468 + llc->state = LLC_CONN_OUT_OF_SVC;
2469 + /* Stop all (possibly) running timers */
2470 +- llc_conn_ac_stop_all_timers(sk, NULL);
2471 ++ llc_sk_stop_all_timers(sk, true);
2472 + #ifdef DEBUG_LLC_CONN_ALLOC
2473 + printk(KERN_INFO "%s: unackq=%d, txq=%d\n", __func__,
2474 + skb_queue_len(&llc->pdu_unack_q),
2475 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
2476 +index 92ca3e106c2b..f165514a4db5 100644
2477 +--- a/net/packet/af_packet.c
2478 ++++ b/net/packet/af_packet.c
2479 +@@ -332,11 +332,11 @@ static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
2480 + skb_set_queue_mapping(skb, queue_index);
2481 + }
2482 +
2483 +-/* register_prot_hook must be invoked with the po->bind_lock held,
2484 ++/* __register_prot_hook must be invoked through register_prot_hook
2485 + * or from a context in which asynchronous accesses to the packet
2486 + * socket is not possible (packet_create()).
2487 + */
2488 +-static void register_prot_hook(struct sock *sk)
2489 ++static void __register_prot_hook(struct sock *sk)
2490 + {
2491 + struct packet_sock *po = pkt_sk(sk);
2492 +
2493 +@@ -351,8 +351,13 @@ static void register_prot_hook(struct sock *sk)
2494 + }
2495 + }
2496 +
2497 +-/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
2498 +- * held. If the sync parameter is true, we will temporarily drop
2499 ++static void register_prot_hook(struct sock *sk)
2500 ++{
2501 ++ lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
2502 ++ __register_prot_hook(sk);
2503 ++}
2504 ++
2505 ++/* If the sync parameter is true, we will temporarily drop
2506 + * the po->bind_lock and do a synchronize_net to make sure no
2507 + * asynchronous packet processing paths still refer to the elements
2508 + * of po->prot_hook. If the sync parameter is false, it is the
2509 +@@ -362,6 +367,8 @@ static void __unregister_prot_hook(struct sock *sk, bool sync)
2510 + {
2511 + struct packet_sock *po = pkt_sk(sk);
2512 +
2513 ++ lockdep_assert_held_once(&po->bind_lock);
2514 ++
2515 + po->running = 0;
2516 +
2517 + if (po->fanout)
2518 +@@ -2892,6 +2899,7 @@ static int packet_release(struct socket *sock)
2519 +
2520 + packet_flush_mclist(sk);
2521 +
2522 ++ lock_sock(sk);
2523 + if (po->rx_ring.pg_vec) {
2524 + memset(&req_u, 0, sizeof(req_u));
2525 + packet_set_ring(sk, &req_u, 1, 0);
2526 +@@ -2901,6 +2909,7 @@ static int packet_release(struct socket *sock)
2527 + memset(&req_u, 0, sizeof(req_u));
2528 + packet_set_ring(sk, &req_u, 1, 1);
2529 + }
2530 ++ release_sock(sk);
2531 +
2532 + f = fanout_release(sk);
2533 +
2534 +@@ -3134,7 +3143,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
2535 +
2536 + if (proto) {
2537 + po->prot_hook.type = proto;
2538 +- register_prot_hook(sk);
2539 ++ __register_prot_hook(sk);
2540 + }
2541 +
2542 + mutex_lock(&net->packet.sklist_lock);
2543 +@@ -3570,6 +3579,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
2544 + union tpacket_req_u req_u;
2545 + int len;
2546 +
2547 ++ lock_sock(sk);
2548 + switch (po->tp_version) {
2549 + case TPACKET_V1:
2550 + case TPACKET_V2:
2551 +@@ -3580,14 +3590,21 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
2552 + len = sizeof(req_u.req3);
2553 + break;
2554 + }
2555 +- if (optlen < len)
2556 +- return -EINVAL;
2557 +- if (pkt_sk(sk)->has_vnet_hdr)
2558 +- return -EINVAL;
2559 +- if (copy_from_user(&req_u.req, optval, len))
2560 +- return -EFAULT;
2561 +- return packet_set_ring(sk, &req_u, 0,
2562 +- optname == PACKET_TX_RING);
2563 ++ if (optlen < len) {
2564 ++ ret = -EINVAL;
2565 ++ } else {
2566 ++ if (pkt_sk(sk)->has_vnet_hdr) {
2567 ++ ret = -EINVAL;
2568 ++ } else {
2569 ++ if (copy_from_user(&req_u.req, optval, len))
2570 ++ ret = -EFAULT;
2571 ++ else
2572 ++ ret = packet_set_ring(sk, &req_u, 0,
2573 ++ optname == PACKET_TX_RING);
2574 ++ }
2575 ++ }
2576 ++ release_sock(sk);
2577 ++ return ret;
2578 + }
2579 + case PACKET_COPY_THRESH:
2580 + {
2581 +@@ -3653,12 +3670,18 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
2582 +
2583 + if (optlen != sizeof(val))
2584 + return -EINVAL;
2585 +- if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
2586 +- return -EBUSY;
2587 + if (copy_from_user(&val, optval, sizeof(val)))
2588 + return -EFAULT;
2589 +- po->tp_loss = !!val;
2590 +- return 0;
2591 ++
2592 ++ lock_sock(sk);
2593 ++ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
2594 ++ ret = -EBUSY;
2595 ++ } else {
2596 ++ po->tp_loss = !!val;
2597 ++ ret = 0;
2598 ++ }
2599 ++ release_sock(sk);
2600 ++ return ret;
2601 + }
2602 + case PACKET_AUXDATA:
2603 + {
2604 +@@ -3669,7 +3692,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
2605 + if (copy_from_user(&val, optval, sizeof(val)))
2606 + return -EFAULT;
2607 +
2608 ++ lock_sock(sk);
2609 + po->auxdata = !!val;
2610 ++ release_sock(sk);
2611 + return 0;
2612 + }
2613 + case PACKET_ORIGDEV:
2614 +@@ -3681,7 +3706,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
2615 + if (copy_from_user(&val, optval, sizeof(val)))
2616 + return -EFAULT;
2617 +
2618 ++ lock_sock(sk);
2619 + po->origdev = !!val;
2620 ++ release_sock(sk);
2621 + return 0;
2622 + }
2623 + case PACKET_VNET_HDR:
2624 +@@ -3690,15 +3717,20 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
2625 +
2626 + if (sock->type != SOCK_RAW)
2627 + return -EINVAL;
2628 +- if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
2629 +- return -EBUSY;
2630 + if (optlen < sizeof(val))
2631 + return -EINVAL;
2632 + if (copy_from_user(&val, optval, sizeof(val)))
2633 + return -EFAULT;
2634 +
2635 +- po->has_vnet_hdr = !!val;
2636 +- return 0;
2637 ++ lock_sock(sk);
2638 ++ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
2639 ++ ret = -EBUSY;
2640 ++ } else {
2641 ++ po->has_vnet_hdr = !!val;
2642 ++ ret = 0;
2643 ++ }
2644 ++ release_sock(sk);
2645 ++ return ret;
2646 + }
2647 + case PACKET_TIMESTAMP:
2648 + {
2649 +@@ -3736,11 +3768,17 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
2650 +
2651 + if (optlen != sizeof(val))
2652 + return -EINVAL;
2653 +- if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
2654 +- return -EBUSY;
2655 + if (copy_from_user(&val, optval, sizeof(val)))
2656 + return -EFAULT;
2657 +- po->tp_tx_has_off = !!val;
2658 ++
2659 ++ lock_sock(sk);
2660 ++ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
2661 ++ ret = -EBUSY;
2662 ++ } else {
2663 ++ po->tp_tx_has_off = !!val;
2664 ++ ret = 0;
2665 ++ }
2666 ++ release_sock(sk);
2667 + return 0;
2668 + }
2669 + case PACKET_QDISC_BYPASS:
2670 +@@ -4116,7 +4154,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
2671 + /* Added to avoid minimal code churn */
2672 + struct tpacket_req *req = &req_u->req;
2673 +
2674 +- lock_sock(sk);
2675 + /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
2676 + if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
2677 + WARN(1, "Tx-ring is not supported.\n");
2678 +@@ -4252,7 +4289,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
2679 + if (pg_vec)
2680 + free_pg_vec(pg_vec, order, req->tp_block_nr);
2681 + out:
2682 +- release_sock(sk);
2683 + return err;
2684 + }
2685 +
2686 +diff --git a/net/packet/internal.h b/net/packet/internal.h
2687 +index d55bfc34d6b3..1309e2a7baad 100644
2688 +--- a/net/packet/internal.h
2689 ++++ b/net/packet/internal.h
2690 +@@ -109,10 +109,12 @@ struct packet_sock {
2691 + int copy_thresh;
2692 + spinlock_t bind_lock;
2693 + struct mutex pg_vec_lock;
2694 +- unsigned int running:1, /* prot_hook is attached*/
2695 +- auxdata:1,
2696 ++ unsigned int running; /* bind_lock must be held */
2697 ++ unsigned int auxdata:1, /* writer must hold sock lock */
2698 + origdev:1,
2699 +- has_vnet_hdr:1;
2700 ++ has_vnet_hdr:1,
2701 ++ tp_loss:1,
2702 ++ tp_tx_has_off:1;
2703 + int pressure;
2704 + int ifindex; /* bound device */
2705 + __be16 num;
2706 +@@ -122,8 +124,6 @@ struct packet_sock {
2707 + enum tpacket_versions tp_version;
2708 + unsigned int tp_hdrlen;
2709 + unsigned int tp_reserve;
2710 +- unsigned int tp_loss:1;
2711 +- unsigned int tp_tx_has_off:1;
2712 + unsigned int tp_tstamp;
2713 + struct net_device __rcu *cached_dev;
2714 + int (*xmit)(struct sk_buff *skb);
2715 +diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
2716 +index edb8514b4e00..1cd7b7e33fa3 100644
2717 +--- a/net/sctp/ipv6.c
2718 ++++ b/net/sctp/ipv6.c
2719 +@@ -519,46 +519,49 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
2720 + addr->v6.sin6_scope_id = 0;
2721 + }
2722 +
2723 +-/* Compare addresses exactly.
2724 +- * v4-mapped-v6 is also in consideration.
2725 +- */
2726 +-static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
2727 +- const union sctp_addr *addr2)
2728 ++static int __sctp_v6_cmp_addr(const union sctp_addr *addr1,
2729 ++ const union sctp_addr *addr2)
2730 + {
2731 + if (addr1->sa.sa_family != addr2->sa.sa_family) {
2732 + if (addr1->sa.sa_family == AF_INET &&
2733 + addr2->sa.sa_family == AF_INET6 &&
2734 +- ipv6_addr_v4mapped(&addr2->v6.sin6_addr)) {
2735 +- if (addr2->v6.sin6_port == addr1->v4.sin_port &&
2736 +- addr2->v6.sin6_addr.s6_addr32[3] ==
2737 +- addr1->v4.sin_addr.s_addr)
2738 +- return 1;
2739 +- }
2740 ++ ipv6_addr_v4mapped(&addr2->v6.sin6_addr) &&
2741 ++ addr2->v6.sin6_addr.s6_addr32[3] ==
2742 ++ addr1->v4.sin_addr.s_addr)
2743 ++ return 1;
2744 ++
2745 + if (addr2->sa.sa_family == AF_INET &&
2746 + addr1->sa.sa_family == AF_INET6 &&
2747 +- ipv6_addr_v4mapped(&addr1->v6.sin6_addr)) {
2748 +- if (addr1->v6.sin6_port == addr2->v4.sin_port &&
2749 +- addr1->v6.sin6_addr.s6_addr32[3] ==
2750 +- addr2->v4.sin_addr.s_addr)
2751 +- return 1;
2752 +- }
2753 ++ ipv6_addr_v4mapped(&addr1->v6.sin6_addr) &&
2754 ++ addr1->v6.sin6_addr.s6_addr32[3] ==
2755 ++ addr2->v4.sin_addr.s_addr)
2756 ++ return 1;
2757 ++
2758 + return 0;
2759 + }
2760 +- if (addr1->v6.sin6_port != addr2->v6.sin6_port)
2761 +- return 0;
2762 ++
2763 + if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr))
2764 + return 0;
2765 ++
2766 + /* If this is a linklocal address, compare the scope_id. */
2767 +- if (ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) {
2768 +- if (addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id &&
2769 +- (addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)) {
2770 +- return 0;
2771 +- }
2772 +- }
2773 ++ if ((ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
2774 ++ addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id &&
2775 ++ addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)
2776 ++ return 0;
2777 +
2778 + return 1;
2779 + }
2780 +
2781 ++/* Compare addresses exactly.
2782 ++ * v4-mapped-v6 is also in consideration.
2783 ++ */
2784 ++static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
2785 ++ const union sctp_addr *addr2)
2786 ++{
2787 ++ return __sctp_v6_cmp_addr(addr1, addr2) &&
2788 ++ addr1->v6.sin6_port == addr2->v6.sin6_port;
2789 ++}
2790 ++
2791 + /* Initialize addr struct to INADDR_ANY. */
2792 + static void sctp_v6_inaddr_any(union sctp_addr *addr, __be16 port)
2793 + {
2794 +@@ -843,8 +846,8 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
2795 + const union sctp_addr *addr2,
2796 + struct sctp_sock *opt)
2797 + {
2798 +- struct sctp_af *af1, *af2;
2799 + struct sock *sk = sctp_opt2sk(opt);
2800 ++ struct sctp_af *af1, *af2;
2801 +
2802 + af1 = sctp_get_af_specific(addr1->sa.sa_family);
2803 + af2 = sctp_get_af_specific(addr2->sa.sa_family);
2804 +@@ -860,10 +863,7 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
2805 + if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2))
2806 + return 1;
2807 +
2808 +- if (addr1->sa.sa_family != addr2->sa.sa_family)
2809 +- return 0;
2810 +-
2811 +- return af1->cmp_addr(addr1, addr2);
2812 ++ return __sctp_v6_cmp_addr(addr1, addr2);
2813 + }
2814 +
2815 + /* Verify that the provided sockaddr looks bindable. Common verification,
2816 +diff --git a/net/tipc/net.c b/net/tipc/net.c
2817 +index 77bf9113c7a7..2763bd369b79 100644
2818 +--- a/net/tipc/net.c
2819 ++++ b/net/tipc/net.c
2820 +@@ -44,7 +44,8 @@
2821 +
2822 + static const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
2823 + [TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC },
2824 +- [TIPC_NLA_NET_ID] = { .type = NLA_U32 }
2825 ++ [TIPC_NLA_NET_ID] = { .type = NLA_U32 },
2826 ++ [TIPC_NLA_NET_ADDR] = { .type = NLA_U32 },
2827 + };
2828 +
2829 + /*