Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Sat, 26 May 2018 13:44:01
Message-Id: 1527342219.3c3346936a6fc0211942c65b5eda8b8a964c0599.mpagano@gentoo
1 commit: 3c3346936a6fc0211942c65b5eda8b8a964c0599
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Sat May 26 13:43:39 2018 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Sat May 26 13:43:39 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3c334693
7
8 Linux patch 4.4.133
9
10 0000_README | 4 +
11 1132_linux-4.4.133.patch | 5643 ++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 5647 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index 04783a3..2913d51 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -571,6 +571,10 @@ Patch: 1131_linux-4.4.132.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.4.132
21
22 +Patch: 1132_linux-4.4.133.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.4.133
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1132_linux-4.4.133.patch b/1132_linux-4.4.133.patch
31 new file mode 100644
32 index 0000000..1d40524
33 --- /dev/null
34 +++ b/1132_linux-4.4.133.patch
35 @@ -0,0 +1,5643 @@
36 +diff --git a/Makefile b/Makefile
37 +index ace4a655548a..ac52ee65685b 100644
38 +--- a/Makefile
39 ++++ b/Makefile
40 +@@ -1,6 +1,6 @@
41 + VERSION = 4
42 + PATCHLEVEL = 4
43 +-SUBLEVEL = 132
44 ++SUBLEVEL = 133
45 + EXTRAVERSION =
46 + NAME = Blurry Fish Butt
47 +
48 +diff --git a/arch/alpha/include/asm/futex.h b/arch/alpha/include/asm/futex.h
49 +index f939794363ac..56474690e685 100644
50 +--- a/arch/alpha/include/asm/futex.h
51 ++++ b/arch/alpha/include/asm/futex.h
52 +@@ -29,18 +29,10 @@
53 + : "r" (uaddr), "r"(oparg) \
54 + : "memory")
55 +
56 +-static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
57 ++static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
58 ++ u32 __user *uaddr)
59 + {
60 +- int op = (encoded_op >> 28) & 7;
61 +- int cmp = (encoded_op >> 24) & 15;
62 +- int oparg = (encoded_op << 8) >> 20;
63 +- int cmparg = (encoded_op << 20) >> 20;
64 + int oldval = 0, ret;
65 +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
66 +- oparg = 1 << oparg;
67 +-
68 +- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
69 +- return -EFAULT;
70 +
71 + pagefault_disable();
72 +
73 +@@ -66,17 +58,9 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
74 +
75 + pagefault_enable();
76 +
77 +- if (!ret) {
78 +- switch (cmp) {
79 +- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
80 +- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
81 +- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
82 +- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
83 +- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
84 +- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
85 +- default: ret = -ENOSYS;
86 +- }
87 +- }
88 ++ if (!ret)
89 ++ *oval = oldval;
90 ++
91 + return ret;
92 + }
93 +
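
Note: this same conversion repeats for every architecture below (arc, arm, arm64, frv, hexagon, ia64, microblaze, mips, parisc, powerpc, s390): the encoded_op decoding and the cmp/cmparg comparison are hoisted out of each arch's futex_atomic_op_inuser() into the generic futex code, leaving the arch helper responsible only for the atomic read-modify-write and for reporting the old value through *oval. For orientation, the generic caller that upstream commit 30d6e0a4190d ("futex: Remove duplicated code and fix undefined behaviour") adds in kernel/futex.c looks roughly like the sketch below (reconstructed from memory; the exact 4.4 backport may differ slightly):

	static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
	{
		unsigned int op =  (encoded_op & 0x70000000) >> 28;
		unsigned int cmp = (encoded_op & 0x0f000000) >> 24;
		int oparg  = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
		int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
		int oldval, ret;

		if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
			oparg = 1 << oparg;	/* later fixes also clamp oparg to 0..31 */

		if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
			return -EFAULT;

		/* the arch helper now only does the atomic op */
		ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
		if (ret)
			return ret;

		/* the comparison is done exactly once, here */
		switch (cmp) {
		case FUTEX_OP_CMP_EQ: return oldval == cmparg;
		case FUTEX_OP_CMP_NE: return oldval != cmparg;
		case FUTEX_OP_CMP_LT: return oldval <  cmparg;
		case FUTEX_OP_CMP_GE: return oldval >= cmparg;
		case FUTEX_OP_CMP_LE: return oldval <= cmparg;
		case FUTEX_OP_CMP_GT: return oldval >  cmparg;
		default:	      return -ENOSYS;
		}
	}
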
94 +diff --git a/arch/arc/include/asm/futex.h b/arch/arc/include/asm/futex.h
95 +index 11e1b1f3acda..eb887dd13e74 100644
96 +--- a/arch/arc/include/asm/futex.h
97 ++++ b/arch/arc/include/asm/futex.h
98 +@@ -73,20 +73,11 @@
99 +
100 + #endif
101 +
102 +-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
103 ++static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
104 ++ u32 __user *uaddr)
105 + {
106 +- int op = (encoded_op >> 28) & 7;
107 +- int cmp = (encoded_op >> 24) & 15;
108 +- int oparg = (encoded_op << 8) >> 20;
109 +- int cmparg = (encoded_op << 20) >> 20;
110 + int oldval = 0, ret;
111 +
112 +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
113 +- oparg = 1 << oparg;
114 +-
115 +- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
116 +- return -EFAULT;
117 +-
118 + #ifndef CONFIG_ARC_HAS_LLSC
119 + preempt_disable(); /* to guarantee atomic r-m-w of futex op */
120 + #endif
121 +@@ -118,30 +109,9 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
122 + preempt_enable();
123 + #endif
124 +
125 +- if (!ret) {
126 +- switch (cmp) {
127 +- case FUTEX_OP_CMP_EQ:
128 +- ret = (oldval == cmparg);
129 +- break;
130 +- case FUTEX_OP_CMP_NE:
131 +- ret = (oldval != cmparg);
132 +- break;
133 +- case FUTEX_OP_CMP_LT:
134 +- ret = (oldval < cmparg);
135 +- break;
136 +- case FUTEX_OP_CMP_GE:
137 +- ret = (oldval >= cmparg);
138 +- break;
139 +- case FUTEX_OP_CMP_LE:
140 +- ret = (oldval <= cmparg);
141 +- break;
142 +- case FUTEX_OP_CMP_GT:
143 +- ret = (oldval > cmparg);
144 +- break;
145 +- default:
146 +- ret = -ENOSYS;
147 +- }
148 +- }
149 ++ if (!ret)
150 ++ *oval = oldval;
151 ++
152 + return ret;
153 + }
154 +
155 +diff --git a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
156 +index 7a032dd84bb2..9e096d811bed 100644
157 +--- a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
158 ++++ b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
159 +@@ -88,7 +88,6 @@
160 + clocks = <&clks 201>;
161 + VDDA-supply = <&reg_2p5v>;
162 + VDDIO-supply = <&reg_3p3v>;
163 +- lrclk-strength = <3>;
164 + };
165 + };
166 +
167 +diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
168 +index 2c16d9e7c03c..4a275fba6059 100644
169 +--- a/arch/arm/include/asm/assembler.h
170 ++++ b/arch/arm/include/asm/assembler.h
171 +@@ -530,4 +530,14 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
172 + #endif
173 + .endm
174 +
175 ++#ifdef CONFIG_KPROBES
176 ++#define _ASM_NOKPROBE(entry) \
177 ++ .pushsection "_kprobe_blacklist", "aw" ; \
178 ++ .balign 4 ; \
179 ++ .long entry; \
180 ++ .popsection
181 ++#else
182 ++#define _ASM_NOKPROBE(entry)
183 ++#endif
184 ++
185 + #endif /* __ASM_ASSEMBLER_H__ */
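
Note: _ASM_NOKPROBE is the assembly-side twin of the C NOKPROBE_SYMBOL() annotation used in the traps.c and opt-arm.c hunks below. Both record a symbol's address in the _kprobe_blacklist section; the kprobes core walks that section at boot and refuses to place probes on the listed entry points, since probing helpers such as __get_user_* can crash or recurse because they run in paths the kprobes trap handling itself depends on. The C-side macro (from include/linux/kprobes.h, quoted from memory, so verify against the tree) is approximately:

	/* emit the function address into the same _kprobe_blacklist section */
	#define NOKPROBE_SYMBOL(fname)					\
	static unsigned long __used					\
		__attribute__((__section__("_kprobe_blacklist")))	\
		_kbl_addr_##fname = (unsigned long)fname;
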
186 +diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
187 +index 6795368ad023..cc414382dab4 100644
188 +--- a/arch/arm/include/asm/futex.h
189 ++++ b/arch/arm/include/asm/futex.h
190 +@@ -128,20 +128,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
191 + #endif /* !SMP */
192 +
193 + static inline int
194 +-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
195 ++arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
196 + {
197 +- int op = (encoded_op >> 28) & 7;
198 +- int cmp = (encoded_op >> 24) & 15;
199 +- int oparg = (encoded_op << 8) >> 20;
200 +- int cmparg = (encoded_op << 20) >> 20;
201 + int oldval = 0, ret, tmp;
202 +
203 +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
204 +- oparg = 1 << oparg;
205 +-
206 +- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
207 +- return -EFAULT;
208 +-
209 + #ifndef CONFIG_SMP
210 + preempt_disable();
211 + #endif
212 +@@ -172,17 +162,9 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
213 + preempt_enable();
214 + #endif
215 +
216 +- if (!ret) {
217 +- switch (cmp) {
218 +- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
219 +- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
220 +- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
221 +- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
222 +- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
223 +- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
224 +- default: ret = -ENOSYS;
225 +- }
226 +- }
227 ++ if (!ret)
228 ++ *oval = oldval;
229 ++
230 + return ret;
231 + }
232 +
233 +diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
234 +index c92b535150a0..306a2a581785 100644
235 +--- a/arch/arm/kernel/traps.c
236 ++++ b/arch/arm/kernel/traps.c
237 +@@ -19,6 +19,7 @@
238 + #include <linux/uaccess.h>
239 + #include <linux/hardirq.h>
240 + #include <linux/kdebug.h>
241 ++#include <linux/kprobes.h>
242 + #include <linux/module.h>
243 + #include <linux/kexec.h>
244 + #include <linux/bug.h>
245 +@@ -395,7 +396,8 @@ void unregister_undef_hook(struct undef_hook *hook)
246 + raw_spin_unlock_irqrestore(&undef_lock, flags);
247 + }
248 +
249 +-static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
250 ++static nokprobe_inline
251 ++int call_undef_hook(struct pt_regs *regs, unsigned int instr)
252 + {
253 + struct undef_hook *hook;
254 + unsigned long flags;
255 +@@ -468,6 +470,7 @@ die_sig:
256 +
257 + arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6);
258 + }
259 ++NOKPROBE_SYMBOL(do_undefinstr)
260 +
261 + /*
262 + * Handle FIQ similarly to NMI on x86 systems.
263 +diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
264 +index df73914e81c8..746e7801dcdf 100644
265 +--- a/arch/arm/lib/getuser.S
266 ++++ b/arch/arm/lib/getuser.S
267 +@@ -38,6 +38,7 @@ ENTRY(__get_user_1)
268 + mov r0, #0
269 + ret lr
270 + ENDPROC(__get_user_1)
271 ++_ASM_NOKPROBE(__get_user_1)
272 +
273 + ENTRY(__get_user_2)
274 + check_uaccess r0, 2, r1, r2, __get_user_bad
275 +@@ -58,6 +59,7 @@ rb .req r0
276 + mov r0, #0
277 + ret lr
278 + ENDPROC(__get_user_2)
279 ++_ASM_NOKPROBE(__get_user_2)
280 +
281 + ENTRY(__get_user_4)
282 + check_uaccess r0, 4, r1, r2, __get_user_bad
283 +@@ -65,6 +67,7 @@ ENTRY(__get_user_4)
284 + mov r0, #0
285 + ret lr
286 + ENDPROC(__get_user_4)
287 ++_ASM_NOKPROBE(__get_user_4)
288 +
289 + ENTRY(__get_user_8)
290 + check_uaccess r0, 8, r1, r2, __get_user_bad8
291 +@@ -78,6 +81,7 @@ ENTRY(__get_user_8)
292 + mov r0, #0
293 + ret lr
294 + ENDPROC(__get_user_8)
295 ++_ASM_NOKPROBE(__get_user_8)
296 +
297 + #ifdef __ARMEB__
298 + ENTRY(__get_user_32t_8)
299 +@@ -91,6 +95,7 @@ ENTRY(__get_user_32t_8)
300 + mov r0, #0
301 + ret lr
302 + ENDPROC(__get_user_32t_8)
303 ++_ASM_NOKPROBE(__get_user_32t_8)
304 +
305 + ENTRY(__get_user_64t_1)
306 + check_uaccess r0, 1, r1, r2, __get_user_bad8
307 +@@ -98,6 +103,7 @@ ENTRY(__get_user_64t_1)
308 + mov r0, #0
309 + ret lr
310 + ENDPROC(__get_user_64t_1)
311 ++_ASM_NOKPROBE(__get_user_64t_1)
312 +
313 + ENTRY(__get_user_64t_2)
314 + check_uaccess r0, 2, r1, r2, __get_user_bad8
315 +@@ -114,6 +120,7 @@ rb .req r0
316 + mov r0, #0
317 + ret lr
318 + ENDPROC(__get_user_64t_2)
319 ++_ASM_NOKPROBE(__get_user_64t_2)
320 +
321 + ENTRY(__get_user_64t_4)
322 + check_uaccess r0, 4, r1, r2, __get_user_bad8
323 +@@ -121,6 +128,7 @@ ENTRY(__get_user_64t_4)
324 + mov r0, #0
325 + ret lr
326 + ENDPROC(__get_user_64t_4)
327 ++_ASM_NOKPROBE(__get_user_64t_4)
328 + #endif
329 +
330 + __get_user_bad8:
331 +@@ -131,6 +139,8 @@ __get_user_bad:
332 + ret lr
333 + ENDPROC(__get_user_bad)
334 + ENDPROC(__get_user_bad8)
335 ++_ASM_NOKPROBE(__get_user_bad)
336 ++_ASM_NOKPROBE(__get_user_bad8)
337 +
338 + .pushsection __ex_table, "a"
339 + .long 1b, __get_user_bad
340 +diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
341 +index bcdecc25461b..b2aa9b32bff2 100644
342 +--- a/arch/arm/probes/kprobes/opt-arm.c
343 ++++ b/arch/arm/probes/kprobes/opt-arm.c
344 +@@ -165,13 +165,14 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
345 + {
346 + unsigned long flags;
347 + struct kprobe *p = &op->kp;
348 +- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
349 ++ struct kprobe_ctlblk *kcb;
350 +
351 + /* Save skipped registers */
352 + regs->ARM_pc = (unsigned long)op->kp.addr;
353 + regs->ARM_ORIG_r0 = ~0UL;
354 +
355 + local_irq_save(flags);
356 ++ kcb = get_kprobe_ctlblk();
357 +
358 + if (kprobe_running()) {
359 + kprobes_inc_nmissed_count(&op->kp);
360 +@@ -191,6 +192,7 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
361 +
362 + local_irq_restore(flags);
363 + }
364 ++NOKPROBE_SYMBOL(optimized_callback)
365 +
366 + int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *orig)
367 + {
368 +diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
369 +index 02c08671553e..5b47218809e0 100644
370 +--- a/arch/arm64/Kconfig
371 ++++ b/arch/arm64/Kconfig
372 +@@ -375,6 +375,20 @@ config ARM64_ERRATUM_843419
373 +
374 + If unsure, say Y.
375 +
376 ++config ARM64_ERRATUM_1024718
377 ++ bool "Cortex-A55: 1024718: Update of DBM/AP bits without break before make might result in incorrect update"
378 ++ default y
379 ++ help
380 ++ This option adds work around for Arm Cortex-A55 Erratum 1024718.
381 ++
382 ++ Affected Cortex-A55 cores (r0p0, r0p1, r1p0) could cause incorrect
383 ++ update of the hardware dirty bit when the DBM/AP bits are updated
384 ++ without a break-before-make. The work around is to disable the usage
385 ++ of hardware DBM locally on the affected cores. CPUs not affected by
386 ++ erratum will continue to use the feature.
387 ++
388 ++ If unsure, say Y.
389 ++
390 + config CAVIUM_ERRATUM_22375
391 + bool "Cavium erratum 22375, 24313"
392 + default y
393 +diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
394 +index 12eff928ef8b..f68abb17aa4b 100644
395 +--- a/arch/arm64/include/asm/assembler.h
396 ++++ b/arch/arm64/include/asm/assembler.h
397 +@@ -23,6 +23,7 @@
398 + #ifndef __ASM_ASSEMBLER_H
399 + #define __ASM_ASSEMBLER_H
400 +
401 ++#include <asm/cputype.h>
402 + #include <asm/ptrace.h>
403 + #include <asm/thread_info.h>
404 +
405 +@@ -204,4 +205,63 @@ lr .req x30 // link register
406 + .size __pi_##x, . - x; \
407 + ENDPROC(x)
408 +
409 ++ /*
410 ++ * mov_q - move an immediate constant into a 64-bit register using
411 ++ * between 2 and 4 movz/movk instructions (depending on the
412 ++ * magnitude and sign of the operand)
413 ++ */
414 ++ .macro mov_q, reg, val
415 ++ .if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
416 ++ movz \reg, :abs_g1_s:\val
417 ++ .else
418 ++ .if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
419 ++ movz \reg, :abs_g2_s:\val
420 ++ .else
421 ++ movz \reg, :abs_g3:\val
422 ++ movk \reg, :abs_g2_nc:\val
423 ++ .endif
424 ++ movk \reg, :abs_g1_nc:\val
425 ++ .endif
426 ++ movk \reg, :abs_g0_nc:\val
427 ++ .endm
428 ++
429 ++/*
430 ++ * Check the MIDR_EL1 of the current CPU for a given model and a range of
431 ++ * variant/revision. See asm/cputype.h for the macros used below.
432 ++ *
433 ++ * model: MIDR_CPU_PART of CPU
434 ++ * rv_min: Minimum of MIDR_CPU_VAR_REV()
435 ++ * rv_max: Maximum of MIDR_CPU_VAR_REV()
436 ++ * res: Result register.
437 ++ * tmp1, tmp2, tmp3: Temporary registers
438 ++ *
439 ++ * Corrupts: res, tmp1, tmp2, tmp3
440 ++ * Returns: 0, if the CPU id doesn't match. Non-zero otherwise
441 ++ */
442 ++ .macro cpu_midr_match model, rv_min, rv_max, res, tmp1, tmp2, tmp3
443 ++ mrs \res, midr_el1
444 ++ mov_q \tmp1, (MIDR_REVISION_MASK | MIDR_VARIANT_MASK)
445 ++ mov_q \tmp2, MIDR_CPU_PART_MASK
446 ++ and \tmp3, \res, \tmp2 // Extract model
447 ++ and \tmp1, \res, \tmp1 // rev & variant
448 ++ mov_q \tmp2, \model
449 ++ cmp \tmp3, \tmp2
450 ++ cset \res, eq
451 ++ cbz \res, .Ldone\@ // Model matches ?
452 ++
453 ++ .if (\rv_min != 0) // Skip min check if rv_min == 0
454 ++ mov_q \tmp3, \rv_min
455 ++ cmp \tmp1, \tmp3
456 ++ cset \res, ge
457 ++ .endif // \rv_min != 0
458 ++ /* Skip rv_max check if rv_min == rv_max && rv_min != 0 */
459 ++ .if ((\rv_min != \rv_max) || \rv_min == 0)
460 ++ mov_q \tmp2, \rv_max
461 ++ cmp \tmp1, \tmp2
462 ++ cset \tmp2, le
463 ++ and \res, \res, \tmp2
464 ++ .endif
465 ++.Ldone\@:
466 ++ .endm
467 ++
468 + #endif /* __ASM_ASSEMBLER_H */
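
Note: mov_q builds an arbitrary 64-bit constant out of movz/movk instructions at assembly time (the :abs_g3:/:abs_g2_nc:/... operators select successive 16-bit chunks of the value), avoiding a literal-pool load. A hypothetical stand-alone use, with the sequence the macro would emit for this particular constant:

	mov_q	x0, 0xffff0000ffff0000	// expands to:
	// movz	x0, #0xffff, lsl #48	// :abs_g3:    bits 63..48
	// movk	x0, #0x0,    lsl #32	// :abs_g2_nc: bits 47..32
	// movk	x0, #0xffff, lsl #16	// :abs_g1_nc: bits 31..16
	// movk	x0, #0x0		// :abs_g0_nc: bits 15..0

cpu_midr_match uses it to load the MIDR masks, and the proc.S hunk further down uses cpu_midr_match to skip the TCR_HD (hardware dirty-bit management) enable on the affected Cortex-A55 revisions.
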
469 +diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
470 +index 1a5949364ed0..f43e10cfeda2 100644
471 +--- a/arch/arm64/include/asm/cputype.h
472 ++++ b/arch/arm64/include/asm/cputype.h
473 +@@ -57,6 +57,14 @@
474 + #define MIDR_IMPLEMENTOR(midr) \
475 + (((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)
476 +
477 ++#define MIDR_CPU_VAR_REV(var, rev) \
478 ++ (((var) << MIDR_VARIANT_SHIFT) | (rev))
479 ++
480 ++#define MIDR_CPU_PART_MASK \
481 ++ (MIDR_IMPLEMENTOR_MASK | \
482 ++ MIDR_ARCHITECTURE_MASK | \
483 ++ MIDR_PARTNUM_MASK)
484 ++
485 + #define MIDR_CPU_PART(imp, partnum) \
486 + (((imp) << MIDR_IMPLEMENTOR_SHIFT) | \
487 + (0xf << MIDR_ARCHITECTURE_SHIFT) | \
488 +@@ -70,11 +78,14 @@
489 + #define ARM_CPU_PART_FOUNDATION 0xD00
490 + #define ARM_CPU_PART_CORTEX_A57 0xD07
491 + #define ARM_CPU_PART_CORTEX_A53 0xD03
492 ++#define ARM_CPU_PART_CORTEX_A55 0xD05
493 +
494 + #define APM_CPU_PART_POTENZA 0x000
495 +
496 + #define CAVIUM_CPU_PART_THUNDERX 0x0A1
497 +
498 ++#define MIDR_CORTEX_A55 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
499 ++
500 + #ifndef __ASSEMBLY__
501 +
502 + /*
503 +diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
504 +index f50753573989..195fd56b2377 100644
505 +--- a/arch/arm64/include/asm/futex.h
506 ++++ b/arch/arm64/include/asm/futex.h
507 +@@ -53,20 +53,10 @@
508 + : "memory")
509 +
510 + static inline int
511 +-futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
512 ++arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
513 + {
514 +- int op = (encoded_op >> 28) & 7;
515 +- int cmp = (encoded_op >> 24) & 15;
516 +- int oparg = (int)(encoded_op << 8) >> 20;
517 +- int cmparg = (int)(encoded_op << 20) >> 20;
518 + int oldval = 0, ret, tmp;
519 +
520 +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
521 +- oparg = 1U << (oparg & 0x1f);
522 +-
523 +- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
524 +- return -EFAULT;
525 +-
526 + pagefault_disable();
527 +
528 + switch (op) {
529 +@@ -96,17 +86,9 @@ futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
530 +
531 + pagefault_enable();
532 +
533 +- if (!ret) {
534 +- switch (cmp) {
535 +- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
536 +- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
537 +- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
538 +- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
539 +- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
540 +- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
541 +- default: ret = -ENOSYS;
542 +- }
543 +- }
544 ++ if (!ret)
545 ++ *oval = oldval;
546 ++
547 + return ret;
548 + }
549 +
550 +diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
551 +index 18201e9e8cc7..f09636738007 100644
552 +--- a/arch/arm64/mm/proc.S
553 ++++ b/arch/arm64/mm/proc.S
554 +@@ -221,6 +221,11 @@ ENTRY(__cpu_setup)
555 + cbz x9, 2f
556 + cmp x9, #2
557 + b.lt 1f
558 ++#ifdef CONFIG_ARM64_ERRATUM_1024718
559 ++ /* Disable hardware DBM on Cortex-A55 r0p0, r0p1 & r1p0 */
560 ++ cpu_midr_match MIDR_CORTEX_A55, MIDR_CPU_VAR_REV(0, 0), MIDR_CPU_VAR_REV(1, 0), x1, x2, x3, x4
561 ++ cbnz x1, 1f
562 ++#endif
563 + orr x10, x10, #TCR_HD // hardware Dirty flag update
564 + 1: orr x10, x10, #TCR_HA // hardware Access flag update
565 + 2:
566 +diff --git a/arch/frv/include/asm/futex.h b/arch/frv/include/asm/futex.h
567 +index 4bea27f50a7a..2702bd802d44 100644
568 +--- a/arch/frv/include/asm/futex.h
569 ++++ b/arch/frv/include/asm/futex.h
570 +@@ -7,7 +7,8 @@
571 + #include <asm/errno.h>
572 + #include <asm/uaccess.h>
573 +
574 +-extern int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr);
575 ++extern int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
576 ++ u32 __user *uaddr);
577 +
578 + static inline int
579 + futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
580 +diff --git a/arch/frv/kernel/futex.c b/arch/frv/kernel/futex.c
581 +index d155ca9e5098..37f7b2bf7f73 100644
582 +--- a/arch/frv/kernel/futex.c
583 ++++ b/arch/frv/kernel/futex.c
584 +@@ -186,20 +186,10 @@ static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr, int *_o
585 + /*
586 + * do the futex operations
587 + */
588 +-int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
589 ++int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
590 + {
591 +- int op = (encoded_op >> 28) & 7;
592 +- int cmp = (encoded_op >> 24) & 15;
593 +- int oparg = (encoded_op << 8) >> 20;
594 +- int cmparg = (encoded_op << 20) >> 20;
595 + int oldval = 0, ret;
596 +
597 +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
598 +- oparg = 1 << oparg;
599 +-
600 +- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
601 +- return -EFAULT;
602 +-
603 + pagefault_disable();
604 +
605 + switch (op) {
606 +@@ -225,18 +215,9 @@ int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
607 +
608 + pagefault_enable();
609 +
610 +- if (!ret) {
611 +- switch (cmp) {
612 +- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
613 +- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
614 +- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
615 +- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
616 +- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
617 +- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
618 +- default: ret = -ENOSYS; break;
619 +- }
620 +- }
621 ++ if (!ret)
622 ++ *oval = oldval;
623 +
624 + return ret;
625 +
626 +-} /* end futex_atomic_op_inuser() */
627 ++} /* end arch_futex_atomic_op_inuser() */
628 +diff --git a/arch/hexagon/include/asm/futex.h b/arch/hexagon/include/asm/futex.h
629 +index 7e597f8434da..c607b77c8215 100644
630 +--- a/arch/hexagon/include/asm/futex.h
631 ++++ b/arch/hexagon/include/asm/futex.h
632 +@@ -31,18 +31,9 @@
633 +
634 +
635 + static inline int
636 +-futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
637 ++arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
638 + {
639 +- int op = (encoded_op >> 28) & 7;
640 +- int cmp = (encoded_op >> 24) & 15;
641 +- int oparg = (encoded_op << 8) >> 20;
642 +- int cmparg = (encoded_op << 20) >> 20;
643 + int oldval = 0, ret;
644 +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
645 +- oparg = 1 << oparg;
646 +-
647 +- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
648 +- return -EFAULT;
649 +
650 + pagefault_disable();
651 +
652 +@@ -72,30 +63,9 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
653 +
654 + pagefault_enable();
655 +
656 +- if (!ret) {
657 +- switch (cmp) {
658 +- case FUTEX_OP_CMP_EQ:
659 +- ret = (oldval == cmparg);
660 +- break;
661 +- case FUTEX_OP_CMP_NE:
662 +- ret = (oldval != cmparg);
663 +- break;
664 +- case FUTEX_OP_CMP_LT:
665 +- ret = (oldval < cmparg);
666 +- break;
667 +- case FUTEX_OP_CMP_GE:
668 +- ret = (oldval >= cmparg);
669 +- break;
670 +- case FUTEX_OP_CMP_LE:
671 +- ret = (oldval <= cmparg);
672 +- break;
673 +- case FUTEX_OP_CMP_GT:
674 +- ret = (oldval > cmparg);
675 +- break;
676 +- default:
677 +- ret = -ENOSYS;
678 +- }
679 +- }
680 ++ if (!ret)
681 ++ *oval = oldval;
682 ++
683 + return ret;
684 + }
685 +
686 +diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h
687 +index 76acbcd5c060..6d67dc1eaf2b 100644
688 +--- a/arch/ia64/include/asm/futex.h
689 ++++ b/arch/ia64/include/asm/futex.h
690 +@@ -45,18 +45,9 @@ do { \
691 + } while (0)
692 +
693 + static inline int
694 +-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
695 ++arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
696 + {
697 +- int op = (encoded_op >> 28) & 7;
698 +- int cmp = (encoded_op >> 24) & 15;
699 +- int oparg = (encoded_op << 8) >> 20;
700 +- int cmparg = (encoded_op << 20) >> 20;
701 + int oldval = 0, ret;
702 +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
703 +- oparg = 1 << oparg;
704 +-
705 +- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
706 +- return -EFAULT;
707 +
708 + pagefault_disable();
709 +
710 +@@ -84,17 +75,9 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
711 +
712 + pagefault_enable();
713 +
714 +- if (!ret) {
715 +- switch (cmp) {
716 +- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
717 +- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
718 +- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
719 +- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
720 +- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
721 +- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
722 +- default: ret = -ENOSYS;
723 +- }
724 +- }
725 ++ if (!ret)
726 ++ *oval = oldval;
727 ++
728 + return ret;
729 + }
730 +
731 +diff --git a/arch/microblaze/include/asm/futex.h b/arch/microblaze/include/asm/futex.h
732 +index 01848f056f43..a9dad9e5e132 100644
733 +--- a/arch/microblaze/include/asm/futex.h
734 ++++ b/arch/microblaze/include/asm/futex.h
735 +@@ -29,18 +29,9 @@
736 + })
737 +
738 + static inline int
739 +-futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
740 ++arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
741 + {
742 +- int op = (encoded_op >> 28) & 7;
743 +- int cmp = (encoded_op >> 24) & 15;
744 +- int oparg = (encoded_op << 8) >> 20;
745 +- int cmparg = (encoded_op << 20) >> 20;
746 + int oldval = 0, ret;
747 +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
748 +- oparg = 1 << oparg;
749 +-
750 +- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
751 +- return -EFAULT;
752 +
753 + pagefault_disable();
754 +
755 +@@ -66,30 +57,9 @@ futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
756 +
757 + pagefault_enable();
758 +
759 +- if (!ret) {
760 +- switch (cmp) {
761 +- case FUTEX_OP_CMP_EQ:
762 +- ret = (oldval == cmparg);
763 +- break;
764 +- case FUTEX_OP_CMP_NE:
765 +- ret = (oldval != cmparg);
766 +- break;
767 +- case FUTEX_OP_CMP_LT:
768 +- ret = (oldval < cmparg);
769 +- break;
770 +- case FUTEX_OP_CMP_GE:
771 +- ret = (oldval >= cmparg);
772 +- break;
773 +- case FUTEX_OP_CMP_LE:
774 +- ret = (oldval <= cmparg);
775 +- break;
776 +- case FUTEX_OP_CMP_GT:
777 +- ret = (oldval > cmparg);
778 +- break;
779 +- default:
780 +- ret = -ENOSYS;
781 +- }
782 +- }
783 ++ if (!ret)
784 ++ *oval = oldval;
785 ++
786 + return ret;
787 + }
788 +
789 +diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
790 +index 1de190bdfb9c..a9e61ea54ca9 100644
791 +--- a/arch/mips/include/asm/futex.h
792 ++++ b/arch/mips/include/asm/futex.h
793 +@@ -83,18 +83,9 @@
794 + }
795 +
796 + static inline int
797 +-futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
798 ++arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
799 + {
800 +- int op = (encoded_op >> 28) & 7;
801 +- int cmp = (encoded_op >> 24) & 15;
802 +- int oparg = (encoded_op << 8) >> 20;
803 +- int cmparg = (encoded_op << 20) >> 20;
804 + int oldval = 0, ret;
805 +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
806 +- oparg = 1 << oparg;
807 +-
808 +- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
809 +- return -EFAULT;
810 +
811 + pagefault_disable();
812 +
813 +@@ -125,17 +116,9 @@ futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
814 +
815 + pagefault_enable();
816 +
817 +- if (!ret) {
818 +- switch (cmp) {
819 +- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
820 +- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
821 +- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
822 +- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
823 +- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
824 +- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
825 +- default: ret = -ENOSYS;
826 +- }
827 +- }
828 ++ if (!ret)
829 ++ *oval = oldval;
830 ++
831 + return ret;
832 + }
833 +
834 +diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h
835 +index 49df14805a9b..ae5b64981d72 100644
836 +--- a/arch/parisc/include/asm/futex.h
837 ++++ b/arch/parisc/include/asm/futex.h
838 +@@ -32,20 +32,11 @@ _futex_spin_unlock_irqrestore(u32 __user *uaddr, unsigned long int *flags)
839 + }
840 +
841 + static inline int
842 +-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
843 ++arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
844 + {
845 + unsigned long int flags;
846 + u32 val;
847 +- int op = (encoded_op >> 28) & 7;
848 +- int cmp = (encoded_op >> 24) & 15;
849 +- int oparg = (encoded_op << 8) >> 20;
850 +- int cmparg = (encoded_op << 20) >> 20;
851 + int oldval = 0, ret;
852 +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
853 +- oparg = 1 << oparg;
854 +-
855 +- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr)))
856 +- return -EFAULT;
857 +
858 + pagefault_disable();
859 +
860 +@@ -98,17 +89,9 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
861 +
862 + pagefault_enable();
863 +
864 +- if (!ret) {
865 +- switch (cmp) {
866 +- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
867 +- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
868 +- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
869 +- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
870 +- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
871 +- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
872 +- default: ret = -ENOSYS;
873 +- }
874 +- }
875 ++ if (!ret)
876 ++ *oval = oldval;
877 ++
878 + return ret;
879 + }
880 +
881 +diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
882 +index e05808a328db..b0629249778b 100644
883 +--- a/arch/powerpc/include/asm/firmware.h
884 ++++ b/arch/powerpc/include/asm/firmware.h
885 +@@ -47,12 +47,10 @@
886 + #define FW_FEATURE_VPHN ASM_CONST(0x0000000004000000)
887 + #define FW_FEATURE_XCMO ASM_CONST(0x0000000008000000)
888 + #define FW_FEATURE_OPAL ASM_CONST(0x0000000010000000)
889 +-#define FW_FEATURE_OPALv2 ASM_CONST(0x0000000020000000)
890 + #define FW_FEATURE_SET_MODE ASM_CONST(0x0000000040000000)
891 + #define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000)
892 + #define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000)
893 + #define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000)
894 +-#define FW_FEATURE_OPALv3 ASM_CONST(0x0000000400000000)
895 +
896 + #ifndef __ASSEMBLY__
897 +
898 +@@ -70,8 +68,7 @@ enum {
899 + FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY |
900 + FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN,
901 + FW_FEATURE_PSERIES_ALWAYS = 0,
902 +- FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2 |
903 +- FW_FEATURE_OPALv3,
904 ++ FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL,
905 + FW_FEATURE_POWERNV_ALWAYS = 0,
906 + FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
907 + FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
908 +diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
909 +index 2a9cf845473b..f4c7467f7465 100644
910 +--- a/arch/powerpc/include/asm/futex.h
911 ++++ b/arch/powerpc/include/asm/futex.h
912 +@@ -31,18 +31,10 @@
913 + : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \
914 + : "cr0", "memory")
915 +
916 +-static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
917 ++static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
918 ++ u32 __user *uaddr)
919 + {
920 +- int op = (encoded_op >> 28) & 7;
921 +- int cmp = (encoded_op >> 24) & 15;
922 +- int oparg = (encoded_op << 8) >> 20;
923 +- int cmparg = (encoded_op << 20) >> 20;
924 + int oldval = 0, ret;
925 +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
926 +- oparg = 1 << oparg;
927 +-
928 +- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
929 +- return -EFAULT;
930 +
931 + pagefault_disable();
932 +
933 +@@ -68,17 +60,9 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
934 +
935 + pagefault_enable();
936 +
937 +- if (!ret) {
938 +- switch (cmp) {
939 +- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
940 +- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
941 +- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
942 +- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
943 +- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
944 +- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
945 +- default: ret = -ENOSYS;
946 +- }
947 +- }
948 ++ if (!ret)
949 ++ *oval = oldval;
950 ++
951 + return ret;
952 + }
953 +
954 +diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
955 +index 44c8d03558ac..318224784114 100644
956 +--- a/arch/powerpc/kernel/setup-common.c
957 ++++ b/arch/powerpc/kernel/setup-common.c
958 +@@ -217,14 +217,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
959 + unsigned short maj;
960 + unsigned short min;
961 +
962 +- /* We only show online cpus: disable preempt (overzealous, I
963 +- * knew) to prevent cpu going down. */
964 +- preempt_disable();
965 +- if (!cpu_online(cpu_id)) {
966 +- preempt_enable();
967 +- return 0;
968 +- }
969 +-
970 + #ifdef CONFIG_SMP
971 + pvr = per_cpu(cpu_pvr, cpu_id);
972 + #else
973 +@@ -329,9 +321,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
974 + #ifdef CONFIG_SMP
975 + seq_printf(m, "\n");
976 + #endif
977 +-
978 +- preempt_enable();
979 +-
980 + /* If this is the last cpu, print the summary */
981 + if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)
982 + show_cpuinfo_summary(m);
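
Note: dropping the preempt_disable()/cpu_online() dance is safe because disabling preemption on the reading CPU never actually excluded hotplug, and the /proc/cpuinfo seq_file iterator in this same file only hands show_cpuinfo() CPUs taken from cpu_online_mask in the first place; the worst case is printing momentarily stale data. That iterator looks approximately like this (quoted from setup-common.c from memory, so treat as illustrative):

	static void *c_start(struct seq_file *m, loff_t *pos)
	{
		if (*pos == 0)	/* just in case, cpu 0 is not the first */
			*pos = cpumask_first(cpu_online_mask);
		else
			*pos = cpumask_next(*pos - 1, cpu_online_mask);
		if ((*pos) < nr_cpu_ids)
			return (void *)(unsigned long)(*pos + 1);
		return NULL;
	}
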
983 +diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
984 +index 92736851c795..3f653f5201e7 100644
985 +--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
986 ++++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
987 +@@ -48,8 +48,8 @@ static int pnv_eeh_init(void)
988 + struct pci_controller *hose;
989 + struct pnv_phb *phb;
990 +
991 +- if (!firmware_has_feature(FW_FEATURE_OPALv3)) {
992 +- pr_warn("%s: OPALv3 is required !\n",
993 ++ if (!firmware_has_feature(FW_FEATURE_OPAL)) {
994 ++ pr_warn("%s: OPAL is required !\n",
995 + __func__);
996 + return -EINVAL;
997 + }
998 +diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
999 +index 59d735d2e5c0..15bfbcd5debc 100644
1000 +--- a/arch/powerpc/platforms/powernv/idle.c
1001 ++++ b/arch/powerpc/platforms/powernv/idle.c
1002 +@@ -242,7 +242,7 @@ static int __init pnv_init_idle_states(void)
1003 + if (cpuidle_disable != IDLE_NO_OVERRIDE)
1004 + goto out;
1005 +
1006 +- if (!firmware_has_feature(FW_FEATURE_OPALv3))
1007 ++ if (!firmware_has_feature(FW_FEATURE_OPAL))
1008 + goto out;
1009 +
1010 + power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
1011 +diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c
1012 +index 1bceb95f422d..5584247f5029 100644
1013 +--- a/arch/powerpc/platforms/powernv/opal-nvram.c
1014 ++++ b/arch/powerpc/platforms/powernv/opal-nvram.c
1015 +@@ -44,6 +44,10 @@ static ssize_t opal_nvram_read(char *buf, size_t count, loff_t *index)
1016 + return count;
1017 + }
1018 +
1019 ++/*
1020 ++ * This can be called in the panic path with interrupts off, so use
1021 ++ * mdelay in that case.
1022 ++ */
1023 + static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
1024 + {
1025 + s64 rc = OPAL_BUSY;
1026 +@@ -58,10 +62,16 @@ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
1027 + while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
1028 + rc = opal_write_nvram(__pa(buf), count, off);
1029 + if (rc == OPAL_BUSY_EVENT) {
1030 +- msleep(OPAL_BUSY_DELAY_MS);
1031 ++ if (in_interrupt() || irqs_disabled())
1032 ++ mdelay(OPAL_BUSY_DELAY_MS);
1033 ++ else
1034 ++ msleep(OPAL_BUSY_DELAY_MS);
1035 + opal_poll_events(NULL);
1036 + } else if (rc == OPAL_BUSY) {
1037 +- msleep(OPAL_BUSY_DELAY_MS);
1038 ++ if (in_interrupt() || irqs_disabled())
1039 ++ mdelay(OPAL_BUSY_DELAY_MS);
1040 ++ else
1041 ++ msleep(OPAL_BUSY_DELAY_MS);
1042 + }
1043 + }
1044 +
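
Note: the msleep()/mdelay() split above is the standard pattern for code reachable from atomic context: msleep() sleeps, so it may only be called where scheduling is legal, while mdelay() busy-waits and is safe with interrupts off. It matters here because the panic-time kmsg dump writes the kernel log to NVRAM with interrupts disabled. The same idiom could be factored into a helper like this (illustrative only, not part of the patch):

	/* illustrative helper: pick a delay primitive legal in this context */
	static void opal_busy_delay(unsigned int ms)
	{
		if (in_interrupt() || irqs_disabled())
			mdelay(ms);	/* atomic context: busy-wait */
		else
			msleep(ms);	/* process context: sleep */
	}
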
1045 +diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c
1046 +index 7634d1c62299..d0ac535cf5d7 100644
1047 +--- a/arch/powerpc/platforms/powernv/opal-xscom.c
1048 ++++ b/arch/powerpc/platforms/powernv/opal-xscom.c
1049 +@@ -126,7 +126,7 @@ static const struct scom_controller opal_scom_controller = {
1050 +
1051 + static int opal_xscom_init(void)
1052 + {
1053 +- if (firmware_has_feature(FW_FEATURE_OPALv3))
1054 ++ if (firmware_has_feature(FW_FEATURE_OPAL))
1055 + scom_init(&opal_scom_controller);
1056 + return 0;
1057 + }
1058 +diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
1059 +index ae29eaf85e9e..e48826aa314c 100644
1060 +--- a/arch/powerpc/platforms/powernv/opal.c
1061 ++++ b/arch/powerpc/platforms/powernv/opal.c
1062 +@@ -98,16 +98,11 @@ int __init early_init_dt_scan_opal(unsigned long node,
1063 + pr_debug("OPAL Entry = 0x%llx (sizep=%p runtimesz=%d)\n",
1064 + opal.size, sizep, runtimesz);
1065 +
1066 +- powerpc_firmware_features |= FW_FEATURE_OPAL;
1067 + if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) {
1068 +- powerpc_firmware_features |= FW_FEATURE_OPALv2;
1069 +- powerpc_firmware_features |= FW_FEATURE_OPALv3;
1070 +- pr_info("OPAL V3 detected !\n");
1071 +- } else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) {
1072 +- powerpc_firmware_features |= FW_FEATURE_OPALv2;
1073 +- pr_info("OPAL V2 detected !\n");
1074 ++ powerpc_firmware_features |= FW_FEATURE_OPAL;
1075 ++ pr_info("OPAL detected !\n");
1076 + } else {
1077 +- pr_info("OPAL V1 detected !\n");
1078 ++ panic("OPAL != V3 detected, no longer supported.\n");
1079 + }
1080 +
1081 + /* Reinit all cores with the right endian */
1082 +@@ -352,17 +347,15 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
1083 + * enough room and be done with it
1084 + */
1085 + spin_lock_irqsave(&opal_write_lock, flags);
1086 +- if (firmware_has_feature(FW_FEATURE_OPALv2)) {
1087 +- rc = opal_console_write_buffer_space(vtermno, &olen);
1088 +- len = be64_to_cpu(olen);
1089 +- if (rc || len < total_len) {
1090 +- spin_unlock_irqrestore(&opal_write_lock, flags);
1091 +- /* Closed -> drop characters */
1092 +- if (rc)
1093 +- return total_len;
1094 +- opal_poll_events(NULL);
1095 +- return -EAGAIN;
1096 +- }
1097 ++ rc = opal_console_write_buffer_space(vtermno, &olen);
1098 ++ len = be64_to_cpu(olen);
1099 ++ if (rc || len < total_len) {
1100 ++ spin_unlock_irqrestore(&opal_write_lock, flags);
1101 ++ /* Closed -> drop characters */
1102 ++ if (rc)
1103 ++ return total_len;
1104 ++ opal_poll_events(NULL);
1105 ++ return -EAGAIN;
1106 + }
1107 +
1108 + /* We still try to handle partial completions, though they
1109 +@@ -696,10 +689,7 @@ static int __init opal_init(void)
1110 + }
1111 +
1112 + /* Register OPAL consoles if any ports */
1113 +- if (firmware_has_feature(FW_FEATURE_OPALv2))
1114 +- consoles = of_find_node_by_path("/ibm,opal/consoles");
1115 +- else
1116 +- consoles = of_node_get(opal_node);
1117 ++ consoles = of_find_node_by_path("/ibm,opal/consoles");
1118 + if (consoles) {
1119 + for_each_child_of_node(consoles, np) {
1120 + if (strcmp(np->name, "serial"))
1121 +diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
1122 +index ecb7f3220355..eac3b7cc78c6 100644
1123 +--- a/arch/powerpc/platforms/powernv/pci-ioda.c
1124 ++++ b/arch/powerpc/platforms/powernv/pci-ioda.c
1125 +@@ -344,7 +344,7 @@ static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
1126 + return;
1127 + }
1128 +
1129 +- if (!firmware_has_feature(FW_FEATURE_OPALv3)) {
1130 ++ if (!firmware_has_feature(FW_FEATURE_OPAL)) {
1131 + pr_info(" Firmware too old to support M64 window\n");
1132 + return;
1133 + }
1134 +diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
1135 +index 30c6b3b7be90..c57afc619b20 100644
1136 +--- a/arch/powerpc/platforms/powernv/setup.c
1137 ++++ b/arch/powerpc/platforms/powernv/setup.c
1138 +@@ -140,12 +140,8 @@ static void pnv_show_cpuinfo(struct seq_file *m)
1139 + if (root)
1140 + model = of_get_property(root, "model", NULL);
1141 + seq_printf(m, "machine\t\t: PowerNV %s\n", model);
1142 +- if (firmware_has_feature(FW_FEATURE_OPALv3))
1143 +- seq_printf(m, "firmware\t: OPAL v3\n");
1144 +- else if (firmware_has_feature(FW_FEATURE_OPALv2))
1145 +- seq_printf(m, "firmware\t: OPAL v2\n");
1146 +- else if (firmware_has_feature(FW_FEATURE_OPAL))
1147 +- seq_printf(m, "firmware\t: OPAL v1\n");
1148 ++ if (firmware_has_feature(FW_FEATURE_OPAL))
1149 ++ seq_printf(m, "firmware\t: OPAL\n");
1150 + else
1151 + seq_printf(m, "firmware\t: BML\n");
1152 + of_node_put(root);
1153 +@@ -274,9 +270,9 @@ static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
1154 + {
1155 + xics_kexec_teardown_cpu(secondary);
1156 +
1157 +- /* On OPAL v3, we return all CPUs to firmware */
1158 ++ /* On OPAL, we return all CPUs to firmware */
1159 +
1160 +- if (!firmware_has_feature(FW_FEATURE_OPALv3))
1161 ++ if (!firmware_has_feature(FW_FEATURE_OPAL))
1162 + return;
1163 +
1164 + if (secondary) {
1165 +diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
1166 +index ca264833ee64..ad7b1a3dbed0 100644
1167 +--- a/arch/powerpc/platforms/powernv/smp.c
1168 ++++ b/arch/powerpc/platforms/powernv/smp.c
1169 +@@ -61,14 +61,15 @@ static int pnv_smp_kick_cpu(int nr)
1170 + unsigned long start_here =
1171 + __pa(ppc_function_entry(generic_secondary_smp_init));
1172 + long rc;
1173 ++ uint8_t status;
1174 +
1175 + BUG_ON(nr < 0 || nr >= NR_CPUS);
1176 +
1177 + /*
1178 +- * If we already started or OPALv2 is not supported, we just
1179 ++ * If we already started or OPAL is not supported, we just
1180 + * kick the CPU via the PACA
1181 + */
1182 +- if (paca[nr].cpu_start || !firmware_has_feature(FW_FEATURE_OPALv2))
1183 ++ if (paca[nr].cpu_start || !firmware_has_feature(FW_FEATURE_OPAL))
1184 + goto kick;
1185 +
1186 + /*
1187 +@@ -77,55 +78,42 @@ static int pnv_smp_kick_cpu(int nr)
1188 + * first time. OPAL v3 allows us to query OPAL to know if it
1189 + * has the CPUs, so we do that
1190 + */
1191 +- if (firmware_has_feature(FW_FEATURE_OPALv3)) {
1192 +- uint8_t status;
1193 +-
1194 +- rc = opal_query_cpu_status(pcpu, &status);
1195 +- if (rc != OPAL_SUCCESS) {
1196 +- pr_warn("OPAL Error %ld querying CPU %d state\n",
1197 +- rc, nr);
1198 +- return -ENODEV;
1199 +- }
1200 ++ rc = opal_query_cpu_status(pcpu, &status);
1201 ++ if (rc != OPAL_SUCCESS) {
1202 ++ pr_warn("OPAL Error %ld querying CPU %d state\n", rc, nr);
1203 ++ return -ENODEV;
1204 ++ }
1205 +
1206 +- /*
1207 +- * Already started, just kick it, probably coming from
1208 +- * kexec and spinning
1209 +- */
1210 +- if (status == OPAL_THREAD_STARTED)
1211 +- goto kick;
1212 ++ /*
1213 ++ * Already started, just kick it, probably coming from
1214 ++ * kexec and spinning
1215 ++ */
1216 ++ if (status == OPAL_THREAD_STARTED)
1217 ++ goto kick;
1218 +
1219 +- /*
1220 +- * Available/inactive, let's kick it
1221 +- */
1222 +- if (status == OPAL_THREAD_INACTIVE) {
1223 +- pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n",
1224 +- nr, pcpu);
1225 +- rc = opal_start_cpu(pcpu, start_here);
1226 +- if (rc != OPAL_SUCCESS) {
1227 +- pr_warn("OPAL Error %ld starting CPU %d\n",
1228 +- rc, nr);
1229 +- return -ENODEV;
1230 +- }
1231 +- } else {
1232 +- /*
1233 +- * An unavailable CPU (or any other unknown status)
1234 +- * shouldn't be started. It should also
1235 +- * not be in the possible map but currently it can
1236 +- * happen
1237 +- */
1238 +- pr_devel("OPAL: CPU %d (HW 0x%x) is unavailable"
1239 +- " (status %d)...\n", nr, pcpu, status);
1240 ++ /*
1241 ++ * Available/inactive, let's kick it
1242 ++ */
1243 ++ if (status == OPAL_THREAD_INACTIVE) {
1244 ++ pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", nr, pcpu);
1245 ++ rc = opal_start_cpu(pcpu, start_here);
1246 ++ if (rc != OPAL_SUCCESS) {
1247 ++ pr_warn("OPAL Error %ld starting CPU %d\n", rc, nr);
1248 + return -ENODEV;
1249 + }
1250 + } else {
1251 + /*
1252 +- * On OPAL v2, we just kick it and hope for the best,
1253 +- * we must not test the error from opal_start_cpu() or
1254 +- * we would fail to get CPUs from kexec.
1255 ++ * An unavailable CPU (or any other unknown status)
1256 ++ * shouldn't be started. It should also
1257 ++ * not be in the possible map but currently it can
1258 ++ * happen
1259 + */
1260 +- opal_start_cpu(pcpu, start_here);
1261 ++ pr_devel("OPAL: CPU %d (HW 0x%x) is unavailable"
1262 ++ " (status %d)...\n", nr, pcpu, status);
1263 ++ return -ENODEV;
1264 + }
1265 +- kick:
1266 ++
1267 ++kick:
1268 + return smp_generic_kick_cpu(nr);
1269 + }
1270 +
1271 +diff --git a/arch/s390/include/asm/alternative-asm.h b/arch/s390/include/asm/alternative-asm.h
1272 +new file mode 100644
1273 +index 000000000000..955d620db23e
1274 +--- /dev/null
1275 ++++ b/arch/s390/include/asm/alternative-asm.h
1276 +@@ -0,0 +1,108 @@
1277 ++/* SPDX-License-Identifier: GPL-2.0 */
1278 ++#ifndef _ASM_S390_ALTERNATIVE_ASM_H
1279 ++#define _ASM_S390_ALTERNATIVE_ASM_H
1280 ++
1281 ++#ifdef __ASSEMBLY__
1282 ++
1283 ++/*
1284 ++ * Check the length of an instruction sequence. The length may not be larger
1285 ++ * than 254 bytes and it has to be divisible by 2.
1286 ++ */
1287 ++.macro alt_len_check start,end
1288 ++ .if ( \end - \start ) > 254
1289 ++ .error "cpu alternatives does not support instructions blocks > 254 bytes\n"
1290 ++ .endif
1291 ++ .if ( \end - \start ) % 2
1292 ++ .error "cpu alternatives instructions length is odd\n"
1293 ++ .endif
1294 ++.endm
1295 ++
1296 ++/*
1297 ++ * Issue one struct alt_instr descriptor entry (need to put it into
1298 ++ * the section .altinstructions, see below). This entry contains
1299 ++ * enough information for the alternatives patching code to patch an
1300 ++ * instruction. See apply_alternatives().
1301 ++ */
1302 ++.macro alt_entry orig_start, orig_end, alt_start, alt_end, feature
1303 ++ .long \orig_start - .
1304 ++ .long \alt_start - .
1305 ++ .word \feature
1306 ++ .byte \orig_end - \orig_start
1307 ++ .byte \alt_end - \alt_start
1308 ++.endm
1309 ++
1310 ++/*
1311 ++ * Fill up @bytes with nops. The macro emits 6-byte nop instructions
1312 ++ * for the bulk of the area, possibly followed by a 4-byte and/or
1313 ++ * a 2-byte nop if the size of the area is not divisible by 6.
1314 ++ */
1315 ++.macro alt_pad_fill bytes
1316 ++ .fill ( \bytes ) / 6, 6, 0xc0040000
1317 ++ .fill ( \bytes ) % 6 / 4, 4, 0x47000000
1318 ++ .fill ( \bytes ) % 6 % 4 / 2, 2, 0x0700
1319 ++.endm
1320 ++
1321 ++/*
1322 ++ * Fill up @bytes with nops. If the number of bytes is larger
1323 ++ * than 6, emit a jg instruction to branch over all nops, then
1324 ++ * fill an area of size (@bytes - 6) with nop instructions.
1325 ++ */
1326 ++.macro alt_pad bytes
1327 ++ .if ( \bytes > 0 )
1328 ++ .if ( \bytes > 6 )
1329 ++ jg . + \bytes
1330 ++ alt_pad_fill \bytes - 6
1331 ++ .else
1332 ++ alt_pad_fill \bytes
1333 ++ .endif
1334 ++ .endif
1335 ++.endm
1336 ++
1337 ++/*
1338 ++ * Define an alternative between two instructions. If @feature is
1339 ++ * present, early code in apply_alternatives() replaces @oldinstr with
1340 ++ * @newinstr. ".skip" directive takes care of proper instruction padding
1341 ++ * in case @newinstr is longer than @oldinstr.
1342 ++ */
1343 ++.macro ALTERNATIVE oldinstr, newinstr, feature
1344 ++ .pushsection .altinstr_replacement,"ax"
1345 ++770: \newinstr
1346 ++771: .popsection
1347 ++772: \oldinstr
1348 ++773: alt_len_check 770b, 771b
1349 ++ alt_len_check 772b, 773b
1350 ++ alt_pad ( ( 771b - 770b ) - ( 773b - 772b ) )
1351 ++774: .pushsection .altinstructions,"a"
1352 ++ alt_entry 772b, 774b, 770b, 771b, \feature
1353 ++ .popsection
1354 ++.endm
1355 ++
1356 ++/*
1357 ++ * Define an alternative between two instructions. If @feature is
1358 ++ * present, early code in apply_alternatives() replaces @oldinstr with
1359 ++ * @newinstr. ".skip" directive takes care of proper instruction padding
1360 ++ * in case @newinstr is longer than @oldinstr.
1361 ++ */
1362 ++.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
1363 ++ .pushsection .altinstr_replacement,"ax"
1364 ++770: \newinstr1
1365 ++771: \newinstr2
1366 ++772: .popsection
1367 ++773: \oldinstr
1368 ++774: alt_len_check 770b, 771b
1369 ++ alt_len_check 771b, 772b
1370 ++ alt_len_check 773b, 774b
1371 ++ .if ( 771b - 770b > 772b - 771b )
1372 ++ alt_pad ( ( 771b - 770b ) - ( 774b - 773b ) )
1373 ++ .else
1374 ++ alt_pad ( ( 772b - 771b ) - ( 774b - 773b ) )
1375 ++ .endif
1376 ++775: .pushsection .altinstructions,"a"
1377 ++ alt_entry 773b, 775b, 770b, 771b,\feature1
1378 ++ alt_entry 773b, 775b, 771b, 772b,\feature2
1379 ++ .popsection
1380 ++.endm
1381 ++
1382 ++#endif /* __ASSEMBLY__ */
1383 ++
1384 ++#endif /* _ASM_S390_ALTERNATIVE_ASM_H */
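
Note: this header is the assembly counterpart of the C ALTERNATIVE() infrastructure: the old instruction is assembled in place, the replacement is collected in .altinstr_replacement, and the alt_entry descriptor in .altinstructions lets apply_alternatives() patch the site at boot when the named facility is present. As a usage sketch (opcode and facility number quoted from memory from the s390 spectre series, so treat as illustrative), the branch-prediction-on macro is defined roughly as:

	.macro BPON
	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,13,0", 82
	.endm

i.e. a plain nop on machines without facility 82, patched into a PPA (perform-processor-assist) instruction where the facility exists.
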
1385 +diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
1386 +index a4811aa0304d..8f8eec9e1198 100644
1387 +--- a/arch/s390/include/asm/futex.h
1388 ++++ b/arch/s390/include/asm/futex.h
1389 +@@ -21,17 +21,12 @@
1390 + : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
1391 + "m" (*uaddr) : "cc");
1392 +
1393 +-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
1394 ++static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
1395 ++ u32 __user *uaddr)
1396 + {
1397 +- int op = (encoded_op >> 28) & 7;
1398 +- int cmp = (encoded_op >> 24) & 15;
1399 +- int oparg = (encoded_op << 8) >> 20;
1400 +- int cmparg = (encoded_op << 20) >> 20;
1401 + int oldval = 0, newval, ret;
1402 +
1403 + load_kernel_asce();
1404 +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
1405 +- oparg = 1 << oparg;
1406 +
1407 + pagefault_disable();
1408 + switch (op) {
1409 +@@ -60,17 +55,9 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
1410 + }
1411 + pagefault_enable();
1412 +
1413 +- if (!ret) {
1414 +- switch (cmp) {
1415 +- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
1416 +- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
1417 +- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
1418 +- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
1419 +- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
1420 +- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
1421 +- default: ret = -ENOSYS;
1422 +- }
1423 +- }
1424 ++ if (!ret)
1425 ++ *oval = oldval;
1426 ++
1427 + return ret;
1428 + }
1429 +
1430 +diff --git a/arch/s390/include/asm/nospec-insn.h b/arch/s390/include/asm/nospec-insn.h
1431 +new file mode 100644
1432 +index 000000000000..087fc9b972c5
1433 +--- /dev/null
1434 ++++ b/arch/s390/include/asm/nospec-insn.h
1435 +@@ -0,0 +1,182 @@
1436 ++/* SPDX-License-Identifier: GPL-2.0 */
1437 ++#ifndef _ASM_S390_NOSPEC_ASM_H
1438 ++#define _ASM_S390_NOSPEC_ASM_H
1439 ++
1440 ++#ifdef __ASSEMBLY__
1441 ++
1442 ++#ifdef CONFIG_EXPOLINE
1443 ++
1444 ++/*
1445 ++ * The expoline macros are used to create thunks in the same format
1446 ++ * as gcc generates them. The 'comdat' section flag makes sure that
1447 ++ * the various thunks are merged into a single copy.
1448 ++ */
1449 ++ .macro __THUNK_PROLOG_NAME name
1450 ++ .pushsection .text.\name,"axG",@progbits,\name,comdat
1451 ++ .globl \name
1452 ++ .hidden \name
1453 ++ .type \name,@function
1454 ++\name:
1455 ++ .cfi_startproc
1456 ++ .endm
1457 ++
1458 ++ .macro __THUNK_EPILOG
1459 ++ .cfi_endproc
1460 ++ .popsection
1461 ++ .endm
1462 ++
1463 ++ .macro __THUNK_PROLOG_BR r1,r2
1464 ++ __THUNK_PROLOG_NAME __s390x_indirect_jump_r\r2\()use_r\r1
1465 ++ .endm
1466 ++
1467 ++ .macro __THUNK_PROLOG_BC d0,r1,r2
1468 ++ __THUNK_PROLOG_NAME __s390x_indirect_branch_\d0\()_\r2\()use_\r1
1469 ++ .endm
1470 ++
1471 ++ .macro __THUNK_BR r1,r2
1472 ++ jg __s390x_indirect_jump_r\r2\()use_r\r1
1473 ++ .endm
1474 ++
1475 ++ .macro __THUNK_BC d0,r1,r2
1476 ++ jg __s390x_indirect_branch_\d0\()_\r2\()use_\r1
1477 ++ .endm
1478 ++
1479 ++ .macro __THUNK_BRASL r1,r2,r3
1480 ++ brasl \r1,__s390x_indirect_jump_r\r3\()use_r\r2
1481 ++ .endm
1482 ++
1483 ++ .macro __DECODE_RR expand,reg,ruse
1484 ++ .set __decode_fail,1
1485 ++ .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1486 ++ .ifc \reg,%r\r1
1487 ++ .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1488 ++ .ifc \ruse,%r\r2
1489 ++ \expand \r1,\r2
1490 ++ .set __decode_fail,0
1491 ++ .endif
1492 ++ .endr
1493 ++ .endif
1494 ++ .endr
1495 ++ .if __decode_fail == 1
1496 ++ .error "__DECODE_RR failed"
1497 ++ .endif
1498 ++ .endm
1499 ++
1500 ++ .macro __DECODE_RRR expand,rsave,rtarget,ruse
1501 ++ .set __decode_fail,1
1502 ++ .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1503 ++ .ifc \rsave,%r\r1
1504 ++ .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1505 ++ .ifc \rtarget,%r\r2
1506 ++ .irp r3,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1507 ++ .ifc \ruse,%r\r3
1508 ++ \expand \r1,\r2,\r3
1509 ++ .set __decode_fail,0
1510 ++ .endif
1511 ++ .endr
1512 ++ .endif
1513 ++ .endr
1514 ++ .endif
1515 ++ .endr
1516 ++ .if __decode_fail == 1
1517 ++ .error "__DECODE_RRR failed"
1518 ++ .endif
1519 ++ .endm
1520 ++
1521 ++ .macro __DECODE_DRR expand,disp,reg,ruse
1522 ++ .set __decode_fail,1
1523 ++ .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1524 ++ .ifc \reg,%r\r1
1525 ++ .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1526 ++ .ifc \ruse,%r\r2
1527 ++ \expand \disp,\r1,\r2
1528 ++ .set __decode_fail,0
1529 ++ .endif
1530 ++ .endr
1531 ++ .endif
1532 ++ .endr
1533 ++ .if __decode_fail == 1
1534 ++ .error "__DECODE_DRR failed"
1535 ++ .endif
1536 ++ .endm
1537 ++
1538 ++ .macro __THUNK_EX_BR reg,ruse
1539 ++#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
1540 ++ exrl 0,555f
1541 ++ j .
1542 ++#else
1543 ++ larl \ruse,555f
1544 ++ ex 0,0(\ruse)
1545 ++ j .
1546 ++#endif
1547 ++555: br \reg
1548 ++ .endm
1549 ++
1550 ++ .macro __THUNK_EX_BC disp,reg,ruse
1551 ++#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
1552 ++ exrl 0,556f
1553 ++ j .
1554 ++#else
1555 ++ larl \ruse,556f
1556 ++ ex 0,0(\ruse)
1557 ++ j .
1558 ++#endif
1559 ++556: b \disp(\reg)
1560 ++ .endm
1561 ++
1562 ++ .macro GEN_BR_THUNK reg,ruse=%r1
1563 ++ __DECODE_RR __THUNK_PROLOG_BR,\reg,\ruse
1564 ++ __THUNK_EX_BR \reg,\ruse
1565 ++ __THUNK_EPILOG
1566 ++ .endm
1567 ++
1568 ++ .macro GEN_B_THUNK disp,reg,ruse=%r1
1569 ++ __DECODE_DRR __THUNK_PROLOG_BC,\disp,\reg,\ruse
1570 ++ __THUNK_EX_BC \disp,\reg,\ruse
1571 ++ __THUNK_EPILOG
1572 ++ .endm
1573 ++
1574 ++ .macro BR_EX reg,ruse=%r1
1575 ++557: __DECODE_RR __THUNK_BR,\reg,\ruse
1576 ++ .pushsection .s390_indirect_branches,"a",@progbits
1577 ++ .long 557b-.
1578 ++ .popsection
1579 ++ .endm
1580 ++
1581 ++ .macro B_EX disp,reg,ruse=%r1
1582 ++558: __DECODE_DRR __THUNK_BC,\disp,\reg,\ruse
1583 ++ .pushsection .s390_indirect_branches,"a",@progbits
1584 ++ .long 558b-.
1585 ++ .popsection
1586 ++ .endm
1587 ++
1588 ++ .macro BASR_EX rsave,rtarget,ruse=%r1
1589 ++559: __DECODE_RRR __THUNK_BRASL,\rsave,\rtarget,\ruse
1590 ++ .pushsection .s390_indirect_branches,"a",@progbits
1591 ++ .long 559b-.
1592 ++ .popsection
1593 ++ .endm
1594 ++
1595 ++#else
1596 ++ .macro GEN_BR_THUNK reg,ruse=%r1
1597 ++ .endm
1598 ++
1599 ++ .macro GEN_B_THUNK disp,reg,ruse=%r1
1600 ++ .endm
1601 ++
1602 ++ .macro BR_EX reg,ruse=%r1
1603 ++ br \reg
1604 ++ .endm
1605 ++
1606 ++ .macro B_EX disp,reg,ruse=%r1
1607 ++ b \disp(\reg)
1608 ++ .endm
1609 ++
1610 ++ .macro BASR_EX rsave,rtarget,ruse=%r1
1611 ++ basr \rsave,\rtarget
1612 ++ .endm
1613 ++#endif
1614 ++
1615 ++#endif /* __ASSEMBLY__ */
1616 ++
1617 ++#endif /* _ASM_S390_NOSPEC_ASM_H */
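
Note: this header centralizes the expoline ("execute-trampoline") Spectre mitigation that entry.S previously open-coded, which is why the entry.S hunk below deletes its local copies. GEN_BR_THUNK emits one merged comdat thunk per register combination, and BR_EX/B_EX/BASR_EX replace indirect br/b/basr instructions with a branch through that thunk, recording each call site in .s390_indirect_branches so the kernel can patch the direct branch back in when expolines are not needed. Usage, exactly as the base.S and entry.S hunks below show:

	GEN_BR_THUNK %r9	# emit thunks once per file
	GEN_BR_THUNK %r14

	BASR_EX	%r14,%r9	# expoline-safe replacement for: basr %r14,%r9
	BR_EX	%r14		# expoline-safe replacement for: br %r14
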
1618 +diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
1619 +index 8ccfbf22ecbb..c4d4d4ef5e58 100644
1620 +--- a/arch/s390/kernel/Makefile
1621 ++++ b/arch/s390/kernel/Makefile
1622 +@@ -49,6 +49,7 @@ obj-y += nospec-branch.o
1623 +
1624 + extra-y += head.o head64.o vmlinux.lds
1625 +
1626 ++obj-$(CONFIG_SYSFS) += nospec-sysfs.o
1627 + CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE)
1628 +
1629 + obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
1630 +diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
1631 +index 326f717df587..61fca549a93b 100644
1632 +--- a/arch/s390/kernel/base.S
1633 ++++ b/arch/s390/kernel/base.S
1634 +@@ -8,18 +8,22 @@
1635 +
1636 + #include <linux/linkage.h>
1637 + #include <asm/asm-offsets.h>
1638 ++#include <asm/nospec-insn.h>
1639 + #include <asm/ptrace.h>
1640 + #include <asm/sigp.h>
1641 +
1642 ++ GEN_BR_THUNK %r9
1643 ++ GEN_BR_THUNK %r14
1644 ++
1645 + ENTRY(s390_base_mcck_handler)
1646 + basr %r13,0
1647 + 0: lg %r15,__LC_PANIC_STACK # load panic stack
1648 + aghi %r15,-STACK_FRAME_OVERHEAD
1649 + larl %r1,s390_base_mcck_handler_fn
1650 +- lg %r1,0(%r1)
1651 +- ltgr %r1,%r1
1652 ++ lg %r9,0(%r1)
1653 ++ ltgr %r9,%r9
1654 + jz 1f
1655 +- basr %r14,%r1
1656 ++ BASR_EX %r14,%r9
1657 + 1: la %r1,4095
1658 + lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)
1659 + lpswe __LC_MCK_OLD_PSW
1660 +@@ -36,10 +40,10 @@ ENTRY(s390_base_ext_handler)
1661 + basr %r13,0
1662 + 0: aghi %r15,-STACK_FRAME_OVERHEAD
1663 + larl %r1,s390_base_ext_handler_fn
1664 +- lg %r1,0(%r1)
1665 +- ltgr %r1,%r1
1666 ++ lg %r9,0(%r1)
1667 ++ ltgr %r9,%r9
1668 + jz 1f
1669 +- basr %r14,%r1
1670 ++ BASR_EX %r14,%r9
1671 + 1: lmg %r0,%r15,__LC_SAVE_AREA_ASYNC
1672 + ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
1673 + lpswe __LC_EXT_OLD_PSW
1674 +@@ -56,10 +60,10 @@ ENTRY(s390_base_pgm_handler)
1675 + basr %r13,0
1676 + 0: aghi %r15,-STACK_FRAME_OVERHEAD
1677 + larl %r1,s390_base_pgm_handler_fn
1678 +- lg %r1,0(%r1)
1679 +- ltgr %r1,%r1
1680 ++ lg %r9,0(%r1)
1681 ++ ltgr %r9,%r9
1682 + jz 1f
1683 +- basr %r14,%r1
1684 ++ BASR_EX %r14,%r9
1685 + lmg %r0,%r15,__LC_SAVE_AREA_SYNC
1686 + lpswe __LC_PGM_OLD_PSW
1687 + 1: lpswe disabled_wait_psw-0b(%r13)
1688 +@@ -116,7 +120,7 @@ ENTRY(diag308_reset)
1689 + larl %r4,.Lcontinue_psw # Restore PSW flags
1690 + lpswe 0(%r4)
1691 + .Lcontinue:
1692 +- br %r14
1693 ++ BR_EX %r14
1694 + .align 16
1695 + .Lrestart_psw:
1696 + .long 0x00080000,0x80000000 + .Lrestart_part2
1697 +diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
1698 +index c63730326215..5416d5d68308 100644
1699 +--- a/arch/s390/kernel/entry.S
1700 ++++ b/arch/s390/kernel/entry.S
1701 +@@ -23,6 +23,7 @@
1702 + #include <asm/vx-insn.h>
1703 + #include <asm/setup.h>
1704 + #include <asm/nmi.h>
1705 ++#include <asm/nospec-insn.h>
1706 +
1707 + __PT_R0 = __PT_GPRS
1708 + __PT_R1 = __PT_GPRS + 8
1709 +@@ -225,74 +226,16 @@ _PIF_WORK = (_PIF_PER_TRAP)
1710 + .popsection
1711 + .endm
1712 +
1713 +-#ifdef CONFIG_EXPOLINE
1714 +-
1715 +- .macro GEN_BR_THUNK name,reg,tmp
1716 +- .section .text.\name,"axG",@progbits,\name,comdat
1717 +- .globl \name
1718 +- .hidden \name
1719 +- .type \name,@function
1720 +-\name:
1721 +- .cfi_startproc
1722 +-#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
1723 +- exrl 0,0f
1724 +-#else
1725 +- larl \tmp,0f
1726 +- ex 0,0(\tmp)
1727 +-#endif
1728 +- j .
1729 +-0: br \reg
1730 +- .cfi_endproc
1731 +- .endm
1732 +-
1733 +- GEN_BR_THUNK __s390x_indirect_jump_r1use_r9,%r9,%r1
1734 +- GEN_BR_THUNK __s390x_indirect_jump_r1use_r14,%r14,%r1
1735 +- GEN_BR_THUNK __s390x_indirect_jump_r11use_r14,%r14,%r11
1736 +-
1737 +- .macro BASR_R14_R9
1738 +-0: brasl %r14,__s390x_indirect_jump_r1use_r9
1739 +- .pushsection .s390_indirect_branches,"a",@progbits
1740 +- .long 0b-.
1741 +- .popsection
1742 +- .endm
1743 +-
1744 +- .macro BR_R1USE_R14
1745 +-0: jg __s390x_indirect_jump_r1use_r14
1746 +- .pushsection .s390_indirect_branches,"a",@progbits
1747 +- .long 0b-.
1748 +- .popsection
1749 +- .endm
1750 +-
1751 +- .macro BR_R11USE_R14
1752 +-0: jg __s390x_indirect_jump_r11use_r14
1753 +- .pushsection .s390_indirect_branches,"a",@progbits
1754 +- .long 0b-.
1755 +- .popsection
1756 +- .endm
1757 +-
1758 +-#else /* CONFIG_EXPOLINE */
1759 +-
1760 +- .macro BASR_R14_R9
1761 +- basr %r14,%r9
1762 +- .endm
1763 +-
1764 +- .macro BR_R1USE_R14
1765 +- br %r14
1766 +- .endm
1767 +-
1768 +- .macro BR_R11USE_R14
1769 +- br %r14
1770 +- .endm
1771 +-
1772 +-#endif /* CONFIG_EXPOLINE */
1773 +-
1774 ++ GEN_BR_THUNK %r9
1775 ++ GEN_BR_THUNK %r14
1776 ++ GEN_BR_THUNK %r14,%r11
1777 +
1778 + .section .kprobes.text, "ax"
1779 +
1780 + ENTRY(__bpon)
1781 + .globl __bpon
1782 + BPON
1783 +- BR_R1USE_R14
1784 ++ BR_EX %r14
1785 +
1786 + /*
1787 + * Scheduler resume function, called by switch_to
1788 +@@ -322,7 +265,7 @@ ENTRY(__switch_to)
1789 + TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
1790 + jz 0f
1791 + .insn s,0xb2800000,__LC_LPP # set program parameter
1792 +-0: BR_R1USE_R14
1793 ++0: BR_EX %r14
1794 +
1795 + .L__critical_start:
1796 +
1797 +@@ -388,7 +331,7 @@ sie_exit:
1798 + xgr %r5,%r5
1799 + lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
1800 + lg %r2,__SF_EMPTY+16(%r15) # return exit reason code
1801 +- BR_R1USE_R14
1802 ++ BR_EX %r14
1803 + .Lsie_fault:
1804 + lghi %r14,-EFAULT
1805 + stg %r14,__SF_EMPTY+16(%r15) # set exit reason code
1806 +@@ -445,7 +388,7 @@ ENTRY(system_call)
1807 + lgf %r9,0(%r8,%r10) # get system call add.
1808 + TSTMSK __TI_flags(%r12),_TIF_TRACE
1809 + jnz .Lsysc_tracesys
1810 +- BASR_R14_R9 # call sys_xxxx
1811 ++ BASR_EX %r14,%r9 # call sys_xxxx
1812 + stg %r2,__PT_R2(%r11) # store return value
1813 +
1814 + .Lsysc_return:
1815 +@@ -585,7 +528,7 @@ ENTRY(system_call)
1816 + lmg %r3,%r7,__PT_R3(%r11)
1817 + stg %r7,STACK_FRAME_OVERHEAD(%r15)
1818 + lg %r2,__PT_ORIG_GPR2(%r11)
1819 +- BASR_R14_R9 # call sys_xxx
1820 ++ BASR_EX %r14,%r9 # call sys_xxx
1821 + stg %r2,__PT_R2(%r11) # store return value
1822 + .Lsysc_tracenogo:
1823 + TSTMSK __TI_flags(%r12),_TIF_TRACE
1824 +@@ -609,7 +552,7 @@ ENTRY(ret_from_fork)
1825 + lmg %r9,%r10,__PT_R9(%r11) # load gprs
1826 + ENTRY(kernel_thread_starter)
1827 + la %r2,0(%r10)
1828 +- BASR_R14_R9
1829 ++ BASR_EX %r14,%r9
1830 + j .Lsysc_tracenogo
1831 +
1832 + /*
1833 +@@ -685,7 +628,7 @@ ENTRY(pgm_check_handler)
1834 + je .Lpgm_return
1835 + lgf %r9,0(%r10,%r1) # load address of handler routine
1836 + lgr %r2,%r11 # pass pointer to pt_regs
1837 +- BASR_R14_R9 # branch to interrupt-handler
1838 ++ BASR_EX %r14,%r9 # branch to interrupt-handler
1839 + .Lpgm_return:
1840 + LOCKDEP_SYS_EXIT
1841 + tm __PT_PSW+1(%r11),0x01 # returning to user ?
1842 +@@ -962,7 +905,7 @@ ENTRY(psw_idle)
1843 + stpt __TIMER_IDLE_ENTER(%r2)
1844 + .Lpsw_idle_lpsw:
1845 + lpswe __SF_EMPTY(%r15)
1846 +- BR_R1USE_R14
1847 ++ BR_EX %r14
1848 + .Lpsw_idle_end:
1849 +
1850 + /*
1851 +@@ -1007,7 +950,7 @@ ENTRY(save_fpu_regs)
1852 + .Lsave_fpu_regs_done:
1853 + oi __LC_CPU_FLAGS+7,_CIF_FPU
1854 + .Lsave_fpu_regs_exit:
1855 +- BR_R1USE_R14
1856 ++ BR_EX %r14
1857 + .Lsave_fpu_regs_end:
1858 +
1859 + /*
1860 +@@ -1054,7 +997,7 @@ load_fpu_regs:
1861 + .Lload_fpu_regs_done:
1862 + ni __LC_CPU_FLAGS+7,255-_CIF_FPU
1863 + .Lload_fpu_regs_exit:
1864 +- BR_R1USE_R14
1865 ++ BR_EX %r14
1866 + .Lload_fpu_regs_end:
1867 +
1868 + .L__critical_end:
1869 +@@ -1227,7 +1170,7 @@ cleanup_critical:
1870 + jl 0f
1871 + clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end
1872 + jl .Lcleanup_load_fpu_regs
1873 +-0: BR_R11USE_R14
1874 ++0: BR_EX %r14
1875 +
1876 + .align 8
1877 + .Lcleanup_table:
1878 +@@ -1257,7 +1200,7 @@ cleanup_critical:
1879 + ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
1880 + lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
1881 + larl %r9,sie_exit # skip forward to sie_exit
1882 +- BR_R11USE_R14
1883 ++ BR_EX %r14
1884 + #endif
1885 +
1886 + .Lcleanup_system_call:
1887 +@@ -1315,7 +1258,7 @@ cleanup_critical:
1888 + stg %r15,56(%r11) # r15 stack pointer
1889 + # set new psw address and exit
1890 + larl %r9,.Lsysc_do_svc
1891 +- BR_R11USE_R14
1892 ++ BR_EX %r14,%r11
1893 + .Lcleanup_system_call_insn:
1894 + .quad system_call
1895 + .quad .Lsysc_stmg
1896 +@@ -1325,7 +1268,7 @@ cleanup_critical:
1897 +
1898 + .Lcleanup_sysc_tif:
1899 + larl %r9,.Lsysc_tif
1900 +- BR_R11USE_R14
1901 ++ BR_EX %r14,%r11
1902 +
1903 + .Lcleanup_sysc_restore:
1904 + # check if stpt has been executed
1905 +@@ -1342,14 +1285,14 @@ cleanup_critical:
1906 + mvc 0(64,%r11),__PT_R8(%r9)
1907 + lmg %r0,%r7,__PT_R0(%r9)
1908 + 1: lmg %r8,%r9,__LC_RETURN_PSW
1909 +- BR_R11USE_R14
1910 ++ BR_EX %r14,%r11
1911 + .Lcleanup_sysc_restore_insn:
1912 + .quad .Lsysc_exit_timer
1913 + .quad .Lsysc_done - 4
1914 +
1915 + .Lcleanup_io_tif:
1916 + larl %r9,.Lio_tif
1917 +- BR_R11USE_R14
1918 ++ BR_EX %r14,%r11
1919 +
1920 + .Lcleanup_io_restore:
1921 + # check if stpt has been executed
1922 +@@ -1363,7 +1306,7 @@ cleanup_critical:
1923 + mvc 0(64,%r11),__PT_R8(%r9)
1924 + lmg %r0,%r7,__PT_R0(%r9)
1925 + 1: lmg %r8,%r9,__LC_RETURN_PSW
1926 +- BR_R11USE_R14
1927 ++ BR_EX %r14,%r11
1928 + .Lcleanup_io_restore_insn:
1929 + .quad .Lio_exit_timer
1930 + .quad .Lio_done - 4
1931 +@@ -1415,17 +1358,17 @@ cleanup_critical:
1932 + # prepare return psw
1933 + nihh %r8,0xfcfd # clear irq & wait state bits
1934 + lg %r9,48(%r11) # return from psw_idle
1935 +- BR_R11USE_R14
1936 ++ BR_EX %r14,%r11
1937 + .Lcleanup_idle_insn:
1938 + .quad .Lpsw_idle_lpsw
1939 +
1940 + .Lcleanup_save_fpu_regs:
1941 + larl %r9,save_fpu_regs
1942 +- BR_R11USE_R14
1943 ++ BR_EX %r14,%r11
1944 +
1945 + .Lcleanup_load_fpu_regs:
1946 + larl %r9,load_fpu_regs
1947 +- BR_R11USE_R14
1948 ++ BR_EX %r14,%r11
1949 +
1950 + /*
1951 + * Integer constants
1952 +diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
1953 +index f41d5208aaf7..590e9394b4dd 100644
1954 +--- a/arch/s390/kernel/irq.c
1955 ++++ b/arch/s390/kernel/irq.c
1956 +@@ -173,10 +173,9 @@ void do_softirq_own_stack(void)
1957 + new -= STACK_FRAME_OVERHEAD;
1958 + ((struct stack_frame *) new)->back_chain = old;
1959 + asm volatile(" la 15,0(%0)\n"
1960 +- " basr 14,%2\n"
1961 ++ " brasl 14,__do_softirq\n"
1962 + " la 15,0(%1)\n"
1963 +- : : "a" (new), "a" (old),
1964 +- "a" (__do_softirq)
1965 ++ : : "a" (new), "a" (old)
1966 + : "0", "1", "2", "3", "4", "5", "14",
1967 + "cc", "memory" );
1968 + } else {
1969 +diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
1970 +index 9f3b5b382743..d5eed651b5ab 100644
1971 +--- a/arch/s390/kernel/nospec-branch.c
1972 ++++ b/arch/s390/kernel/nospec-branch.c
1973 +@@ -44,24 +44,6 @@ static int __init nospec_report(void)
1974 + }
1975 + arch_initcall(nospec_report);
1976 +
1977 +-#ifdef CONFIG_SYSFS
1978 +-ssize_t cpu_show_spectre_v1(struct device *dev,
1979 +- struct device_attribute *attr, char *buf)
1980 +-{
1981 +- return sprintf(buf, "Mitigation: __user pointer sanitization\n");
1982 +-}
1983 +-
1984 +-ssize_t cpu_show_spectre_v2(struct device *dev,
1985 +- struct device_attribute *attr, char *buf)
1986 +-{
1987 +- if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
1988 +- return sprintf(buf, "Mitigation: execute trampolines\n");
1989 +- if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
1990 +- return sprintf(buf, "Mitigation: limited branch prediction.\n");
1991 +- return sprintf(buf, "Vulnerable\n");
1992 +-}
1993 +-#endif
1994 +-
1995 + #ifdef CONFIG_EXPOLINE
1996 +
1997 + int nospec_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF);
1998 +@@ -112,7 +94,6 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end)
1999 + s32 *epo;
2000 +
2001 + /* Second part of the instruction replace is always a nop */
2002 +- memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x00, 0x00 }, 4);
2003 + for (epo = start; epo < end; epo++) {
2004 + instr = (u8 *) epo + *epo;
2005 + if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04)
2006 +@@ -133,18 +114,34 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end)
2007 + br = thunk + (*(int *)(thunk + 2)) * 2;
2008 + else
2009 + continue;
2010 +- if (br[0] != 0x07 || (br[1] & 0xf0) != 0xf0)
2011 ++ /* Check for unconditional branch 0x07f? or 0x47f???? */
2012 ++ if ((br[0] & 0xbf) != 0x07 || (br[1] & 0xf0) != 0xf0)
2013 + continue;
2014 ++
2015 ++ memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x07, 0x00 }, 4);
2016 + switch (type) {
2017 + case BRCL_EXPOLINE:
2018 +- /* brcl to thunk, replace with br + nop */
2019 + insnbuf[0] = br[0];
2020 + insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
2021 ++ if (br[0] == 0x47) {
2022 ++ /* brcl to b, replace with bc + nopr */
2023 ++ insnbuf[2] = br[2];
2024 ++ insnbuf[3] = br[3];
2025 ++ } else {
2026 ++ /* brcl to br, replace with bcr + nop */
2027 ++ }
2028 + break;
2029 + case BRASL_EXPOLINE:
2030 +- /* brasl to thunk, replace with basr + nop */
2031 +- insnbuf[0] = 0x0d;
2032 + insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
2033 ++ if (br[0] == 0x47) {
2034 ++ /* brasl to b, replace with bas + nopr */
2035 ++ insnbuf[0] = 0x4d;
2036 ++ insnbuf[2] = br[2];
2037 ++ insnbuf[3] = br[3];
2038 ++ } else {
2039 ++ /* brasl to br, replace with basr + nop */
2040 ++ insnbuf[0] = 0x0d;
2041 ++ }
2042 + break;
2043 + }
2044 +
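
The widened opcode test above, (br[0] & 0xbf) != 0x07, accepts both exit forms a thunk can end in: the two-byte bcr/br (opcode 0x07) and the four-byte bc (opcode 0x47), since 0x47 & 0xbf == 0x07; the (br[1] & 0xf0) == 0xf0 part checks for condition mask 15, i.e. an unconditional branch. A minimal sketch of that predicate (illustrative helper, not part of the patch):

    #include <linux/types.h>

    /*
     * Illustrative helper, not part of the patch: true when the thunk
     * exit is an unconditional branch.  bcr 15,%rX encodes as 0x07fX
     * and bc 15,disp(%rX) as 0x47fX dddd; masking the opcode with
     * 0xbf folds 0x47 onto 0x07, and (br[1] & 0xf0) == 0xf0 checks
     * that the condition mask is 15 (always taken).
     */
    static bool is_uncond_branch(const u8 *br)
    {
            return (br[0] & 0xbf) == 0x07 && (br[1] & 0xf0) == 0xf0;
    }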
2045 +diff --git a/arch/s390/kernel/nospec-sysfs.c b/arch/s390/kernel/nospec-sysfs.c
2046 +new file mode 100644
2047 +index 000000000000..8affad5f18cb
2048 +--- /dev/null
2049 ++++ b/arch/s390/kernel/nospec-sysfs.c
2050 +@@ -0,0 +1,21 @@
2051 ++// SPDX-License-Identifier: GPL-2.0
2052 ++#include <linux/device.h>
2053 ++#include <linux/cpu.h>
2054 ++#include <asm/facility.h>
2055 ++#include <asm/nospec-branch.h>
2056 ++
2057 ++ssize_t cpu_show_spectre_v1(struct device *dev,
2058 ++ struct device_attribute *attr, char *buf)
2059 ++{
2060 ++ return sprintf(buf, "Mitigation: __user pointer sanitization\n");
2061 ++}
2062 ++
2063 ++ssize_t cpu_show_spectre_v2(struct device *dev,
2064 ++ struct device_attribute *attr, char *buf)
2065 ++{
2066 ++ if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
2067 ++ return sprintf(buf, "Mitigation: execute trampolines\n");
2068 ++ if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
2069 ++ return sprintf(buf, "Mitigation: limited branch prediction\n");
2070 ++ return sprintf(buf, "Vulnerable\n");
2071 ++}
2072 +diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
2073 +index 3d8da1e742c2..b79d51459cf2 100644
2074 +--- a/arch/s390/kernel/perf_cpum_sf.c
2075 ++++ b/arch/s390/kernel/perf_cpum_sf.c
2076 +@@ -744,6 +744,10 @@ static int __hw_perf_event_init(struct perf_event *event)
2077 + */
2078 + rate = 0;
2079 + if (attr->freq) {
2080 ++ if (!attr->sample_freq) {
2081 ++ err = -EINVAL;
2082 ++ goto out;
2083 ++ }
2084 + rate = freq_to_sample_rate(&si, attr->sample_freq);
2085 + rate = hw_limit_rate(&si, rate);
2086 + attr->freq = 0;
2087 +diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
2088 +index 52aab0bd84f8..6b1b91c17b40 100644
2089 +--- a/arch/s390/kernel/reipl.S
2090 ++++ b/arch/s390/kernel/reipl.S
2091 +@@ -6,8 +6,11 @@
2092 +
2093 + #include <linux/linkage.h>
2094 + #include <asm/asm-offsets.h>
2095 ++#include <asm/nospec-insn.h>
2096 + #include <asm/sigp.h>
2097 +
2098 ++ GEN_BR_THUNK %r14
2099 ++
2100 + #
2101 + # store_status
2102 + #
2103 +@@ -62,7 +65,7 @@ ENTRY(store_status)
2104 + st %r3,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 4(%r1)
2105 + larl %r2,store_status
2106 + stg %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1)
2107 +- br %r14
2108 ++ BR_EX %r14
2109 +
2110 + .section .bss
2111 + .align 8
2112 +diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S
2113 +index 2d6b6e81f812..60a829c77378 100644
2114 +--- a/arch/s390/kernel/swsusp.S
2115 ++++ b/arch/s390/kernel/swsusp.S
2116 +@@ -12,6 +12,7 @@
2117 + #include <asm/ptrace.h>
2118 + #include <asm/thread_info.h>
2119 + #include <asm/asm-offsets.h>
2120 ++#include <asm/nospec-insn.h>
2121 + #include <asm/sigp.h>
2122 +
2123 + /*
2124 +@@ -23,6 +24,8 @@
2125 + * (see below) in the resume process.
2126 + * This function runs with disabled interrupts.
2127 + */
2128 ++ GEN_BR_THUNK %r14
2129 ++
2130 + .section .text
2131 + ENTRY(swsusp_arch_suspend)
2132 + stmg %r6,%r15,__SF_GPRS(%r15)
2133 +@@ -102,7 +105,7 @@ ENTRY(swsusp_arch_suspend)
2134 + spx 0x318(%r1)
2135 + lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
2136 + lghi %r2,0
2137 +- br %r14
2138 ++ BR_EX %r14
2139 +
2140 + /*
2141 + * Restore saved memory image to correct place and restore register context.
2142 +@@ -196,11 +199,10 @@ pgm_check_entry:
2143 + larl %r15,init_thread_union
2144 + ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER)
2145 + larl %r2,.Lpanic_string
2146 +- larl %r3,_sclp_print_early
2147 + lghi %r1,0
2148 + sam31
2149 + sigp %r1,%r0,SIGP_SET_ARCHITECTURE
2150 +- basr %r14,%r3
2151 ++ brasl %r14,_sclp_print_early
2152 + larl %r3,.Ldisabled_wait_31
2153 + lpsw 0(%r3)
2154 + 4:
2155 +@@ -266,7 +268,7 @@ restore_registers:
2156 + /* Return 0 */
2157 + lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
2158 + lghi %r2,0
2159 +- br %r14
2160 ++ BR_EX %r14
2161 +
2162 + .section .data..nosave,"aw",@progbits
2163 + .align 8
2164 +diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S
2165 +index c6d553e85ab1..16c5998b9792 100644
2166 +--- a/arch/s390/lib/mem.S
2167 ++++ b/arch/s390/lib/mem.S
2168 +@@ -5,6 +5,9 @@
2169 + */
2170 +
2171 + #include <linux/linkage.h>
2172 ++#include <asm/nospec-insn.h>
2173 ++
2174 ++ GEN_BR_THUNK %r14
2175 +
2176 + /*
2177 + * memset implementation
2178 +@@ -38,7 +41,7 @@ ENTRY(memset)
2179 + .Lmemset_clear_rest:
2180 + larl %r3,.Lmemset_xc
2181 + ex %r4,0(%r3)
2182 +- br %r14
2183 ++ BR_EX %r14
2184 + .Lmemset_fill:
2185 + stc %r3,0(%r2)
2186 + cghi %r4,1
2187 +@@ -55,7 +58,7 @@ ENTRY(memset)
2188 + .Lmemset_fill_rest:
2189 + larl %r3,.Lmemset_mvc
2190 + ex %r4,0(%r3)
2191 +- br %r14
2192 ++ BR_EX %r14
2193 + .Lmemset_xc:
2194 + xc 0(1,%r1),0(%r1)
2195 + .Lmemset_mvc:
2196 +@@ -77,7 +80,7 @@ ENTRY(memcpy)
2197 + .Lmemcpy_rest:
2198 + larl %r5,.Lmemcpy_mvc
2199 + ex %r4,0(%r5)
2200 +- br %r14
2201 ++ BR_EX %r14
2202 + .Lmemcpy_loop:
2203 + mvc 0(256,%r1),0(%r3)
2204 + la %r1,256(%r1)
2205 +diff --git a/arch/s390/net/bpf_jit.S b/arch/s390/net/bpf_jit.S
2206 +index a1c917d881ec..fa716f2a95a7 100644
2207 +--- a/arch/s390/net/bpf_jit.S
2208 ++++ b/arch/s390/net/bpf_jit.S
2209 +@@ -8,6 +8,7 @@
2210 + */
2211 +
2212 + #include <linux/linkage.h>
2213 ++#include <asm/nospec-insn.h>
2214 + #include "bpf_jit.h"
2215 +
2216 + /*
2217 +@@ -53,7 +54,7 @@ ENTRY(sk_load_##NAME##_pos); \
2218 + clg %r3,STK_OFF_HLEN(%r15); /* Offset + SIZE > hlen? */ \
2219 + jh sk_load_##NAME##_slow; \
2220 + LOAD %r14,-SIZE(%r3,%r12); /* Get data from skb */ \
2221 +- b OFF_OK(%r6); /* Return */ \
2222 ++ B_EX OFF_OK,%r6; /* Return */ \
2223 + \
2224 + sk_load_##NAME##_slow:; \
2225 + lgr %r2,%r7; /* Arg1 = skb pointer */ \
2226 +@@ -63,11 +64,14 @@ sk_load_##NAME##_slow:; \
2227 + brasl %r14,skb_copy_bits; /* Get data from skb */ \
2228 + LOAD %r14,STK_OFF_TMP(%r15); /* Load from temp buffer */ \
2229 + ltgr %r2,%r2; /* Set cc to (%r2 != 0) */ \
2230 +- br %r6; /* Return */
2231 ++ BR_EX %r6; /* Return */
2232 +
2233 + sk_load_common(word, 4, llgf) /* r14 = *(u32 *) (skb->data+offset) */
2234 + sk_load_common(half, 2, llgh) /* r14 = *(u16 *) (skb->data+offset) */
2235 +
2236 ++ GEN_BR_THUNK %r6
2237 ++ GEN_B_THUNK OFF_OK,%r6
2238 ++
2239 + /*
2240 + * Load 1 byte from SKB (optimized version)
2241 + */
2242 +@@ -79,7 +83,7 @@ ENTRY(sk_load_byte_pos)
2243 + clg %r3,STK_OFF_HLEN(%r15) # Offset >= hlen?
2244 + jnl sk_load_byte_slow
2245 + llgc %r14,0(%r3,%r12) # Get byte from skb
2246 +- b OFF_OK(%r6) # Return OK
2247 ++ B_EX OFF_OK,%r6 # Return OK
2248 +
2249 + sk_load_byte_slow:
2250 + lgr %r2,%r7 # Arg1 = skb pointer
2251 +@@ -89,7 +93,7 @@ sk_load_byte_slow:
2252 + brasl %r14,skb_copy_bits # Get data from skb
2253 + llgc %r14,STK_OFF_TMP(%r15) # Load result from temp buffer
2254 + ltgr %r2,%r2 # Set cc to (%r2 != 0)
2255 +- br %r6 # Return cc
2256 ++ BR_EX %r6 # Return cc
2257 +
2258 + #define sk_negative_common(NAME, SIZE, LOAD) \
2259 + sk_load_##NAME##_slow_neg:; \
2260 +@@ -103,7 +107,7 @@ sk_load_##NAME##_slow_neg:; \
2261 + jz bpf_error; \
2262 + LOAD %r14,0(%r2); /* Get data from pointer */ \
2263 + xr %r3,%r3; /* Set cc to zero */ \
2264 +- br %r6; /* Return cc */
2265 ++ BR_EX %r6; /* Return cc */
2266 +
2267 + sk_negative_common(word, 4, llgf)
2268 + sk_negative_common(half, 2, llgh)
2269 +@@ -112,4 +116,4 @@ sk_negative_common(byte, 1, llgc)
2270 + bpf_error:
2271 + # force a return 0 from jit handler
2272 + ltgr %r15,%r15 # Set condition code
2273 +- br %r6
2274 ++ BR_EX %r6
2275 +diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
2276 +index 1395eeb6005f..a26528afceb2 100644
2277 +--- a/arch/s390/net/bpf_jit_comp.c
2278 ++++ b/arch/s390/net/bpf_jit_comp.c
2279 +@@ -24,6 +24,8 @@
2280 + #include <linux/bpf.h>
2281 + #include <asm/cacheflush.h>
2282 + #include <asm/dis.h>
2283 ++#include <asm/facility.h>
2284 ++#include <asm/nospec-branch.h>
2285 + #include "bpf_jit.h"
2286 +
2287 + int bpf_jit_enable __read_mostly;
2288 +@@ -41,6 +43,8 @@ struct bpf_jit {
2289 + int base_ip; /* Base address for literal pool */
2290 + int ret0_ip; /* Address of return 0 */
2291 + int exit_ip; /* Address of exit */
2292 ++ int r1_thunk_ip; /* Address of expoline thunk for 'br %r1' */
2293 ++ int r14_thunk_ip; /* Address of expoline thunk for 'br %r14' */
2294 + int tail_call_start; /* Tail call start offset */
2295 + int labels[1]; /* Labels for local jumps */
2296 + };
2297 +@@ -248,6 +252,19 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
2298 + REG_SET_SEEN(b2); \
2299 + })
2300 +
2301 ++#define EMIT6_PCREL_RILB(op, b, target) \
2302 ++({ \
2303 ++ int rel = (target - jit->prg) / 2; \
2304 ++ _EMIT6(op | reg_high(b) << 16 | rel >> 16, rel & 0xffff); \
2305 ++ REG_SET_SEEN(b); \
2306 ++})
2307 ++
2308 ++#define EMIT6_PCREL_RIL(op, target) \
2309 ++({ \
2310 ++ int rel = (target - jit->prg) / 2; \
2311 ++ _EMIT6(op | rel >> 16, rel & 0xffff); \
2312 ++})
2313 ++
2314 + #define _EMIT6_IMM(op, imm) \
2315 + ({ \
2316 + unsigned int __imm = (imm); \
2317 +@@ -475,8 +492,45 @@ static void bpf_jit_epilogue(struct bpf_jit *jit)
2318 + EMIT4(0xb9040000, REG_2, BPF_REG_0);
2319 + /* Restore registers */
2320 + save_restore_regs(jit, REGS_RESTORE);
2321 ++ if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) {
2322 ++ jit->r14_thunk_ip = jit->prg;
2323 ++ /* Generate __s390_indirect_jump_r14 thunk */
2324 ++ if (test_facility(35)) {
2325 ++ /* exrl %r0,.+10 */
2326 ++ EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
2327 ++ } else {
2328 ++ /* larl %r1,.+14 */
2329 ++ EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
2330 ++ /* ex 0,0(%r1) */
2331 ++ EMIT4_DISP(0x44000000, REG_0, REG_1, 0);
2332 ++ }
2333 ++ /* j . */
2334 ++ EMIT4_PCREL(0xa7f40000, 0);
2335 ++ }
2336 + /* br %r14 */
2337 + _EMIT2(0x07fe);
2338 ++
2339 ++ if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable &&
2340 ++ (jit->seen & SEEN_FUNC)) {
2341 ++ jit->r1_thunk_ip = jit->prg;
2342 ++ /* Generate __s390_indirect_jump_r1 thunk */
2343 ++ if (test_facility(35)) {
2344 ++ /* exrl %r0,.+10 */
2345 ++ EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
2346 ++ /* j . */
2347 ++ EMIT4_PCREL(0xa7f40000, 0);
2348 ++ /* br %r1 */
2349 ++ _EMIT2(0x07f1);
2350 ++ } else {
2351 ++ /* larl %r1,.+14 */
2352 ++ EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
2353 ++ /* ex 0,S390_lowcore.br_r1_trampoline */
2354 ++ EMIT4_DISP(0x44000000, REG_0, REG_0,
2355 ++ offsetof(struct _lowcore, br_r1_trampoline));
2356 ++ /* j . */
2357 ++ EMIT4_PCREL(0xa7f40000, 0);
2358 ++ }
2359 ++ }
2360 + }
2361 +
2362 + /*
2363 +@@ -980,8 +1034,13 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
2364 + /* lg %w1,<d(imm)>(%l) */
2365 + EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
2366 + EMIT_CONST_U64(func));
2367 +- /* basr %r14,%w1 */
2368 +- EMIT2(0x0d00, REG_14, REG_W1);
2369 ++ if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) {
2370 ++ /* brasl %r14,__s390_indirect_jump_r1 */
2371 ++ EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
2372 ++ } else {
2373 ++ /* basr %r14,%w1 */
2374 ++ EMIT2(0x0d00, REG_14, REG_W1);
2375 ++ }
2376 + /* lgr %b0,%r2: load return value into %b0 */
2377 + EMIT4(0xb9040000, BPF_REG_0, REG_2);
2378 + if (bpf_helper_changes_skb_data((void *)func)) {
2379 +diff --git a/arch/sh/include/asm/futex.h b/arch/sh/include/asm/futex.h
2380 +index 7be39a646fbd..e05187d26d76 100644
2381 +--- a/arch/sh/include/asm/futex.h
2382 ++++ b/arch/sh/include/asm/futex.h
2383 +@@ -10,20 +10,11 @@
2384 + /* XXX: UP variants, fix for SH-4A and SMP.. */
2385 + #include <asm/futex-irq.h>
2386 +
2387 +-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
2388 ++static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval,
2389 ++ u32 __user *uaddr)
2390 + {
2391 +- int op = (encoded_op >> 28) & 7;
2392 +- int cmp = (encoded_op >> 24) & 15;
2393 +- int oparg = (encoded_op << 8) >> 20;
2394 +- int cmparg = (encoded_op << 20) >> 20;
2395 + int oldval = 0, ret;
2396 +
2397 +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
2398 +- oparg = 1 << oparg;
2399 +-
2400 +- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
2401 +- return -EFAULT;
2402 +-
2403 + pagefault_disable();
2404 +
2405 + switch (op) {
2406 +@@ -49,17 +40,8 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
2407 +
2408 + pagefault_enable();
2409 +
2410 +- if (!ret) {
2411 +- switch (cmp) {
2412 +- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
2413 +- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
2414 +- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
2415 +- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
2416 +- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
2417 +- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
2418 +- default: ret = -ENOSYS;
2419 +- }
2420 +- }
2421 ++ if (!ret)
2422 ++ *oval = oldval;
2423 +
2424 + return ret;
2425 + }
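
The sh hunk above and the sparc, tile, x86 and xtensa hunks below all delete the same boilerplate: decoding encoded_op and running the comparison switch. That logic now lives once in the common futex code, which calls the slimmed-down arch_futex_atomic_op_inuser() and applies the comparison itself. A sketch of the common-code side, paraphrased from the fragments removed here rather than quoted from kernel/futex.c:

    #include <linux/futex.h>
    #include <linux/uaccess.h>

    /*
     * Sketch of the common caller implied by this refactor; the field
     * widths and sign extension follow the decode logic deleted from
     * the arch headers, not the exact kernel/futex.c text.
     */
    static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
    {
            unsigned int op  = (encoded_op >> 28) & 7;
            unsigned int cmp = (encoded_op >> 24) & 15;
            int oparg  = (int)(encoded_op << 8) >> 20;  /* sign-extended */
            int cmparg = (int)(encoded_op << 20) >> 20; /* sign-extended */
            int oldval, ret;

            if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                    oparg = 1 << oparg;

            if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                    return -EFAULT;

            /* Arch code now only does the atomic op and reports oldval. */
            ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
            if (ret)
                    return ret;

            switch (cmp) {
            case FUTEX_OP_CMP_EQ: return oldval == cmparg;
            case FUTEX_OP_CMP_NE: return oldval != cmparg;
            case FUTEX_OP_CMP_LT: return oldval <  cmparg;
            case FUTEX_OP_CMP_GE: return oldval >= cmparg;
            case FUTEX_OP_CMP_LE: return oldval <= cmparg;
            case FUTEX_OP_CMP_GT: return oldval >  cmparg;
            default:              return -ENOSYS;
            }
    }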
2426 +diff --git a/arch/sparc/include/asm/futex_64.h b/arch/sparc/include/asm/futex_64.h
2427 +index 4e899b0dabf7..1cfd89d92208 100644
2428 +--- a/arch/sparc/include/asm/futex_64.h
2429 ++++ b/arch/sparc/include/asm/futex_64.h
2430 +@@ -29,22 +29,14 @@
2431 + : "r" (uaddr), "r" (oparg), "i" (-EFAULT) \
2432 + : "memory")
2433 +
2434 +-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
2435 ++static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
2436 ++ u32 __user *uaddr)
2437 + {
2438 +- int op = (encoded_op >> 28) & 7;
2439 +- int cmp = (encoded_op >> 24) & 15;
2440 +- int oparg = (encoded_op << 8) >> 20;
2441 +- int cmparg = (encoded_op << 20) >> 20;
2442 + int oldval = 0, ret, tem;
2443 +
2444 +- if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
2445 +- return -EFAULT;
2446 + if (unlikely((((unsigned long) uaddr) & 0x3UL)))
2447 + return -EINVAL;
2448 +
2449 +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
2450 +- oparg = 1 << oparg;
2451 +-
2452 + pagefault_disable();
2453 +
2454 + switch (op) {
2455 +@@ -69,17 +61,9 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
2456 +
2457 + pagefault_enable();
2458 +
2459 +- if (!ret) {
2460 +- switch (cmp) {
2461 +- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
2462 +- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
2463 +- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
2464 +- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
2465 +- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
2466 +- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
2467 +- default: ret = -ENOSYS;
2468 +- }
2469 +- }
2470 ++ if (!ret)
2471 ++ *oval = oldval;
2472 ++
2473 + return ret;
2474 + }
2475 +
2476 +diff --git a/arch/tile/include/asm/futex.h b/arch/tile/include/asm/futex.h
2477 +index 1a6ef1b69cb1..d96d9dab5c0b 100644
2478 +--- a/arch/tile/include/asm/futex.h
2479 ++++ b/arch/tile/include/asm/futex.h
2480 +@@ -106,12 +106,9 @@
2481 + lock = __atomic_hashed_lock((int __force *)uaddr)
2482 + #endif
2483 +
2484 +-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
2485 ++static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval,
2486 ++ u32 __user *uaddr)
2487 + {
2488 +- int op = (encoded_op >> 28) & 7;
2489 +- int cmp = (encoded_op >> 24) & 15;
2490 +- int oparg = (encoded_op << 8) >> 20;
2491 +- int cmparg = (encoded_op << 20) >> 20;
2492 + int uninitialized_var(val), ret;
2493 +
2494 + __futex_prolog();
2495 +@@ -119,12 +116,6 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
2496 + /* The 32-bit futex code makes this assumption, so validate it here. */
2497 + BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
2498 +
2499 +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
2500 +- oparg = 1 << oparg;
2501 +-
2502 +- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
2503 +- return -EFAULT;
2504 +-
2505 + pagefault_disable();
2506 + switch (op) {
2507 + case FUTEX_OP_SET:
2508 +@@ -148,30 +139,9 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
2509 + }
2510 + pagefault_enable();
2511 +
2512 +- if (!ret) {
2513 +- switch (cmp) {
2514 +- case FUTEX_OP_CMP_EQ:
2515 +- ret = (val == cmparg);
2516 +- break;
2517 +- case FUTEX_OP_CMP_NE:
2518 +- ret = (val != cmparg);
2519 +- break;
2520 +- case FUTEX_OP_CMP_LT:
2521 +- ret = (val < cmparg);
2522 +- break;
2523 +- case FUTEX_OP_CMP_GE:
2524 +- ret = (val >= cmparg);
2525 +- break;
2526 +- case FUTEX_OP_CMP_LE:
2527 +- ret = (val <= cmparg);
2528 +- break;
2529 +- case FUTEX_OP_CMP_GT:
2530 +- ret = (val > cmparg);
2531 +- break;
2532 +- default:
2533 +- ret = -ENOSYS;
2534 +- }
2535 +- }
2536 ++ if (!ret)
2537 ++ *oval = val;
2538 ++
2539 + return ret;
2540 + }
2541 +
2542 +diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
2543 +index 583d539a4197..2bc6651791cc 100644
2544 +--- a/arch/x86/boot/compressed/eboot.c
2545 ++++ b/arch/x86/boot/compressed/eboot.c
2546 +@@ -364,7 +364,8 @@ __setup_efi_pci32(efi_pci_io_protocol_32 *pci, struct pci_setup_rom **__rom)
2547 + if (status != EFI_SUCCESS)
2548 + goto free_struct;
2549 +
2550 +- memcpy(rom->romdata, pci->romimage, pci->romsize);
2551 ++ memcpy(rom->romdata, (void *)(unsigned long)pci->romimage,
2552 ++ pci->romsize);
2553 + return status;
2554 +
2555 + free_struct:
2556 +@@ -470,7 +471,8 @@ __setup_efi_pci64(efi_pci_io_protocol_64 *pci, struct pci_setup_rom **__rom)
2557 + if (status != EFI_SUCCESS)
2558 + goto free_struct;
2559 +
2560 +- memcpy(rom->romdata, pci->romimage, pci->romsize);
2561 ++ memcpy(rom->romdata, (void *)(unsigned long)pci->romimage,
2562 ++ pci->romsize);
2563 + return status;
2564 +
2565 + free_struct:
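
Both eboot.c hunks fix the same conversion: romimage in the EFI PCI I/O protocol is an integer field holding a physical address, not a pointer, so it cannot be passed to memcpy() directly. The idiom, shown with hypothetical type names:

    #include <linux/string.h>
    #include <linux/types.h>

    /* Hypothetical stand-in for the EFI protocol struct: romimage is a
     * raw physical address stored in an integer field. */
    struct pci_rom_proto {
            u32 romimage;
            u32 romsize;
    };

    static void copy_option_rom(void *dst, const struct pci_rom_proto *pci)
    {
            /* Widen through unsigned long so the integer-to-pointer cast
             * matches the pointer width on 32- and 64-bit builds alike. */
            memcpy(dst, (void *)(unsigned long)pci->romimage, pci->romsize);
    }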
2566 +diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
2567 +index b4c1f5453436..f4dc9b63bdda 100644
2568 +--- a/arch/x86/include/asm/futex.h
2569 ++++ b/arch/x86/include/asm/futex.h
2570 +@@ -41,20 +41,11 @@
2571 + "+m" (*uaddr), "=&r" (tem) \
2572 + : "r" (oparg), "i" (-EFAULT), "1" (0))
2573 +
2574 +-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
2575 ++static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
2576 ++ u32 __user *uaddr)
2577 + {
2578 +- int op = (encoded_op >> 28) & 7;
2579 +- int cmp = (encoded_op >> 24) & 15;
2580 +- int oparg = (encoded_op << 8) >> 20;
2581 +- int cmparg = (encoded_op << 20) >> 20;
2582 + int oldval = 0, ret, tem;
2583 +
2584 +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
2585 +- oparg = 1 << oparg;
2586 +-
2587 +- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
2588 +- return -EFAULT;
2589 +-
2590 + pagefault_disable();
2591 +
2592 + switch (op) {
2593 +@@ -80,30 +71,9 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
2594 +
2595 + pagefault_enable();
2596 +
2597 +- if (!ret) {
2598 +- switch (cmp) {
2599 +- case FUTEX_OP_CMP_EQ:
2600 +- ret = (oldval == cmparg);
2601 +- break;
2602 +- case FUTEX_OP_CMP_NE:
2603 +- ret = (oldval != cmparg);
2604 +- break;
2605 +- case FUTEX_OP_CMP_LT:
2606 +- ret = (oldval < cmparg);
2607 +- break;
2608 +- case FUTEX_OP_CMP_GE:
2609 +- ret = (oldval >= cmparg);
2610 +- break;
2611 +- case FUTEX_OP_CMP_LE:
2612 +- ret = (oldval <= cmparg);
2613 +- break;
2614 +- case FUTEX_OP_CMP_GT:
2615 +- ret = (oldval > cmparg);
2616 +- break;
2617 +- default:
2618 +- ret = -ENOSYS;
2619 +- }
2620 +- }
2621 ++ if (!ret)
2622 ++ *oval = oldval;
2623 ++
2624 + return ret;
2625 + }
2626 +
2627 +diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
2628 +index 469b23d6acc2..fd7e9937ddd6 100644
2629 +--- a/arch/x86/kernel/machine_kexec_32.c
2630 ++++ b/arch/x86/kernel/machine_kexec_32.c
2631 +@@ -71,12 +71,17 @@ static void load_segments(void)
2632 + static void machine_kexec_free_page_tables(struct kimage *image)
2633 + {
2634 + free_page((unsigned long)image->arch.pgd);
2635 ++ image->arch.pgd = NULL;
2636 + #ifdef CONFIG_X86_PAE
2637 + free_page((unsigned long)image->arch.pmd0);
2638 ++ image->arch.pmd0 = NULL;
2639 + free_page((unsigned long)image->arch.pmd1);
2640 ++ image->arch.pmd1 = NULL;
2641 + #endif
2642 + free_page((unsigned long)image->arch.pte0);
2643 ++ image->arch.pte0 = NULL;
2644 + free_page((unsigned long)image->arch.pte1);
2645 ++ image->arch.pte1 = NULL;
2646 + }
2647 +
2648 + static int machine_kexec_alloc_page_tables(struct kimage *image)
2649 +@@ -93,7 +98,6 @@ static int machine_kexec_alloc_page_tables(struct kimage *image)
2650 + !image->arch.pmd0 || !image->arch.pmd1 ||
2651 + #endif
2652 + !image->arch.pte0 || !image->arch.pte1) {
2653 +- machine_kexec_free_page_tables(image);
2654 + return -ENOMEM;
2655 + }
2656 + return 0;
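
This hunk and the machine_kexec_64.c hunk below share one pattern: each free_page() now clears the pointer it freed, which makes the teardown helper safe to run on a partially allocated image (free_page() ignores a zero address), so the error paths can drop their direct cleanup calls and leave it to the caller's normal teardown. The idiom, sketched with a hypothetical structure:

    #include <linux/gfp.h>

    /* Hypothetical structure standing in for the arch kimage fields. */
    struct kimage_like {
            void *pgd;
    };

    /*
     * free_pages() treats address 0 as a no-op, so clearing the member
     * after the free makes this helper idempotent: it may run on a
     * partially allocated image, or run twice, without double-freeing.
     */
    static void image_free_pgd(struct kimage_like *image)
    {
            free_page((unsigned long)image->pgd);
            image->pgd = NULL;
    }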
2657 +diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
2658 +index ca6e65250b1a..13d6b8ac0b0b 100644
2659 +--- a/arch/x86/kernel/machine_kexec_64.c
2660 ++++ b/arch/x86/kernel/machine_kexec_64.c
2661 +@@ -37,8 +37,11 @@ static struct kexec_file_ops *kexec_file_loaders[] = {
2662 + static void free_transition_pgtable(struct kimage *image)
2663 + {
2664 + free_page((unsigned long)image->arch.pud);
2665 ++ image->arch.pud = NULL;
2666 + free_page((unsigned long)image->arch.pmd);
2667 ++ image->arch.pmd = NULL;
2668 + free_page((unsigned long)image->arch.pte);
2669 ++ image->arch.pte = NULL;
2670 + }
2671 +
2672 + static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
2673 +@@ -79,7 +82,6 @@ static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
2674 + set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
2675 + return 0;
2676 + err:
2677 +- free_transition_pgtable(image);
2678 + return result;
2679 + }
2680 +
2681 +diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
2682 +index 63146c378f1e..2b05f681a1fd 100644
2683 +--- a/arch/x86/xen/mmu.c
2684 ++++ b/arch/x86/xen/mmu.c
2685 +@@ -1316,8 +1316,6 @@ void xen_flush_tlb_all(void)
2686 + struct mmuext_op *op;
2687 + struct multicall_space mcs;
2688 +
2689 +- trace_xen_mmu_flush_tlb_all(0);
2690 +-
2691 + preempt_disable();
2692 +
2693 + mcs = xen_mc_entry(sizeof(*op));
2694 +@@ -1335,8 +1333,6 @@ static void xen_flush_tlb(void)
2695 + struct mmuext_op *op;
2696 + struct multicall_space mcs;
2697 +
2698 +- trace_xen_mmu_flush_tlb(0);
2699 +-
2700 + preempt_disable();
2701 +
2702 + mcs = xen_mc_entry(sizeof(*op));
2703 +diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h
2704 +index 72bfc1cbc2b5..5bfbc1c401d4 100644
2705 +--- a/arch/xtensa/include/asm/futex.h
2706 ++++ b/arch/xtensa/include/asm/futex.h
2707 +@@ -44,18 +44,10 @@
2708 + : "r" (uaddr), "I" (-EFAULT), "r" (oparg) \
2709 + : "memory")
2710 +
2711 +-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
2712 ++static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
2713 ++ u32 __user *uaddr)
2714 + {
2715 +- int op = (encoded_op >> 28) & 7;
2716 +- int cmp = (encoded_op >> 24) & 15;
2717 +- int oparg = (encoded_op << 8) >> 20;
2718 +- int cmparg = (encoded_op << 20) >> 20;
2719 + int oldval = 0, ret;
2720 +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
2721 +- oparg = 1 << oparg;
2722 +-
2723 +- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
2724 +- return -EFAULT;
2725 +
2726 + #if !XCHAL_HAVE_S32C1I
2727 + return -ENOSYS;
2728 +@@ -89,19 +81,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
2729 +
2730 + pagefault_enable();
2731 +
2732 +- if (ret)
2733 +- return ret;
2734 ++ if (!ret)
2735 ++ *oval = oldval;
2736 +
2737 +- switch (cmp) {
2738 +- case FUTEX_OP_CMP_EQ: return (oldval == cmparg);
2739 +- case FUTEX_OP_CMP_NE: return (oldval != cmparg);
2740 +- case FUTEX_OP_CMP_LT: return (oldval < cmparg);
2741 +- case FUTEX_OP_CMP_GE: return (oldval >= cmparg);
2742 +- case FUTEX_OP_CMP_LE: return (oldval <= cmparg);
2743 +- case FUTEX_OP_CMP_GT: return (oldval > cmparg);
2744 +- }
2745 +-
2746 +- return -ENOSYS;
2747 ++ return ret;
2748 + }
2749 +
2750 + static inline int
2751 +diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
2752 +index 7ff8b15a3422..88728d997088 100644
2753 +--- a/drivers/cpufreq/intel_pstate.c
2754 ++++ b/drivers/cpufreq/intel_pstate.c
2755 +@@ -1361,6 +1361,11 @@ static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
2756 + static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
2757 + #endif /* CONFIG_ACPI */
2758 +
2759 ++static const struct x86_cpu_id hwp_support_ids[] __initconst = {
2760 ++ { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
2761 ++ {}
2762 ++};
2763 ++
2764 + static int __init intel_pstate_init(void)
2765 + {
2766 + int cpu, rc = 0;
2767 +@@ -1370,17 +1375,16 @@ static int __init intel_pstate_init(void)
2768 + if (no_load)
2769 + return -ENODEV;
2770 +
2771 ++ if (x86_match_cpu(hwp_support_ids) && !no_hwp) {
2772 ++ copy_cpu_funcs(&core_params.funcs);
2773 ++ hwp_active++;
2774 ++ goto hwp_cpu_matched;
2775 ++ }
2776 ++
2777 + id = x86_match_cpu(intel_pstate_cpu_ids);
2778 + if (!id)
2779 + return -ENODEV;
2780 +
2781 +- /*
2782 +- * The Intel pstate driver will be ignored if the platform
2783 +- * firmware has its own power management modes.
2784 +- */
2785 +- if (intel_pstate_platform_pwr_mgmt_exists())
2786 +- return -ENODEV;
2787 +-
2788 + cpu_def = (struct cpu_defaults *)id->driver_data;
2789 +
2790 + copy_pid_params(&cpu_def->pid_policy);
2791 +@@ -1389,17 +1393,20 @@ static int __init intel_pstate_init(void)
2792 + if (intel_pstate_msrs_not_valid())
2793 + return -ENODEV;
2794 +
2795 ++hwp_cpu_matched:
2796 ++ /*
2797 ++ * The Intel pstate driver will be ignored if the platform
2798 ++ * firmware has its own power management modes.
2799 ++ */
2800 ++ if (intel_pstate_platform_pwr_mgmt_exists())
2801 ++ return -ENODEV;
2802 ++
2803 + pr_info("Intel P-state driver initializing.\n");
2804 +
2805 + all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
2806 + if (!all_cpu_data)
2807 + return -ENOMEM;
2808 +
2809 +- if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp) {
2810 +- pr_info("intel_pstate: HWP enabled\n");
2811 +- hwp_active++;
2812 +- }
2813 +-
2814 + if (!hwp_active && hwp_only)
2815 + goto out;
2816 +
2817 +@@ -1410,6 +1417,9 @@ static int __init intel_pstate_init(void)
2818 + intel_pstate_debug_expose_params();
2819 + intel_pstate_sysfs_expose_params();
2820 +
2821 ++ if (hwp_active)
2822 ++ pr_info("intel_pstate: HWP enabled\n");
2823 ++
2824 + return rc;
2825 + out:
2826 + get_online_cpus();
2827 +diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
2828 +index c4b0ef65988c..57e6c45724e7 100644
2829 +--- a/drivers/cpufreq/powernv-cpufreq.c
2830 ++++ b/drivers/cpufreq/powernv-cpufreq.c
2831 +@@ -592,7 +592,7 @@ static int __init powernv_cpufreq_init(void)
2832 + int rc = 0;
2833 +
2834 + /* Don't probe on pseries (guest) platforms */
2835 +- if (!firmware_has_feature(FW_FEATURE_OPALv3))
2836 ++ if (!firmware_has_feature(FW_FEATURE_OPAL))
2837 + return -ENODEV;
2838 +
2839 + /* Discover pstates from device tree and init */
2840 +diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
2841 +index 344058f8501a..d5657d50ac40 100644
2842 +--- a/drivers/cpuidle/coupled.c
2843 ++++ b/drivers/cpuidle/coupled.c
2844 +@@ -119,7 +119,6 @@ struct cpuidle_coupled {
2845 +
2846 + #define CPUIDLE_COUPLED_NOT_IDLE (-1)
2847 +
2848 +-static DEFINE_MUTEX(cpuidle_coupled_lock);
2849 + static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);
2850 +
2851 + /*
2852 +diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
2853 +index d5c5a476360f..c44a843cb405 100644
2854 +--- a/drivers/cpuidle/cpuidle-powernv.c
2855 ++++ b/drivers/cpuidle/cpuidle-powernv.c
2856 +@@ -282,7 +282,7 @@ static int powernv_idle_probe(void)
2857 + if (cpuidle_disable != IDLE_NO_OVERRIDE)
2858 + return -ENODEV;
2859 +
2860 +- if (firmware_has_feature(FW_FEATURE_OPALv3)) {
2861 ++ if (firmware_has_feature(FW_FEATURE_OPAL)) {
2862 + cpuidle_state_table = powernv_states;
2863 + /* Device tree can indicate more idle states */
2864 + max_idle_state = powernv_add_idle_states();
2865 +diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
2866 +index 2a8122444614..9ba4aaa9f755 100644
2867 +--- a/drivers/gpio/gpio-rcar.c
2868 ++++ b/drivers/gpio/gpio-rcar.c
2869 +@@ -200,6 +200,48 @@ static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on)
2870 + return 0;
2871 + }
2872 +
2873 ++static void gpio_rcar_irq_bus_lock(struct irq_data *d)
2874 ++{
2875 ++ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
2876 ++ struct gpio_rcar_priv *p = container_of(gc, struct gpio_rcar_priv,
2877 ++ gpio_chip);
2878 ++
2879 ++ pm_runtime_get_sync(&p->pdev->dev);
2880 ++}
2881 ++
2882 ++static void gpio_rcar_irq_bus_sync_unlock(struct irq_data *d)
2883 ++{
2884 ++ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
2885 ++ struct gpio_rcar_priv *p = container_of(gc, struct gpio_rcar_priv,
2886 ++ gpio_chip);
2887 ++
2888 ++ pm_runtime_put(&p->pdev->dev);
2889 ++}
2890 ++
2891 ++
2892 ++static int gpio_rcar_irq_request_resources(struct irq_data *d)
2893 ++{
2894 ++ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
2895 ++ struct gpio_rcar_priv *p = container_of(gc, struct gpio_rcar_priv,
2896 ++ gpio_chip);
2897 ++ int error;
2898 ++
2899 ++ error = pm_runtime_get_sync(&p->pdev->dev);
2900 ++ if (error < 0)
2901 ++ return error;
2902 ++
2903 ++ return 0;
2904 ++}
2905 ++
2906 ++static void gpio_rcar_irq_release_resources(struct irq_data *d)
2907 ++{
2908 ++ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
2909 ++ struct gpio_rcar_priv *p = container_of(gc, struct gpio_rcar_priv,
2910 ++ gpio_chip);
2911 ++
2912 ++ pm_runtime_put(&p->pdev->dev);
2913 ++}
2914 ++
2915 + static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
2916 + {
2917 + struct gpio_rcar_priv *p = dev_id;
2918 +@@ -460,6 +502,10 @@ static int gpio_rcar_probe(struct platform_device *pdev)
2919 + irq_chip->irq_unmask = gpio_rcar_irq_enable;
2920 + irq_chip->irq_set_type = gpio_rcar_irq_set_type;
2921 + irq_chip->irq_set_wake = gpio_rcar_irq_set_wake;
2922 ++ irq_chip->irq_bus_lock = gpio_rcar_irq_bus_lock;
2923 ++ irq_chip->irq_bus_sync_unlock = gpio_rcar_irq_bus_sync_unlock;
2924 ++ irq_chip->irq_request_resources = gpio_rcar_irq_request_resources;
2925 ++ irq_chip->irq_release_resources = gpio_rcar_irq_release_resources;
2926 + irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND;
2927 +
2928 + ret = gpiochip_add(gpio_chip);
2929 +diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
2930 +index bb9e9fc45e1b..82d23bd3a742 100644
2931 +--- a/drivers/net/bonding/bond_alb.c
2932 ++++ b/drivers/net/bonding/bond_alb.c
2933 +@@ -453,7 +453,7 @@ static void rlb_update_client(struct rlb_client_info *client_info)
2934 + {
2935 + int i;
2936 +
2937 +- if (!client_info->slave)
2938 ++ if (!client_info->slave || !is_valid_ether_addr(client_info->mac_dst))
2939 + return;
2940 +
2941 + for (i = 0; i < RLB_ARP_BURST_SIZE; i++) {
2942 +diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
2943 +index 3bba92fc9c1a..1325825d5225 100644
2944 +--- a/drivers/net/ethernet/broadcom/tg3.c
2945 ++++ b/drivers/net/ethernet/broadcom/tg3.c
2946 +@@ -8722,14 +8722,15 @@ static void tg3_free_consistent(struct tg3 *tp)
2947 + tg3_mem_rx_release(tp);
2948 + tg3_mem_tx_release(tp);
2949 +
2950 +- /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
2951 +- tg3_full_lock(tp, 0);
2952 ++ /* tp->hw_stats can be referenced safely:
2953 ++ * 1. under rtnl_lock
2954 ++ * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
2955 ++ */
2956 + if (tp->hw_stats) {
2957 + dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
2958 + tp->hw_stats, tp->stats_mapping);
2959 + tp->hw_stats = NULL;
2960 + }
2961 +- tg3_full_unlock(tp);
2962 + }
2963 +
2964 + /*
2965 +@@ -14163,7 +14164,7 @@ static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
2966 + struct tg3 *tp = netdev_priv(dev);
2967 +
2968 + spin_lock_bh(&tp->lock);
2969 +- if (!tp->hw_stats) {
2970 ++ if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
2971 + *stats = tp->net_stats_prev;
2972 + spin_unlock_bh(&tp->lock);
2973 + return stats;
2974 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
2975 +index ddb5541882f5..bcfac000199e 100644
2976 +--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
2977 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
2978 +@@ -967,6 +967,22 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
2979 + if (!coal->tx_max_coalesced_frames_irq)
2980 + return -EINVAL;
2981 +
2982 ++ if (coal->tx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
2983 ++ coal->rx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
2984 ++ coal->rx_coalesce_usecs_low > MLX4_EN_MAX_COAL_TIME ||
2985 ++ coal->rx_coalesce_usecs_high > MLX4_EN_MAX_COAL_TIME) {
2986 ++ netdev_info(dev, "%s: maximum coalesce time supported is %d usecs\n",
2987 ++ __func__, MLX4_EN_MAX_COAL_TIME);
2988 ++ return -ERANGE;
2989 ++ }
2990 ++
2991 ++ if (coal->tx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS ||
2992 ++ coal->rx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS) {
2993 ++ netdev_info(dev, "%s: maximum coalesced frames supported is %d\n",
2994 ++ __func__, MLX4_EN_MAX_COAL_PKTS);
2995 ++ return -ERANGE;
2996 ++ }
2997 ++
2998 + priv->rx_frames = (coal->rx_max_coalesced_frames ==
2999 + MLX4_EN_AUTO_CONF) ?
3000 + MLX4_EN_RX_COAL_TARGET :
3001 +diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
3002 +index 10aa6544cf4d..607daaffae98 100644
3003 +--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
3004 ++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
3005 +@@ -140,6 +140,9 @@ enum {
3006 + #define MLX4_EN_TX_COAL_PKTS 16
3007 + #define MLX4_EN_TX_COAL_TIME 0x10
3008 +
3009 ++#define MLX4_EN_MAX_COAL_PKTS U16_MAX
3010 ++#define MLX4_EN_MAX_COAL_TIME U16_MAX
3011 ++
3012 + #define MLX4_EN_RX_RATE_LOW 400000
3013 + #define MLX4_EN_RX_COAL_TIME_LOW 0
3014 + #define MLX4_EN_RX_RATE_HIGH 450000
3015 +@@ -518,8 +521,8 @@ struct mlx4_en_priv {
3016 + u16 rx_usecs_low;
3017 + u32 pkt_rate_high;
3018 + u16 rx_usecs_high;
3019 +- u16 sample_interval;
3020 +- u16 adaptive_rx_coal;
3021 ++ u32 sample_interval;
3022 ++ u32 adaptive_rx_coal;
3023 + u32 msg_enable;
3024 + u32 loopback_ok;
3025 + u32 validate_loopback;
3026 +diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
3027 +index ef668d300800..d987d571fdd6 100644
3028 +--- a/drivers/net/ethernet/realtek/8139too.c
3029 ++++ b/drivers/net/ethernet/realtek/8139too.c
3030 +@@ -2229,7 +2229,7 @@ static void rtl8139_poll_controller(struct net_device *dev)
3031 + struct rtl8139_private *tp = netdev_priv(dev);
3032 + const int irq = tp->pci_dev->irq;
3033 +
3034 +- disable_irq(irq);
3035 ++ disable_irq_nosync(irq);
3036 + rtl8139_interrupt(irq, dev);
3037 + enable_irq(irq);
3038 + }
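
Context for the one-line change above: netpoll can call a driver's poll_controller from atomic context. disable_irq() waits for any handler already running on that line to complete and may therefore deadlock there; disable_irq_nosync() only masks the line and returns. The resulting shape, with placeholder names rather than the driver's symbols:

    #include <linux/interrupt.h>
    #include <linux/netdevice.h>

    static irqreturn_t my_interrupt(int irq, void *dev_id);

    /* Placeholder names, not the driver's symbols. */
    static void my_poll_controller(struct net_device *dev, int irq)
    {
            disable_irq_nosync(irq);        /* mask only; must not wait */
            my_interrupt(irq, dev);         /* run the ISR body directly */
            enable_irq(irq);
    }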
3039 +diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
3040 +index a82c89af7124..8b4069ea52ce 100644
3041 +--- a/drivers/net/ethernet/realtek/r8169.c
3042 ++++ b/drivers/net/ethernet/realtek/r8169.c
3043 +@@ -4832,6 +4832,9 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
3044 + static void rtl_pll_power_up(struct rtl8169_private *tp)
3045 + {
3046 + rtl_generic_op(tp, tp->pll_power_ops.up);
3047 ++
3048 ++ /* give MAC/PHY some time to resume */
3049 ++ msleep(20);
3050 + }
3051 +
3052 + static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
3053 +diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
3054 +index ab6051a43134..ccebf89aa1e4 100644
3055 +--- a/drivers/net/ethernet/sun/niu.c
3056 ++++ b/drivers/net/ethernet/sun/niu.c
3057 +@@ -3442,7 +3442,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
3058 +
3059 + len = (val & RCR_ENTRY_L2_LEN) >>
3060 + RCR_ENTRY_L2_LEN_SHIFT;
3061 +- len -= ETH_FCS_LEN;
3062 ++ append_size = len + ETH_HLEN + ETH_FCS_LEN;
3063 +
3064 + addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
3065 + RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
3066 +@@ -3452,7 +3452,6 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
3067 + RCR_ENTRY_PKTBUFSZ_SHIFT];
3068 +
3069 + off = addr & ~PAGE_MASK;
3070 +- append_size = rcr_size;
3071 + if (num_rcr == 1) {
3072 + int ptype;
3073 +
3074 +@@ -3465,7 +3464,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
3075 + else
3076 + skb_checksum_none_assert(skb);
3077 + } else if (!(val & RCR_ENTRY_MULTI))
3078 +- append_size = len - skb->len;
3079 ++ append_size = append_size - skb->len;
3080 +
3081 + niu_rx_skb_append(skb, page, off, append_size, rcr_size);
3082 + if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
3083 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
3084 +index a6f0a8f516d6..8aaa09b3c753 100644
3085 +--- a/drivers/net/usb/qmi_wwan.c
3086 ++++ b/drivers/net/usb/qmi_wwan.c
3087 +@@ -855,6 +855,18 @@ static int qmi_wwan_probe(struct usb_interface *intf,
3088 + id->driver_info = (unsigned long)&qmi_wwan_info;
3089 + }
3090 +
3091 ++ /* There are devices where the same interface number can be
3092 ++ * configured as different functions. We should only bind to
3093 ++ * vendor specific functions when matching on interface number
3094 ++ */
3095 ++ if (id->match_flags & USB_DEVICE_ID_MATCH_INT_NUMBER &&
3096 ++ desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) {
3097 ++ dev_dbg(&intf->dev,
3098 ++ "Rejecting interface number match for class %02x\n",
3099 ++ desc->bInterfaceClass);
3100 ++ return -ENODEV;
3101 ++ }
3102 ++
3103 + /* Quectel EC20 quirk where we've QMI on interface 4 instead of 0 */
3104 + if (quectel_ec20_detected(intf) && desc->bInterfaceNumber == 0) {
3105 + dev_dbg(&intf->dev, "Quectel EC20 quirk, skipping interface 0\n");
3106 +diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
3107 +index 48b3866a9ded..35286907c636 100644
3108 +--- a/drivers/s390/cio/qdio_setup.c
3109 ++++ b/drivers/s390/cio/qdio_setup.c
3110 +@@ -140,7 +140,7 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
3111 + int i;
3112 +
3113 + for (i = 0; i < nr_queues; i++) {
3114 +- q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
3115 ++ q = kmem_cache_zalloc(qdio_q_cache, GFP_KERNEL);
3116 + if (!q)
3117 + return -ENOMEM;
3118 +
3119 +@@ -456,7 +456,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
3120 + {
3121 + struct ciw *ciw;
3122 + struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
3123 +- int rc;
3124 +
3125 + memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
3126 + memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
3127 +@@ -493,16 +492,14 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
3128 + ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
3129 + if (!ciw) {
3130 + DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
3131 +- rc = -EINVAL;
3132 +- goto out_err;
3133 ++ return -EINVAL;
3134 + }
3135 + irq_ptr->equeue = *ciw;
3136 +
3137 + ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
3138 + if (!ciw) {
3139 + DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
3140 +- rc = -EINVAL;
3141 +- goto out_err;
3142 ++ return -EINVAL;
3143 + }
3144 + irq_ptr->aqueue = *ciw;
3145 +
3146 +@@ -510,9 +507,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
3147 + irq_ptr->orig_handler = init_data->cdev->handler;
3148 + init_data->cdev->handler = qdio_int_handler;
3149 + return 0;
3150 +-out_err:
3151 +- qdio_release_memory(irq_ptr);
3152 +- return rc;
3153 + }
3154 +
3155 + void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
3156 +diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
3157 +index 34367d172961..4534a7ce77b8 100644
3158 +--- a/drivers/s390/scsi/zfcp_dbf.c
3159 ++++ b/drivers/s390/scsi/zfcp_dbf.c
3160 +@@ -3,7 +3,7 @@
3161 + *
3162 + * Debug traces for zfcp.
3163 + *
3164 +- * Copyright IBM Corp. 2002, 2017
3165 ++ * Copyright IBM Corp. 2002, 2018
3166 + */
3167 +
3168 + #define KMSG_COMPONENT "zfcp"
3169 +@@ -287,6 +287,27 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
3170 + spin_unlock_irqrestore(&dbf->rec_lock, flags);
3171 + }
3172 +
3173 ++/**
3174 ++ * zfcp_dbf_rec_trig_lock - trace event related to triggered recovery with lock
3175 ++ * @tag: identifier for event
3176 ++ * @adapter: adapter on which the erp_action should run
3177 ++ * @port: remote port involved in the erp_action
3178 ++ * @sdev: scsi device involved in the erp_action
3179 ++ * @want: wanted erp_action
3180 ++ * @need: required erp_action
3181 ++ *
3182 ++ * The adapter->erp_lock must not be held.
3183 ++ */
3184 ++void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
3185 ++ struct zfcp_port *port, struct scsi_device *sdev,
3186 ++ u8 want, u8 need)
3187 ++{
3188 ++ unsigned long flags;
3189 ++
3190 ++ read_lock_irqsave(&adapter->erp_lock, flags);
3191 ++ zfcp_dbf_rec_trig(tag, adapter, port, sdev, want, need);
3192 ++ read_unlock_irqrestore(&adapter->erp_lock, flags);
3193 ++}
3194 +
3195 + /**
3196 + * zfcp_dbf_rec_run_lvl - trace event related to running recovery
3197 +diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
3198 +index 21c8c689b02b..7a7984a50683 100644
3199 +--- a/drivers/s390/scsi/zfcp_ext.h
3200 ++++ b/drivers/s390/scsi/zfcp_ext.h
3201 +@@ -3,7 +3,7 @@
3202 + *
3203 + * External function declarations.
3204 + *
3205 +- * Copyright IBM Corp. 2002, 2016
3206 ++ * Copyright IBM Corp. 2002, 2018
3207 + */
3208 +
3209 + #ifndef ZFCP_EXT_H
3210 +@@ -34,6 +34,9 @@ extern int zfcp_dbf_adapter_register(struct zfcp_adapter *);
3211 + extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
3212 + extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
3213 + struct zfcp_port *, struct scsi_device *, u8, u8);
3214 ++extern void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
3215 ++ struct zfcp_port *port,
3216 ++ struct scsi_device *sdev, u8 want, u8 need);
3217 + extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
3218 + extern void zfcp_dbf_rec_run_lvl(int level, char *tag,
3219 + struct zfcp_erp_action *erp);
3220 +diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
3221 +index a9b8104b982e..bb99db2948ab 100644
3222 +--- a/drivers/s390/scsi/zfcp_scsi.c
3223 ++++ b/drivers/s390/scsi/zfcp_scsi.c
3224 +@@ -3,7 +3,7 @@
3225 + *
3226 + * Interface to Linux SCSI midlayer.
3227 + *
3228 +- * Copyright IBM Corp. 2002, 2017
3229 ++ * Copyright IBM Corp. 2002, 2018
3230 + */
3231 +
3232 + #define KMSG_COMPONENT "zfcp"
3233 +@@ -616,9 +616,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
3234 + ids.port_id = port->d_id;
3235 + ids.roles = FC_RPORT_ROLE_FCP_TARGET;
3236 +
3237 +- zfcp_dbf_rec_trig("scpaddy", port->adapter, port, NULL,
3238 +- ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
3239 +- ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
3240 ++ zfcp_dbf_rec_trig_lock("scpaddy", port->adapter, port, NULL,
3241 ++ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
3242 ++ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
3243 + rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
3244 + if (!rport) {
3245 + dev_err(&port->adapter->ccw_device->dev,
3246 +@@ -640,9 +640,9 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
3247 + struct fc_rport *rport = port->rport;
3248 +
3249 + if (rport) {
3250 +- zfcp_dbf_rec_trig("scpdely", port->adapter, port, NULL,
3251 +- ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
3252 +- ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
3253 ++ zfcp_dbf_rec_trig_lock("scpdely", port->adapter, port, NULL,
3254 ++ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
3255 ++ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
3256 + fc_remote_port_delete(rport);
3257 + port->rport = NULL;
3258 + }
3259 +diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
3260 +index 519dac4e341e..9a8c2f97ed70 100644
3261 +--- a/drivers/scsi/libsas/sas_scsi_host.c
3262 ++++ b/drivers/scsi/libsas/sas_scsi_host.c
3263 +@@ -222,6 +222,7 @@ out_done:
3264 + static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
3265 + {
3266 + struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
3267 ++ struct domain_device *dev = cmd_to_domain_dev(cmd);
3268 + struct sas_task *task = TO_SAS_TASK(cmd);
3269 +
3270 + /* At this point, we only get called following an actual abort
3271 +@@ -230,6 +231,14 @@ static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
3272 + */
3273 + sas_end_task(cmd, task);
3274 +
3275 ++ if (dev_is_sata(dev)) {
3276 ++ /* defer commands to libata so that libata EH can
3277 ++ * handle ata qcs correctly
3278 ++ */
3279 ++ list_move_tail(&cmd->eh_entry, &sas_ha->eh_ata_q);
3280 ++ return;
3281 ++ }
3282 ++
3283 + /* now finish the command and move it on to the error
3284 + * handler done list, this also takes it off the
3285 + * error handler pending list.
3286 +@@ -237,22 +246,6 @@ static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
3287 + scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
3288 + }
3289 +
3290 +-static void sas_eh_defer_cmd(struct scsi_cmnd *cmd)
3291 +-{
3292 +- struct domain_device *dev = cmd_to_domain_dev(cmd);
3293 +- struct sas_ha_struct *ha = dev->port->ha;
3294 +- struct sas_task *task = TO_SAS_TASK(cmd);
3295 +-
3296 +- if (!dev_is_sata(dev)) {
3297 +- sas_eh_finish_cmd(cmd);
3298 +- return;
3299 +- }
3300 +-
3301 +- /* report the timeout to libata */
3302 +- sas_end_task(cmd, task);
3303 +- list_move_tail(&cmd->eh_entry, &ha->eh_ata_q);
3304 +-}
3305 +-
3306 + static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
3307 + {
3308 + struct scsi_cmnd *cmd, *n;
3309 +@@ -260,7 +253,7 @@ static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd
3310 + list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
3311 + if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
3312 + cmd->device->lun == my_cmd->device->lun)
3313 +- sas_eh_defer_cmd(cmd);
3314 ++ sas_eh_finish_cmd(cmd);
3315 + }
3316 + }
3317 +
3318 +@@ -622,12 +615,12 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
3319 + case TASK_IS_DONE:
3320 + SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
3321 + task);
3322 +- sas_eh_defer_cmd(cmd);
3323 ++ sas_eh_finish_cmd(cmd);
3324 + continue;
3325 + case TASK_IS_ABORTED:
3326 + SAS_DPRINTK("%s: task 0x%p is aborted\n",
3327 + __func__, task);
3328 +- sas_eh_defer_cmd(cmd);
3329 ++ sas_eh_finish_cmd(cmd);
3330 + continue;
3331 + case TASK_IS_AT_LU:
3332 + SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task);
3333 +@@ -638,7 +631,7 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
3334 + "recovered\n",
3335 + SAS_ADDR(task->dev),
3336 + cmd->device->lun);
3337 +- sas_eh_defer_cmd(cmd);
3338 ++ sas_eh_finish_cmd(cmd);
3339 + sas_scsi_clear_queue_lu(work_q, cmd);
3340 + goto Again;
3341 + }
3342 +diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
3343 +index cb19c9ad1b57..841f3fbec77c 100644
3344 +--- a/drivers/scsi/sg.c
3345 ++++ b/drivers/scsi/sg.c
3346 +@@ -1903,7 +1903,7 @@ retry:
3347 + num = (rem_sz > scatter_elem_sz_prev) ?
3348 + scatter_elem_sz_prev : rem_sz;
3349 +
3350 +- schp->pages[k] = alloc_pages(gfp_mask, order);
3351 ++ schp->pages[k] = alloc_pages(gfp_mask | __GFP_ZERO, order);
3352 + if (!schp->pages[k])
3353 + goto out;
3354 +
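The sg hunk above adds __GFP_ZERO so the scatter-gather pages that sg hands back to user space (these buffers can be mmap()ed) start out zeroed instead of carrying whatever kernel data last occupied them, closing an information leak. A minimal userspace sketch of the same rule, zero any buffer that crosses a trust boundary; this is plain libc, nothing here is from the driver:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        size_t len = 4096;
        /* analogous to alloc_pages(gfp_mask | __GFP_ZERO): no stale contents */
        unsigned char *buf = calloc(1, len);

        if (!buf)
                return 1;
        printf("first byte: %u (guaranteed zero)\n", buf[0]);
        /* malloc() memory, by contrast, is indeterminate and could expose
         * whatever previously occupied the allocation */
        free(buf);
        return 0;
}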
3355 +diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
3356 +index 58efa98313aa..24c07fea9de2 100644
3357 +--- a/drivers/spi/spi-pxa2xx.h
3358 ++++ b/drivers/spi/spi-pxa2xx.h
3359 +@@ -38,7 +38,7 @@ struct driver_data {
3360 +
3361 + /* SSP register addresses */
3362 + void __iomem *ioaddr;
3363 +- u32 ssdr_physical;
3364 ++ phys_addr_t ssdr_physical;
3365 +
3366 + /* SSP masks*/
3367 + u32 dma_cr1;
3368 +diff --git a/drivers/usb/usbip/stub.h b/drivers/usb/usbip/stub.h
3369 +index 266e2b0ce9a8..47ccd73a74f0 100644
3370 +--- a/drivers/usb/usbip/stub.h
3371 ++++ b/drivers/usb/usbip/stub.h
3372 +@@ -88,6 +88,7 @@ struct bus_id_priv {
3373 + struct stub_device *sdev;
3374 + struct usb_device *udev;
3375 + char shutdown_busid;
3376 ++ spinlock_t busid_lock;
3377 + };
3378 +
3379 + /* stub_priv is allocated from stub_priv_cache */
3380 +@@ -98,6 +99,7 @@ extern struct usb_device_driver stub_driver;
3381 +
3382 + /* stub_main.c */
3383 + struct bus_id_priv *get_busid_priv(const char *busid);
3384 ++void put_busid_priv(struct bus_id_priv *bid);
3385 + int del_match_busid(char *busid);
3386 + void stub_device_cleanup_urbs(struct stub_device *sdev);
3387 +
3388 +diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
3389 +index 0931f3271119..4aad99a59958 100644
3390 +--- a/drivers/usb/usbip/stub_dev.c
3391 ++++ b/drivers/usb/usbip/stub_dev.c
3392 +@@ -314,9 +314,9 @@ static int stub_probe(struct usb_device *udev)
3393 + struct stub_device *sdev = NULL;
3394 + const char *udev_busid = dev_name(&udev->dev);
3395 + struct bus_id_priv *busid_priv;
3396 +- int rc;
3397 ++ int rc = 0;
3398 +
3399 +- dev_dbg(&udev->dev, "Enter\n");
3400 ++ dev_dbg(&udev->dev, "Enter probe\n");
3401 +
3402 + /* check we should claim or not by busid_table */
3403 + busid_priv = get_busid_priv(udev_busid);
3404 +@@ -331,13 +331,15 @@ static int stub_probe(struct usb_device *udev)
3405 + * other matched drivers by the driver core.
3406 + * See driver_probe_device() in driver/base/dd.c
3407 + */
3408 +- return -ENODEV;
3409 ++ rc = -ENODEV;
3410 ++ goto call_put_busid_priv;
3411 + }
3412 +
3413 + if (udev->descriptor.bDeviceClass == USB_CLASS_HUB) {
3414 + dev_dbg(&udev->dev, "%s is a usb hub device... skip!\n",
3415 + udev_busid);
3416 +- return -ENODEV;
3417 ++ rc = -ENODEV;
3418 ++ goto call_put_busid_priv;
3419 + }
3420 +
3421 + if (!strcmp(udev->bus->bus_name, "vhci_hcd")) {
3422 +@@ -345,13 +347,16 @@ static int stub_probe(struct usb_device *udev)
3423 + "%s is attached on vhci_hcd... skip!\n",
3424 + udev_busid);
3425 +
3426 +- return -ENODEV;
3427 ++ rc = -ENODEV;
3428 ++ goto call_put_busid_priv;
3429 + }
3430 +
3431 + /* ok, this is my device */
3432 + sdev = stub_device_alloc(udev);
3433 +- if (!sdev)
3434 +- return -ENOMEM;
3435 ++ if (!sdev) {
3436 ++ rc = -ENOMEM;
3437 ++ goto call_put_busid_priv;
3438 ++ }
3439 +
3440 + dev_info(&udev->dev,
3441 + "usbip-host: register new device (bus %u dev %u)\n",
3442 +@@ -383,7 +388,9 @@ static int stub_probe(struct usb_device *udev)
3443 + }
3444 + busid_priv->status = STUB_BUSID_ALLOC;
3445 +
3446 +- return 0;
3447 ++ rc = 0;
3448 ++ goto call_put_busid_priv;
3449 ++
3450 + err_files:
3451 + usb_hub_release_port(udev->parent, udev->portnum,
3452 + (struct usb_dev_state *) udev);
3453 +@@ -394,6 +401,9 @@ err_port:
3454 +
3455 + busid_priv->sdev = NULL;
3456 + stub_device_free(sdev);
3457 ++
3458 ++call_put_busid_priv:
3459 ++ put_busid_priv(busid_priv);
3460 + return rc;
3461 + }
3462 +
3463 +@@ -419,7 +429,7 @@ static void stub_disconnect(struct usb_device *udev)
3464 + struct bus_id_priv *busid_priv;
3465 + int rc;
3466 +
3467 +- dev_dbg(&udev->dev, "Enter\n");
3468 ++ dev_dbg(&udev->dev, "Enter disconnect\n");
3469 +
3470 + busid_priv = get_busid_priv(udev_busid);
3471 + if (!busid_priv) {
3472 +@@ -432,7 +442,7 @@ static void stub_disconnect(struct usb_device *udev)
3473 + /* get stub_device */
3474 + if (!sdev) {
3475 + dev_err(&udev->dev, "could not get device");
3476 +- return;
3477 ++ goto call_put_busid_priv;
3478 + }
3479 +
3480 + dev_set_drvdata(&udev->dev, NULL);
3481 +@@ -447,12 +457,12 @@ static void stub_disconnect(struct usb_device *udev)
3482 + (struct usb_dev_state *) udev);
3483 + if (rc) {
3484 + dev_dbg(&udev->dev, "unable to release port\n");
3485 +- return;
3486 ++ goto call_put_busid_priv;
3487 + }
3488 +
3489 + /* If usb reset is called from event handler */
3490 + if (busid_priv->sdev->ud.eh == current)
3491 +- return;
3492 ++ goto call_put_busid_priv;
3493 +
3494 + /* shutdown the current connection */
3495 + shutdown_busid(busid_priv);
3496 +@@ -463,12 +473,11 @@ static void stub_disconnect(struct usb_device *udev)
3497 + busid_priv->sdev = NULL;
3498 + stub_device_free(sdev);
3499 +
3500 +- if (busid_priv->status == STUB_BUSID_ALLOC) {
3501 ++ if (busid_priv->status == STUB_BUSID_ALLOC)
3502 + busid_priv->status = STUB_BUSID_ADDED;
3503 +- } else {
3504 +- busid_priv->status = STUB_BUSID_OTHER;
3505 +- del_match_busid((char *)udev_busid);
3506 +- }
3507 ++
3508 ++call_put_busid_priv:
3509 ++ put_busid_priv(busid_priv);
3510 + }
3511 +
3512 + #ifdef CONFIG_PM
3513 +diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
3514 +index f761e02e75c9..fa90496ca7a8 100644
3515 +--- a/drivers/usb/usbip/stub_main.c
3516 ++++ b/drivers/usb/usbip/stub_main.c
3517 +@@ -28,6 +28,7 @@
3518 + #define DRIVER_DESC "USB/IP Host Driver"
3519 +
3520 + struct kmem_cache *stub_priv_cache;
3521 ++
3522 + /*
3523 + * busid_tables defines matching busids that usbip can grab. A user can change
3524 + * dynamically what device is locally used and what device is exported to a
3525 +@@ -39,6 +40,8 @@ static spinlock_t busid_table_lock;
3526 +
3527 + static void init_busid_table(void)
3528 + {
3529 ++ int i;
3530 ++
3531 + /*
3532 + * This also sets the bus_table[i].status to
3533 + * STUB_BUSID_OTHER, which is 0.
3534 +@@ -46,6 +49,9 @@ static void init_busid_table(void)
3535 + memset(busid_table, 0, sizeof(busid_table));
3536 +
3537 + spin_lock_init(&busid_table_lock);
3538 ++
3539 ++ for (i = 0; i < MAX_BUSID; i++)
3540 ++ spin_lock_init(&busid_table[i].busid_lock);
3541 + }
3542 +
3543 + /*
3544 +@@ -57,15 +63,20 @@ static int get_busid_idx(const char *busid)
3545 + int i;
3546 + int idx = -1;
3547 +
3548 +- for (i = 0; i < MAX_BUSID; i++)
3549 ++ for (i = 0; i < MAX_BUSID; i++) {
3550 ++ spin_lock(&busid_table[i].busid_lock);
3551 + if (busid_table[i].name[0])
3552 + if (!strncmp(busid_table[i].name, busid, BUSID_SIZE)) {
3553 + idx = i;
3554 ++ spin_unlock(&busid_table[i].busid_lock);
3555 + break;
3556 + }
3557 ++ spin_unlock(&busid_table[i].busid_lock);
3558 ++ }
3559 + return idx;
3560 + }
3561 +
3562 ++/* Returns holding busid_lock. Should call put_busid_priv() to unlock */
3563 + struct bus_id_priv *get_busid_priv(const char *busid)
3564 + {
3565 + int idx;
3566 +@@ -73,13 +84,22 @@ struct bus_id_priv *get_busid_priv(const char *busid)
3567 +
3568 + spin_lock(&busid_table_lock);
3569 + idx = get_busid_idx(busid);
3570 +- if (idx >= 0)
3571 ++ if (idx >= 0) {
3572 + bid = &(busid_table[idx]);
3573 ++ /* get busid_lock before returning */
3574 ++ spin_lock(&bid->busid_lock);
3575 ++ }
3576 + spin_unlock(&busid_table_lock);
3577 +
3578 + return bid;
3579 + }
3580 +
3581 ++void put_busid_priv(struct bus_id_priv *bid)
3582 ++{
3583 ++ if (bid)
3584 ++ spin_unlock(&bid->busid_lock);
3585 ++}
3586 ++
3587 + static int add_match_busid(char *busid)
3588 + {
3589 + int i;
3590 +@@ -92,15 +112,19 @@ static int add_match_busid(char *busid)
3591 + goto out;
3592 + }
3593 +
3594 +- for (i = 0; i < MAX_BUSID; i++)
3595 ++ for (i = 0; i < MAX_BUSID; i++) {
3596 ++ spin_lock(&busid_table[i].busid_lock);
3597 + if (!busid_table[i].name[0]) {
3598 + strlcpy(busid_table[i].name, busid, BUSID_SIZE);
3599 + if ((busid_table[i].status != STUB_BUSID_ALLOC) &&
3600 + (busid_table[i].status != STUB_BUSID_REMOV))
3601 + busid_table[i].status = STUB_BUSID_ADDED;
3602 + ret = 0;
3603 ++ spin_unlock(&busid_table[i].busid_lock);
3604 + break;
3605 + }
3606 ++ spin_unlock(&busid_table[i].busid_lock);
3607 ++ }
3608 +
3609 + out:
3610 + spin_unlock(&busid_table_lock);
3611 +@@ -121,6 +145,8 @@ int del_match_busid(char *busid)
3612 + /* found */
3613 + ret = 0;
3614 +
3615 ++ spin_lock(&busid_table[idx].busid_lock);
3616 ++
3617 + if (busid_table[idx].status == STUB_BUSID_OTHER)
3618 + memset(busid_table[idx].name, 0, BUSID_SIZE);
3619 +
3620 +@@ -128,6 +154,7 @@ int del_match_busid(char *busid)
3621 + (busid_table[idx].status != STUB_BUSID_ADDED))
3622 + busid_table[idx].status = STUB_BUSID_REMOV;
3623 +
3624 ++ spin_unlock(&busid_table[idx].busid_lock);
3625 + out:
3626 + spin_unlock(&busid_table_lock);
3627 +
3628 +@@ -140,9 +167,12 @@ static ssize_t show_match_busid(struct device_driver *drv, char *buf)
3629 + char *out = buf;
3630 +
3631 + spin_lock(&busid_table_lock);
3632 +- for (i = 0; i < MAX_BUSID; i++)
3633 ++ for (i = 0; i < MAX_BUSID; i++) {
3634 ++ spin_lock(&busid_table[i].busid_lock);
3635 + if (busid_table[i].name[0])
3636 + out += sprintf(out, "%s ", busid_table[i].name);
3637 ++ spin_unlock(&busid_table[i].busid_lock);
3638 ++ }
3639 + spin_unlock(&busid_table_lock);
3640 + out += sprintf(out, "\n");
3641 +
3642 +@@ -184,6 +214,51 @@ static ssize_t store_match_busid(struct device_driver *dev, const char *buf,
3643 + static DRIVER_ATTR(match_busid, S_IRUSR | S_IWUSR, show_match_busid,
3644 + store_match_busid);
3645 +
3646 ++static int do_rebind(char *busid, struct bus_id_priv *busid_priv)
3647 ++{
3648 ++ int ret;
3649 ++
3650 ++ /* device_attach() callers should hold parent lock for USB */
3651 ++ if (busid_priv->udev->dev.parent)
3652 ++ device_lock(busid_priv->udev->dev.parent);
3653 ++ ret = device_attach(&busid_priv->udev->dev);
3654 ++ if (busid_priv->udev->dev.parent)
3655 ++ device_unlock(busid_priv->udev->dev.parent);
3656 ++ if (ret < 0) {
3657 ++ dev_err(&busid_priv->udev->dev, "rebind failed\n");
3658 ++ return ret;
3659 ++ }
3660 ++ return 0;
3661 ++}
3662 ++
3663 ++static void stub_device_rebind(void)
3664 ++{
3665 ++#if IS_MODULE(CONFIG_USBIP_HOST)
3666 ++ struct bus_id_priv *busid_priv;
3667 ++ int i;
3668 ++
3669 ++ /* update status to STUB_BUSID_OTHER so probe ignores the device */
3670 ++ spin_lock(&busid_table_lock);
3671 ++ for (i = 0; i < MAX_BUSID; i++) {
3672 ++ if (busid_table[i].name[0] &&
3673 ++ busid_table[i].shutdown_busid) {
3674 ++ busid_priv = &(busid_table[i]);
3675 ++ busid_priv->status = STUB_BUSID_OTHER;
3676 ++ }
3677 ++ }
3678 ++ spin_unlock(&busid_table_lock);
3679 ++
3680 ++ /* now run rebind - no need to hold locks. driver files are removed */
3681 ++ for (i = 0; i < MAX_BUSID; i++) {
3682 ++ if (busid_table[i].name[0] &&
3683 ++ busid_table[i].shutdown_busid) {
3684 ++ busid_priv = &(busid_table[i]);
3685 ++ do_rebind(busid_table[i].name, busid_priv);
3686 ++ }
3687 ++ }
3688 ++#endif
3689 ++}
3690 ++
3691 + static ssize_t rebind_store(struct device_driver *dev, const char *buf,
3692 + size_t count)
3693 + {
3694 +@@ -201,16 +276,17 @@ static ssize_t rebind_store(struct device_driver *dev, const char *buf,
3695 + if (!bid)
3696 + return -ENODEV;
3697 +
3698 +- /* device_attach() callers should hold parent lock for USB */
3699 +- if (bid->udev->dev.parent)
3700 +- device_lock(bid->udev->dev.parent);
3701 +- ret = device_attach(&bid->udev->dev);
3702 +- if (bid->udev->dev.parent)
3703 +- device_unlock(bid->udev->dev.parent);
3704 +- if (ret < 0) {
3705 +- dev_err(&bid->udev->dev, "rebind failed\n");
3706 ++ /* mark the device for deletion so probe ignores it during rescan */
3707 ++ bid->status = STUB_BUSID_OTHER;
3708 ++ /* release the busid lock */
3709 ++ put_busid_priv(bid);
3710 ++
3711 ++ ret = do_rebind((char *) buf, bid);
3712 ++ if (ret < 0)
3713 + return ret;
3714 +- }
3715 ++
3716 ++ /* delete device from busid_table */
3717 ++ del_match_busid((char *) buf);
3718 +
3719 + return count;
3720 + }
3721 +@@ -333,6 +409,9 @@ static void __exit usbip_host_exit(void)
3722 + */
3723 + usb_deregister_device_driver(&stub_driver);
3724 +
3725 ++ /* initiate scan to attach devices */
3726 ++ stub_device_rebind();
3727 ++
3728 + kmem_cache_destroy(stub_priv_cache);
3729 + }
3730 +
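Taken together, the usbip hunks change get_busid_priv() to return with the entry's new per-entry busid_lock held and add put_busid_priv() so every caller releases it on all exit paths; stub_probe() and stub_disconnect() are reworked to funnel through a single label for exactly that reason. A userspace sketch of the locked-lookup/put pairing using pthreads; all names are illustrative, not the driver's:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define MAX_ENTRIES 4

struct entry {
        char name[32];
        pthread_mutex_t lock;   /* per-entry lock, like busid_lock */
};

static struct entry table[MAX_ENTRIES];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns with entry->lock held; caller must call put_entry(). */
static struct entry *get_entry(const char *name)
{
        struct entry *e = NULL;
        int i;

        pthread_mutex_lock(&table_lock);
        for (i = 0; i < MAX_ENTRIES; i++) {
                if (!strcmp(table[i].name, name)) {
                        e = &table[i];
                        /* take the entry lock before dropping the table lock,
                         * as the patch does, so the entry cannot be recycled */
                        pthread_mutex_lock(&e->lock);
                        break;
                }
        }
        pthread_mutex_unlock(&table_lock);
        return e;
}

static void put_entry(struct entry *e)
{
        if (e)
                pthread_mutex_unlock(&e->lock);
}

int main(void)
{
        struct entry *e;
        int i;

        for (i = 0; i < MAX_ENTRIES; i++)
                pthread_mutex_init(&table[i].lock, NULL);
        strcpy(table[1].name, "1-1");

        e = get_entry("1-1");
        if (e) {
                printf("found %s (locked)\n", e->name);
                put_entry(e);   /* every exit path must unlock */
        }
        return 0;
}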
3731 +diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
3732 +index 0f2b7c622ce3..e2f5be261532 100644
3733 +--- a/fs/btrfs/ctree.c
3734 ++++ b/fs/btrfs/ctree.c
3735 +@@ -2497,10 +2497,8 @@ read_block_for_search(struct btrfs_trans_handle *trans,
3736 + if (p->reada)
3737 + reada_for_search(root, p, level, slot, key->objectid);
3738 +
3739 +- btrfs_release_path(p);
3740 +-
3741 + ret = -EAGAIN;
3742 +- tmp = read_tree_block(root, blocknr, 0);
3743 ++ tmp = read_tree_block(root, blocknr, gen);
3744 + if (!IS_ERR(tmp)) {
3745 + /*
3746 + * If the read above didn't mark this buffer up to date,
3747 +@@ -2512,6 +2510,8 @@ read_block_for_search(struct btrfs_trans_handle *trans,
3748 + ret = -EIO;
3749 + free_extent_buffer(tmp);
3750 + }
3751 ++
3752 ++ btrfs_release_path(p);
3753 + return ret;
3754 + }
3755 +
3756 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
3757 +index d6359af9789d..6ba022ed4a52 100644
3758 +--- a/fs/btrfs/tree-log.c
3759 ++++ b/fs/btrfs/tree-log.c
3760 +@@ -4568,6 +4568,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
3761 + struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
3762 + u64 logged_isize = 0;
3763 + bool need_log_inode_item = true;
3764 ++ bool xattrs_logged = false;
3765 +
3766 + path = btrfs_alloc_path();
3767 + if (!path)
3768 +@@ -4808,6 +4809,7 @@ next_slot:
3769 + err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
3770 + if (err)
3771 + goto out_unlock;
3772 ++ xattrs_logged = true;
3773 + if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
3774 + btrfs_release_path(path);
3775 + btrfs_release_path(dst_path);
3776 +@@ -4820,6 +4822,11 @@ log_extents:
3777 + btrfs_release_path(dst_path);
3778 + if (need_log_inode_item) {
3779 + err = log_inode_item(trans, log, dst_path, inode);
3780 ++ if (!err && !xattrs_logged) {
3781 ++ err = btrfs_log_all_xattrs(trans, root, inode, path,
3782 ++ dst_path);
3783 ++ btrfs_release_path(path);
3784 ++ }
3785 + if (err)
3786 + goto out_unlock;
3787 + }
3788 +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
3789 +index 6d874b1cd53c..ed75d70b4bc2 100644
3790 +--- a/fs/btrfs/volumes.c
3791 ++++ b/fs/btrfs/volumes.c
3792 +@@ -3850,6 +3850,15 @@ int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
3793 + return 0;
3794 + }
3795 +
3796 ++ /*
3797 ++ * A ro->rw remount sequence should continue with the paused balance
3798 ++ * regardless of who paused it (the system or the user), so set
3799 ++ * the resume flag.
3800 ++ */
3801 ++ spin_lock(&fs_info->balance_lock);
3802 ++ fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
3803 ++ spin_unlock(&fs_info->balance_lock);
3804 ++
3805 + tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
3806 + return PTR_ERR_OR_ZERO(tsk);
3807 + }
3808 +diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
3809 +index 0aa9bf6e6e53..f600c43f0047 100644
3810 +--- a/fs/ext2/inode.c
3811 ++++ b/fs/ext2/inode.c
3812 +@@ -1175,21 +1175,11 @@ do_indirects:
3813 +
3814 + static void ext2_truncate_blocks(struct inode *inode, loff_t offset)
3815 + {
3816 +- /*
3817 +- * XXX: it seems like a bug here that we don't allow
3818 +- * IS_APPEND inode to have blocks-past-i_size trimmed off.
3819 +- * review and fix this.
3820 +- *
3821 +- * Also would be nice to be able to handle IO errors and such,
3822 +- * but that's probably too much to ask.
3823 +- */
3824 + if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
3825 + S_ISLNK(inode->i_mode)))
3826 + return;
3827 + if (ext2_inode_is_fast_symlink(inode))
3828 + return;
3829 +- if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
3830 +- return;
3831 +
3832 + dax_sem_down_write(EXT2_I(inode));
3833 + __ext2_truncate_blocks(inode, offset);
3834 +diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
3835 +index 7302d96ae8bf..fa40e756c501 100644
3836 +--- a/fs/hfsplus/super.c
3837 ++++ b/fs/hfsplus/super.c
3838 +@@ -585,6 +585,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
3839 + return 0;
3840 +
3841 + out_put_hidden_dir:
3842 ++ cancel_delayed_work_sync(&sbi->sync_work);
3843 + iput(sbi->hidden_dir);
3844 + out_put_root:
3845 + dput(sb->s_root);
3846 +diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
3847 +index a2edb0049eb5..f038d4ac9aec 100644
3848 +--- a/fs/lockd/svc.c
3849 ++++ b/fs/lockd/svc.c
3850 +@@ -271,6 +271,8 @@ static void lockd_down_net(struct svc_serv *serv, struct net *net)
3851 + if (ln->nlmsvc_users) {
3852 + if (--ln->nlmsvc_users == 0) {
3853 + nlm_shutdown_hosts_net(net);
3854 ++ cancel_delayed_work_sync(&ln->grace_period_end);
3855 ++ locks_end_grace(&ln->lockd_manager);
3856 + svc_shutdown_net(serv, net);
3857 + dprintk("lockd_down_net: per-net data destroyed; net=%p\n", net);
3858 + }
3859 +diff --git a/fs/pipe.c b/fs/pipe.c
3860 +index 39eff9a67253..1e7263bb837a 100644
3861 +--- a/fs/pipe.c
3862 ++++ b/fs/pipe.c
3863 +@@ -616,6 +616,9 @@ struct pipe_inode_info *alloc_pipe_info(void)
3864 + unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
3865 + struct user_struct *user = get_current_user();
3866 +
3867 ++ if (pipe_bufs * PAGE_SIZE > pipe_max_size && !capable(CAP_SYS_RESOURCE))
3868 ++ pipe_bufs = pipe_max_size >> PAGE_SHIFT;
3869 ++
3870 + if (!too_many_pipe_buffers_hard(user)) {
3871 + if (too_many_pipe_buffers_soft(user))
3872 + pipe_bufs = 1;
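The pipe hunk keeps an unprivileged alloc_pipe_info() from starting above the pipe-max-size sysctl: if the default PIPE_DEF_BUFFERS worth of pages already exceeds the limit, the buffer count is derived from the limit instead. Worked numbers under the usual 4 KiB-page assumption (the sysctl value below is just an example):

#include <stdio.h>

int main(void)
{
        unsigned long page_size = 4096, page_shift = 12;  /* assume 4 KiB pages */
        unsigned long pipe_bufs = 16;                     /* PIPE_DEF_BUFFERS */
        unsigned long pipe_max_size = 16384;              /* example sysctl value */

        if (pipe_bufs * page_size > pipe_max_size)
                pipe_bufs = pipe_max_size >> page_shift;  /* 16384 >> 12 = 4 */

        printf("default pipe buffers: %lu (%lu bytes)\n",
               pipe_bufs, pipe_bufs * page_size);
        return 0;
}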
3873 +diff --git a/fs/proc/base.c b/fs/proc/base.c
3874 +index dd732400578e..4a666ec7fb64 100644
3875 +--- a/fs/proc/base.c
3876 ++++ b/fs/proc/base.c
3877 +@@ -953,6 +953,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
3878 + unsigned long src = *ppos;
3879 + int ret = 0;
3880 + struct mm_struct *mm = file->private_data;
3881 ++ unsigned long env_start, env_end;
3882 +
3883 + /* Ensure the process spawned far enough to have an environment. */
3884 + if (!mm || !mm->env_end)
3885 +@@ -965,19 +966,25 @@ static ssize_t environ_read(struct file *file, char __user *buf,
3886 + ret = 0;
3887 + if (!atomic_inc_not_zero(&mm->mm_users))
3888 + goto free;
3889 ++
3890 ++ down_read(&mm->mmap_sem);
3891 ++ env_start = mm->env_start;
3892 ++ env_end = mm->env_end;
3893 ++ up_read(&mm->mmap_sem);
3894 ++
3895 + while (count > 0) {
3896 + size_t this_len, max_len;
3897 + int retval;
3898 +
3899 +- if (src >= (mm->env_end - mm->env_start))
3900 ++ if (src >= (env_end - env_start))
3901 + break;
3902 +
3903 +- this_len = mm->env_end - (mm->env_start + src);
3904 ++ this_len = env_end - (env_start + src);
3905 +
3906 + max_len = min_t(size_t, PAGE_SIZE, count);
3907 + this_len = min(max_len, this_len);
3908 +
3909 +- retval = access_remote_vm(mm, (mm->env_start + src),
3910 ++ retval = access_remote_vm(mm, (env_start + src),
3911 + page, this_len, 0);
3912 +
3913 + if (retval <= 0) {
3914 +@@ -3075,6 +3082,44 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx)
3915 + return 0;
3916 + }
3917 +
3918 ++/*
3919 ++ * proc_tid_comm_permission is a special permission function exclusively
3920 ++ * used for the node /proc/<pid>/task/<tid>/comm.
3921 ++ * It bypasses generic permission checks in the case where a task of the same
3922 ++ * task group attempts to access the node.
3923 ++ * The rationale behind this is that glibc and bionic access this node for
3924 ++ * cross thread naming (pthread_set/getname_np(!self)). However, if
3925 ++ * PR_SET_DUMPABLE gets set to 0 this node among others becomes uid=0 gid=0,
3926 ++ * which locks out the cross thread naming implementation.
3927 ++ * This function makes sure that the node is always accessible for members of
3928 ++ * the same thread group.
3929 ++ */
3930 ++static int proc_tid_comm_permission(struct inode *inode, int mask)
3931 ++{
3932 ++ bool is_same_tgroup;
3933 ++ struct task_struct *task;
3934 ++
3935 ++ task = get_proc_task(inode);
3936 ++ if (!task)
3937 ++ return -ESRCH;
3938 ++ is_same_tgroup = same_thread_group(current, task);
3939 ++ put_task_struct(task);
3940 ++
3941 ++ if (likely(is_same_tgroup && !(mask & MAY_EXEC))) {
3942 ++ /* This file (/proc/<pid>/task/<tid>/comm) can always be
3943 ++ * read or written by the members of the corresponding
3944 ++ * thread group.
3945 ++ */
3946 ++ return 0;
3947 ++ }
3948 ++
3949 ++ return generic_permission(inode, mask);
3950 ++}
3951 ++
3952 ++static const struct inode_operations proc_tid_comm_inode_operations = {
3953 ++ .permission = proc_tid_comm_permission,
3954 ++};
3955 ++
3956 + /*
3957 + * Tasks
3958 + */
3959 +@@ -3093,7 +3138,9 @@ static const struct pid_entry tid_base_stuff[] = {
3960 + #ifdef CONFIG_SCHED_DEBUG
3961 + REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
3962 + #endif
3963 +- REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
3964 ++ NOD("comm", S_IFREG|S_IRUGO|S_IWUSR,
3965 ++ &proc_tid_comm_inode_operations,
3966 ++ &proc_pid_set_comm_operations, {}),
3967 + #ifdef CONFIG_HAVE_ARCH_TRACEHOOK
3968 + ONE("syscall", S_IRUSR, proc_pid_syscall),
3969 + #endif
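In the environ_read() hunk, env_start and env_end are sampled once under mmap_sem into locals, so the copy loop can no longer see the pair change underneath it (both are writable via prctl(PR_SET_MM)) and compute a bogus length. A sketch of the snapshot-under-lock pattern with a pthread rwlock standing in for mmap_sem; the struct and field names are illustrative:

#include <pthread.h>
#include <stdio.h>

struct mm_like {
        pthread_rwlock_t sem;           /* stands in for mmap_sem */
        unsigned long env_start, env_end;
};

/* Copy both bounds in one critical section, then work unlocked. */
static unsigned long env_length(struct mm_like *mm)
{
        unsigned long start, end;

        pthread_rwlock_rdlock(&mm->sem);
        start = mm->env_start;          /* consistent pair ... */
        end = mm->env_end;              /* ... taken together */
        pthread_rwlock_unlock(&mm->sem);

        return end - start;             /* safe even if a writer updates mm now */
}

int main(void)
{
        struct mm_like mm = { .env_start = 0x1000, .env_end = 0x1800 };

        pthread_rwlock_init(&mm.sem, NULL);
        printf("environment spans %lu bytes\n", env_length(&mm));
        return 0;
}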
3970 +diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
3971 +index 9155a5a0d3b9..df4661abadc4 100644
3972 +--- a/fs/proc/meminfo.c
3973 ++++ b/fs/proc/meminfo.c
3974 +@@ -57,11 +57,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
3975 + /*
3976 + * Estimate the amount of memory available for userspace allocations,
3977 + * without causing swapping.
3978 +- *
3979 +- * Free memory cannot be taken below the low watermark, before the
3980 +- * system starts swapping.
3981 + */
3982 +- available = i.freeram - wmark_low;
3983 ++ available = i.freeram - totalreserve_pages;
3984 +
3985 + /*
3986 + * Not all the page cache can be freed, otherwise the system will
3987 +diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
3988 +index bf2d34c9d804..f0d8b1c51343 100644
3989 +--- a/include/asm-generic/futex.h
3990 ++++ b/include/asm-generic/futex.h
3991 +@@ -13,7 +13,7 @@
3992 + */
3993 +
3994 + /**
3995 +- * futex_atomic_op_inuser() - Atomic arithmetic operation with constant
3996 ++ * arch_futex_atomic_op_inuser() - Atomic arithmetic operation with constant
3997 + * argument and comparison of the previous
3998 + * futex value with another constant.
3999 + *
4000 +@@ -25,18 +25,11 @@
4001 + * <0 - On error
4002 + */
4003 + static inline int
4004 +-futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
4005 ++arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
4006 + {
4007 +- int op = (encoded_op >> 28) & 7;
4008 +- int cmp = (encoded_op >> 24) & 15;
4009 +- int oparg = (encoded_op << 8) >> 20;
4010 +- int cmparg = (encoded_op << 20) >> 20;
4011 + int oldval, ret;
4012 + u32 tmp;
4013 +
4014 +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
4015 +- oparg = 1 << oparg;
4016 +-
4017 + preempt_disable();
4018 + pagefault_disable();
4019 +
4020 +@@ -74,17 +67,9 @@ out_pagefault_enable:
4021 + pagefault_enable();
4022 + preempt_enable();
4023 +
4024 +- if (ret == 0) {
4025 +- switch (cmp) {
4026 +- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
4027 +- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
4028 +- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
4029 +- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
4030 +- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
4031 +- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
4032 +- default: ret = -ENOSYS;
4033 +- }
4034 +- }
4035 ++ if (ret == 0)
4036 ++ *oval = oldval;
4037 ++
4038 + return ret;
4039 + }
4040 +
4041 +@@ -126,18 +111,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
4042 +
4043 + #else
4044 + static inline int
4045 +-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
4046 ++arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
4047 + {
4048 +- int op = (encoded_op >> 28) & 7;
4049 +- int cmp = (encoded_op >> 24) & 15;
4050 +- int oparg = (encoded_op << 8) >> 20;
4051 +- int cmparg = (encoded_op << 20) >> 20;
4052 + int oldval = 0, ret;
4053 +- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
4054 +- oparg = 1 << oparg;
4055 +-
4056 +- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
4057 +- return -EFAULT;
4058 +
4059 + pagefault_disable();
4060 +
4061 +@@ -153,17 +129,9 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
4062 +
4063 + pagefault_enable();
4064 +
4065 +- if (!ret) {
4066 +- switch (cmp) {
4067 +- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
4068 +- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
4069 +- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
4070 +- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
4071 +- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
4072 +- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
4073 +- default: ret = -ENOSYS;
4074 +- }
4075 +- }
4076 ++ if (!ret)
4077 ++ *oval = oldval;
4078 ++
4079 + return ret;
4080 + }
4081 +
4082 +diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
4083 +index c47c68e535e8..a16d1851cfb1 100644
4084 +--- a/include/linux/dmaengine.h
4085 ++++ b/include/linux/dmaengine.h
4086 +@@ -767,6 +767,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
4087 + sg_dma_address(&sg) = buf;
4088 + sg_dma_len(&sg) = len;
4089 +
4090 ++ if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
4091 ++ return NULL;
4092 ++
4093 + return chan->device->device_prep_slave_sg(chan, &sg, 1,
4094 + dir, flags, NULL);
4095 + }
4096 +@@ -775,6 +778,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
4097 + struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
4098 + enum dma_transfer_direction dir, unsigned long flags)
4099 + {
4100 ++ if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
4101 ++ return NULL;
4102 ++
4103 + return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
4104 + dir, flags, NULL);
4105 + }
4106 +@@ -786,6 +792,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
4107 + enum dma_transfer_direction dir, unsigned long flags,
4108 + struct rio_dma_ext *rio_ext)
4109 + {
4110 ++ if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
4111 ++ return NULL;
4112 ++
4113 + return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
4114 + dir, flags, rio_ext);
4115 + }
4116 +@@ -796,6 +805,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
4117 + size_t period_len, enum dma_transfer_direction dir,
4118 + unsigned long flags)
4119 + {
4120 ++ if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic)
4121 ++ return NULL;
4122 ++
4123 + return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
4124 + period_len, dir, flags);
4125 + }
4126 +@@ -804,6 +816,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
4127 + struct dma_chan *chan, struct dma_interleaved_template *xt,
4128 + unsigned long flags)
4129 + {
4130 ++ if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma)
4131 ++ return NULL;
4132 ++
4133 + return chan->device->device_prep_interleaved_dma(chan, xt, flags);
4134 + }
4135 +
4136 +@@ -811,7 +826,7 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
4137 + struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
4138 + unsigned long flags)
4139 + {
4140 +- if (!chan || !chan->device)
4141 ++ if (!chan || !chan->device || !chan->device->device_prep_dma_memset)
4142 + return NULL;
4143 +
4144 + return chan->device->device_prep_dma_memset(chan, dest, value,
4145 +@@ -824,6 +839,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
4146 + struct scatterlist *src_sg, unsigned int src_nents,
4147 + unsigned long flags)
4148 + {
4149 ++ if (!chan || !chan->device || !chan->device->device_prep_dma_sg)
4150 ++ return NULL;
4151 ++
4152 + return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
4153 + src_sg, src_nents, flags);
4154 + }
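The dmaengine.h hunks make each prep helper validate the channel, its device, and the specific callback before the indirect call, returning NULL instead of dereferencing a missing optional operation. The same guard-before-indirect-call shape in a self-contained form; the struct names here are hypothetical:

#include <stdio.h>
#include <stddef.h>

struct device_ops {
        int (*prep_cyclic)(int len);    /* optional; may be NULL */
};

struct chan {
        struct device_ops *ops;
};

static int prep_cyclic(struct chan *c, int len)
{
        /* refuse cleanly if any link in the chain is missing */
        if (!c || !c->ops || !c->ops->prep_cyclic)
                return -1;
        return c->ops->prep_cyclic(len);
}

static int do_prep(int len) { return len * 2; }

int main(void)
{
        struct device_ops full = { .prep_cyclic = do_prep };
        struct device_ops bare = { .prep_cyclic = NULL };
        struct chan a = { &full }, b = { &bare };

        printf("full: %d, bare: %d, null chan: %d\n",
               prep_cyclic(&a, 8), prep_cyclic(&b, 8), prep_cyclic(NULL, 8));
        return 0;
}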
4155 +diff --git a/include/linux/efi.h b/include/linux/efi.h
4156 +index 47be3ad7d3e5..7af95b58ebf3 100644
4157 +--- a/include/linux/efi.h
4158 ++++ b/include/linux/efi.h
4159 +@@ -364,8 +364,8 @@ typedef struct {
4160 + u32 attributes;
4161 + u32 get_bar_attributes;
4162 + u32 set_bar_attributes;
4163 +- uint64_t romsize;
4164 +- void *romimage;
4165 ++ u64 romsize;
4166 ++ u32 romimage;
4167 + } efi_pci_io_protocol_32;
4168 +
4169 + typedef struct {
4170 +@@ -384,8 +384,8 @@ typedef struct {
4171 + u64 attributes;
4172 + u64 get_bar_attributes;
4173 + u64 set_bar_attributes;
4174 +- uint64_t romsize;
4175 +- void *romimage;
4176 ++ u64 romsize;
4177 ++ u64 romimage;
4178 + } efi_pci_io_protocol_64;
4179 +
4180 + typedef struct {
4181 +diff --git a/include/linux/signal.h b/include/linux/signal.h
4182 +index d80259afb9e5..bcc094cb697c 100644
4183 +--- a/include/linux/signal.h
4184 ++++ b/include/linux/signal.h
4185 +@@ -97,6 +97,23 @@ static inline int sigisemptyset(sigset_t *set)
4186 + }
4187 + }
4188 +
4189 ++static inline int sigequalsets(const sigset_t *set1, const sigset_t *set2)
4190 ++{
4191 ++ switch (_NSIG_WORDS) {
4192 ++ case 4:
4193 ++ return (set1->sig[3] == set2->sig[3]) &&
4194 ++ (set1->sig[2] == set2->sig[2]) &&
4195 ++ (set1->sig[1] == set2->sig[1]) &&
4196 ++ (set1->sig[0] == set2->sig[0]);
4197 ++ case 2:
4198 ++ return (set1->sig[1] == set2->sig[1]) &&
4199 ++ (set1->sig[0] == set2->sig[0]);
4200 ++ case 1:
4201 ++ return set1->sig[0] == set2->sig[0];
4202 ++ }
4203 ++ return 0;
4204 ++}
4205 ++
4206 + #define sigmask(sig) (1UL << ((sig) - 1))
4207 +
4208 + #ifndef __HAVE_ARCH_SIG_SETOPS
4209 +diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
4210 +index f0f1793cfa49..115216ec7cfe 100644
4211 +--- a/include/linux/timekeeper_internal.h
4212 ++++ b/include/linux/timekeeper_internal.h
4213 +@@ -56,7 +56,7 @@ struct tk_read_base {
4214 + * interval.
4215 + * @xtime_remainder: Shifted nano seconds left over when rounding
4216 + * @cycle_interval
4217 +- * @raw_interval: Raw nano seconds accumulated per NTP interval.
4218 ++ * @raw_interval: Shifted raw nano seconds accumulated per NTP interval.
4219 + * @ntp_error: Difference between accumulated time and NTP time in ntp
4220 + * shifted nano seconds.
4221 + * @ntp_error_shift: Shift conversion between clock shifted nano seconds and
4222 +@@ -97,7 +97,7 @@ struct timekeeper {
4223 + cycle_t cycle_interval;
4224 + u64 xtime_interval;
4225 + s64 xtime_remainder;
4226 +- u32 raw_interval;
4227 ++ u64 raw_interval;
4228 + /* The ntp_tick_length() value currently being used.
4229 + * This cached copy ensures we consistently apply the tick
4230 + * length for an entire tick, as ntp_tick_length may change
4231 +diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
4232 +index bce990f5a35d..d6be935caa50 100644
4233 +--- a/include/trace/events/xen.h
4234 ++++ b/include/trace/events/xen.h
4235 +@@ -377,22 +377,6 @@ DECLARE_EVENT_CLASS(xen_mmu_pgd,
4236 + DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin);
4237 + DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin);
4238 +
4239 +-TRACE_EVENT(xen_mmu_flush_tlb_all,
4240 +- TP_PROTO(int x),
4241 +- TP_ARGS(x),
4242 +- TP_STRUCT__entry(__array(char, x, 0)),
4243 +- TP_fast_assign((void)x),
4244 +- TP_printk("%s", "")
4245 +- );
4246 +-
4247 +-TRACE_EVENT(xen_mmu_flush_tlb,
4248 +- TP_PROTO(int x),
4249 +- TP_ARGS(x),
4250 +- TP_STRUCT__entry(__array(char, x, 0)),
4251 +- TP_fast_assign((void)x),
4252 +- TP_printk("%s", "")
4253 +- );
4254 +-
4255 + TRACE_EVENT(xen_mmu_flush_tlb_single,
4256 + TP_PROTO(unsigned long addr),
4257 + TP_ARGS(addr),
4258 +diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
4259 +index 1f0b4cf5dd03..f4227173b5d8 100644
4260 +--- a/include/uapi/linux/nl80211.h
4261 ++++ b/include/uapi/linux/nl80211.h
4262 +@@ -2195,6 +2195,8 @@ enum nl80211_attrs {
4263 + #define NL80211_ATTR_KEYS NL80211_ATTR_KEYS
4264 + #define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS
4265 +
4266 ++#define NL80211_WIPHY_NAME_MAXLEN 128
4267 ++
4268 + #define NL80211_MAX_SUPP_RATES 32
4269 + #define NL80211_MAX_SUPP_HT_RATES 77
4270 + #define NL80211_MAX_SUPP_REG_RULES 64
4271 +diff --git a/kernel/auditsc.c b/kernel/auditsc.c
4272 +index 4bdea31cf6ce..7444f95f3ee9 100644
4273 +--- a/kernel/auditsc.c
4274 ++++ b/kernel/auditsc.c
4275 +@@ -1981,14 +1981,15 @@ static void audit_log_set_loginuid(kuid_t koldloginuid, kuid_t kloginuid,
4276 + if (!audit_enabled)
4277 + return;
4278 +
4279 ++ ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN);
4280 ++ if (!ab)
4281 ++ return;
4282 ++
4283 + uid = from_kuid(&init_user_ns, task_uid(current));
4284 + oldloginuid = from_kuid(&init_user_ns, koldloginuid);
4285 + loginuid = from_kuid(&init_user_ns, kloginuid),
4286 + tty = audit_get_tty(current);
4287 +
4288 +- ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN);
4289 +- if (!ab)
4290 +- return;
4291 + audit_log_format(ab, "pid=%d uid=%u", task_pid_nr(current), uid);
4292 + audit_log_task_context(ab);
4293 + audit_log_format(ab, " old-auid=%u auid=%u tty=%s old-ses=%u ses=%u res=%d",
4294 +diff --git a/kernel/exit.c b/kernel/exit.c
4295 +index ffba5df4abd5..f20e6339761b 100644
4296 +--- a/kernel/exit.c
4297 ++++ b/kernel/exit.c
4298 +@@ -1608,6 +1608,10 @@ SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
4299 + __WNOTHREAD|__WCLONE|__WALL))
4300 + return -EINVAL;
4301 +
4302 ++ /* -INT_MIN is not defined */
4303 ++ if (upid == INT_MIN)
4304 ++ return -ESRCH;
4305 ++
4306 + if (upid == -1)
4307 + type = PIDTYPE_MAX;
4308 + else if (upid < 0) {
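wait4() turns a negative upid into a process-group id by negating it, but -INT_MIN is not representable in an int, so that negation is undefined behaviour; the hunk rejects INT_MIN up front with -ESRCH. A small demonstration of the guard (the unguarded negation is left commented out precisely because it is UB):

#include <limits.h>
#include <stdio.h>

int main(void)
{
        int upid = INT_MIN;

        /* int pgid = -upid;  <- undefined: -INT_MIN does not fit in an int */
        if (upid == INT_MIN) {
                printf("rejecting upid=%d: -upid would overflow\n", upid);
                return 1;       /* the kernel returns -ESRCH here */
        }
        printf("pgid = %d\n", -upid);
        return 0;
}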
4309 +diff --git a/kernel/futex.c b/kernel/futex.c
4310 +index a12aa6785361..a26d217c99fe 100644
4311 +--- a/kernel/futex.c
4312 ++++ b/kernel/futex.c
4313 +@@ -666,13 +666,14 @@ again:
4314 + * this reference was taken by ihold under the page lock
4315 + * pinning the inode in place so i_lock was unnecessary. The
4316 + * only way for this check to fail is if the inode was
4317 +- * truncated in parallel so warn for now if this happens.
4318 ++ * truncated in parallel which is almost certainly an
4319 ++ * application bug. In such a case, just retry.
4320 + *
4321 + * We are not calling into get_futex_key_refs() in file-backed
4322 + * cases, therefore a successful atomic_inc return below will
4323 + * guarantee that get_futex_key() will still imply smp_mb(); (B).
4324 + */
4325 +- if (WARN_ON_ONCE(!atomic_inc_not_zero(&inode->i_count))) {
4326 ++ if (!atomic_inc_not_zero(&inode->i_count)) {
4327 + rcu_read_unlock();
4328 + put_page(page_head);
4329 +
4330 +@@ -1452,6 +1453,45 @@ out:
4331 + return ret;
4332 + }
4333 +
4334 ++static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
4335 ++{
4336 ++ unsigned int op = (encoded_op & 0x70000000) >> 28;
4337 ++ unsigned int cmp = (encoded_op & 0x0f000000) >> 24;
4338 ++ int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
4339 ++ int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
4340 ++ int oldval, ret;
4341 ++
4342 ++ if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
4343 ++ if (oparg < 0 || oparg > 31)
4344 ++ return -EINVAL;
4345 ++ oparg = 1 << oparg;
4346 ++ }
4347 ++
4348 ++ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
4349 ++ return -EFAULT;
4350 ++
4351 ++ ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
4352 ++ if (ret)
4353 ++ return ret;
4354 ++
4355 ++ switch (cmp) {
4356 ++ case FUTEX_OP_CMP_EQ:
4357 ++ return oldval == cmparg;
4358 ++ case FUTEX_OP_CMP_NE:
4359 ++ return oldval != cmparg;
4360 ++ case FUTEX_OP_CMP_LT:
4361 ++ return oldval < cmparg;
4362 ++ case FUTEX_OP_CMP_GE:
4363 ++ return oldval >= cmparg;
4364 ++ case FUTEX_OP_CMP_LE:
4365 ++ return oldval <= cmparg;
4366 ++ case FUTEX_OP_CMP_GT:
4367 ++ return oldval > cmparg;
4368 ++ default:
4369 ++ return -ENOSYS;
4370 ++ }
4371 ++}
4372 ++
4373 + /*
4374 + * Wake up all waiters hashed on the physical page that is mapped
4375 + * to this virtual address:
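The new common futex_atomic_op_inuser() decodes encoded_op exactly once and uses sign_extend32() so negative 12-bit oparg/cmparg values are recovered portably, where the old per-arch shift pairs leaned on implementation-defined signed shifts; it also rejects out-of-range shift amounts for FUTEX_OP_OPARG_SHIFT. A worked decode, with sign_extend32() open-coded the way the kernel defines it (this assumes an arithmetic right shift, which gcc provides):

#include <stdio.h>
#include <stdint.h>

/* kernel-style sign_extend32 */
static int32_t sign_extend32(uint32_t value, int index)
{
        uint8_t shift = 31 - index;
        return (int32_t)(value << shift) >> shift;
}

int main(void)
{
        /* FUTEX_OP(op=ADD(1), oparg=-1, cmp=CMP_GT(5), cmparg=0) */
        unsigned int encoded_op = (1u << 28) | (5u << 24) | (0xfffu << 12);

        unsigned int op  = (encoded_op & 0x70000000) >> 28;
        unsigned int cmp = (encoded_op & 0x0f000000) >> 24;
        int oparg  = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
        int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);

        printf("op=%u cmp=%u oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
        /* prints: op=1 cmp=5 oparg=-1 cmparg=0 */
        return 0;
}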
4376 +diff --git a/kernel/signal.c b/kernel/signal.c
4377 +index 4a548c6a4118..7d75bc2d042f 100644
4378 +--- a/kernel/signal.c
4379 ++++ b/kernel/signal.c
4380 +@@ -2495,6 +2495,13 @@ void __set_current_blocked(const sigset_t *newset)
4381 + {
4382 + struct task_struct *tsk = current;
4383 +
4384 ++ /*
4385 ++ * In case the signal mask hasn't changed, there is nothing we need
4386 ++ * to do. The current->blocked shouldn't be modified by another task.
4387 ++ */
4388 ++ if (sigequalsets(&tsk->blocked, newset))
4389 ++ return;
4390 ++
4391 + spin_lock_irq(&tsk->sighand->siglock);
4392 + __set_task_blocked(tsk, newset);
4393 + spin_unlock_irq(&tsk->sighand->siglock);
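__set_current_blocked() now returns before touching siglock when the new mask equals the current one, which short-circuits the common sigprocmask() save/restore pattern; sigequalsets() compares the fixed number of words directly rather than calling memcmp. A two-word illustration with the set layout simplified:

#include <stdio.h>

#define NSIG_WORDS 2    /* e.g. a 64-bit sigset on a 32-bit arch */

struct sigset_like { unsigned long sig[NSIG_WORDS]; };

static int sigequalsets(const struct sigset_like *a, const struct sigset_like *b)
{
        return a->sig[1] == b->sig[1] && a->sig[0] == b->sig[0];
}

static void set_blocked(struct sigset_like *cur, const struct sigset_like *new)
{
        if (sigequalsets(cur, new)) {
                printf("mask unchanged, skipping lock\n");
                return;         /* fast path: no lock, no update */
        }
        /* slow path: this is where siglock would be taken */
        *cur = *new;
        printf("mask updated\n");
}

int main(void)
{
        struct sigset_like cur = { { 0x4, 0 } }, same = { { 0x4, 0 } },
                           other = { { 0x5, 0 } };

        set_blocked(&cur, &same);       /* fast path */
        set_blocked(&cur, &other);      /* slow path */
        return 0;
}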
4394 +diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
4395 +index d2a20e83ebae..22d7454b387b 100644
4396 +--- a/kernel/time/tick-broadcast.c
4397 ++++ b/kernel/time/tick-broadcast.c
4398 +@@ -610,6 +610,14 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
4399 + now = ktime_get();
4400 + /* Find all expired events */
4401 + for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
4402 ++ /*
4403 ++ * Required for !SMP because for_each_cpu() unconditionally
4404 ++ * reports CPU0 as set on UP kernels.
4405 ++ */
4406 ++ if (!IS_ENABLED(CONFIG_SMP) &&
4407 ++ cpumask_empty(tick_broadcast_oneshot_mask))
4408 ++ break;
4409 ++
4410 + td = &per_cpu(tick_cpu_device, cpu);
4411 + if (td->evtdev->next_event.tv64 <= now.tv64) {
4412 + cpumask_set_cpu(cpu, tmpmask);
4413 +diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
4414 +index 6e4866834d26..fed86b2dfc89 100644
4415 +--- a/kernel/time/timekeeping.c
4416 ++++ b/kernel/time/timekeeping.c
4417 +@@ -277,8 +277,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
4418 + /* Go back from cycles -> shifted ns */
4419 + tk->xtime_interval = (u64) interval * clock->mult;
4420 + tk->xtime_remainder = ntpinterval - tk->xtime_interval;
4421 +- tk->raw_interval =
4422 +- ((u64) interval * clock->mult) >> clock->shift;
4423 ++ tk->raw_interval = interval * clock->mult;
4424 +
4425 + /* if changing clocks, convert xtime_nsec shift units */
4426 + if (old_clock) {
4427 +@@ -1767,7 +1766,7 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
4428 + unsigned int *clock_set)
4429 + {
4430 + cycle_t interval = tk->cycle_interval << shift;
4431 +- u64 raw_nsecs;
4432 ++ u64 snsec_per_sec;
4433 +
4434 + /* If the offset is smaller than a shifted interval, do nothing */
4435 + if (offset < interval)
4436 +@@ -1782,14 +1781,15 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
4437 + *clock_set |= accumulate_nsecs_to_secs(tk);
4438 +
4439 + /* Accumulate raw time */
4440 +- raw_nsecs = (u64)tk->raw_interval << shift;
4441 +- raw_nsecs += tk->raw_time.tv_nsec;
4442 +- if (raw_nsecs >= NSEC_PER_SEC) {
4443 +- u64 raw_secs = raw_nsecs;
4444 +- raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
4445 +- tk->raw_time.tv_sec += raw_secs;
4446 ++ tk->tkr_raw.xtime_nsec += (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
4447 ++ tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
4448 ++ snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
4449 ++ while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
4450 ++ tk->tkr_raw.xtime_nsec -= snsec_per_sec;
4451 ++ tk->raw_time.tv_sec++;
4452 + }
4453 +- tk->raw_time.tv_nsec = raw_nsecs;
4454 ++ tk->raw_time.tv_nsec = tk->tkr_raw.xtime_nsec >> tk->tkr_raw.shift;
4455 ++ tk->tkr_raw.xtime_nsec -= (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
4456 +
4457 + /* Accumulate error between NTP and clock interval */
4458 + tk->ntp_error += tk->ntp_tick << shift;
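The timekeeping hunks stop rounding raw_interval down to whole nanoseconds at setup; raw time is now accumulated in clocksource-shifted nanoseconds, and the sub-nanosecond remainder is carried across ticks instead of being truncated away, removing a slow drift of CLOCK_MONOTONIC_RAW. A model of the shifted accumulation; the shift and interval values are made up for the demo:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
        unsigned int shift = 8;                 /* example clocksource shift */
        uint64_t raw_interval = (999999999ULL << shift) + 137; /* shifted ns/tick */
        uint64_t xtime_nsec = 0;                /* shifted-ns accumulator */
        uint64_t secs = 0, snsec_per_sec = NSEC_PER_SEC << shift;
        int tick;

        for (tick = 0; tick < 1000; tick++) {
                xtime_nsec += raw_interval;
                while (xtime_nsec >= snsec_per_sec) {   /* carry whole seconds */
                        xtime_nsec -= snsec_per_sec;
                        secs++;
                }
        }
        /* the 137/256 ns per tick is retained, not truncated away */
        printf("raw time: %llu s + %llu ns (remainder %llu shifted units)\n",
               (unsigned long long)secs,
               (unsigned long long)(xtime_nsec >> shift),
               (unsigned long long)(xtime_nsec & ((1ULL << shift) - 1)));
        return 0;
}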
4459 +diff --git a/mm/Kconfig b/mm/Kconfig
4460 +index 97a4e06b15c0..5753f69b23f4 100644
4461 +--- a/mm/Kconfig
4462 ++++ b/mm/Kconfig
4463 +@@ -628,6 +628,7 @@ config DEFERRED_STRUCT_PAGE_INIT
4464 + default n
4465 + depends on ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
4466 + depends on MEMORY_HOTPLUG
4467 ++ depends on !NEED_PER_CPU_KM
4468 + help
4469 + Ordinarily all struct pages are initialised during early boot in a
4470 + single thread. On very large machines this can take a considerable
4471 +diff --git a/mm/filemap.c b/mm/filemap.c
4472 +index b15f1d8bba43..21e750b6e810 100644
4473 +--- a/mm/filemap.c
4474 ++++ b/mm/filemap.c
4475 +@@ -1581,6 +1581,15 @@ find_page:
4476 + index, last_index - index);
4477 + }
4478 + if (!PageUptodate(page)) {
4479 ++ /*
4480 ++ * See comment in do_read_cache_page on why
4481 ++ * wait_on_page_locked is used to avoid unnecessary
4482 ++ * serialisations and why it's safe.
4483 ++ */
4484 ++ wait_on_page_locked_killable(page);
4485 ++ if (PageUptodate(page))
4486 ++ goto page_ok;
4487 ++
4488 + if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
4489 + !mapping->a_ops->is_partially_uptodate)
4490 + goto page_not_up_to_date;
4491 +@@ -2215,7 +2224,7 @@ static struct page *wait_on_page_read(struct page *page)
4492 + return page;
4493 + }
4494 +
4495 +-static struct page *__read_cache_page(struct address_space *mapping,
4496 ++static struct page *do_read_cache_page(struct address_space *mapping,
4497 + pgoff_t index,
4498 + int (*filler)(void *, struct page *),
4499 + void *data,
4500 +@@ -2237,53 +2246,74 @@ repeat:
4501 + /* Presumably ENOMEM for radix tree node */
4502 + return ERR_PTR(err);
4503 + }
4504 ++
4505 ++filler:
4506 + err = filler(data, page);
4507 + if (err < 0) {
4508 + page_cache_release(page);
4509 +- page = ERR_PTR(err);
4510 +- } else {
4511 +- page = wait_on_page_read(page);
4512 ++ return ERR_PTR(err);
4513 + }
4514 +- }
4515 +- return page;
4516 +-}
4517 +
4518 +-static struct page *do_read_cache_page(struct address_space *mapping,
4519 +- pgoff_t index,
4520 +- int (*filler)(void *, struct page *),
4521 +- void *data,
4522 +- gfp_t gfp)
4523 +-
4524 +-{
4525 +- struct page *page;
4526 +- int err;
4527 ++ page = wait_on_page_read(page);
4528 ++ if (IS_ERR(page))
4529 ++ return page;
4530 ++ goto out;
4531 ++ }
4532 ++ if (PageUptodate(page))
4533 ++ goto out;
4534 +
4535 +-retry:
4536 +- page = __read_cache_page(mapping, index, filler, data, gfp);
4537 +- if (IS_ERR(page))
4538 +- return page;
4539 ++ /*
4540 ++ * Page is not up to date and may be locked due to one of the following
4541 ++ * case a: Page is being filled and the page lock is held
4542 ++ * case b: Read/write error clearing the page uptodate status
4543 ++ * case c: Truncation in progress (page locked)
4544 ++ * case d: Reclaim in progress
4545 ++ *
4546 ++ * Case a, the page will be up to date when the page is unlocked.
4547 ++ * There is no need to serialise on the page lock here as the page
4548 ++ * is pinned so the lock gives no additional protection. Even if
4549 ++ * the page is truncated, the data is still valid if PageUptodate as
4550 ++ * it's a read vs truncate race.
4551 ++ * Case b, the page will not be up to date
4552 ++ * Case c, the page may be truncated but in itself, the data may still
4553 ++ * be valid after IO completes as it's a read vs truncate race. The
4554 ++ * operation must restart if the page is not uptodate on unlock but
4555 ++ * otherwise serialising on page lock to stabilise the mapping gives
4556 ++ * no additional guarantees to the caller as the page lock is
4557 ++ * released before return.
4558 ++ * Case d, similar to truncation. If reclaim holds the page lock, it
4559 ++ * will be a race with remove_mapping that determines if the mapping
4560 ++ * is valid on unlock but otherwise the data is valid and there is
4561 ++ * no need to serialise with page lock.
4562 ++ *
4563 ++ * As the page lock gives no additional guarantee, we optimistically
4564 ++ * wait on the page to be unlocked and check if it's up to date and
4565 ++ * use the page if it is. Otherwise, the page lock is required to
4566 ++ * distinguish between the different cases. The motivation is that we
4567 ++ * avoid spurious serialisations and wakeups when multiple processes
4568 ++ * wait on the same page for IO to complete.
4569 ++ */
4570 ++ wait_on_page_locked(page);
4571 + if (PageUptodate(page))
4572 + goto out;
4573 +
4574 ++ /* Distinguish between all the cases under the safety of the lock */
4575 + lock_page(page);
4576 ++
4577 ++ /* Case c or d, restart the operation */
4578 + if (!page->mapping) {
4579 + unlock_page(page);
4580 + page_cache_release(page);
4581 +- goto retry;
4582 ++ goto repeat;
4583 + }
4584 ++
4585 ++ /* Someone else locked and filled the page in a very small window */
4586 + if (PageUptodate(page)) {
4587 + unlock_page(page);
4588 + goto out;
4589 + }
4590 +- err = filler(data, page);
4591 +- if (err < 0) {
4592 +- page_cache_release(page);
4593 +- return ERR_PTR(err);
4594 +- } else {
4595 +- page = wait_on_page_read(page);
4596 +- if (IS_ERR(page))
4597 +- return page;
4598 +- }
4599 ++ goto filler;
4600 ++
4601 + out:
4602 + mark_page_accessed(page);
4603 + return page;
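Both filemap changes follow one idea: wait for the page to unlock without taking the lock, use it immediately if PageUptodate, and grab the page lock only when that cheap check cannot distinguish the remaining cases (truncation, reclaim, filler error). A miniature of that optimistic-check-then-lock pattern with C11 atomics; the flag and lock names are invented for the sketch:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct page_like {
        atomic_bool uptodate;
        pthread_mutex_t lock;   /* stands in for the page lock */
};

static int use_page(struct page_like *p)
{
        /* optimistic path: no lock needed just to observe the flag */
        if (atomic_load(&p->uptodate))
                return 0;

        /* slow path: lock to tell the remaining cases apart */
        pthread_mutex_lock(&p->lock);
        if (atomic_load(&p->uptodate)) {        /* filled in a small window */
                pthread_mutex_unlock(&p->lock);
                return 0;
        }
        /* would re-run the filler here */
        pthread_mutex_unlock(&p->lock);
        return -1;
}

int main(void)
{
        struct page_like p = { .uptodate = 1,
                               .lock = PTHREAD_MUTEX_INITIALIZER };

        printf("uptodate page: %d\n", use_page(&p));    /* 0, lock never taken */
        atomic_store(&p.uptodate, 0);
        printf("stale page: %d\n", use_page(&p));       /* -1 via locked path */
        return 0;
}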
4604 +diff --git a/mm/util.c b/mm/util.c
4605 +index 818bbae84721..5fae5b9c2885 100644
4606 +--- a/mm/util.c
4607 ++++ b/mm/util.c
4608 +@@ -428,17 +428,25 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
4609 + int res = 0;
4610 + unsigned int len;
4611 + struct mm_struct *mm = get_task_mm(task);
4612 ++ unsigned long arg_start, arg_end, env_start, env_end;
4613 + if (!mm)
4614 + goto out;
4615 + if (!mm->arg_end)
4616 + goto out_mm; /* Shh! No looking before we're done */
4617 +
4618 +- len = mm->arg_end - mm->arg_start;
4619 ++ down_read(&mm->mmap_sem);
4620 ++ arg_start = mm->arg_start;
4621 ++ arg_end = mm->arg_end;
4622 ++ env_start = mm->env_start;
4623 ++ env_end = mm->env_end;
4624 ++ up_read(&mm->mmap_sem);
4625 ++
4626 ++ len = arg_end - arg_start;
4627 +
4628 + if (len > buflen)
4629 + len = buflen;
4630 +
4631 +- res = access_process_vm(task, mm->arg_start, buffer, len, 0);
4632 ++ res = access_process_vm(task, arg_start, buffer, len, 0);
4633 +
4634 + /*
4635 + * If the nul at the end of args has been overwritten, then
4636 +@@ -449,10 +457,10 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
4637 + if (len < res) {
4638 + res = len;
4639 + } else {
4640 +- len = mm->env_end - mm->env_start;
4641 ++ len = env_end - env_start;
4642 + if (len > buflen - res)
4643 + len = buflen - res;
4644 +- res += access_process_vm(task, mm->env_start,
4645 ++ res += access_process_vm(task, env_start,
4646 + buffer+res, len, 0);
4647 + res = strnlen(buffer, res);
4648 + }
4649 +diff --git a/mm/vmscan.c b/mm/vmscan.c
4650 +index 930f7c67a9c1..12a69e6c10ba 100644
4651 +--- a/mm/vmscan.c
4652 ++++ b/mm/vmscan.c
4653 +@@ -2057,10 +2057,16 @@ static void get_scan_count(struct lruvec *lruvec, int swappiness,
4654 + }
4655 +
4656 + /*
4657 +- * There is enough inactive page cache, do not reclaim
4658 +- * anything from the anonymous working set right now.
4659 ++ * If there is enough inactive page cache, i.e. if the size of the
4660 ++ * inactive list is greater than that of the active list *and* the
4661 ++ * inactive list actually has some pages to scan on this priority, we
4662 ++ * do not reclaim anything from the anonymous working set right now.
4663 ++ * Without the second condition we could end up never scanning an
4664 ++ * lruvec even if it has plenty of old anonymous pages unless the
4665 ++ * system is under heavy pressure.
4666 + */
4667 +- if (!inactive_file_is_low(lruvec)) {
4668 ++ if (!inactive_file_is_low(lruvec) &&
4669 ++ get_lru_size(lruvec, LRU_INACTIVE_FILE) >> sc->priority) {
4670 + scan_balance = SCAN_FILE;
4671 + goto out;
4672 + }
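The added clause get_lru_size(lruvec, LRU_INACTIVE_FILE) >> sc->priority means the file-only shortcut is taken just when the inactive file list holds at least 2^priority pages; below that, anonymous pages are scanned too, so a small lruvec full of old anon is no longer skipped indefinitely. The thresholds worked out for an example list size:

#include <stdio.h>

int main(void)
{
        unsigned long inactive_file = 3000;     /* pages on the inactive file list */
        int priority;

        /* DEF_PRIORITY is 12; lower values mean more reclaim pressure */
        for (priority = 12; priority >= 0; priority -= 4)
                printf("priority %2d: need >= %4lu pages, have %lu -> %s\n",
                       priority, 1UL << priority, inactive_file,
                       (inactive_file >> priority) ? "scan file only"
                                                   : "scan anon too");
        return 0;
}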
4673 +diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
4674 +index ec02f5869a78..3400b1e47668 100644
4675 +--- a/net/bridge/br_if.c
4676 ++++ b/net/bridge/br_if.c
4677 +@@ -456,8 +456,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
4678 + if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit)
4679 + return -ELOOP;
4680 +
4681 +- /* Device is already being bridged */
4682 +- if (br_port_exists(dev))
4683 ++ /* Device has master upper dev */
4684 ++ if (netdev_master_upper_dev_get(dev))
4685 + return -EBUSY;
4686 +
4687 + /* No bridging devices that dislike that (e.g. wireless) */
4688 +diff --git a/net/compat.c b/net/compat.c
4689 +index 0ccf3ecf6bbb..17e97b106458 100644
4690 +--- a/net/compat.c
4691 ++++ b/net/compat.c
4692 +@@ -358,7 +358,8 @@ static int compat_sock_setsockopt(struct socket *sock, int level, int optname,
4693 + if (optname == SO_ATTACH_FILTER)
4694 + return do_set_attach_filter(sock, level, optname,
4695 + optval, optlen);
4696 +- if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
4697 ++ if (!COMPAT_USE_64BIT_TIME &&
4698 ++ (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO))
4699 + return do_set_sock_timeout(sock, level, optname, optval, optlen);
4700 +
4701 + return sock_setsockopt(sock, level, optname, optval, optlen);
4702 +@@ -423,7 +424,8 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
4703 + static int compat_sock_getsockopt(struct socket *sock, int level, int optname,
4704 + char __user *optval, int __user *optlen)
4705 + {
4706 +- if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
4707 ++ if (!COMPAT_USE_64BIT_TIME &&
4708 ++ (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO))
4709 + return do_get_sock_timeout(sock, level, optname, optval, optlen);
4710 + return sock_getsockopt(sock, level, optname, optval, optlen);
4711 + }
4712 +diff --git a/net/core/sock.c b/net/core/sock.c
4713 +index cd12cb6fe366..4238835a0e4e 100644
4714 +--- a/net/core/sock.c
4715 ++++ b/net/core/sock.c
4716 +@@ -1474,7 +1474,7 @@ void sk_destruct(struct sock *sk)
4717 +
4718 + static void __sk_free(struct sock *sk)
4719 + {
4720 +- if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt))
4721 ++ if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk)))
4722 + sock_diag_broadcast_destroy(sk);
4723 + else
4724 + sk_destruct(sk);
4725 +diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
4726 +index 7753681195c1..86a2ed0fb219 100644
4727 +--- a/net/dccp/ccids/ccid2.c
4728 ++++ b/net/dccp/ccids/ccid2.c
4729 +@@ -126,6 +126,16 @@ static void ccid2_change_l_seq_window(struct sock *sk, u64 val)
4730 + DCCPF_SEQ_WMAX));
4731 + }
4732 +
4733 ++static void dccp_tasklet_schedule(struct sock *sk)
4734 ++{
4735 ++ struct tasklet_struct *t = &dccp_sk(sk)->dccps_xmitlet;
4736 ++
4737 ++ if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
4738 ++ sock_hold(sk);
4739 ++ __tasklet_schedule(t);
4740 ++ }
4741 ++}
4742 ++
4743 + static void ccid2_hc_tx_rto_expire(unsigned long data)
4744 + {
4745 + struct sock *sk = (struct sock *)data;
4746 +@@ -166,7 +176,7 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
4747 +
4748 + /* if we were blocked before, we may now send cwnd=1 packet */
4749 + if (sender_was_blocked)
4750 +- tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
4751 ++ dccp_tasklet_schedule(sk);
4752 + /* restart backed-off timer */
4753 + sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
4754 + out:
4755 +@@ -706,7 +716,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
4756 + done:
4757 + /* check if incoming Acks allow pending packets to be sent */
4758 + if (sender_was_blocked && !ccid2_cwnd_network_limited(hc))
4759 +- tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
4760 ++ dccp_tasklet_schedule(sk);
4761 + dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
4762 + }
4763 +
4764 +diff --git a/net/dccp/timer.c b/net/dccp/timer.c
4765 +index 3ef7acef3ce8..aa7c7dad7f96 100644
4766 +--- a/net/dccp/timer.c
4767 ++++ b/net/dccp/timer.c
4768 +@@ -230,12 +230,12 @@ static void dccp_write_xmitlet(unsigned long data)
4769 + else
4770 + dccp_write_xmit(sk);
4771 + bh_unlock_sock(sk);
4772 ++ sock_put(sk);
4773 + }
4774 +
4775 + static void dccp_write_xmit_timer(unsigned long data)
4776 + {
4777 + dccp_write_xmitlet(data);
4778 +- sock_put((struct sock *)data);
4779 + }
4780 +
4781 + void dccp_init_xmit_timers(struct sock *sk)
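The dccp hunks give the xmit tasklet ownership of a socket reference: dccp_tasklet_schedule() takes the hold only when it wins the TASKLET_STATE_SCHED bit (so a second schedule cannot double-hold), and the tasklet body drops it when it runs, fixing a use-after-free when the socket dies between schedule and run. The take-a-reference-iff-we-scheduled shape with C11 atomics; the names are illustrative:

#include <stdatomic.h>
#include <stdio.h>

struct sock_like {
        atomic_int refcnt;
        atomic_flag sched;      /* like TASKLET_STATE_SCHED */
};

static void schedule_work(struct sock_like *sk)
{
        /* only the caller that flips the flag takes the reference */
        if (!atomic_flag_test_and_set(&sk->sched)) {
                atomic_fetch_add(&sk->refcnt, 1);       /* sock_hold() */
                printf("scheduled, refcnt=%d\n", atomic_load(&sk->refcnt));
        } else {
                printf("already scheduled, no extra ref\n");
        }
}

static void work_fn(struct sock_like *sk)
{
        atomic_flag_clear(&sk->sched);                  /* run */
        atomic_fetch_sub(&sk->refcnt, 1);               /* sock_put() by worker */
        printf("ran, refcnt=%d\n", atomic_load(&sk->refcnt));
}

int main(void)
{
        struct sock_like sk = { .refcnt = 1, .sched = ATOMIC_FLAG_INIT };

        schedule_work(&sk);     /* takes the ref */
        schedule_work(&sk);     /* no-op: bit already set */
        work_fn(&sk);           /* drops exactly one ref */
        return 0;
}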
4782 +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
4783 +index 09c73dd541c5..10286432f684 100644
4784 +--- a/net/ipv4/ip_output.c
4785 ++++ b/net/ipv4/ip_output.c
4786 +@@ -1062,7 +1062,8 @@ alloc_new_skb:
4787 + if (copy > length)
4788 + copy = length;
4789 +
4790 +- if (!(rt->dst.dev->features&NETIF_F_SG)) {
4791 ++ if (!(rt->dst.dev->features&NETIF_F_SG) &&
4792 ++ skb_tailroom(skb) >= copy) {
4793 + unsigned int off;
4794 +
4795 + off = skb->len;
4796 +diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
4797 +index 37a3b05d175c..82c878224bfc 100644
4798 +--- a/net/ipv4/ping.c
4799 ++++ b/net/ipv4/ping.c
4800 +@@ -777,8 +777,10 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
4801 + ipc.addr = faddr = daddr;
4802 +
4803 + if (ipc.opt && ipc.opt->opt.srr) {
4804 +- if (!daddr)
4805 +- return -EINVAL;
4806 ++ if (!daddr) {
4807 ++ err = -EINVAL;
4808 ++ goto out_free;
4809 ++ }
4810 + faddr = ipc.opt->opt.faddr;
4811 + }
4812 + tos = get_rttos(&ipc, inet);
4813 +@@ -843,6 +845,7 @@ back_from_confirm:
4814 +
4815 + out:
4816 + ip_rt_put(rt);
4817 ++out_free:
4818 + if (free)
4819 + kfree(ipc.opt);
4820 + if (!err) {
4821 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
4822 +index b531a0997664..a0f0a7db946b 100644
4823 +--- a/net/ipv4/tcp.c
4824 ++++ b/net/ipv4/tcp.c
4825 +@@ -1108,7 +1108,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
4826 + lock_sock(sk);
4827 +
4828 + flags = msg->msg_flags;
4829 +- if (flags & MSG_FASTOPEN) {
4830 ++ if ((flags & MSG_FASTOPEN) && !tp->repair) {
4831 + err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
4832 + if (err == -EINPROGRESS && copied_syn > 0)
4833 + goto out;
4834 +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
4835 +index 39c2919fe0d3..2854db094864 100644
4836 +--- a/net/ipv4/tcp_output.c
4837 ++++ b/net/ipv4/tcp_output.c
4838 +@@ -2587,8 +2587,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
4839 + return -EBUSY;
4840 +
4841 + if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
4842 +- if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
4843 +- BUG();
4844 ++ if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
4845 ++ WARN_ON_ONCE(1);
4846 ++ return -EINVAL;
4847 ++ }
4848 + if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
4849 + return -ENOMEM;
4850 + }
4851 +@@ -3117,6 +3119,7 @@ static void tcp_connect_init(struct sock *sk)
4852 + sock_reset_flag(sk, SOCK_DONE);
4853 + tp->snd_wnd = 0;
4854 + tcp_init_wl(tp, 0);
4855 ++ tcp_write_queue_purge(sk);
4856 + tp->snd_una = tp->write_seq;
4857 + tp->snd_sml = tp->write_seq;
4858 + tp->snd_up = tp->write_seq;
4859 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
4860 +index a98ae890adb9..6f929689fd03 100644
4861 +--- a/net/ipv4/udp.c
4862 ++++ b/net/ipv4/udp.c
4863 +@@ -991,8 +991,10 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
4864 + ipc.addr = faddr = daddr;
4865 +
4866 + if (ipc.opt && ipc.opt->opt.srr) {
4867 +- if (!daddr)
4868 +- return -EINVAL;
4869 ++ if (!daddr) {
4870 ++ err = -EINVAL;
4871 ++ goto out_free;
4872 ++ }
4873 + faddr = ipc.opt->opt.faddr;
4874 + connected = 0;
4875 + }
4876 +@@ -1105,6 +1107,7 @@ do_append_data:
4877 +
4878 + out:
4879 + ip_rt_put(rt);
4880 ++out_free:
4881 + if (free)
4882 + kfree(ipc.opt);
4883 + if (!err)
4884 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
4885 +index bfa710e8b615..74786783834b 100644
4886 +--- a/net/ipv6/ip6_output.c
4887 ++++ b/net/ipv6/ip6_output.c
4888 +@@ -1529,7 +1529,8 @@ alloc_new_skb:
4889 + if (copy > length)
4890 + copy = length;
4891 +
4892 +- if (!(rt->dst.dev->features&NETIF_F_SG)) {
4893 ++ if (!(rt->dst.dev->features&NETIF_F_SG) &&
4894 ++ skb_tailroom(skb) >= copy) {
4895 + unsigned int off;
4896 +
4897 + off = skb->len;
4898 +diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
4899 +index ae3438685caa..fb3248ff8b48 100644
4900 +--- a/net/l2tp/l2tp_netlink.c
4901 ++++ b/net/l2tp/l2tp_netlink.c
4902 +@@ -732,8 +732,6 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
4903 +
4904 + if ((session->ifname[0] &&
4905 + nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) ||
4906 +- (session->offset &&
4907 +- nla_put_u16(skb, L2TP_ATTR_OFFSET, session->offset)) ||
4908 + (session->cookie_len &&
4909 + nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len,
4910 + &session->cookie[0])) ||
4911 +diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
4912 +index 09f2f3471ad6..83e8a295c806 100644
4913 +--- a/net/llc/af_llc.c
4914 ++++ b/net/llc/af_llc.c
4915 +@@ -926,6 +926,9 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
4916 + if (size > llc->dev->mtu)
4917 + size = llc->dev->mtu;
4918 + copied = size - hdrlen;
4919 ++ rc = -EINVAL;
4920 ++ if (copied < 0)
4921 ++ goto release;
4922 + release_sock(sk);
4923 + skb = sock_alloc_send_skb(sk, size, noblock, &rc);
4924 + lock_sock(sk);
4925 +diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
4926 +index 21e4d339217e..624c4719e404 100644
4927 +--- a/net/openvswitch/flow_netlink.c
4928 ++++ b/net/openvswitch/flow_netlink.c
4929 +@@ -1141,13 +1141,10 @@ static void nlattr_set(struct nlattr *attr, u8 val,
4930 +
4931 + /* The nlattr stream should already have been validated */
4932 + nla_for_each_nested(nla, attr, rem) {
4933 +- if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED) {
4934 +- if (tbl[nla_type(nla)].next)
4935 +- tbl = tbl[nla_type(nla)].next;
4936 +- nlattr_set(nla, val, tbl);
4937 +- } else {
4938 ++ if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED)
4939 ++ nlattr_set(nla, val, tbl[nla_type(nla)].next ? : tbl);
4940 ++ else
4941 + memset(nla_data(nla), val, nla_len(nla));
4942 +- }
4943 +
4944 + if (nla_type(nla) == OVS_KEY_ATTR_CT_STATE)
4945 + *(u32 *)nla_data(nla) &= CT_SUPPORTED_MASK;
4946 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
4947 +index f165514a4db5..392d4e2c0a24 100644
4948 +--- a/net/packet/af_packet.c
4949 ++++ b/net/packet/af_packet.c
4950 +@@ -2771,13 +2771,15 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
4951 + if (skb == NULL)
4952 + goto out_unlock;
4953 +
4954 +- skb_set_network_header(skb, reserve);
4955 ++ skb_reset_network_header(skb);
4956 +
4957 + err = -EINVAL;
4958 + if (sock->type == SOCK_DGRAM) {
4959 + offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
4960 + if (unlikely(offset < 0))
4961 + goto out_free;
4962 ++ } else if (reserve) {
4963 ++ skb_push(skb, reserve);
4964 + }
4965 +
4966 + /* Returns -EFAULT on error */
4967 +diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
4968 +index 3c6a47d66a04..117ed90c5f21 100644
4969 +--- a/net/sched/sch_fq.c
4970 ++++ b/net/sched/sch_fq.c
4971 +@@ -126,6 +126,28 @@ static bool fq_flow_is_detached(const struct fq_flow *f)
4972 + return f->next == &detached;
4973 + }
4974 +
4975 ++static bool fq_flow_is_throttled(const struct fq_flow *f)
4976 ++{
4977 ++ return f->next == &throttled;
4978 ++}
4979 ++
4980 ++static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
4981 ++{
4982 ++ if (head->first)
4983 ++ head->last->next = flow;
4984 ++ else
4985 ++ head->first = flow;
4986 ++ head->last = flow;
4987 ++ flow->next = NULL;
4988 ++}
4989 ++
4990 ++static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
4991 ++{
4992 ++ rb_erase(&f->rate_node, &q->delayed);
4993 ++ q->throttled_flows--;
4994 ++ fq_flow_add_tail(&q->old_flows, f);
4995 ++}
4996 ++
4997 + static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
4998 + {
4999 + struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
5000 +@@ -153,15 +175,6 @@ static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
5001 +
5002 + static struct kmem_cache *fq_flow_cachep __read_mostly;
5003 +
5004 +-static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
5005 +-{
5006 +- if (head->first)
5007 +- head->last->next = flow;
5008 +- else
5009 +- head->first = flow;
5010 +- head->last = flow;
5011 +- flow->next = NULL;
5012 +-}
5013 +
5014 + /* limit number of collected flows per round */
5015 + #define FQ_GC_MAX 8
5016 +@@ -265,6 +278,8 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
5017 + f->socket_hash != sk->sk_hash)) {
5018 + f->credit = q->initial_quantum;
5019 + f->socket_hash = sk->sk_hash;
5020 ++ if (fq_flow_is_throttled(f))
5021 ++ fq_flow_unset_throttled(q, f);
5022 + f->time_next_packet = 0ULL;
5023 + }
5024 + return f;
5025 +@@ -419,9 +434,7 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now)
5026 + q->time_next_delayed_flow = f->time_next_packet;
5027 + break;
5028 + }
5029 +- rb_erase(p, &q->delayed);
5030 +- q->throttled_flows--;
5031 +- fq_flow_add_tail(&q->old_flows, f);
5032 ++ fq_flow_unset_throttled(q, f);
5033 + }
5034 + }
5035 +
5036 +diff --git a/net/sctp/associola.c b/net/sctp/associola.c
5037 +index 559afd0ee7de..a40b8b0ef0d5 100644
5038 +--- a/net/sctp/associola.c
5039 ++++ b/net/sctp/associola.c
5040 +@@ -1000,9 +1000,10 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
5041 + struct sctp_endpoint *ep;
5042 + struct sctp_chunk *chunk;
5043 + struct sctp_inq *inqueue;
5044 +- int state;
5045 + sctp_subtype_t subtype;
5046 ++ int first_time = 1; /* is this the first time through the loop */
5047 + int error = 0;
5048 ++ int state;
5049 +
5050 + /* The association should be held so we should be safe. */
5051 + ep = asoc->ep;
5052 +@@ -1013,6 +1014,30 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
5053 + state = asoc->state;
5054 + subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
5055 +
5056 ++ /* If the first chunk in the packet is AUTH, do special
5057 ++ * processing specified in Section 6.3 of SCTP-AUTH spec
5058 ++ */
5059 ++ if (first_time && subtype.chunk == SCTP_CID_AUTH) {
5060 ++ struct sctp_chunkhdr *next_hdr;
5061 ++
5062 ++ next_hdr = sctp_inq_peek(inqueue);
5063 ++ if (!next_hdr)
5064 ++ goto normal;
5065 ++
5066 ++ /* If the next chunk is COOKIE-ECHO, skip the AUTH
5067 ++ * chunk while saving a pointer to it so we can do
5068 ++ * Authentication later (during cookie-echo
5069 ++ * processing).
5070 ++ */
5071 ++ if (next_hdr->type == SCTP_CID_COOKIE_ECHO) {
5072 ++ chunk->auth_chunk = skb_clone(chunk->skb,
5073 ++ GFP_ATOMIC);
5074 ++ chunk->auth = 1;
5075 ++ continue;
5076 ++ }
5077 ++ }
5078 ++
5079 ++normal:
5080 + /* SCTP-AUTH, Section 6.3:
5081 + * The receiver has a list of chunk types which it expects
5082 + * to be received only after an AUTH-chunk. This list has
5083 +@@ -1051,6 +1076,9 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
5084 + /* If there is an error on chunk, discard this packet. */
5085 + if (error && chunk)
5086 + chunk->pdiscard = 1;
5087 ++
5088 ++ if (first_time)
5089 ++ first_time = 0;
5090 + }
5091 + sctp_association_put(asoc);
5092 + }
5093 +diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
5094 +index 7e8a16c77039..8d9b7ad25b65 100644
5095 +--- a/net/sctp/inqueue.c
5096 ++++ b/net/sctp/inqueue.c
5097 +@@ -178,7 +178,7 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
5098 + skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
5099 + chunk->subh.v = NULL; /* Subheader is no longer valid. */
5100 +
5101 +- if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) <
5102 ++ if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) <=
5103 + skb_tail_pointer(chunk->skb)) {
5104 + /* This is not a singleton */
5105 + chunk->singleton = 0;
5106 +diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
5107 +index 1cd7b7e33fa3..5ca8309ea7b1 100644
5108 +--- a/net/sctp/ipv6.c
5109 ++++ b/net/sctp/ipv6.c
5110 +@@ -863,6 +863,9 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
5111 + if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2))
5112 + return 1;
5113 +
5114 ++ if (addr1->sa.sa_family == AF_INET && addr2->sa.sa_family == AF_INET)
5115 ++ return addr1->v4.sin_addr.s_addr == addr2->v4.sin_addr.s_addr;
5116 ++
5117 + return __sctp_v6_cmp_addr(addr1, addr2);
5118 + }
5119 +
5120 +diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
5121 +index 29c7c43de108..df9ac3746c1b 100644
5122 +--- a/net/sctp/sm_statefuns.c
5123 ++++ b/net/sctp/sm_statefuns.c
5124 +@@ -144,10 +144,8 @@ static sctp_disposition_t sctp_sf_violation_chunk(
5125 + void *arg,
5126 + sctp_cmd_seq_t *commands);
5127 +
5128 +-static sctp_ierror_t sctp_sf_authenticate(struct net *net,
5129 +- const struct sctp_endpoint *ep,
5130 ++static sctp_ierror_t sctp_sf_authenticate(
5131 + const struct sctp_association *asoc,
5132 +- const sctp_subtype_t type,
5133 + struct sctp_chunk *chunk);
5134 +
5135 + static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net,
5136 +@@ -615,6 +613,38 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(struct net *net,
5137 + return SCTP_DISPOSITION_CONSUME;
5138 + }
5139 +
5140 ++static bool sctp_auth_chunk_verify(struct net *net, struct sctp_chunk *chunk,
5141 ++ const struct sctp_association *asoc)
5142 ++{
5143 ++ struct sctp_chunk auth;
5144 ++
5145 ++ if (!chunk->auth_chunk)
5146 ++ return true;
5147 ++
5148 ++ /* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo
5149 ++ * is supposed to be authenticated and we have to do delayed
5150 ++ * authentication. We've just recreated the association using
5151 ++ * the information in the cookie and now it's much easier to
5152 ++ * do the authentication.
5153 ++ */
5154 ++
5155 ++ /* Make sure that we and the peer are AUTH capable */
5156 ++ if (!net->sctp.auth_enable || !asoc->peer.auth_capable)
5157 ++ return false;
5158 ++
5159 ++ /* set-up our fake chunk so that we can process it */
5160 ++ auth.skb = chunk->auth_chunk;
5161 ++ auth.asoc = chunk->asoc;
5162 ++ auth.sctp_hdr = chunk->sctp_hdr;
5163 ++ auth.chunk_hdr = (struct sctp_chunkhdr *)
5164 ++ skb_push(chunk->auth_chunk,
5165 ++ sizeof(struct sctp_chunkhdr));
5166 ++ skb_pull(chunk->auth_chunk, sizeof(struct sctp_chunkhdr));
5167 ++ auth.transport = chunk->transport;
5168 ++
5169 ++ return sctp_sf_authenticate(asoc, &auth) == SCTP_IERROR_NO_ERROR;
5170 ++}
5171 ++
5172 + /*
5173 + * Respond to a normal COOKIE ECHO chunk.
5174 + * We are the side that is being asked for an association.
5175 +@@ -751,36 +781,9 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
5176 + if (error)
5177 + goto nomem_init;
5178 +
5179 +- /* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo
5180 +- * is supposed to be authenticated and we have to do delayed
5181 +- * authentication. We've just recreated the association using
5182 +- * the information in the cookie and now it's much easier to
5183 +- * do the authentication.
5184 +- */
5185 +- if (chunk->auth_chunk) {
5186 +- struct sctp_chunk auth;
5187 +- sctp_ierror_t ret;
5188 +-
5189 +- /* Make sure that we and the peer are AUTH capable */
5190 +- if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) {
5191 +- sctp_association_free(new_asoc);
5192 +- return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
5193 +- }
5194 +-
5195 +- /* set-up our fake chunk so that we can process it */
5196 +- auth.skb = chunk->auth_chunk;
5197 +- auth.asoc = chunk->asoc;
5198 +- auth.sctp_hdr = chunk->sctp_hdr;
5199 +- auth.chunk_hdr = (sctp_chunkhdr_t *)skb_push(chunk->auth_chunk,
5200 +- sizeof(sctp_chunkhdr_t));
5201 +- skb_pull(chunk->auth_chunk, sizeof(sctp_chunkhdr_t));
5202 +- auth.transport = chunk->transport;
5203 +-
5204 +- ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth);
5205 +- if (ret != SCTP_IERROR_NO_ERROR) {
5206 +- sctp_association_free(new_asoc);
5207 +- return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
5208 +- }
5209 ++ if (!sctp_auth_chunk_verify(net, chunk, new_asoc)) {
5210 ++ sctp_association_free(new_asoc);
5211 ++ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
5212 + }
5213 +
5214 + repl = sctp_make_cookie_ack(new_asoc, chunk);
5215 +@@ -1717,13 +1720,15 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net,
5216 + GFP_ATOMIC))
5217 + goto nomem;
5218 +
5219 ++ if (!sctp_auth_chunk_verify(net, chunk, new_asoc))
5220 ++ return SCTP_DISPOSITION_DISCARD;
5221 ++
5222 + /* Make sure no new addresses are being added during the
5223 + * restart. Though this is a pretty complicated attack
5224 + * since you'd have to get inside the cookie.
5225 + */
5226 +- if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands)) {
5227 ++ if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands))
5228 + return SCTP_DISPOSITION_CONSUME;
5229 +- }
5230 +
5231 + /* If the endpoint is in the SHUTDOWN-ACK-SENT state and recognizes
5232 + * the peer has restarted (Action A), it MUST NOT setup a new
5233 +@@ -1828,6 +1833,9 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(struct net *net,
5234 + GFP_ATOMIC))
5235 + goto nomem;
5236 +
5237 ++ if (!sctp_auth_chunk_verify(net, chunk, new_asoc))
5238 ++ return SCTP_DISPOSITION_DISCARD;
5239 ++
5240 + /* Update the content of current association. */
5241 + sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
5242 + sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
5243 +@@ -1920,6 +1928,9 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(struct net *net,
5244 + * a COOKIE ACK.
5245 + */
5246 +
5247 ++ if (!sctp_auth_chunk_verify(net, chunk, asoc))
5248 ++ return SCTP_DISPOSITION_DISCARD;
5249 ++
5250 + /* Don't accidentally move back into established state. */
5251 + if (asoc->state < SCTP_STATE_ESTABLISHED) {
5252 + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
5253 +@@ -1959,7 +1970,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(struct net *net,
5254 + }
5255 + }
5256 +
5257 +- repl = sctp_make_cookie_ack(new_asoc, chunk);
5258 ++ repl = sctp_make_cookie_ack(asoc, chunk);
5259 + if (!repl)
5260 + goto nomem;
5261 +
5262 +@@ -3985,10 +3996,8 @@ gen_shutdown:
5263 + *
5264 + * The return value is the disposition of the chunk.
5265 + */
5266 +-static sctp_ierror_t sctp_sf_authenticate(struct net *net,
5267 +- const struct sctp_endpoint *ep,
5268 ++static sctp_ierror_t sctp_sf_authenticate(
5269 + const struct sctp_association *asoc,
5270 +- const sctp_subtype_t type,
5271 + struct sctp_chunk *chunk)
5272 + {
5273 + struct sctp_authhdr *auth_hdr;
5274 +@@ -4087,7 +4096,7 @@ sctp_disposition_t sctp_sf_eat_auth(struct net *net,
5275 + commands);
5276 +
5277 + auth_hdr = (struct sctp_authhdr *)chunk->skb->data;
5278 +- error = sctp_sf_authenticate(net, ep, asoc, type, chunk);
5279 ++ error = sctp_sf_authenticate(asoc, chunk);
5280 + switch (error) {
5281 + case SCTP_IERROR_AUTH_BAD_HMAC:
5282 + /* Generate the ERROR chunk and discard the rest
5283 +diff --git a/net/wireless/core.c b/net/wireless/core.c
5284 +index eeaf83acba1b..a1e909ae0f78 100644
5285 +--- a/net/wireless/core.c
5286 ++++ b/net/wireless/core.c
5287 +@@ -94,6 +94,9 @@ static int cfg80211_dev_check_name(struct cfg80211_registered_device *rdev,
5288 +
5289 + ASSERT_RTNL();
5290 +
5291 ++ if (strlen(newname) > NL80211_WIPHY_NAME_MAXLEN)
5292 ++ return -EINVAL;
5293 ++
5294 + /* prohibit calling the thing phy%d when %d is not its number */
5295 + sscanf(newname, PHY_NAME "%d%n", &wiphy_idx, &taken);
5296 + if (taken == strlen(newname) && wiphy_idx != rdev->wiphy_idx) {
5297 +diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
5298 +index 62d87f859566..d6a11af0bab1 100644
5299 +--- a/net/xfrm/xfrm_state.c
5300 ++++ b/net/xfrm/xfrm_state.c
5301 +@@ -1159,6 +1159,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig)
5302 +
5303 + if (orig->aead) {
5304 + x->aead = xfrm_algo_aead_clone(orig->aead);
5305 ++ x->geniv = orig->geniv;
5306 + if (!x->aead)
5307 + goto error;
5308 + }
5309 +diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c
5310 +index 0608f216f359..ac0a40b9ba1e 100644
5311 +--- a/sound/core/control_compat.c
5312 ++++ b/sound/core/control_compat.c
5313 +@@ -400,8 +400,7 @@ static int snd_ctl_elem_add_compat(struct snd_ctl_file *file,
5314 + if (copy_from_user(&data->id, &data32->id, sizeof(data->id)) ||
5315 + copy_from_user(&data->type, &data32->type, 3 * sizeof(u32)))
5316 + goto error;
5317 +- if (get_user(data->owner, &data32->owner) ||
5318 +- get_user(data->type, &data32->type))
5319 ++ if (get_user(data->owner, &data32->owner))
5320 + goto error;
5321 + switch (data->type) {
5322 + case SNDRV_CTL_ELEM_TYPE_BOOLEAN:
5323 +diff --git a/sound/core/timer.c b/sound/core/timer.c
5324 +index 48eaccba82a3..5a718b2d3c9a 100644
5325 +--- a/sound/core/timer.c
5326 ++++ b/sound/core/timer.c
5327 +@@ -318,8 +318,6 @@ int snd_timer_open(struct snd_timer_instance **ti,
5328 + return 0;
5329 + }
5330 +
5331 +-static int _snd_timer_stop(struct snd_timer_instance *timeri, int event);
5332 +-
5333 + /*
5334 + * close a timer instance
5335 + */
5336 +@@ -408,7 +406,6 @@ unsigned long snd_timer_resolution(struct snd_timer_instance *timeri)
5337 + static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
5338 + {
5339 + struct snd_timer *timer;
5340 +- unsigned long flags;
5341 + unsigned long resolution = 0;
5342 + struct snd_timer_instance *ts;
5343 + struct timespec tstamp;
5344 +@@ -432,34 +429,66 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
5345 + return;
5346 + if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
5347 + return;
5348 +- spin_lock_irqsave(&timer->lock, flags);
5349 + list_for_each_entry(ts, &ti->slave_active_head, active_list)
5350 + if (ts->ccallback)
5351 + ts->ccallback(ts, event + 100, &tstamp, resolution);
5352 +- spin_unlock_irqrestore(&timer->lock, flags);
5353 + }
5354 +
5355 +-static int snd_timer_start1(struct snd_timer *timer, struct snd_timer_instance *timeri,
5356 +- unsigned long sticks)
5357 ++/* start/continue a master timer */
5358 ++static int snd_timer_start1(struct snd_timer_instance *timeri,
5359 ++ bool start, unsigned long ticks)
5360 + {
5361 ++ struct snd_timer *timer;
5362 ++ int result;
5363 ++ unsigned long flags;
5364 ++
5365 ++ timer = timeri->timer;
5366 ++ if (!timer)
5367 ++ return -EINVAL;
5368 ++
5369 ++ spin_lock_irqsave(&timer->lock, flags);
5370 ++ if (timer->card && timer->card->shutdown) {
5371 ++ result = -ENODEV;
5372 ++ goto unlock;
5373 ++ }
5374 ++ if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
5375 ++ SNDRV_TIMER_IFLG_START)) {
5376 ++ result = -EBUSY;
5377 ++ goto unlock;
5378 ++ }
5379 ++
5380 ++ if (start)
5381 ++ timeri->ticks = timeri->cticks = ticks;
5382 ++ else if (!timeri->cticks)
5383 ++ timeri->cticks = 1;
5384 ++ timeri->pticks = 0;
5385 ++
5386 + list_move_tail(&timeri->active_list, &timer->active_list_head);
5387 + if (timer->running) {
5388 + if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
5389 + goto __start_now;
5390 + timer->flags |= SNDRV_TIMER_FLG_RESCHED;
5391 + timeri->flags |= SNDRV_TIMER_IFLG_START;
5392 +- return 1; /* delayed start */
5393 ++ result = 1; /* delayed start */
5394 + } else {
5395 +- timer->sticks = sticks;
5396 ++ if (start)
5397 ++ timer->sticks = ticks;
5398 + timer->hw.start(timer);
5399 + __start_now:
5400 + timer->running++;
5401 + timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
5402 +- return 0;
5403 ++ result = 0;
5404 + }
5405 ++ snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START :
5406 ++ SNDRV_TIMER_EVENT_CONTINUE);
5407 ++ unlock:
5408 ++ spin_unlock_irqrestore(&timer->lock, flags);
5409 ++ return result;
5410 + }
5411 +
5412 +-static int snd_timer_start_slave(struct snd_timer_instance *timeri)
5413 ++/* start/continue a slave timer */
5414 ++static int snd_timer_start_slave(struct snd_timer_instance *timeri,
5415 ++ bool start)
5416 + {
5417 + unsigned long flags;
5418 +
5419 +@@ -473,88 +502,37 @@ static int snd_timer_start_slave(struct snd_timer_instance *timeri)
5420 + spin_lock(&timeri->timer->lock);
5421 + list_add_tail(&timeri->active_list,
5422 + &timeri->master->slave_active_head);
5423 ++ snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START :
5424 ++ SNDRV_TIMER_EVENT_CONTINUE);
5425 + spin_unlock(&timeri->timer->lock);
5426 + }
5427 + spin_unlock_irqrestore(&slave_active_lock, flags);
5428 + return 1; /* delayed start */
5429 + }
5430 +
5431 +-/*
5432 +- * start the timer instance
5433 +- */
5434 +-int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
5435 ++/* stop/pause a master timer */
5436 ++static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop)
5437 + {
5438 + struct snd_timer *timer;
5439 +- int result = -EINVAL;
5440 ++ int result = 0;
5441 + unsigned long flags;
5442 +
5443 +- if (timeri == NULL || ticks < 1)
5444 +- return -EINVAL;
5445 +- if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
5446 +- result = snd_timer_start_slave(timeri);
5447 +- if (result >= 0)
5448 +- snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
5449 +- return result;
5450 +- }
5451 +- timer = timeri->timer;
5452 +- if (timer == NULL)
5453 +- return -EINVAL;
5454 +- if (timer->card && timer->card->shutdown)
5455 +- return -ENODEV;
5456 +- spin_lock_irqsave(&timer->lock, flags);
5457 +- if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
5458 +- SNDRV_TIMER_IFLG_START)) {
5459 +- result = -EBUSY;
5460 +- goto unlock;
5461 +- }
5462 +- timeri->ticks = timeri->cticks = ticks;
5463 +- timeri->pticks = 0;
5464 +- result = snd_timer_start1(timer, timeri, ticks);
5465 +- unlock:
5466 +- spin_unlock_irqrestore(&timer->lock, flags);
5467 +- if (result >= 0)
5468 +- snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
5469 +- return result;
5470 +-}
5471 +-
5472 +-static int _snd_timer_stop(struct snd_timer_instance *timeri, int event)
5473 +-{
5474 +- struct snd_timer *timer;
5475 +- unsigned long flags;
5476 +-
5477 +- if (snd_BUG_ON(!timeri))
5478 +- return -ENXIO;
5479 +-
5480 +- if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
5481 +- spin_lock_irqsave(&slave_active_lock, flags);
5482 +- if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) {
5483 +- spin_unlock_irqrestore(&slave_active_lock, flags);
5484 +- return -EBUSY;
5485 +- }
5486 +- if (timeri->timer)
5487 +- spin_lock(&timeri->timer->lock);
5488 +- timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
5489 +- list_del_init(&timeri->ack_list);
5490 +- list_del_init(&timeri->active_list);
5491 +- if (timeri->timer)
5492 +- spin_unlock(&timeri->timer->lock);
5493 +- spin_unlock_irqrestore(&slave_active_lock, flags);
5494 +- goto __end;
5495 +- }
5496 + timer = timeri->timer;
5497 + if (!timer)
5498 + return -EINVAL;
5499 + spin_lock_irqsave(&timer->lock, flags);
5500 + if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
5501 + SNDRV_TIMER_IFLG_START))) {
5502 +- spin_unlock_irqrestore(&timer->lock, flags);
5503 +- return -EBUSY;
5504 ++ result = -EBUSY;
5505 ++ goto unlock;
5506 + }
5507 + list_del_init(&timeri->ack_list);
5508 + list_del_init(&timeri->active_list);
5509 +- if (timer->card && timer->card->shutdown) {
5510 +- spin_unlock_irqrestore(&timer->lock, flags);
5511 +- return 0;
5512 ++ if (timer->card && timer->card->shutdown)
5513 ++ goto unlock;
5514 ++ if (stop) {
5515 ++ timeri->cticks = timeri->ticks;
5516 ++ timeri->pticks = 0;
5517 + }
5518 + if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) &&
5519 + !(--timer->running)) {
5520 +@@ -569,13 +547,49 @@ static int _snd_timer_stop(struct snd_timer_instance *timeri, int event)
5521 + }
5522 + }
5523 + timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
5524 ++ snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
5525 ++ SNDRV_TIMER_EVENT_CONTINUE);
5526 ++ unlock:
5527 + spin_unlock_irqrestore(&timer->lock, flags);
5528 +- __end:
5529 +- if (event != SNDRV_TIMER_EVENT_RESOLUTION)
5530 +- snd_timer_notify1(timeri, event);
5531 ++ return result;
5532 ++}
5533 ++
5534 ++/* stop/pause a slave timer */
5535 ++static int snd_timer_stop_slave(struct snd_timer_instance *timeri, bool stop)
5536 ++{
5537 ++ unsigned long flags;
5538 ++
5539 ++ spin_lock_irqsave(&slave_active_lock, flags);
5540 ++ if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) {
5541 ++ spin_unlock_irqrestore(&slave_active_lock, flags);
5542 ++ return -EBUSY;
5543 ++ }
5544 ++ timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
5545 ++ if (timeri->timer) {
5546 ++ spin_lock(&timeri->timer->lock);
5547 ++ list_del_init(&timeri->ack_list);
5548 ++ list_del_init(&timeri->active_list);
5549 ++ snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
5550 ++ SNDRV_TIMER_EVENT_CONTINUE);
5551 ++ spin_unlock(&timeri->timer->lock);
5552 ++ }
5553 ++ spin_unlock_irqrestore(&slave_active_lock, flags);
5554 + return 0;
5555 + }
5556 +
5557 ++/*
5558 ++ * start the timer instance
5559 ++ */
5560 ++int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
5561 ++{
5562 ++ if (timeri == NULL || ticks < 1)
5563 ++ return -EINVAL;
5564 ++ if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
5565 ++ return snd_timer_start_slave(timeri, true);
5566 ++ else
5567 ++ return snd_timer_start1(timeri, true, ticks);
5568 ++}
5569 ++
5570 + /*
5571 + * stop the timer instance.
5572 + *
5573 +@@ -583,21 +597,10 @@ static int _snd_timer_stop(struct snd_timer_instance *timeri, int event)
5574 + */
5575 + int snd_timer_stop(struct snd_timer_instance *timeri)
5576 + {
5577 +- struct snd_timer *timer;
5578 +- unsigned long flags;
5579 +- int err;
5580 +-
5581 +- err = _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_STOP);
5582 +- if (err < 0)
5583 +- return err;
5584 +- timer = timeri->timer;
5585 +- if (!timer)
5586 +- return -EINVAL;
5587 +- spin_lock_irqsave(&timer->lock, flags);
5588 +- timeri->cticks = timeri->ticks;
5589 +- timeri->pticks = 0;
5590 +- spin_unlock_irqrestore(&timer->lock, flags);
5591 +- return 0;
5592 ++ if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
5593 ++ return snd_timer_stop_slave(timeri, true);
5594 ++ else
5595 ++ return snd_timer_stop1(timeri, true);
5596 + }
5597 +
5598 + /*
5599 +@@ -605,32 +608,10 @@ int snd_timer_stop(struct snd_timer_instance *timeri)
5600 + */
5601 + int snd_timer_continue(struct snd_timer_instance *timeri)
5602 + {
5603 +- struct snd_timer *timer;
5604 +- int result = -EINVAL;
5605 +- unsigned long flags;
5606 +-
5607 +- if (timeri == NULL)
5608 +- return result;
5609 + if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
5610 +- return snd_timer_start_slave(timeri);
5611 +- timer = timeri->timer;
5612 +- if (! timer)
5613 +- return -EINVAL;
5614 +- if (timer->card && timer->card->shutdown)
5615 +- return -ENODEV;
5616 +- spin_lock_irqsave(&timer->lock, flags);
5617 +- if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
5618 +- result = -EBUSY;
5619 +- goto unlock;
5620 +- }
5621 +- if (!timeri->cticks)
5622 +- timeri->cticks = 1;
5623 +- timeri->pticks = 0;
5624 +- result = snd_timer_start1(timer, timeri, timer->sticks);
5625 +- unlock:
5626 +- spin_unlock_irqrestore(&timer->lock, flags);
5627 +- snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_CONTINUE);
5628 +- return result;
5629 ++ return snd_timer_start_slave(timeri, false);
5630 ++ else
5631 ++ return snd_timer_start1(timeri, false, 0);
5632 + }
5633 +
5634 + /*
5635 +@@ -638,7 +619,10 @@ int snd_timer_continue(struct snd_timer_instance *timeri)
5636 + */
5637 + int snd_timer_pause(struct snd_timer_instance * timeri)
5638 + {
5639 +- return _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_PAUSE);
5640 ++ if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
5641 ++ return snd_timer_stop_slave(timeri, false);
5642 ++ else
5643 ++ return snd_timer_stop1(timeri, false);
5644 + }
5645 +
5646 + /*
5647 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
5648 +index 3be91696ac35..d0b55c866370 100644
5649 +--- a/sound/pci/hda/hda_intel.c
5650 ++++ b/sound/pci/hda/hda_intel.c
5651 +@@ -2072,6 +2072,8 @@ static struct snd_pci_quirk power_save_blacklist[] = {
5652 + SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0),
5653 + /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
5654 + SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
5655 ++ /* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
5656 ++ SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
5657 + /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
5658 + SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0),
5659 + {}
5660 +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
5661 +index c5447ff078b3..97d6a18e6956 100644
5662 +--- a/sound/usb/mixer.c
5663 ++++ b/sound/usb/mixer.c
5664 +@@ -904,6 +904,14 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
5665 + }
5666 + break;
5667 +
5668 ++ case USB_ID(0x0d8c, 0x0103):
5669 ++ if (!strcmp(kctl->id.name, "PCM Playback Volume")) {
5670 ++ usb_audio_info(chip,
5671 ++ "set volume quirk for CM102-A+/102S+\n");
5672 ++ cval->min = -256;
5673 ++ }
5674 ++ break;
5675 ++
5676 + case USB_ID(0x0471, 0x0101):
5677 + case USB_ID(0x0471, 0x0104):
5678 + case USB_ID(0x0471, 0x0105):