From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Sat, 13 Oct 2018 16:34:38
Message-Id: 1539448456.2ac2e047bfb433ba7d1057e38133e6c069d0d53b.mpagano@gentoo
1 commit: 2ac2e047bfb433ba7d1057e38133e6c069d0d53b
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Sat Oct 13 16:34:16 2018 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Sat Oct 13 16:34:16 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=2ac2e047
7
8 Linux patch 4.9.133
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1132_linux-4.9.133.patch | 1471 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 1475 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 5814ba5..5ccf4d0 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -571,6 +571,10 @@ Patch: 1130_linux-4.9.132.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.9.132
23
24 +Patch: 1132_linux-4.9.133.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.9.133
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1132_linux-4.9.133.patch b/1132_linux-4.9.133.patch
33 new file mode 100644
34 index 0000000..6f23eca
35 --- /dev/null
36 +++ b/1132_linux-4.9.133.patch
37 @@ -0,0 +1,1471 @@
38 +diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
39 +index a36a695318c6..f9f67be8d3c3 100644
40 +--- a/Documentation/kernel-parameters.txt
41 ++++ b/Documentation/kernel-parameters.txt
42 +@@ -1084,12 +1084,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
43 + nopku [X86] Disable Memory Protection Keys CPU feature found
44 + in some Intel CPUs.
45 +
46 +- eagerfpu= [X86]
47 +- on enable eager fpu restore
48 +- off disable eager fpu restore
49 +- auto selects the default scheme, which automatically
50 +- enables eagerfpu restore for xsaveopt.
51 +-
52 + module.async_probe [KNL]
53 + Enable asynchronous probe on this module.
54 +
55 +diff --git a/Makefile b/Makefile
56 +index a46c9788ca67..18090f899a7c 100644
57 +--- a/Makefile
58 ++++ b/Makefile
59 +@@ -1,6 +1,6 @@
60 + VERSION = 4
61 + PATCHLEVEL = 9
62 +-SUBLEVEL = 132
63 ++SUBLEVEL = 133
64 + EXTRAVERSION =
65 + NAME = Roaring Lionus
66 +
67 +diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
68 +index 0e8c0151a390..3ce12137f94f 100644
69 +--- a/arch/arc/kernel/process.c
70 ++++ b/arch/arc/kernel/process.c
71 +@@ -213,6 +213,26 @@ int copy_thread(unsigned long clone_flags,
72 + task_thread_info(current)->thr_ptr;
73 + }
74 +
75 ++
76 ++ /*
77 ++ * setup usermode thread pointer #1:
78 ++ * when child is picked by scheduler, __switch_to() uses @c_callee to
79 ++ * populate usermode callee regs: this works (despite being in a kernel
80 ++ * function) since special return path for child @ret_from_fork()
81 ++ * ensures those regs are not clobbered all the way to RTIE to usermode
82 ++ */
83 ++ c_callee->r25 = task_thread_info(p)->thr_ptr;
84 ++
85 ++#ifdef CONFIG_ARC_CURR_IN_REG
86 ++ /*
87 ++ * setup usermode thread pointer #2:
88 ++ * however for this special use of r25 in kernel, __switch_to() sets
89 ++ * r25 for kernel needs and only in the final return path is usermode
90 ++ * r25 setup, from pt_regs->user_r25. So set that up as well
91 ++ */
92 ++ c_regs->user_r25 = c_callee->r25;
93 ++#endif
94 ++
95 + return 0;
96 + }
97 +
98 +diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
99 +index e3acf5c3480e..02925043575a 100644
100 +--- a/arch/powerpc/kernel/fadump.c
101 ++++ b/arch/powerpc/kernel/fadump.c
102 +@@ -365,9 +365,9 @@ static int __init early_fadump_reserve_mem(char *p)
103 + }
104 + early_param("fadump_reserve_mem", early_fadump_reserve_mem);
105 +
106 +-static void register_fw_dump(struct fadump_mem_struct *fdm)
107 ++static int register_fw_dump(struct fadump_mem_struct *fdm)
108 + {
109 +- int rc;
110 ++ int rc, err;
111 + unsigned int wait_time;
112 +
113 + pr_debug("Registering for firmware-assisted kernel dump...\n");
114 +@@ -384,7 +384,11 @@ static void register_fw_dump(struct fadump_mem_struct *fdm)
115 +
116 + } while (wait_time);
117 +
118 ++ err = -EIO;
119 + switch (rc) {
120 ++ default:
121 ++ pr_err("Failed to register. Unknown Error(%d).\n", rc);
122 ++ break;
123 + case -1:
124 + printk(KERN_ERR "Failed to register firmware-assisted kernel"
125 + " dump. Hardware Error(%d).\n", rc);
126 +@@ -392,18 +396,22 @@ static void register_fw_dump(struct fadump_mem_struct *fdm)
127 + case -3:
128 + printk(KERN_ERR "Failed to register firmware-assisted kernel"
129 + " dump. Parameter Error(%d).\n", rc);
130 ++ err = -EINVAL;
131 + break;
132 + case -9:
133 + printk(KERN_ERR "firmware-assisted kernel dump is already "
134 + " registered.");
135 + fw_dump.dump_registered = 1;
136 ++ err = -EEXIST;
137 + break;
138 + case 0:
139 + printk(KERN_INFO "firmware-assisted kernel dump registration"
140 + " is successful\n");
141 + fw_dump.dump_registered = 1;
142 ++ err = 0;
143 + break;
144 + }
145 ++ return err;
146 + }
147 +
148 + void crash_fadump(struct pt_regs *regs, const char *str)
149 +@@ -1006,7 +1014,7 @@ static unsigned long init_fadump_header(unsigned long addr)
150 + return addr;
151 + }
152 +
153 +-static void register_fadump(void)
154 ++static int register_fadump(void)
155 + {
156 + unsigned long addr;
157 + void *vaddr;
158 +@@ -1017,7 +1025,7 @@ static void register_fadump(void)
159 + * assisted dump.
160 + */
161 + if (!fw_dump.reserve_dump_area_size)
162 +- return;
163 ++ return -ENODEV;
164 +
165 + ret = fadump_setup_crash_memory_ranges();
166 + if (ret)
167 +@@ -1032,7 +1040,7 @@ static void register_fadump(void)
168 + fadump_create_elfcore_headers(vaddr);
169 +
170 + /* register the future kernel dump with firmware. */
171 +- register_fw_dump(&fdm);
172 ++ return register_fw_dump(&fdm);
173 + }
174 +
175 + static int fadump_unregister_dump(struct fadump_mem_struct *fdm)
176 +@@ -1218,7 +1226,6 @@ static ssize_t fadump_register_store(struct kobject *kobj,
177 + switch (buf[0]) {
178 + case '0':
179 + if (fw_dump.dump_registered == 0) {
180 +- ret = -EINVAL;
181 + goto unlock_out;
182 + }
183 + /* Un-register Firmware-assisted dump */
184 +@@ -1226,11 +1233,11 @@ static ssize_t fadump_register_store(struct kobject *kobj,
185 + break;
186 + case '1':
187 + if (fw_dump.dump_registered == 1) {
188 +- ret = -EINVAL;
189 ++ ret = -EEXIST;
190 + goto unlock_out;
191 + }
192 + /* Register Firmware-assisted dump */
193 +- register_fadump();
194 ++ ret = register_fadump();
195 + break;
196 + default:
197 + ret = -EINVAL;
198 +diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
199 +index dd1958436591..5773e1161072 100644
200 +--- a/arch/x86/crypto/crc32c-intel_glue.c
201 ++++ b/arch/x86/crypto/crc32c-intel_glue.c
202 +@@ -48,21 +48,13 @@
203 + #ifdef CONFIG_X86_64
204 + /*
205 + * use carryless multiply version of crc32c when buffer
206 +- * size is >= 512 (when eager fpu is enabled) or
207 +- * >= 1024 (when eager fpu is disabled) to account
208 ++ * size is >= 512 to account
209 + * for fpu state save/restore overhead.
210 + */
211 +-#define CRC32C_PCL_BREAKEVEN_EAGERFPU 512
212 +-#define CRC32C_PCL_BREAKEVEN_NOEAGERFPU 1024
213 ++#define CRC32C_PCL_BREAKEVEN 512
214 +
215 + asmlinkage unsigned int crc_pcl(const u8 *buffer, int len,
216 + unsigned int crc_init);
217 +-static int crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_EAGERFPU;
218 +-#define set_pcl_breakeven_point() \
219 +-do { \
220 +- if (!use_eager_fpu()) \
221 +- crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_NOEAGERFPU; \
222 +-} while (0)
223 + #endif /* CONFIG_X86_64 */
224 +
225 + static u32 crc32c_intel_le_hw_byte(u32 crc, unsigned char const *data, size_t length)
226 +@@ -185,7 +177,7 @@ static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data,
227 + * use faster PCL version if datasize is large enough to
228 + * overcome kernel fpu state save/restore overhead
229 + */
230 +- if (len >= crc32c_pcl_breakeven && irq_fpu_usable()) {
231 ++ if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
232 + kernel_fpu_begin();
233 + *crcp = crc_pcl(data, len, *crcp);
234 + kernel_fpu_end();
235 +@@ -197,7 +189,7 @@ static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data,
236 + static int __crc32c_pcl_intel_finup(u32 *crcp, const u8 *data, unsigned int len,
237 + u8 *out)
238 + {
239 +- if (len >= crc32c_pcl_breakeven && irq_fpu_usable()) {
240 ++ if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
241 + kernel_fpu_begin();
242 + *(__le32 *)out = ~cpu_to_le32(crc_pcl(data, len, *crcp));
243 + kernel_fpu_end();
244 +@@ -257,7 +249,6 @@ static int __init crc32c_intel_mod_init(void)
245 + alg.update = crc32c_pcl_intel_update;
246 + alg.finup = crc32c_pcl_intel_finup;
247 + alg.digest = crc32c_pcl_intel_digest;
248 +- set_pcl_breakeven_point();
249 + }
250 + #endif
251 + return crypto_register_shash(&alg);
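(Context for the simplification above, general x86 FPU behaviour rather than anything stated in the patch itself: with lazy FPU switching removed, every kernel_fpu_begin()/kernel_fpu_end() pair pays the full register save/restore cost, so a single 512-byte break-even constant suffices and the runtime-selected 1024-byte threshold for the old non-eager case no longer has a user.)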
252 +diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
253 +index 02223cb4bcfd..1e967099ae51 100644
254 +--- a/arch/x86/entry/vdso/vclock_gettime.c
255 ++++ b/arch/x86/entry/vdso/vclock_gettime.c
256 +@@ -37,8 +37,9 @@ extern u8 pvclock_page
257 + notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
258 + {
259 + long ret;
260 +- asm("syscall" : "=a" (ret) :
261 +- "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
262 ++ asm ("syscall" : "=a" (ret), "=m" (*ts) :
263 ++ "0" (__NR_clock_gettime), "D" (clock), "S" (ts) :
264 ++ "memory", "rcx", "r11");
265 + return ret;
266 + }
267 +
268 +@@ -46,8 +47,9 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
269 + {
270 + long ret;
271 +
272 +- asm("syscall" : "=a" (ret) :
273 +- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
274 ++ asm ("syscall" : "=a" (ret), "=m" (*tv), "=m" (*tz) :
275 ++ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) :
276 ++ "memory", "rcx", "r11");
277 + return ret;
278 + }
279 +
280 +@@ -58,13 +60,13 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
281 + {
282 + long ret;
283 +
284 +- asm(
285 ++ asm (
286 + "mov %%ebx, %%edx \n"
287 +- "mov %2, %%ebx \n"
288 ++ "mov %[clock], %%ebx \n"
289 + "call __kernel_vsyscall \n"
290 + "mov %%edx, %%ebx \n"
291 +- : "=a" (ret)
292 +- : "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
293 ++ : "=a" (ret), "=m" (*ts)
294 ++ : "0" (__NR_clock_gettime), [clock] "g" (clock), "c" (ts)
295 + : "memory", "edx");
296 + return ret;
297 + }
298 +@@ -73,13 +75,13 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
299 + {
300 + long ret;
301 +
302 +- asm(
303 ++ asm (
304 + "mov %%ebx, %%edx \n"
305 +- "mov %2, %%ebx \n"
306 ++ "mov %[tv], %%ebx \n"
307 + "call __kernel_vsyscall \n"
308 + "mov %%edx, %%ebx \n"
309 +- : "=a" (ret)
310 +- : "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
311 ++ : "=a" (ret), "=m" (*tv), "=m" (*tz)
312 ++ : "0" (__NR_gettimeofday), [tv] "g" (tv), "c" (tz)
313 + : "memory", "edx");
314 + return ret;
315 + }
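For context on the constraint changes above: the x86-64 `syscall` instruction destroys %rcx (return address) and %r11 (saved RFLAGS), and the kernel writes its result through the pointer arguments, which is exactly what the added clobbers and "=m" outputs tell the compiler. A minimal userspace sketch of the corrected idiom, assuming the x86-64 syscall number 228 for clock_gettime (illustrative, not part of the patch):

    #include <time.h>   /* struct timespec */

    static long clock_gettime_raw(long clockid, struct timespec *ts)
    {
            long ret;
            /* rcx/r11 are clobbered by syscall; "=m"(*ts) marks the
             * kernel's write through ts so it cannot be cached away. */
            asm volatile ("syscall"
                          : "=a" (ret), "=m" (*ts)
                          : "0" (228), "D" (clockid), "S" (ts)
                          : "memory", "rcx", "r11");
            return ret;
    }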
316 +diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
317 +index fbc1474960e3..f6d1bc93589c 100644
318 +--- a/arch/x86/include/asm/cpufeatures.h
319 ++++ b/arch/x86/include/asm/cpufeatures.h
320 +@@ -104,7 +104,6 @@
321 + #define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
322 + #define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
323 + #define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
324 +-/* free, was #define X86_FEATURE_EAGER_FPU ( 3*32+29) * "eagerfpu" Non lazy FPU restore */
325 + #define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
326 +
327 + /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
328 +diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
329 +index 8554f960e21b..25152843dd1f 100644
330 +--- a/arch/x86/include/asm/fixmap.h
331 ++++ b/arch/x86/include/asm/fixmap.h
332 +@@ -14,6 +14,16 @@
333 + #ifndef _ASM_X86_FIXMAP_H
334 + #define _ASM_X86_FIXMAP_H
335 +
336 ++/*
337 ++ * Exposed to assembly code for setting up initial page tables. Cannot be
338 ++ * calculated in assembly code (fixmap entries are an enum), but is sanity
339 ++ * checked in the actual fixmap C code to make sure that the fixmap is
340 ++ * covered fully.
341 ++ */
342 ++#define FIXMAP_PMD_NUM 2
343 ++/* fixmap starts downwards from the 507th entry in level2_fixmap_pgt */
344 ++#define FIXMAP_PMD_TOP 507
345 ++
346 + #ifndef __ASSEMBLY__
347 + #include <linux/kernel.h>
348 + #include <asm/acpi.h>
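For scale, a worked check of the constants above: each PMD entry points at one page table of 512 PTEs covering 512 * 4 KiB = 2 MiB, so FIXMAP_PMD_NUM = 2 gives the fixmap 1024 page slots (4 MiB), held in level2_fixmap_pgt entries 506 and 507 and growing downwards from FIXMAP_PMD_TOP = 507. The BUILD_BUG_ON added to arch/x86/mm/pgtable.c further down enforces exactly that __end_of_permanent_fixed_addresses fits within FIXMAP_PMD_NUM * PTRS_PER_PTE = 1024 entries.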
349 +diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
350 +index 8852e3afa1ad..499d6ed0e376 100644
351 +--- a/arch/x86/include/asm/fpu/internal.h
352 ++++ b/arch/x86/include/asm/fpu/internal.h
353 +@@ -60,11 +60,6 @@ extern u64 fpu__get_supported_xfeatures_mask(void);
354 + /*
355 + * FPU related CPU feature flag helper routines:
356 + */
357 +-static __always_inline __pure bool use_eager_fpu(void)
358 +-{
359 +- return true;
360 +-}
361 +-
362 + static __always_inline __pure bool use_xsaveopt(void)
363 + {
364 + return static_cpu_has(X86_FEATURE_XSAVEOPT);
365 +@@ -501,24 +496,6 @@ static inline int fpu_want_lazy_restore(struct fpu *fpu, unsigned int cpu)
366 + }
367 +
368 +
369 +-/*
370 +- * Wrap lazy FPU TS handling in a 'hw fpregs activation/deactivation'
371 +- * idiom, which is then paired with the sw-flag (fpregs_active) later on:
372 +- */
373 +-
374 +-static inline void __fpregs_activate_hw(void)
375 +-{
376 +- if (!use_eager_fpu())
377 +- clts();
378 +-}
379 +-
380 +-static inline void __fpregs_deactivate_hw(void)
381 +-{
382 +- if (!use_eager_fpu())
383 +- stts();
384 +-}
385 +-
386 +-/* Must be paired with an 'stts' (fpregs_deactivate_hw()) after! */
387 + static inline void __fpregs_deactivate(struct fpu *fpu)
388 + {
389 + WARN_ON_FPU(!fpu->fpregs_active);
390 +@@ -528,7 +505,6 @@ static inline void __fpregs_deactivate(struct fpu *fpu)
391 + trace_x86_fpu_regs_deactivated(fpu);
392 + }
393 +
394 +-/* Must be paired with a 'clts' (fpregs_activate_hw()) before! */
395 + static inline void __fpregs_activate(struct fpu *fpu)
396 + {
397 + WARN_ON_FPU(fpu->fpregs_active);
398 +@@ -554,22 +530,17 @@ static inline int fpregs_active(void)
399 + }
400 +
401 + /*
402 +- * Encapsulate the CR0.TS handling together with the
403 +- * software flag.
404 +- *
405 + * These generally need preemption protection to work,
406 + * do try to avoid using these on their own.
407 + */
408 + static inline void fpregs_activate(struct fpu *fpu)
409 + {
410 +- __fpregs_activate_hw();
411 + __fpregs_activate(fpu);
412 + }
413 +
414 + static inline void fpregs_deactivate(struct fpu *fpu)
415 + {
416 + __fpregs_deactivate(fpu);
417 +- __fpregs_deactivate_hw();
418 + }
419 +
420 + /*
421 +@@ -596,8 +567,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
422 + * or if the past 5 consecutive context-switches used math.
423 + */
424 + fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
425 +- new_fpu->fpstate_active &&
426 +- (use_eager_fpu() || new_fpu->counter > 5);
427 ++ new_fpu->fpstate_active;
428 +
429 + if (old_fpu->fpregs_active) {
430 + if (!copy_fpregs_to_fpstate(old_fpu))
431 +@@ -611,18 +581,13 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
432 +
433 + /* Don't change CR0.TS if we just switch! */
434 + if (fpu.preload) {
435 +- new_fpu->counter++;
436 + __fpregs_activate(new_fpu);
437 + trace_x86_fpu_regs_activated(new_fpu);
438 + prefetch(&new_fpu->state);
439 +- } else {
440 +- __fpregs_deactivate_hw();
441 + }
442 + } else {
443 +- old_fpu->counter = 0;
444 + old_fpu->last_cpu = -1;
445 + if (fpu.preload) {
446 +- new_fpu->counter++;
447 + if (fpu_want_lazy_restore(new_fpu, cpu))
448 + fpu.preload = 0;
449 + else
450 +diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
451 +index 48df486b02f9..3c80f5b9c09d 100644
452 +--- a/arch/x86/include/asm/fpu/types.h
453 ++++ b/arch/x86/include/asm/fpu/types.h
454 +@@ -321,17 +321,6 @@ struct fpu {
455 + */
456 + unsigned char fpregs_active;
457 +
458 +- /*
459 +- * @counter:
460 +- *
461 +- * This counter contains the number of consecutive context switches
462 +- * during which the FPU stays used. If this is over a threshold, the
463 +- * lazy FPU restore logic becomes eager, to save the trap overhead.
464 +- * This is an unsigned char so that after 256 iterations the counter
465 +- * wraps and the context switch behavior turns lazy again; this is to
466 +- * deal with bursty apps that only use the FPU for a short time:
467 +- */
468 +- unsigned char counter;
469 + /*
470 + * @state:
471 + *
472 +@@ -340,29 +329,6 @@ struct fpu {
473 + * the registers in the FPU are more recent than this state
474 + * copy. If the task context-switches away then they get
475 + * saved here and represent the FPU state.
476 +- *
477 +- * After context switches there may be a (short) time period
478 +- * during which the in-FPU hardware registers are unchanged
479 +- * and still perfectly match this state, if the tasks
480 +- * scheduled afterwards are not using the FPU.
481 +- *
482 +- * This is the 'lazy restore' window of optimization, which
483 +- * we track though 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'.
484 +- *
485 +- * We detect whether a subsequent task uses the FPU via setting
486 +- * CR0::TS to 1, which causes any FPU use to raise a #NM fault.
487 +- *
488 +- * During this window, if the task gets scheduled again, we
489 +- * might be able to skip having to do a restore from this
490 +- * memory buffer to the hardware registers - at the cost of
491 +- * incurring the overhead of #NM fault traps.
492 +- *
493 +- * Note that on modern CPUs that support the XSAVEOPT (or other
494 +- * optimized XSAVE instructions), we don't use #NM traps anymore,
495 +- * as the hardware can track whether FPU registers need saving
496 +- * or not. On such CPUs we activate the non-lazy ('eagerfpu')
497 +- * logic, which unconditionally saves/restores all FPU state
498 +- * across context switches. (if FPU state exists.)
499 + */
500 + union fpregs_state state;
501 + /*
502 +diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
503 +index 221a32ed1372..d5c4df98aac3 100644
504 +--- a/arch/x86/include/asm/pgtable_64.h
505 ++++ b/arch/x86/include/asm/pgtable_64.h
506 +@@ -13,13 +13,14 @@
507 + #include <asm/processor.h>
508 + #include <linux/bitops.h>
509 + #include <linux/threads.h>
510 ++#include <asm/fixmap.h>
511 +
512 + extern pud_t level3_kernel_pgt[512];
513 + extern pud_t level3_ident_pgt[512];
514 + extern pmd_t level2_kernel_pgt[512];
515 + extern pmd_t level2_fixmap_pgt[512];
516 + extern pmd_t level2_ident_pgt[512];
517 +-extern pte_t level1_fixmap_pgt[512];
518 ++extern pte_t level1_fixmap_pgt[512 * FIXMAP_PMD_NUM];
519 + extern pgd_t init_level4_pgt[];
520 +
521 + #define swapper_pg_dir init_level4_pgt
522 +diff --git a/arch/x86/include/asm/trace/fpu.h b/arch/x86/include/asm/trace/fpu.h
523 +index 9217ab1f5bf6..342e59789fcd 100644
524 +--- a/arch/x86/include/asm/trace/fpu.h
525 ++++ b/arch/x86/include/asm/trace/fpu.h
526 +@@ -14,7 +14,6 @@ DECLARE_EVENT_CLASS(x86_fpu,
527 + __field(struct fpu *, fpu)
528 + __field(bool, fpregs_active)
529 + __field(bool, fpstate_active)
530 +- __field(int, counter)
531 + __field(u64, xfeatures)
532 + __field(u64, xcomp_bv)
533 + ),
534 +@@ -23,17 +22,15 @@ DECLARE_EVENT_CLASS(x86_fpu,
535 + __entry->fpu = fpu;
536 + __entry->fpregs_active = fpu->fpregs_active;
537 + __entry->fpstate_active = fpu->fpstate_active;
538 +- __entry->counter = fpu->counter;
539 + if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
540 + __entry->xfeatures = fpu->state.xsave.header.xfeatures;
541 + __entry->xcomp_bv = fpu->state.xsave.header.xcomp_bv;
542 + }
543 + ),
544 +- TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d counter: %d xfeatures: %llx xcomp_bv: %llx",
545 ++ TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d xfeatures: %llx xcomp_bv: %llx",
546 + __entry->fpu,
547 + __entry->fpregs_active,
548 + __entry->fpstate_active,
549 +- __entry->counter,
550 + __entry->xfeatures,
551 + __entry->xcomp_bv
552 + )
553 +diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
554 +index 430c095cfa0e..fc965118d2e6 100644
555 +--- a/arch/x86/kernel/fpu/core.c
556 ++++ b/arch/x86/kernel/fpu/core.c
557 +@@ -59,27 +59,9 @@ static bool kernel_fpu_disabled(void)
558 + return this_cpu_read(in_kernel_fpu);
559 + }
560 +
561 +-/*
562 +- * Were we in an interrupt that interrupted kernel mode?
563 +- *
564 +- * On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that
565 +- * pair does nothing at all: the thread must not have fpu (so
566 +- * that we don't try to save the FPU state), and TS must
567 +- * be set (so that the clts/stts pair does nothing that is
568 +- * visible in the interrupted kernel thread).
569 +- *
570 +- * Except for the eagerfpu case when we return true; in the likely case
571 +- * the thread has FPU but we are not going to set/clear TS.
572 +- */
573 + static bool interrupted_kernel_fpu_idle(void)
574 + {
575 +- if (kernel_fpu_disabled())
576 +- return false;
577 +-
578 +- if (use_eager_fpu())
579 +- return true;
580 +-
581 +- return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS);
582 ++ return !kernel_fpu_disabled();
583 + }
584 +
585 + /*
586 +@@ -127,7 +109,6 @@ void __kernel_fpu_begin(void)
587 + copy_fpregs_to_fpstate(fpu);
588 + } else {
589 + this_cpu_write(fpu_fpregs_owner_ctx, NULL);
590 +- __fpregs_activate_hw();
591 + }
592 + }
593 + EXPORT_SYMBOL(__kernel_fpu_begin);
594 +@@ -138,8 +119,6 @@ void __kernel_fpu_end(void)
595 +
596 + if (fpu->fpregs_active)
597 + copy_kernel_to_fpregs(&fpu->state);
598 +- else
599 +- __fpregs_deactivate_hw();
600 +
601 + kernel_fpu_enable();
602 + }
603 +@@ -201,10 +180,7 @@ void fpu__save(struct fpu *fpu)
604 + trace_x86_fpu_before_save(fpu);
605 + if (fpu->fpregs_active) {
606 + if (!copy_fpregs_to_fpstate(fpu)) {
607 +- if (use_eager_fpu())
608 +- copy_kernel_to_fpregs(&fpu->state);
609 +- else
610 +- fpregs_deactivate(fpu);
611 ++ copy_kernel_to_fpregs(&fpu->state);
612 + }
613 + }
614 + trace_x86_fpu_after_save(fpu);
615 +@@ -249,7 +225,6 @@ EXPORT_SYMBOL_GPL(fpstate_init);
616 +
617 + int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
618 + {
619 +- dst_fpu->counter = 0;
620 + dst_fpu->fpregs_active = 0;
621 + dst_fpu->last_cpu = -1;
622 +
623 +@@ -262,8 +237,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
624 + * Don't let 'init optimized' areas of the XSAVE area
625 + * leak into the child task:
626 + */
627 +- if (use_eager_fpu())
628 +- memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);
629 ++ memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);
630 +
631 + /*
632 + * Save current FPU registers directly into the child
633 +@@ -285,10 +259,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
634 + memcpy(&src_fpu->state, &dst_fpu->state,
635 + fpu_kernel_xstate_size);
636 +
637 +- if (use_eager_fpu())
638 +- copy_kernel_to_fpregs(&src_fpu->state);
639 +- else
640 +- fpregs_deactivate(src_fpu);
641 ++ copy_kernel_to_fpregs(&src_fpu->state);
642 + }
643 + preempt_enable();
644 +
645 +@@ -461,7 +432,6 @@ void fpu__restore(struct fpu *fpu)
646 + trace_x86_fpu_before_restore(fpu);
647 + fpregs_activate(fpu);
648 + copy_kernel_to_fpregs(&fpu->state);
649 +- fpu->counter++;
650 + trace_x86_fpu_after_restore(fpu);
651 + kernel_fpu_enable();
652 + }
653 +@@ -479,7 +449,6 @@ EXPORT_SYMBOL_GPL(fpu__restore);
654 + void fpu__drop(struct fpu *fpu)
655 + {
656 + preempt_disable();
657 +- fpu->counter = 0;
658 +
659 + if (fpu->fpregs_active) {
660 + /* Ignore delayed exceptions from user space */
661 +diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
662 +index 3ec0d2d64601..3a9318610c4d 100644
663 +--- a/arch/x86/kernel/fpu/signal.c
664 ++++ b/arch/x86/kernel/fpu/signal.c
665 +@@ -344,11 +344,9 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
666 + }
667 +
668 + fpu->fpstate_active = 1;
669 +- if (use_eager_fpu()) {
670 +- preempt_disable();
671 +- fpu__restore(fpu);
672 +- preempt_enable();
673 +- }
674 ++ preempt_disable();
675 ++ fpu__restore(fpu);
676 ++ preempt_enable();
677 +
678 + return err;
679 + } else {
680 +diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
681 +index abfbb61b18b8..e9d7f461b7fa 100644
682 +--- a/arch/x86/kernel/fpu/xstate.c
683 ++++ b/arch/x86/kernel/fpu/xstate.c
684 +@@ -890,15 +890,6 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
685 + */
686 + if (!boot_cpu_has(X86_FEATURE_OSPKE))
687 + return -EINVAL;
688 +- /*
689 +- * For most XSAVE components, this would be an arduous task:
690 +- * brining fpstate up to date with fpregs, updating fpstate,
691 +- * then re-populating fpregs. But, for components that are
692 +- * never lazily managed, we can just access the fpregs
693 +- * directly. PKRU is never managed lazily, so we can just
694 +- * manipulate it directly. Make sure it stays that way.
695 +- */
696 +- WARN_ON_ONCE(!use_eager_fpu());
697 +
698 + /* Set the bits we need in PKRU: */
699 + if (init_val & PKEY_DISABLE_ACCESS)
700 +diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
701 +index 9d72cf547c88..b0d6697ab153 100644
702 +--- a/arch/x86/kernel/head_64.S
703 ++++ b/arch/x86/kernel/head_64.S
704 +@@ -23,6 +23,7 @@
705 + #include "../entry/calling.h"
706 + #include <asm/export.h>
707 + #include <asm/nospec-branch.h>
708 ++#include <asm/fixmap.h>
709 +
710 + #ifdef CONFIG_PARAVIRT
711 + #include <asm/asm-offsets.h>
712 +@@ -493,13 +494,20 @@ NEXT_PAGE(level2_kernel_pgt)
713 + KERNEL_IMAGE_SIZE/PMD_SIZE)
714 +
715 + NEXT_PAGE(level2_fixmap_pgt)
716 +- .fill 506,8,0
717 +- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
718 +- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
719 +- .fill 5,8,0
720 ++ .fill (512 - 4 - FIXMAP_PMD_NUM),8,0
721 ++ pgtno = 0
722 ++ .rept (FIXMAP_PMD_NUM)
723 ++ .quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
724 ++ + _PAGE_TABLE;
725 ++ pgtno = pgtno + 1
726 ++ .endr
727 ++ /* 6 MB reserved space + a 2MB hole */
728 ++ .fill 4,8,0
729 +
730 + NEXT_PAGE(level1_fixmap_pgt)
731 ++ .rept (FIXMAP_PMD_NUM)
732 + .fill 512,8,0
733 ++ .endr
734 +
735 + #undef PMDS
736 +
737 +diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
738 +index 7e5119c1d15c..c17d3893ae60 100644
739 +--- a/arch/x86/kvm/cpuid.c
740 ++++ b/arch/x86/kvm/cpuid.c
741 +@@ -16,7 +16,6 @@
742 + #include <linux/export.h>
743 + #include <linux/vmalloc.h>
744 + #include <linux/uaccess.h>
745 +-#include <asm/fpu/internal.h> /* For use_eager_fpu. Ugh! */
746 + #include <asm/user.h>
747 + #include <asm/fpu/xstate.h>
748 + #include "cpuid.h"
749 +@@ -114,8 +113,7 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
750 + if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
751 + best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
752 +
753 +- if (use_eager_fpu())
754 +- kvm_x86_ops->fpu_activate(vcpu);
755 ++ kvm_x86_ops->fpu_activate(vcpu);
756 +
757 + /*
758 + * The existing code assumes virtual address is 48-bit in the canonical
759 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
760 +index 203d42340fc1..5013ef165f44 100644
761 +--- a/arch/x86/kvm/x86.c
762 ++++ b/arch/x86/kvm/x86.c
763 +@@ -7631,16 +7631,6 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
764 + copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
765 + __kernel_fpu_end();
766 + ++vcpu->stat.fpu_reload;
767 +- /*
768 +- * If using eager FPU mode, or if the guest is a frequent user
769 +- * of the FPU, just leave the FPU active for next time.
770 +- * Every 255 times fpu_counter rolls over to 0; a guest that uses
771 +- * the FPU in bursts will revert to loading it on demand.
772 +- */
773 +- if (!use_eager_fpu()) {
774 +- if (++vcpu->fpu_counter < 5)
775 +- kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
776 +- }
777 + trace_kvm_fpu(0);
778 + }
779 +
780 +diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
781 +index e30baa8ad94f..8cbed30feb67 100644
782 +--- a/arch/x86/mm/pgtable.c
783 ++++ b/arch/x86/mm/pgtable.c
784 +@@ -536,6 +536,15 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
785 + {
786 + unsigned long address = __fix_to_virt(idx);
787 +
788 ++#ifdef CONFIG_X86_64
789 ++ /*
790 ++ * Ensure that the static initial page tables are covering the
791 ++ * fixmap completely.
792 ++ */
793 ++ BUILD_BUG_ON(__end_of_permanent_fixed_addresses >
794 ++ (FIXMAP_PMD_NUM * PTRS_PER_PTE));
795 ++#endif
796 ++
797 + if (idx >= __end_of_fixed_addresses) {
798 + BUG();
799 + return;
800 +diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c
801 +index 0bbec041c003..e2d2b3cd4276 100644
802 +--- a/arch/x86/mm/pkeys.c
803 ++++ b/arch/x86/mm/pkeys.c
804 +@@ -142,8 +142,7 @@ u32 init_pkru_value = PKRU_AD_KEY( 1) | PKRU_AD_KEY( 2) | PKRU_AD_KEY( 3) |
805 + * Called from the FPU code when creating a fresh set of FPU
806 + * registers. This is called from a very specific context where
807 + * we know the FPU regstiers are safe for use and we can use PKRU
808 +- * directly. The fact that PKRU is only available when we are
809 +- * using eagerfpu mode makes this possible.
810 ++ * directly.
811 + */
812 + void copy_init_pkru_to_fpregs(void)
813 + {
814 +diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
815 +index c92f75f7ae33..ebceaba20ad1 100644
816 +--- a/arch/x86/xen/mmu.c
817 ++++ b/arch/x86/xen/mmu.c
818 +@@ -1936,7 +1936,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
819 + * L3_k[511] -> level2_fixmap_pgt */
820 + convert_pfn_mfn(level3_kernel_pgt);
821 +
822 +- /* L3_k[511][506] -> level1_fixmap_pgt */
823 ++ /* L3_k[511][508-FIXMAP_PMD_NUM ... 507] -> level1_fixmap_pgt */
824 + convert_pfn_mfn(level2_fixmap_pgt);
825 + }
826 + /* We get [511][511] and have Xen's version of level2_kernel_pgt */
827 +@@ -1970,7 +1970,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
828 + set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
829 + set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
830 + set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
831 +- set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
832 ++
833 ++ for (i = 0; i < FIXMAP_PMD_NUM; i++) {
834 ++ set_page_prot(level1_fixmap_pgt + i * PTRS_PER_PTE,
835 ++ PAGE_KERNEL_RO);
836 ++ }
837 +
838 + /* Pin down new L4 */
839 + pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
840 +diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
841 +index dfffba39f723..98517216879d 100644
842 +--- a/drivers/base/power/main.c
843 ++++ b/drivers/base/power/main.c
844 +@@ -1360,8 +1360,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
845 +
846 + dpm_wait_for_children(dev, async);
847 +
848 +- if (async_error)
849 ++ if (async_error) {
850 ++ dev->power.direct_complete = false;
851 + goto Complete;
852 ++ }
853 +
854 + /*
855 + * If a device configured to wake up the system from sleep states
856 +@@ -1373,6 +1375,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
857 + pm_wakeup_event(dev, 0);
858 +
859 + if (pm_wakeup_pending()) {
860 ++ dev->power.direct_complete = false;
861 + async_error = -EBUSY;
862 + goto Complete;
863 + }
864 +diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
865 +index 0fd0d82f80d2..fa9ef8ed5712 100644
866 +--- a/drivers/infiniband/core/ucma.c
867 ++++ b/drivers/infiniband/core/ucma.c
868 +@@ -1720,6 +1720,8 @@ static int ucma_close(struct inode *inode, struct file *filp)
869 + mutex_lock(&mut);
870 + if (!ctx->closing) {
871 + mutex_unlock(&mut);
872 ++ ucma_put_ctx(ctx);
873 ++ wait_for_completion(&ctx->comp);
874 + /* rdma_destroy_id ensures that no event handlers are
875 + * inflight for that id before releasing it.
876 + */
877 +diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
878 +index a184c9830ca5..62eb4b7caff3 100644
879 +--- a/drivers/md/dm-cache-metadata.c
880 ++++ b/drivers/md/dm-cache-metadata.c
881 +@@ -1262,8 +1262,8 @@ static int __load_mappings(struct dm_cache_metadata *cmd,
882 + if (hints_valid) {
883 + r = dm_array_cursor_next(&cmd->hint_cursor);
884 + if (r) {
885 +- DMERR("dm_array_cursor_next for hint failed");
886 +- goto out;
887 ++ dm_array_cursor_end(&cmd->hint_cursor);
888 ++ hints_valid = false;
889 + }
890 + }
891 + }
892 +diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
893 +index c817627d09ca..58b97226050f 100644
894 +--- a/drivers/md/dm-cache-target.c
895 ++++ b/drivers/md/dm-cache-target.c
896 +@@ -3390,8 +3390,13 @@ static dm_cblock_t get_cache_dev_size(struct cache *cache)
897 +
898 + static bool can_resize(struct cache *cache, dm_cblock_t new_size)
899 + {
900 +- if (from_cblock(new_size) > from_cblock(cache->cache_size))
901 +- return true;
902 ++ if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
903 ++ if (cache->sized) {
904 ++ DMERR("%s: unable to extend cache due to missing cache table reload",
905 ++ cache_device_name(cache));
906 ++ return false;
907 ++ }
908 ++ }
909 +
910 + /*
911 + * We can't drop a dirty block when shrinking the cache.
912 +diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
913 +index 0dadc6044dba..b106a06d21cb 100644
914 +--- a/drivers/net/wireless/ath/ath10k/debug.c
915 ++++ b/drivers/net/wireless/ath/ath10k/debug.c
916 +@@ -1,6 +1,7 @@
917 + /*
918 + * Copyright (c) 2005-2011 Atheros Communications Inc.
919 + * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
920 ++ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
921 + *
922 + * Permission to use, copy, modify, and/or distribute this software for any
923 + * purpose with or without fee is hereby granted, provided that the above
924 +@@ -161,6 +162,8 @@ void ath10k_debug_print_hwfw_info(struct ath10k *ar)
925 + void ath10k_debug_print_board_info(struct ath10k *ar)
926 + {
927 + char boardinfo[100];
928 ++ const struct firmware *board;
929 ++ u32 crc;
930 +
931 + if (ar->id.bmi_ids_valid)
932 + scnprintf(boardinfo, sizeof(boardinfo), "%d:%d",
933 +@@ -168,11 +171,16 @@ void ath10k_debug_print_board_info(struct ath10k *ar)
934 + else
935 + scnprintf(boardinfo, sizeof(boardinfo), "N/A");
936 +
937 ++ board = ar->normal_mode_fw.board;
938 ++ if (!IS_ERR_OR_NULL(board))
939 ++ crc = crc32_le(0, board->data, board->size);
940 ++ else
941 ++ crc = 0;
942 ++
943 + ath10k_info(ar, "board_file api %d bmi_id %s crc32 %08x",
944 + ar->bd_api,
945 + boardinfo,
946 +- crc32_le(0, ar->normal_mode_fw.board->data,
947 +- ar->normal_mode_fw.board->size));
948 ++ crc);
949 + }
950 +
951 + void ath10k_debug_print_boot_info(struct ath10k *ar)
952 +diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
953 +index e0d00cef0bd8..5b974bb76e6c 100644
954 +--- a/drivers/net/wireless/ath/ath10k/trace.h
955 ++++ b/drivers/net/wireless/ath/ath10k/trace.h
956 +@@ -152,10 +152,9 @@ TRACE_EVENT(ath10k_log_dbg_dump,
957 + );
958 +
959 + TRACE_EVENT(ath10k_wmi_cmd,
960 +- TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len,
961 +- int ret),
962 ++ TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len),
963 +
964 +- TP_ARGS(ar, id, buf, buf_len, ret),
965 ++ TP_ARGS(ar, id, buf, buf_len),
966 +
967 + TP_STRUCT__entry(
968 + __string(device, dev_name(ar->dev))
969 +@@ -163,7 +162,6 @@ TRACE_EVENT(ath10k_wmi_cmd,
970 + __field(unsigned int, id)
971 + __field(size_t, buf_len)
972 + __dynamic_array(u8, buf, buf_len)
973 +- __field(int, ret)
974 + ),
975 +
976 + TP_fast_assign(
977 +@@ -171,17 +169,15 @@ TRACE_EVENT(ath10k_wmi_cmd,
978 + __assign_str(driver, dev_driver_string(ar->dev));
979 + __entry->id = id;
980 + __entry->buf_len = buf_len;
981 +- __entry->ret = ret;
982 + memcpy(__get_dynamic_array(buf), buf, buf_len);
983 + ),
984 +
985 + TP_printk(
986 +- "%s %s id %d len %zu ret %d",
987 ++ "%s %s id %d len %zu",
988 + __get_str(driver),
989 + __get_str(device),
990 + __entry->id,
991 +- __entry->buf_len,
992 +- __entry->ret
993 ++ __entry->buf_len
994 + )
995 + );
996 +
997 +diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
998 +index f69b98f4276b..642a441a6586 100644
999 +--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
1000 ++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
1001 +@@ -1486,10 +1486,10 @@ ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
1002 + bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
1003 + ie_len = roundup(arg->ie_len, 4);
1004 + len = (sizeof(*tlv) + sizeof(*cmd)) +
1005 +- (arg->n_channels ? sizeof(*tlv) + chan_len : 0) +
1006 +- (arg->n_ssids ? sizeof(*tlv) + ssid_len : 0) +
1007 +- (arg->n_bssids ? sizeof(*tlv) + bssid_len : 0) +
1008 +- (arg->ie_len ? sizeof(*tlv) + ie_len : 0);
1009 ++ sizeof(*tlv) + chan_len +
1010 ++ sizeof(*tlv) + ssid_len +
1011 ++ sizeof(*tlv) + bssid_len +
1012 ++ sizeof(*tlv) + ie_len;
1013 +
1014 + skb = ath10k_wmi_alloc_skb(ar, len);
1015 + if (!skb)
1016 +diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
1017 +index e518b640aad0..75f7a7b549df 100644
1018 +--- a/drivers/net/wireless/ath/ath10k/wmi.c
1019 ++++ b/drivers/net/wireless/ath/ath10k/wmi.c
1020 +@@ -1711,8 +1711,8 @@ int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
1021 + cmd_hdr->cmd_id = __cpu_to_le32(cmd);
1022 +
1023 + memset(skb_cb, 0, sizeof(*skb_cb));
1024 ++ trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len);
1025 + ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
1026 +- trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len, ret);
1027 +
1028 + if (ret)
1029 + goto err_pull;
1030 +diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
1031 +index 3c4c58b9fe76..3b6fb5b3bdb2 100644
1032 +--- a/drivers/net/xen-netback/hash.c
1033 ++++ b/drivers/net/xen-netback/hash.c
1034 +@@ -332,20 +332,22 @@ u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
1035 + u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
1036 + u32 off)
1037 + {
1038 +- u32 *mapping = &vif->hash.mapping[off];
1039 ++ u32 *mapping = vif->hash.mapping;
1040 + struct gnttab_copy copy_op = {
1041 + .source.u.ref = gref,
1042 + .source.domid = vif->domid,
1043 +- .dest.u.gmfn = virt_to_gfn(mapping),
1044 + .dest.domid = DOMID_SELF,
1045 +- .dest.offset = xen_offset_in_page(mapping),
1046 +- .len = len * sizeof(u32),
1047 ++ .len = len * sizeof(*mapping),
1048 + .flags = GNTCOPY_source_gref
1049 + };
1050 +
1051 +- if ((off + len > vif->hash.size) || copy_op.len > XEN_PAGE_SIZE)
1052 ++ if ((off + len < off) || (off + len > vif->hash.size) ||
1053 ++ len > XEN_PAGE_SIZE / sizeof(*mapping))
1054 + return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
1055 +
1056 ++ copy_op.dest.u.gmfn = virt_to_gfn(mapping + off);
1057 ++ copy_op.dest.offset = xen_offset_in_page(mapping + off);
1058 ++
1059 + while (len-- != 0)
1060 + if (mapping[off++] >= vif->num_queues)
1061 + return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
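The reworked check above guards three cases at once: `off + len` wrapping around as a u32, the range running past the mapping table, and a single grant copy exceeding one Xen page. A standalone sketch of the same validation pattern (hypothetical names and a fixed 4 KiB page, not xen-netback's actual API):

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_BYTES 4096u

    /* May a request overwrite table[off .. off+len-1]? */
    static bool mapping_update_ok(uint32_t off, uint32_t len,
                                  uint32_t table_size)
    {
            if (off + len < off)                      /* u32 overflow */
                    return false;
            if (off + len > table_size)               /* out of bounds */
                    return false;
            if (len > PAGE_BYTES / sizeof(uint32_t))  /* > one page copied */
                    return false;
            return true;
    }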
1062 +diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
1063 +index 90b5a898d6b1..0a1ebbbd3f16 100644
1064 +--- a/drivers/of/unittest.c
1065 ++++ b/drivers/of/unittest.c
1066 +@@ -548,6 +548,9 @@ static void __init of_unittest_parse_interrupts(void)
1067 + struct of_phandle_args args;
1068 + int i, rc;
1069 +
1070 ++ if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
1071 ++ return;
1072 ++
1073 + np = of_find_node_by_path("/testcase-data/interrupts/interrupts0");
1074 + if (!np) {
1075 + pr_err("missing testcase data\n");
1076 +@@ -622,6 +625,9 @@ static void __init of_unittest_parse_interrupts_extended(void)
1077 + struct of_phandle_args args;
1078 + int i, rc;
1079 +
1080 ++ if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
1081 ++ return;
1082 ++
1083 + np = of_find_node_by_path("/testcase-data/interrupts/interrupts-extended0");
1084 + if (!np) {
1085 + pr_err("missing testcase data\n");
1086 +@@ -778,15 +784,19 @@ static void __init of_unittest_platform_populate(void)
1087 + pdev = of_find_device_by_node(np);
1088 + unittest(pdev, "device 1 creation failed\n");
1089 +
1090 +- irq = platform_get_irq(pdev, 0);
1091 +- unittest(irq == -EPROBE_DEFER, "device deferred probe failed - %d\n", irq);
1092 +-
1093 +- /* Test that a parsing failure does not return -EPROBE_DEFER */
1094 +- np = of_find_node_by_path("/testcase-data/testcase-device2");
1095 +- pdev = of_find_device_by_node(np);
1096 +- unittest(pdev, "device 2 creation failed\n");
1097 +- irq = platform_get_irq(pdev, 0);
1098 +- unittest(irq < 0 && irq != -EPROBE_DEFER, "device parsing error failed - %d\n", irq);
1099 ++ if (!(of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)) {
1100 ++ irq = platform_get_irq(pdev, 0);
1101 ++ unittest(irq == -EPROBE_DEFER,
1102 ++ "device deferred probe failed - %d\n", irq);
1103 ++
1104 ++ /* Test that a parsing failure does not return -EPROBE_DEFER */
1105 ++ np = of_find_node_by_path("/testcase-data/testcase-device2");
1106 ++ pdev = of_find_device_by_node(np);
1107 ++ unittest(pdev, "device 2 creation failed\n");
1108 ++ irq = platform_get_irq(pdev, 0);
1109 ++ unittest(irq < 0 && irq != -EPROBE_DEFER,
1110 ++ "device parsing error failed - %d\n", irq);
1111 ++ }
1112 +
1113 + np = of_find_node_by_path("/testcase-data/platform-tests");
1114 + unittest(np, "No testcase data in device tree\n");
1115 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
1116 +index 6b3c5c4cbb37..ccbbd4cde0f1 100644
1117 +--- a/drivers/pci/pci.c
1118 ++++ b/drivers/pci/pci.c
1119 +@@ -1114,12 +1114,12 @@ int pci_save_state(struct pci_dev *dev)
1120 + EXPORT_SYMBOL(pci_save_state);
1121 +
1122 + static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1123 +- u32 saved_val, int retry)
1124 ++ u32 saved_val, int retry, bool force)
1125 + {
1126 + u32 val;
1127 +
1128 + pci_read_config_dword(pdev, offset, &val);
1129 +- if (val == saved_val)
1130 ++ if (!force && val == saved_val)
1131 + return;
1132 +
1133 + for (;;) {
1134 +@@ -1138,25 +1138,36 @@ static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1135 + }
1136 +
1137 + static void pci_restore_config_space_range(struct pci_dev *pdev,
1138 +- int start, int end, int retry)
1139 ++ int start, int end, int retry,
1140 ++ bool force)
1141 + {
1142 + int index;
1143 +
1144 + for (index = end; index >= start; index--)
1145 + pci_restore_config_dword(pdev, 4 * index,
1146 + pdev->saved_config_space[index],
1147 +- retry);
1148 ++ retry, force);
1149 + }
1150 +
1151 + static void pci_restore_config_space(struct pci_dev *pdev)
1152 + {
1153 + if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1154 +- pci_restore_config_space_range(pdev, 10, 15, 0);
1155 ++ pci_restore_config_space_range(pdev, 10, 15, 0, false);
1156 + /* Restore BARs before the command register. */
1157 +- pci_restore_config_space_range(pdev, 4, 9, 10);
1158 +- pci_restore_config_space_range(pdev, 0, 3, 0);
1159 ++ pci_restore_config_space_range(pdev, 4, 9, 10, false);
1160 ++ pci_restore_config_space_range(pdev, 0, 3, 0, false);
1161 ++ } else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
1162 ++ pci_restore_config_space_range(pdev, 12, 15, 0, false);
1163 ++
1164 ++ /*
1165 ++ * Force rewriting of prefetch registers to avoid S3 resume
1166 ++ * issues on Intel PCI bridges that occur when these
1167 ++ * registers are not explicitly written.
1168 ++ */
1169 ++ pci_restore_config_space_range(pdev, 9, 11, 0, true);
1170 ++ pci_restore_config_space_range(pdev, 0, 8, 0, false);
1171 + } else {
1172 +- pci_restore_config_space_range(pdev, 0, 15, 0);
1173 ++ pci_restore_config_space_range(pdev, 0, 15, 0, false);
1174 + }
1175 + }
1176 +
1177 +diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
1178 +index 789c81482542..e6429d419b80 100644
1179 +--- a/drivers/tty/tty_io.c
1180 ++++ b/drivers/tty/tty_io.c
1181 +@@ -1475,6 +1475,7 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *
1182 + static int tty_reopen(struct tty_struct *tty)
1183 + {
1184 + struct tty_driver *driver = tty->driver;
1185 ++ int retval;
1186 +
1187 + if (driver->type == TTY_DRIVER_TYPE_PTY &&
1188 + driver->subtype == PTY_TYPE_MASTER)
1189 +@@ -1488,10 +1489,14 @@ static int tty_reopen(struct tty_struct *tty)
1190 +
1191 + tty->count++;
1192 +
1193 +- if (!tty->ldisc)
1194 +- return tty_ldisc_reinit(tty, tty->termios.c_line);
1195 ++ if (tty->ldisc)
1196 ++ return 0;
1197 +
1198 +- return 0;
1199 ++ retval = tty_ldisc_reinit(tty, tty->termios.c_line);
1200 ++ if (retval)
1201 ++ tty->count--;
1202 ++
1203 ++ return retval;
1204 + }
1205 +
1206 + /**
1207 +diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
1208 +index ce9e457e60c3..c10875834a5a 100644
1209 +--- a/drivers/usb/host/xhci-mtk.c
1210 ++++ b/drivers/usb/host/xhci-mtk.c
1211 +@@ -735,10 +735,10 @@ static int __maybe_unused xhci_mtk_resume(struct device *dev)
1212 + xhci_mtk_host_enable(mtk);
1213 +
1214 + xhci_dbg(xhci, "%s: restart port polling\n", __func__);
1215 +- set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1216 +- usb_hcd_poll_rh_status(hcd);
1217 + set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
1218 + usb_hcd_poll_rh_status(xhci->shared_hcd);
1219 ++ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1220 ++ usb_hcd_poll_rh_status(hcd);
1221 + return 0;
1222 + }
1223 +
1224 +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
1225 +index f6782a347cde..b5140555a8d5 100644
1226 +--- a/drivers/usb/host/xhci-pci.c
1227 ++++ b/drivers/usb/host/xhci-pci.c
1228 +@@ -179,6 +179,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
1229 + }
1230 + if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
1231 + (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
1232 ++ pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
1233 ++ pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
1234 + pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
1235 + pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI))
1236 + xhci->quirks |= XHCI_MISSING_CAS;
1237 +diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
1238 +index 2674da40d9cd..6d6acf2c07c3 100644
1239 +--- a/drivers/usb/serial/usb-serial-simple.c
1240 ++++ b/drivers/usb/serial/usb-serial-simple.c
1241 +@@ -87,7 +87,8 @@ DEVICE(moto_modem, MOTO_IDS);
1242 +
1243 + /* Motorola Tetra driver */
1244 + #define MOTOROLA_TETRA_IDS() \
1245 +- { USB_DEVICE(0x0cad, 0x9011) } /* Motorola Solutions TETRA PEI */
1246 ++ { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
1247 ++ { USB_DEVICE(0x0cad, 0x9012) } /* MTP6550 */
1248 + DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
1249 +
1250 + /* Novatel Wireless GPS driver */
1251 +diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
1252 +index ef69273074ba..a3edb20ea4c3 100644
1253 +--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
1254 ++++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
1255 +@@ -496,6 +496,9 @@ static int omapfb_memory_read(struct fb_info *fbi,
1256 + if (!access_ok(VERIFY_WRITE, mr->buffer, mr->buffer_size))
1257 + return -EFAULT;
1258 +
1259 ++ if (mr->w > 4096 || mr->h > 4096)
1260 ++ return -EINVAL;
1261 ++
1262 + if (mr->w * mr->h * 3 > mr->buffer_size)
1263 + return -EINVAL;
1264 +
1265 +@@ -509,7 +512,7 @@ static int omapfb_memory_read(struct fb_info *fbi,
1266 + mr->x, mr->y, mr->w, mr->h);
1267 +
1268 + if (r > 0) {
1269 +- if (copy_to_user(mr->buffer, buf, mr->buffer_size))
1270 ++ if (copy_to_user(mr->buffer, buf, r))
1271 + r = -EFAULT;
1272 + }
1273 +
1274 +diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
1275 +index c19c96840480..c10180d0b018 100644
1276 +--- a/fs/ext4/xattr.c
1277 ++++ b/fs/ext4/xattr.c
1278 +@@ -209,12 +209,12 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
1279 + {
1280 + int error;
1281 +
1282 +- if (buffer_verified(bh))
1283 +- return 0;
1284 +-
1285 + if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
1286 + BHDR(bh)->h_blocks != cpu_to_le32(1))
1287 + return -EFSCORRUPTED;
1288 ++ if (buffer_verified(bh))
1289 ++ return 0;
1290 ++
1291 + if (!ext4_xattr_block_csum_verify(inode, bh))
1292 + return -EFSBADCRC;
1293 + error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
1294 +@@ -645,14 +645,20 @@ static size_t ext4_xattr_free_space(struct ext4_xattr_entry *last,
1295 + }
1296 +
1297 + static int
1298 +-ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
1299 ++ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s,
1300 ++ struct inode *inode)
1301 + {
1302 +- struct ext4_xattr_entry *last;
1303 ++ struct ext4_xattr_entry *last, *next;
1304 + size_t free, min_offs = s->end - s->base, name_len = strlen(i->name);
1305 +
1306 + /* Compute min_offs and last. */
1307 + last = s->first;
1308 +- for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
1309 ++ for (; !IS_LAST_ENTRY(last); last = next) {
1310 ++ next = EXT4_XATTR_NEXT(last);
1311 ++ if ((void *)next >= s->end) {
1312 ++ EXT4_ERROR_INODE(inode, "corrupted xattr entries");
1313 ++ return -EIO;
1314 ++ }
1315 + if (last->e_value_size) {
1316 + size_t offs = le16_to_cpu(last->e_value_offs);
1317 + if (offs < min_offs)
1318 +@@ -834,7 +840,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
1319 + mb_cache_entry_delete_block(ext4_mb_cache, hash,
1320 + bs->bh->b_blocknr);
1321 + ea_bdebug(bs->bh, "modifying in-place");
1322 +- error = ext4_xattr_set_entry(i, s);
1323 ++ error = ext4_xattr_set_entry(i, s, inode);
1324 + if (!error) {
1325 + if (!IS_LAST_ENTRY(s->first))
1326 + ext4_xattr_rehash(header(s->base),
1327 +@@ -881,7 +887,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
1328 + s->end = s->base + sb->s_blocksize;
1329 + }
1330 +
1331 +- error = ext4_xattr_set_entry(i, s);
1332 ++ error = ext4_xattr_set_entry(i, s, inode);
1333 + if (error == -EFSCORRUPTED)
1334 + goto bad_block;
1335 + if (error)
1336 +@@ -1079,7 +1085,7 @@ int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
1337 +
1338 + if (EXT4_I(inode)->i_extra_isize == 0)
1339 + return -ENOSPC;
1340 +- error = ext4_xattr_set_entry(i, s);
1341 ++ error = ext4_xattr_set_entry(i, s, inode);
1342 + if (error) {
1343 + if (error == -ENOSPC &&
1344 + ext4_has_inline_data(inode)) {
1345 +@@ -1091,7 +1097,7 @@ int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
1346 + error = ext4_xattr_ibody_find(inode, i, is);
1347 + if (error)
1348 + return error;
1349 +- error = ext4_xattr_set_entry(i, s);
1350 ++ error = ext4_xattr_set_entry(i, s, inode);
1351 + }
1352 + if (error)
1353 + return error;
1354 +@@ -1117,7 +1123,7 @@ static int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
1355 +
1356 + if (EXT4_I(inode)->i_extra_isize == 0)
1357 + return -ENOSPC;
1358 +- error = ext4_xattr_set_entry(i, s);
1359 ++ error = ext4_xattr_set_entry(i, s, inode);
1360 + if (error)
1361 + return error;
1362 + header = IHDR(inode, ext4_raw_inode(&is->iloc));
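The new `next` bound check in ext4_xattr_set_entry above follows a general pattern for walking variable-length records in an untrusted buffer: compute the next pointer first, and refuse to dereference it once it reaches the end of the buffer. A minimal generic sketch (hypothetical record type, not ext4's xattr layout):

    #include <stddef.h>
    #include <stdint.h>

    struct rec {
            uint16_t rec_len;   /* total size of this record in bytes */
    };

    /* 0 if every record fits inside [p, end), -1 on a corrupt length. */
    static int walk_records(const uint8_t *p, const uint8_t *end)
    {
            while (p + sizeof(struct rec) <= end) {
                    const struct rec *r = (const void *)p;

                    if (r->rec_len < sizeof(*r) ||
                        r->rec_len > (size_t)(end - p))
                            return -1;  /* would step past the buffer */
                    p += r->rec_len;
            }
            return p == end ? 0 : -1;
    }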
1363 +diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
1364 +index b4dbc2f59656..aee2a066a446 100644
1365 +--- a/fs/f2fs/checkpoint.c
1366 ++++ b/fs/f2fs/checkpoint.c
1367 +@@ -676,6 +676,7 @@ static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
1368 +
1369 + crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
1370 + if (crc_offset >= blk_size) {
1371 ++ f2fs_put_page(*cp_page, 1);
1372 + f2fs_msg(sbi->sb, KERN_WARNING,
1373 + "invalid crc_offset: %zu", crc_offset);
1374 + return -EINVAL;
1375 +@@ -684,6 +685,7 @@ static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
1376 + crc = le32_to_cpu(*((__le32 *)((unsigned char *)*cp_block
1377 + + crc_offset)));
1378 + if (!f2fs_crc_valid(sbi, crc, *cp_block, crc_offset)) {
1379 ++ f2fs_put_page(*cp_page, 1);
1380 + f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value");
1381 + return -EINVAL;
1382 + }
1383 +@@ -703,14 +705,14 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
1384 + err = get_checkpoint_version(sbi, cp_addr, &cp_block,
1385 + &cp_page_1, version);
1386 + if (err)
1387 +- goto invalid_cp1;
1388 ++ return NULL;
1389 + pre_version = *version;
1390 +
1391 + cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
1392 + err = get_checkpoint_version(sbi, cp_addr, &cp_block,
1393 + &cp_page_2, version);
1394 + if (err)
1395 +- goto invalid_cp2;
1396 ++ goto invalid_cp;
1397 + cur_version = *version;
1398 +
1399 + if (cur_version == pre_version) {
1400 +@@ -718,9 +720,8 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
1401 + f2fs_put_page(cp_page_2, 1);
1402 + return cp_page_1;
1403 + }
1404 +-invalid_cp2:
1405 + f2fs_put_page(cp_page_2, 1);
1406 +-invalid_cp1:
1407 ++invalid_cp:
1408 + f2fs_put_page(cp_page_1, 1);
1409 + return NULL;
1410 + }
1411 +diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
1412 +index 03dda1cbe485..727a9e3fa806 100644
1413 +--- a/fs/ubifs/super.c
1414 ++++ b/fs/ubifs/super.c
1415 +@@ -1918,6 +1918,9 @@ static struct ubi_volume_desc *open_ubi(const char *name, int mode)
1416 + int dev, vol;
1417 + char *endptr;
1418 +
1419 ++ if (!name || !*name)
1420 ++ return ERR_PTR(-EINVAL);
1421 ++
1422 + /* First, try to open using the device node path method */
1423 + ubi = ubi_open_volume_path(name, mode);
1424 + if (!IS_ERR(ubi))
1425 +diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
1426 +index 984b2112c77b..ea8a97793d2d 100644
1427 +--- a/include/linux/netfilter_bridge/ebtables.h
1428 ++++ b/include/linux/netfilter_bridge/ebtables.h
1429 +@@ -123,4 +123,9 @@ extern unsigned int ebt_do_table(struct sk_buff *skb,
1430 + /* True if the target is not a standard target */
1431 + #define INVALID_TARGET (info->target < -NUM_STANDARD_TARGETS || info->target >= 0)
1432 +
1433 ++static inline bool ebt_invalid_target(int target)
1434 ++{
1435 ++ return (target < -NUM_STANDARD_TARGETS || target >= 0);
1436 ++}
1437 ++
1438 + #endif
1439 +diff --git a/kernel/cgroup.c b/kernel/cgroup.c
1440 +index 4c233437ee1a..bb0cf1caf1cd 100644
1441 +--- a/kernel/cgroup.c
1442 ++++ b/kernel/cgroup.c
1443 +@@ -4386,7 +4386,11 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
1444 + */
1445 + do {
1446 + css_task_iter_start(&from->self, &it);
1447 +- task = css_task_iter_next(&it);
1448 ++
1449 ++ do {
1450 ++ task = css_task_iter_next(&it);
1451 ++ } while (task && (task->flags & PF_EXITING));
1452 ++
1453 + if (task)
1454 + get_task_struct(task);
1455 + css_task_iter_end(&it);
1456 +diff --git a/mm/vmstat.c b/mm/vmstat.c
1457 +index 5f658b6a684f..d31e801a467c 100644
1458 +--- a/mm/vmstat.c
1459 ++++ b/mm/vmstat.c
1460 +@@ -1078,6 +1078,9 @@ const char * const vmstat_text[] = {
1461 + #ifdef CONFIG_SMP
1462 + "nr_tlb_remote_flush",
1463 + "nr_tlb_remote_flush_received",
1464 ++#else
1465 ++ "", /* nr_tlb_remote_flush */
1466 ++ "", /* nr_tlb_remote_flush_received */
1467 + #endif /* CONFIG_SMP */
1468 + "nr_tlb_local_flush_all",
1469 + "nr_tlb_local_flush_one",
1470 +diff --git a/net/bridge/netfilter/ebt_arpreply.c b/net/bridge/netfilter/ebt_arpreply.c
1471 +index 070cf134a22f..f2660c1b29e4 100644
1472 +--- a/net/bridge/netfilter/ebt_arpreply.c
1473 ++++ b/net/bridge/netfilter/ebt_arpreply.c
1474 +@@ -67,6 +67,9 @@ static int ebt_arpreply_tg_check(const struct xt_tgchk_param *par)
1475 + if (e->ethproto != htons(ETH_P_ARP) ||
1476 + e->invflags & EBT_IPROTO)
1477 + return -EINVAL;
1478 ++ if (ebt_invalid_target(info->target))
1479 ++ return -EINVAL;
1480 ++
1481 + return 0;
1482 + }
1483 +
1484 +diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
1485 +index e63fd12f923a..6ef9d32c34f1 100644
1486 +--- a/net/mac80211/cfg.c
1487 ++++ b/net/mac80211/cfg.c
1488 +@@ -386,7 +386,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
1489 + case NL80211_IFTYPE_AP:
1490 + case NL80211_IFTYPE_AP_VLAN:
1491 + /* Keys without a station are used for TX only */
1492 +- if (key->sta && test_sta_flag(key->sta, WLAN_STA_MFP))
1493 ++ if (sta && test_sta_flag(sta, WLAN_STA_MFP))
1494 + key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
1495 + break;
1496 + case NL80211_IFTYPE_ADHOC:
1497 +diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
1498 +index fbc1474960e3..f6d1bc93589c 100644
1499 +--- a/tools/arch/x86/include/asm/cpufeatures.h
1500 ++++ b/tools/arch/x86/include/asm/cpufeatures.h
1501 +@@ -104,7 +104,6 @@
1502 + #define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
1503 + #define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
1504 + #define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
1505 +-/* free, was #define X86_FEATURE_EAGER_FPU ( 3*32+29) * "eagerfpu" Non lazy FPU restore */
1506 + #define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
1507 +
1508 + /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */