From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Wed, 14 Nov 2018 14:01:07
Message-Id: 1542204041.0bf72262e810064df8ab8134561866268dc70d42.mpagano@gentoo
1 commit: 0bf72262e810064df8ab8134561866268dc70d42
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Sat Oct 13 16:33:25 2018 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Nov 14 14:00:41 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0bf72262
7
8 Linux patch 4.14.76
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1075_linux-4.14.76.patch | 1999 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 2003 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 0684007..7d1d7b6 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -343,6 +343,10 @@ Patch: 1074_linux-4.14.75.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.14.75
23
24 +Patch: 1075_linux-4.14.76.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.14.76
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1075_linux-4.14.76.patch b/1075_linux-4.14.76.patch
33 new file mode 100644
34 index 0000000..1029509
35 --- /dev/null
36 +++ b/1075_linux-4.14.76.patch
37 @@ -0,0 +1,1999 @@
38 +diff --git a/Makefile b/Makefile
39 +index 7fc373c011c0..332dd011b3b9 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 4
45 + PATCHLEVEL = 14
46 +-SUBLEVEL = 75
47 ++SUBLEVEL = 76
48 + EXTRAVERSION =
49 + NAME = Petit Gorille
50 +
51 +diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
52 +index 4674541eba3f..8ce6e7235915 100644
53 +--- a/arch/arc/kernel/process.c
54 ++++ b/arch/arc/kernel/process.c
55 +@@ -241,6 +241,26 @@ int copy_thread(unsigned long clone_flags,
56 + task_thread_info(current)->thr_ptr;
57 + }
58 +
59 ++
60 ++ /*
61 ++ * setup usermode thread pointer #1:
62 ++ * when child is picked by scheduler, __switch_to() uses @c_callee to
63 ++ * populate usermode callee regs: this works (despite being in a kernel
64 ++ * function) since special return path for child @ret_from_fork()
65 ++ * ensures those regs are not clobbered all the way to RTIE to usermode
66 ++ */
67 ++ c_callee->r25 = task_thread_info(p)->thr_ptr;
68 ++
69 ++#ifdef CONFIG_ARC_CURR_IN_REG
70 ++ /*
71 ++ * setup usermode thread pointer #2:
72 ++ * however for this special use of r25 in kernel, __switch_to() sets
73 ++ * r25 for kernel needs and only in the final return path is usermode
74 ++ * r25 setup, from pt_regs->user_r25. So set that up as well
75 ++ */
76 ++ c_regs->user_r25 = c_callee->r25;
77 ++#endif
78 ++
79 + return 0;
80 + }
81 +
82 +diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
83 +index bbcdf929be54..a5e919e34c42 100644
84 +--- a/arch/powerpc/include/asm/setup.h
85 ++++ b/arch/powerpc/include/asm/setup.h
86 +@@ -9,6 +9,7 @@ extern void ppc_printk_progress(char *s, unsigned short hex);
87 +
88 + extern unsigned int rtas_data;
89 + extern unsigned long long memory_limit;
90 ++extern bool init_mem_is_free;
91 + extern unsigned long klimit;
92 + extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
93 +
94 +diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
95 +index 096d4e4d31e6..882c750dc519 100644
96 +--- a/arch/powerpc/lib/code-patching.c
97 ++++ b/arch/powerpc/lib/code-patching.c
98 +@@ -22,20 +22,28 @@
99 + #include <asm/page.h>
100 + #include <asm/code-patching.h>
101 + #include <asm/setup.h>
102 ++#include <asm/sections.h>
103 +
104 +-static int __patch_instruction(unsigned int *addr, unsigned int instr)
105 ++static int __patch_instruction(unsigned int *exec_addr, unsigned int instr,
106 ++ unsigned int *patch_addr)
107 + {
108 + int err;
109 +
110 +- __put_user_size(instr, addr, 4, err);
111 ++ __put_user_size(instr, patch_addr, 4, err);
112 + if (err)
113 + return err;
114 +
115 +- asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" :: "r" (addr));
116 ++ asm ("dcbst 0, %0; sync; icbi 0,%1; sync; isync" :: "r" (patch_addr),
117 ++ "r" (exec_addr));
118 +
119 + return 0;
120 + }
121 +
122 ++static int raw_patch_instruction(unsigned int *addr, unsigned int instr)
123 ++{
124 ++ return __patch_instruction(addr, instr, addr);
125 ++}
126 ++
127 + #ifdef CONFIG_STRICT_KERNEL_RWX
128 + static DEFINE_PER_CPU(struct vm_struct *, text_poke_area);
129 +
130 +@@ -135,10 +143,10 @@ static inline int unmap_patch_area(unsigned long addr)
131 + return 0;
132 + }
133 +
134 +-int patch_instruction(unsigned int *addr, unsigned int instr)
135 ++static int do_patch_instruction(unsigned int *addr, unsigned int instr)
136 + {
137 + int err;
138 +- unsigned int *dest = NULL;
139 ++ unsigned int *patch_addr = NULL;
140 + unsigned long flags;
141 + unsigned long text_poke_addr;
142 + unsigned long kaddr = (unsigned long)addr;
143 +@@ -149,7 +157,7 @@ int patch_instruction(unsigned int *addr, unsigned int instr)
144 + * to allow patching. We just do the plain old patching
145 + */
146 + if (!this_cpu_read(*PTRRELOC(&text_poke_area)))
147 +- return __patch_instruction(addr, instr);
148 ++ return raw_patch_instruction(addr, instr);
149 +
150 + local_irq_save(flags);
151 +
152 +@@ -159,17 +167,10 @@ int patch_instruction(unsigned int *addr, unsigned int instr)
153 + goto out;
154 + }
155 +
156 +- dest = (unsigned int *)(text_poke_addr) +
157 ++ patch_addr = (unsigned int *)(text_poke_addr) +
158 + ((kaddr & ~PAGE_MASK) / sizeof(unsigned int));
159 +
160 +- /*
161 +- * We use __put_user_size so that we can handle faults while
162 +- * writing to dest and return err to handle faults gracefully
163 +- */
164 +- __put_user_size(instr, dest, 4, err);
165 +- if (!err)
166 +- asm ("dcbst 0, %0; sync; icbi 0,%0; icbi 0,%1; sync; isync"
167 +- ::"r" (dest), "r"(addr));
168 ++ __patch_instruction(addr, instr, patch_addr);
169 +
170 + err = unmap_patch_area(text_poke_addr);
171 + if (err)
172 +@@ -182,12 +183,22 @@ out:
173 + }
174 + #else /* !CONFIG_STRICT_KERNEL_RWX */
175 +
176 +-int patch_instruction(unsigned int *addr, unsigned int instr)
177 ++static int do_patch_instruction(unsigned int *addr, unsigned int instr)
178 + {
179 +- return __patch_instruction(addr, instr);
180 ++ return raw_patch_instruction(addr, instr);
181 + }
182 +
183 + #endif /* CONFIG_STRICT_KERNEL_RWX */
184 ++
185 ++int patch_instruction(unsigned int *addr, unsigned int instr)
186 ++{
187 ++ /* Make sure we aren't patching a freed init section */
188 ++ if (init_mem_is_free && init_section_contains(addr, 4)) {
189 ++ pr_debug("Skipping init section patching addr: 0x%px\n", addr);
190 ++ return 0;
191 ++ }
192 ++ return do_patch_instruction(addr, instr);
193 ++}
194 + NOKPROBE_SYMBOL(patch_instruction);
195 +
196 + int patch_branch(unsigned int *addr, unsigned long target, int flags)
197 +diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
198 +index 9c2f83331e5b..30bf13b72e5e 100644
199 +--- a/arch/powerpc/mm/mem.c
200 ++++ b/arch/powerpc/mm/mem.c
201 +@@ -63,6 +63,7 @@
202 + #endif
203 +
204 + unsigned long long memory_limit;
205 ++bool init_mem_is_free;
206 +
207 + #ifdef CONFIG_HIGHMEM
208 + pte_t *kmap_pte;
209 +@@ -405,6 +406,7 @@ void free_initmem(void)
210 + {
211 + ppc_md.progress = ppc_printk_progress;
212 + mark_initmem_nx();
213 ++ init_mem_is_free = true;
214 + free_initmem_default(POISON_FREE_INITMEM);
215 + }
216 +
217 +diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
218 +index b545bf9d2328..0a550dc5c525 100644
219 +--- a/arch/x86/entry/vdso/Makefile
220 ++++ b/arch/x86/entry/vdso/Makefile
221 +@@ -74,7 +74,13 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
222 + CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
223 + $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
224 + -fno-omit-frame-pointer -foptimize-sibling-calls \
225 +- -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO $(RETPOLINE_VDSO_CFLAGS)
226 ++ -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
227 ++
228 ++ifdef CONFIG_RETPOLINE
229 ++ifneq ($(RETPOLINE_VDSO_CFLAGS),)
230 ++ CFL += $(RETPOLINE_VDSO_CFLAGS)
231 ++endif
232 ++endif
233 +
234 + $(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
235 +
236 +@@ -153,7 +159,13 @@ KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
237 + KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
238 + KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
239 + KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
240 +-KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
241 ++
242 ++ifdef CONFIG_RETPOLINE
243 ++ifneq ($(RETPOLINE_VDSO_CFLAGS),)
244 ++ KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
245 ++endif
246 ++endif
247 ++
248 + $(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
249 +
250 + $(obj)/vdso32.so.dbg: FORCE \
251 +diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
252 +index fa8dbfcf7ed3..9c35dc0a9d64 100644
253 +--- a/arch/x86/entry/vdso/vclock_gettime.c
254 ++++ b/arch/x86/entry/vdso/vclock_gettime.c
255 +@@ -43,8 +43,9 @@ extern u8 hvclock_page
256 + notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
257 + {
258 + long ret;
259 +- asm("syscall" : "=a" (ret) :
260 +- "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
261 ++ asm ("syscall" : "=a" (ret), "=m" (*ts) :
262 ++ "0" (__NR_clock_gettime), "D" (clock), "S" (ts) :
263 ++ "memory", "rcx", "r11");
264 + return ret;
265 + }
266 +
267 +@@ -52,8 +53,9 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
268 + {
269 + long ret;
270 +
271 +- asm("syscall" : "=a" (ret) :
272 +- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
273 ++ asm ("syscall" : "=a" (ret), "=m" (*tv), "=m" (*tz) :
274 ++ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) :
275 ++ "memory", "rcx", "r11");
276 + return ret;
277 + }
278 +
279 +@@ -64,13 +66,13 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
280 + {
281 + long ret;
282 +
283 +- asm(
284 ++ asm (
285 + "mov %%ebx, %%edx \n"
286 +- "mov %2, %%ebx \n"
287 ++ "mov %[clock], %%ebx \n"
288 + "call __kernel_vsyscall \n"
289 + "mov %%edx, %%ebx \n"
290 +- : "=a" (ret)
291 +- : "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
292 ++ : "=a" (ret), "=m" (*ts)
293 ++ : "0" (__NR_clock_gettime), [clock] "g" (clock), "c" (ts)
294 + : "memory", "edx");
295 + return ret;
296 + }
297 +@@ -79,13 +81,13 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
298 + {
299 + long ret;
300 +
301 +- asm(
302 ++ asm (
303 + "mov %%ebx, %%edx \n"
304 +- "mov %2, %%ebx \n"
305 ++ "mov %[tv], %%ebx \n"
306 + "call __kernel_vsyscall \n"
307 + "mov %%edx, %%ebx \n"
308 +- : "=a" (ret)
309 +- : "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
310 ++ : "=a" (ret), "=m" (*tv), "=m" (*tz)
311 ++ : "0" (__NR_gettimeofday), [tv] "g" (tv), "c" (tz)
312 + : "memory", "edx");
313 + return ret;
314 + }
315 +diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
316 +index 1dfb808abd23..d755e0d44ac1 100644
317 +--- a/arch/x86/kvm/mmu.c
318 ++++ b/arch/x86/kvm/mmu.c
319 +@@ -231,6 +231,17 @@ static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
320 + */
321 + static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;
322 +
323 ++/*
324 ++ * In some cases, we need to preserve the GFN of a non-present or reserved
325 ++ * SPTE when we usurp the upper five bits of the physical address space to
326 ++ * defend against L1TF, e.g. for MMIO SPTEs. To preserve the GFN, we'll
327 ++ * shift bits of the GFN that overlap with shadow_nonpresent_or_rsvd_mask
328 ++ * left into the reserved bits, i.e. the GFN in the SPTE will be split into
329 ++ * high and low parts. This mask covers the lower bits of the GFN.
330 ++ */
331 ++static u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
332 ++
333 ++
334 + static void mmu_spte_set(u64 *sptep, u64 spte);
335 + static void mmu_free_roots(struct kvm_vcpu *vcpu);
336 +
337 +@@ -338,9 +349,7 @@ static bool is_mmio_spte(u64 spte)
338 +
339 + static gfn_t get_mmio_spte_gfn(u64 spte)
340 + {
341 +- u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask |
342 +- shadow_nonpresent_or_rsvd_mask;
343 +- u64 gpa = spte & ~mask;
344 ++ u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
345 +
346 + gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
347 + & shadow_nonpresent_or_rsvd_mask;
348 +@@ -404,6 +413,8 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
349 +
350 + static void kvm_mmu_reset_all_pte_masks(void)
351 + {
352 ++ u8 low_phys_bits;
353 ++
354 + shadow_user_mask = 0;
355 + shadow_accessed_mask = 0;
356 + shadow_dirty_mask = 0;
357 +@@ -418,12 +429,17 @@ static void kvm_mmu_reset_all_pte_masks(void)
358 + * appropriate mask to guard against L1TF attacks. Otherwise, it is
359 + * assumed that the CPU is not vulnerable to L1TF.
360 + */
361 ++ low_phys_bits = boot_cpu_data.x86_phys_bits;
362 + if (boot_cpu_data.x86_phys_bits <
363 +- 52 - shadow_nonpresent_or_rsvd_mask_len)
364 ++ 52 - shadow_nonpresent_or_rsvd_mask_len) {
365 + shadow_nonpresent_or_rsvd_mask =
366 + rsvd_bits(boot_cpu_data.x86_phys_bits -
367 + shadow_nonpresent_or_rsvd_mask_len,
368 + boot_cpu_data.x86_phys_bits - 1);
369 ++ low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len;
370 ++ }
371 ++ shadow_nonpresent_or_rsvd_lower_gfn_mask =
372 ++ GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
373 + }
374 +
375 + static int is_cpuid_PSE36(void)
376 +diff --git a/block/blk-mq.c b/block/blk-mq.c
377 +index 49979c095f31..eac444804736 100644
378 +--- a/block/blk-mq.c
379 ++++ b/block/blk-mq.c
380 +@@ -1512,7 +1512,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
381 + BUG_ON(!rq->q);
382 + if (rq->mq_ctx != this_ctx) {
383 + if (this_ctx) {
384 +- trace_block_unplug(this_q, depth, from_schedule);
385 ++ trace_block_unplug(this_q, depth, !from_schedule);
386 + blk_mq_sched_insert_requests(this_q, this_ctx,
387 + &ctx_list,
388 + from_schedule);
389 +@@ -1532,7 +1532,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
390 + * on 'ctx_list'. Do those.
391 + */
392 + if (this_ctx) {
393 +- trace_block_unplug(this_q, depth, from_schedule);
394 ++ trace_block_unplug(this_q, depth, !from_schedule);
395 + blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
396 + from_schedule);
397 + }
398 +diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
399 +index 770b1539a083..d16b40cd26cc 100644
400 +--- a/drivers/base/power/main.c
401 ++++ b/drivers/base/power/main.c
402 +@@ -1462,8 +1462,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
403 +
404 + dpm_wait_for_subordinate(dev, async);
405 +
406 +- if (async_error)
407 ++ if (async_error) {
408 ++ dev->power.direct_complete = false;
409 + goto Complete;
410 ++ }
411 +
412 + /*
413 + * If a device configured to wake up the system from sleep states
414 +@@ -1475,6 +1477,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
415 + pm_wakeup_event(dev, 0);
416 +
417 + if (pm_wakeup_pending()) {
418 ++ dev->power.direct_complete = false;
419 + async_error = -EBUSY;
420 + goto Complete;
421 + }
422 +diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c
423 +index ec8a4376f74f..2fab18fae4fc 100644
424 +--- a/drivers/clocksource/timer-atmel-pit.c
425 ++++ b/drivers/clocksource/timer-atmel-pit.c
426 +@@ -180,26 +180,29 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
427 + data->base = of_iomap(node, 0);
428 + if (!data->base) {
429 + pr_err("Could not map PIT address\n");
430 +- return -ENXIO;
431 ++ ret = -ENXIO;
432 ++ goto exit;
433 + }
434 +
435 + data->mck = of_clk_get(node, 0);
436 + if (IS_ERR(data->mck)) {
437 + pr_err("Unable to get mck clk\n");
438 +- return PTR_ERR(data->mck);
439 ++ ret = PTR_ERR(data->mck);
440 ++ goto exit;
441 + }
442 +
443 + ret = clk_prepare_enable(data->mck);
444 + if (ret) {
445 + pr_err("Unable to enable mck\n");
446 +- return ret;
447 ++ goto exit;
448 + }
449 +
450 + /* Get the interrupts property */
451 + data->irq = irq_of_parse_and_map(node, 0);
452 + if (!data->irq) {
453 + pr_err("Unable to get IRQ from DT\n");
454 +- return -EINVAL;
455 ++ ret = -EINVAL;
456 ++ goto exit;
457 + }
458 +
459 + /*
460 +@@ -227,7 +230,7 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
461 + ret = clocksource_register_hz(&data->clksrc, pit_rate);
462 + if (ret) {
463 + pr_err("Failed to register clocksource\n");
464 +- return ret;
465 ++ goto exit;
466 + }
467 +
468 + /* Set up irq handler */
469 +@@ -236,7 +239,8 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
470 + "at91_tick", data);
471 + if (ret) {
472 + pr_err("Unable to setup IRQ\n");
473 +- return ret;
474 ++ clocksource_unregister(&data->clksrc);
475 ++ goto exit;
476 + }
477 +
478 + /* Set up and register clockevents */
479 +@@ -254,6 +258,10 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
480 + clockevents_register_device(&data->clkevt);
481 +
482 + return 0;
483 ++
484 ++exit:
485 ++ kfree(data);
486 ++ return ret;
487 + }
488 + TIMER_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit",
489 + at91sam926x_pit_dt_init);
490 +diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
491 +index 0e8160701833..bb7b59fc5c08 100644
492 +--- a/drivers/crypto/chelsio/chcr_algo.c
493 ++++ b/drivers/crypto/chelsio/chcr_algo.c
494 +@@ -384,7 +384,8 @@ static inline int is_hmac(struct crypto_tfm *tfm)
495 +
496 + static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
497 + struct scatterlist *sg,
498 +- struct phys_sge_parm *sg_param)
499 ++ struct phys_sge_parm *sg_param,
500 ++ int pci_chan_id)
501 + {
502 + struct phys_sge_pairs *to;
503 + unsigned int len = 0, left_size = sg_param->obsize;
504 +@@ -402,6 +403,7 @@ static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
505 + phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
506 + phys_cpl->rss_hdr_int.qid = htons(sg_param->qid);
507 + phys_cpl->rss_hdr_int.hash_val = 0;
508 ++ phys_cpl->rss_hdr_int.channel = pci_chan_id;
509 + to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl +
510 + sizeof(struct cpl_rx_phys_dsgl));
511 + for (i = 0; nents && left_size; to++) {
512 +@@ -418,7 +420,8 @@ static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
513 + static inline int map_writesg_phys_cpl(struct device *dev,
514 + struct cpl_rx_phys_dsgl *phys_cpl,
515 + struct scatterlist *sg,
516 +- struct phys_sge_parm *sg_param)
517 ++ struct phys_sge_parm *sg_param,
518 ++ int pci_chan_id)
519 + {
520 + if (!sg || !sg_param->nents)
521 + return -EINVAL;
522 +@@ -428,7 +431,7 @@ static inline int map_writesg_phys_cpl(struct device *dev,
523 + pr_err("CHCR : DMA mapping failed\n");
524 + return -EINVAL;
525 + }
526 +- write_phys_cpl(phys_cpl, sg, sg_param);
527 ++ write_phys_cpl(phys_cpl, sg, sg_param, pci_chan_id);
528 + return 0;
529 + }
530 +
531 +@@ -608,7 +611,7 @@ static inline void create_wreq(struct chcr_context *ctx,
532 + is_iv ? iv_loc : IV_NOP, !!lcb,
533 + ctx->tx_qidx);
534 +
535 +- chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
536 ++ chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
537 + qid);
538 + chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
539 + 16) - ((sizeof(chcr_req->wreq)) >> 4)));
540 +@@ -698,7 +701,8 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
541 + sg_param.obsize = wrparam->bytes;
542 + sg_param.qid = wrparam->qid;
543 + error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
544 +- reqctx->dst, &sg_param);
545 ++ reqctx->dst, &sg_param,
546 ++ ctx->pci_chan_id);
547 + if (error)
548 + goto map_fail1;
549 +
550 +@@ -1228,16 +1232,23 @@ static int chcr_device_init(struct chcr_context *ctx)
551 + adap->vres.ncrypto_fc);
552 + rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
553 + txq_perchan = ntxq / u_ctx->lldi.nchan;
554 +- rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
555 +- rxq_idx += id % rxq_perchan;
556 +- txq_idx = ctx->dev->tx_channel_id * txq_perchan;
557 +- txq_idx += id % txq_perchan;
558 + spin_lock(&ctx->dev->lock_chcr_dev);
559 +- ctx->rx_qidx = rxq_idx;
560 +- ctx->tx_qidx = txq_idx;
561 ++ ctx->tx_chan_id = ctx->dev->tx_channel_id;
562 + ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
563 + ctx->dev->rx_channel_id = 0;
564 + spin_unlock(&ctx->dev->lock_chcr_dev);
565 ++ rxq_idx = ctx->tx_chan_id * rxq_perchan;
566 ++ rxq_idx += id % rxq_perchan;
567 ++ txq_idx = ctx->tx_chan_id * txq_perchan;
568 ++ txq_idx += id % txq_perchan;
569 ++ ctx->rx_qidx = rxq_idx;
570 ++ ctx->tx_qidx = txq_idx;
571 ++ /* Channel Id used by SGE to forward packet to Host.
572 ++ * Same value should be used in cpl_fw6_pld RSS_CH field
573 ++ * by FW. Driver programs PCI channel ID to be used in fw
574 ++ * at the time of queue allocation with value "pi->tx_chan"
575 ++ */
576 ++ ctx->pci_chan_id = txq_idx / txq_perchan;
577 + }
578 + out:
579 + return err;
580 +@@ -2066,7 +2077,8 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
581 + sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
582 + sg_param.qid = qid;
583 + error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
584 +- reqctx->dst, &sg_param);
585 ++ reqctx->dst, &sg_param,
586 ++ ctx->pci_chan_id);
587 + if (error)
588 + goto dstmap_fail;
589 +
590 +@@ -2389,7 +2401,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
591 + sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
592 + sg_param.qid = qid;
593 + error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
594 +- reqctx->dst, &sg_param);
595 ++ reqctx->dst, &sg_param, ctx->pci_chan_id);
596 + if (error)
597 + goto dstmap_fail;
598 +
599 +@@ -2545,7 +2557,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
600 + sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
601 + sg_param.qid = qid;
602 + error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
603 +- reqctx->dst, &sg_param);
604 ++ reqctx->dst, &sg_param,
605 ++ ctx->pci_chan_id);
606 + if (error)
607 + goto dstmap_fail;
608 +
609 +diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
610 +index 30af1ee17b87..e039d9aeb651 100644
611 +--- a/drivers/crypto/chelsio/chcr_crypto.h
612 ++++ b/drivers/crypto/chelsio/chcr_crypto.h
613 +@@ -222,6 +222,8 @@ struct chcr_context {
614 + struct chcr_dev *dev;
615 + unsigned char tx_qidx;
616 + unsigned char rx_qidx;
617 ++ unsigned char tx_chan_id;
618 ++ unsigned char pci_chan_id;
619 + struct __crypto_ctx crypto_ctx[0];
620 + };
621 +
622 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
623 +index 9fc3d387eae3..fb36425e21ff 100644
624 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
625 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
626 +@@ -231,6 +231,8 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
627 + {
628 + int i;
629 +
630 ++ cancel_delayed_work_sync(&adev->vce.idle_work);
631 ++
632 + if (adev->vce.vcpu_bo == NULL)
633 + return 0;
634 +
635 +@@ -241,7 +243,6 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
636 + if (i == AMDGPU_MAX_VCE_HANDLES)
637 + return 0;
638 +
639 +- cancel_delayed_work_sync(&adev->vce.idle_work);
640 + /* TODO: suspending running encoding sessions isn't supported */
641 + return -EINVAL;
642 + }
643 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
644 +index 1612d8aa6ad6..fca1b10628a6 100644
645 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
646 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
647 +@@ -155,11 +155,11 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
648 + unsigned size;
649 + void *ptr;
650 +
651 ++ cancel_delayed_work_sync(&adev->vcn.idle_work);
652 ++
653 + if (adev->vcn.vcpu_bo == NULL)
654 + return 0;
655 +
656 +- cancel_delayed_work_sync(&adev->vcn.idle_work);
657 +-
658 + size = amdgpu_bo_size(adev->vcn.vcpu_bo);
659 + ptr = adev->vcn.cpu_addr;
660 +
661 +diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
662 +index 7bcf5702c91c..889c95d4feec 100644
663 +--- a/drivers/gpu/drm/drm_syncobj.c
664 ++++ b/drivers/gpu/drm/drm_syncobj.c
665 +@@ -96,6 +96,8 @@ static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
666 + {
667 + int ret;
668 +
669 ++ WARN_ON(*fence);
670 ++
671 + *fence = drm_syncobj_fence_get(syncobj);
672 + if (*fence)
673 + return 1;
674 +@@ -656,6 +658,9 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
675 +
676 + if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
677 + for (i = 0; i < count; ++i) {
678 ++ if (entries[i].fence)
679 ++ continue;
680 ++
681 + drm_syncobj_fence_get_or_add_callback(syncobjs[i],
682 + &entries[i].fence,
683 + &entries[i].syncobj_cb,
684 +diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
685 +index 16423d7ab599..17144a781aeb 100644
686 +--- a/drivers/infiniband/core/ucma.c
687 ++++ b/drivers/infiniband/core/ucma.c
688 +@@ -1742,6 +1742,8 @@ static int ucma_close(struct inode *inode, struct file *filp)
689 + mutex_lock(&mut);
690 + if (!ctx->closing) {
691 + mutex_unlock(&mut);
692 ++ ucma_put_ctx(ctx);
693 ++ wait_for_completion(&ctx->comp);
694 + /* rdma_destroy_id ensures that no event handlers are
695 + * inflight for that id before releasing it.
696 + */
697 +diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
698 +index 0a5a45f3ec5f..7f1c64c4ad24 100644
699 +--- a/drivers/md/dm-cache-metadata.c
700 ++++ b/drivers/md/dm-cache-metadata.c
701 +@@ -1454,8 +1454,8 @@ static int __load_mappings(struct dm_cache_metadata *cmd,
702 + if (hints_valid) {
703 + r = dm_array_cursor_next(&cmd->hint_cursor);
704 + if (r) {
705 +- DMERR("dm_array_cursor_next for hint failed");
706 +- goto out;
707 ++ dm_array_cursor_end(&cmd->hint_cursor);
708 ++ hints_valid = false;
709 + }
710 + }
711 +
712 +diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
713 +index a4b7c2698096..e2ea57d5376e 100644
714 +--- a/drivers/md/dm-cache-target.c
715 ++++ b/drivers/md/dm-cache-target.c
716 +@@ -3097,8 +3097,13 @@ static dm_cblock_t get_cache_dev_size(struct cache *cache)
717 +
718 + static bool can_resize(struct cache *cache, dm_cblock_t new_size)
719 + {
720 +- if (from_cblock(new_size) > from_cblock(cache->cache_size))
721 +- return true;
722 ++ if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
723 ++ if (cache->sized) {
724 ++ DMERR("%s: unable to extend cache due to missing cache table reload",
725 ++ cache_device_name(cache));
726 ++ return false;
727 ++ }
728 ++ }
729 +
730 + /*
731 + * We can't drop a dirty block when shrinking the cache.
732 +diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
733 +index df514507d3f1..22003895f854 100644
734 +--- a/drivers/net/wireless/ath/ath10k/debug.c
735 ++++ b/drivers/net/wireless/ath/ath10k/debug.c
736 +@@ -1,6 +1,7 @@
737 + /*
738 + * Copyright (c) 2005-2011 Atheros Communications Inc.
739 + * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
740 ++ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
741 + *
742 + * Permission to use, copy, modify, and/or distribute this software for any
743 + * purpose with or without fee is hereby granted, provided that the above
744 +@@ -163,6 +164,8 @@ void ath10k_debug_print_hwfw_info(struct ath10k *ar)
745 + void ath10k_debug_print_board_info(struct ath10k *ar)
746 + {
747 + char boardinfo[100];
748 ++ const struct firmware *board;
749 ++ u32 crc;
750 +
751 + if (ar->id.bmi_ids_valid)
752 + scnprintf(boardinfo, sizeof(boardinfo), "%d:%d",
753 +@@ -170,11 +173,16 @@ void ath10k_debug_print_board_info(struct ath10k *ar)
754 + else
755 + scnprintf(boardinfo, sizeof(boardinfo), "N/A");
756 +
757 ++ board = ar->normal_mode_fw.board;
758 ++ if (!IS_ERR_OR_NULL(board))
759 ++ crc = crc32_le(0, board->data, board->size);
760 ++ else
761 ++ crc = 0;
762 ++
763 + ath10k_info(ar, "board_file api %d bmi_id %s crc32 %08x",
764 + ar->bd_api,
765 + boardinfo,
766 +- crc32_le(0, ar->normal_mode_fw.board->data,
767 +- ar->normal_mode_fw.board->size));
768 ++ crc);
769 + }
770 +
771 + void ath10k_debug_print_boot_info(struct ath10k *ar)
772 +diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
773 +index e0d00cef0bd8..5b974bb76e6c 100644
774 +--- a/drivers/net/wireless/ath/ath10k/trace.h
775 ++++ b/drivers/net/wireless/ath/ath10k/trace.h
776 +@@ -152,10 +152,9 @@ TRACE_EVENT(ath10k_log_dbg_dump,
777 + );
778 +
779 + TRACE_EVENT(ath10k_wmi_cmd,
780 +- TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len,
781 +- int ret),
782 ++ TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len),
783 +
784 +- TP_ARGS(ar, id, buf, buf_len, ret),
785 ++ TP_ARGS(ar, id, buf, buf_len),
786 +
787 + TP_STRUCT__entry(
788 + __string(device, dev_name(ar->dev))
789 +@@ -163,7 +162,6 @@ TRACE_EVENT(ath10k_wmi_cmd,
790 + __field(unsigned int, id)
791 + __field(size_t, buf_len)
792 + __dynamic_array(u8, buf, buf_len)
793 +- __field(int, ret)
794 + ),
795 +
796 + TP_fast_assign(
797 +@@ -171,17 +169,15 @@ TRACE_EVENT(ath10k_wmi_cmd,
798 + __assign_str(driver, dev_driver_string(ar->dev));
799 + __entry->id = id;
800 + __entry->buf_len = buf_len;
801 +- __entry->ret = ret;
802 + memcpy(__get_dynamic_array(buf), buf, buf_len);
803 + ),
804 +
805 + TP_printk(
806 +- "%s %s id %d len %zu ret %d",
807 ++ "%s %s id %d len %zu",
808 + __get_str(driver),
809 + __get_str(device),
810 + __entry->id,
811 +- __entry->buf_len,
812 +- __entry->ret
813 ++ __entry->buf_len
814 + )
815 + );
816 +
817 +diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
818 +index baec856af90f..b54001e97ced 100644
819 +--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
820 ++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
821 +@@ -1486,10 +1486,10 @@ ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
822 + bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
823 + ie_len = roundup(arg->ie_len, 4);
824 + len = (sizeof(*tlv) + sizeof(*cmd)) +
825 +- (arg->n_channels ? sizeof(*tlv) + chan_len : 0) +
826 +- (arg->n_ssids ? sizeof(*tlv) + ssid_len : 0) +
827 +- (arg->n_bssids ? sizeof(*tlv) + bssid_len : 0) +
828 +- (arg->ie_len ? sizeof(*tlv) + ie_len : 0);
829 ++ sizeof(*tlv) + chan_len +
830 ++ sizeof(*tlv) + ssid_len +
831 ++ sizeof(*tlv) + bssid_len +
832 ++ sizeof(*tlv) + ie_len;
833 +
834 + skb = ath10k_wmi_alloc_skb(ar, len);
835 + if (!skb)
836 +diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
837 +index 38a97086708b..2ab5311659ea 100644
838 +--- a/drivers/net/wireless/ath/ath10k/wmi.c
839 ++++ b/drivers/net/wireless/ath/ath10k/wmi.c
840 +@@ -1741,8 +1741,8 @@ int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
841 + cmd_hdr->cmd_id = __cpu_to_le32(cmd);
842 +
843 + memset(skb_cb, 0, sizeof(*skb_cb));
844 ++ trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len);
845 + ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
846 +- trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len, ret);
847 +
848 + if (ret)
849 + goto err_pull;
850 +diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
851 +index 3c4c58b9fe76..3b6fb5b3bdb2 100644
852 +--- a/drivers/net/xen-netback/hash.c
853 ++++ b/drivers/net/xen-netback/hash.c
854 +@@ -332,20 +332,22 @@ u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
855 + u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
856 + u32 off)
857 + {
858 +- u32 *mapping = &vif->hash.mapping[off];
859 ++ u32 *mapping = vif->hash.mapping;
860 + struct gnttab_copy copy_op = {
861 + .source.u.ref = gref,
862 + .source.domid = vif->domid,
863 +- .dest.u.gmfn = virt_to_gfn(mapping),
864 + .dest.domid = DOMID_SELF,
865 +- .dest.offset = xen_offset_in_page(mapping),
866 +- .len = len * sizeof(u32),
867 ++ .len = len * sizeof(*mapping),
868 + .flags = GNTCOPY_source_gref
869 + };
870 +
871 +- if ((off + len > vif->hash.size) || copy_op.len > XEN_PAGE_SIZE)
872 ++ if ((off + len < off) || (off + len > vif->hash.size) ||
873 ++ len > XEN_PAGE_SIZE / sizeof(*mapping))
874 + return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
875 +
876 ++ copy_op.dest.u.gmfn = virt_to_gfn(mapping + off);
877 ++ copy_op.dest.offset = xen_offset_in_page(mapping + off);
878 ++
879 + while (len-- != 0)
880 + if (mapping[off++] >= vif->num_queues)
881 + return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
882 +diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
883 +index 7deb7b5d8683..058d542647dd 100644
884 +--- a/drivers/nvme/host/fc.c
885 ++++ b/drivers/nvme/host/fc.c
886 +@@ -2868,6 +2868,10 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
887 + }
888 +
889 + if (ret) {
890 ++ nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
891 ++ cancel_work_sync(&ctrl->ctrl.reset_work);
892 ++ cancel_delayed_work_sync(&ctrl->connect_work);
893 ++
894 + /* couldn't schedule retry - fail out */
895 + dev_err(ctrl->ctrl.device,
896 + "NVME-FC{%d}: Connect retry failed\n", ctrl->cnum);
897 +diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
898 +index 985a85f281a8..7c6aff761800 100644
899 +--- a/drivers/of/unittest.c
900 ++++ b/drivers/of/unittest.c
901 +@@ -614,6 +614,9 @@ static void __init of_unittest_parse_interrupts(void)
902 + struct of_phandle_args args;
903 + int i, rc;
904 +
905 ++ if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
906 ++ return;
907 ++
908 + np = of_find_node_by_path("/testcase-data/interrupts/interrupts0");
909 + if (!np) {
910 + pr_err("missing testcase data\n");
911 +@@ -688,6 +691,9 @@ static void __init of_unittest_parse_interrupts_extended(void)
912 + struct of_phandle_args args;
913 + int i, rc;
914 +
915 ++ if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
916 ++ return;
917 ++
918 + np = of_find_node_by_path("/testcase-data/interrupts/interrupts-extended0");
919 + if (!np) {
920 + pr_err("missing testcase data\n");
921 +@@ -844,15 +850,19 @@ static void __init of_unittest_platform_populate(void)
922 + pdev = of_find_device_by_node(np);
923 + unittest(pdev, "device 1 creation failed\n");
924 +
925 +- irq = platform_get_irq(pdev, 0);
926 +- unittest(irq == -EPROBE_DEFER, "device deferred probe failed - %d\n", irq);
927 ++ if (!(of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)) {
928 ++ irq = platform_get_irq(pdev, 0);
929 ++ unittest(irq == -EPROBE_DEFER,
930 ++ "device deferred probe failed - %d\n", irq);
931 +
932 +- /* Test that a parsing failure does not return -EPROBE_DEFER */
933 +- np = of_find_node_by_path("/testcase-data/testcase-device2");
934 +- pdev = of_find_device_by_node(np);
935 +- unittest(pdev, "device 2 creation failed\n");
936 +- irq = platform_get_irq(pdev, 0);
937 +- unittest(irq < 0 && irq != -EPROBE_DEFER, "device parsing error failed - %d\n", irq);
938 ++ /* Test that a parsing failure does not return -EPROBE_DEFER */
939 ++ np = of_find_node_by_path("/testcase-data/testcase-device2");
940 ++ pdev = of_find_device_by_node(np);
941 ++ unittest(pdev, "device 2 creation failed\n");
942 ++ irq = platform_get_irq(pdev, 0);
943 ++ unittest(irq < 0 && irq != -EPROBE_DEFER,
944 ++ "device parsing error failed - %d\n", irq);
945 ++ }
946 +
947 + np = of_find_node_by_path("/testcase-data/platform-tests");
948 + unittest(np, "No testcase data in device tree\n");
949 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
950 +index 22924629e64a..1af30c881566 100644
951 +--- a/drivers/pci/pci.c
952 ++++ b/drivers/pci/pci.c
953 +@@ -1112,12 +1112,12 @@ int pci_save_state(struct pci_dev *dev)
954 + EXPORT_SYMBOL(pci_save_state);
955 +
956 + static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
957 +- u32 saved_val, int retry)
958 ++ u32 saved_val, int retry, bool force)
959 + {
960 + u32 val;
961 +
962 + pci_read_config_dword(pdev, offset, &val);
963 +- if (val == saved_val)
964 ++ if (!force && val == saved_val)
965 + return;
966 +
967 + for (;;) {
968 +@@ -1136,25 +1136,36 @@ static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
969 + }
970 +
971 + static void pci_restore_config_space_range(struct pci_dev *pdev,
972 +- int start, int end, int retry)
973 ++ int start, int end, int retry,
974 ++ bool force)
975 + {
976 + int index;
977 +
978 + for (index = end; index >= start; index--)
979 + pci_restore_config_dword(pdev, 4 * index,
980 + pdev->saved_config_space[index],
981 +- retry);
982 ++ retry, force);
983 + }
984 +
985 + static void pci_restore_config_space(struct pci_dev *pdev)
986 + {
987 + if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
988 +- pci_restore_config_space_range(pdev, 10, 15, 0);
989 ++ pci_restore_config_space_range(pdev, 10, 15, 0, false);
990 + /* Restore BARs before the command register. */
991 +- pci_restore_config_space_range(pdev, 4, 9, 10);
992 +- pci_restore_config_space_range(pdev, 0, 3, 0);
993 ++ pci_restore_config_space_range(pdev, 4, 9, 10, false);
994 ++ pci_restore_config_space_range(pdev, 0, 3, 0, false);
995 ++ } else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
996 ++ pci_restore_config_space_range(pdev, 12, 15, 0, false);
997 ++
998 ++ /*
999 ++ * Force rewriting of prefetch registers to avoid S3 resume
1000 ++ * issues on Intel PCI bridges that occur when these
1001 ++ * registers are not explicitly written.
1002 ++ */
1003 ++ pci_restore_config_space_range(pdev, 9, 11, 0, true);
1004 ++ pci_restore_config_space_range(pdev, 0, 8, 0, false);
1005 + } else {
1006 +- pci_restore_config_space_range(pdev, 0, 15, 0);
1007 ++ pci_restore_config_space_range(pdev, 0, 15, 0, false);
1008 + }
1009 + }
1010 +
1011 +diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
1012 +index 562d31073f9a..8d65b2f9ee80 100644
1013 +--- a/drivers/tty/tty_io.c
1014 ++++ b/drivers/tty/tty_io.c
1015 +@@ -1254,6 +1254,7 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *
1016 + static int tty_reopen(struct tty_struct *tty)
1017 + {
1018 + struct tty_driver *driver = tty->driver;
1019 ++ int retval;
1020 +
1021 + if (driver->type == TTY_DRIVER_TYPE_PTY &&
1022 + driver->subtype == PTY_TYPE_MASTER)
1023 +@@ -1267,10 +1268,14 @@ static int tty_reopen(struct tty_struct *tty)
1024 +
1025 + tty->count++;
1026 +
1027 +- if (!tty->ldisc)
1028 +- return tty_ldisc_reinit(tty, tty->termios.c_line);
1029 ++ if (tty->ldisc)
1030 ++ return 0;
1031 +
1032 +- return 0;
1033 ++ retval = tty_ldisc_reinit(tty, tty->termios.c_line);
1034 ++ if (retval)
1035 ++ tty->count--;
1036 ++
1037 ++ return retval;
1038 + }
1039 +
1040 + /**
1041 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
1042 +index feaa0d8f830a..9f6f402470ac 100644
1043 +--- a/drivers/usb/class/cdc-acm.c
1044 ++++ b/drivers/usb/class/cdc-acm.c
1045 +@@ -1527,6 +1527,7 @@ static void acm_disconnect(struct usb_interface *intf)
1046 + {
1047 + struct acm *acm = usb_get_intfdata(intf);
1048 + struct tty_struct *tty;
1049 ++ int i;
1050 +
1051 + /* sibling interface is already cleaning up */
1052 + if (!acm)
1053 +@@ -1557,6 +1558,11 @@ static void acm_disconnect(struct usb_interface *intf)
1054 +
1055 + tty_unregister_device(acm_tty_driver, acm->minor);
1056 +
1057 ++ usb_free_urb(acm->ctrlurb);
1058 ++ for (i = 0; i < ACM_NW; i++)
1059 ++ usb_free_urb(acm->wb[i].urb);
1060 ++ for (i = 0; i < acm->rx_buflimit; i++)
1061 ++ usb_free_urb(acm->read_urbs[i]);
1062 + acm_write_buffers_free(acm);
1063 + usb_free_coherent(acm->dev, acm->ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
1064 + acm_read_buffers_free(acm);
1065 +diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
1066 +index 8fb60657ed4f..510d28a9d190 100644
1067 +--- a/drivers/usb/host/xhci-mtk.c
1068 ++++ b/drivers/usb/host/xhci-mtk.c
1069 +@@ -780,10 +780,10 @@ static int __maybe_unused xhci_mtk_resume(struct device *dev)
1070 + xhci_mtk_host_enable(mtk);
1071 +
1072 + xhci_dbg(xhci, "%s: restart port polling\n", __func__);
1073 +- set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1074 +- usb_hcd_poll_rh_status(hcd);
1075 + set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
1076 + usb_hcd_poll_rh_status(xhci->shared_hcd);
1077 ++ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1078 ++ usb_hcd_poll_rh_status(hcd);
1079 + return 0;
1080 + }
1081 +
1082 +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
1083 +index 838d37e79fa2..9218f506f8e3 100644
1084 +--- a/drivers/usb/host/xhci-pci.c
1085 ++++ b/drivers/usb/host/xhci-pci.c
1086 +@@ -196,6 +196,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
1087 + }
1088 + if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
1089 + (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
1090 ++ pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
1091 ++ pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
1092 + pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
1093 + pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI))
1094 + xhci->quirks |= XHCI_MISSING_CAS;
1095 +diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
1096 +index 2674da40d9cd..6d6acf2c07c3 100644
1097 +--- a/drivers/usb/serial/usb-serial-simple.c
1098 ++++ b/drivers/usb/serial/usb-serial-simple.c
1099 +@@ -87,7 +87,8 @@ DEVICE(moto_modem, MOTO_IDS);
1100 +
1101 + /* Motorola Tetra driver */
1102 + #define MOTOROLA_TETRA_IDS() \
1103 +- { USB_DEVICE(0x0cad, 0x9011) } /* Motorola Solutions TETRA PEI */
1104 ++ { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
1105 ++ { USB_DEVICE(0x0cad, 0x9012) } /* MTP6550 */
1106 + DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
1107 +
1108 + /* Novatel Wireless GPS driver */
1109 +diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
1110 +index ef69273074ba..a3edb20ea4c3 100644
1111 +--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
1112 ++++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
1113 +@@ -496,6 +496,9 @@ static int omapfb_memory_read(struct fb_info *fbi,
1114 + if (!access_ok(VERIFY_WRITE, mr->buffer, mr->buffer_size))
1115 + return -EFAULT;
1116 +
1117 ++ if (mr->w > 4096 || mr->h > 4096)
1118 ++ return -EINVAL;
1119 ++
1120 + if (mr->w * mr->h * 3 > mr->buffer_size)
1121 + return -EINVAL;
1122 +
1123 +@@ -509,7 +512,7 @@ static int omapfb_memory_read(struct fb_info *fbi,
1124 + mr->x, mr->y, mr->w, mr->h);
1125 +
1126 + if (r > 0) {
1127 +- if (copy_to_user(mr->buffer, buf, mr->buffer_size))
1128 ++ if (copy_to_user(mr->buffer, buf, r))
1129 + r = -EFAULT;
1130 + }
1131 +
1132 +diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
1133 +index 36c9fbf70d44..d9873aa014a6 100644
1134 +--- a/drivers/virtio/virtio_balloon.c
1135 ++++ b/drivers/virtio/virtio_balloon.c
1136 +@@ -143,16 +143,17 @@ static void set_page_pfns(struct virtio_balloon *vb,
1137 +
1138 + static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
1139 + {
1140 +- struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
1141 + unsigned num_allocated_pages;
1142 ++ unsigned num_pfns;
1143 ++ struct page *page;
1144 ++ LIST_HEAD(pages);
1145 +
1146 + /* We can only do one array worth at a time. */
1147 + num = min(num, ARRAY_SIZE(vb->pfns));
1148 +
1149 +- mutex_lock(&vb->balloon_lock);
1150 +- for (vb->num_pfns = 0; vb->num_pfns < num;
1151 +- vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
1152 +- struct page *page = balloon_page_enqueue(vb_dev_info);
1153 ++ for (num_pfns = 0; num_pfns < num;
1154 ++ num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
1155 ++ struct page *page = balloon_page_alloc();
1156 +
1157 + if (!page) {
1158 + dev_info_ratelimited(&vb->vdev->dev,
1159 +@@ -162,11 +163,23 @@ static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
1160 + msleep(200);
1161 + break;
1162 + }
1163 ++
1164 ++ balloon_page_push(&pages, page);
1165 ++ }
1166 ++
1167 ++ mutex_lock(&vb->balloon_lock);
1168 ++
1169 ++ vb->num_pfns = 0;
1170 ++
1171 ++ while ((page = balloon_page_pop(&pages))) {
1172 ++ balloon_page_enqueue(&vb->vb_dev_info, page);
1173 ++
1174 + set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
1175 + vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
1176 + if (!virtio_has_feature(vb->vdev,
1177 + VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
1178 + adjust_managed_page_count(page, -1);
1179 ++ vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE;
1180 + }
1181 +
1182 + num_allocated_pages = vb->num_pfns;
1183 +diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
1184 +index c282e21f5b5e..41fce930f44c 100644
1185 +--- a/fs/f2fs/checkpoint.c
1186 ++++ b/fs/f2fs/checkpoint.c
1187 +@@ -708,6 +708,7 @@ static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
1188 +
1189 + crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
1190 + if (crc_offset > (blk_size - sizeof(__le32))) {
1191 ++ f2fs_put_page(*cp_page, 1);
1192 + f2fs_msg(sbi->sb, KERN_WARNING,
1193 + "invalid crc_offset: %zu", crc_offset);
1194 + return -EINVAL;
1195 +@@ -715,6 +716,7 @@ static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
1196 +
1197 + crc = cur_cp_crc(*cp_block);
1198 + if (!f2fs_crc_valid(sbi, crc, *cp_block, crc_offset)) {
1199 ++ f2fs_put_page(*cp_page, 1);
1200 + f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value");
1201 + return -EINVAL;
1202 + }
1203 +@@ -734,14 +736,14 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
1204 + err = get_checkpoint_version(sbi, cp_addr, &cp_block,
1205 + &cp_page_1, version);
1206 + if (err)
1207 +- goto invalid_cp1;
1208 ++ return NULL;
1209 + pre_version = *version;
1210 +
1211 + cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
1212 + err = get_checkpoint_version(sbi, cp_addr, &cp_block,
1213 + &cp_page_2, version);
1214 + if (err)
1215 +- goto invalid_cp2;
1216 ++ goto invalid_cp;
1217 + cur_version = *version;
1218 +
1219 + if (cur_version == pre_version) {
1220 +@@ -749,9 +751,8 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
1221 + f2fs_put_page(cp_page_2, 1);
1222 + return cp_page_1;
1223 + }
1224 +-invalid_cp2:
1225 + f2fs_put_page(cp_page_2, 1);
1226 +-invalid_cp1:
1227 ++invalid_cp:
1228 + f2fs_put_page(cp_page_1, 1);
1229 + return NULL;
1230 + }
1231 +diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
1232 +index e1cd3dcf5a03..ad827cf642fe 100644
1233 +--- a/fs/ubifs/super.c
1234 ++++ b/fs/ubifs/super.c
1235 +@@ -1930,6 +1930,9 @@ static struct ubi_volume_desc *open_ubi(const char *name, int mode)
1236 + int dev, vol;
1237 + char *endptr;
1238 +
1239 ++ if (!name || !*name)
1240 ++ return ERR_PTR(-EINVAL);
1241 ++
1242 + /* First, try to open using the device node path method */
1243 + ubi = ubi_open_volume_path(name, mode);
1244 + if (!IS_ERR(ubi))
1245 +diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h
1246 +index fbbe6da40fed..53051f3d8f25 100644
1247 +--- a/include/linux/balloon_compaction.h
1248 ++++ b/include/linux/balloon_compaction.h
1249 +@@ -50,6 +50,7 @@
1250 + #include <linux/gfp.h>
1251 + #include <linux/err.h>
1252 + #include <linux/fs.h>
1253 ++#include <linux/list.h>
1254 +
1255 + /*
1256 + * Balloon device information descriptor.
1257 +@@ -67,7 +68,9 @@ struct balloon_dev_info {
1258 + struct inode *inode;
1259 + };
1260 +
1261 +-extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info);
1262 ++extern struct page *balloon_page_alloc(void);
1263 ++extern void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
1264 ++ struct page *page);
1265 + extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info);
1266 +
1267 + static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
1268 +@@ -193,4 +196,34 @@ static inline gfp_t balloon_mapping_gfp_mask(void)
1269 + }
1270 +
1271 + #endif /* CONFIG_BALLOON_COMPACTION */
1272 ++
1273 ++/*
1274 ++ * balloon_page_push - insert a page into a page list.
1275 ++ * @head : pointer to list
1276 ++ * @page : page to be added
1277 ++ *
1278 ++ * Caller must ensure the page is private and protect the list.
1279 ++ */
1280 ++static inline void balloon_page_push(struct list_head *pages, struct page *page)
1281 ++{
1282 ++ list_add(&page->lru, pages);
1283 ++}
1284 ++
1285 ++/*
1286 ++ * balloon_page_pop - remove a page from a page list.
1287 ++ * @head : pointer to list
1288 ++ * @page : page to be added
1289 ++ *
1290 ++ * Caller must ensure the page is private and protect the list.
1291 ++ */
1292 ++static inline struct page *balloon_page_pop(struct list_head *pages)
1293 ++{
1294 ++ struct page *page = list_first_entry_or_null(pages, struct page, lru);
1295 ++
1296 ++ if (!page)
1297 ++ return NULL;
1298 ++
1299 ++ list_del(&page->lru);
1300 ++ return page;
1301 ++}
1302 + #endif /* _LINUX_BALLOON_COMPACTION_H */
1303 +diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
1304 +index 82a25880714a..7aa2de25c09c 100644
1305 +--- a/include/linux/hugetlb.h
1306 ++++ b/include/linux/hugetlb.h
1307 +@@ -140,6 +140,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
1308 + pte_t *huge_pte_offset(struct mm_struct *mm,
1309 + unsigned long addr, unsigned long sz);
1310 + int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
1311 ++void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
1312 ++ unsigned long *start, unsigned long *end);
1313 + struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
1314 + int write);
1315 + struct page *follow_huge_pd(struct vm_area_struct *vma,
1316 +@@ -169,6 +171,18 @@ static inline unsigned long hugetlb_total_pages(void)
1317 + return 0;
1318 + }
1319 +
1320 ++static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
1321 ++ pte_t *ptep)
1322 ++{
1323 ++ return 0;
1324 ++}
1325 ++
1326 ++static inline void adjust_range_if_pmd_sharing_possible(
1327 ++ struct vm_area_struct *vma,
1328 ++ unsigned long *start, unsigned long *end)
1329 ++{
1330 ++}
1331 ++
1332 + #define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n) ({ BUG(); 0; })
1333 + #define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL)
1334 + #define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
1335 +diff --git a/include/linux/mm.h b/include/linux/mm.h
1336 +index a26cf767407e..58f2263de4de 100644
1337 +--- a/include/linux/mm.h
1338 ++++ b/include/linux/mm.h
1339 +@@ -2322,6 +2322,12 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
1340 + return vma;
1341 + }
1342 +
1343 ++static inline bool range_in_vma(struct vm_area_struct *vma,
1344 ++ unsigned long start, unsigned long end)
1345 ++{
1346 ++ return (vma && vma->vm_start <= start && end <= vma->vm_end);
1347 ++}
1348 ++
1349 + #ifdef CONFIG_MMU
1350 + pgprot_t vm_get_page_prot(unsigned long vm_flags);
1351 + void vma_set_page_prot(struct vm_area_struct *vma);
1352 +diff --git a/kernel/events/core.c b/kernel/events/core.c
1353 +index 812ebf1cbb87..4dbce29a9313 100644
1354 +--- a/kernel/events/core.c
1355 ++++ b/kernel/events/core.c
1356 +@@ -3757,6 +3757,12 @@ int perf_event_read_local(struct perf_event *event, u64 *value)
1357 + goto out;
1358 + }
1359 +
1360 ++ /* If this is a pinned event it must be running on this CPU */
1361 ++ if (event->attr.pinned && event->oncpu != smp_processor_id()) {
1362 ++ ret = -EBUSY;
1363 ++ goto out;
1364 ++ }
1365 ++
1366 + /*
1367 + * If the event is currently on this CPU, its either a per-task event,
1368 + * or local to this CPU. Furthermore it means its ACTIVE (otherwise
1369 +diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
1370 +index 68d28924ba79..ef858d547e2d 100644
1371 +--- a/mm/balloon_compaction.c
1372 ++++ b/mm/balloon_compaction.c
1373 +@@ -10,23 +10,38 @@
1374 + #include <linux/export.h>
1375 + #include <linux/balloon_compaction.h>
1376 +
1377 ++/*
1378 ++ * balloon_page_alloc - allocates a new page for insertion into the balloon
1379 ++ * page list.
1380 ++ *
1381 ++ * Driver must call it to properly allocate a new enlisted balloon page.
1382 ++ * Driver must call balloon_page_enqueue before definitively removing it from
1383 ++ * the guest system. This function returns the page address for the recently
1384 ++ * allocated page or NULL in the case we fail to allocate a new page this turn.
1385 ++ */
1386 ++struct page *balloon_page_alloc(void)
1387 ++{
1388 ++ struct page *page = alloc_page(balloon_mapping_gfp_mask() |
1389 ++ __GFP_NOMEMALLOC | __GFP_NORETRY);
1390 ++ return page;
1391 ++}
1392 ++EXPORT_SYMBOL_GPL(balloon_page_alloc);
1393 ++
1394 + /*
1395 + * balloon_page_enqueue - allocates a new page and inserts it into the balloon
1396 + * page list.
1397 + * @b_dev_info: balloon device descriptor where we will insert a new page to
1398 ++ * @page: new page to enqueue - allocated using balloon_page_alloc.
1399 + *
1400 +- * Driver must call it to properly allocate a new enlisted balloon page
1401 ++ * Driver must call it to properly enqueue a new allocated balloon page
1402 + * before definitively removing it from the guest system.
1403 + * This function returns the page address for the recently enqueued page or
1404 + * NULL in the case we fail to allocate a new page this turn.
1405 + */
1406 +-struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info)
1407 ++void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
1408 ++ struct page *page)
1409 + {
1410 + unsigned long flags;
1411 +- struct page *page = alloc_page(balloon_mapping_gfp_mask() |
1412 +- __GFP_NOMEMALLOC | __GFP_NORETRY);
1413 +- if (!page)
1414 +- return NULL;
1415 +
1416 + /*
1417 + * Block others from accessing the 'page' when we get around to
1418 +@@ -39,7 +54,6 @@ struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info)
1419 + __count_vm_event(BALLOON_INFLATE);
1420 + spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
1421 + unlock_page(page);
1422 +- return page;
1423 + }
1424 + EXPORT_SYMBOL_GPL(balloon_page_enqueue);
1425 +
1426 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
1427 +index 255469f78217..174612f8339c 100644
1428 +--- a/mm/huge_memory.c
1429 ++++ b/mm/huge_memory.c
1430 +@@ -2886,7 +2886,7 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
1431 + flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE);
1432 + page_add_anon_rmap(new, vma, mmun_start, true);
1433 + set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
1434 +- if (vma->vm_flags & VM_LOCKED)
1435 ++ if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new))
1436 + mlock_vma_page(new);
1437 + update_mmu_cache_pmd(vma, address, pvmw->pmd);
1438 + }
1439 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
1440 +index dfd2947e046e..9801dc0250e2 100644
1441 +--- a/mm/hugetlb.c
1442 ++++ b/mm/hugetlb.c
1443 +@@ -4517,12 +4517,40 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
1444 + /*
1445 + * check on proper vm_flags and page table alignment
1446 + */
1447 +- if (vma->vm_flags & VM_MAYSHARE &&
1448 +- vma->vm_start <= base && end <= vma->vm_end)
1449 ++ if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
1450 + return true;
1451 + return false;
1452 + }
1453 +
1454 ++/*
1455 ++ * Determine if start,end range within vma could be mapped by shared pmd.
1456 ++ * If yes, adjust start and end to cover range associated with possible
1457 ++ * shared pmd mappings.
1458 ++ */
1459 ++void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
1460 ++ unsigned long *start, unsigned long *end)
1461 ++{
1462 ++ unsigned long check_addr = *start;
1463 ++
1464 ++ if (!(vma->vm_flags & VM_MAYSHARE))
1465 ++ return;
1466 ++
1467 ++ for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) {
1468 ++ unsigned long a_start = check_addr & PUD_MASK;
1469 ++ unsigned long a_end = a_start + PUD_SIZE;
1470 ++
1471 ++ /*
1472 ++ * If sharing is possible, adjust start/end if necessary.
1473 ++ */
1474 ++ if (range_in_vma(vma, a_start, a_end)) {
1475 ++ if (a_start < *start)
1476 ++ *start = a_start;
1477 ++ if (a_end > *end)
1478 ++ *end = a_end;
1479 ++ }
1480 ++ }
1481 ++}
1482 ++
1483 + /*
1484 + * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
1485 + * and returns the corresponding pte. While this is not necessary for the
1486 +@@ -4620,6 +4648,11 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
1487 + {
1488 + return 0;
1489 + }
1490 ++
1491 ++void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
1492 ++ unsigned long *start, unsigned long *end)
1493 ++{
1494 ++}
1495 + #define want_pmd_share() (0)
1496 + #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
1497 +
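adjust_range_if_pmd_sharing_possible() widens an invalidation range outward to PUD granularity wherever a PUD-aligned, PUD-sized window fits entirely inside the VMA; range_in_vma() is taken here to be the obvious start/end containment check. A runnable userspace sketch of the same arithmetic, with x86-64's 1 GiB PUD_SIZE hard-coded as an assumption:

#include <stdio.h>

/* Assumed geometry: x86-64 with 1 GiB PUD entries (LP64 build). */
#define PUD_SIZE (1UL << 30)
#define PUD_MASK (~(PUD_SIZE - 1))

/* Stand-in for the kernel's vma containment check. */
static int range_in_vma(unsigned long vm_start, unsigned long vm_end,
			unsigned long start, unsigned long end)
{
	return vm_start <= start && end <= vm_end;
}

int main(void)
{
	/* A 4 GiB VM_MAYSHARE mapping and a small 2 MiB range inside it. */
	unsigned long vm_start = 0x40000000UL, vm_end = 0x140000000UL;
	unsigned long start = 0x48200000UL, end = 0x48400000UL;
	unsigned long addr;

	for (addr = start; addr < end; addr += PUD_SIZE) {
		unsigned long a_start = addr & PUD_MASK;
		unsigned long a_end = a_start + PUD_SIZE;

		if (range_in_vma(vm_start, vm_end, a_start, a_end)) {
			if (a_start < start)
				start = a_start;
			if (a_end > end)
				end = a_end;
		}
	}

	/* Expect [0x40000000, 0x80000000): the enclosing PUD window. */
	printf("widened to [%#lx, %#lx)\n", start, end);
	return 0;
}

With the sample numbers the 2 MiB range grows to the whole enclosing 1 GiB window, which is exactly the region try_to_unmap_one() below must flush when a shared PMD page is detached.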
1498 +diff --git a/mm/migrate.c b/mm/migrate.c
1499 +index 1236449b4777..cbb025239071 100644
1500 +--- a/mm/migrate.c
1501 ++++ b/mm/migrate.c
1502 +@@ -274,6 +274,9 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
1503 + if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
1504 + mlock_vma_page(new);
1505 +
1506 ++ if (PageTransHuge(page) && PageMlocked(page))
1507 ++ clear_page_mlock(page);
1508 ++
1509 + /* No need to invalidate - it was non-present before */
1510 + update_mmu_cache(vma, pvmw.address, pvmw.pte);
1511 + }
1512 +diff --git a/mm/rmap.c b/mm/rmap.c
1513 +index 97edcf44d88c..8bd2ddd8febd 100644
1514 +--- a/mm/rmap.c
1515 ++++ b/mm/rmap.c
1516 +@@ -1358,11 +1358,21 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1517 + }
1518 +
1519 + /*
1520 +- * We have to assume the worse case ie pmd for invalidation. Note that
1521 +- * the page can not be free in this function as call of try_to_unmap()
1522 +- * must hold a reference on the page.
1523 ++ * For THP, we have to assume the worst case, i.e. pmd, for invalidation.
1524 ++ * For hugetlb, it could be much worse if we need to do pud
1525 ++ * invalidation in the case of pmd sharing.
1526 ++ *
1527 ++ * Note that the page cannot be freed in this function, as the call to
1528 ++ * try_to_unmap() must hold a reference on the page.
1529 + */
1530 + end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
1531 ++ if (PageHuge(page)) {
1532 ++ /*
1533 ++ * If sharing is possible, start and end will be adjusted
1534 ++ * accordingly.
1535 ++ */
1536 ++ adjust_range_if_pmd_sharing_possible(vma, &start, &end);
1537 ++ }
1538 + mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
1539 +
1540 + while (page_vma_mapped_walk(&pvmw)) {
1541 +@@ -1408,6 +1418,32 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1542 + subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
1543 + address = pvmw.address;
1544 +
1545 ++ if (PageHuge(page)) {
1546 ++ if (huge_pmd_unshare(mm, &address, pvmw.pte)) {
1547 ++ /*
1548 ++ * huge_pmd_unshare unmapped an entire PMD
1549 ++ * page. There is no way of knowing exactly
1550 ++ * which PMDs may be cached for this mm, so
1551 ++ * we must flush them all. start/end were
1552 ++ * already adjusted above to cover this range.
1553 ++ */
1554 ++ flush_cache_range(vma, start, end);
1555 ++ flush_tlb_range(vma, start, end);
1556 ++ mmu_notifier_invalidate_range(mm, start, end);
1557 ++
1558 ++ /*
1559 ++ * The ref count of the PMD page was dropped
1560 ++ * which is part of the way map counting
1561 ++ * is done for shared PMDs. Return 'true'
1562 ++ * here. When there is no other sharing,
1563 ++ * huge_pmd_unshare returns false and we will
1564 ++ * unmap the actual page and drop map count
1565 ++ * to zero.
1566 ++ */
1567 ++ page_vma_mapped_walk_done(&pvmw);
1568 ++ break;
1569 ++ }
1570 ++ }
1571 +
1572 + if (IS_ENABLED(CONFIG_MIGRATION) &&
1573 + (flags & TTU_MIGRATION) &&
1574 +diff --git a/mm/vmstat.c b/mm/vmstat.c
1575 +index 4bb13e72ac97..2bdc962b2dfe 100644
1576 +--- a/mm/vmstat.c
1577 ++++ b/mm/vmstat.c
1578 +@@ -1203,6 +1203,9 @@ const char * const vmstat_text[] = {
1579 + #ifdef CONFIG_SMP
1580 + "nr_tlb_remote_flush",
1581 + "nr_tlb_remote_flush_received",
1582 ++#else
1583 ++ "", /* nr_tlb_remote_flush */
1584 ++ "", /* nr_tlb_remote_flush_received */
1585 + #endif /* CONFIG_SMP */
1586 + "nr_tlb_local_flush_all",
1587 + "nr_tlb_local_flush_one",
1588 +diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
1589 +index b456b882a6ea..63558335e41e 100644
1590 +--- a/net/mac80211/cfg.c
1591 ++++ b/net/mac80211/cfg.c
1592 +@@ -426,7 +426,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
1593 + case NL80211_IFTYPE_AP:
1594 + case NL80211_IFTYPE_AP_VLAN:
1595 + /* Keys without a station are used for TX only */
1596 +- if (key->sta && test_sta_flag(key->sta, WLAN_STA_MFP))
1597 ++ if (sta && test_sta_flag(sta, WLAN_STA_MFP))
1598 + key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
1599 + break;
1600 + case NL80211_IFTYPE_ADHOC:
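The mac80211 one-liner tests the local sta from the earlier station lookup instead of key->sta, which, as the fix implies, is not yet populated at this point (presumably it is assigned only when the key is linked to the station later in the function), so the MFP check always saw NULL and the RX_MGMT flag was never set for AP-mode keys. A runnable toy model of that ordering bug, with all names invented:

#include <stdbool.h>
#include <stdio.h>

struct sta { bool mfp; };
struct key { struct sta *sta; unsigned int flags; };

#define FLAG_RX_MGMT 0x1u

static void choose_flags(struct key *key, struct sta *sta)
{
	/* Buggy form was: if (key->sta && key->sta->mfp), but key->sta
	 * is still NULL at this point, so the flag was never set. */
	if (sta && sta->mfp)
		key->flags |= FLAG_RX_MGMT;
}

int main(void)
{
	struct sta sta = { .mfp = true };
	struct key key = { 0 };		/* not linked yet: key.sta == NULL */

	choose_flags(&key, &sta);
	key.sta = &sta;			/* linking happens only afterwards */

	printf("flags=%#x\n", key.flags);	/* prints 0x1 with the fix */
	return 0;
}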
1601 +diff --git a/net/rds/ib.h b/net/rds/ib.h
1602 +index 86a8578d95b8..7db93f7f5c61 100644
1603 +--- a/net/rds/ib.h
1604 ++++ b/net/rds/ib.h
1605 +@@ -373,7 +373,7 @@ void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);
1606 + int rds_ib_recv_init(void);
1607 + void rds_ib_recv_exit(void);
1608 + int rds_ib_recv_path(struct rds_conn_path *conn);
1609 +-int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic);
1610 ++int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic, gfp_t gfp);
1611 + void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
1612 + void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp);
1613 + void rds_ib_inc_free(struct rds_incoming *inc);
1614 +diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
1615 +index 6e721c449c4b..e086395a2355 100644
1616 +--- a/net/rds/ib_cm.c
1617 ++++ b/net/rds/ib_cm.c
1618 +@@ -946,7 +946,7 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
1619 + if (!ic)
1620 + return -ENOMEM;
1621 +
1622 +- ret = rds_ib_recv_alloc_caches(ic);
1623 ++ ret = rds_ib_recv_alloc_caches(ic, gfp);
1624 + if (ret) {
1625 + kfree(ic);
1626 + return ret;
1627 +diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
1628 +index b4e421aa9727..918d2e676b9b 100644
1629 +--- a/net/rds/ib_recv.c
1630 ++++ b/net/rds/ib_recv.c
1631 +@@ -98,12 +98,12 @@ static void rds_ib_cache_xfer_to_ready(struct rds_ib_refill_cache *cache)
1632 + }
1633 + }
1634 +
1635 +-static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache)
1636 ++static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache, gfp_t gfp)
1637 + {
1638 + struct rds_ib_cache_head *head;
1639 + int cpu;
1640 +
1641 +- cache->percpu = alloc_percpu(struct rds_ib_cache_head);
1642 ++ cache->percpu = alloc_percpu_gfp(struct rds_ib_cache_head, gfp);
1643 + if (!cache->percpu)
1644 + return -ENOMEM;
1645 +
1646 +@@ -118,13 +118,13 @@ static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache)
1647 + return 0;
1648 + }
1649 +
1650 +-int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic)
1651 ++int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic, gfp_t gfp)
1652 + {
1653 + int ret;
1654 +
1655 +- ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs);
1656 ++ ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs, gfp);
1657 + if (!ret) {
1658 +- ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags);
1659 ++ ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags, gfp);
1660 + if (ret)
1661 + free_percpu(ic->i_cache_incs.percpu);
1662 + }
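The RDS hunks thread the caller's gfp mask from rds_ib_conn_alloc() down into the per-cpu cache constructors, replacing alloc_percpu(), which is hard-wired to GFP_KERNEL, with alloc_percpu_gfp(), so a connection allocated under a restrictive gfp cannot end up sleeping inside cache setup. A minimal kernel-style sketch of the pattern; the struct and function names are invented for illustration:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/percpu.h>

struct demo_head {
	void *first;			/* per-CPU free list head */
};

struct demo_cache {
	struct demo_head __percpu *percpu;
};

/*
 * Illustrative only: honor the caller's gfp instead of letting
 * alloc_percpu() default to GFP_KERNEL, so atomic callers stay atomic.
 */
static int demo_cache_alloc(struct demo_cache *cache, gfp_t gfp)
{
	cache->percpu = alloc_percpu_gfp(struct demo_head, gfp);
	return cache->percpu ? 0 : -ENOMEM;
}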
1663 +diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
1664 +index 615fdc63452e..e37653b0f2d0 100644
1665 +--- a/tools/perf/builtin-script.c
1666 ++++ b/tools/perf/builtin-script.c
1667 +@@ -25,6 +25,7 @@
1668 + #include "util/string2.h"
1669 + #include "util/thread-stack.h"
1670 + #include "util/time-utils.h"
1671 ++#include "util/path.h"
1672 + #include "print_binary.h"
1673 + #include <linux/bitmap.h>
1674 + #include <linux/kernel.h>
1675 +@@ -2129,19 +2130,6 @@ out:
1676 + return rc;
1677 + }
1678 +
1679 +-/* Helper function for filesystems that return a dent->d_type DT_UNKNOWN */
1680 +-static int is_directory(const char *base_path, const struct dirent *dent)
1681 +-{
1682 +- char path[PATH_MAX];
1683 +- struct stat st;
1684 +-
1685 +- sprintf(path, "%s/%s", base_path, dent->d_name);
1686 +- if (stat(path, &st))
1687 +- return 0;
1688 +-
1689 +- return S_ISDIR(st.st_mode);
1690 +-}
1691 +-
1692 + #define for_each_lang(scripts_path, scripts_dir, lang_dirent) \
1693 + while ((lang_dirent = readdir(scripts_dir)) != NULL) \
1694 + if ((lang_dirent->d_type == DT_DIR || \
1695 +diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
1696 +index dac76ac117c1..398d4cc2f0e4 100644
1697 +--- a/tools/perf/util/annotate.c
1698 ++++ b/tools/perf/util/annotate.c
1699 +@@ -1432,7 +1432,7 @@ int symbol__disassemble(struct symbol *sym, struct map *map,
1700 + struct arch **parch, char *cpuid)
1701 + {
1702 + struct dso *dso = map->dso;
1703 +- char command[PATH_MAX * 2];
1704 ++ char *command;
1705 + struct arch *arch = NULL;
1706 + FILE *file;
1707 + char symfs_filename[PATH_MAX];
1708 +@@ -1496,7 +1496,7 @@ int symbol__disassemble(struct symbol *sym, struct map *map,
1709 + strcpy(symfs_filename, tmp);
1710 + }
1711 +
1712 +- snprintf(command, sizeof(command),
1713 ++ err = asprintf(&command,
1714 + "%s %s%s --start-address=0x%016" PRIx64
1715 + " --stop-address=0x%016" PRIx64
1716 + " -l -d %s %s -C \"%s\" 2>/dev/null|grep -v \"%s:\"|expand",
1717 +@@ -1509,12 +1509,17 @@ int symbol__disassemble(struct symbol *sym, struct map *map,
1718 + symbol_conf.annotate_src ? "-S" : "",
1719 + symfs_filename, symfs_filename);
1720 +
1721 ++ if (err < 0) {
1722 ++ pr_err("Failure allocating memory for the command to run\n");
1723 ++ goto out_remove_tmp;
1724 ++ }
1725 ++
1726 + pr_debug("Executing: %s\n", command);
1727 +
1728 + err = -1;
1729 + if (pipe(stdout_fd) < 0) {
1730 + pr_err("Failure creating the pipe to run %s\n", command);
1731 +- goto out_remove_tmp;
1732 ++ goto out_free_command;
1733 + }
1734 +
1735 + pid = fork();
1736 +@@ -1541,7 +1546,7 @@ int symbol__disassemble(struct symbol *sym, struct map *map,
1737 + * If we were using debug info should retry with
1738 + * original binary.
1739 + */
1740 +- goto out_remove_tmp;
1741 ++ goto out_free_command;
1742 + }
1743 +
1744 + nline = 0;
1745 +@@ -1570,6 +1575,8 @@ int symbol__disassemble(struct symbol *sym, struct map *map,
1746 +
1747 + fclose(file);
1748 + err = 0;
1749 ++out_free_command:
1750 ++ free(command);
1751 + out_remove_tmp:
1752 + close(stdout_fd[0]);
1753 +
1754 +@@ -1583,7 +1590,7 @@ out:
1755 +
1756 + out_close_stdout:
1757 + close(stdout_fd[1]);
1758 +- goto out_remove_tmp;
1759 ++ goto out_free_command;
1760 + }
1761 +
1762 + static void insert_source_line(struct rb_root *root, struct source_line *src_line)
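In annotate.c the fixed char command[PATH_MAX * 2] plus snprintf() becomes asprintf(), which sizes the buffer to fit (a long objdump invocation could previously be truncated silently), and the new out_free_command label releases the heap buffer on every exit path. A runnable sketch of the idiom, assuming glibc, where asprintf() is a GNU extension; the command contents are placeholders:

#define _GNU_SOURCE		/* asprintf() is a GNU extension */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *command;

	/* asprintf() allocates exactly enough space; on failure it
	 * returns a negative value and 'command' is undefined, so it
	 * must not be freed on that path. */
	if (asprintf(&command, "%s --start-address=%#018llx %s",
		     "objdump", 0x400000ULL, "vmlinux") < 0) {
		fprintf(stderr, "Failure allocating memory for the command to run\n");
		return 1;
	}

	printf("Executing: %s\n", command);
	free(command);		/* every later exit path must free this */
	return 0;
}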
1763 +diff --git a/tools/perf/util/path.c b/tools/perf/util/path.c
1764 +index 933f5c6bffb4..ca56ba2dd3da 100644
1765 +--- a/tools/perf/util/path.c
1766 ++++ b/tools/perf/util/path.c
1767 +@@ -18,6 +18,7 @@
1768 + #include <stdio.h>
1769 + #include <sys/types.h>
1770 + #include <sys/stat.h>
1771 ++#include <dirent.h>
1772 + #include <unistd.h>
1773 +
1774 + static char bad_path[] = "/bad-path/";
1775 +@@ -77,3 +78,16 @@ bool is_regular_file(const char *file)
1776 +
1777 + return S_ISREG(st.st_mode);
1778 + }
1779 ++
1780 ++/* Helper function for filesystems that return a dent->d_type DT_UNKNOWN */
1781 ++bool is_directory(const char *base_path, const struct dirent *dent)
1782 ++{
1783 ++ char path[PATH_MAX];
1784 ++ struct stat st;
1785 ++
1786 ++ sprintf(path, "%s/%s", base_path, dent->d_name);
1787 ++ if (stat(path, &st))
1788 ++ return false;
1789 ++
1790 ++ return S_ISDIR(st.st_mode);
1791 ++}
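is_directory() moves from builtin-script.c into util/path.c so other perf code can share it; the helper exists because some filesystems return d_type == DT_UNKNOWN from readdir(), forcing a stat() fallback. A runnable sketch of the intended usage (this standalone copy uses snprintf() rather than the original's sprintf(), purely for the example):

#include <dirent.h>
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>

/* Fallback for filesystems whose readdir() reports DT_UNKNOWN. */
static bool is_directory(const char *base_path, const struct dirent *dent)
{
	char path[PATH_MAX];
	struct stat st;

	snprintf(path, sizeof(path), "%s/%s", base_path, dent->d_name);
	if (stat(path, &st))
		return false;

	return S_ISDIR(st.st_mode);
}

int main(int argc, char **argv)
{
	const char *base = argc > 1 ? argv[1] : ".";
	struct dirent *dent;
	DIR *dir = opendir(base);

	if (!dir)
		return 1;

	/* Trust d_type when the filesystem provides it; stat() otherwise. */
	while ((dent = readdir(dir)) != NULL)
		if (dent->d_type == DT_DIR ||
		    (dent->d_type == DT_UNKNOWN && is_directory(base, dent)))
			printf("%s\n", dent->d_name);

	closedir(dir);
	return 0;
}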
1792 +diff --git a/tools/perf/util/path.h b/tools/perf/util/path.h
1793 +index 14a254ada7eb..f014f905df50 100644
1794 +--- a/tools/perf/util/path.h
1795 ++++ b/tools/perf/util/path.h
1796 +@@ -2,9 +2,12 @@
1797 + #ifndef _PERF_PATH_H
1798 + #define _PERF_PATH_H
1799 +
1800 ++struct dirent;
1801 ++
1802 + int path__join(char *bf, size_t size, const char *path1, const char *path2);
1803 + int path__join3(char *bf, size_t size, const char *path1, const char *path2, const char *path3);
1804 +
1805 + bool is_regular_file(const char *file);
1806 ++bool is_directory(const char *base_path, const struct dirent *dent);
1807 +
1808 + #endif /* _PERF_PATH_H */
1809 +diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
1810 +index af415febbc46..da4df7fd43a2 100644
1811 +--- a/tools/perf/util/setup.py
1812 ++++ b/tools/perf/util/setup.py
1813 +@@ -28,6 +28,8 @@ class install_lib(_install_lib):
1814 + cflags = getenv('CFLAGS', '').split()
1815 + # switch off several checks (need to be at the end of cflags list)
1816 + cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ]
1817 ++if cc != "clang":
1818 ++ cflags += ['-Wno-cast-function-type' ]
1819 +
1820 + src_perf = getenv('srctree') + '/tools/perf'
1821 + build_lib = getenv('PYTHON_EXTBUILD_LIB')
1822 +diff --git a/tools/testing/selftests/x86/test_vdso.c b/tools/testing/selftests/x86/test_vdso.c
1823 +index 235259011704..35edd61d1663 100644
1824 +--- a/tools/testing/selftests/x86/test_vdso.c
1825 ++++ b/tools/testing/selftests/x86/test_vdso.c
1826 +@@ -17,6 +17,7 @@
1827 + #include <errno.h>
1828 + #include <sched.h>
1829 + #include <stdbool.h>
1830 ++#include <limits.h>
1831 +
1832 + #ifndef SYS_getcpu
1833 + # ifdef __x86_64__
1834 +@@ -31,6 +32,14 @@
1835 +
1836 + int nerrs = 0;
1837 +
1838 ++typedef int (*vgettime_t)(clockid_t, struct timespec *);
1839 ++
1840 ++vgettime_t vdso_clock_gettime;
1841 ++
1842 ++typedef long (*vgtod_t)(struct timeval *tv, struct timezone *tz);
1843 ++
1844 ++vgtod_t vdso_gettimeofday;
1845 ++
1846 + typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
1847 +
1848 + getcpu_t vgetcpu;
1849 +@@ -95,6 +104,15 @@ static void fill_function_pointers()
1850 + printf("Warning: failed to find getcpu in vDSO\n");
1851 +
1852 + vgetcpu = (getcpu_t) vsyscall_getcpu();
1853 ++
1854 ++ vdso_clock_gettime = (vgettime_t)dlsym(vdso, "__vdso_clock_gettime");
1855 ++ if (!vdso_clock_gettime)
1856 ++ printf("Warning: failed to find clock_gettime in vDSO\n");
1857 ++
1858 ++ vdso_gettimeofday = (vgtod_t)dlsym(vdso, "__vdso_gettimeofday");
1859 ++ if (!vdso_gettimeofday)
1860 ++ printf("Warning: failed to find gettimeofday in vDSO\n");
1861 ++
1862 + }
1863 +
1864 + static long sys_getcpu(unsigned * cpu, unsigned * node,
1865 +@@ -103,6 +121,16 @@ static long sys_getcpu(unsigned * cpu, unsigned * node,
1866 + return syscall(__NR_getcpu, cpu, node, cache);
1867 + }
1868 +
1869 ++static inline int sys_clock_gettime(clockid_t id, struct timespec *ts)
1870 ++{
1871 ++ return syscall(__NR_clock_gettime, id, ts);
1872 ++}
1873 ++
1874 ++static inline int sys_gettimeofday(struct timeval *tv, struct timezone *tz)
1875 ++{
1876 ++ return syscall(__NR_gettimeofday, tv, tz);
1877 ++}
1878 ++
1879 + static void test_getcpu(void)
1880 + {
1881 + printf("[RUN]\tTesting getcpu...\n");
1882 +@@ -155,10 +183,154 @@ static void test_getcpu(void)
1883 + }
1884 + }
1885 +
1886 ++static bool ts_leq(const struct timespec *a, const struct timespec *b)
1887 ++{
1888 ++ if (a->tv_sec != b->tv_sec)
1889 ++ return a->tv_sec < b->tv_sec;
1890 ++ else
1891 ++ return a->tv_nsec <= b->tv_nsec;
1892 ++}
1893 ++
1894 ++static bool tv_leq(const struct timeval *a, const struct timeval *b)
1895 ++{
1896 ++ if (a->tv_sec != b->tv_sec)
1897 ++ return a->tv_sec < b->tv_sec;
1898 ++ else
1899 ++ return a->tv_usec <= b->tv_usec;
1900 ++}
1901 ++
1902 ++static char const * const clocknames[] = {
1903 ++ [0] = "CLOCK_REALTIME",
1904 ++ [1] = "CLOCK_MONOTONIC",
1905 ++ [2] = "CLOCK_PROCESS_CPUTIME_ID",
1906 ++ [3] = "CLOCK_THREAD_CPUTIME_ID",
1907 ++ [4] = "CLOCK_MONOTONIC_RAW",
1908 ++ [5] = "CLOCK_REALTIME_COARSE",
1909 ++ [6] = "CLOCK_MONOTONIC_COARSE",
1910 ++ [7] = "CLOCK_BOOTTIME",
1911 ++ [8] = "CLOCK_REALTIME_ALARM",
1912 ++ [9] = "CLOCK_BOOTTIME_ALARM",
1913 ++ [10] = "CLOCK_SGI_CYCLE",
1914 ++ [11] = "CLOCK_TAI",
1915 ++};
1916 ++
1917 ++static void test_one_clock_gettime(int clock, const char *name)
1918 ++{
1919 ++ struct timespec start, vdso, end;
1920 ++ int vdso_ret, end_ret;
1921 ++
1922 ++ printf("[RUN]\tTesting clock_gettime for clock %s (%d)...\n", name, clock);
1923 ++
1924 ++ if (sys_clock_gettime(clock, &start) < 0) {
1925 ++ if (errno == EINVAL) {
1926 ++ vdso_ret = vdso_clock_gettime(clock, &vdso);
1927 ++ if (vdso_ret == -EINVAL) {
1928 ++ printf("[OK]\tNo such clock.\n");
1929 ++ } else {
1930 ++ printf("[FAIL]\tNo such clock, but __vdso_clock_gettime returned %d\n", vdso_ret);
1931 ++ nerrs++;
1932 ++ }
1933 ++ } else {
1934 ++ printf("[WARN]\t clock_gettime(%d) syscall returned error %d\n", clock, errno);
1935 ++ }
1936 ++ return;
1937 ++ }
1938 ++
1939 ++ vdso_ret = vdso_clock_gettime(clock, &vdso);
1940 ++ end_ret = sys_clock_gettime(clock, &end);
1941 ++
1942 ++ if (vdso_ret != 0 || end_ret != 0) {
1943 ++ printf("[FAIL]\tvDSO returned %d, syscall errno=%d\n",
1944 ++ vdso_ret, errno);
1945 ++ nerrs++;
1946 ++ return;
1947 ++ }
1948 ++
1949 ++ printf("\t%llu.%09ld %llu.%09ld %llu.%09ld\n",
1950 ++ (unsigned long long)start.tv_sec, start.tv_nsec,
1951 ++ (unsigned long long)vdso.tv_sec, vdso.tv_nsec,
1952 ++ (unsigned long long)end.tv_sec, end.tv_nsec);
1953 ++
1954 ++ if (!ts_leq(&start, &vdso) || !ts_leq(&vdso, &end)) {
1955 ++ printf("[FAIL]\tTimes are out of sequence\n");
1956 ++ nerrs++;
1957 ++ }
1958 ++}
1959 ++
1960 ++static void test_clock_gettime(void)
1961 ++{
1962 ++ for (int clock = 0; clock < sizeof(clocknames) / sizeof(clocknames[0]);
1963 ++ clock++) {
1964 ++ test_one_clock_gettime(clock, clocknames[clock]);
1965 ++ }
1966 ++
1967 ++ /* Also test some invalid clock ids */
1968 ++ test_one_clock_gettime(-1, "invalid");
1969 ++ test_one_clock_gettime(INT_MIN, "invalid");
1970 ++ test_one_clock_gettime(INT_MAX, "invalid");
1971 ++}
1972 ++
1973 ++static void test_gettimeofday(void)
1974 ++{
1975 ++ struct timeval start, vdso, end;
1976 ++ struct timezone sys_tz, vdso_tz;
1977 ++ int vdso_ret, end_ret;
1978 ++
1979 ++ if (!vdso_gettimeofday)
1980 ++ return;
1981 ++
1982 ++ printf("[RUN]\tTesting gettimeofday...\n");
1983 ++
1984 ++ if (sys_gettimeofday(&start, &sys_tz) < 0) {
1985 ++ printf("[FAIL]\tsys_gettimeofday failed (%d)\n", errno);
1986 ++ nerrs++;
1987 ++ return;
1988 ++ }
1989 ++
1990 ++ vdso_ret = vdso_gettimeofday(&vdso, &vdso_tz);
1991 ++ end_ret = sys_gettimeofday(&end, NULL);
1992 ++
1993 ++ if (vdso_ret != 0 || end_ret != 0) {
1994 ++ printf("[FAIL]\tvDSO returned %d, syscall errno=%d\n",
1995 ++ vdso_ret, errno);
1996 ++ nerrs++;
1997 ++ return;
1998 ++ }
1999 ++
2000 ++ printf("\t%llu.%06ld %llu.%06ld %llu.%06ld\n",
2001 ++ (unsigned long long)start.tv_sec, start.tv_usec,
2002 ++ (unsigned long long)vdso.tv_sec, vdso.tv_usec,
2003 ++ (unsigned long long)end.tv_sec, end.tv_usec);
2004 ++
2005 ++ if (!tv_leq(&start, &vdso) || !tv_leq(&vdso, &end)) {
2006 ++ printf("[FAIL]\tTimes are out of sequence\n");
2007 ++ nerrs++;
2008 ++ }
2009 ++
2010 ++ if (sys_tz.tz_minuteswest == vdso_tz.tz_minuteswest &&
2011 ++ sys_tz.tz_dsttime == vdso_tz.tz_dsttime) {
2012 ++ printf("[OK]\ttimezones match: minuteswest=%d, dsttime=%d\n",
2013 ++ sys_tz.tz_minuteswest, sys_tz.tz_dsttime);
2014 ++ } else {
2015 ++ printf("[FAIL]\ttimezones do not match\n");
2016 ++ nerrs++;
2017 ++ }
2018 ++
2019 ++ /* And make sure that passing NULL for tz doesn't crash. */
2020 ++ vdso_gettimeofday(&vdso, NULL);
2021 ++}
2022 ++
2023 + int main(int argc, char **argv)
2024 + {
2025 + fill_function_pointers();
2026 +
2027 ++ test_clock_gettime();
2028 ++ test_gettimeofday();
2029 ++
2030 ++ /*
2031 ++ * Test getcpu() last so that, if something goes wrong setting affinity,
2032 ++ * we still run the other tests.
2033 ++ */
2034 + test_getcpu();
2035 +
2036 + return nerrs ? 1 : 0;