Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Thu, 29 Aug 2019 14:14:01
Message-Id: 1567088018.019eea27b90a11b416aa484582661386065fe93a.mpagano@gentoo
1 commit: 019eea27b90a11b416aa484582661386065fe93a
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Thu Aug 29 14:13:38 2019 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Thu Aug 29 14:13:38 2019 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=019eea27
7
8 Linux patch 4.14.141
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1140_linux-4.14.141.patch | 2471 +++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 2475 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 46d7bd2..9d8b846 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -603,6 +603,10 @@ Patch: 1139_linux-4.14.140.patch
21 From: https://www.kernel.org
22 Desc: Linux 4.14.140
23
24 +Patch: 1140_linux-4.14.141.patch
25 +From: https://www.kernel.org
26 +Desc: Linux 4.14.141
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1140_linux-4.14.141.patch b/1140_linux-4.14.141.patch
33 new file mode 100644
34 index 0000000..72c8cee
35 --- /dev/null
36 +++ b/1140_linux-4.14.141.patch
37 @@ -0,0 +1,2471 @@
38 +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
39 +index 13d80111bc1f..188a7db8501b 100644
40 +--- a/Documentation/admin-guide/kernel-parameters.txt
41 ++++ b/Documentation/admin-guide/kernel-parameters.txt
42 +@@ -3788,6 +3788,13 @@
43 + Run specified binary instead of /init from the ramdisk,
44 + used for early userspace startup. See initrd.
45 +
46 ++ rdrand= [X86]
47 ++ force - Override the decision by the kernel to hide the
48 ++ advertisement of RDRAND support (this affects
49 ++ certain AMD processors because of buggy BIOS
50 ++ support, specifically around the suspend/resume
51 ++ path).
52 ++
53 + rdt= [HW,X86,RDT]
54 + Turn on/off individual RDT features. List is:
55 + cmt, mbmtotal, mbmlocal, l3cat, l3cdp, l2cat, mba.
56 +diff --git a/Makefile b/Makefile
57 +index be7290af771e..eefd21f3d1ec 100644
58 +--- a/Makefile
59 ++++ b/Makefile
60 +@@ -1,7 +1,7 @@
61 + # SPDX-License-Identifier: GPL-2.0
62 + VERSION = 4
63 + PATCHLEVEL = 14
64 +-SUBLEVEL = 140
65 ++SUBLEVEL = 141
66 + EXTRAVERSION =
67 + NAME = Petit Gorille
68 +
69 +diff --git a/arch/mips/kernel/cacheinfo.c b/arch/mips/kernel/cacheinfo.c
70 +index 97d5239ca47b..428ef2189203 100644
71 +--- a/arch/mips/kernel/cacheinfo.c
72 ++++ b/arch/mips/kernel/cacheinfo.c
73 +@@ -80,6 +80,8 @@ static int __populate_cache_leaves(unsigned int cpu)
74 + if (c->tcache.waysize)
75 + populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED);
76 +
77 ++ this_cpu_ci->cpu_map_populated = true;
78 ++
79 + return 0;
80 + }
81 +
82 +diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
83 +index 5f209f111e59..df7ddd246eaa 100644
84 +--- a/arch/mips/kernel/i8253.c
85 ++++ b/arch/mips/kernel/i8253.c
86 +@@ -32,7 +32,8 @@ void __init setup_pit_timer(void)
87 +
88 + static int __init init_pit_clocksource(void)
89 + {
90 +- if (num_possible_cpus() > 1) /* PIT does not scale! */
91 ++ if (num_possible_cpus() > 1 || /* PIT does not scale! */
92 ++ !clockevent_state_periodic(&i8253_clockevent))
93 + return 0;
94 +
95 + return clocksource_i8253_init();
96 +diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
97 +index 3280953a82cf..09af857ca099 100644
98 +--- a/arch/powerpc/kernel/misc_64.S
99 ++++ b/arch/powerpc/kernel/misc_64.S
100 +@@ -134,7 +134,7 @@ _GLOBAL_TOC(flush_dcache_range)
101 + subf r8,r6,r4 /* compute length */
102 + add r8,r8,r5 /* ensure we get enough */
103 + lwz r9,DCACHEL1LOGBLOCKSIZE(r10) /* Get log-2 of dcache block size */
104 +- srw. r8,r8,r9 /* compute line count */
105 ++ srd. r8,r8,r9 /* compute line count */
106 + beqlr /* nothing to do? */
107 + mtctr r8
108 + 0: dcbst 0,r6
109 +@@ -190,7 +190,7 @@ _GLOBAL(flush_inval_dcache_range)
110 + subf r8,r6,r4 /* compute length */
111 + add r8,r8,r5 /* ensure we get enough */
112 + lwz r9,DCACHEL1LOGBLOCKSIZE(r10)/* Get log-2 of dcache block size */
113 +- srw. r8,r8,r9 /* compute line count */
114 ++ srd. r8,r8,r9 /* compute line count */
115 + beqlr /* nothing to do? */
116 + sync
117 + isync
118 +diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
119 +index a07ffd23e4dd..d3983fdf1012 100644
120 +--- a/arch/x86/include/asm/bootparam_utils.h
121 ++++ b/arch/x86/include/asm/bootparam_utils.h
122 +@@ -18,6 +18,20 @@
123 + * Note: efi_info is commonly left uninitialized, but that field has a
124 + * private magic, so it is better to leave it unchanged.
125 + */
126 ++
127 ++#define sizeof_mbr(type, member) ({ sizeof(((type *)0)->member); })
128 ++
129 ++#define BOOT_PARAM_PRESERVE(struct_member) \
130 ++ { \
131 ++ .start = offsetof(struct boot_params, struct_member), \
132 ++ .len = sizeof_mbr(struct boot_params, struct_member), \
133 ++ }
134 ++
135 ++struct boot_params_to_save {
136 ++ unsigned int start;
137 ++ unsigned int len;
138 ++};
139 ++
140 + static void sanitize_boot_params(struct boot_params *boot_params)
141 + {
142 + /*
143 +@@ -36,19 +50,40 @@ static void sanitize_boot_params(struct boot_params *boot_params)
144 + */
145 + if (boot_params->sentinel) {
146 + /* fields in boot_params are left uninitialized, clear them */
147 +- memset(&boot_params->ext_ramdisk_image, 0,
148 +- (char *)&boot_params->efi_info -
149 +- (char *)&boot_params->ext_ramdisk_image);
150 +- memset(&boot_params->kbd_status, 0,
151 +- (char *)&boot_params->hdr -
152 +- (char *)&boot_params->kbd_status);
153 +- memset(&boot_params->_pad7[0], 0,
154 +- (char *)&boot_params->edd_mbr_sig_buffer[0] -
155 +- (char *)&boot_params->_pad7[0]);
156 +- memset(&boot_params->_pad8[0], 0,
157 +- (char *)&boot_params->eddbuf[0] -
158 +- (char *)&boot_params->_pad8[0]);
159 +- memset(&boot_params->_pad9[0], 0, sizeof(boot_params->_pad9));
160 ++ static struct boot_params scratch;
161 ++ char *bp_base = (char *)boot_params;
162 ++ char *save_base = (char *)&scratch;
163 ++ int i;
164 ++
165 ++ const struct boot_params_to_save to_save[] = {
166 ++ BOOT_PARAM_PRESERVE(screen_info),
167 ++ BOOT_PARAM_PRESERVE(apm_bios_info),
168 ++ BOOT_PARAM_PRESERVE(tboot_addr),
169 ++ BOOT_PARAM_PRESERVE(ist_info),
170 ++ BOOT_PARAM_PRESERVE(hd0_info),
171 ++ BOOT_PARAM_PRESERVE(hd1_info),
172 ++ BOOT_PARAM_PRESERVE(sys_desc_table),
173 ++ BOOT_PARAM_PRESERVE(olpc_ofw_header),
174 ++ BOOT_PARAM_PRESERVE(efi_info),
175 ++ BOOT_PARAM_PRESERVE(alt_mem_k),
176 ++ BOOT_PARAM_PRESERVE(scratch),
177 ++ BOOT_PARAM_PRESERVE(e820_entries),
178 ++ BOOT_PARAM_PRESERVE(eddbuf_entries),
179 ++ BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
180 ++ BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
181 ++ BOOT_PARAM_PRESERVE(hdr),
182 ++ BOOT_PARAM_PRESERVE(e820_table),
183 ++ BOOT_PARAM_PRESERVE(eddbuf),
184 ++ };
185 ++
186 ++ memset(&scratch, 0, sizeof(scratch));
187 ++
188 ++ for (i = 0; i < ARRAY_SIZE(to_save); i++) {
189 ++ memcpy(save_base + to_save[i].start,
190 ++ bp_base + to_save[i].start, to_save[i].len);
191 ++ }
192 ++
193 ++ memcpy(boot_params, save_base, sizeof(*boot_params));
194 + }
195 + }
196 +
197 +diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
198 +index 7f1c8448d595..fda3bf75de6c 100644
199 +--- a/arch/x86/include/asm/msr-index.h
200 ++++ b/arch/x86/include/asm/msr-index.h
201 +@@ -334,6 +334,7 @@
202 + #define MSR_AMD64_PATCH_LEVEL 0x0000008b
203 + #define MSR_AMD64_TSC_RATIO 0xc0000104
204 + #define MSR_AMD64_NB_CFG 0xc001001f
205 ++#define MSR_AMD64_CPUID_FN_1 0xc0011004
206 + #define MSR_AMD64_PATCH_LOADER 0xc0010020
207 + #define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
208 + #define MSR_AMD64_OSVW_STATUS 0xc0010141
209 +diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
210 +index f1ddf3a1f307..f6b496d11097 100644
211 +--- a/arch/x86/include/asm/nospec-branch.h
212 ++++ b/arch/x86/include/asm/nospec-branch.h
213 +@@ -202,7 +202,7 @@
214 + " lfence;\n" \
215 + " jmp 902b;\n" \
216 + " .align 16\n" \
217 +- "903: addl $4, %%esp;\n" \
218 ++ "903: lea 4(%%esp), %%esp;\n" \
219 + " pushl %[thunk_target];\n" \
220 + " ret;\n" \
221 + " .align 16\n" \
222 +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
223 +index ae410f7585f1..f8f9cfded97d 100644
224 +--- a/arch/x86/kernel/apic/apic.c
225 ++++ b/arch/x86/kernel/apic/apic.c
226 +@@ -723,7 +723,7 @@ static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
227 + static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
228 +
229 + /*
230 +- * Temporary interrupt handler.
231 ++ * Temporary interrupt handler and polled calibration function.
232 + */
233 + static void __init lapic_cal_handler(struct clock_event_device *dev)
234 + {
235 +@@ -807,7 +807,8 @@ calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
236 + static int __init calibrate_APIC_clock(void)
237 + {
238 + struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
239 +- void (*real_handler)(struct clock_event_device *dev);
240 ++ u64 tsc_perj = 0, tsc_start = 0;
241 ++ unsigned long jif_start;
242 + unsigned long deltaj;
243 + long delta, deltatsc;
244 + int pm_referenced = 0;
245 +@@ -838,28 +839,64 @@ static int __init calibrate_APIC_clock(void)
246 + apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
247 + "calibrating APIC timer ...\n");
248 +
249 ++ /*
250 ++ * There are platforms w/o global clockevent devices. Instead of
251 ++ * making the calibration conditional on that, use a polling based
252 ++ * approach everywhere.
253 ++ */
254 + local_irq_disable();
255 +
256 +- /* Replace the global interrupt handler */
257 +- real_handler = global_clock_event->event_handler;
258 +- global_clock_event->event_handler = lapic_cal_handler;
259 +-
260 + /*
261 + * Setup the APIC counter to maximum. There is no way the lapic
262 + * can underflow in the 100ms detection time frame
263 + */
264 + __setup_APIC_LVTT(0xffffffff, 0, 0);
265 +
266 +- /* Let the interrupts run */
267 ++ /*
268 ++ * Methods to terminate the calibration loop:
269 ++ * 1) Global clockevent if available (jiffies)
270 ++ * 2) TSC if available and frequency is known
271 ++ */
272 ++ jif_start = READ_ONCE(jiffies);
273 ++
274 ++ if (tsc_khz) {
275 ++ tsc_start = rdtsc();
276 ++ tsc_perj = div_u64((u64)tsc_khz * 1000, HZ);
277 ++ }
278 ++
279 ++ /*
280 ++ * Enable interrupts so the tick can fire, if a global
281 ++ * clockevent device is available
282 ++ */
283 + local_irq_enable();
284 +
285 +- while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
286 +- cpu_relax();
287 ++ while (lapic_cal_loops <= LAPIC_CAL_LOOPS) {
288 ++ /* Wait for a tick to elapse */
289 ++ while (1) {
290 ++ if (tsc_khz) {
291 ++ u64 tsc_now = rdtsc();
292 ++ if ((tsc_now - tsc_start) >= tsc_perj) {
293 ++ tsc_start += tsc_perj;
294 ++ break;
295 ++ }
296 ++ } else {
297 ++ unsigned long jif_now = READ_ONCE(jiffies);
298 +
299 +- local_irq_disable();
300 ++ if (time_after(jif_now, jif_start)) {
301 ++ jif_start = jif_now;
302 ++ break;
303 ++ }
304 ++ }
305 ++ cpu_relax();
306 ++ }
307 +
308 +- /* Restore the real event handler */
309 +- global_clock_event->event_handler = real_handler;
310 ++ /* Invoke the calibration routine */
311 ++ local_irq_disable();
312 ++ lapic_cal_handler(NULL);
313 ++ local_irq_enable();
314 ++ }
315 ++
316 ++ local_irq_disable();
317 +
318 + /* Build delta t1-t2 as apic timer counts down */
319 + delta = lapic_cal_t1 - lapic_cal_t2;
320 +@@ -912,10 +949,11 @@ static int __init calibrate_APIC_clock(void)
321 + levt->features &= ~CLOCK_EVT_FEAT_DUMMY;
322 +
323 + /*
324 +- * PM timer calibration failed or not turned on
325 +- * so lets try APIC timer based calibration
326 ++ * PM timer calibration failed or not turned on so lets try APIC
327 ++ * timer based calibration, if a global clockevent device is
328 ++ * available.
329 + */
330 +- if (!pm_referenced) {
331 ++ if (!pm_referenced && global_clock_event) {
332 + apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
333 +
334 + /*
335 +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
336 +index bbebcd7a781e..3914f9218a6b 100644
337 +--- a/arch/x86/kernel/cpu/amd.c
338 ++++ b/arch/x86/kernel/cpu/amd.c
339 +@@ -772,6 +772,64 @@ static void init_amd_ln(struct cpuinfo_x86 *c)
340 + msr_set_bit(MSR_AMD64_DE_CFG, 31);
341 + }
342 +
343 ++static bool rdrand_force;
344 ++
345 ++static int __init rdrand_cmdline(char *str)
346 ++{
347 ++ if (!str)
348 ++ return -EINVAL;
349 ++
350 ++ if (!strcmp(str, "force"))
351 ++ rdrand_force = true;
352 ++ else
353 ++ return -EINVAL;
354 ++
355 ++ return 0;
356 ++}
357 ++early_param("rdrand", rdrand_cmdline);
358 ++
359 ++static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
360 ++{
361 ++ /*
362 ++ * Saving of the MSR used to hide the RDRAND support during
363 ++ * suspend/resume is done by arch/x86/power/cpu.c, which is
364 ++ * dependent on CONFIG_PM_SLEEP.
365 ++ */
366 ++ if (!IS_ENABLED(CONFIG_PM_SLEEP))
367 ++ return;
368 ++
369 ++ /*
370 ++ * The nordrand option can clear X86_FEATURE_RDRAND, so check for
371 ++ * RDRAND support using the CPUID function directly.
372 ++ */
373 ++ if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
374 ++ return;
375 ++
376 ++ msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);
377 ++
378 ++ /*
379 ++ * Verify that the CPUID change has occurred in case the kernel is
380 ++ * running virtualized and the hypervisor doesn't support the MSR.
381 ++ */
382 ++ if (cpuid_ecx(1) & BIT(30)) {
383 ++ pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
384 ++ return;
385 ++ }
386 ++
387 ++ clear_cpu_cap(c, X86_FEATURE_RDRAND);
388 ++ pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
389 ++}
390 ++
391 ++static void init_amd_jg(struct cpuinfo_x86 *c)
392 ++{
393 ++ /*
394 ++ * Some BIOS implementations do not restore proper RDRAND support
395 ++ * across suspend and resume. Check on whether to hide the RDRAND
396 ++ * instruction support via CPUID.
397 ++ */
398 ++ clear_rdrand_cpuid_bit(c);
399 ++}
400 ++
401 + static void init_amd_bd(struct cpuinfo_x86 *c)
402 + {
403 + u64 value;
404 +@@ -786,6 +844,13 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
405 + wrmsrl_safe(MSR_F15H_IC_CFG, value);
406 + }
407 + }
408 ++
409 ++ /*
410 ++ * Some BIOS implementations do not restore proper RDRAND support
411 ++ * across suspend and resume. Check on whether to hide the RDRAND
412 ++ * instruction support via CPUID.
413 ++ */
414 ++ clear_rdrand_cpuid_bit(c);
415 + }
416 +
417 + static void init_amd_zn(struct cpuinfo_x86 *c)
418 +@@ -828,6 +893,7 @@ static void init_amd(struct cpuinfo_x86 *c)
419 + case 0x10: init_amd_gh(c); break;
420 + case 0x12: init_amd_ln(c); break;
421 + case 0x15: init_amd_bd(c); break;
422 ++ case 0x16: init_amd_jg(c); break;
423 + case 0x17: init_amd_zn(c); break;
424 + }
425 +
426 +diff --git a/arch/x86/lib/cpu.c b/arch/x86/lib/cpu.c
427 +index 2dd1fe13a37b..19f707992db2 100644
428 +--- a/arch/x86/lib/cpu.c
429 ++++ b/arch/x86/lib/cpu.c
430 +@@ -1,5 +1,6 @@
431 + #include <linux/types.h>
432 + #include <linux/export.h>
433 ++#include <asm/cpu.h>
434 +
435 + unsigned int x86_family(unsigned int sig)
436 + {
437 +diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
438 +index 513ce09e9950..3aa3149df07f 100644
439 +--- a/arch/x86/power/cpu.c
440 ++++ b/arch/x86/power/cpu.c
441 +@@ -13,6 +13,7 @@
442 + #include <linux/smp.h>
443 + #include <linux/perf_event.h>
444 + #include <linux/tboot.h>
445 ++#include <linux/dmi.h>
446 +
447 + #include <asm/pgtable.h>
448 + #include <asm/proto.h>
449 +@@ -24,7 +25,7 @@
450 + #include <asm/debugreg.h>
451 + #include <asm/cpu.h>
452 + #include <asm/mmu_context.h>
453 +-#include <linux/dmi.h>
454 ++#include <asm/cpu_device_id.h>
455 +
456 + #ifdef CONFIG_X86_32
457 + __visible unsigned long saved_context_ebx;
458 +@@ -398,15 +399,14 @@ static int __init bsp_pm_check_init(void)
459 +
460 + core_initcall(bsp_pm_check_init);
461 +
462 +-static int msr_init_context(const u32 *msr_id, const int total_num)
463 ++static int msr_build_context(const u32 *msr_id, const int num)
464 + {
465 +- int i = 0;
466 ++ struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
467 + struct saved_msr *msr_array;
468 ++ int total_num;
469 ++ int i, j;
470 +
471 +- if (saved_context.saved_msrs.array || saved_context.saved_msrs.num > 0) {
472 +- pr_err("x86/pm: MSR quirk already applied, please check your DMI match table.\n");
473 +- return -EINVAL;
474 +- }
475 ++ total_num = saved_msrs->num + num;
476 +
477 + msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
478 + if (!msr_array) {
479 +@@ -414,19 +414,30 @@ static int msr_init_context(const u32 *msr_id, const int total_num)
480 + return -ENOMEM;
481 + }
482 +
483 +- for (i = 0; i < total_num; i++) {
484 +- msr_array[i].info.msr_no = msr_id[i];
485 ++ if (saved_msrs->array) {
486 ++ /*
487 ++ * Multiple callbacks can invoke this function, so copy any
488 ++ * MSR save requests from previous invocations.
489 ++ */
490 ++ memcpy(msr_array, saved_msrs->array,
491 ++ sizeof(struct saved_msr) * saved_msrs->num);
492 ++
493 ++ kfree(saved_msrs->array);
494 ++ }
495 ++
496 ++ for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
497 ++ msr_array[i].info.msr_no = msr_id[j];
498 + msr_array[i].valid = false;
499 + msr_array[i].info.reg.q = 0;
500 + }
501 +- saved_context.saved_msrs.num = total_num;
502 +- saved_context.saved_msrs.array = msr_array;
503 ++ saved_msrs->num = total_num;
504 ++ saved_msrs->array = msr_array;
505 +
506 + return 0;
507 + }
508 +
509 + /*
510 +- * The following section is a quirk framework for problematic BIOSen:
511 ++ * The following sections are a quirk framework for problematic BIOSen:
512 + * Sometimes MSRs are modified by the BIOSen after suspended to
513 + * RAM, this might cause unexpected behavior after wakeup.
514 + * Thus we save/restore these specified MSRs across suspend/resume
515 +@@ -441,7 +452,7 @@ static int msr_initialize_bdw(const struct dmi_system_id *d)
516 + u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };
517 +
518 + pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
519 +- return msr_init_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
520 ++ return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
521 + }
522 +
523 + static const struct dmi_system_id msr_save_dmi_table[] = {
524 +@@ -456,9 +467,58 @@ static const struct dmi_system_id msr_save_dmi_table[] = {
525 + {}
526 + };
527 +
528 ++static int msr_save_cpuid_features(const struct x86_cpu_id *c)
529 ++{
530 ++ u32 cpuid_msr_id[] = {
531 ++ MSR_AMD64_CPUID_FN_1,
532 ++ };
533 ++
534 ++ pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
535 ++ c->family);
536 ++
537 ++ return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
538 ++}
539 ++
540 ++static const struct x86_cpu_id msr_save_cpu_table[] = {
541 ++ {
542 ++ .vendor = X86_VENDOR_AMD,
543 ++ .family = 0x15,
544 ++ .model = X86_MODEL_ANY,
545 ++ .feature = X86_FEATURE_ANY,
546 ++ .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
547 ++ },
548 ++ {
549 ++ .vendor = X86_VENDOR_AMD,
550 ++ .family = 0x16,
551 ++ .model = X86_MODEL_ANY,
552 ++ .feature = X86_FEATURE_ANY,
553 ++ .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
554 ++ },
555 ++ {}
556 ++};
557 ++
558 ++typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
559 ++static int pm_cpu_check(const struct x86_cpu_id *c)
560 ++{
561 ++ const struct x86_cpu_id *m;
562 ++ int ret = 0;
563 ++
564 ++ m = x86_match_cpu(msr_save_cpu_table);
565 ++ if (m) {
566 ++ pm_cpu_match_t fn;
567 ++
568 ++ fn = (pm_cpu_match_t)m->driver_data;
569 ++ ret = fn(m);
570 ++ }
571 ++
572 ++ return ret;
573 ++}
574 ++
575 + static int pm_check_save_msr(void)
576 + {
577 + dmi_check_system(msr_save_dmi_table);
578 ++ pm_cpu_check(msr_save_cpu_table);
579 ++
580 + return 0;
581 + }
582 +
583 +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
584 +index bf5777bc04d3..eb0c4ee20525 100644
585 +--- a/drivers/ata/libata-scsi.c
586 ++++ b/drivers/ata/libata-scsi.c
587 +@@ -1804,6 +1804,21 @@ nothing_to_do:
588 + return 1;
589 + }
590 +
591 ++static bool ata_check_nblocks(struct scsi_cmnd *scmd, u32 n_blocks)
592 ++{
593 ++ struct request *rq = scmd->request;
594 ++ u32 req_blocks;
595 ++
596 ++ if (!blk_rq_is_passthrough(rq))
597 ++ return true;
598 ++
599 ++ req_blocks = blk_rq_bytes(rq) / scmd->device->sector_size;
600 ++ if (n_blocks > req_blocks)
601 ++ return false;
602 ++
603 ++ return true;
604 ++}
605 ++
606 + /**
607 + * ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
608 + * @qc: Storage for translated ATA taskfile
609 +@@ -1848,6 +1863,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
610 + scsi_10_lba_len(cdb, &block, &n_block);
611 + if (cdb[1] & (1 << 3))
612 + tf_flags |= ATA_TFLAG_FUA;
613 ++ if (!ata_check_nblocks(scmd, n_block))
614 ++ goto invalid_fld;
615 + break;
616 + case READ_6:
617 + case WRITE_6:
618 +@@ -1862,6 +1879,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
619 + */
620 + if (!n_block)
621 + n_block = 256;
622 ++ if (!ata_check_nblocks(scmd, n_block))
623 ++ goto invalid_fld;
624 + break;
625 + case READ_16:
626 + case WRITE_16:
627 +@@ -1872,6 +1891,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
628 + scsi_16_lba_len(cdb, &block, &n_block);
629 + if (cdb[1] & (1 << 3))
630 + tf_flags |= ATA_TFLAG_FUA;
631 ++ if (!ata_check_nblocks(scmd, n_block))
632 ++ goto invalid_fld;
633 + break;
634 + default:
635 + DPRINTK("no-byte command\n");
636 +diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
637 +index cc2f2e35f4c2..8c36ff0c2dd4 100644
638 +--- a/drivers/ata/libata-sff.c
639 ++++ b/drivers/ata/libata-sff.c
640 +@@ -704,6 +704,10 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
641 + unsigned int offset;
642 + unsigned char *buf;
643 +
644 ++ if (!qc->cursg) {
645 ++ qc->curbytes = qc->nbytes;
646 ++ return;
647 ++ }
648 + if (qc->curbytes == qc->nbytes - qc->sect_size)
649 + ap->hsm_task_state = HSM_ST_LAST;
650 +
651 +@@ -729,6 +733,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
652 +
653 + if (qc->cursg_ofs == qc->cursg->length) {
654 + qc->cursg = sg_next(qc->cursg);
655 ++ if (!qc->cursg)
656 ++ ap->hsm_task_state = HSM_ST_LAST;
657 + qc->cursg_ofs = 0;
658 + }
659 + }
660 +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
661 +index 25351b6b1e34..562e90bf73c9 100644
662 +--- a/drivers/gpio/gpiolib.c
663 ++++ b/drivers/gpio/gpiolib.c
664 +@@ -971,9 +971,11 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
665 + if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
666 + lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW;
667 + if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
668 +- lineinfo.flags |= GPIOLINE_FLAG_OPEN_DRAIN;
669 ++ lineinfo.flags |= (GPIOLINE_FLAG_OPEN_DRAIN |
670 ++ GPIOLINE_FLAG_IS_OUT);
671 + if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
672 +- lineinfo.flags |= GPIOLINE_FLAG_OPEN_SOURCE;
673 ++ lineinfo.flags |= (GPIOLINE_FLAG_OPEN_SOURCE |
674 ++ GPIOLINE_FLAG_IS_OUT);
675 +
676 + if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
677 + return -EFAULT;
678 +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
679 +index b4e7404fe660..a11637b0f6cc 100644
680 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
681 ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
682 +@@ -40,8 +40,7 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
683 + u8 *ptr = msg->buf;
684 +
685 + while (remaining) {
686 +- u8 cnt = (remaining > 16) ? 16 : remaining;
687 +- u8 cmd;
688 ++ u8 cnt, retries, cmd;
689 +
690 + if (msg->flags & I2C_M_RD)
691 + cmd = 1;
692 +@@ -51,10 +50,19 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
693 + if (mcnt || remaining > 16)
694 + cmd |= 4; /* MOT */
695 +
696 +- ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, &cnt);
697 +- if (ret < 0) {
698 +- nvkm_i2c_aux_release(aux);
699 +- return ret;
700 ++ for (retries = 0, cnt = 0;
701 ++ retries < 32 && !cnt;
702 ++ retries++) {
703 ++ cnt = min_t(u8, remaining, 16);
704 ++ ret = aux->func->xfer(aux, true, cmd,
705 ++ msg->addr, ptr, &cnt);
706 ++ if (ret < 0)
707 ++ goto out;
708 ++ }
709 ++ if (!cnt) {
710 ++ AUX_TRACE(aux, "no data after 32 retries");
711 ++ ret = -EIO;
712 ++ goto out;
713 + }
714 +
715 + ptr += cnt;
716 +@@ -64,8 +72,10 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
717 + msg++;
718 + }
719 +
720 ++ ret = num;
721 ++out:
722 + nvkm_i2c_aux_release(aux);
723 +- return num;
724 ++ return ret;
725 + }
726 +
727 + static u32
728 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
729 +index 97000996b8dc..50cc060cc552 100644
730 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
731 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
732 +@@ -300,8 +300,10 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
733 + break;
734 + }
735 +
736 +- if (retries == RETRIES)
737 ++ if (retries == RETRIES) {
738 ++ kfree(reply);
739 + return -EINVAL;
740 ++ }
741 +
742 + *msg_len = reply_len;
743 + *msg = reply;
744 +diff --git a/drivers/hid/hid-a4tech.c b/drivers/hid/hid-a4tech.c
745 +index 9428ea7cdf8a..c52bd163abb3 100644
746 +--- a/drivers/hid/hid-a4tech.c
747 ++++ b/drivers/hid/hid-a4tech.c
748 +@@ -26,12 +26,36 @@
749 + #define A4_2WHEEL_MOUSE_HACK_7 0x01
750 + #define A4_2WHEEL_MOUSE_HACK_B8 0x02
751 +
752 ++#define A4_WHEEL_ORIENTATION (HID_UP_GENDESK | 0x000000b8)
753 ++
754 + struct a4tech_sc {
755 + unsigned long quirks;
756 + unsigned int hw_wheel;
757 + __s32 delayed_value;
758 + };
759 +
760 ++static int a4_input_mapping(struct hid_device *hdev, struct hid_input *hi,
761 ++ struct hid_field *field, struct hid_usage *usage,
762 ++ unsigned long **bit, int *max)
763 ++{
764 ++ struct a4tech_sc *a4 = hid_get_drvdata(hdev);
765 ++
766 ++ if (a4->quirks & A4_2WHEEL_MOUSE_HACK_B8 &&
767 ++ usage->hid == A4_WHEEL_ORIENTATION) {
768 ++ /*
769 ++ * We do not want to have this usage mapped to anything as it's
770 ++ * nonstandard and doesn't really behave like an HID report.
771 ++ * It's only selecting the orientation (vertical/horizontal) of
772 ++ * the previous mouse wheel report. The input_events will be
773 ++ * generated once both reports are recorded in a4_event().
774 ++ */
775 ++ return -1;
776 ++ }
777 ++
778 ++ return 0;
779 ++
780 ++}
781 ++
782 + static int a4_input_mapped(struct hid_device *hdev, struct hid_input *hi,
783 + struct hid_field *field, struct hid_usage *usage,
784 + unsigned long **bit, int *max)
785 +@@ -53,8 +77,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
786 + struct a4tech_sc *a4 = hid_get_drvdata(hdev);
787 + struct input_dev *input;
788 +
789 +- if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput ||
790 +- !usage->type)
791 ++ if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput)
792 + return 0;
793 +
794 + input = field->hidinput->input;
795 +@@ -65,7 +88,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
796 + return 1;
797 + }
798 +
799 +- if (usage->hid == 0x000100b8) {
800 ++ if (usage->hid == A4_WHEEL_ORIENTATION) {
801 + input_event(input, EV_REL, value ? REL_HWHEEL :
802 + REL_WHEEL, a4->delayed_value);
803 + return 1;
804 +@@ -129,6 +152,7 @@ MODULE_DEVICE_TABLE(hid, a4_devices);
805 + static struct hid_driver a4_driver = {
806 + .name = "a4tech",
807 + .id_table = a4_devices,
808 ++ .input_mapping = a4_input_mapping,
809 + .input_mapped = a4_input_mapped,
810 + .event = a4_event,
811 + .probe = a4_probe,
812 +diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
813 +index b83376077d72..cfa0cb22c9b3 100644
814 +--- a/drivers/hid/hid-tmff.c
815 ++++ b/drivers/hid/hid-tmff.c
816 +@@ -34,6 +34,8 @@
817 +
818 + #include "hid-ids.h"
819 +
820 ++#define THRUSTMASTER_DEVICE_ID_2_IN_1_DT 0xb320
821 ++
822 + static const signed short ff_rumble[] = {
823 + FF_RUMBLE,
824 + -1
825 +@@ -88,6 +90,7 @@ static int tmff_play(struct input_dev *dev, void *data,
826 + struct hid_field *ff_field = tmff->ff_field;
827 + int x, y;
828 + int left, right; /* Rumbling */
829 ++ int motor_swap;
830 +
831 + switch (effect->type) {
832 + case FF_CONSTANT:
833 +@@ -112,6 +115,13 @@ static int tmff_play(struct input_dev *dev, void *data,
834 + ff_field->logical_minimum,
835 + ff_field->logical_maximum);
836 +
837 ++ /* 2-in-1 strong motor is left */
838 ++ if (hid->product == THRUSTMASTER_DEVICE_ID_2_IN_1_DT) {
839 ++ motor_swap = left;
840 ++ left = right;
841 ++ right = motor_swap;
842 ++ }
843 ++
844 + dbg_hid("(left,right)=(%08x, %08x)\n", left, right);
845 + ff_field->value[0] = left;
846 + ff_field->value[1] = right;
847 +@@ -238,6 +248,8 @@ static const struct hid_device_id tm_devices[] = {
848 + .driver_data = (unsigned long)ff_rumble },
849 + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304), /* FireStorm Dual Power 2 (and 3) */
850 + .driver_data = (unsigned long)ff_rumble },
851 ++ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, THRUSTMASTER_DEVICE_ID_2_IN_1_DT), /* Dual Trigger 2-in-1 */
852 ++ .driver_data = (unsigned long)ff_rumble },
853 + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323), /* Dual Trigger 3-in-1 (PC Mode) */
854 + .driver_data = (unsigned long)ff_rumble },
855 + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324), /* Dual Trigger 3-in-1 (PS3 Mode) */
856 +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
857 +index 60e2d4cf1fe3..2e593874f5e0 100644
858 +--- a/drivers/hid/wacom_wac.c
859 ++++ b/drivers/hid/wacom_wac.c
860 +@@ -848,6 +848,8 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
861 + y >>= 1;
862 + distance >>= 1;
863 + }
864 ++ if (features->type == INTUOSHT2)
865 ++ distance = features->distance_max - distance;
866 + input_report_abs(input, ABS_X, x);
867 + input_report_abs(input, ABS_Y, y);
868 + input_report_abs(input, ABS_DISTANCE, distance);
869 +@@ -1061,7 +1063,7 @@ static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len)
870 + input_report_key(input, BTN_BASE2, (data[11] & 0x02));
871 +
872 + if (data[12] & 0x80)
873 +- input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f));
874 ++ input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f) - 1);
875 + else
876 + input_report_abs(input, ABS_WHEEL, 0);
877 +
878 +diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
879 +index 35983c7c3137..87588198d68f 100644
880 +--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
881 ++++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
882 +@@ -1402,6 +1402,7 @@ start_isoc_chain(struct usb_fifo *fifo, int num_packets_per_urb,
883 + printk(KERN_DEBUG
884 + "%s: %s: alloc urb for fifo %i failed",
885 + hw->name, __func__, fifo->fifonum);
886 ++ continue;
887 + }
888 + fifo->iso[i].owner_fifo = (struct usb_fifo *) fifo;
889 + fifo->iso[i].indx = i;
890 +@@ -1700,13 +1701,23 @@ hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel)
891 + static int
892 + setup_hfcsusb(struct hfcsusb *hw)
893 + {
894 ++ void *dmabuf = kmalloc(sizeof(u_char), GFP_KERNEL);
895 + u_char b;
896 ++ int ret;
897 +
898 + if (debug & DBG_HFC_CALL_TRACE)
899 + printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
900 +
901 ++ if (!dmabuf)
902 ++ return -ENOMEM;
903 ++
904 ++ ret = read_reg_atomic(hw, HFCUSB_CHIP_ID, dmabuf);
905 ++
906 ++ memcpy(&b, dmabuf, sizeof(u_char));
907 ++ kfree(dmabuf);
908 ++
909 + /* check the chip id */
910 +- if (read_reg_atomic(hw, HFCUSB_CHIP_ID, &b) != 1) {
911 ++ if (ret != 1) {
912 + printk(KERN_DEBUG "%s: %s: cannot read chip id\n",
913 + hw->name, __func__);
914 + return 1;
915 +diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
916 +index b7d3b62dae7f..1e17e6421da3 100644
917 +--- a/drivers/md/dm-bufio.c
918 ++++ b/drivers/md/dm-bufio.c
919 +@@ -1630,7 +1630,9 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
920 + unsigned long freed;
921 +
922 + c = container_of(shrink, struct dm_bufio_client, shrinker);
923 +- if (!dm_bufio_trylock(c))
924 ++ if (sc->gfp_mask & __GFP_FS)
925 ++ dm_bufio_lock(c);
926 ++ else if (!dm_bufio_trylock(c))
927 + return SHRINK_STOP;
928 +
929 + freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
930 +diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
931 +index b9d1897bcf5b..bd9a45b94b55 100644
932 +--- a/drivers/md/dm-kcopyd.c
933 ++++ b/drivers/md/dm-kcopyd.c
934 +@@ -545,8 +545,10 @@ static int run_io_job(struct kcopyd_job *job)
935 + * no point in continuing.
936 + */
937 + if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
938 +- job->master_job->write_err)
939 ++ job->master_job->write_err) {
940 ++ job->write_err = job->master_job->write_err;
941 + return -EIO;
942 ++ }
943 +
944 + io_job_start(job->kc->throttle);
945 +
946 +@@ -598,6 +600,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
947 + else
948 + job->read_err = 1;
949 + push(&kc->complete_jobs, job);
950 ++ wake(kc);
951 + break;
952 + }
953 +
954 +diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
955 +index d76e685206b3..8f070debe498 100644
956 +--- a/drivers/md/dm-table.c
957 ++++ b/drivers/md/dm-table.c
958 +@@ -1308,7 +1308,7 @@ void dm_table_event(struct dm_table *t)
959 + }
960 + EXPORT_SYMBOL(dm_table_event);
961 +
962 +-sector_t dm_table_get_size(struct dm_table *t)
963 ++inline sector_t dm_table_get_size(struct dm_table *t)
964 + {
965 + return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
966 + }
967 +@@ -1333,6 +1333,9 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
968 + unsigned int l, n = 0, k = 0;
969 + sector_t *node;
970 +
971 ++ if (unlikely(sector >= dm_table_get_size(t)))
972 ++ return &t->targets[t->num_targets];
973 ++
974 + for (l = 0; l < t->depth; l++) {
975 + n = get_child(n, k);
976 + node = get_node(t, l, n);
977 +diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
978 +index 597098a43aba..b322821a6323 100644
979 +--- a/drivers/md/dm-zoned-metadata.c
980 ++++ b/drivers/md/dm-zoned-metadata.c
981 +@@ -401,15 +401,18 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
982 + sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
983 + struct bio *bio;
984 +
985 ++ if (dmz_bdev_is_dying(zmd->dev))
986 ++ return ERR_PTR(-EIO);
987 ++
988 + /* Get a new block and a BIO to read it */
989 + mblk = dmz_alloc_mblock(zmd, mblk_no);
990 + if (!mblk)
991 +- return NULL;
992 ++ return ERR_PTR(-ENOMEM);
993 +
994 + bio = bio_alloc(GFP_NOIO, 1);
995 + if (!bio) {
996 + dmz_free_mblock(zmd, mblk);
997 +- return NULL;
998 ++ return ERR_PTR(-ENOMEM);
999 + }
1000 +
1001 + spin_lock(&zmd->mblk_lock);
1002 +@@ -540,8 +543,8 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
1003 + if (!mblk) {
1004 + /* Cache miss: read the block from disk */
1005 + mblk = dmz_get_mblock_slow(zmd, mblk_no);
1006 +- if (!mblk)
1007 +- return ERR_PTR(-ENOMEM);
1008 ++ if (IS_ERR(mblk))
1009 ++ return mblk;
1010 + }
1011 +
1012 + /* Wait for on-going read I/O and check for error */
1013 +@@ -569,16 +572,19 @@ static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
1014 + /*
1015 + * Issue a metadata block write BIO.
1016 + */
1017 +-static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
1018 +- unsigned int set)
1019 ++static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
1020 ++ unsigned int set)
1021 + {
1022 + sector_t block = zmd->sb[set].block + mblk->no;
1023 + struct bio *bio;
1024 +
1025 ++ if (dmz_bdev_is_dying(zmd->dev))
1026 ++ return -EIO;
1027 ++
1028 + bio = bio_alloc(GFP_NOIO, 1);
1029 + if (!bio) {
1030 + set_bit(DMZ_META_ERROR, &mblk->state);
1031 +- return;
1032 ++ return -ENOMEM;
1033 + }
1034 +
1035 + set_bit(DMZ_META_WRITING, &mblk->state);
1036 +@@ -590,6 +596,8 @@ static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
1037 + bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
1038 + bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
1039 + submit_bio(bio);
1040 ++
1041 ++ return 0;
1042 + }
1043 +
1044 + /*
1045 +@@ -601,6 +609,9 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
1046 + struct bio *bio;
1047 + int ret;
1048 +
1049 ++ if (dmz_bdev_is_dying(zmd->dev))
1050 ++ return -EIO;
1051 ++
1052 + bio = bio_alloc(GFP_NOIO, 1);
1053 + if (!bio)
1054 + return -ENOMEM;
1055 +@@ -658,22 +669,29 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
1056 + {
1057 + struct dmz_mblock *mblk;
1058 + struct blk_plug plug;
1059 +- int ret = 0;
1060 ++ int ret = 0, nr_mblks_submitted = 0;
1061 +
1062 + /* Issue writes */
1063 + blk_start_plug(&plug);
1064 +- list_for_each_entry(mblk, write_list, link)
1065 +- dmz_write_mblock(zmd, mblk, set);
1066 ++ list_for_each_entry(mblk, write_list, link) {
1067 ++ ret = dmz_write_mblock(zmd, mblk, set);
1068 ++ if (ret)
1069 ++ break;
1070 ++ nr_mblks_submitted++;
1071 ++ }
1072 + blk_finish_plug(&plug);
1073 +
1074 + /* Wait for completion */
1075 + list_for_each_entry(mblk, write_list, link) {
1076 ++ if (!nr_mblks_submitted)
1077 ++ break;
1078 + wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
1079 + TASK_UNINTERRUPTIBLE);
1080 + if (test_bit(DMZ_META_ERROR, &mblk->state)) {
1081 + clear_bit(DMZ_META_ERROR, &mblk->state);
1082 + ret = -EIO;
1083 + }
1084 ++ nr_mblks_submitted--;
1085 + }
1086 +
1087 + /* Flush drive cache (this will also sync data) */
1088 +@@ -735,6 +753,11 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
1089 + */
1090 + dmz_lock_flush(zmd);
1091 +
1092 ++ if (dmz_bdev_is_dying(zmd->dev)) {
1093 ++ ret = -EIO;
1094 ++ goto out;
1095 ++ }
1096 ++
1097 + /* Get dirty blocks */
1098 + spin_lock(&zmd->mblk_lock);
1099 + list_splice_init(&zmd->mblk_dirty_list, &write_list);
1100 +@@ -1534,7 +1557,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
1101 + struct dm_zone *zone;
1102 +
1103 + if (list_empty(&zmd->map_rnd_list))
1104 +- return NULL;
1105 ++ return ERR_PTR(-EBUSY);
1106 +
1107 + list_for_each_entry(zone, &zmd->map_rnd_list, link) {
1108 + if (dmz_is_buf(zone))
1109 +@@ -1545,7 +1568,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
1110 + return dzone;
1111 + }
1112 +
1113 +- return NULL;
1114 ++ return ERR_PTR(-EBUSY);
1115 + }
1116 +
1117 + /*
1118 +@@ -1556,7 +1579,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
1119 + struct dm_zone *zone;
1120 +
1121 + if (list_empty(&zmd->map_seq_list))
1122 +- return NULL;
1123 ++ return ERR_PTR(-EBUSY);
1124 +
1125 + list_for_each_entry(zone, &zmd->map_seq_list, link) {
1126 + if (!zone->bzone)
1127 +@@ -1565,7 +1588,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
1128 + return zone;
1129 + }
1130 +
1131 +- return NULL;
1132 ++ return ERR_PTR(-EBUSY);
1133 + }
1134 +
1135 + /*
1136 +@@ -1623,6 +1646,10 @@ again:
1137 + /* Alloate a random zone */
1138 + dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
1139 + if (!dzone) {
1140 ++ if (dmz_bdev_is_dying(zmd->dev)) {
1141 ++ dzone = ERR_PTR(-EIO);
1142 ++ goto out;
1143 ++ }
1144 + dmz_wait_for_free_zones(zmd);
1145 + goto again;
1146 + }
1147 +@@ -1720,6 +1747,10 @@ again:
1148 + /* Alloate a random zone */
1149 + bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
1150 + if (!bzone) {
1151 ++ if (dmz_bdev_is_dying(zmd->dev)) {
1152 ++ bzone = ERR_PTR(-EIO);
1153 ++ goto out;
1154 ++ }
1155 + dmz_wait_for_free_zones(zmd);
1156 + goto again;
1157 + }
1158 +diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c
1159 +index 44a119e12f1a..a9f84a998476 100644
1160 +--- a/drivers/md/dm-zoned-reclaim.c
1161 ++++ b/drivers/md/dm-zoned-reclaim.c
1162 +@@ -37,7 +37,7 @@ enum {
1163 + /*
1164 + * Number of seconds of target BIO inactivity to consider the target idle.
1165 + */
1166 +-#define DMZ_IDLE_PERIOD (10UL * HZ)
1167 ++#define DMZ_IDLE_PERIOD (10UL * HZ)
1168 +
1169 + /*
1170 + * Percentage of unmapped (free) random zones below which reclaim starts
1171 +@@ -134,6 +134,9 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
1172 + set_bit(DM_KCOPYD_WRITE_SEQ, &flags);
1173 +
1174 + while (block < end_block) {
1175 ++ if (dev->flags & DMZ_BDEV_DYING)
1176 ++ return -EIO;
1177 ++
1178 + /* Get a valid region from the source zone */
1179 + ret = dmz_first_valid_block(zmd, src_zone, &block);
1180 + if (ret <= 0)
1181 +@@ -217,7 +220,7 @@ static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
1182 +
1183 + dmz_unlock_flush(zmd);
1184 +
1185 +- return 0;
1186 ++ return ret;
1187 + }
1188 +
1189 + /*
1190 +@@ -261,7 +264,7 @@ static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
1191 +
1192 + dmz_unlock_flush(zmd);
1193 +
1194 +- return 0;
1195 ++ return ret;
1196 + }
1197 +
1198 + /*
1199 +@@ -314,7 +317,7 @@ static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
1200 +
1201 + dmz_unlock_flush(zmd);
1202 +
1203 +- return 0;
1204 ++ return ret;
1205 + }
1206 +
1207 + /*
1208 +@@ -336,7 +339,7 @@ static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
1209 + /*
1210 + * Find a candidate zone for reclaim and process it.
1211 + */
1212 +-static void dmz_reclaim(struct dmz_reclaim *zrc)
1213 ++static int dmz_do_reclaim(struct dmz_reclaim *zrc)
1214 + {
1215 + struct dmz_metadata *zmd = zrc->metadata;
1216 + struct dm_zone *dzone;
1217 +@@ -346,8 +349,8 @@ static void dmz_reclaim(struct dmz_reclaim *zrc)
1218 +
1219 + /* Get a data zone */
1220 + dzone = dmz_get_zone_for_reclaim(zmd);
1221 +- if (!dzone)
1222 +- return;
1223 ++ if (IS_ERR(dzone))
1224 ++ return PTR_ERR(dzone);
1225 +
1226 + start = jiffies;
1227 +
1228 +@@ -393,13 +396,20 @@ static void dmz_reclaim(struct dmz_reclaim *zrc)
1229 + out:
1230 + if (ret) {
1231 + dmz_unlock_zone_reclaim(dzone);
1232 +- return;
1233 ++ return ret;
1234 + }
1235 +
1236 +- (void) dmz_flush_metadata(zrc->metadata);
1237 ++ ret = dmz_flush_metadata(zrc->metadata);
1238 ++ if (ret) {
1239 ++ dmz_dev_debug(zrc->dev,
1240 ++ "Metadata flush for zone %u failed, err %d\n",
1241 ++ dmz_id(zmd, rzone), ret);
1242 ++ return ret;
1243 ++ }
1244 +
1245 + dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms",
1246 + dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start));
1247 ++ return 0;
1248 + }
1249 +
1250 + /*
1251 +@@ -444,6 +454,10 @@ static void dmz_reclaim_work(struct work_struct *work)
1252 + struct dmz_metadata *zmd = zrc->metadata;
1253 + unsigned int nr_rnd, nr_unmap_rnd;
1254 + unsigned int p_unmap_rnd;
1255 ++ int ret;
1256 ++
1257 ++ if (dmz_bdev_is_dying(zrc->dev))
1258 ++ return;
1259 +
1260 + if (!dmz_should_reclaim(zrc)) {
1261 + mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
1262 +@@ -473,7 +487,17 @@ static void dmz_reclaim_work(struct work_struct *work)
1263 + (dmz_target_idle(zrc) ? "Idle" : "Busy"),
1264 + p_unmap_rnd, nr_unmap_rnd, nr_rnd);
1265 +
1266 +- dmz_reclaim(zrc);
1267 ++ ret = dmz_do_reclaim(zrc);
1268 ++ if (ret) {
1269 ++ dmz_dev_debug(zrc->dev, "Reclaim error %d\n", ret);
1270 ++ if (ret == -EIO)
1271 ++ /*
1272 ++ * LLD might be performing some error handling sequence
1273 ++ * at the underlying device. To not interfere, do not
1274 ++ * attempt to schedule the next reclaim run immediately.
1275 ++ */
1276 ++ return;
1277 ++ }
1278 +
1279 + dmz_schedule_reclaim(zrc);
1280 + }
1281 +diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
1282 +index 532bfce7f072..1e004d975e78 100644
1283 +--- a/drivers/md/dm-zoned-target.c
1284 ++++ b/drivers/md/dm-zoned-target.c
1285 +@@ -133,6 +133,8 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
1286 +
1287 + atomic_inc(&bioctx->ref);
1288 + generic_make_request(clone);
1289 ++ if (clone->bi_status == BLK_STS_IOERR)
1290 ++ return -EIO;
1291 +
1292 + if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
1293 + zone->wp_block += nr_blocks;
1294 +@@ -277,8 +279,8 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz,
1295 +
1296 + /* Get the buffer zone. One will be allocated if needed */
1297 + bzone = dmz_get_chunk_buffer(zmd, zone);
1298 +- if (!bzone)
1299 +- return -ENOSPC;
1300 ++ if (IS_ERR(bzone))
1301 ++ return PTR_ERR(bzone);
1302 +
1303 + if (dmz_is_readonly(bzone))
1304 + return -EROFS;
1305 +@@ -389,6 +391,11 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
1306 +
1307 + dmz_lock_metadata(zmd);
1308 +
1309 ++ if (dmz->dev->flags & DMZ_BDEV_DYING) {
1310 ++ ret = -EIO;
1311 ++ goto out;
1312 ++ }
1313 ++
1314 + /*
1315 + * Get the data zone mapping the chunk. There may be no
1316 + * mapping for read and discard. If a mapping is obtained,
1317 +@@ -493,6 +500,8 @@ static void dmz_flush_work(struct work_struct *work)
1318 +
1319 + /* Flush dirty metadata blocks */
1320 + ret = dmz_flush_metadata(dmz->metadata);
1321 ++ if (ret)
1322 ++ dmz_dev_debug(dmz->dev, "Metadata flush failed, rc=%d\n", ret);
1323 +
1324 + /* Process queued flush requests */
1325 + while (1) {
1326 +@@ -513,22 +522,24 @@ static void dmz_flush_work(struct work_struct *work)
1327 + * Get a chunk work and start it to process a new BIO.
1328 + * If the BIO chunk has no work yet, create one.
1329 + */
1330 +-static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
1331 ++static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
1332 + {
1333 + unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
1334 + struct dm_chunk_work *cw;
1335 ++ int ret = 0;
1336 +
1337 + mutex_lock(&dmz->chunk_lock);
1338 +
1339 + /* Get the BIO chunk work. If one is not active yet, create one */
1340 + cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
1341 + if (!cw) {
1342 +- int ret;
1343 +
1344 + /* Create a new chunk work */
1345 + cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
1346 +- if (!cw)
1347 ++ if (unlikely(!cw)) {
1348 ++ ret = -ENOMEM;
1349 + goto out;
1350 ++ }
1351 +
1352 + INIT_WORK(&cw->work, dmz_chunk_work);
1353 + atomic_set(&cw->refcount, 0);
1354 +@@ -539,7 +550,6 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
1355 + ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
1356 + if (unlikely(ret)) {
1357 + kfree(cw);
1358 +- cw = NULL;
1359 + goto out;
1360 + }
1361 + }
1362 +@@ -547,10 +557,38 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
1363 + bio_list_add(&cw->bio_list, bio);
1364 + dmz_get_chunk_work(cw);
1365 +
1366 ++ dmz_reclaim_bio_acc(dmz->reclaim);
1367 + if (queue_work(dmz->chunk_wq, &cw->work))
1368 + dmz_get_chunk_work(cw);
1369 + out:
1370 + mutex_unlock(&dmz->chunk_lock);
1371 ++ return ret;
1372 ++}
1373 ++
1374 ++/*
1375 ++ * Check the backing device availability. If it's on the way out,
1376 ++ * start failing I/O. Reclaim and metadata components also call this
1377 ++ * function to cleanly abort operation in the event of such failure.
1378 ++ */
1379 ++bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev)
1380 ++{
1381 ++ struct gendisk *disk;
1382 ++
1383 ++ if (!(dmz_dev->flags & DMZ_BDEV_DYING)) {
1384 ++ disk = dmz_dev->bdev->bd_disk;
1385 ++ if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
1386 ++ dmz_dev_warn(dmz_dev, "Backing device queue dying");
1387 ++ dmz_dev->flags |= DMZ_BDEV_DYING;
1388 ++ } else if (disk->fops->check_events) {
1389 ++ if (disk->fops->check_events(disk, 0) &
1390 ++ DISK_EVENT_MEDIA_CHANGE) {
1391 ++ dmz_dev_warn(dmz_dev, "Backing device offline");
1392 ++ dmz_dev->flags |= DMZ_BDEV_DYING;
1393 ++ }
1394 ++ }
1395 ++ }
1396 ++
1397 ++ return dmz_dev->flags & DMZ_BDEV_DYING;
1398 + }
1399 +
1400 + /*
1401 +@@ -564,6 +602,10 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
1402 + sector_t sector = bio->bi_iter.bi_sector;
1403 + unsigned int nr_sectors = bio_sectors(bio);
1404 + sector_t chunk_sector;
1405 ++ int ret;
1406 ++
1407 ++ if (dmz_bdev_is_dying(dmz->dev))
1408 ++ return DM_MAPIO_KILL;
1409 +
1410 + dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
1411 + bio_op(bio), (unsigned long long)sector, nr_sectors,
1412 +@@ -601,8 +643,14 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
1413 + dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);
1414 +
1415 + /* Now ready to handle this BIO */
1416 +- dmz_reclaim_bio_acc(dmz->reclaim);
1417 +- dmz_queue_chunk_work(dmz, bio);
1418 ++ ret = dmz_queue_chunk_work(dmz, bio);
1419 ++ if (ret) {
1420 ++ dmz_dev_debug(dmz->dev,
1421 ++ "BIO op %d, can't process chunk %llu, err %i\n",
1422 ++ bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio),
1423 ++ ret);
1424 ++ return DM_MAPIO_REQUEUE;
1425 ++ }
1426 +
1427 + return DM_MAPIO_SUBMITTED;
1428 + }
1429 +@@ -855,6 +903,9 @@ static int dmz_prepare_ioctl(struct dm_target *ti,
1430 + {
1431 + struct dmz_target *dmz = ti->private;
1432 +
1433 ++ if (dmz_bdev_is_dying(dmz->dev))
1434 ++ return -ENODEV;
1435 ++
1436 + *bdev = dmz->dev->bdev;
1437 +
1438 + return 0;
1439 +diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h
1440 +index ed8de49c9a08..93a64529f219 100644
1441 +--- a/drivers/md/dm-zoned.h
1442 ++++ b/drivers/md/dm-zoned.h
1443 +@@ -56,6 +56,8 @@ struct dmz_dev {
1444 +
1445 + unsigned int nr_zones;
1446 +
1447 ++ unsigned int flags;
1448 ++
1449 + sector_t zone_nr_sectors;
1450 + unsigned int zone_nr_sectors_shift;
1451 +
1452 +@@ -67,6 +69,9 @@ struct dmz_dev {
1453 + (dev)->zone_nr_sectors_shift)
1454 + #define dmz_chunk_block(dev, b) ((b) & ((dev)->zone_nr_blocks - 1))
1455 +
1456 ++/* Device flags. */
1457 ++#define DMZ_BDEV_DYING (1 << 0)
1458 ++
1459 + /*
1460 + * Zone descriptor.
1461 + */
1462 +@@ -245,4 +250,9 @@ void dmz_resume_reclaim(struct dmz_reclaim *zrc);
1463 + void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc);
1464 + void dmz_schedule_reclaim(struct dmz_reclaim *zrc);
1465 +
1466 ++/*
1467 ++ * Functions defined in dm-zoned-target.c
1468 ++ */
1469 ++bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev);
1470 ++
1471 + #endif /* DM_ZONED_H */
1472 +diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
1473 +index 58b319757b1e..8aae0624a297 100644
1474 +--- a/drivers/md/persistent-data/dm-btree.c
1475 ++++ b/drivers/md/persistent-data/dm-btree.c
1476 +@@ -628,39 +628,40 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
1477 +
1478 + new_parent = shadow_current(s);
1479 +
1480 ++ pn = dm_block_data(new_parent);
1481 ++ size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
1482 ++ sizeof(__le64) : s->info->value_type.size;
1483 ++
1484 ++ /* create & init the left block */
1485 + r = new_block(s->info, &left);
1486 + if (r < 0)
1487 + return r;
1488 +
1489 ++ ln = dm_block_data(left);
1490 ++ nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
1491 ++
1492 ++ ln->header.flags = pn->header.flags;
1493 ++ ln->header.nr_entries = cpu_to_le32(nr_left);
1494 ++ ln->header.max_entries = pn->header.max_entries;
1495 ++ ln->header.value_size = pn->header.value_size;
1496 ++ memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
1497 ++ memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
1498 ++
1499 ++ /* create & init the right block */
1500 + r = new_block(s->info, &right);
1501 + if (r < 0) {
1502 + unlock_block(s->info, left);
1503 + return r;
1504 + }
1505 +
1506 +- pn = dm_block_data(new_parent);
1507 +- ln = dm_block_data(left);
1508 + rn = dm_block_data(right);
1509 +-
1510 +- nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
1511 + nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;
1512 +
1513 +- ln->header.flags = pn->header.flags;
1514 +- ln->header.nr_entries = cpu_to_le32(nr_left);
1515 +- ln->header.max_entries = pn->header.max_entries;
1516 +- ln->header.value_size = pn->header.value_size;
1517 +-
1518 + rn->header.flags = pn->header.flags;
1519 + rn->header.nr_entries = cpu_to_le32(nr_right);
1520 + rn->header.max_entries = pn->header.max_entries;
1521 + rn->header.value_size = pn->header.value_size;
1522 +-
1523 +- memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
1524 + memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));
1525 +-
1526 +- size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
1527 +- sizeof(__le64) : s->info->value_type.size;
1528 +- memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
1529 + memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left),
1530 + nr_right * size);
1531 +
1532 +diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
1533 +index 4aed69d9dd17..b23cac2c4738 100644
1534 +--- a/drivers/md/persistent-data/dm-space-map-metadata.c
1535 ++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
1536 +@@ -248,7 +248,7 @@ static int out(struct sm_metadata *smm)
1537 + }
1538 +
1539 + if (smm->recursion_count == 1)
1540 +- apply_bops(smm);
1541 ++ r = apply_bops(smm);
1542 +
1543 + smm->recursion_count--;
1544 +
1545 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
1546 +index 60d0c270af85..c1eeba1906fd 100644
1547 +--- a/drivers/net/bonding/bond_main.c
1548 ++++ b/drivers/net/bonding/bond_main.c
1549 +@@ -2153,6 +2153,15 @@ static void bond_miimon_commit(struct bonding *bond)
1550 + bond_for_each_slave(bond, slave, iter) {
1551 + switch (slave->new_link) {
1552 + case BOND_LINK_NOCHANGE:
1553 ++ /* For 802.3ad mode, check current slave speed and
1554 ++ * duplex again in case its port was disabled after
1555 ++ * invalid speed/duplex reporting but recovered before
1556 ++ * link monitoring could make a decision on the actual
1557 ++ * link status
1558 ++ */
1559 ++ if (BOND_MODE(bond) == BOND_MODE_8023AD &&
1560 ++ slave->link == BOND_LINK_UP)
1561 ++ bond_3ad_adapter_speed_duplex_changed(slave);
1562 + continue;
1563 +
1564 + case BOND_LINK_UP:
1565 +diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
1566 +index 7d61d8801220..d92113db4fb9 100644
1567 +--- a/drivers/net/can/dev.c
1568 ++++ b/drivers/net/can/dev.c
1569 +@@ -1217,6 +1217,8 @@ int register_candev(struct net_device *dev)
1570 + return -EINVAL;
1571 +
1572 + dev->rtnl_link_ops = &can_link_ops;
1573 ++ netif_carrier_off(dev);
1574 ++
1575 + return register_netdev(dev);
1576 + }
1577 + EXPORT_SYMBOL_GPL(register_candev);
1578 +diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
1579 +index dd56133cc461..fc9f8b01ecae 100644
1580 +--- a/drivers/net/can/sja1000/peak_pcmcia.c
1581 ++++ b/drivers/net/can/sja1000/peak_pcmcia.c
1582 +@@ -487,7 +487,7 @@ static void pcan_free_channels(struct pcan_pccard *card)
1583 + if (!netdev)
1584 + continue;
1585 +
1586 +- strncpy(name, netdev->name, IFNAMSIZ);
1587 ++ strlcpy(name, netdev->name, IFNAMSIZ);
1588 +
1589 + unregister_sja1000dev(netdev);
1590 +
1591 +diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
1592 +index d68c79f9a4b9..059282a6065c 100644
1593 +--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
1594 ++++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
1595 +@@ -881,7 +881,7 @@ static void peak_usb_disconnect(struct usb_interface *intf)
1596 +
1597 + dev_prev_siblings = dev->prev_siblings;
1598 + dev->state &= ~PCAN_USB_STATE_CONNECTED;
1599 +- strncpy(name, netdev->name, IFNAMSIZ);
1600 ++ strlcpy(name, netdev->name, IFNAMSIZ);
1601 +
1602 + unregister_netdev(netdev);
1603 +
1604 +diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1605 +index 79053d2ce7a3..338683e5ef1e 100644
1606 +--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1607 ++++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1608 +@@ -3270,7 +3270,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1609 + if (!adapter->regs) {
1610 + dev_err(&pdev->dev, "cannot map device registers\n");
1611 + err = -ENOMEM;
1612 +- goto out_free_adapter;
1613 ++ goto out_free_adapter_nofail;
1614 + }
1615 +
1616 + adapter->pdev = pdev;
1617 +@@ -3390,6 +3390,9 @@ out_free_dev:
1618 + if (adapter->port[i])
1619 + free_netdev(adapter->port[i]);
1620 +
1621 ++out_free_adapter_nofail:
1622 ++ kfree_skb(adapter->nofail_skb);
1623 ++
1624 + out_free_adapter:
1625 + kfree(adapter);
1626 +
1627 +diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
1628 +index c27054b8ce81..1bfe9544b3c1 100644
1629 +--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
1630 ++++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
1631 +@@ -157,6 +157,7 @@ struct hip04_priv {
1632 + unsigned int reg_inten;
1633 +
1634 + struct napi_struct napi;
1635 ++ struct device *dev;
1636 + struct net_device *ndev;
1637 +
1638 + struct tx_desc *tx_desc;
1639 +@@ -185,7 +186,7 @@ struct hip04_priv {
1640 +
1641 + static inline unsigned int tx_count(unsigned int head, unsigned int tail)
1642 + {
1643 +- return (head - tail) % (TX_DESC_NUM - 1);
1644 ++ return (head - tail) % TX_DESC_NUM;
1645 + }
1646 +
1647 + static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
1648 +@@ -387,7 +388,7 @@ static int hip04_tx_reclaim(struct net_device *ndev, bool force)
1649 + }
1650 +
1651 + if (priv->tx_phys[tx_tail]) {
1652 +- dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail],
1653 ++ dma_unmap_single(priv->dev, priv->tx_phys[tx_tail],
1654 + priv->tx_skb[tx_tail]->len,
1655 + DMA_TO_DEVICE);
1656 + priv->tx_phys[tx_tail] = 0;
1657 +@@ -437,8 +438,8 @@ static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1658 + return NETDEV_TX_BUSY;
1659 + }
1660 +
1661 +- phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
1662 +- if (dma_mapping_error(&ndev->dev, phys)) {
1663 ++ phys = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
1664 ++ if (dma_mapping_error(priv->dev, phys)) {
1665 + dev_kfree_skb(skb);
1666 + return NETDEV_TX_OK;
1667 + }
1668 +@@ -497,6 +498,9 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
1669 + u16 len;
1670 + u32 err;
1671 +
1672 ++ /* clean up tx descriptors */
1673 ++ tx_remaining = hip04_tx_reclaim(ndev, false);
1674 ++
1675 + while (cnt && !last) {
1676 + buf = priv->rx_buf[priv->rx_head];
1677 + skb = build_skb(buf, priv->rx_buf_size);
1678 +@@ -505,7 +509,7 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
1679 + goto refill;
1680 + }
1681 +
1682 +- dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
1683 ++ dma_unmap_single(priv->dev, priv->rx_phys[priv->rx_head],
1684 + RX_BUF_SIZE, DMA_FROM_DEVICE);
1685 + priv->rx_phys[priv->rx_head] = 0;
1686 +
1687 +@@ -534,9 +538,9 @@ refill:
1688 + buf = netdev_alloc_frag(priv->rx_buf_size);
1689 + if (!buf)
1690 + goto done;
1691 +- phys = dma_map_single(&ndev->dev, buf,
1692 ++ phys = dma_map_single(priv->dev, buf,
1693 + RX_BUF_SIZE, DMA_FROM_DEVICE);
1694 +- if (dma_mapping_error(&ndev->dev, phys))
1695 ++ if (dma_mapping_error(priv->dev, phys))
1696 + goto done;
1697 + priv->rx_buf[priv->rx_head] = buf;
1698 + priv->rx_phys[priv->rx_head] = phys;
1699 +@@ -557,8 +561,7 @@ refill:
1700 + }
1701 + napi_complete_done(napi, rx);
1702 + done:
1703 +- /* clean up tx descriptors and start a new timer if necessary */
1704 +- tx_remaining = hip04_tx_reclaim(ndev, false);
1705 ++ /* start a new timer if necessary */
1706 + if (rx < budget && tx_remaining)
1707 + hip04_start_tx_timer(priv);
1708 +
1709 +@@ -640,9 +643,9 @@ static int hip04_mac_open(struct net_device *ndev)
1710 + for (i = 0; i < RX_DESC_NUM; i++) {
1711 + dma_addr_t phys;
1712 +
1713 +- phys = dma_map_single(&ndev->dev, priv->rx_buf[i],
1714 ++ phys = dma_map_single(priv->dev, priv->rx_buf[i],
1715 + RX_BUF_SIZE, DMA_FROM_DEVICE);
1716 +- if (dma_mapping_error(&ndev->dev, phys))
1717 ++ if (dma_mapping_error(priv->dev, phys))
1718 + return -EIO;
1719 +
1720 + priv->rx_phys[i] = phys;
1721 +@@ -676,7 +679,7 @@ static int hip04_mac_stop(struct net_device *ndev)
1722 +
1723 + for (i = 0; i < RX_DESC_NUM; i++) {
1724 + if (priv->rx_phys[i]) {
1725 +- dma_unmap_single(&ndev->dev, priv->rx_phys[i],
1726 ++ dma_unmap_single(priv->dev, priv->rx_phys[i],
1727 + RX_BUF_SIZE, DMA_FROM_DEVICE);
1728 + priv->rx_phys[i] = 0;
1729 + }
1730 +@@ -820,6 +823,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
1731 + return -ENOMEM;
1732 +
1733 + priv = netdev_priv(ndev);
1734 ++ priv->dev = d;
1735 + priv->ndev = ndev;
1736 + platform_set_drvdata(pdev, ndev);
1737 + SET_NETDEV_DEV(ndev, &pdev->dev);
1738 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
1739 +index 7746417130bd..c5d9f290ec4c 100644
1740 +--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
1741 ++++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
1742 +@@ -939,7 +939,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
1743 + snprintf(bit_name, 30,
1744 + p_aeu->bit_name, num);
1745 + else
1746 +- strncpy(bit_name,
1747 ++ strlcpy(bit_name,
1748 + p_aeu->bit_name, 30);
1749 +
1750 + /* We now need to pass bitmask in its
1751 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
1752 +index 1e13dea66989..c9258aabca2d 100644
1753 +--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
1754 ++++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
1755 +@@ -398,7 +398,7 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
1756 + /* Vendor specific information */
1757 + dev->vendor_id = cdev->vendor_id;
1758 + dev->vendor_part_id = cdev->device_id;
1759 +- dev->hw_ver = 0;
1760 ++ dev->hw_ver = cdev->chip_rev;
1761 + dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
1762 + (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);
1763 +
1764 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
1765 +index 4b0144b2a252..e2050afaab7a 100644
1766 +--- a/drivers/net/usb/qmi_wwan.c
1767 ++++ b/drivers/net/usb/qmi_wwan.c
1768 +@@ -1220,6 +1220,7 @@ static const struct usb_device_id products[] = {
1769 + {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
1770 + {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
1771 + {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
1772 ++ {QMI_FIXED_INTF(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */
1773 + {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
1774 + {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
1775 + {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
1776 +diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
1777 +index 56f2112e0cd8..85df2e009310 100644
1778 +--- a/drivers/nfc/st-nci/se.c
1779 ++++ b/drivers/nfc/st-nci/se.c
1780 +@@ -344,6 +344,8 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev,
1781 +
1782 + transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
1783 + skb->len - 2, GFP_KERNEL);
1784 ++ if (!transaction)
1785 ++ return -ENOMEM;
1786 +
1787 + transaction->aid_len = skb->data[1];
1788 + memcpy(transaction->aid, &skb->data[2], transaction->aid_len);
1789 +diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
1790 +index 3a98563d4a12..eac608a457f0 100644
1791 +--- a/drivers/nfc/st21nfca/se.c
1792 ++++ b/drivers/nfc/st21nfca/se.c
1793 +@@ -326,6 +326,8 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
1794 +
1795 + transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
1796 + skb->len - 2, GFP_KERNEL);
1797 ++ if (!transaction)
1798 ++ return -ENOMEM;
1799 +
1800 + transaction->aid_len = skb->data[1];
1801 + memcpy(transaction->aid, &skb->data[2],
1802 +diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
1803 +index e7cce412f2cf..cb647c8c7b68 100644
1804 +--- a/fs/ceph/locks.c
1805 ++++ b/fs/ceph/locks.c
1806 +@@ -78,8 +78,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
1807 + req->r_wait_for_completion = ceph_lock_wait_for_completion;
1808 +
1809 + err = ceph_mdsc_do_request(mdsc, inode, req);
1810 +-
1811 +- if (operation == CEPH_MDS_OP_GETFILELOCK) {
1812 ++ if (!err && operation == CEPH_MDS_OP_GETFILELOCK) {
1813 + fl->fl_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid);
1814 + if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type)
1815 + fl->fl_type = F_RDLCK;
1816 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
1817 +index 23326b0cd562..58a502e622aa 100644
1818 +--- a/fs/cifs/smb2ops.c
1819 ++++ b/fs/cifs/smb2ops.c
1820 +@@ -2168,7 +2168,15 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, struct smb_rqst *old_rq)
1821 + static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
1822 + unsigned int buflen)
1823 + {
1824 +- sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
1825 ++ void *addr;
1826 ++ /*
1827 ++ * VMAP_STACK (at least) puts stack into the vmalloc address space
1828 ++ */
1829 ++ if (is_vmalloc_addr(buf))
1830 ++ addr = vmalloc_to_page(buf);
1831 ++ else
1832 ++ addr = virt_to_page(buf);
1833 ++ sg_set_page(sg, addr, buflen, offset_in_page(buf));
1834 + }
1835 +
1836 + static struct scatterlist *
1837 +diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
1838 +index a73144b3cb8c..22cff39cca29 100644
1839 +--- a/fs/nfs/nfs4_fs.h
1840 ++++ b/fs/nfs/nfs4_fs.h
1841 +@@ -433,7 +433,8 @@ static inline void nfs4_schedule_session_recovery(struct nfs4_session *session,
1842 +
1843 + extern struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *, gfp_t);
1844 + extern void nfs4_put_state_owner(struct nfs4_state_owner *);
1845 +-extern void nfs4_purge_state_owners(struct nfs_server *);
1846 ++extern void nfs4_purge_state_owners(struct nfs_server *, struct list_head *);
1847 ++extern void nfs4_free_state_owners(struct list_head *head);
1848 + extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *);
1849 + extern void nfs4_put_open_state(struct nfs4_state *);
1850 + extern void nfs4_close_state(struct nfs4_state *, fmode_t);
1851 +diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
1852 +index 8f96f6548dc8..0924b68b5657 100644
1853 +--- a/fs/nfs/nfs4client.c
1854 ++++ b/fs/nfs/nfs4client.c
1855 +@@ -739,9 +739,12 @@ out:
1856 +
1857 + static void nfs4_destroy_server(struct nfs_server *server)
1858 + {
1859 ++ LIST_HEAD(freeme);
1860 ++
1861 + nfs_server_return_all_delegations(server);
1862 + unset_pnfs_layoutdriver(server);
1863 +- nfs4_purge_state_owners(server);
1864 ++ nfs4_purge_state_owners(server, &freeme);
1865 ++ nfs4_free_state_owners(&freeme);
1866 + }
1867 +
1868 + /*
1869 +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
1870 +index 85ec07e4aa91..f92bfc787c5f 100644
1871 +--- a/fs/nfs/nfs4state.c
1872 ++++ b/fs/nfs/nfs4state.c
1873 +@@ -614,24 +614,39 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp)
1874 + /**
1875 + * nfs4_purge_state_owners - Release all cached state owners
1876 + * @server: nfs_server with cached state owners to release
1877 ++ * @head: resulting list of state owners
1878 + *
1879 + * Called at umount time. Remaining state owners will be on
1880 + * the LRU with ref count of zero.
1881 ++ * Note that the state owners are not freed, but are added
1882 ++ * to the list @head, which can later be used as an argument
1883 ++ * to nfs4_free_state_owners.
1884 + */
1885 +-void nfs4_purge_state_owners(struct nfs_server *server)
1886 ++void nfs4_purge_state_owners(struct nfs_server *server, struct list_head *head)
1887 + {
1888 + struct nfs_client *clp = server->nfs_client;
1889 + struct nfs4_state_owner *sp, *tmp;
1890 +- LIST_HEAD(doomed);
1891 +
1892 + spin_lock(&clp->cl_lock);
1893 + list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
1894 +- list_move(&sp->so_lru, &doomed);
1895 ++ list_move(&sp->so_lru, head);
1896 + nfs4_remove_state_owner_locked(sp);
1897 + }
1898 + spin_unlock(&clp->cl_lock);
1899 ++}
1900 +
1901 +- list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
1902 ++/**
1903 ++ * nfs4_free_state_owners - Release a list of cached state owners
1904 ++ * @head: resulting list of state owners
1905 ++ *
1906 ++ * Frees a list of state owners that was generated by
1907 ++ * nfs4_purge_state_owners
1908 ++ */
1909 ++void nfs4_free_state_owners(struct list_head *head)
1910 ++{
1911 ++ struct nfs4_state_owner *sp, *tmp;
1912 ++
1913 ++ list_for_each_entry_safe(sp, tmp, head, so_lru) {
1914 + list_del(&sp->so_lru);
1915 + nfs4_free_state_owner(sp);
1916 + }
1917 +@@ -1782,12 +1797,13 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov
1918 + struct nfs4_state_owner *sp;
1919 + struct nfs_server *server;
1920 + struct rb_node *pos;
1921 ++ LIST_HEAD(freeme);
1922 + int status = 0;
1923 +
1924 + restart:
1925 + rcu_read_lock();
1926 + list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
1927 +- nfs4_purge_state_owners(server);
1928 ++ nfs4_purge_state_owners(server, &freeme);
1929 + spin_lock(&clp->cl_lock);
1930 + for (pos = rb_first(&server->state_owners);
1931 + pos != NULL;
1932 +@@ -1816,6 +1832,7 @@ restart:
1933 + spin_unlock(&clp->cl_lock);
1934 + }
1935 + rcu_read_unlock();
1936 ++ nfs4_free_state_owners(&freeme);
1937 + return 0;
1938 + }
1939 +
1940 +diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
1941 +index 7a908d683258..a609d480606d 100644
1942 +--- a/fs/userfaultfd.c
1943 ++++ b/fs/userfaultfd.c
1944 +@@ -854,6 +854,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
1945 + /* len == 0 means wake all */
1946 + struct userfaultfd_wake_range range = { .len = 0, };
1947 + unsigned long new_flags;
1948 ++ bool still_valid;
1949 +
1950 + ACCESS_ONCE(ctx->released) = true;
1951 +
1952 +@@ -869,8 +870,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
1953 + * taking the mmap_sem for writing.
1954 + */
1955 + down_write(&mm->mmap_sem);
1956 +- if (!mmget_still_valid(mm))
1957 +- goto skip_mm;
1958 ++ still_valid = mmget_still_valid(mm);
1959 + prev = NULL;
1960 + for (vma = mm->mmap; vma; vma = vma->vm_next) {
1961 + cond_resched();
1962 +@@ -881,19 +881,20 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
1963 + continue;
1964 + }
1965 + new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
1966 +- prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
1967 +- new_flags, vma->anon_vma,
1968 +- vma->vm_file, vma->vm_pgoff,
1969 +- vma_policy(vma),
1970 +- NULL_VM_UFFD_CTX);
1971 +- if (prev)
1972 +- vma = prev;
1973 +- else
1974 +- prev = vma;
1975 ++ if (still_valid) {
1976 ++ prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
1977 ++ new_flags, vma->anon_vma,
1978 ++ vma->vm_file, vma->vm_pgoff,
1979 ++ vma_policy(vma),
1980 ++ NULL_VM_UFFD_CTX);
1981 ++ if (prev)
1982 ++ vma = prev;
1983 ++ else
1984 ++ prev = vma;
1985 ++ }
1986 + vma->vm_flags = new_flags;
1987 + vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
1988 + }
1989 +-skip_mm:
1990 + up_write(&mm->mmap_sem);
1991 + mmput(mm);
1992 + wakeup:
1993 +diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
1994 +index 1daa965f1e08..4e6f2c8574f7 100644
1995 +--- a/fs/xfs/xfs_iops.c
1996 ++++ b/fs/xfs/xfs_iops.c
1997 +@@ -789,6 +789,7 @@ xfs_setattr_nonsize(
1998 +
1999 + out_cancel:
2000 + xfs_trans_cancel(tp);
2001 ++ xfs_iunlock(ip, XFS_ILOCK_EXCL);
2002 + out_dqrele:
2003 + xfs_qm_dqrele(udqp);
2004 + xfs_qm_dqrele(gdqp);
2005 +diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
2006 +index aa08d4184608..92784b290564 100644
2007 +--- a/kernel/irq/irqdesc.c
2008 ++++ b/kernel/irq/irqdesc.c
2009 +@@ -277,6 +277,18 @@ static void irq_sysfs_add(int irq, struct irq_desc *desc)
2010 + }
2011 + }
2012 +
2013 ++static void irq_sysfs_del(struct irq_desc *desc)
2014 ++{
2015 ++ /*
2016 ++ * If irq_sysfs_init() has not yet been invoked (early boot), then
2017 ++ * irq_kobj_base is NULL and the descriptor was never added.
2018 ++ * kobject_del() complains about an object with no parent, so make
2019 ++ * it conditional.
2020 ++ */
2021 ++ if (irq_kobj_base)
2022 ++ kobject_del(&desc->kobj);
2023 ++}
2024 ++
2025 + static int __init irq_sysfs_init(void)
2026 + {
2027 + struct irq_desc *desc;
2028 +@@ -307,6 +319,7 @@ static struct kobj_type irq_kobj_type = {
2029 + };
2030 +
2031 + static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
2032 ++static void irq_sysfs_del(struct irq_desc *desc) {}
2033 +
2034 + #endif /* CONFIG_SYSFS */
2035 +
2036 +@@ -420,7 +433,7 @@ static void free_desc(unsigned int irq)
2037 + * The sysfs entry must be serialized against a concurrent
2038 + * irq_sysfs_init() as well.
2039 + */
2040 +- kobject_del(&desc->kobj);
2041 ++ irq_sysfs_del(desc);
2042 + delete_irq_desc(irq);
2043 +
2044 + /*
2045 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
2046 +index 930f2aa3bb4d..1adc2e6c50f9 100644
2047 +--- a/mm/huge_memory.c
2048 ++++ b/mm/huge_memory.c
2049 +@@ -33,6 +33,7 @@
2050 + #include <linux/page_idle.h>
2051 + #include <linux/shmem_fs.h>
2052 + #include <linux/oom.h>
2053 ++#include <linux/page_owner.h>
2054 +
2055 + #include <asm/tlb.h>
2056 + #include <asm/pgalloc.h>
2057 +@@ -2387,6 +2388,9 @@ static void __split_huge_page(struct page *page, struct list_head *list,
2058 + }
2059 +
2060 + ClearPageCompound(head);
2061 ++
2062 ++ split_page_owner(head, HPAGE_PMD_ORDER);
2063 ++
2064 + /* See comment in __split_huge_page_tail() */
2065 + if (PageAnon(head)) {
2066 + /* Additional pin to radix tree of swap cache */
2067 +diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
2068 +index 685049a9048d..c5317a7f05e9 100644
2069 +--- a/mm/zsmalloc.c
2070 ++++ b/mm/zsmalloc.c
2071 +@@ -52,6 +52,7 @@
2072 + #include <linux/zpool.h>
2073 + #include <linux/mount.h>
2074 + #include <linux/migrate.h>
2075 ++#include <linux/wait.h>
2076 + #include <linux/pagemap.h>
2077 +
2078 + #define ZSPAGE_MAGIC 0x58
2079 +@@ -267,6 +268,10 @@ struct zs_pool {
2080 + #ifdef CONFIG_COMPACTION
2081 + struct inode *inode;
2082 + struct work_struct free_work;
2083 ++ /* A wait queue for when migration races with async_free_zspage() */
2084 ++ struct wait_queue_head migration_wait;
2085 ++ atomic_long_t isolated_pages;
2086 ++ bool destroying;
2087 + #endif
2088 + };
2089 +
2090 +@@ -1878,6 +1883,31 @@ static void dec_zspage_isolation(struct zspage *zspage)
2091 + zspage->isolated--;
2092 + }
2093 +
2094 ++static void putback_zspage_deferred(struct zs_pool *pool,
2095 ++ struct size_class *class,
2096 ++ struct zspage *zspage)
2097 ++{
2098 ++ enum fullness_group fg;
2099 ++
2100 ++ fg = putback_zspage(class, zspage);
2101 ++ if (fg == ZS_EMPTY)
2102 ++ schedule_work(&pool->free_work);
2103 ++
2104 ++}
2105 ++
2106 ++static inline void zs_pool_dec_isolated(struct zs_pool *pool)
2107 ++{
2108 ++ VM_BUG_ON(atomic_long_read(&pool->isolated_pages) <= 0);
2109 ++ atomic_long_dec(&pool->isolated_pages);
2110 ++ /*
2111 ++ * There's no possibility of racing, since wait_for_isolated_drain()
2112 ++ * checks the isolated count under &class->lock after enqueuing
2113 ++ * on migration_wait.
2114 ++ */
2115 ++ if (atomic_long_read(&pool->isolated_pages) == 0 && pool->destroying)
2116 ++ wake_up_all(&pool->migration_wait);
2117 ++}
2118 ++
2119 + static void replace_sub_page(struct size_class *class, struct zspage *zspage,
2120 + struct page *newpage, struct page *oldpage)
2121 + {
2122 +@@ -1947,6 +1977,7 @@ bool zs_page_isolate(struct page *page, isolate_mode_t mode)
2123 + */
2124 + if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
2125 + get_zspage_mapping(zspage, &class_idx, &fullness);
2126 ++ atomic_long_inc(&pool->isolated_pages);
2127 + remove_zspage(class, zspage, fullness);
2128 + }
2129 +
2130 +@@ -2046,8 +2077,16 @@ int zs_page_migrate(struct address_space *mapping, struct page *newpage,
2131 + * Page migration is done so let's putback isolated zspage to
2132 + * the list if @page is final isolated subpage in the zspage.
2133 + */
2134 +- if (!is_zspage_isolated(zspage))
2135 +- putback_zspage(class, zspage);
2136 ++ if (!is_zspage_isolated(zspage)) {
2137 ++ /*
2138 ++ * We cannot race with zs_destroy_pool() here because we wait
2139 ++ * for isolation to hit zero before we start destroying.
2140 ++ * Also, we ensure that everyone can see pool->destroying before
2141 ++ * we start waiting.
2142 ++ */
2143 ++ putback_zspage_deferred(pool, class, zspage);
2144 ++ zs_pool_dec_isolated(pool);
2145 ++ }
2146 +
2147 + reset_page(page);
2148 + put_page(page);
2149 +@@ -2093,13 +2132,12 @@ void zs_page_putback(struct page *page)
2150 + spin_lock(&class->lock);
2151 + dec_zspage_isolation(zspage);
2152 + if (!is_zspage_isolated(zspage)) {
2153 +- fg = putback_zspage(class, zspage);
2154 + /*
2155 + * Due to page_lock, we cannot free zspage immediately
2156 + * so let's defer.
2157 + */
2158 +- if (fg == ZS_EMPTY)
2159 +- schedule_work(&pool->free_work);
2160 ++ putback_zspage_deferred(pool, class, zspage);
2161 ++ zs_pool_dec_isolated(pool);
2162 + }
2163 + spin_unlock(&class->lock);
2164 + }
2165 +@@ -2123,8 +2161,36 @@ static int zs_register_migration(struct zs_pool *pool)
2166 + return 0;
2167 + }
2168 +
2169 ++static bool pool_isolated_are_drained(struct zs_pool *pool)
2170 ++{
2171 ++ return atomic_long_read(&pool->isolated_pages) == 0;
2172 ++}
2173 ++
2174 ++/* Function for resolving migration */
2175 ++static void wait_for_isolated_drain(struct zs_pool *pool)
2176 ++{
2177 ++
2178 ++ /*
2179 ++ * We're in the process of destroying the pool, so there are no
2180 ++ * active allocations. zs_page_isolate() fails for completely free
2181 ++ * zspages, so we need only wait for the zs_pool's isolated
2182 ++ * count to hit zero.
2183 ++ */
2184 ++ wait_event(pool->migration_wait,
2185 ++ pool_isolated_are_drained(pool));
2186 ++}
2187 ++
2188 + static void zs_unregister_migration(struct zs_pool *pool)
2189 + {
2190 ++ pool->destroying = true;
2191 ++ /*
2192 ++ * We need a memory barrier here to ensure global visibility of
2193 ++ * pool->destroying. Thus pool->isolated pages will either be 0 in which
2194 ++ * case we don't care, or it will be > 0 and pool->destroying will
2195 ++ * ensure that we wake up once isolation hits 0.
2196 ++ */
2197 ++ smp_mb();
2198 ++ wait_for_isolated_drain(pool); /* This can block */
2199 + flush_work(&pool->free_work);
2200 + iput(pool->inode);
2201 + }
2202 +@@ -2365,6 +2431,8 @@ struct zs_pool *zs_create_pool(const char *name)
2203 + if (!pool->name)
2204 + goto err;
2205 +
2206 ++ init_waitqueue_head(&pool->migration_wait);
2207 ++
2208 + if (create_cache(pool))
2209 + goto err;
2210 +
2211 +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
2212 +index f9c6e8ca1fcb..100b4f88179a 100644
2213 +--- a/net/bridge/netfilter/ebtables.c
2214 ++++ b/net/bridge/netfilter/ebtables.c
2215 +@@ -2273,8 +2273,10 @@ static int compat_do_replace(struct net *net, void __user *user,
2216 + state.buf_kern_len = size64;
2217 +
2218 + ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2219 +- if (WARN_ON(ret < 0))
2220 ++ if (WARN_ON(ret < 0)) {
2221 ++ vfree(entries_tmp);
2222 + goto out_unlock;
2223 ++ }
2224 +
2225 + vfree(entries_tmp);
2226 + tmp.entries_size = size64;
2227 +diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
2228 +index 53ea2d48896c..92b2641ab93b 100644
2229 +--- a/net/ceph/osd_client.c
2230 ++++ b/net/ceph/osd_client.c
2231 +@@ -1330,7 +1330,7 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
2232 + struct ceph_osds up, acting;
2233 + bool force_resend = false;
2234 + bool unpaused = false;
2235 +- bool legacy_change;
2236 ++ bool legacy_change = false;
2237 + bool split = false;
2238 + bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
2239 + bool recovery_deletes = ceph_osdmap_flag(osdc,
2240 +@@ -1426,15 +1426,14 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
2241 + t->osd = acting.primary;
2242 + }
2243 +
2244 +- if (unpaused || legacy_change || force_resend ||
2245 +- (split && con && CEPH_HAVE_FEATURE(con->peer_features,
2246 +- RESEND_ON_SPLIT)))
2247 ++ if (unpaused || legacy_change || force_resend || split)
2248 + ct_res = CALC_TARGET_NEED_RESEND;
2249 + else
2250 + ct_res = CALC_TARGET_NO_ACTION;
2251 +
2252 + out:
2253 +- dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
2254 ++ dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused,
2255 ++ legacy_change, force_resend, split, ct_res, t->osd);
2256 + return ct_res;
2257 + }
2258 +
2259 +diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
2260 +index a3f1dc7cf538..dbf17d3596a6 100644
2261 +--- a/net/netfilter/ipset/ip_set_core.c
2262 ++++ b/net/netfilter/ipset/ip_set_core.c
2263 +@@ -1128,7 +1128,7 @@ static int ip_set_rename(struct net *net, struct sock *ctnl,
2264 + return -ENOENT;
2265 +
2266 + write_lock_bh(&ip_set_ref_lock);
2267 +- if (set->ref != 0) {
2268 ++ if (set->ref != 0 || set->ref_netlink != 0) {
2269 + ret = -IPSET_ERR_REFERENCED;
2270 + goto out;
2271 + }
2272 +diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
2273 +index 9aa741d27279..0480ec4c8035 100644
2274 +--- a/sound/soc/davinci/davinci-mcasp.c
2275 ++++ b/sound/soc/davinci/davinci-mcasp.c
2276 +@@ -1158,6 +1158,28 @@ static int davinci_mcasp_trigger(struct snd_pcm_substream *substream,
2277 + return ret;
2278 + }
2279 +
2280 ++static int davinci_mcasp_hw_rule_slot_width(struct snd_pcm_hw_params *params,
2281 ++ struct snd_pcm_hw_rule *rule)
2282 ++{
2283 ++ struct davinci_mcasp_ruledata *rd = rule->private;
2284 ++ struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
2285 ++ struct snd_mask nfmt;
2286 ++ int i, slot_width;
2287 ++
2288 ++ snd_mask_none(&nfmt);
2289 ++ slot_width = rd->mcasp->slot_width;
2290 ++
2291 ++ for (i = 0; i <= SNDRV_PCM_FORMAT_LAST; i++) {
2292 ++ if (snd_mask_test(fmt, i)) {
2293 ++ if (snd_pcm_format_width(i) <= slot_width) {
2294 ++ snd_mask_set(&nfmt, i);
2295 ++ }
2296 ++ }
2297 ++ }
2298 ++
2299 ++ return snd_mask_refine(fmt, &nfmt);
2300 ++}
2301 ++
2302 + static const unsigned int davinci_mcasp_dai_rates[] = {
2303 + 8000, 11025, 16000, 22050, 32000, 44100, 48000, 64000,
2304 + 88200, 96000, 176400, 192000,
2305 +@@ -1251,7 +1273,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
2306 + struct davinci_mcasp_ruledata *ruledata =
2307 + &mcasp->ruledata[substream->stream];
2308 + u32 max_channels = 0;
2309 +- int i, dir;
2310 ++ int i, dir, ret;
2311 + int tdm_slots = mcasp->tdm_slots;
2312 +
2313 + /* Do not allow more then one stream per direction */
2314 +@@ -1280,6 +1302,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
2315 + max_channels++;
2316 + }
2317 + ruledata->serializers = max_channels;
2318 ++ ruledata->mcasp = mcasp;
2319 + max_channels *= tdm_slots;
2320 + /*
2321 + * If the already active stream has less channels than the calculated
2322 +@@ -1305,20 +1328,22 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
2323 + 0, SNDRV_PCM_HW_PARAM_CHANNELS,
2324 + &mcasp->chconstr[substream->stream]);
2325 +
2326 +- if (mcasp->slot_width)
2327 +- snd_pcm_hw_constraint_minmax(substream->runtime,
2328 +- SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
2329 +- 8, mcasp->slot_width);
2330 ++ if (mcasp->slot_width) {
2331 ++ /* Only allow formats that require <= slot_width bits on the bus */
2332 ++ ret = snd_pcm_hw_rule_add(substream->runtime, 0,
2333 ++ SNDRV_PCM_HW_PARAM_FORMAT,
2334 ++ davinci_mcasp_hw_rule_slot_width,
2335 ++ ruledata,
2336 ++ SNDRV_PCM_HW_PARAM_FORMAT, -1);
2337 ++ if (ret)
2338 ++ return ret;
2339 ++ }
2340 +
2341 + /*
2342 + * If we rely on implicit BCLK divider setting we should
2343 + * set constraints based on what we can provide.
2344 + */
2345 + if (mcasp->bclk_master && mcasp->bclk_div == 0 && mcasp->sysclk_freq) {
2346 +- int ret;
2347 +-
2348 +- ruledata->mcasp = mcasp;
2349 +-
2350 + ret = snd_pcm_hw_rule_add(substream->runtime, 0,
2351 + SNDRV_PCM_HW_PARAM_RATE,
2352 + davinci_mcasp_hw_rule_rate,
2353 +diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
2354 +index 42c2a3065b77..ff5206f5455d 100644
2355 +--- a/sound/soc/soc-core.c
2356 ++++ b/sound/soc/soc-core.c
2357 +@@ -1757,8 +1757,11 @@ static int soc_probe_link_dais(struct snd_soc_card *card,
2358 + }
2359 + }
2360 +
2361 +- if (dai_link->dai_fmt)
2362 +- snd_soc_runtime_set_dai_fmt(rtd, dai_link->dai_fmt);
2363 ++ if (dai_link->dai_fmt) {
2364 ++ ret = snd_soc_runtime_set_dai_fmt(rtd, dai_link->dai_fmt);
2365 ++ if (ret)
2366 ++ return ret;
2367 ++ }
2368 +
2369 + ret = soc_post_component_init(rtd, dai_link->name);
2370 + if (ret)
2371 +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
2372 +index b4c8ba412a5c..104d5f487c7d 100644
2373 +--- a/sound/soc/soc-dapm.c
2374 ++++ b/sound/soc/soc-dapm.c
2375 +@@ -1152,8 +1152,8 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
2376 + list_add_tail(&widget->work_list, list);
2377 +
2378 + if (custom_stop_condition && custom_stop_condition(widget, dir)) {
2379 +- widget->endpoints[dir] = 1;
2380 +- return widget->endpoints[dir];
2381 ++ list = NULL;
2382 ++ custom_stop_condition = NULL;
2383 + }
2384 +
2385 + if ((widget->is_ep & SND_SOC_DAPM_DIR_TO_EP(dir)) && widget->connected) {
2386 +@@ -1190,8 +1190,8 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
2387 + *
2388 + * Optionally, can be supplied with a function acting as a stopping condition.
2389 + * This function takes the dapm widget currently being examined and the walk
2390 +- * direction as an arguments, it should return true if the walk should be
2391 +- * stopped and false otherwise.
2392 ++ * direction as an arguments, it should return true if widgets from that point
2393 ++ * in the graph onwards should not be added to the widget list.
2394 + */
2395 + static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
2396 + struct list_head *list,
2397 +diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
2398 +index 997875c770b1..275f1c3c73b6 100644
2399 +--- a/tools/perf/bench/numa.c
2400 ++++ b/tools/perf/bench/numa.c
2401 +@@ -378,8 +378,10 @@ static u8 *alloc_data(ssize_t bytes0, int map_flags,
2402 +
2403 + /* Allocate and initialize all memory on CPU#0: */
2404 + if (init_cpu0) {
2405 +- orig_mask = bind_to_node(0);
2406 +- bind_to_memnode(0);
2407 ++ int node = numa_node_of_cpu(0);
2408 ++
2409 ++ orig_mask = bind_to_node(node);
2410 ++ bind_to_memnode(node);
2411 + }
2412 +
2413 + bytes = bytes0 + HPSIZE;
2414 +diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c
2415 +index 25a42acabee1..13a33fb71a6d 100644
2416 +--- a/tools/perf/builtin-ftrace.c
2417 ++++ b/tools/perf/builtin-ftrace.c
2418 +@@ -162,7 +162,7 @@ static int set_tracing_cpumask(struct cpu_map *cpumap)
2419 + int last_cpu;
2420 +
2421 + last_cpu = cpu_map__cpu(cpumap, cpumap->nr - 1);
2422 +- mask_size = (last_cpu + 3) / 4 + 1;
2423 ++ mask_size = last_cpu / 4 + 2; /* one more byte for EOS */
2424 + mask_size += last_cpu / 32; /* ',' is needed for every 32th cpus */
2425 +
2426 + cpumask = malloc(mask_size);
2427 +diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
2428 +index d51dc9ca8861..94a7cabe9b82 100644
2429 +--- a/tools/perf/pmu-events/jevents.c
2430 ++++ b/tools/perf/pmu-events/jevents.c
2431 +@@ -346,6 +346,7 @@ static struct fixed {
2432 + { "inst_retired.any_p", "event=0xc0" },
2433 + { "cpu_clk_unhalted.ref", "event=0x0,umask=0x03" },
2434 + { "cpu_clk_unhalted.thread", "event=0x3c" },
2435 ++ { "cpu_clk_unhalted.core", "event=0x3c" },
2436 + { "cpu_clk_unhalted.thread_any", "event=0x3c,any=1" },
2437 + { NULL, NULL},
2438 + };
2439 +diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
2440 +index 424b82a7d078..f0679613bd18 100644
2441 +--- a/tools/perf/tests/parse-events.c
2442 ++++ b/tools/perf/tests/parse-events.c
2443 +@@ -19,32 +19,6 @@
2444 + #define PERF_TP_SAMPLE_TYPE (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | \
2445 + PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD)
2446 +
2447 +-#if defined(__s390x__)
2448 +-/* Return true if kvm module is available and loaded. Test this
2449 +- * and retun success when trace point kvm_s390_create_vm
2450 +- * exists. Otherwise this test always fails.
2451 +- */
2452 +-static bool kvm_s390_create_vm_valid(void)
2453 +-{
2454 +- char *eventfile;
2455 +- bool rc = false;
2456 +-
2457 +- eventfile = get_events_file("kvm-s390");
2458 +-
2459 +- if (eventfile) {
2460 +- DIR *mydir = opendir(eventfile);
2461 +-
2462 +- if (mydir) {
2463 +- rc = true;
2464 +- closedir(mydir);
2465 +- }
2466 +- put_events_file(eventfile);
2467 +- }
2468 +-
2469 +- return rc;
2470 +-}
2471 +-#endif
2472 +-
2473 + static int test__checkevent_tracepoint(struct perf_evlist *evlist)
2474 + {
2475 + struct perf_evsel *evsel = perf_evlist__first(evlist);
2476 +@@ -1626,7 +1600,6 @@ static struct evlist_test test__events[] = {
2477 + {
2478 + .name = "kvm-s390:kvm_s390_create_vm",
2479 + .check = test__checkevent_tracepoint,
2480 +- .valid = kvm_s390_create_vm_valid,
2481 + .id = 100,
2482 + },
2483 + #endif
2484 +diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
2485 +index 383674f448fc..f93846edc1e0 100644
2486 +--- a/tools/perf/util/cpumap.c
2487 ++++ b/tools/perf/util/cpumap.c
2488 +@@ -701,7 +701,10 @@ size_t cpu_map__snprint_mask(struct cpu_map *map, char *buf, size_t size)
2489 + unsigned char *bitmap;
2490 + int last_cpu = cpu_map__cpu(map, map->nr - 1);
2491 +
2492 +- bitmap = zalloc((last_cpu + 7) / 8);
2493 ++ if (buf == NULL)
2494 ++ return 0;
2495 ++
2496 ++ bitmap = zalloc(last_cpu / 8 + 1);
2497 + if (bitmap == NULL) {
2498 + buf[0] = '\0';
2499 + return 0;
2500 +diff --git a/tools/testing/selftests/kvm/config b/tools/testing/selftests/kvm/config
2501 +new file mode 100644
2502 +index 000000000000..63ed533f73d6
2503 +--- /dev/null
2504 ++++ b/tools/testing/selftests/kvm/config
2505 +@@ -0,0 +1,3 @@
2506 ++CONFIG_KVM=y
2507 ++CONFIG_KVM_INTEL=y
2508 ++CONFIG_KVM_AMD=y