Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Thu, 29 Aug 2019 14:15:31
Message-Id: 1567088104.e82b3f7e057ed2b8500c8faacd875f3dc7d43572.mpagano@gentoo
1 commit: e82b3f7e057ed2b8500c8faacd875f3dc7d43572
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Thu Aug 29 14:15:04 2019 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Thu Aug 29 14:15:04 2019 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e82b3f7e
7
8 Linux patch 4.19.69
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1068_linux-4.19.69.patch | 4438 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 4442 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index dd97210..c203203 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -315,6 +315,10 @@ Patch: 1067_linux-4.19.68.patch
21 From: https://www.kernel.org
22 Desc: Linux 4.19.68
23
24 +Patch: 1068_linux-4.19.69.patch
25 +From: https://www.kernel.org
26 +Desc: Linux 4.19.69
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1068_linux-4.19.69.patch b/1068_linux-4.19.69.patch
33 new file mode 100644
34 index 0000000..c959f94
35 --- /dev/null
36 +++ b/1068_linux-4.19.69.patch
37 @@ -0,0 +1,4438 @@
38 +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
39 +index c96a8e9ad5c2..e8ddf0ef232e 100644
40 +--- a/Documentation/admin-guide/kernel-parameters.txt
41 ++++ b/Documentation/admin-guide/kernel-parameters.txt
42 +@@ -3948,6 +3948,13 @@
43 + Run specified binary instead of /init from the ramdisk,
44 + used for early userspace startup. See initrd.
45 +
46 ++ rdrand= [X86]
47 ++ force - Override the decision by the kernel to hide the
48 ++ advertisement of RDRAND support (this affects
49 ++ certain AMD processors because of buggy BIOS
50 ++ support, specifically around the suspend/resume
51 ++ path).
52 ++
53 + rdt= [HW,X86,RDT]
54 + Turn on/off individual RDT features. List is:
55 + cmt, mbmtotal, mbmlocal, l3cat, l3cdp, l2cat, l2cdp,
56 +diff --git a/Makefile b/Makefile
57 +index 6f164b04d953..677341239449 100644
58 +--- a/Makefile
59 ++++ b/Makefile
60 +@@ -1,7 +1,7 @@
61 + # SPDX-License-Identifier: GPL-2.0
62 + VERSION = 4
63 + PATCHLEVEL = 19
64 +-SUBLEVEL = 68
65 ++SUBLEVEL = 69
66 + EXTRAVERSION =
67 + NAME = "People's Front"
68 +
69 +diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
70 +index fd6cde23bb5d..871fa50a09f1 100644
71 +--- a/arch/arm/kvm/coproc.c
72 ++++ b/arch/arm/kvm/coproc.c
73 +@@ -658,13 +658,22 @@ int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
74 + }
75 +
76 + static void reset_coproc_regs(struct kvm_vcpu *vcpu,
77 +- const struct coproc_reg *table, size_t num)
78 ++ const struct coproc_reg *table, size_t num,
79 ++ unsigned long *bmap)
80 + {
81 + unsigned long i;
82 +
83 + for (i = 0; i < num; i++)
84 +- if (table[i].reset)
85 ++ if (table[i].reset) {
86 ++ int reg = table[i].reg;
87 ++
88 + table[i].reset(vcpu, &table[i]);
89 ++ if (reg > 0 && reg < NR_CP15_REGS) {
90 ++ set_bit(reg, bmap);
91 ++ if (table[i].is_64bit)
92 ++ set_bit(reg + 1, bmap);
93 ++ }
94 ++ }
95 + }
96 +
97 + static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu)
98 +@@ -1439,17 +1448,15 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
99 + {
100 + size_t num;
101 + const struct coproc_reg *table;
102 +-
103 +- /* Catch someone adding a register without putting in reset entry. */
104 +- memset(vcpu->arch.ctxt.cp15, 0x42, sizeof(vcpu->arch.ctxt.cp15));
105 ++ DECLARE_BITMAP(bmap, NR_CP15_REGS) = { 0, };
106 +
107 + /* Generic chip reset first (so target could override). */
108 +- reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));
109 ++ reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs), bmap);
110 +
111 + table = get_target_table(vcpu->arch.target, &num);
112 +- reset_coproc_regs(vcpu, table, num);
113 ++ reset_coproc_regs(vcpu, table, num, bmap);
114 +
115 + for (num = 1; num < NR_CP15_REGS; num++)
116 +- WARN(vcpu_cp15(vcpu, num) == 0x42424242,
117 ++ WARN(!test_bit(num, bmap),
118 + "Didn't reset vcpu_cp15(vcpu, %zi)", num);
119 + }
120 +diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
121 +index d112af75680b..6da2bbdb9648 100644
122 +--- a/arch/arm64/kvm/sys_regs.c
123 ++++ b/arch/arm64/kvm/sys_regs.c
124 +@@ -626,7 +626,7 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
125 + */
126 + val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
127 + | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
128 +- __vcpu_sys_reg(vcpu, PMCR_EL0) = val;
129 ++ __vcpu_sys_reg(vcpu, r->reg) = val;
130 + }
131 +
132 + static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
133 +@@ -968,13 +968,13 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
134 + /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
135 + #define DBG_BCR_BVR_WCR_WVR_EL1(n) \
136 + { SYS_DESC(SYS_DBGBVRn_EL1(n)), \
137 +- trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr }, \
138 ++ trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr }, \
139 + { SYS_DESC(SYS_DBGBCRn_EL1(n)), \
140 +- trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr }, \
141 ++ trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr }, \
142 + { SYS_DESC(SYS_DBGWVRn_EL1(n)), \
143 +- trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr }, \
144 ++ trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr }, \
145 + { SYS_DESC(SYS_DBGWCRn_EL1(n)), \
146 +- trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }
147 ++ trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }
148 +
149 + /* Macro to expand the PMEVCNTRn_EL0 register */
150 + #define PMU_PMEVCNTR_EL0(n) \
151 +@@ -1359,7 +1359,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
152 +
153 + { SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 },
154 +
155 +- { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, },
156 ++ { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
157 + { SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
158 + { SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
159 + { SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
160 +@@ -2072,13 +2072,19 @@ static int emulate_sys_reg(struct kvm_vcpu *vcpu,
161 + }
162 +
163 + static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
164 +- const struct sys_reg_desc *table, size_t num)
165 ++ const struct sys_reg_desc *table, size_t num,
166 ++ unsigned long *bmap)
167 + {
168 + unsigned long i;
169 +
170 + for (i = 0; i < num; i++)
171 +- if (table[i].reset)
172 ++ if (table[i].reset) {
173 ++ int reg = table[i].reg;
174 ++
175 + table[i].reset(vcpu, &table[i]);
176 ++ if (reg > 0 && reg < NR_SYS_REGS)
177 ++ set_bit(reg, bmap);
178 ++ }
179 + }
180 +
181 + /**
182 +@@ -2576,18 +2582,16 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
183 + {
184 + size_t num;
185 + const struct sys_reg_desc *table;
186 +-
187 +- /* Catch someone adding a register without putting in reset entry. */
188 +- memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));
189 ++ DECLARE_BITMAP(bmap, NR_SYS_REGS) = { 0, };
190 +
191 + /* Generic chip reset first (so target could override). */
192 +- reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
193 ++ reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs), bmap);
194 +
195 + table = get_target_table(vcpu->arch.target, true, &num);
196 +- reset_sys_reg_descs(vcpu, table, num);
197 ++ reset_sys_reg_descs(vcpu, table, num, bmap);
198 +
199 + for (num = 1; num < NR_SYS_REGS; num++) {
200 +- if (WARN(__vcpu_sys_reg(vcpu, num) == 0x4242424242424242,
201 ++ if (WARN(!test_bit(num, bmap),
202 + "Didn't reset __vcpu_sys_reg(%zi)\n", num))
203 + break;
204 + }
205 +diff --git a/arch/mips/kernel/cacheinfo.c b/arch/mips/kernel/cacheinfo.c
206 +index 97d5239ca47b..428ef2189203 100644
207 +--- a/arch/mips/kernel/cacheinfo.c
208 ++++ b/arch/mips/kernel/cacheinfo.c
209 +@@ -80,6 +80,8 @@ static int __populate_cache_leaves(unsigned int cpu)
210 + if (c->tcache.waysize)
211 + populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED);
212 +
213 ++ this_cpu_ci->cpu_map_populated = true;
214 ++
215 + return 0;
216 + }
217 +
218 +diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
219 +index 5f209f111e59..df7ddd246eaa 100644
220 +--- a/arch/mips/kernel/i8253.c
221 ++++ b/arch/mips/kernel/i8253.c
222 +@@ -32,7 +32,8 @@ void __init setup_pit_timer(void)
223 +
224 + static int __init init_pit_clocksource(void)
225 + {
226 +- if (num_possible_cpus() > 1) /* PIT does not scale! */
227 ++ if (num_possible_cpus() > 1 || /* PIT does not scale! */
228 ++ !clockevent_state_periodic(&i8253_clockevent))
229 + return 0;
230 +
231 + return clocksource_i8253_init();
232 +diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
233 +index 262ba9481781..1bf6aaefd26a 100644
234 +--- a/arch/powerpc/kernel/misc_64.S
235 ++++ b/arch/powerpc/kernel/misc_64.S
236 +@@ -135,7 +135,7 @@ _GLOBAL_TOC(flush_dcache_range)
237 + subf r8,r6,r4 /* compute length */
238 + add r8,r8,r5 /* ensure we get enough */
239 + lwz r9,DCACHEL1LOGBLOCKSIZE(r10) /* Get log-2 of dcache block size */
240 +- srw. r8,r8,r9 /* compute line count */
241 ++ srd. r8,r8,r9 /* compute line count */
242 + beqlr /* nothing to do? */
243 + mtctr r8
244 + 0: dcbst 0,r6
245 +@@ -153,7 +153,7 @@ _GLOBAL(flush_inval_dcache_range)
246 + subf r8,r6,r4 /* compute length */
247 + add r8,r8,r5 /* ensure we get enough */
248 + lwz r9,DCACHEL1LOGBLOCKSIZE(r10)/* Get log-2 of dcache block size */
249 +- srw. r8,r8,r9 /* compute line count */
250 ++ srd. r8,r8,r9 /* compute line count */
251 + beqlr /* nothing to do? */
252 + sync
253 + isync
254 +diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
255 +index b43f8d33a369..18ede6e806b9 100644
256 +--- a/arch/s390/kernel/vmlinux.lds.S
257 ++++ b/arch/s390/kernel/vmlinux.lds.S
258 +@@ -31,10 +31,9 @@ PHDRS {
259 + SECTIONS
260 + {
261 + . = 0x100000;
262 +- _stext = .; /* Start of text section */
263 + .text : {
264 +- /* Text and read-only data */
265 +- _text = .;
266 ++ _stext = .; /* Start of text section */
267 ++ _text = .; /* Text and read-only data */
268 + HEAD_TEXT
269 + TEXT_TEXT
270 + SCHED_TEXT
271 +@@ -46,11 +45,10 @@ SECTIONS
272 + *(.text.*_indirect_*)
273 + *(.fixup)
274 + *(.gnu.warning)
275 ++ . = ALIGN(PAGE_SIZE);
276 ++ _etext = .; /* End of text section */
277 + } :text = 0x0700
278 +
279 +- . = ALIGN(PAGE_SIZE);
280 +- _etext = .; /* End of text section */
281 +-
282 + NOTES :text :note
283 +
284 + .dummy : { *(.dummy) } :data
285 +diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
286 +index a07ffd23e4dd..d3983fdf1012 100644
287 +--- a/arch/x86/include/asm/bootparam_utils.h
288 ++++ b/arch/x86/include/asm/bootparam_utils.h
289 +@@ -18,6 +18,20 @@
290 + * Note: efi_info is commonly left uninitialized, but that field has a
291 + * private magic, so it is better to leave it unchanged.
292 + */
293 ++
294 ++#define sizeof_mbr(type, member) ({ sizeof(((type *)0)->member); })
295 ++
296 ++#define BOOT_PARAM_PRESERVE(struct_member) \
297 ++ { \
298 ++ .start = offsetof(struct boot_params, struct_member), \
299 ++ .len = sizeof_mbr(struct boot_params, struct_member), \
300 ++ }
301 ++
302 ++struct boot_params_to_save {
303 ++ unsigned int start;
304 ++ unsigned int len;
305 ++};
306 ++
307 + static void sanitize_boot_params(struct boot_params *boot_params)
308 + {
309 + /*
310 +@@ -36,19 +50,40 @@ static void sanitize_boot_params(struct boot_params *boot_params)
311 + */
312 + if (boot_params->sentinel) {
313 + /* fields in boot_params are left uninitialized, clear them */
314 +- memset(&boot_params->ext_ramdisk_image, 0,
315 +- (char *)&boot_params->efi_info -
316 +- (char *)&boot_params->ext_ramdisk_image);
317 +- memset(&boot_params->kbd_status, 0,
318 +- (char *)&boot_params->hdr -
319 +- (char *)&boot_params->kbd_status);
320 +- memset(&boot_params->_pad7[0], 0,
321 +- (char *)&boot_params->edd_mbr_sig_buffer[0] -
322 +- (char *)&boot_params->_pad7[0]);
323 +- memset(&boot_params->_pad8[0], 0,
324 +- (char *)&boot_params->eddbuf[0] -
325 +- (char *)&boot_params->_pad8[0]);
326 +- memset(&boot_params->_pad9[0], 0, sizeof(boot_params->_pad9));
327 ++ static struct boot_params scratch;
328 ++ char *bp_base = (char *)boot_params;
329 ++ char *save_base = (char *)&scratch;
330 ++ int i;
331 ++
332 ++ const struct boot_params_to_save to_save[] = {
333 ++ BOOT_PARAM_PRESERVE(screen_info),
334 ++ BOOT_PARAM_PRESERVE(apm_bios_info),
335 ++ BOOT_PARAM_PRESERVE(tboot_addr),
336 ++ BOOT_PARAM_PRESERVE(ist_info),
337 ++ BOOT_PARAM_PRESERVE(hd0_info),
338 ++ BOOT_PARAM_PRESERVE(hd1_info),
339 ++ BOOT_PARAM_PRESERVE(sys_desc_table),
340 ++ BOOT_PARAM_PRESERVE(olpc_ofw_header),
341 ++ BOOT_PARAM_PRESERVE(efi_info),
342 ++ BOOT_PARAM_PRESERVE(alt_mem_k),
343 ++ BOOT_PARAM_PRESERVE(scratch),
344 ++ BOOT_PARAM_PRESERVE(e820_entries),
345 ++ BOOT_PARAM_PRESERVE(eddbuf_entries),
346 ++ BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
347 ++ BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
348 ++ BOOT_PARAM_PRESERVE(hdr),
349 ++ BOOT_PARAM_PRESERVE(e820_table),
350 ++ BOOT_PARAM_PRESERVE(eddbuf),
351 ++ };
352 ++
353 ++ memset(&scratch, 0, sizeof(scratch));
354 ++
355 ++ for (i = 0; i < ARRAY_SIZE(to_save); i++) {
356 ++ memcpy(save_base + to_save[i].start,
357 ++ bp_base + to_save[i].start, to_save[i].len);
358 ++ }
359 ++
360 ++ memcpy(boot_params, save_base, sizeof(*boot_params));
361 + }
362 + }
363 +
364 +diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
365 +index f85f43db9225..a1d22e4428f6 100644
366 +--- a/arch/x86/include/asm/msr-index.h
367 ++++ b/arch/x86/include/asm/msr-index.h
368 +@@ -334,6 +334,7 @@
369 + #define MSR_AMD64_PATCH_LEVEL 0x0000008b
370 + #define MSR_AMD64_TSC_RATIO 0xc0000104
371 + #define MSR_AMD64_NB_CFG 0xc001001f
372 ++#define MSR_AMD64_CPUID_FN_1 0xc0011004
373 + #define MSR_AMD64_PATCH_LOADER 0xc0010020
374 + #define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
375 + #define MSR_AMD64_OSVW_STATUS 0xc0010141
376 +diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
377 +index 599c273f5d00..28cb2b31527a 100644
378 +--- a/arch/x86/include/asm/nospec-branch.h
379 ++++ b/arch/x86/include/asm/nospec-branch.h
380 +@@ -202,7 +202,7 @@
381 + " lfence;\n" \
382 + " jmp 902b;\n" \
383 + " .align 16\n" \
384 +- "903: addl $4, %%esp;\n" \
385 ++ "903: lea 4(%%esp), %%esp;\n" \
386 + " pushl %[thunk_target];\n" \
387 + " ret;\n" \
388 + " .align 16\n" \
389 +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
390 +index 272a12865b2a..b316bd61a6ac 100644
391 +--- a/arch/x86/kernel/apic/apic.c
392 ++++ b/arch/x86/kernel/apic/apic.c
393 +@@ -715,7 +715,7 @@ static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
394 + static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
395 +
396 + /*
397 +- * Temporary interrupt handler.
398 ++ * Temporary interrupt handler and polled calibration function.
399 + */
400 + static void __init lapic_cal_handler(struct clock_event_device *dev)
401 + {
402 +@@ -799,7 +799,8 @@ calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
403 + static int __init calibrate_APIC_clock(void)
404 + {
405 + struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
406 +- void (*real_handler)(struct clock_event_device *dev);
407 ++ u64 tsc_perj = 0, tsc_start = 0;
408 ++ unsigned long jif_start;
409 + unsigned long deltaj;
410 + long delta, deltatsc;
411 + int pm_referenced = 0;
412 +@@ -830,28 +831,64 @@ static int __init calibrate_APIC_clock(void)
413 + apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
414 + "calibrating APIC timer ...\n");
415 +
416 ++ /*
417 ++ * There are platforms w/o global clockevent devices. Instead of
418 ++ * making the calibration conditional on that, use a polling based
419 ++ * approach everywhere.
420 ++ */
421 + local_irq_disable();
422 +
423 +- /* Replace the global interrupt handler */
424 +- real_handler = global_clock_event->event_handler;
425 +- global_clock_event->event_handler = lapic_cal_handler;
426 +-
427 + /*
428 + * Setup the APIC counter to maximum. There is no way the lapic
429 + * can underflow in the 100ms detection time frame
430 + */
431 + __setup_APIC_LVTT(0xffffffff, 0, 0);
432 +
433 +- /* Let the interrupts run */
434 ++ /*
435 ++ * Methods to terminate the calibration loop:
436 ++ * 1) Global clockevent if available (jiffies)
437 ++ * 2) TSC if available and frequency is known
438 ++ */
439 ++ jif_start = READ_ONCE(jiffies);
440 ++
441 ++ if (tsc_khz) {
442 ++ tsc_start = rdtsc();
443 ++ tsc_perj = div_u64((u64)tsc_khz * 1000, HZ);
444 ++ }
445 ++
446 ++ /*
447 ++ * Enable interrupts so the tick can fire, if a global
448 ++ * clockevent device is available
449 ++ */
450 + local_irq_enable();
451 +
452 +- while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
453 +- cpu_relax();
454 ++ while (lapic_cal_loops <= LAPIC_CAL_LOOPS) {
455 ++ /* Wait for a tick to elapse */
456 ++ while (1) {
457 ++ if (tsc_khz) {
458 ++ u64 tsc_now = rdtsc();
459 ++ if ((tsc_now - tsc_start) >= tsc_perj) {
460 ++ tsc_start += tsc_perj;
461 ++ break;
462 ++ }
463 ++ } else {
464 ++ unsigned long jif_now = READ_ONCE(jiffies);
465 +
466 +- local_irq_disable();
467 ++ if (time_after(jif_now, jif_start)) {
468 ++ jif_start = jif_now;
469 ++ break;
470 ++ }
471 ++ }
472 ++ cpu_relax();
473 ++ }
474 +
475 +- /* Restore the real event handler */
476 +- global_clock_event->event_handler = real_handler;
477 ++ /* Invoke the calibration routine */
478 ++ local_irq_disable();
479 ++ lapic_cal_handler(NULL);
480 ++ local_irq_enable();
481 ++ }
482 ++
483 ++ local_irq_disable();
484 +
485 + /* Build delta t1-t2 as apic timer counts down */
486 + delta = lapic_cal_t1 - lapic_cal_t2;
487 +@@ -904,10 +941,11 @@ static int __init calibrate_APIC_clock(void)
488 + levt->features &= ~CLOCK_EVT_FEAT_DUMMY;
489 +
490 + /*
491 +- * PM timer calibration failed or not turned on
492 +- * so lets try APIC timer based calibration
493 ++ * PM timer calibration failed or not turned on so lets try APIC
494 ++ * timer based calibration, if a global clockevent device is
495 ++ * available.
496 + */
497 +- if (!pm_referenced) {
498 ++ if (!pm_referenced && global_clock_event) {
499 + apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
500 +
501 + /*
502 +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
503 +index da1f5e78363e..f86f912ce215 100644
504 +--- a/arch/x86/kernel/cpu/amd.c
505 ++++ b/arch/x86/kernel/cpu/amd.c
506 +@@ -799,6 +799,64 @@ static void init_amd_ln(struct cpuinfo_x86 *c)
507 + msr_set_bit(MSR_AMD64_DE_CFG, 31);
508 + }
509 +
510 ++static bool rdrand_force;
511 ++
512 ++static int __init rdrand_cmdline(char *str)
513 ++{
514 ++ if (!str)
515 ++ return -EINVAL;
516 ++
517 ++ if (!strcmp(str, "force"))
518 ++ rdrand_force = true;
519 ++ else
520 ++ return -EINVAL;
521 ++
522 ++ return 0;
523 ++}
524 ++early_param("rdrand", rdrand_cmdline);
525 ++
526 ++static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
527 ++{
528 ++ /*
529 ++ * Saving of the MSR used to hide the RDRAND support during
530 ++ * suspend/resume is done by arch/x86/power/cpu.c, which is
531 ++ * dependent on CONFIG_PM_SLEEP.
532 ++ */
533 ++ if (!IS_ENABLED(CONFIG_PM_SLEEP))
534 ++ return;
535 ++
536 ++ /*
537 ++ * The nordrand option can clear X86_FEATURE_RDRAND, so check for
538 ++ * RDRAND support using the CPUID function directly.
539 ++ */
540 ++ if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
541 ++ return;
542 ++
543 ++ msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);
544 ++
545 ++ /*
546 ++ * Verify that the CPUID change has occurred in case the kernel is
547 ++ * running virtualized and the hypervisor doesn't support the MSR.
548 ++ */
549 ++ if (cpuid_ecx(1) & BIT(30)) {
550 ++ pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
551 ++ return;
552 ++ }
553 ++
554 ++ clear_cpu_cap(c, X86_FEATURE_RDRAND);
555 ++ pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
556 ++}
557 ++
558 ++static void init_amd_jg(struct cpuinfo_x86 *c)
559 ++{
560 ++ /*
561 ++ * Some BIOS implementations do not restore proper RDRAND support
562 ++ * across suspend and resume. Check on whether to hide the RDRAND
563 ++ * instruction support via CPUID.
564 ++ */
565 ++ clear_rdrand_cpuid_bit(c);
566 ++}
567 ++
568 + static void init_amd_bd(struct cpuinfo_x86 *c)
569 + {
570 + u64 value;
571 +@@ -813,6 +871,13 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
572 + wrmsrl_safe(MSR_F15H_IC_CFG, value);
573 + }
574 + }
575 ++
576 ++ /*
577 ++ * Some BIOS implementations do not restore proper RDRAND support
578 ++ * across suspend and resume. Check on whether to hide the RDRAND
579 ++ * instruction support via CPUID.
580 ++ */
581 ++ clear_rdrand_cpuid_bit(c);
582 + }
583 +
584 + static void init_amd_zn(struct cpuinfo_x86 *c)
585 +@@ -855,6 +920,7 @@ static void init_amd(struct cpuinfo_x86 *c)
586 + case 0x10: init_amd_gh(c); break;
587 + case 0x12: init_amd_ln(c); break;
588 + case 0x15: init_amd_bd(c); break;
589 ++ case 0x16: init_amd_jg(c); break;
590 + case 0x17: init_amd_zn(c); break;
591 + }
592 +
593 +diff --git a/arch/x86/lib/cpu.c b/arch/x86/lib/cpu.c
594 +index 2dd1fe13a37b..19f707992db2 100644
595 +--- a/arch/x86/lib/cpu.c
596 ++++ b/arch/x86/lib/cpu.c
597 +@@ -1,5 +1,6 @@
598 + #include <linux/types.h>
599 + #include <linux/export.h>
600 ++#include <asm/cpu.h>
601 +
602 + unsigned int x86_family(unsigned int sig)
603 + {
604 +diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
605 +index 513ce09e9950..3aa3149df07f 100644
606 +--- a/arch/x86/power/cpu.c
607 ++++ b/arch/x86/power/cpu.c
608 +@@ -13,6 +13,7 @@
609 + #include <linux/smp.h>
610 + #include <linux/perf_event.h>
611 + #include <linux/tboot.h>
612 ++#include <linux/dmi.h>
613 +
614 + #include <asm/pgtable.h>
615 + #include <asm/proto.h>
616 +@@ -24,7 +25,7 @@
617 + #include <asm/debugreg.h>
618 + #include <asm/cpu.h>
619 + #include <asm/mmu_context.h>
620 +-#include <linux/dmi.h>
621 ++#include <asm/cpu_device_id.h>
622 +
623 + #ifdef CONFIG_X86_32
624 + __visible unsigned long saved_context_ebx;
625 +@@ -398,15 +399,14 @@ static int __init bsp_pm_check_init(void)
626 +
627 + core_initcall(bsp_pm_check_init);
628 +
629 +-static int msr_init_context(const u32 *msr_id, const int total_num)
630 ++static int msr_build_context(const u32 *msr_id, const int num)
631 + {
632 +- int i = 0;
633 ++ struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
634 + struct saved_msr *msr_array;
635 ++ int total_num;
636 ++ int i, j;
637 +
638 +- if (saved_context.saved_msrs.array || saved_context.saved_msrs.num > 0) {
639 +- pr_err("x86/pm: MSR quirk already applied, please check your DMI match table.\n");
640 +- return -EINVAL;
641 +- }
642 ++ total_num = saved_msrs->num + num;
643 +
644 + msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
645 + if (!msr_array) {
646 +@@ -414,19 +414,30 @@ static int msr_init_context(const u32 *msr_id, const int total_num)
647 + return -ENOMEM;
648 + }
649 +
650 +- for (i = 0; i < total_num; i++) {
651 +- msr_array[i].info.msr_no = msr_id[i];
652 ++ if (saved_msrs->array) {
653 ++ /*
654 ++ * Multiple callbacks can invoke this function, so copy any
655 ++ * MSR save requests from previous invocations.
656 ++ */
657 ++ memcpy(msr_array, saved_msrs->array,
658 ++ sizeof(struct saved_msr) * saved_msrs->num);
659 ++
660 ++ kfree(saved_msrs->array);
661 ++ }
662 ++
663 ++ for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
664 ++ msr_array[i].info.msr_no = msr_id[j];
665 + msr_array[i].valid = false;
666 + msr_array[i].info.reg.q = 0;
667 + }
668 +- saved_context.saved_msrs.num = total_num;
669 +- saved_context.saved_msrs.array = msr_array;
670 ++ saved_msrs->num = total_num;
671 ++ saved_msrs->array = msr_array;
672 +
673 + return 0;
674 + }
675 +
676 + /*
677 +- * The following section is a quirk framework for problematic BIOSen:
678 ++ * The following sections are a quirk framework for problematic BIOSen:
679 + * Sometimes MSRs are modified by the BIOSen after suspended to
680 + * RAM, this might cause unexpected behavior after wakeup.
681 + * Thus we save/restore these specified MSRs across suspend/resume
682 +@@ -441,7 +452,7 @@ static int msr_initialize_bdw(const struct dmi_system_id *d)
683 + u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };
684 +
685 + pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
686 +- return msr_init_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
687 ++ return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
688 + }
689 +
690 + static const struct dmi_system_id msr_save_dmi_table[] = {
691 +@@ -456,9 +467,58 @@ static const struct dmi_system_id msr_save_dmi_table[] = {
692 + {}
693 + };
694 +
695 ++static int msr_save_cpuid_features(const struct x86_cpu_id *c)
696 ++{
697 ++ u32 cpuid_msr_id[] = {
698 ++ MSR_AMD64_CPUID_FN_1,
699 ++ };
700 ++
701 ++ pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
702 ++ c->family);
703 ++
704 ++ return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
705 ++}
706 ++
707 ++static const struct x86_cpu_id msr_save_cpu_table[] = {
708 ++ {
709 ++ .vendor = X86_VENDOR_AMD,
710 ++ .family = 0x15,
711 ++ .model = X86_MODEL_ANY,
712 ++ .feature = X86_FEATURE_ANY,
713 ++ .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
714 ++ },
715 ++ {
716 ++ .vendor = X86_VENDOR_AMD,
717 ++ .family = 0x16,
718 ++ .model = X86_MODEL_ANY,
719 ++ .feature = X86_FEATURE_ANY,
720 ++ .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
721 ++ },
722 ++ {}
723 ++};
724 ++
725 ++typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
726 ++static int pm_cpu_check(const struct x86_cpu_id *c)
727 ++{
728 ++ const struct x86_cpu_id *m;
729 ++ int ret = 0;
730 ++
731 ++ m = x86_match_cpu(msr_save_cpu_table);
732 ++ if (m) {
733 ++ pm_cpu_match_t fn;
734 ++
735 ++ fn = (pm_cpu_match_t)m->driver_data;
736 ++ ret = fn(m);
737 ++ }
738 ++
739 ++ return ret;
740 ++}
741 ++
742 + static int pm_check_save_msr(void)
743 + {
744 + dmi_check_system(msr_save_dmi_table);
745 ++ pm_cpu_check(msr_save_cpu_table);
746 ++
747 + return 0;
748 + }
749 +
750 +diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
751 +index becd793a258c..d8d2ac294b0c 100644
752 +--- a/block/bfq-iosched.c
753 ++++ b/block/bfq-iosched.c
754 +@@ -1886,9 +1886,14 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
755 + blk_rq_pos(container_of(rb_prev(&req->rb_node),
756 + struct request, rb_node))) {
757 + struct bfq_queue *bfqq = bfq_init_rq(req);
758 +- struct bfq_data *bfqd = bfqq->bfqd;
759 ++ struct bfq_data *bfqd;
760 + struct request *prev, *next_rq;
761 +
762 ++ if (!bfqq)
763 ++ return;
764 ++
765 ++ bfqd = bfqq->bfqd;
766 ++
767 + /* Reposition request in its sort_list */
768 + elv_rb_del(&bfqq->sort_list, req);
769 + elv_rb_add(&bfqq->sort_list, req);
770 +@@ -1930,6 +1935,9 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
771 + struct bfq_queue *bfqq = bfq_init_rq(rq),
772 + *next_bfqq = bfq_init_rq(next);
773 +
774 ++ if (!bfqq)
775 ++ return;
776 ++
777 + /*
778 + * If next and rq belong to the same bfq_queue and next is older
779 + * than rq, then reposition rq in the fifo (by substituting next
780 +@@ -4590,12 +4598,12 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
781 +
782 + spin_lock_irq(&bfqd->lock);
783 + bfqq = bfq_init_rq(rq);
784 +- if (at_head || blk_rq_is_passthrough(rq)) {
785 ++ if (!bfqq || at_head || blk_rq_is_passthrough(rq)) {
786 + if (at_head)
787 + list_add(&rq->queuelist, &bfqd->dispatch);
788 + else
789 + list_add_tail(&rq->queuelist, &bfqd->dispatch);
790 +- } else { /* bfqq is assumed to be non null here */
791 ++ } else {
792 + idle_timer_disabled = __bfq_insert_request(bfqd, rq);
793 + /*
794 + * Update bfqq, because, if a queue merge has occurred
795 +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
796 +index 1984fc78c750..3a64fa4aaf7e 100644
797 +--- a/drivers/ata/libata-scsi.c
798 ++++ b/drivers/ata/libata-scsi.c
799 +@@ -1803,6 +1803,21 @@ nothing_to_do:
800 + return 1;
801 + }
802 +
803 ++static bool ata_check_nblocks(struct scsi_cmnd *scmd, u32 n_blocks)
804 ++{
805 ++ struct request *rq = scmd->request;
806 ++ u32 req_blocks;
807 ++
808 ++ if (!blk_rq_is_passthrough(rq))
809 ++ return true;
810 ++
811 ++ req_blocks = blk_rq_bytes(rq) / scmd->device->sector_size;
812 ++ if (n_blocks > req_blocks)
813 ++ return false;
814 ++
815 ++ return true;
816 ++}
817 ++
818 + /**
819 + * ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
820 + * @qc: Storage for translated ATA taskfile
821 +@@ -1847,6 +1862,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
822 + scsi_10_lba_len(cdb, &block, &n_block);
823 + if (cdb[1] & (1 << 3))
824 + tf_flags |= ATA_TFLAG_FUA;
825 ++ if (!ata_check_nblocks(scmd, n_block))
826 ++ goto invalid_fld;
827 + break;
828 + case READ_6:
829 + case WRITE_6:
830 +@@ -1861,6 +1878,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
831 + */
832 + if (!n_block)
833 + n_block = 256;
834 ++ if (!ata_check_nblocks(scmd, n_block))
835 ++ goto invalid_fld;
836 + break;
837 + case READ_16:
838 + case WRITE_16:
839 +@@ -1871,6 +1890,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
840 + scsi_16_lba_len(cdb, &block, &n_block);
841 + if (cdb[1] & (1 << 3))
842 + tf_flags |= ATA_TFLAG_FUA;
843 ++ if (!ata_check_nblocks(scmd, n_block))
844 ++ goto invalid_fld;
845 + break;
846 + default:
847 + DPRINTK("no-byte command\n");
848 +diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
849 +index c5ea0fc635e5..873cc0906055 100644
850 +--- a/drivers/ata/libata-sff.c
851 ++++ b/drivers/ata/libata-sff.c
852 +@@ -674,6 +674,10 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
853 + unsigned int offset;
854 + unsigned char *buf;
855 +
856 ++ if (!qc->cursg) {
857 ++ qc->curbytes = qc->nbytes;
858 ++ return;
859 ++ }
860 + if (qc->curbytes == qc->nbytes - qc->sect_size)
861 + ap->hsm_task_state = HSM_ST_LAST;
862 +
863 +@@ -699,6 +703,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
864 +
865 + if (qc->cursg_ofs == qc->cursg->length) {
866 + qc->cursg = sg_next(qc->cursg);
867 ++ if (!qc->cursg)
868 ++ ap->hsm_task_state = HSM_ST_LAST;
869 + qc->cursg_ofs = 0;
870 + }
871 + }
872 +diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c
873 +index 568f59b58ddf..e7c877d354c7 100644
874 +--- a/drivers/clk/socfpga/clk-periph-s10.c
875 ++++ b/drivers/clk/socfpga/clk-periph-s10.c
876 +@@ -37,7 +37,7 @@ static unsigned long clk_peri_cnt_clk_recalc_rate(struct clk_hw *hwclk,
877 + if (socfpgaclk->fixed_div) {
878 + div = socfpgaclk->fixed_div;
879 + } else {
880 +- if (!socfpgaclk->bypass_reg)
881 ++ if (socfpgaclk->hw.reg)
882 + div = ((readl(socfpgaclk->hw.reg) & 0x7ff) + 1);
883 + }
884 +
885 +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
886 +index b308ce92685d..53395852f012 100644
887 +--- a/drivers/gpio/gpiolib.c
888 ++++ b/drivers/gpio/gpiolib.c
889 +@@ -1082,9 +1082,11 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
890 + if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
891 + lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW;
892 + if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
893 +- lineinfo.flags |= GPIOLINE_FLAG_OPEN_DRAIN;
894 ++ lineinfo.flags |= (GPIOLINE_FLAG_OPEN_DRAIN |
895 ++ GPIOLINE_FLAG_IS_OUT);
896 + if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
897 +- lineinfo.flags |= GPIOLINE_FLAG_OPEN_SOURCE;
898 ++ lineinfo.flags |= (GPIOLINE_FLAG_OPEN_SOURCE |
899 ++ GPIOLINE_FLAG_IS_OUT);
900 +
901 + if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
902 + return -EFAULT;
903 +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
904 +index b4e7404fe660..a11637b0f6cc 100644
905 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
906 ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
907 +@@ -40,8 +40,7 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
908 + u8 *ptr = msg->buf;
909 +
910 + while (remaining) {
911 +- u8 cnt = (remaining > 16) ? 16 : remaining;
912 +- u8 cmd;
913 ++ u8 cnt, retries, cmd;
914 +
915 + if (msg->flags & I2C_M_RD)
916 + cmd = 1;
917 +@@ -51,10 +50,19 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
918 + if (mcnt || remaining > 16)
919 + cmd |= 4; /* MOT */
920 +
921 +- ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, &cnt);
922 +- if (ret < 0) {
923 +- nvkm_i2c_aux_release(aux);
924 +- return ret;
925 ++ for (retries = 0, cnt = 0;
926 ++ retries < 32 && !cnt;
927 ++ retries++) {
928 ++ cnt = min_t(u8, remaining, 16);
929 ++ ret = aux->func->xfer(aux, true, cmd,
930 ++ msg->addr, ptr, &cnt);
931 ++ if (ret < 0)
932 ++ goto out;
933 ++ }
934 ++ if (!cnt) {
935 ++ AUX_TRACE(aux, "no data after 32 retries");
936 ++ ret = -EIO;
937 ++ goto out;
938 + }
939 +
940 + ptr += cnt;
941 +@@ -64,8 +72,10 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
942 + msg++;
943 + }
944 +
945 ++ ret = num;
946 ++out:
947 + nvkm_i2c_aux_release(aux);
948 +- return num;
949 ++ return ret;
950 + }
951 +
952 + static u32
953 +diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
954 +index 080f05352195..6a4da3a0ff1c 100644
955 +--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
956 ++++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
957 +@@ -436,7 +436,7 @@ static int rockchip_dp_resume(struct device *dev)
958 +
959 + static const struct dev_pm_ops rockchip_dp_pm_ops = {
960 + #ifdef CONFIG_PM_SLEEP
961 +- .suspend = rockchip_dp_suspend,
962 ++ .suspend_late = rockchip_dp_suspend,
963 + .resume_early = rockchip_dp_resume,
964 + #endif
965 + };
966 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
967 +index e4e09d47c5c0..59e9d05ab928 100644
968 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
969 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
970 +@@ -389,8 +389,10 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
971 + break;
972 + }
973 +
974 +- if (retries == RETRIES)
975 ++ if (retries == RETRIES) {
976 ++ kfree(reply);
977 + return -EINVAL;
978 ++ }
979 +
980 + *msg_len = reply_len;
981 + *msg = reply;
982 +diff --git a/drivers/hid/hid-a4tech.c b/drivers/hid/hid-a4tech.c
983 +index 9428ea7cdf8a..c52bd163abb3 100644
984 +--- a/drivers/hid/hid-a4tech.c
985 ++++ b/drivers/hid/hid-a4tech.c
986 +@@ -26,12 +26,36 @@
987 + #define A4_2WHEEL_MOUSE_HACK_7 0x01
988 + #define A4_2WHEEL_MOUSE_HACK_B8 0x02
989 +
990 ++#define A4_WHEEL_ORIENTATION (HID_UP_GENDESK | 0x000000b8)
991 ++
992 + struct a4tech_sc {
993 + unsigned long quirks;
994 + unsigned int hw_wheel;
995 + __s32 delayed_value;
996 + };
997 +
998 ++static int a4_input_mapping(struct hid_device *hdev, struct hid_input *hi,
999 ++ struct hid_field *field, struct hid_usage *usage,
1000 ++ unsigned long **bit, int *max)
1001 ++{
1002 ++ struct a4tech_sc *a4 = hid_get_drvdata(hdev);
1003 ++
1004 ++ if (a4->quirks & A4_2WHEEL_MOUSE_HACK_B8 &&
1005 ++ usage->hid == A4_WHEEL_ORIENTATION) {
1006 ++ /*
1007 ++ * We do not want to have this usage mapped to anything as it's
1008 ++ * nonstandard and doesn't really behave like an HID report.
1009 ++ * It's only selecting the orientation (vertical/horizontal) of
1010 ++ * the previous mouse wheel report. The input_events will be
1011 ++ * generated once both reports are recorded in a4_event().
1012 ++ */
1013 ++ return -1;
1014 ++ }
1015 ++
1016 ++ return 0;
1017 ++
1018 ++}
1019 ++
1020 + static int a4_input_mapped(struct hid_device *hdev, struct hid_input *hi,
1021 + struct hid_field *field, struct hid_usage *usage,
1022 + unsigned long **bit, int *max)
1023 +@@ -53,8 +77,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
1024 + struct a4tech_sc *a4 = hid_get_drvdata(hdev);
1025 + struct input_dev *input;
1026 +
1027 +- if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput ||
1028 +- !usage->type)
1029 ++ if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput)
1030 + return 0;
1031 +
1032 + input = field->hidinput->input;
1033 +@@ -65,7 +88,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
1034 + return 1;
1035 + }
1036 +
1037 +- if (usage->hid == 0x000100b8) {
1038 ++ if (usage->hid == A4_WHEEL_ORIENTATION) {
1039 + input_event(input, EV_REL, value ? REL_HWHEEL :
1040 + REL_WHEEL, a4->delayed_value);
1041 + return 1;
1042 +@@ -129,6 +152,7 @@ MODULE_DEVICE_TABLE(hid, a4_devices);
1043 + static struct hid_driver a4_driver = {
1044 + .name = "a4tech",
1045 + .id_table = a4_devices,
1046 ++ .input_mapping = a4_input_mapping,
1047 + .input_mapped = a4_input_mapped,
1048 + .event = a4_event,
1049 + .probe = a4_probe,
1050 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
1051 +index 2898bb061945..4a2fa57ddcb8 100644
1052 +--- a/drivers/hid/hid-ids.h
1053 ++++ b/drivers/hid/hid-ids.h
1054 +@@ -971,6 +971,7 @@
1055 + #define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7
1056 + #define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa
1057 + #define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0
1058 ++#define USB_DEVICE_ID_SAITEK_X52 0x075c
1059 +
1060 + #define USB_VENDOR_ID_SAMSUNG 0x0419
1061 + #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001
1062 +diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
1063 +index d29c7c9cd185..e553f6fae7a4 100644
1064 +--- a/drivers/hid/hid-quirks.c
1065 ++++ b/drivers/hid/hid-quirks.c
1066 +@@ -143,6 +143,7 @@ static const struct hid_device_id hid_quirks[] = {
1067 + { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPAD), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
1068 + { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
1069 + { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD },
1070 ++ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
1071 + { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS },
1072 + { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS },
1073 + { HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET },
1074 +diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
1075 +index bea8def64f43..30b8c3256c99 100644
1076 +--- a/drivers/hid/hid-tmff.c
1077 ++++ b/drivers/hid/hid-tmff.c
1078 +@@ -34,6 +34,8 @@
1079 +
1080 + #include "hid-ids.h"
1081 +
1082 ++#define THRUSTMASTER_DEVICE_ID_2_IN_1_DT 0xb320
1083 ++
1084 + static const signed short ff_rumble[] = {
1085 + FF_RUMBLE,
1086 + -1
1087 +@@ -88,6 +90,7 @@ static int tmff_play(struct input_dev *dev, void *data,
1088 + struct hid_field *ff_field = tmff->ff_field;
1089 + int x, y;
1090 + int left, right; /* Rumbling */
1091 ++ int motor_swap;
1092 +
1093 + switch (effect->type) {
1094 + case FF_CONSTANT:
1095 +@@ -112,6 +115,13 @@ static int tmff_play(struct input_dev *dev, void *data,
1096 + ff_field->logical_minimum,
1097 + ff_field->logical_maximum);
1098 +
1099 ++ /* 2-in-1 strong motor is left */
1100 ++ if (hid->product == THRUSTMASTER_DEVICE_ID_2_IN_1_DT) {
1101 ++ motor_swap = left;
1102 ++ left = right;
1103 ++ right = motor_swap;
1104 ++ }
1105 ++
1106 + dbg_hid("(left,right)=(%08x, %08x)\n", left, right);
1107 + ff_field->value[0] = left;
1108 + ff_field->value[1] = right;
1109 +@@ -238,6 +248,8 @@ static const struct hid_device_id tm_devices[] = {
1110 + .driver_data = (unsigned long)ff_rumble },
1111 + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304), /* FireStorm Dual Power 2 (and 3) */
1112 + .driver_data = (unsigned long)ff_rumble },
1113 ++ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, THRUSTMASTER_DEVICE_ID_2_IN_1_DT), /* Dual Trigger 2-in-1 */
1114 ++ .driver_data = (unsigned long)ff_rumble },
1115 + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323), /* Dual Trigger 3-in-1 (PC Mode) */
1116 + .driver_data = (unsigned long)ff_rumble },
1117 + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324), /* Dual Trigger 3-in-1 (PS3 Mode) */
1118 +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
1119 +index e56dc97fe4b6..50ef7b6cd195 100644
1120 +--- a/drivers/hid/wacom_wac.c
1121 ++++ b/drivers/hid/wacom_wac.c
1122 +@@ -848,6 +848,8 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
1123 + y >>= 1;
1124 + distance >>= 1;
1125 + }
1126 ++ if (features->type == INTUOSHT2)
1127 ++ distance = features->distance_max - distance;
1128 + input_report_abs(input, ABS_X, x);
1129 + input_report_abs(input, ABS_Y, y);
1130 + input_report_abs(input, ABS_DISTANCE, distance);
1131 +@@ -1061,7 +1063,7 @@ static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len)
1132 + input_report_key(input, BTN_BASE2, (data[11] & 0x02));
1133 +
1134 + if (data[12] & 0x80)
1135 +- input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f));
1136 ++ input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f) - 1);
1137 + else
1138 + input_report_abs(input, ABS_WHEEL, 0);
1139 +
1140 +diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
1141 +index 2f164bd74687..fdb0f832fade 100644
1142 +--- a/drivers/hv/channel.c
1143 ++++ b/drivers/hv/channel.c
1144 +@@ -38,7 +38,7 @@
1145 +
1146 + static unsigned long virt_to_hvpfn(void *addr)
1147 + {
1148 +- unsigned long paddr;
1149 ++ phys_addr_t paddr;
1150 +
1151 + if (is_vmalloc_addr(addr))
1152 + paddr = page_to_phys(vmalloc_to_page(addr)) +
1153 +diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
1154 +index 060dc7fd66c1..c952002c6301 100644
1155 +--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
1156 ++++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
1157 +@@ -1406,6 +1406,7 @@ start_isoc_chain(struct usb_fifo *fifo, int num_packets_per_urb,
1158 + printk(KERN_DEBUG
1159 + "%s: %s: alloc urb for fifo %i failed",
1160 + hw->name, __func__, fifo->fifonum);
1161 ++ continue;
1162 + }
1163 + fifo->iso[i].owner_fifo = (struct usb_fifo *) fifo;
1164 + fifo->iso[i].indx = i;
1165 +@@ -1704,13 +1705,23 @@ hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel)
1166 + static int
1167 + setup_hfcsusb(struct hfcsusb *hw)
1168 + {
1169 ++ void *dmabuf = kmalloc(sizeof(u_char), GFP_KERNEL);
1170 + u_char b;
1171 ++ int ret;
1172 +
1173 + if (debug & DBG_HFC_CALL_TRACE)
1174 + printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
1175 +
1176 ++ if (!dmabuf)
1177 ++ return -ENOMEM;
1178 ++
1179 ++ ret = read_reg_atomic(hw, HFCUSB_CHIP_ID, dmabuf);
1180 ++
1181 ++ memcpy(&b, dmabuf, sizeof(u_char));
1182 ++ kfree(dmabuf);
1183 ++
1184 + /* check the chip id */
1185 +- if (read_reg_atomic(hw, HFCUSB_CHIP_ID, &b) != 1) {
1186 ++ if (ret != 1) {
1187 + printk(KERN_DEBUG "%s: %s: cannot read chip id\n",
1188 + hw->name, __func__);
1189 + return 1;
1190 +diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
1191 +index b1d0ae2dbd3d..dc385b70e4c3 100644
1192 +--- a/drivers/md/dm-bufio.c
1193 ++++ b/drivers/md/dm-bufio.c
1194 +@@ -1602,7 +1602,9 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1195 + unsigned long freed;
1196 +
1197 + c = container_of(shrink, struct dm_bufio_client, shrinker);
1198 +- if (!dm_bufio_trylock(c))
1199 ++ if (sc->gfp_mask & __GFP_FS)
1200 ++ dm_bufio_lock(c);
1201 ++ else if (!dm_bufio_trylock(c))
1202 + return SHRINK_STOP;
1203 +
1204 + freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
1205 +diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
1206 +index dbdcc543832d..2e22d588f056 100644
1207 +--- a/drivers/md/dm-integrity.c
1208 ++++ b/drivers/md/dm-integrity.c
1209 +@@ -1749,7 +1749,22 @@ offload_to_thread:
1210 + queue_work(ic->wait_wq, &dio->work);
1211 + return;
1212 + }
1213 ++ if (journal_read_pos != NOT_FOUND)
1214 ++ dio->range.n_sectors = ic->sectors_per_block;
1215 + wait_and_add_new_range(ic, &dio->range);
1216 ++ /*
1217 ++ * wait_and_add_new_range drops the spinlock, so the journal
1218 ++ * may have been changed arbitrarily. We need to recheck.
1219 ++ * To simplify the code, we restrict I/O size to just one block.
1220 ++ */
1221 ++ if (journal_read_pos != NOT_FOUND) {
1222 ++ sector_t next_sector;
1223 ++ unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
1224 ++ if (unlikely(new_pos != journal_read_pos)) {
1225 ++ remove_range_unlocked(ic, &dio->range);
1226 ++ goto retry;
1227 ++ }
1228 ++ }
1229 + }
1230 + spin_unlock_irq(&ic->endio_wait.lock);
1231 +
1232 +diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
1233 +index 671c24332802..3f694d9061ec 100644
1234 +--- a/drivers/md/dm-kcopyd.c
1235 ++++ b/drivers/md/dm-kcopyd.c
1236 +@@ -548,8 +548,10 @@ static int run_io_job(struct kcopyd_job *job)
1237 + * no point in continuing.
1238 + */
1239 + if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
1240 +- job->master_job->write_err)
1241 ++ job->master_job->write_err) {
1242 ++ job->write_err = job->master_job->write_err;
1243 + return -EIO;
1244 ++ }
1245 +
1246 + io_job_start(job->kc->throttle);
1247 +
1248 +@@ -601,6 +603,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
1249 + else
1250 + job->read_err = 1;
1251 + push(&kc->complete_jobs, job);
1252 ++ wake(kc);
1253 + break;
1254 + }
1255 +
1256 +diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
1257 +index c44925e4e481..b78a8a4d061c 100644
1258 +--- a/drivers/md/dm-raid.c
1259 ++++ b/drivers/md/dm-raid.c
1260 +@@ -3199,7 +3199,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1261 + */
1262 + r = rs_prepare_reshape(rs);
1263 + if (r)
1264 +- return r;
1265 ++ goto bad;
1266 +
1267 + /* Reshaping ain't recovery, so disable recovery */
1268 + rs_setup_recovery(rs, MaxSector);
1269 +diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
1270 +index 34ab30dd5de9..36275c59e4e7 100644
1271 +--- a/drivers/md/dm-table.c
1272 ++++ b/drivers/md/dm-table.c
1273 +@@ -1349,7 +1349,7 @@ void dm_table_event(struct dm_table *t)
1274 + }
1275 + EXPORT_SYMBOL(dm_table_event);
1276 +
1277 +-sector_t dm_table_get_size(struct dm_table *t)
1278 ++inline sector_t dm_table_get_size(struct dm_table *t)
1279 + {
1280 + return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
1281 + }
1282 +@@ -1374,6 +1374,9 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
1283 + unsigned int l, n = 0, k = 0;
1284 + sector_t *node;
1285 +
1286 ++ if (unlikely(sector >= dm_table_get_size(t)))
1287 ++ return &t->targets[t->num_targets];
1288 ++
1289 + for (l = 0; l < t->depth; l++) {
1290 + n = get_child(n, k);
1291 + node = get_node(t, l, n);
1292 +diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
1293 +index 4cdde7a02e94..7e8d7fc99410 100644
1294 +--- a/drivers/md/dm-zoned-metadata.c
1295 ++++ b/drivers/md/dm-zoned-metadata.c
1296 +@@ -401,15 +401,18 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
1297 + sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
1298 + struct bio *bio;
1299 +
1300 ++ if (dmz_bdev_is_dying(zmd->dev))
1301 ++ return ERR_PTR(-EIO);
1302 ++
1303 + /* Get a new block and a BIO to read it */
1304 + mblk = dmz_alloc_mblock(zmd, mblk_no);
1305 + if (!mblk)
1306 +- return NULL;
1307 ++ return ERR_PTR(-ENOMEM);
1308 +
1309 + bio = bio_alloc(GFP_NOIO, 1);
1310 + if (!bio) {
1311 + dmz_free_mblock(zmd, mblk);
1312 +- return NULL;
1313 ++ return ERR_PTR(-ENOMEM);
1314 + }
1315 +
1316 + spin_lock(&zmd->mblk_lock);
1317 +@@ -540,8 +543,8 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
1318 + if (!mblk) {
1319 + /* Cache miss: read the block from disk */
1320 + mblk = dmz_get_mblock_slow(zmd, mblk_no);
1321 +- if (!mblk)
1322 +- return ERR_PTR(-ENOMEM);
1323 ++ if (IS_ERR(mblk))
1324 ++ return mblk;
1325 + }
1326 +
1327 + /* Wait for on-going read I/O and check for error */
1328 +@@ -569,16 +572,19 @@ static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
1329 + /*
1330 + * Issue a metadata block write BIO.
1331 + */
1332 +-static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
1333 +- unsigned int set)
1334 ++static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
1335 ++ unsigned int set)
1336 + {
1337 + sector_t block = zmd->sb[set].block + mblk->no;
1338 + struct bio *bio;
1339 +
1340 ++ if (dmz_bdev_is_dying(zmd->dev))
1341 ++ return -EIO;
1342 ++
1343 + bio = bio_alloc(GFP_NOIO, 1);
1344 + if (!bio) {
1345 + set_bit(DMZ_META_ERROR, &mblk->state);
1346 +- return;
1347 ++ return -ENOMEM;
1348 + }
1349 +
1350 + set_bit(DMZ_META_WRITING, &mblk->state);
1351 +@@ -590,6 +596,8 @@ static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
1352 + bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
1353 + bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
1354 + submit_bio(bio);
1355 ++
1356 ++ return 0;
1357 + }
1358 +
1359 + /*
1360 +@@ -601,6 +609,9 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
1361 + struct bio *bio;
1362 + int ret;
1363 +
1364 ++ if (dmz_bdev_is_dying(zmd->dev))
1365 ++ return -EIO;
1366 ++
1367 + bio = bio_alloc(GFP_NOIO, 1);
1368 + if (!bio)
1369 + return -ENOMEM;
1370 +@@ -658,22 +669,29 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
1371 + {
1372 + struct dmz_mblock *mblk;
1373 + struct blk_plug plug;
1374 +- int ret = 0;
1375 ++ int ret = 0, nr_mblks_submitted = 0;
1376 +
1377 + /* Issue writes */
1378 + blk_start_plug(&plug);
1379 +- list_for_each_entry(mblk, write_list, link)
1380 +- dmz_write_mblock(zmd, mblk, set);
1381 ++ list_for_each_entry(mblk, write_list, link) {
1382 ++ ret = dmz_write_mblock(zmd, mblk, set);
1383 ++ if (ret)
1384 ++ break;
1385 ++ nr_mblks_submitted++;
1386 ++ }
1387 + blk_finish_plug(&plug);
1388 +
1389 + /* Wait for completion */
1390 + list_for_each_entry(mblk, write_list, link) {
1391 ++ if (!nr_mblks_submitted)
1392 ++ break;
1393 + wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
1394 + TASK_UNINTERRUPTIBLE);
1395 + if (test_bit(DMZ_META_ERROR, &mblk->state)) {
1396 + clear_bit(DMZ_META_ERROR, &mblk->state);
1397 + ret = -EIO;
1398 + }
1399 ++ nr_mblks_submitted--;
1400 + }
1401 +
1402 + /* Flush drive cache (this will also sync data) */
1403 +@@ -735,6 +753,11 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
1404 + */
1405 + dmz_lock_flush(zmd);
1406 +
1407 ++ if (dmz_bdev_is_dying(zmd->dev)) {
1408 ++ ret = -EIO;
1409 ++ goto out;
1410 ++ }
1411 ++
1412 + /* Get dirty blocks */
1413 + spin_lock(&zmd->mblk_lock);
1414 + list_splice_init(&zmd->mblk_dirty_list, &write_list);
1415 +@@ -1534,7 +1557,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
1416 + struct dm_zone *zone;
1417 +
1418 + if (list_empty(&zmd->map_rnd_list))
1419 +- return NULL;
1420 ++ return ERR_PTR(-EBUSY);
1421 +
1422 + list_for_each_entry(zone, &zmd->map_rnd_list, link) {
1423 + if (dmz_is_buf(zone))
1424 +@@ -1545,7 +1568,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
1425 + return dzone;
1426 + }
1427 +
1428 +- return NULL;
1429 ++ return ERR_PTR(-EBUSY);
1430 + }
1431 +
1432 + /*
1433 +@@ -1556,7 +1579,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
1434 + struct dm_zone *zone;
1435 +
1436 + if (list_empty(&zmd->map_seq_list))
1437 +- return NULL;
1438 ++ return ERR_PTR(-EBUSY);
1439 +
1440 + list_for_each_entry(zone, &zmd->map_seq_list, link) {
1441 + if (!zone->bzone)
1442 +@@ -1565,7 +1588,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
1443 + return zone;
1444 + }
1445 +
1446 +- return NULL;
1447 ++ return ERR_PTR(-EBUSY);
1448 + }
1449 +
1450 + /*
1451 +@@ -1623,6 +1646,10 @@ again:
1452 + /* Alloate a random zone */
1453 + dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
1454 + if (!dzone) {
1455 ++ if (dmz_bdev_is_dying(zmd->dev)) {
1456 ++ dzone = ERR_PTR(-EIO);
1457 ++ goto out;
1458 ++ }
1459 + dmz_wait_for_free_zones(zmd);
1460 + goto again;
1461 + }
1462 +@@ -1720,6 +1747,10 @@ again:
1463 + /* Alloate a random zone */
1464 + bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
1465 + if (!bzone) {
1466 ++ if (dmz_bdev_is_dying(zmd->dev)) {
1467 ++ bzone = ERR_PTR(-EIO);
1468 ++ goto out;
1469 ++ }
1470 + dmz_wait_for_free_zones(zmd);
1471 + goto again;
1472 + }
1473 +diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c
1474 +index edf4b95eb075..9470b8f77a33 100644
1475 +--- a/drivers/md/dm-zoned-reclaim.c
1476 ++++ b/drivers/md/dm-zoned-reclaim.c
1477 +@@ -37,7 +37,7 @@ enum {
1478 + /*
1479 + * Number of seconds of target BIO inactivity to consider the target idle.
1480 + */
1481 +-#define DMZ_IDLE_PERIOD (10UL * HZ)
1482 ++#define DMZ_IDLE_PERIOD (10UL * HZ)
1483 +
1484 + /*
1485 + * Percentage of unmapped (free) random zones below which reclaim starts
1486 +@@ -134,6 +134,9 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
1487 + set_bit(DM_KCOPYD_WRITE_SEQ, &flags);
1488 +
1489 + while (block < end_block) {
1490 ++ if (dev->flags & DMZ_BDEV_DYING)
1491 ++ return -EIO;
1492 ++
1493 + /* Get a valid region from the source zone */
1494 + ret = dmz_first_valid_block(zmd, src_zone, &block);
1495 + if (ret <= 0)
1496 +@@ -215,7 +218,7 @@ static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
1497 +
1498 + dmz_unlock_flush(zmd);
1499 +
1500 +- return 0;
1501 ++ return ret;
1502 + }
1503 +
1504 + /*
1505 +@@ -259,7 +262,7 @@ static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
1506 +
1507 + dmz_unlock_flush(zmd);
1508 +
1509 +- return 0;
1510 ++ return ret;
1511 + }
1512 +
1513 + /*
1514 +@@ -312,7 +315,7 @@ static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
1515 +
1516 + dmz_unlock_flush(zmd);
1517 +
1518 +- return 0;
1519 ++ return ret;
1520 + }
1521 +
1522 + /*
1523 +@@ -334,7 +337,7 @@ static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
1524 + /*
1525 + * Find a candidate zone for reclaim and process it.
1526 + */
1527 +-static void dmz_reclaim(struct dmz_reclaim *zrc)
1528 ++static int dmz_do_reclaim(struct dmz_reclaim *zrc)
1529 + {
1530 + struct dmz_metadata *zmd = zrc->metadata;
1531 + struct dm_zone *dzone;
1532 +@@ -344,8 +347,8 @@ static void dmz_reclaim(struct dmz_reclaim *zrc)
1533 +
1534 + /* Get a data zone */
1535 + dzone = dmz_get_zone_for_reclaim(zmd);
1536 +- if (!dzone)
1537 +- return;
1538 ++ if (IS_ERR(dzone))
1539 ++ return PTR_ERR(dzone);
1540 +
1541 + start = jiffies;
1542 +
1543 +@@ -391,13 +394,20 @@ static void dmz_reclaim(struct dmz_reclaim *zrc)
1544 + out:
1545 + if (ret) {
1546 + dmz_unlock_zone_reclaim(dzone);
1547 +- return;
1548 ++ return ret;
1549 + }
1550 +
1551 +- (void) dmz_flush_metadata(zrc->metadata);
1552 ++ ret = dmz_flush_metadata(zrc->metadata);
1553 ++ if (ret) {
1554 ++ dmz_dev_debug(zrc->dev,
1555 ++ "Metadata flush for zone %u failed, err %d\n",
1556 ++ dmz_id(zmd, rzone), ret);
1557 ++ return ret;
1558 ++ }
1559 +
1560 + dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms",
1561 + dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start));
1562 ++ return 0;
1563 + }
1564 +
1565 + /*
1566 +@@ -442,6 +452,10 @@ static void dmz_reclaim_work(struct work_struct *work)
1567 + struct dmz_metadata *zmd = zrc->metadata;
1568 + unsigned int nr_rnd, nr_unmap_rnd;
1569 + unsigned int p_unmap_rnd;
1570 ++ int ret;
1571 ++
1572 ++ if (dmz_bdev_is_dying(zrc->dev))
1573 ++ return;
1574 +
1575 + if (!dmz_should_reclaim(zrc)) {
1576 + mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
1577 +@@ -471,7 +485,17 @@ static void dmz_reclaim_work(struct work_struct *work)
1578 + (dmz_target_idle(zrc) ? "Idle" : "Busy"),
1579 + p_unmap_rnd, nr_unmap_rnd, nr_rnd);
1580 +
1581 +- dmz_reclaim(zrc);
1582 ++ ret = dmz_do_reclaim(zrc);
1583 ++ if (ret) {
1584 ++ dmz_dev_debug(zrc->dev, "Reclaim error %d\n", ret);
1585 ++ if (ret == -EIO)
1586 ++ /*
1587 ++ * LLD might be performing some error handling sequence
1588 ++ * at the underlying device. To not interfere, do not
1589 ++ * attempt to schedule the next reclaim run immediately.
1590 ++ */
1591 ++ return;
1592 ++ }
1593 +
1594 + dmz_schedule_reclaim(zrc);
1595 + }
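[Editor's sketch, not part of the patch] The reclaim hunks above switch dmz_get_zone_for_reclaim() callers from a bare NULL test to IS_ERR()/PTR_ERR(), so a failed lookup reports why it failed. Below is a minimal userspace re-implementation of that pointer-encoded-errno convention; the helpers mirror the usual kernel semantics but are local definitions, and the zone/lookup names are invented for illustration.

    #include <stdio.h>
    #include <errno.h>

    #define MAX_ERRNO 4095

    /* Encode a negative errno value inside a pointer. */
    static inline void *ERR_PTR(long error)
    {
            return (void *)error;
    }

    /* Recover the errno from a pointer produced by ERR_PTR(). */
    static inline long PTR_ERR(const void *ptr)
    {
            return (long)ptr;
    }

    /* True if the pointer lies in the reserved top-of-address-space error range. */
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static int zone_data = 42;

    /* Stand-in for a lookup that can fail for a specific reason. */
    static void *get_zone(int want_error)
    {
            if (want_error)
                    return ERR_PTR(-EBUSY);   /* caller learns why it failed */
            return &zone_data;
    }

    int main(void)
    {
            void *z = get_zone(1);

            if (IS_ERR(z))
                    printf("lookup failed: %ld\n", PTR_ERR(z));
            else
                    printf("got zone %d\n", *(int *)z);
            return 0;
    }

The benefit over returning NULL is visible in dmz_do_reclaim() above: the caller can propagate PTR_ERR(dzone) instead of silently giving up.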
1596 +diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
1597 +index 85fb2baa8a7f..1030c42add05 100644
1598 +--- a/drivers/md/dm-zoned-target.c
1599 ++++ b/drivers/md/dm-zoned-target.c
1600 +@@ -133,6 +133,8 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
1601 +
1602 + atomic_inc(&bioctx->ref);
1603 + generic_make_request(clone);
1604 ++ if (clone->bi_status == BLK_STS_IOERR)
1605 ++ return -EIO;
1606 +
1607 + if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
1608 + zone->wp_block += nr_blocks;
1609 +@@ -277,8 +279,8 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz,
1610 +
1611 + /* Get the buffer zone. One will be allocated if needed */
1612 + bzone = dmz_get_chunk_buffer(zmd, zone);
1613 +- if (!bzone)
1614 +- return -ENOSPC;
1615 ++ if (IS_ERR(bzone))
1616 ++ return PTR_ERR(bzone);
1617 +
1618 + if (dmz_is_readonly(bzone))
1619 + return -EROFS;
1620 +@@ -389,6 +391,11 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
1621 +
1622 + dmz_lock_metadata(zmd);
1623 +
1624 ++ if (dmz->dev->flags & DMZ_BDEV_DYING) {
1625 ++ ret = -EIO;
1626 ++ goto out;
1627 ++ }
1628 ++
1629 + /*
1630 + * Get the data zone mapping the chunk. There may be no
1631 + * mapping for read and discard. If a mapping is obtained,
1632 +@@ -493,6 +500,8 @@ static void dmz_flush_work(struct work_struct *work)
1633 +
1634 + /* Flush dirty metadata blocks */
1635 + ret = dmz_flush_metadata(dmz->metadata);
1636 ++ if (ret)
1637 ++ dmz_dev_debug(dmz->dev, "Metadata flush failed, rc=%d\n", ret);
1638 +
1639 + /* Process queued flush requests */
1640 + while (1) {
1641 +@@ -513,22 +522,24 @@ static void dmz_flush_work(struct work_struct *work)
1642 + * Get a chunk work and start it to process a new BIO.
1643 + * If the BIO chunk has no work yet, create one.
1644 + */
1645 +-static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
1646 ++static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
1647 + {
1648 + unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
1649 + struct dm_chunk_work *cw;
1650 ++ int ret = 0;
1651 +
1652 + mutex_lock(&dmz->chunk_lock);
1653 +
1654 + /* Get the BIO chunk work. If one is not active yet, create one */
1655 + cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
1656 + if (!cw) {
1657 +- int ret;
1658 +
1659 + /* Create a new chunk work */
1660 + cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
1661 +- if (!cw)
1662 ++ if (unlikely(!cw)) {
1663 ++ ret = -ENOMEM;
1664 + goto out;
1665 ++ }
1666 +
1667 + INIT_WORK(&cw->work, dmz_chunk_work);
1668 + atomic_set(&cw->refcount, 0);
1669 +@@ -539,7 +550,6 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
1670 + ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
1671 + if (unlikely(ret)) {
1672 + kfree(cw);
1673 +- cw = NULL;
1674 + goto out;
1675 + }
1676 + }
1677 +@@ -547,10 +557,38 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
1678 + bio_list_add(&cw->bio_list, bio);
1679 + dmz_get_chunk_work(cw);
1680 +
1681 ++ dmz_reclaim_bio_acc(dmz->reclaim);
1682 + if (queue_work(dmz->chunk_wq, &cw->work))
1683 + dmz_get_chunk_work(cw);
1684 + out:
1685 + mutex_unlock(&dmz->chunk_lock);
1686 ++ return ret;
1687 ++}
1688 ++
1689 ++/*
1690 ++ * Check the backing device availability. If it's on the way out,
1691 ++ * start failing I/O. Reclaim and metadata components also call this
1692 ++ * function to cleanly abort operation in the event of such failure.
1693 ++ */
1694 ++bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev)
1695 ++{
1696 ++ struct gendisk *disk;
1697 ++
1698 ++ if (!(dmz_dev->flags & DMZ_BDEV_DYING)) {
1699 ++ disk = dmz_dev->bdev->bd_disk;
1700 ++ if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
1701 ++ dmz_dev_warn(dmz_dev, "Backing device queue dying");
1702 ++ dmz_dev->flags |= DMZ_BDEV_DYING;
1703 ++ } else if (disk->fops->check_events) {
1704 ++ if (disk->fops->check_events(disk, 0) &
1705 ++ DISK_EVENT_MEDIA_CHANGE) {
1706 ++ dmz_dev_warn(dmz_dev, "Backing device offline");
1707 ++ dmz_dev->flags |= DMZ_BDEV_DYING;
1708 ++ }
1709 ++ }
1710 ++ }
1711 ++
1712 ++ return dmz_dev->flags & DMZ_BDEV_DYING;
1713 + }
1714 +
1715 + /*
1716 +@@ -564,6 +602,10 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
1717 + sector_t sector = bio->bi_iter.bi_sector;
1718 + unsigned int nr_sectors = bio_sectors(bio);
1719 + sector_t chunk_sector;
1720 ++ int ret;
1721 ++
1722 ++ if (dmz_bdev_is_dying(dmz->dev))
1723 ++ return DM_MAPIO_KILL;
1724 +
1725 + dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
1726 + bio_op(bio), (unsigned long long)sector, nr_sectors,
1727 +@@ -601,8 +643,14 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
1728 + dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);
1729 +
1730 + /* Now ready to handle this BIO */
1731 +- dmz_reclaim_bio_acc(dmz->reclaim);
1732 +- dmz_queue_chunk_work(dmz, bio);
1733 ++ ret = dmz_queue_chunk_work(dmz, bio);
1734 ++ if (ret) {
1735 ++ dmz_dev_debug(dmz->dev,
1736 ++ "BIO op %d, can't process chunk %llu, err %i\n",
1737 ++ bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio),
1738 ++ ret);
1739 ++ return DM_MAPIO_REQUEUE;
1740 ++ }
1741 +
1742 + return DM_MAPIO_SUBMITTED;
1743 + }
1744 +@@ -856,6 +904,9 @@ static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
1745 + {
1746 + struct dmz_target *dmz = ti->private;
1747 +
1748 ++ if (dmz_bdev_is_dying(dmz->dev))
1749 ++ return -ENODEV;
1750 ++
1751 + *bdev = dmz->dev->bdev;
1752 +
1753 + return 0;
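[Editor's sketch, not part of the patch] dmz_bdev_is_dying() above probes the backing queue once and latches the result in a sticky DMZ_BDEV_DYING flag, so every later entry point can fail fast. A small userspace analogue of that latch-once pattern follows; the struct, field, and error values are invented for the demo and only the shape of the check matches the driver.

    #include <stdio.h>
    #include <stdbool.h>

    #define DEV_DYING (1 << 0)          /* sticky "device is going away" flag */

    struct fake_dev {
            unsigned int flags;
            bool queue_dead;            /* stand-in for the expensive probe */
    };

    /*
     * Probe device health only while the flag is clear; once the backing
     * queue is found dead, latch the flag so later callers fail fast
     * without re-probing.
     */
    static bool dev_is_dying(struct fake_dev *dev)
    {
            if (!(dev->flags & DEV_DYING)) {
                    if (dev->queue_dead) {
                            fprintf(stderr, "backing device is going away\n");
                            dev->flags |= DEV_DYING;
                    }
            }
            return dev->flags & DEV_DYING;
    }

    static int submit_io(struct fake_dev *dev)
    {
            if (dev_is_dying(dev))
                    return -5;          /* refuse new work, like -EIO */
            return 0;
    }

    int main(void)
    {
            struct fake_dev dev = { .flags = 0, .queue_dead = false };

            printf("healthy submit: %d\n", submit_io(&dev));
            dev.queue_dead = true;
            printf("after failure:  %d\n", submit_io(&dev));
            printf("fails fast:     %d\n", submit_io(&dev));
            return 0;
    }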
1754 +diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h
1755 +index ed8de49c9a08..93a64529f219 100644
1756 +--- a/drivers/md/dm-zoned.h
1757 ++++ b/drivers/md/dm-zoned.h
1758 +@@ -56,6 +56,8 @@ struct dmz_dev {
1759 +
1760 + unsigned int nr_zones;
1761 +
1762 ++ unsigned int flags;
1763 ++
1764 + sector_t zone_nr_sectors;
1765 + unsigned int zone_nr_sectors_shift;
1766 +
1767 +@@ -67,6 +69,9 @@ struct dmz_dev {
1768 + (dev)->zone_nr_sectors_shift)
1769 + #define dmz_chunk_block(dev, b) ((b) & ((dev)->zone_nr_blocks - 1))
1770 +
1771 ++/* Device flags. */
1772 ++#define DMZ_BDEV_DYING (1 << 0)
1773 ++
1774 + /*
1775 + * Zone descriptor.
1776 + */
1777 +@@ -245,4 +250,9 @@ void dmz_resume_reclaim(struct dmz_reclaim *zrc);
1778 + void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc);
1779 + void dmz_schedule_reclaim(struct dmz_reclaim *zrc);
1780 +
1781 ++/*
1782 ++ * Functions defined in dm-zoned-target.c
1783 ++ */
1784 ++bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev);
1785 ++
1786 + #endif /* DM_ZONED_H */
1787 +diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
1788 +index 58b319757b1e..8aae0624a297 100644
1789 +--- a/drivers/md/persistent-data/dm-btree.c
1790 ++++ b/drivers/md/persistent-data/dm-btree.c
1791 +@@ -628,39 +628,40 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
1792 +
1793 + new_parent = shadow_current(s);
1794 +
1795 ++ pn = dm_block_data(new_parent);
1796 ++ size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
1797 ++ sizeof(__le64) : s->info->value_type.size;
1798 ++
1799 ++ /* create & init the left block */
1800 + r = new_block(s->info, &left);
1801 + if (r < 0)
1802 + return r;
1803 +
1804 ++ ln = dm_block_data(left);
1805 ++ nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
1806 ++
1807 ++ ln->header.flags = pn->header.flags;
1808 ++ ln->header.nr_entries = cpu_to_le32(nr_left);
1809 ++ ln->header.max_entries = pn->header.max_entries;
1810 ++ ln->header.value_size = pn->header.value_size;
1811 ++ memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
1812 ++ memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
1813 ++
1814 ++ /* create & init the right block */
1815 + r = new_block(s->info, &right);
1816 + if (r < 0) {
1817 + unlock_block(s->info, left);
1818 + return r;
1819 + }
1820 +
1821 +- pn = dm_block_data(new_parent);
1822 +- ln = dm_block_data(left);
1823 + rn = dm_block_data(right);
1824 +-
1825 +- nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
1826 + nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;
1827 +
1828 +- ln->header.flags = pn->header.flags;
1829 +- ln->header.nr_entries = cpu_to_le32(nr_left);
1830 +- ln->header.max_entries = pn->header.max_entries;
1831 +- ln->header.value_size = pn->header.value_size;
1832 +-
1833 + rn->header.flags = pn->header.flags;
1834 + rn->header.nr_entries = cpu_to_le32(nr_right);
1835 + rn->header.max_entries = pn->header.max_entries;
1836 + rn->header.value_size = pn->header.value_size;
1837 +-
1838 +- memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
1839 + memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));
1840 +-
1841 +- size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
1842 +- sizeof(__le64) : s->info->value_type.size;
1843 +- memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
1844 + memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left),
1845 + nr_right * size);
1846 +
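[Editor's sketch, not part of the patch] The dm-btree hunk above reorders btree_split_beneath() so the left child is fully built before the right child is even allocated, which simplifies unwinding if the second allocation fails. The copy arithmetic itself is just "first half left, remainder right"; a tiny standalone illustration with invented array names (not the kernel's node structures):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define NR_KEYS 7

    int main(void)
    {
            uint64_t parent[NR_KEYS] = { 1, 2, 3, 4, 5, 6, 7 };
            uint64_t left[NR_KEYS], right[NR_KEYS];
            size_t nr_left = NR_KEYS / 2;           /* 3 */
            size_t nr_right = NR_KEYS - nr_left;    /* 4 */
            size_t i;

            /* Fully populate the left half first ... */
            memcpy(left, parent, nr_left * sizeof(parent[0]));

            /* ... then the right half receives whatever remains. */
            memcpy(right, parent + nr_left, nr_right * sizeof(parent[0]));

            printf("left: ");
            for (i = 0; i < nr_left; i++)
                    printf("%llu ", (unsigned long long)left[i]);
            printf("\nright:");
            for (i = 0; i < nr_right; i++)
                    printf(" %llu", (unsigned long long)right[i]);
            printf("\n");
            return 0;
    }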
1847 +diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
1848 +index aec449243966..25328582cc48 100644
1849 +--- a/drivers/md/persistent-data/dm-space-map-metadata.c
1850 ++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
1851 +@@ -249,7 +249,7 @@ static int out(struct sm_metadata *smm)
1852 + }
1853 +
1854 + if (smm->recursion_count == 1)
1855 +- apply_bops(smm);
1856 ++ r = apply_bops(smm);
1857 +
1858 + smm->recursion_count--;
1859 +
1860 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
1861 +index 8f14f85b8e95..0d2392c4b625 100644
1862 +--- a/drivers/net/bonding/bond_main.c
1863 ++++ b/drivers/net/bonding/bond_main.c
1864 +@@ -2190,6 +2190,15 @@ static void bond_miimon_commit(struct bonding *bond)
1865 + bond_for_each_slave(bond, slave, iter) {
1866 + switch (slave->new_link) {
1867 + case BOND_LINK_NOCHANGE:
1868 ++ /* For 802.3ad mode, check current slave speed and
1869 ++ * duplex again in case its port was disabled after
1870 ++ * invalid speed/duplex reporting but recovered before
1871 ++ * link monitoring could make a decision on the actual
1872 ++ * link status
1873 ++ */
1874 ++ if (BOND_MODE(bond) == BOND_MODE_8023AD &&
1875 ++ slave->link == BOND_LINK_UP)
1876 ++ bond_3ad_adapter_speed_duplex_changed(slave);
1877 + continue;
1878 +
1879 + case BOND_LINK_UP:
1880 +diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
1881 +index c05e4d50d43d..bd127ce3aba2 100644
1882 +--- a/drivers/net/can/dev.c
1883 ++++ b/drivers/net/can/dev.c
1884 +@@ -1260,6 +1260,8 @@ int register_candev(struct net_device *dev)
1885 + return -EINVAL;
1886 +
1887 + dev->rtnl_link_ops = &can_link_ops;
1888 ++ netif_carrier_off(dev);
1889 ++
1890 + return register_netdev(dev);
1891 + }
1892 + EXPORT_SYMBOL_GPL(register_candev);
1893 +diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
1894 +index b8c39ede7cd5..179bfcd541f2 100644
1895 +--- a/drivers/net/can/sja1000/peak_pcmcia.c
1896 ++++ b/drivers/net/can/sja1000/peak_pcmcia.c
1897 +@@ -487,7 +487,7 @@ static void pcan_free_channels(struct pcan_pccard *card)
1898 + if (!netdev)
1899 + continue;
1900 +
1901 +- strncpy(name, netdev->name, IFNAMSIZ);
1902 ++ strlcpy(name, netdev->name, IFNAMSIZ);
1903 +
1904 + unregister_sja1000dev(netdev);
1905 +
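[Editor's sketch, not part of the patch] The change above (and the matching one in pcan_usb_core.c further down) replaces strncpy() with strlcpy() because strncpy() leaves the destination without a terminating NUL whenever the source fills the buffer. glibc has no strlcpy(), so the sketch below uses a local bounded_copy() helper with the same semantics (always terminate, return the full source length); the helper name and the interface-name strings are invented for the demo.

    #include <stdio.h>
    #include <string.h>

    /*
     * Local stand-in for strlcpy(): copy at most size-1 bytes and always
     * NUL-terminate the destination.
     */
    static size_t bounded_copy(char *dst, const char *src, size_t size)
    {
            size_t len = strlen(src);

            if (size) {
                    size_t n = len < size - 1 ? len : size - 1;

                    memcpy(dst, src, n);
                    dst[n] = '\0';
            }
            return len;     /* length the caller asked to copy */
    }

    int main(void)
    {
            char name[8];

            strncpy(name, "can0-verylongname", sizeof(name));
            /* name[] now has no terminating NUL -- printing it would be unsafe. */

            bounded_copy(name, "can0-verylongname", sizeof(name));
            printf("safe copy: \"%s\"\n", name);    /* prints "can0-ve" */
            return 0;
    }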
1906 +diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
1907 +index da64e71a62ee..fccb6bf21fad 100644
1908 +--- a/drivers/net/can/spi/mcp251x.c
1909 ++++ b/drivers/net/can/spi/mcp251x.c
1910 +@@ -678,17 +678,6 @@ static int mcp251x_power_enable(struct regulator *reg, int enable)
1911 + return regulator_disable(reg);
1912 + }
1913 +
1914 +-static void mcp251x_open_clean(struct net_device *net)
1915 +-{
1916 +- struct mcp251x_priv *priv = netdev_priv(net);
1917 +- struct spi_device *spi = priv->spi;
1918 +-
1919 +- free_irq(spi->irq, priv);
1920 +- mcp251x_hw_sleep(spi);
1921 +- mcp251x_power_enable(priv->transceiver, 0);
1922 +- close_candev(net);
1923 +-}
1924 +-
1925 + static int mcp251x_stop(struct net_device *net)
1926 + {
1927 + struct mcp251x_priv *priv = netdev_priv(net);
1928 +@@ -954,37 +943,43 @@ static int mcp251x_open(struct net_device *net)
1929 + flags | IRQF_ONESHOT, DEVICE_NAME, priv);
1930 + if (ret) {
1931 + dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
1932 +- mcp251x_power_enable(priv->transceiver, 0);
1933 +- close_candev(net);
1934 +- goto open_unlock;
1935 ++ goto out_close;
1936 + }
1937 +
1938 + priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
1939 + 0);
1940 ++ if (!priv->wq) {
1941 ++ ret = -ENOMEM;
1942 ++ goto out_clean;
1943 ++ }
1944 + INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
1945 + INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
1946 +
1947 + ret = mcp251x_hw_reset(spi);
1948 +- if (ret) {
1949 +- mcp251x_open_clean(net);
1950 +- goto open_unlock;
1951 +- }
1952 ++ if (ret)
1953 ++ goto out_free_wq;
1954 + ret = mcp251x_setup(net, spi);
1955 +- if (ret) {
1956 +- mcp251x_open_clean(net);
1957 +- goto open_unlock;
1958 +- }
1959 ++ if (ret)
1960 ++ goto out_free_wq;
1961 + ret = mcp251x_set_normal_mode(spi);
1962 +- if (ret) {
1963 +- mcp251x_open_clean(net);
1964 +- goto open_unlock;
1965 +- }
1966 ++ if (ret)
1967 ++ goto out_free_wq;
1968 +
1969 + can_led_event(net, CAN_LED_EVENT_OPEN);
1970 +
1971 + netif_wake_queue(net);
1972 ++ mutex_unlock(&priv->mcp_lock);
1973 +
1974 +-open_unlock:
1975 ++ return 0;
1976 ++
1977 ++out_free_wq:
1978 ++ destroy_workqueue(priv->wq);
1979 ++out_clean:
1980 ++ free_irq(spi->irq, priv);
1981 ++ mcp251x_hw_sleep(spi);
1982 ++out_close:
1983 ++ mcp251x_power_enable(priv->transceiver, 0);
1984 ++ close_candev(net);
1985 + mutex_unlock(&priv->mcp_lock);
1986 + return ret;
1987 + }
1988 +diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
1989 +index 740ef47eab01..43b0fa2b9932 100644
1990 +--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
1991 ++++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
1992 +@@ -863,7 +863,7 @@ static void peak_usb_disconnect(struct usb_interface *intf)
1993 +
1994 + dev_prev_siblings = dev->prev_siblings;
1995 + dev->state &= ~PCAN_USB_STATE_CONNECTED;
1996 +- strncpy(name, netdev->name, IFNAMSIZ);
1997 ++ strlcpy(name, netdev->name, IFNAMSIZ);
1998 +
1999 + unregister_netdev(netdev);
2000 +
2001 +diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2002 +index c34ea385fe4a..6be6de0774b6 100644
2003 +--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2004 ++++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2005 +@@ -3270,7 +3270,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2006 + if (!adapter->regs) {
2007 + dev_err(&pdev->dev, "cannot map device registers\n");
2008 + err = -ENOMEM;
2009 +- goto out_free_adapter;
2010 ++ goto out_free_adapter_nofail;
2011 + }
2012 +
2013 + adapter->pdev = pdev;
2014 +@@ -3398,6 +3398,9 @@ out_free_dev:
2015 + if (adapter->port[i])
2016 + free_netdev(adapter->port[i]);
2017 +
2018 ++out_free_adapter_nofail:
2019 ++ kfree_skb(adapter->nofail_skb);
2020 ++
2021 + out_free_adapter:
2022 + kfree(adapter);
2023 +
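[Editor's sketch, not part of the patch] The cxgb3 fix above adds an out_free_adapter_nofail label so the pre-allocated skb is released when mapping the registers fails. The general pattern is that unwind labels are ordered inverse to acquisition, so a failure at step N jumps to the label that releases exactly what steps 1..N-1 took. A self-contained userspace illustration with invented resource names (malloc stands in for the real allocations):

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative allocation; failure is simulated via the flag. */
    static void *acquire(const char *what, int fail)
    {
            if (fail) {
                    fprintf(stderr, "failed to acquire %s\n", what);
                    return NULL;
            }
            return malloc(16);
    }

    static int init_device(int fail_regs)
    {
            void *adapter, *nofail_buf, *regs;
            int err = -12;          /* ENOMEM-style failure code */

            adapter = acquire("adapter", 0);
            if (!adapter)
                    return err;

            nofail_buf = acquire("nofail buffer", 0);
            if (!nofail_buf)
                    goto out_free_adapter;

            regs = acquire("register mapping", fail_regs);
            if (!regs)
                    goto out_free_buf;      /* also releases the earlier buffer */

            /* Success: keep the resources (freed here only to keep the demo leak-free). */
            free(regs);
            free(nofail_buf);
            free(adapter);
            return 0;

    out_free_buf:
            free(nofail_buf);
    out_free_adapter:
            free(adapter);
            return err;
    }

    int main(void)
    {
            printf("success path: %d\n", init_device(0));
            printf("failure path: %d\n", init_device(1));
            return 0;
    }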
2024 +diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
2025 +index 6127697ede12..a91d49dd92ea 100644
2026 +--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
2027 ++++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
2028 +@@ -157,6 +157,7 @@ struct hip04_priv {
2029 + unsigned int reg_inten;
2030 +
2031 + struct napi_struct napi;
2032 ++ struct device *dev;
2033 + struct net_device *ndev;
2034 +
2035 + struct tx_desc *tx_desc;
2036 +@@ -185,7 +186,7 @@ struct hip04_priv {
2037 +
2038 + static inline unsigned int tx_count(unsigned int head, unsigned int tail)
2039 + {
2040 +- return (head - tail) % (TX_DESC_NUM - 1);
2041 ++ return (head - tail) % TX_DESC_NUM;
2042 + }
2043 +
2044 + static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
2045 +@@ -387,7 +388,7 @@ static int hip04_tx_reclaim(struct net_device *ndev, bool force)
2046 + }
2047 +
2048 + if (priv->tx_phys[tx_tail]) {
2049 +- dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail],
2050 ++ dma_unmap_single(priv->dev, priv->tx_phys[tx_tail],
2051 + priv->tx_skb[tx_tail]->len,
2052 + DMA_TO_DEVICE);
2053 + priv->tx_phys[tx_tail] = 0;
2054 +@@ -437,8 +438,8 @@ static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2055 + return NETDEV_TX_BUSY;
2056 + }
2057 +
2058 +- phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
2059 +- if (dma_mapping_error(&ndev->dev, phys)) {
2060 ++ phys = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
2061 ++ if (dma_mapping_error(priv->dev, phys)) {
2062 + dev_kfree_skb(skb);
2063 + return NETDEV_TX_OK;
2064 + }
2065 +@@ -497,6 +498,9 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
2066 + u16 len;
2067 + u32 err;
2068 +
2069 ++ /* clean up tx descriptors */
2070 ++ tx_remaining = hip04_tx_reclaim(ndev, false);
2071 ++
2072 + while (cnt && !last) {
2073 + buf = priv->rx_buf[priv->rx_head];
2074 + skb = build_skb(buf, priv->rx_buf_size);
2075 +@@ -505,7 +509,7 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
2076 + goto refill;
2077 + }
2078 +
2079 +- dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
2080 ++ dma_unmap_single(priv->dev, priv->rx_phys[priv->rx_head],
2081 + RX_BUF_SIZE, DMA_FROM_DEVICE);
2082 + priv->rx_phys[priv->rx_head] = 0;
2083 +
2084 +@@ -534,9 +538,9 @@ refill:
2085 + buf = netdev_alloc_frag(priv->rx_buf_size);
2086 + if (!buf)
2087 + goto done;
2088 +- phys = dma_map_single(&ndev->dev, buf,
2089 ++ phys = dma_map_single(priv->dev, buf,
2090 + RX_BUF_SIZE, DMA_FROM_DEVICE);
2091 +- if (dma_mapping_error(&ndev->dev, phys))
2092 ++ if (dma_mapping_error(priv->dev, phys))
2093 + goto done;
2094 + priv->rx_buf[priv->rx_head] = buf;
2095 + priv->rx_phys[priv->rx_head] = phys;
2096 +@@ -557,8 +561,7 @@ refill:
2097 + }
2098 + napi_complete_done(napi, rx);
2099 + done:
2100 +- /* clean up tx descriptors and start a new timer if necessary */
2101 +- tx_remaining = hip04_tx_reclaim(ndev, false);
2102 ++ /* start a new timer if necessary */
2103 + if (rx < budget && tx_remaining)
2104 + hip04_start_tx_timer(priv);
2105 +
2106 +@@ -640,9 +643,9 @@ static int hip04_mac_open(struct net_device *ndev)
2107 + for (i = 0; i < RX_DESC_NUM; i++) {
2108 + dma_addr_t phys;
2109 +
2110 +- phys = dma_map_single(&ndev->dev, priv->rx_buf[i],
2111 ++ phys = dma_map_single(priv->dev, priv->rx_buf[i],
2112 + RX_BUF_SIZE, DMA_FROM_DEVICE);
2113 +- if (dma_mapping_error(&ndev->dev, phys))
2114 ++ if (dma_mapping_error(priv->dev, phys))
2115 + return -EIO;
2116 +
2117 + priv->rx_phys[i] = phys;
2118 +@@ -676,7 +679,7 @@ static int hip04_mac_stop(struct net_device *ndev)
2119 +
2120 + for (i = 0; i < RX_DESC_NUM; i++) {
2121 + if (priv->rx_phys[i]) {
2122 +- dma_unmap_single(&ndev->dev, priv->rx_phys[i],
2123 ++ dma_unmap_single(priv->dev, priv->rx_phys[i],
2124 + RX_BUF_SIZE, DMA_FROM_DEVICE);
2125 + priv->rx_phys[i] = 0;
2126 + }
2127 +@@ -820,6 +823,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
2128 + return -ENOMEM;
2129 +
2130 + priv = netdev_priv(ndev);
2131 ++ priv->dev = d;
2132 + priv->ndev = ndev;
2133 + platform_set_drvdata(pdev, ndev);
2134 + SET_NETDEV_DEV(ndev, &pdev->dev);
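[Editor's sketch, not part of the patch] The hip04 change above fixes the in-flight descriptor count to use "% TX_DESC_NUM" instead of "% (TX_DESC_NUM - 1)". With a power-of-two ring and unsigned index arithmetic, (head - tail) % ring_size is correct even after the head wraps past the tail, while the old modulus reports a full ring as empty. A standalone check of the arithmetic (ring size chosen for the demo):

    #include <stdio.h>

    #define RING_SIZE 256   /* power of two, as with the TX descriptor ring */

    /*
     * head/tail are indices that wrap at RING_SIZE.  With unsigned
     * arithmetic, (head - tail) % RING_SIZE gives the number of in-flight
     * entries as long as at most RING_SIZE - 1 entries are outstanding.
     */
    static unsigned int ring_count(unsigned int head, unsigned int tail)
    {
            return (head - tail) % RING_SIZE;
    }

    int main(void)
    {
            /* No wrap: 10 descriptors queued, 3 reclaimed. */
            printf("%u\n", ring_count(10, 3));              /* 7 */

            /* Wrapped: head restarted at 5, tail still near the end. */
            printf("%u\n", ring_count(5, 250));             /* 11 */

            /* A nearly full ring: 255 descriptors outstanding. */
            printf("%u\n", ring_count(255, 0));             /* 255 */

            /* The old modulus, RING_SIZE - 1, reports that ring as empty. */
            printf("%u\n", (255u - 0u) % (RING_SIZE - 1));  /* 0 */
            return 0;
    }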
2135 +diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2136 +index 6455511457ca..9b608d23ff7e 100644
2137 +--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2138 ++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2139 +@@ -4412,9 +4412,9 @@ static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
2140 + if (state->pause & MLO_PAUSE_RX)
2141 + ctrl0 |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
2142 +
2143 +- ctrl4 &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC;
2144 +- ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC |
2145 +- MVPP22_XLG_CTRL4_EN_IDLE_CHECK;
2146 ++ ctrl4 &= ~(MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
2147 ++ MVPP22_XLG_CTRL4_EN_IDLE_CHECK);
2148 ++ ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC;
2149 +
2150 + writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG);
2151 + writel(ctrl4, port->base + MVPP22_XLG_CTRL4_REG);
2152 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
2153 +index b22f464ea3fa..f9e475075d3e 100644
2154 +--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
2155 ++++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
2156 +@@ -939,7 +939,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
2157 + snprintf(bit_name, 30,
2158 + p_aeu->bit_name, num);
2159 + else
2160 +- strncpy(bit_name,
2161 ++ strlcpy(bit_name,
2162 + p_aeu->bit_name, 30);
2163 +
2164 + /* We now need to pass bitmask in its
2165 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
2166 +index 13802b825d65..909422d93903 100644
2167 +--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
2168 ++++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
2169 +@@ -442,7 +442,7 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
2170 + /* Vendor specific information */
2171 + dev->vendor_id = cdev->vendor_id;
2172 + dev->vendor_part_id = cdev->device_id;
2173 +- dev->hw_ver = 0;
2174 ++ dev->hw_ver = cdev->chip_rev;
2175 + dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
2176 + (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);
2177 +
2178 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
2179 +index d0e6e1503581..48cf5e2b2441 100644
2180 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
2181 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
2182 +@@ -88,6 +88,8 @@ static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
2183 + u32 value;
2184 +
2185 + base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
2186 ++ if (queue >= 4)
2187 ++ queue -= 4;
2188 +
2189 + value = readl(ioaddr + base_register);
2190 +
2191 +@@ -105,6 +107,8 @@ static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
2192 + u32 value;
2193 +
2194 + base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
2195 ++ if (queue >= 4)
2196 ++ queue -= 4;
2197 +
2198 + value = readl(ioaddr + base_register);
2199 +
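[Editor's sketch, not part of the patch] The dwmac4 hunks above (and the dwxgmac2 ones that follow) pick the second control register for queues 4..7 but previously kept shifting by the original queue number, landing on the wrong bit field; the fix rebases the queue index before computing the field position. A small illustration of mapping a flat queue number to a (register, bit range) pair; the register offsets and 8-bit field width here are example values, not the real GMAC register map.

    #include <stdio.h>

    #define RXQ_CTRL2 0x38  /* example offset: fields for queues 0..3 */
    #define RXQ_CTRL3 0x3c  /* example offset: fields for queues 4..7 */

    /*
     * Each register packs four 8-bit priority fields.  A flat queue number
     * therefore selects a register and must be rebased before computing
     * the bit position inside that register.
     */
    static void queue_to_field(unsigned int queue,
                               unsigned int *reg, unsigned int *shift)
    {
            *reg = (queue < 4) ? RXQ_CTRL2 : RXQ_CTRL3;
            if (queue >= 4)
                    queue -= 4;     /* rebase into the second register */
            *shift = queue * 8;
    }

    int main(void)
    {
            unsigned int q, reg, shift;

            for (q = 0; q < 8; q++) {
                    queue_to_field(q, &reg, &shift);
                    printf("queue %u -> reg 0x%02x, bits %u..%u\n",
                           q, reg, shift, shift + 7);
            }
            return 0;
    }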
2200 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
2201 +index d182f82f7b58..870302a7177e 100644
2202 +--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
2203 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
2204 +@@ -106,6 +106,8 @@ static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
2205 + u32 value, reg;
2206 +
2207 + reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
2208 ++ if (queue >= 4)
2209 ++ queue -= 4;
2210 +
2211 + value = readl(ioaddr + reg);
2212 + value &= ~XGMAC_PSRQ(queue);
2213 +@@ -169,6 +171,8 @@ static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
2214 + u32 value, reg;
2215 +
2216 + reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1;
2217 ++ if (queue >= 4)
2218 ++ queue -= 4;
2219 +
2220 + value = readl(ioaddr + reg);
2221 + value &= ~XGMAC_QxMDMACH(queue);
2222 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
2223 +index 58ea18af9813..37c0bc699cd9 100644
2224 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
2225 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
2226 +@@ -37,7 +37,7 @@ static struct stmmac_tc_entry *tc_find_entry(struct stmmac_priv *priv,
2227 + entry = &priv->tc_entries[i];
2228 + if (!entry->in_use && !first && free)
2229 + first = entry;
2230 +- if (entry->handle == loc && !free)
2231 ++ if ((entry->handle == loc) && !free && !entry->is_frag)
2232 + dup = entry;
2233 + }
2234 +
2235 +diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
2236 +index 491efc1bf5c4..7278eca70f9f 100644
2237 +--- a/drivers/net/phy/phy_led_triggers.c
2238 ++++ b/drivers/net/phy/phy_led_triggers.c
2239 +@@ -58,8 +58,9 @@ void phy_led_trigger_change_speed(struct phy_device *phy)
2240 + if (!phy->last_triggered)
2241 + led_trigger_event(&phy->led_link_trigger->trigger,
2242 + LED_FULL);
2243 ++ else
2244 ++ led_trigger_event(&phy->last_triggered->trigger, LED_OFF);
2245 +
2246 +- led_trigger_event(&phy->last_triggered->trigger, LED_OFF);
2247 + led_trigger_event(&plt->trigger, LED_FULL);
2248 + phy->last_triggered = plt;
2249 + }
2250 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
2251 +index 128c8a327d8e..51017c6bb3bc 100644
2252 +--- a/drivers/net/usb/qmi_wwan.c
2253 ++++ b/drivers/net/usb/qmi_wwan.c
2254 +@@ -1231,6 +1231,7 @@ static const struct usb_device_id products[] = {
2255 + {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
2256 + {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
2257 + {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
2258 ++ {QMI_FIXED_INTF(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */
2259 + {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
2260 + {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
2261 + {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
2262 +diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
2263 +index 7cd428c0af43..ce2dd06af62e 100644
2264 +--- a/drivers/net/wireless/mac80211_hwsim.c
2265 ++++ b/drivers/net/wireless/mac80211_hwsim.c
2266 +@@ -3502,10 +3502,12 @@ static int hwsim_dump_radio_nl(struct sk_buff *skb,
2267 + hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
2268 + cb->nlh->nlmsg_seq, &hwsim_genl_family,
2269 + NLM_F_MULTI, HWSIM_CMD_GET_RADIO);
2270 +- if (!hdr)
2271 ++ if (hdr) {
2272 ++ genl_dump_check_consistent(cb, hdr);
2273 ++ genlmsg_end(skb, hdr);
2274 ++ } else {
2275 + res = -EMSGSIZE;
2276 +- genl_dump_check_consistent(cb, hdr);
2277 +- genlmsg_end(skb, hdr);
2278 ++ }
2279 + }
2280 +
2281 + done:
2282 +diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
2283 +index f55d082ace71..5d6e7e931bc6 100644
2284 +--- a/drivers/nfc/st-nci/se.c
2285 ++++ b/drivers/nfc/st-nci/se.c
2286 +@@ -344,6 +344,8 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev,
2287 +
2288 + transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
2289 + skb->len - 2, GFP_KERNEL);
2290 ++ if (!transaction)
2291 ++ return -ENOMEM;
2292 +
2293 + transaction->aid_len = skb->data[1];
2294 + memcpy(transaction->aid, &skb->data[2], transaction->aid_len);
2295 +diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
2296 +index 4bed9e842db3..fd967a38a94a 100644
2297 +--- a/drivers/nfc/st21nfca/se.c
2298 ++++ b/drivers/nfc/st21nfca/se.c
2299 +@@ -328,6 +328,8 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
2300 +
2301 + transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
2302 + skb->len - 2, GFP_KERNEL);
2303 ++ if (!transaction)
2304 ++ return -ENOMEM;
2305 +
2306 + transaction->aid_len = skb->data[1];
2307 + memcpy(transaction->aid, &skb->data[2],
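[Editor's sketch, not part of the patch] Both NFC hunks above add the missing NULL check after devm_kzalloc() before the allocation is dereferenced. A userspace analogue of the same parse-into-fresh-allocation step, with calloc() standing in for devm_kzalloc() and all names and sizes invented for the demo:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct transaction {
            size_t aid_len;
            unsigned char aid[16];
    };

    /* Parse an event buffer into a freshly allocated transaction record. */
    static int parse_event(const unsigned char *data, size_t len,
                           struct transaction **out)
    {
            struct transaction *t;

            if (len < 2)
                    return -22;     /* invalid input */

            t = calloc(1, sizeof(*t));
            if (!t)                 /* the check the patch adds */
                    return -12;     /* out of memory */

            t->aid_len = data[1];
            if (t->aid_len > sizeof(t->aid) || t->aid_len + 2 > len) {
                    free(t);
                    return -22;
            }
            memcpy(t->aid, &data[2], t->aid_len);
            *out = t;
            return 0;
    }

    int main(void)
    {
            unsigned char evt[] = { 0x81, 0x04, 0xde, 0xad, 0xbe, 0xef };
            struct transaction *t = NULL;

            if (!parse_event(evt, sizeof(evt), &t)) {
                    printf("aid_len = %zu\n", t->aid_len);
                    free(t);
            }
            return 0;
    }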
2308 +diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
2309 +index 9c332a6f6667..476728bdae8c 100644
2310 +--- a/fs/ceph/addr.c
2311 ++++ b/fs/ceph/addr.c
2312 +@@ -913,8 +913,9 @@ get_more_pages:
2313 + if (page_offset(page) >= ceph_wbc.i_size) {
2314 + dout("%p page eof %llu\n",
2315 + page, ceph_wbc.i_size);
2316 +- if (ceph_wbc.size_stable ||
2317 +- page_offset(page) >= i_size_read(inode))
2318 ++ if ((ceph_wbc.size_stable ||
2319 ++ page_offset(page) >= i_size_read(inode)) &&
2320 ++ clear_page_dirty_for_io(page))
2321 + mapping->a_ops->invalidatepage(page,
2322 + 0, PAGE_SIZE);
2323 + unlock_page(page);
2324 +diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
2325 +index 9dae2ec7e1fa..6a8f4a99582e 100644
2326 +--- a/fs/ceph/locks.c
2327 ++++ b/fs/ceph/locks.c
2328 +@@ -111,8 +111,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
2329 + req->r_wait_for_completion = ceph_lock_wait_for_completion;
2330 +
2331 + err = ceph_mdsc_do_request(mdsc, inode, req);
2332 +-
2333 +- if (operation == CEPH_MDS_OP_GETFILELOCK) {
2334 ++ if (!err && operation == CEPH_MDS_OP_GETFILELOCK) {
2335 + fl->fl_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid);
2336 + if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type)
2337 + fl->fl_type = F_RDLCK;
2338 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
2339 +index 0ccf8f9b63a2..cc9e846a3865 100644
2340 +--- a/fs/cifs/smb2ops.c
2341 ++++ b/fs/cifs/smb2ops.c
2342 +@@ -2545,7 +2545,15 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
2343 + static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
2344 + unsigned int buflen)
2345 + {
2346 +- sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
2347 ++ void *addr;
2348 ++ /*
2349 ++ * VMAP_STACK (at least) puts stack into the vmalloc address space
2350 ++ */
2351 ++ if (is_vmalloc_addr(buf))
2352 ++ addr = vmalloc_to_page(buf);
2353 ++ else
2354 ++ addr = virt_to_page(buf);
2355 ++ sg_set_page(sg, addr, buflen, offset_in_page(buf));
2356 + }
2357 +
2358 + /* Assumes the first rqst has a transform header as the first iov.
2359 +@@ -3121,7 +3129,6 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
2360 + {
2361 + int ret, length;
2362 + char *buf = server->smallbuf;
2363 +- char *tmpbuf;
2364 + struct smb2_sync_hdr *shdr;
2365 + unsigned int pdu_length = server->pdu_size;
2366 + unsigned int buf_size;
2367 +@@ -3151,18 +3158,15 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
2368 + return length;
2369 +
2370 + next_is_large = server->large_buf;
2371 +- one_more:
2372 ++one_more:
2373 + shdr = (struct smb2_sync_hdr *)buf;
2374 + if (shdr->NextCommand) {
2375 +- if (next_is_large) {
2376 +- tmpbuf = server->bigbuf;
2377 ++ if (next_is_large)
2378 + next_buffer = (char *)cifs_buf_get();
2379 +- } else {
2380 +- tmpbuf = server->smallbuf;
2381 ++ else
2382 + next_buffer = (char *)cifs_small_buf_get();
2383 +- }
2384 + memcpy(next_buffer,
2385 +- tmpbuf + le32_to_cpu(shdr->NextCommand),
2386 ++ buf + le32_to_cpu(shdr->NextCommand),
2387 + pdu_length - le32_to_cpu(shdr->NextCommand));
2388 + }
2389 +
2390 +@@ -3191,12 +3195,21 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
2391 + pdu_length -= le32_to_cpu(shdr->NextCommand);
2392 + server->large_buf = next_is_large;
2393 + if (next_is_large)
2394 +- server->bigbuf = next_buffer;
2395 ++ server->bigbuf = buf = next_buffer;
2396 + else
2397 +- server->smallbuf = next_buffer;
2398 +-
2399 +- buf += le32_to_cpu(shdr->NextCommand);
2400 ++ server->smallbuf = buf = next_buffer;
2401 + goto one_more;
2402 ++ } else if (ret != 0) {
2403 ++ /*
2404 ++ * ret != 0 here means that we didn't get to handle_mid() thus
2405 ++ * server->smallbuf and server->bigbuf are still valid. We need
2406 ++ * to free next_buffer because it is not going to be used
2407 ++ * anywhere.
2408 ++ */
2409 ++ if (next_is_large)
2410 ++ free_rsp_buf(CIFS_LARGE_BUFFER, next_buffer);
2411 ++ else
2412 ++ free_rsp_buf(CIFS_SMALL_BUFFER, next_buffer);
2413 + }
2414 +
2415 + return ret;
2416 +diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
2417 +index 4dc887813c71..a7bc4e0494f9 100644
2418 +--- a/fs/nfs/fscache.c
2419 ++++ b/fs/nfs/fscache.c
2420 +@@ -118,6 +118,10 @@ void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int
2421 + struct rb_node **p, *parent;
2422 + int diff;
2423 +
2424 ++ nfss->fscache_key = NULL;
2425 ++ nfss->fscache = NULL;
2426 ++ if (!(nfss->options & NFS_OPTION_FSCACHE))
2427 ++ return;
2428 + if (!uniq) {
2429 + uniq = "";
2430 + ulen = 1;
2431 +@@ -230,10 +234,11 @@ void nfs_fscache_release_super_cookie(struct super_block *sb)
2432 + void nfs_fscache_init_inode(struct inode *inode)
2433 + {
2434 + struct nfs_fscache_inode_auxdata auxdata;
2435 ++ struct nfs_server *nfss = NFS_SERVER(inode);
2436 + struct nfs_inode *nfsi = NFS_I(inode);
2437 +
2438 + nfsi->fscache = NULL;
2439 +- if (!S_ISREG(inode->i_mode))
2440 ++ if (!(nfss->fscache && S_ISREG(inode->i_mode)))
2441 + return;
2442 +
2443 + memset(&auxdata, 0, sizeof(auxdata));
2444 +diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
2445 +index 161ba2edb9d0..6363ea956858 100644
2446 +--- a/fs/nfs/fscache.h
2447 ++++ b/fs/nfs/fscache.h
2448 +@@ -186,7 +186,7 @@ static inline void nfs_fscache_wait_on_invalidate(struct inode *inode)
2449 + */
2450 + static inline const char *nfs_server_fscache_state(struct nfs_server *server)
2451 + {
2452 +- if (server->fscache && (server->options & NFS_OPTION_FSCACHE))
2453 ++ if (server->fscache)
2454 + return "yes";
2455 + return "no ";
2456 + }
2457 +diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
2458 +index 63287d911c08..5b61520dce88 100644
2459 +--- a/fs/nfs/nfs4_fs.h
2460 ++++ b/fs/nfs/nfs4_fs.h
2461 +@@ -469,7 +469,8 @@ static inline void nfs4_schedule_session_recovery(struct nfs4_session *session,
2462 +
2463 + extern struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *, gfp_t);
2464 + extern void nfs4_put_state_owner(struct nfs4_state_owner *);
2465 +-extern void nfs4_purge_state_owners(struct nfs_server *);
2466 ++extern void nfs4_purge_state_owners(struct nfs_server *, struct list_head *);
2467 ++extern void nfs4_free_state_owners(struct list_head *head);
2468 + extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *);
2469 + extern void nfs4_put_open_state(struct nfs4_state *);
2470 + extern void nfs4_close_state(struct nfs4_state *, fmode_t);
2471 +diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
2472 +index 8f53455c4765..86991bcfbeb1 100644
2473 +--- a/fs/nfs/nfs4client.c
2474 ++++ b/fs/nfs/nfs4client.c
2475 +@@ -754,9 +754,12 @@ out:
2476 +
2477 + static void nfs4_destroy_server(struct nfs_server *server)
2478 + {
2479 ++ LIST_HEAD(freeme);
2480 ++
2481 + nfs_server_return_all_delegations(server);
2482 + unset_pnfs_layoutdriver(server);
2483 +- nfs4_purge_state_owners(server);
2484 ++ nfs4_purge_state_owners(server, &freeme);
2485 ++ nfs4_free_state_owners(&freeme);
2486 + }
2487 +
2488 + /*
2489 +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
2490 +index 3ba2087469ac..c36ef75f2054 100644
2491 +--- a/fs/nfs/nfs4state.c
2492 ++++ b/fs/nfs/nfs4state.c
2493 +@@ -628,24 +628,39 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp)
2494 + /**
2495 + * nfs4_purge_state_owners - Release all cached state owners
2496 + * @server: nfs_server with cached state owners to release
2497 ++ * @head: resulting list of state owners
2498 + *
2499 + * Called at umount time. Remaining state owners will be on
2500 + * the LRU with ref count of zero.
2501 ++ * Note that the state owners are not freed, but are added
2502 ++ * to the list @head, which can later be used as an argument
2503 ++ * to nfs4_free_state_owners.
2504 + */
2505 +-void nfs4_purge_state_owners(struct nfs_server *server)
2506 ++void nfs4_purge_state_owners(struct nfs_server *server, struct list_head *head)
2507 + {
2508 + struct nfs_client *clp = server->nfs_client;
2509 + struct nfs4_state_owner *sp, *tmp;
2510 +- LIST_HEAD(doomed);
2511 +
2512 + spin_lock(&clp->cl_lock);
2513 + list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
2514 +- list_move(&sp->so_lru, &doomed);
2515 ++ list_move(&sp->so_lru, head);
2516 + nfs4_remove_state_owner_locked(sp);
2517 + }
2518 + spin_unlock(&clp->cl_lock);
2519 ++}
2520 +
2521 +- list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
2522 ++/**
2523 ++ * nfs4_purge_state_owners - Release all cached state owners
2524 ++ * @head: resulting list of state owners
2525 ++ *
2526 ++ * Frees a list of state owners that was generated by
2527 ++ * nfs4_purge_state_owners
2528 ++ */
2529 ++void nfs4_free_state_owners(struct list_head *head)
2530 ++{
2531 ++ struct nfs4_state_owner *sp, *tmp;
2532 ++
2533 ++ list_for_each_entry_safe(sp, tmp, head, so_lru) {
2534 + list_del(&sp->so_lru);
2535 + nfs4_free_state_owner(sp);
2536 + }
2537 +@@ -1843,12 +1858,13 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov
2538 + struct nfs4_state_owner *sp;
2539 + struct nfs_server *server;
2540 + struct rb_node *pos;
2541 ++ LIST_HEAD(freeme);
2542 + int status = 0;
2543 +
2544 + restart:
2545 + rcu_read_lock();
2546 + list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
2547 +- nfs4_purge_state_owners(server);
2548 ++ nfs4_purge_state_owners(server, &freeme);
2549 + spin_lock(&clp->cl_lock);
2550 + for (pos = rb_first(&server->state_owners);
2551 + pos != NULL;
2552 +@@ -1877,6 +1893,7 @@ restart:
2553 + spin_unlock(&clp->cl_lock);
2554 + }
2555 + rcu_read_unlock();
2556 ++ nfs4_free_state_owners(&freeme);
2557 + return 0;
2558 + }
2559 +
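[Editor's sketch, not part of the patch] The NFS changes above split nfs4_purge_state_owners() into two steps: detach the cached entries onto a caller-supplied list while the lock is held, then free them later with nfs4_free_state_owners() outside the lock (and outside the RCU section in nfs4_do_reclaim()). A minimal userspace analogue of that detach-under-lock, free-outside-lock pattern, using a pthread mutex and a simple singly linked list with invented names:

    #include <stdio.h>
    #include <stdlib.h>
    #include <pthread.h>

    struct owner {
            int id;
            struct owner *next;
    };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct owner *lru;       /* cached owners, protected by lock */

    /* Detach every cached entry onto the caller's list while holding the lock. */
    static void purge_owners(struct owner **head)
    {
            pthread_mutex_lock(&lock);
            *head = lru;
            lru = NULL;
            pthread_mutex_unlock(&lock);
    }

    /* Free the detached entries later, with no lock held. */
    static void free_owners(struct owner *head)
    {
            while (head) {
                    struct owner *next = head->next;

                    printf("freeing owner %d\n", head->id);
                    free(head);
                    head = next;
            }
    }

    int main(void)
    {
            struct owner *doomed = NULL;
            int i;

            for (i = 0; i < 3; i++) {
                    struct owner *o = malloc(sizeof(*o));

                    if (!o)
                            break;
                    o->id = i;
                    o->next = lru;
                    lru = o;
            }

            purge_owners(&doomed);
            free_owners(doomed);
            return 0;
    }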
2560 +diff --git a/fs/nfs/super.c b/fs/nfs/super.c
2561 +index 6df9b85caf20..d90efdea9fbd 100644
2562 +--- a/fs/nfs/super.c
2563 ++++ b/fs/nfs/super.c
2564 +@@ -2239,6 +2239,7 @@ nfs_compare_remount_data(struct nfs_server *nfss,
2565 + data->acdirmin != nfss->acdirmin / HZ ||
2566 + data->acdirmax != nfss->acdirmax / HZ ||
2567 + data->timeo != (10U * nfss->client->cl_timeout->to_initval / HZ) ||
2568 ++ (data->options & NFS_OPTION_FSCACHE) != (nfss->options & NFS_OPTION_FSCACHE) ||
2569 + data->nfs_server.port != nfss->port ||
2570 + data->nfs_server.addrlen != nfss->nfs_client->cl_addrlen ||
2571 + !rpc_cmp_addr((struct sockaddr *)&data->nfs_server.address,
2572 +diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
2573 +index e1ebdbe40032..9c2955f67f70 100644
2574 +--- a/fs/userfaultfd.c
2575 ++++ b/fs/userfaultfd.c
2576 +@@ -881,6 +881,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
2577 + /* len == 0 means wake all */
2578 + struct userfaultfd_wake_range range = { .len = 0, };
2579 + unsigned long new_flags;
2580 ++ bool still_valid;
2581 +
2582 + WRITE_ONCE(ctx->released, true);
2583 +
2584 +@@ -896,8 +897,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
2585 + * taking the mmap_sem for writing.
2586 + */
2587 + down_write(&mm->mmap_sem);
2588 +- if (!mmget_still_valid(mm))
2589 +- goto skip_mm;
2590 ++ still_valid = mmget_still_valid(mm);
2591 + prev = NULL;
2592 + for (vma = mm->mmap; vma; vma = vma->vm_next) {
2593 + cond_resched();
2594 +@@ -908,19 +908,20 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
2595 + continue;
2596 + }
2597 + new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
2598 +- prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
2599 +- new_flags, vma->anon_vma,
2600 +- vma->vm_file, vma->vm_pgoff,
2601 +- vma_policy(vma),
2602 +- NULL_VM_UFFD_CTX);
2603 +- if (prev)
2604 +- vma = prev;
2605 +- else
2606 +- prev = vma;
2607 ++ if (still_valid) {
2608 ++ prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
2609 ++ new_flags, vma->anon_vma,
2610 ++ vma->vm_file, vma->vm_pgoff,
2611 ++ vma_policy(vma),
2612 ++ NULL_VM_UFFD_CTX);
2613 ++ if (prev)
2614 ++ vma = prev;
2615 ++ else
2616 ++ prev = vma;
2617 ++ }
2618 + vma->vm_flags = new_flags;
2619 + vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
2620 + }
2621 +-skip_mm:
2622 + up_write(&mm->mmap_sem);
2623 + mmput(mm);
2624 + wakeup:
2625 +diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
2626 +index c6299f82a6e4..6410d3e00ce0 100644
2627 +--- a/fs/xfs/libxfs/xfs_attr.c
2628 ++++ b/fs/xfs/libxfs/xfs_attr.c
2629 +@@ -191,6 +191,121 @@ xfs_attr_calc_size(
2630 + return nblks;
2631 + }
2632 +
2633 ++STATIC int
2634 ++xfs_attr_try_sf_addname(
2635 ++ struct xfs_inode *dp,
2636 ++ struct xfs_da_args *args)
2637 ++{
2638 ++
2639 ++ struct xfs_mount *mp = dp->i_mount;
2640 ++ int error, error2;
2641 ++
2642 ++ error = xfs_attr_shortform_addname(args);
2643 ++ if (error == -ENOSPC)
2644 ++ return error;
2645 ++
2646 ++ /*
2647 ++ * Commit the shortform mods, and we're done.
2648 ++ * NOTE: this is also the error path (EEXIST, etc).
2649 ++ */
2650 ++ if (!error && (args->flags & ATTR_KERNOTIME) == 0)
2651 ++ xfs_trans_ichgtime(args->trans, dp, XFS_ICHGTIME_CHG);
2652 ++
2653 ++ if (mp->m_flags & XFS_MOUNT_WSYNC)
2654 ++ xfs_trans_set_sync(args->trans);
2655 ++
2656 ++ error2 = xfs_trans_commit(args->trans);
2657 ++ args->trans = NULL;
2658 ++ return error ? error : error2;
2659 ++}
2660 ++
2661 ++/*
2662 ++ * Set the attribute specified in @args.
2663 ++ */
2664 ++int
2665 ++xfs_attr_set_args(
2666 ++ struct xfs_da_args *args)
2667 ++{
2668 ++ struct xfs_inode *dp = args->dp;
2669 ++ struct xfs_buf *leaf_bp = NULL;
2670 ++ int error;
2671 ++
2672 ++ /*
2673 ++ * If the attribute list is non-existent or a shortform list,
2674 ++ * upgrade it to a single-leaf-block attribute list.
2675 ++ */
2676 ++ if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL ||
2677 ++ (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
2678 ++ dp->i_d.di_anextents == 0)) {
2679 ++
2680 ++ /*
2681 ++ * Build initial attribute list (if required).
2682 ++ */
2683 ++ if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS)
2684 ++ xfs_attr_shortform_create(args);
2685 ++
2686 ++ /*
2687 ++ * Try to add the attr to the attribute list in the inode.
2688 ++ */
2689 ++ error = xfs_attr_try_sf_addname(dp, args);
2690 ++ if (error != -ENOSPC)
2691 ++ return error;
2692 ++
2693 ++ /*
2694 ++ * It won't fit in the shortform, transform to a leaf block.
2695 ++ * GROT: another possible req'mt for a double-split btree op.
2696 ++ */
2697 ++ error = xfs_attr_shortform_to_leaf(args, &leaf_bp);
2698 ++ if (error)
2699 ++ return error;
2700 ++
2701 ++ /*
2702 ++ * Prevent the leaf buffer from being unlocked so that a
2703 ++ * concurrent AIL push cannot grab the half-baked leaf
2704 ++ * buffer and run into problems with the write verifier.
2705 ++ * Once we're done rolling the transaction we can release
2706 ++ * the hold and add the attr to the leaf.
2707 ++ */
2708 ++ xfs_trans_bhold(args->trans, leaf_bp);
2709 ++ error = xfs_defer_finish(&args->trans);
2710 ++ xfs_trans_bhold_release(args->trans, leaf_bp);
2711 ++ if (error) {
2712 ++ xfs_trans_brelse(args->trans, leaf_bp);
2713 ++ return error;
2714 ++ }
2715 ++ }
2716 ++
2717 ++ if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
2718 ++ error = xfs_attr_leaf_addname(args);
2719 ++ else
2720 ++ error = xfs_attr_node_addname(args);
2721 ++ return error;
2722 ++}
2723 ++
2724 ++/*
2725 ++ * Remove the attribute specified in @args.
2726 ++ */
2727 ++int
2728 ++xfs_attr_remove_args(
2729 ++ struct xfs_da_args *args)
2730 ++{
2731 ++ struct xfs_inode *dp = args->dp;
2732 ++ int error;
2733 ++
2734 ++ if (!xfs_inode_hasattr(dp)) {
2735 ++ error = -ENOATTR;
2736 ++ } else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
2737 ++ ASSERT(dp->i_afp->if_flags & XFS_IFINLINE);
2738 ++ error = xfs_attr_shortform_remove(args);
2739 ++ } else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
2740 ++ error = xfs_attr_leaf_removename(args);
2741 ++ } else {
2742 ++ error = xfs_attr_node_removename(args);
2743 ++ }
2744 ++
2745 ++ return error;
2746 ++}
2747 ++
2748 + int
2749 + xfs_attr_set(
2750 + struct xfs_inode *dp,
2751 +@@ -200,11 +315,10 @@ xfs_attr_set(
2752 + int flags)
2753 + {
2754 + struct xfs_mount *mp = dp->i_mount;
2755 +- struct xfs_buf *leaf_bp = NULL;
2756 + struct xfs_da_args args;
2757 + struct xfs_trans_res tres;
2758 + int rsvd = (flags & ATTR_ROOT) != 0;
2759 +- int error, err2, local;
2760 ++ int error, local;
2761 +
2762 + XFS_STATS_INC(mp, xs_attr_set);
2763 +
2764 +@@ -255,93 +369,17 @@ xfs_attr_set(
2765 + error = xfs_trans_reserve_quota_nblks(args.trans, dp, args.total, 0,
2766 + rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
2767 + XFS_QMOPT_RES_REGBLKS);
2768 +- if (error) {
2769 +- xfs_iunlock(dp, XFS_ILOCK_EXCL);
2770 +- xfs_trans_cancel(args.trans);
2771 +- return error;
2772 +- }
2773 ++ if (error)
2774 ++ goto out_trans_cancel;
2775 +
2776 + xfs_trans_ijoin(args.trans, dp, 0);
2777 +-
2778 +- /*
2779 +- * If the attribute list is non-existent or a shortform list,
2780 +- * upgrade it to a single-leaf-block attribute list.
2781 +- */
2782 +- if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL ||
2783 +- (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
2784 +- dp->i_d.di_anextents == 0)) {
2785 +-
2786 +- /*
2787 +- * Build initial attribute list (if required).
2788 +- */
2789 +- if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS)
2790 +- xfs_attr_shortform_create(&args);
2791 +-
2792 +- /*
2793 +- * Try to add the attr to the attribute list in
2794 +- * the inode.
2795 +- */
2796 +- error = xfs_attr_shortform_addname(&args);
2797 +- if (error != -ENOSPC) {
2798 +- /*
2799 +- * Commit the shortform mods, and we're done.
2800 +- * NOTE: this is also the error path (EEXIST, etc).
2801 +- */
2802 +- ASSERT(args.trans != NULL);
2803 +-
2804 +- /*
2805 +- * If this is a synchronous mount, make sure that
2806 +- * the transaction goes to disk before returning
2807 +- * to the user.
2808 +- */
2809 +- if (mp->m_flags & XFS_MOUNT_WSYNC)
2810 +- xfs_trans_set_sync(args.trans);
2811 +-
2812 +- if (!error && (flags & ATTR_KERNOTIME) == 0) {
2813 +- xfs_trans_ichgtime(args.trans, dp,
2814 +- XFS_ICHGTIME_CHG);
2815 +- }
2816 +- err2 = xfs_trans_commit(args.trans);
2817 +- xfs_iunlock(dp, XFS_ILOCK_EXCL);
2818 +-
2819 +- return error ? error : err2;
2820 +- }
2821 +-
2822 +- /*
2823 +- * It won't fit in the shortform, transform to a leaf block.
2824 +- * GROT: another possible req'mt for a double-split btree op.
2825 +- */
2826 +- error = xfs_attr_shortform_to_leaf(&args, &leaf_bp);
2827 +- if (error)
2828 +- goto out;
2829 +- /*
2830 +- * Prevent the leaf buffer from being unlocked so that a
2831 +- * concurrent AIL push cannot grab the half-baked leaf
2832 +- * buffer and run into problems with the write verifier.
2833 +- */
2834 +- xfs_trans_bhold(args.trans, leaf_bp);
2835 +- error = xfs_defer_finish(&args.trans);
2836 +- if (error)
2837 +- goto out;
2838 +-
2839 +- /*
2840 +- * Commit the leaf transformation. We'll need another (linked)
2841 +- * transaction to add the new attribute to the leaf, which
2842 +- * means that we have to hold & join the leaf buffer here too.
2843 +- */
2844 +- error = xfs_trans_roll_inode(&args.trans, dp);
2845 +- if (error)
2846 +- goto out;
2847 +- xfs_trans_bjoin(args.trans, leaf_bp);
2848 +- leaf_bp = NULL;
2849 +- }
2850 +-
2851 +- if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
2852 +- error = xfs_attr_leaf_addname(&args);
2853 +- else
2854 +- error = xfs_attr_node_addname(&args);
2855 ++ error = xfs_attr_set_args(&args);
2856 + if (error)
2857 +- goto out;
2858 ++ goto out_trans_cancel;
2859 ++ if (!args.trans) {
2860 ++ /* shortform attribute has already been committed */
2861 ++ goto out_unlock;
2862 ++ }
2863 +
2864 + /*
2865 + * If this is a synchronous mount, make sure that the
2866 +@@ -358,17 +396,14 @@ xfs_attr_set(
2867 + */
2868 + xfs_trans_log_inode(args.trans, dp, XFS_ILOG_CORE);
2869 + error = xfs_trans_commit(args.trans);
2870 ++out_unlock:
2871 + xfs_iunlock(dp, XFS_ILOCK_EXCL);
2872 +-
2873 + return error;
2874 +
2875 +-out:
2876 +- if (leaf_bp)
2877 +- xfs_trans_brelse(args.trans, leaf_bp);
2878 ++out_trans_cancel:
2879 + if (args.trans)
2880 + xfs_trans_cancel(args.trans);
2881 +- xfs_iunlock(dp, XFS_ILOCK_EXCL);
2882 +- return error;
2883 ++ goto out_unlock;
2884 + }
2885 +
2886 + /*
2887 +@@ -423,17 +458,7 @@ xfs_attr_remove(
2888 + */
2889 + xfs_trans_ijoin(args.trans, dp, 0);
2890 +
2891 +- if (!xfs_inode_hasattr(dp)) {
2892 +- error = -ENOATTR;
2893 +- } else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
2894 +- ASSERT(dp->i_afp->if_flags & XFS_IFINLINE);
2895 +- error = xfs_attr_shortform_remove(&args);
2896 +- } else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
2897 +- error = xfs_attr_leaf_removename(&args);
2898 +- } else {
2899 +- error = xfs_attr_node_removename(&args);
2900 +- }
2901 +-
2902 ++ error = xfs_attr_remove_args(&args);
2903 + if (error)
2904 + goto out;
2905 +
2906 +diff --git a/fs/xfs/libxfs/xfs_attr.h b/fs/xfs/libxfs/xfs_attr.h
2907 +new file mode 100644
2908 +index 000000000000..cc04ee0aacfb
2909 +--- /dev/null
2910 ++++ b/fs/xfs/libxfs/xfs_attr.h
2911 +@@ -0,0 +1,150 @@
2912 ++// SPDX-License-Identifier: GPL-2.0
2913 ++/*
2914 ++ * Copyright (c) 2000,2002-2003,2005 Silicon Graphics, Inc.
2915 ++ * All Rights Reserved.
2916 ++ */
2917 ++#ifndef __XFS_ATTR_H__
2918 ++#define __XFS_ATTR_H__
2919 ++
2920 ++struct xfs_inode;
2921 ++struct xfs_da_args;
2922 ++struct xfs_attr_list_context;
2923 ++
2924 ++/*
2925 ++ * Large attribute lists are structured around Btrees where all the data
2926 ++ * elements are in the leaf nodes. Attribute names are hashed into an int,
2927 ++ * then that int is used as the index into the Btree. Since the hashval
2928 ++ * of an attribute name may not be unique, we may have duplicate keys.
2929 ++ * The internal links in the Btree are logical block offsets into the file.
2930 ++ *
2931 ++ * Small attribute lists use a different format and are packed as tightly
2932 ++ * as possible so as to fit into the literal area of the inode.
2933 ++ */
2934 ++
2935 ++/*========================================================================
2936 ++ * External interfaces
2937 ++ *========================================================================*/
2938 ++
2939 ++
2940 ++#define ATTR_DONTFOLLOW 0x0001 /* -- unused, from IRIX -- */
2941 ++#define ATTR_ROOT 0x0002 /* use attrs in root (trusted) namespace */
2942 ++#define ATTR_TRUST 0x0004 /* -- unused, from IRIX -- */
2943 ++#define ATTR_SECURE 0x0008 /* use attrs in security namespace */
2944 ++#define ATTR_CREATE 0x0010 /* pure create: fail if attr already exists */
2945 ++#define ATTR_REPLACE 0x0020 /* pure set: fail if attr does not exist */
2946 ++
2947 ++#define ATTR_KERNOTIME 0x1000 /* [kernel] don't update inode timestamps */
2948 ++#define ATTR_KERNOVAL 0x2000 /* [kernel] get attr size only, not value */
2949 ++
2950 ++#define ATTR_INCOMPLETE 0x4000 /* [kernel] return INCOMPLETE attr keys */
2951 ++
2952 ++#define XFS_ATTR_FLAGS \
2953 ++ { ATTR_DONTFOLLOW, "DONTFOLLOW" }, \
2954 ++ { ATTR_ROOT, "ROOT" }, \
2955 ++ { ATTR_TRUST, "TRUST" }, \
2956 ++ { ATTR_SECURE, "SECURE" }, \
2957 ++ { ATTR_CREATE, "CREATE" }, \
2958 ++ { ATTR_REPLACE, "REPLACE" }, \
2959 ++ { ATTR_KERNOTIME, "KERNOTIME" }, \
2960 ++ { ATTR_KERNOVAL, "KERNOVAL" }, \
2961 ++ { ATTR_INCOMPLETE, "INCOMPLETE" }
2962 ++
2963 ++/*
2964 ++ * The maximum size (into the kernel or returned from the kernel) of an
2965 ++ * attribute value or the buffer used for an attr_list() call. Larger
2966 ++ * sizes will result in an ERANGE return code.
2967 ++ */
2968 ++#define ATTR_MAX_VALUELEN (64*1024) /* max length of a value */
2969 ++
2970 ++/*
2971 ++ * Define how lists of attribute names are returned to the user from
2972 ++ * the attr_list() call. A large, 32bit aligned, buffer is passed in
2973 ++ * along with its size. We put an array of offsets at the top that each
2974 ++ * reference an attrlist_ent_t and pack the attrlist_ent_t's at the bottom.
2975 ++ */
2976 ++typedef struct attrlist {
2977 ++ __s32 al_count; /* number of entries in attrlist */
2978 ++ __s32 al_more; /* T/F: more attrs (do call again) */
2979 ++ __s32 al_offset[1]; /* byte offsets of attrs [var-sized] */
2980 ++} attrlist_t;
2981 ++
2982 ++/*
2983 ++ * Show the interesting info about one attribute. This is what the
2984 ++ * al_offset[i] entry points to.
2985 ++ */
2986 ++typedef struct attrlist_ent { /* data from attr_list() */
2987 ++ __u32 a_valuelen; /* number bytes in value of attr */
2988 ++ char a_name[1]; /* attr name (NULL terminated) */
2989 ++} attrlist_ent_t;
2990 ++
2991 ++/*
2992 ++ * Given a pointer to the (char*) buffer containing the attr_list() result,
2993 ++ * and an index, return a pointer to the indicated attribute in the buffer.
2994 ++ */
2995 ++#define ATTR_ENTRY(buffer, index) \
2996 ++ ((attrlist_ent_t *) \
2997 ++ &((char *)buffer)[ ((attrlist_t *)(buffer))->al_offset[index] ])
2998 ++
2999 ++/*
3000 ++ * Kernel-internal version of the attrlist cursor.
3001 ++ */
3002 ++typedef struct attrlist_cursor_kern {
3003 ++ __u32 hashval; /* hash value of next entry to add */
3004 ++ __u32 blkno; /* block containing entry (suggestion) */
3005 ++ __u32 offset; /* offset in list of equal-hashvals */
3006 ++ __u16 pad1; /* padding to match user-level */
3007 ++ __u8 pad2; /* padding to match user-level */
3008 ++ __u8 initted; /* T/F: cursor has been initialized */
3009 ++} attrlist_cursor_kern_t;
3010 ++
3011 ++
3012 ++/*========================================================================
3013 ++ * Structure used to pass context around among the routines.
3014 ++ *========================================================================*/
3015 ++
3016 ++
3017 ++/* void; state communicated via *context */
3018 ++typedef void (*put_listent_func_t)(struct xfs_attr_list_context *, int,
3019 ++ unsigned char *, int, int);
3020 ++
3021 ++typedef struct xfs_attr_list_context {
3022 ++ struct xfs_trans *tp;
3023 ++ struct xfs_inode *dp; /* inode */
3024 ++ struct attrlist_cursor_kern *cursor; /* position in list */
3025 ++ char *alist; /* output buffer */
3026 ++ int seen_enough; /* T/F: seen enough of list? */
3027 ++ ssize_t count; /* num used entries */
3028 ++ int dupcnt; /* count dup hashvals seen */
3029 ++ int bufsize; /* total buffer size */
3030 ++ int firstu; /* first used byte in buffer */
3031 ++ int flags; /* from VOP call */
3032 ++ int resynch; /* T/F: resynch with cursor */
3033 ++ put_listent_func_t put_listent; /* list output fmt function */
3034 ++ int index; /* index into output buffer */
3035 ++} xfs_attr_list_context_t;
3036 ++
3037 ++
3038 ++/*========================================================================
3039 ++ * Function prototypes for the kernel.
3040 ++ *========================================================================*/
3041 ++
3042 ++/*
3043 ++ * Overall external interface routines.
3044 ++ */
3045 ++int xfs_attr_inactive(struct xfs_inode *dp);
3046 ++int xfs_attr_list_int_ilocked(struct xfs_attr_list_context *);
3047 ++int xfs_attr_list_int(struct xfs_attr_list_context *);
3048 ++int xfs_inode_hasattr(struct xfs_inode *ip);
3049 ++int xfs_attr_get_ilocked(struct xfs_inode *ip, struct xfs_da_args *args);
3050 ++int xfs_attr_get(struct xfs_inode *ip, const unsigned char *name,
3051 ++ unsigned char *value, int *valuelenp, int flags);
3052 ++int xfs_attr_set(struct xfs_inode *dp, const unsigned char *name,
3053 ++ unsigned char *value, int valuelen, int flags);
3054 ++int xfs_attr_set_args(struct xfs_da_args *args);
3055 ++int xfs_attr_remove(struct xfs_inode *dp, const unsigned char *name, int flags);
3056 ++int xfs_attr_remove_args(struct xfs_da_args *args);
3057 ++int xfs_attr_list(struct xfs_inode *dp, char *buffer, int bufsize,
3058 ++ int flags, struct attrlist_cursor_kern *cursor);
3059 ++
3060 ++
3061 ++#endif /* __XFS_ATTR_H__ */
3062 +diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
3063 +index 3a496ffe6551..06a7da8dbda5 100644
3064 +--- a/fs/xfs/libxfs/xfs_bmap.c
3065 ++++ b/fs/xfs/libxfs/xfs_bmap.c
3066 +@@ -1019,6 +1019,34 @@ xfs_bmap_add_attrfork_local(
3067 + return -EFSCORRUPTED;
3068 + }
3069 +
3070 ++/* Set an inode attr fork off based on the format */
3071 ++int
3072 ++xfs_bmap_set_attrforkoff(
3073 ++ struct xfs_inode *ip,
3074 ++ int size,
3075 ++ int *version)
3076 ++{
3077 ++ switch (ip->i_d.di_format) {
3078 ++ case XFS_DINODE_FMT_DEV:
3079 ++ ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
3080 ++ break;
3081 ++ case XFS_DINODE_FMT_LOCAL:
3082 ++ case XFS_DINODE_FMT_EXTENTS:
3083 ++ case XFS_DINODE_FMT_BTREE:
3084 ++ ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
3085 ++ if (!ip->i_d.di_forkoff)
3086 ++ ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
3087 ++ else if ((ip->i_mount->m_flags & XFS_MOUNT_ATTR2) && version)
3088 ++ *version = 2;
3089 ++ break;
3090 ++ default:
3091 ++ ASSERT(0);
3092 ++ return -EINVAL;
3093 ++ }
3094 ++
3095 ++ return 0;
3096 ++}
3097 ++
3098 + /*
3099 + * Convert inode from non-attributed to attributed.
3100 + * Must not be in a transaction, ip must not be locked.
3101 +@@ -1070,26 +1098,9 @@ xfs_bmap_add_attrfork(
3102 +
3103 + xfs_trans_ijoin(tp, ip, 0);
3104 + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
3105 +-
3106 +- switch (ip->i_d.di_format) {
3107 +- case XFS_DINODE_FMT_DEV:
3108 +- ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
3109 +- break;
3110 +- case XFS_DINODE_FMT_LOCAL:
3111 +- case XFS_DINODE_FMT_EXTENTS:
3112 +- case XFS_DINODE_FMT_BTREE:
3113 +- ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
3114 +- if (!ip->i_d.di_forkoff)
3115 +- ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
3116 +- else if (mp->m_flags & XFS_MOUNT_ATTR2)
3117 +- version = 2;
3118 +- break;
3119 +- default:
3120 +- ASSERT(0);
3121 +- error = -EINVAL;
3122 ++ error = xfs_bmap_set_attrforkoff(ip, size, &version);
3123 ++ if (error)
3124 + goto trans_cancel;
3125 +- }
3126 +-
3127 + ASSERT(ip->i_afp == NULL);
3128 + ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
3129 + ip->i_afp->if_flags = XFS_IFEXTENTS;
3130 +@@ -1178,7 +1189,10 @@ xfs_iread_extents(
3131 + * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
3132 + */
3133 + level = be16_to_cpu(block->bb_level);
3134 +- ASSERT(level > 0);
3135 ++ if (unlikely(level == 0)) {
3136 ++ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
3137 ++ return -EFSCORRUPTED;
3138 ++ }
3139 + pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
3140 + bno = be64_to_cpu(*pp);
3141 +
3142 +diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
3143 +index b6e9b639e731..488dc8860fd7 100644
3144 +--- a/fs/xfs/libxfs/xfs_bmap.h
3145 ++++ b/fs/xfs/libxfs/xfs_bmap.h
3146 +@@ -183,6 +183,7 @@ void xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
3147 + xfs_filblks_t len);
3148 + void xfs_trim_extent_eof(struct xfs_bmbt_irec *, struct xfs_inode *);
3149 + int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
3150 ++int xfs_bmap_set_attrforkoff(struct xfs_inode *ip, int size, int *version);
3151 + void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
3152 + void __xfs_bmap_add_free(struct xfs_trans *tp, xfs_fsblock_t bno,
3153 + xfs_filblks_t len, struct xfs_owner_info *oinfo,
3154 +diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
3155 +index e792b167150a..c52beee31836 100644
3156 +--- a/fs/xfs/libxfs/xfs_defer.c
3157 ++++ b/fs/xfs/libxfs/xfs_defer.c
3158 +@@ -266,13 +266,15 @@ xfs_defer_trans_roll(
3159 +
3160 + trace_xfs_defer_trans_roll(tp, _RET_IP_);
3161 +
3162 +- /* Roll the transaction. */
3163 ++ /*
3164 ++ * Roll the transaction. Rolling always gives a new transaction (even
3165 ++ * if committing the old one fails!) to hand back to the caller, so we
3166 ++ * join the held resources to the new transaction so that we always
3167 ++ * return with the held resources joined to @tpp, no matter what
3168 ++ * happened.
3169 ++ */
3170 + error = xfs_trans_roll(tpp);
3171 + tp = *tpp;
3172 +- if (error) {
3173 +- trace_xfs_defer_trans_roll_error(tp, error);
3174 +- return error;
3175 +- }
3176 +
3177 + /* Rejoin the joined inodes. */
3178 + for (i = 0; i < ipcount; i++)
3179 +@@ -284,6 +286,8 @@ xfs_defer_trans_roll(
3180 + xfs_trans_bhold(tp, bplist[i]);
3181 + }
3182 +
3183 ++ if (error)
3184 ++ trace_xfs_defer_trans_roll_error(tp, error);
3185 + return error;
3186 + }
3187 +
3188 +diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/xfs_attr.h
3189 +deleted file mode 100644
3190 +index 033ff8c478e2..000000000000
3191 +--- a/fs/xfs/xfs_attr.h
3192 ++++ /dev/null
3193 +@@ -1,148 +0,0 @@
3194 +-// SPDX-License-Identifier: GPL-2.0
3195 +-/*
3196 +- * Copyright (c) 2000,2002-2003,2005 Silicon Graphics, Inc.
3197 +- * All Rights Reserved.
3198 +- */
3199 +-#ifndef __XFS_ATTR_H__
3200 +-#define __XFS_ATTR_H__
3201 +-
3202 +-struct xfs_inode;
3203 +-struct xfs_da_args;
3204 +-struct xfs_attr_list_context;
3205 +-
3206 +-/*
3207 +- * Large attribute lists are structured around Btrees where all the data
3208 +- * elements are in the leaf nodes. Attribute names are hashed into an int,
3209 +- * then that int is used as the index into the Btree. Since the hashval
3210 +- * of an attribute name may not be unique, we may have duplicate keys.
3211 +- * The internal links in the Btree are logical block offsets into the file.
3212 +- *
3213 +- * Small attribute lists use a different format and are packed as tightly
3214 +- * as possible so as to fit into the literal area of the inode.
3215 +- */
3216 +-
3217 +-/*========================================================================
3218 +- * External interfaces
3219 +- *========================================================================*/
3220 +-
3221 +-
3222 +-#define ATTR_DONTFOLLOW 0x0001 /* -- unused, from IRIX -- */
3223 +-#define ATTR_ROOT 0x0002 /* use attrs in root (trusted) namespace */
3224 +-#define ATTR_TRUST 0x0004 /* -- unused, from IRIX -- */
3225 +-#define ATTR_SECURE 0x0008 /* use attrs in security namespace */
3226 +-#define ATTR_CREATE 0x0010 /* pure create: fail if attr already exists */
3227 +-#define ATTR_REPLACE 0x0020 /* pure set: fail if attr does not exist */
3228 +-
3229 +-#define ATTR_KERNOTIME 0x1000 /* [kernel] don't update inode timestamps */
3230 +-#define ATTR_KERNOVAL 0x2000 /* [kernel] get attr size only, not value */
3231 +-
3232 +-#define ATTR_INCOMPLETE 0x4000 /* [kernel] return INCOMPLETE attr keys */
3233 +-
3234 +-#define XFS_ATTR_FLAGS \
3235 +- { ATTR_DONTFOLLOW, "DONTFOLLOW" }, \
3236 +- { ATTR_ROOT, "ROOT" }, \
3237 +- { ATTR_TRUST, "TRUST" }, \
3238 +- { ATTR_SECURE, "SECURE" }, \
3239 +- { ATTR_CREATE, "CREATE" }, \
3240 +- { ATTR_REPLACE, "REPLACE" }, \
3241 +- { ATTR_KERNOTIME, "KERNOTIME" }, \
3242 +- { ATTR_KERNOVAL, "KERNOVAL" }, \
3243 +- { ATTR_INCOMPLETE, "INCOMPLETE" }
3244 +-
3245 +-/*
3246 +- * The maximum size (into the kernel or returned from the kernel) of an
3247 +- * attribute value or the buffer used for an attr_list() call. Larger
3248 +- * sizes will result in an ERANGE return code.
3249 +- */
3250 +-#define ATTR_MAX_VALUELEN (64*1024) /* max length of a value */
3251 +-
3252 +-/*
3253 +- * Define how lists of attribute names are returned to the user from
3254 +- * the attr_list() call. A large, 32bit aligned, buffer is passed in
3255 +- * along with its size. We put an array of offsets at the top that each
3256 +- * reference an attrlist_ent_t and pack the attrlist_ent_t's at the bottom.
3257 +- */
3258 +-typedef struct attrlist {
3259 +- __s32 al_count; /* number of entries in attrlist */
3260 +- __s32 al_more; /* T/F: more attrs (do call again) */
3261 +- __s32 al_offset[1]; /* byte offsets of attrs [var-sized] */
3262 +-} attrlist_t;
3263 +-
3264 +-/*
3265 +- * Show the interesting info about one attribute. This is what the
3266 +- * al_offset[i] entry points to.
3267 +- */
3268 +-typedef struct attrlist_ent { /* data from attr_list() */
3269 +- __u32 a_valuelen; /* number bytes in value of attr */
3270 +- char a_name[1]; /* attr name (NULL terminated) */
3271 +-} attrlist_ent_t;
3272 +-
3273 +-/*
3274 +- * Given a pointer to the (char*) buffer containing the attr_list() result,
3275 +- * and an index, return a pointer to the indicated attribute in the buffer.
3276 +- */
3277 +-#define ATTR_ENTRY(buffer, index) \
3278 +- ((attrlist_ent_t *) \
3279 +- &((char *)buffer)[ ((attrlist_t *)(buffer))->al_offset[index] ])
3280 +-
3281 +-/*
3282 +- * Kernel-internal version of the attrlist cursor.
3283 +- */
3284 +-typedef struct attrlist_cursor_kern {
3285 +- __u32 hashval; /* hash value of next entry to add */
3286 +- __u32 blkno; /* block containing entry (suggestion) */
3287 +- __u32 offset; /* offset in list of equal-hashvals */
3288 +- __u16 pad1; /* padding to match user-level */
3289 +- __u8 pad2; /* padding to match user-level */
3290 +- __u8 initted; /* T/F: cursor has been initialized */
3291 +-} attrlist_cursor_kern_t;
3292 +-
3293 +-
3294 +-/*========================================================================
3295 +- * Structure used to pass context around among the routines.
3296 +- *========================================================================*/
3297 +-
3298 +-
3299 +-/* void; state communicated via *context */
3300 +-typedef void (*put_listent_func_t)(struct xfs_attr_list_context *, int,
3301 +- unsigned char *, int, int);
3302 +-
3303 +-typedef struct xfs_attr_list_context {
3304 +- struct xfs_trans *tp;
3305 +- struct xfs_inode *dp; /* inode */
3306 +- struct attrlist_cursor_kern *cursor; /* position in list */
3307 +- char *alist; /* output buffer */
3308 +- int seen_enough; /* T/F: seen enough of list? */
3309 +- ssize_t count; /* num used entries */
3310 +- int dupcnt; /* count dup hashvals seen */
3311 +- int bufsize; /* total buffer size */
3312 +- int firstu; /* first used byte in buffer */
3313 +- int flags; /* from VOP call */
3314 +- int resynch; /* T/F: resynch with cursor */
3315 +- put_listent_func_t put_listent; /* list output fmt function */
3316 +- int index; /* index into output buffer */
3317 +-} xfs_attr_list_context_t;
3318 +-
3319 +-
3320 +-/*========================================================================
3321 +- * Function prototypes for the kernel.
3322 +- *========================================================================*/
3323 +-
3324 +-/*
3325 +- * Overall external interface routines.
3326 +- */
3327 +-int xfs_attr_inactive(struct xfs_inode *dp);
3328 +-int xfs_attr_list_int_ilocked(struct xfs_attr_list_context *);
3329 +-int xfs_attr_list_int(struct xfs_attr_list_context *);
3330 +-int xfs_inode_hasattr(struct xfs_inode *ip);
3331 +-int xfs_attr_get_ilocked(struct xfs_inode *ip, struct xfs_da_args *args);
3332 +-int xfs_attr_get(struct xfs_inode *ip, const unsigned char *name,
3333 +- unsigned char *value, int *valuelenp, int flags);
3334 +-int xfs_attr_set(struct xfs_inode *dp, const unsigned char *name,
3335 +- unsigned char *value, int valuelen, int flags);
3336 +-int xfs_attr_remove(struct xfs_inode *dp, const unsigned char *name, int flags);
3337 +-int xfs_attr_list(struct xfs_inode *dp, char *buffer, int bufsize,
3338 +- int flags, struct attrlist_cursor_kern *cursor);
3339 +-
3340 +-
3341 +-#endif /* __XFS_ATTR_H__ */
3342 +diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
3343 +index 87e6dd5326d5..a1af984e4913 100644
3344 +--- a/fs/xfs/xfs_dquot.c
3345 ++++ b/fs/xfs/xfs_dquot.c
3346 +@@ -277,7 +277,8 @@ xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
3347 +
3348 + /*
3349 + * Ensure that the given in-core dquot has a buffer on disk backing it, and
3350 +- * return the buffer. This is called when the bmapi finds a hole.
3351 ++ * return the buffer locked and held. This is called when the bmapi finds a
3352 ++ * hole.
3353 + */
3354 + STATIC int
3355 + xfs_dquot_disk_alloc(
3356 +@@ -355,13 +356,14 @@ xfs_dquot_disk_alloc(
3357 + * If everything succeeds, the caller of this function is returned a
3358 + * buffer that is locked and held to the transaction. The caller
3359 + * is responsible for unlocking any buffer passed back, either
3360 +- * manually or by committing the transaction.
3361 ++ * manually or by committing the transaction. On error, the buffer is
3362 ++ * released and not passed back.
3363 + */
3364 + xfs_trans_bhold(tp, bp);
3365 + error = xfs_defer_finish(tpp);
3366 +- tp = *tpp;
3367 + if (error) {
3368 +- xfs_buf_relse(bp);
3369 ++ xfs_trans_bhold_release(*tpp, bp);
3370 ++ xfs_trans_brelse(*tpp, bp);
3371 + return error;
3372 + }
3373 + *bpp = bp;
3374 +@@ -521,7 +523,6 @@ xfs_qm_dqread_alloc(
3375 + struct xfs_buf **bpp)
3376 + {
3377 + struct xfs_trans *tp;
3378 +- struct xfs_buf *bp;
3379 + int error;
3380 +
3381 + error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_dqalloc,
3382 +@@ -529,7 +530,7 @@ xfs_qm_dqread_alloc(
3383 + if (error)
3384 + goto err;
3385 +
3386 +- error = xfs_dquot_disk_alloc(&tp, dqp, &bp);
3387 ++ error = xfs_dquot_disk_alloc(&tp, dqp, bpp);
3388 + if (error)
3389 + goto err_cancel;
3390 +
3391 +@@ -539,10 +540,10 @@ xfs_qm_dqread_alloc(
3392 + * Buffer was held to the transaction, so we have to unlock it
3393 + * manually here because we're not passing it back.
3394 + */
3395 +- xfs_buf_relse(bp);
3396 ++ xfs_buf_relse(*bpp);
3397 ++ *bpp = NULL;
3398 + goto err;
3399 + }
3400 +- *bpp = bp;
3401 + return 0;
3402 +
3403 + err_cancel:
3404 +diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
3405 +index 74047bd0c1ae..e427ad097e2e 100644
3406 +--- a/fs/xfs/xfs_iops.c
3407 ++++ b/fs/xfs/xfs_iops.c
3408 +@@ -803,6 +803,7 @@ xfs_setattr_nonsize(
3409 +
3410 + out_cancel:
3411 + xfs_trans_cancel(tp);
3412 ++ xfs_iunlock(ip, XFS_ILOCK_EXCL);
3413 + out_dqrele:
3414 + xfs_qm_dqrele(udqp);
3415 + xfs_qm_dqrele(gdqp);
3416 +diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
3417 +index 147546e0c11b..815dcfa64743 100644
3418 +--- a/include/trace/events/rxrpc.h
3419 ++++ b/include/trace/events/rxrpc.h
3420 +@@ -500,10 +500,10 @@ rxrpc_tx_points;
3421 + #define E_(a, b) { a, b }
3422 +
3423 + TRACE_EVENT(rxrpc_local,
3424 +- TP_PROTO(struct rxrpc_local *local, enum rxrpc_local_trace op,
3425 ++ TP_PROTO(unsigned int local_debug_id, enum rxrpc_local_trace op,
3426 + int usage, const void *where),
3427 +
3428 +- TP_ARGS(local, op, usage, where),
3429 ++ TP_ARGS(local_debug_id, op, usage, where),
3430 +
3431 + TP_STRUCT__entry(
3432 + __field(unsigned int, local )
3433 +@@ -513,7 +513,7 @@ TRACE_EVENT(rxrpc_local,
3434 + ),
3435 +
3436 + TP_fast_assign(
3437 +- __entry->local = local->debug_id;
3438 ++ __entry->local = local_debug_id;
3439 + __entry->op = op;
3440 + __entry->usage = usage;
3441 + __entry->where = where;
3442 +diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
3443 +index 8e009cee6517..26814a14013c 100644
3444 +--- a/kernel/irq/irqdesc.c
3445 ++++ b/kernel/irq/irqdesc.c
3446 +@@ -294,6 +294,18 @@ static void irq_sysfs_add(int irq, struct irq_desc *desc)
3447 + }
3448 + }
3449 +
3450 ++static void irq_sysfs_del(struct irq_desc *desc)
3451 ++{
3452 ++ /*
3453 ++ * If irq_sysfs_init() has not yet been invoked (early boot), then
3454 ++ * irq_kobj_base is NULL and the descriptor was never added.
3455 ++ * kobject_del() complains about an object with no parent, so make
3456 ++ * it conditional.
3457 ++ */
3458 ++ if (irq_kobj_base)
3459 ++ kobject_del(&desc->kobj);
3460 ++}
3461 ++
3462 + static int __init irq_sysfs_init(void)
3463 + {
3464 + struct irq_desc *desc;
3465 +@@ -324,6 +336,7 @@ static struct kobj_type irq_kobj_type = {
3466 + };
3467 +
3468 + static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
3469 ++static void irq_sysfs_del(struct irq_desc *desc) {}
3470 +
3471 + #endif /* CONFIG_SYSFS */
3472 +
3473 +@@ -437,7 +450,7 @@ static void free_desc(unsigned int irq)
3474 + * The sysfs entry must be serialized against a concurrent
3475 + * irq_sysfs_init() as well.
3476 + */
3477 +- kobject_del(&desc->kobj);
3478 ++ irq_sysfs_del(desc);
3479 + delete_irq_desc(irq);
3480 +
3481 + /*
3482 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
3483 +index 6fad1864ba03..09ce8528bbdd 100644
3484 +--- a/mm/huge_memory.c
3485 ++++ b/mm/huge_memory.c
3486 +@@ -33,6 +33,7 @@
3487 + #include <linux/page_idle.h>
3488 + #include <linux/shmem_fs.h>
3489 + #include <linux/oom.h>
3490 ++#include <linux/page_owner.h>
3491 +
3492 + #include <asm/tlb.h>
3493 + #include <asm/pgalloc.h>
3494 +@@ -2477,6 +2478,9 @@ static void __split_huge_page(struct page *page, struct list_head *list,
3495 + }
3496 +
3497 + ClearPageCompound(head);
3498 ++
3499 ++ split_page_owner(head, HPAGE_PMD_ORDER);
3500 ++
3501 + /* See comment in __split_huge_page_tail() */
3502 + if (PageAnon(head)) {
3503 + /* Additional pin to radix tree of swap cache */
3504 +diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
3505 +index 9da65552e7ca..c2c4f739da8f 100644
3506 +--- a/mm/zsmalloc.c
3507 ++++ b/mm/zsmalloc.c
3508 +@@ -53,6 +53,7 @@
3509 + #include <linux/zpool.h>
3510 + #include <linux/mount.h>
3511 + #include <linux/migrate.h>
3512 ++#include <linux/wait.h>
3513 + #include <linux/pagemap.h>
3514 + #include <linux/fs.h>
3515 +
3516 +@@ -267,6 +268,10 @@ struct zs_pool {
3517 + #ifdef CONFIG_COMPACTION
3518 + struct inode *inode;
3519 + struct work_struct free_work;
3520 ++ /* A wait queue for when migration races with async_free_zspage() */
3521 ++ struct wait_queue_head migration_wait;
3522 ++ atomic_long_t isolated_pages;
3523 ++ bool destroying;
3524 + #endif
3525 + };
3526 +
3527 +@@ -1882,6 +1887,31 @@ static void dec_zspage_isolation(struct zspage *zspage)
3528 + zspage->isolated--;
3529 + }
3530 +
3531 ++static void putback_zspage_deferred(struct zs_pool *pool,
3532 ++ struct size_class *class,
3533 ++ struct zspage *zspage)
3534 ++{
3535 ++ enum fullness_group fg;
3536 ++
3537 ++ fg = putback_zspage(class, zspage);
3538 ++ if (fg == ZS_EMPTY)
3539 ++ schedule_work(&pool->free_work);
3540 ++
3541 ++}
3542 ++
3543 ++static inline void zs_pool_dec_isolated(struct zs_pool *pool)
3544 ++{
3545 ++ VM_BUG_ON(atomic_long_read(&pool->isolated_pages) <= 0);
3546 ++ atomic_long_dec(&pool->isolated_pages);
3547 ++ /*
3548 ++ * There's no possibility of racing, since wait_for_isolated_drain()
3549 ++ * checks the isolated count under &class->lock after enqueuing
3550 ++ * on migration_wait.
3551 ++ */
3552 ++ if (atomic_long_read(&pool->isolated_pages) == 0 && pool->destroying)
3553 ++ wake_up_all(&pool->migration_wait);
3554 ++}
3555 ++
3556 + static void replace_sub_page(struct size_class *class, struct zspage *zspage,
3557 + struct page *newpage, struct page *oldpage)
3558 + {
3559 +@@ -1951,6 +1981,7 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
3560 + */
3561 + if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
3562 + get_zspage_mapping(zspage, &class_idx, &fullness);
3563 ++ atomic_long_inc(&pool->isolated_pages);
3564 + remove_zspage(class, zspage, fullness);
3565 + }
3566 +
3567 +@@ -2050,8 +2081,16 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
3568 + * Page migration is done so let's putback isolated zspage to
3569 + * the list if @page is final isolated subpage in the zspage.
3570 + */
3571 +- if (!is_zspage_isolated(zspage))
3572 +- putback_zspage(class, zspage);
3573 ++ if (!is_zspage_isolated(zspage)) {
3574 ++ /*
3575 ++ * We cannot race with zs_destroy_pool() here because we wait
3576 ++ * for isolation to hit zero before we start destroying.
3577 ++ * Also, we ensure that everyone can see pool->destroying before
3578 ++ * we start waiting.
3579 ++ */
3580 ++ putback_zspage_deferred(pool, class, zspage);
3581 ++ zs_pool_dec_isolated(pool);
3582 ++ }
3583 +
3584 + reset_page(page);
3585 + put_page(page);
3586 +@@ -2097,13 +2136,12 @@ static void zs_page_putback(struct page *page)
3587 + spin_lock(&class->lock);
3588 + dec_zspage_isolation(zspage);
3589 + if (!is_zspage_isolated(zspage)) {
3590 +- fg = putback_zspage(class, zspage);
3591 + /*
3592 + * Due to page_lock, we cannot free zspage immediately
3593 + * so let's defer.
3594 + */
3595 +- if (fg == ZS_EMPTY)
3596 +- schedule_work(&pool->free_work);
3597 ++ putback_zspage_deferred(pool, class, zspage);
3598 ++ zs_pool_dec_isolated(pool);
3599 + }
3600 + spin_unlock(&class->lock);
3601 + }
3602 +@@ -2127,8 +2165,36 @@ static int zs_register_migration(struct zs_pool *pool)
3603 + return 0;
3604 + }
3605 +
3606 ++static bool pool_isolated_are_drained(struct zs_pool *pool)
3607 ++{
3608 ++ return atomic_long_read(&pool->isolated_pages) == 0;
3609 ++}
3610 ++
3611 ++/* Function for resolving migration */
3612 ++static void wait_for_isolated_drain(struct zs_pool *pool)
3613 ++{
3614 ++
3615 ++ /*
3616 ++ * We're in the process of destroying the pool, so there are no
3617 ++ * active allocations. zs_page_isolate() fails for completely free
3618 ++ * zspages, so we need only wait for the zs_pool's isolated
3619 ++ * count to hit zero.
3620 ++ */
3621 ++ wait_event(pool->migration_wait,
3622 ++ pool_isolated_are_drained(pool));
3623 ++}
3624 ++
3625 + static void zs_unregister_migration(struct zs_pool *pool)
3626 + {
3627 ++ pool->destroying = true;
3628 ++ /*
3629 ++ * We need a memory barrier here to ensure global visibility of
3630 ++ * pool->destroying. Thus pool->isolated_pages will either be 0 in which
3631 ++ * case we don't care, or it will be > 0 and pool->destroying will
3632 ++ * ensure that we wake up once isolation hits 0.
3633 ++ */
3634 ++ smp_mb();
3635 ++ wait_for_isolated_drain(pool); /* This can block */
3636 + flush_work(&pool->free_work);
3637 + iput(pool->inode);
3638 + }
3639 +@@ -2366,6 +2432,8 @@ struct zs_pool *zs_create_pool(const char *name)
3640 + if (!pool->name)
3641 + goto err;
3642 +
3643 ++ init_waitqueue_head(&pool->migration_wait);
3644 ++
3645 + if (create_cache(pool))
3646 + goto err;
3647 +
3648 +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
3649 +index 995b3842ba7c..62ffc989a44a 100644
3650 +--- a/net/bridge/netfilter/ebtables.c
3651 ++++ b/net/bridge/netfilter/ebtables.c
3652 +@@ -2274,8 +2274,10 @@ static int compat_do_replace(struct net *net, void __user *user,
3653 + state.buf_kern_len = size64;
3654 +
3655 + ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
3656 +- if (WARN_ON(ret < 0))
3657 ++ if (WARN_ON(ret < 0)) {
3658 ++ vfree(entries_tmp);
3659 + goto out_unlock;
3660 ++ }
3661 +
3662 + vfree(entries_tmp);
3663 + tmp.entries_size = size64;
3664 +diff --git a/net/can/gw.c b/net/can/gw.c
3665 +index 53859346dc9a..bd2161470e45 100644
3666 +--- a/net/can/gw.c
3667 ++++ b/net/can/gw.c
3668 +@@ -1046,32 +1046,50 @@ static __init int cgw_module_init(void)
3669 + pr_info("can: netlink gateway (rev " CAN_GW_VERSION ") max_hops=%d\n",
3670 + max_hops);
3671 +
3672 +- register_pernet_subsys(&cangw_pernet_ops);
3673 ++ ret = register_pernet_subsys(&cangw_pernet_ops);
3674 ++ if (ret)
3675 ++ return ret;
3676 ++
3677 ++ ret = -ENOMEM;
3678 + cgw_cache = kmem_cache_create("can_gw", sizeof(struct cgw_job),
3679 + 0, 0, NULL);
3680 +-
3681 + if (!cgw_cache)
3682 +- return -ENOMEM;
3683 ++ goto out_cache_create;
3684 +
3685 + /* set notifier */
3686 + notifier.notifier_call = cgw_notifier;
3687 +- register_netdevice_notifier(&notifier);
3688 ++ ret = register_netdevice_notifier(&notifier);
3689 ++ if (ret)
3690 ++ goto out_register_notifier;
3691 +
3692 + ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_GETROUTE,
3693 + NULL, cgw_dump_jobs, 0);
3694 +- if (ret) {
3695 +- unregister_netdevice_notifier(&notifier);
3696 +- kmem_cache_destroy(cgw_cache);
3697 +- return -ENOBUFS;
3698 +- }
3699 +-
3700 +- /* Only the first call to rtnl_register_module can fail */
3701 +- rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE,
3702 +- cgw_create_job, NULL, 0);
3703 +- rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE,
3704 +- cgw_remove_job, NULL, 0);
3705 ++ if (ret)
3706 ++ goto out_rtnl_register1;
3707 ++
3708 ++ ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE,
3709 ++ cgw_create_job, NULL, 0);
3710 ++ if (ret)
3711 ++ goto out_rtnl_register2;
3712 ++ ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE,
3713 ++ cgw_remove_job, NULL, 0);
3714 ++ if (ret)
3715 ++ goto out_rtnl_register3;
3716 +
3717 + return 0;
3718 ++
3719 ++out_rtnl_register3:
3720 ++ rtnl_unregister(PF_CAN, RTM_NEWROUTE);
3721 ++out_rtnl_register2:
3722 ++ rtnl_unregister(PF_CAN, RTM_GETROUTE);
3723 ++out_rtnl_register1:
3724 ++ unregister_netdevice_notifier(&notifier);
3725 ++out_register_notifier:
3726 ++ kmem_cache_destroy(cgw_cache);
3727 ++out_cache_create:
3728 ++ unregister_pernet_subsys(&cangw_pernet_ops);
3729 ++
3730 ++ return ret;
3731 + }
3732 +
3733 + static __exit void cgw_module_exit(void)
3734 +diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
3735 +index 60934bd8796c..76c41a84550e 100644
3736 +--- a/net/ceph/osd_client.c
3737 ++++ b/net/ceph/osd_client.c
3738 +@@ -1423,7 +1423,7 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
3739 + struct ceph_osds up, acting;
3740 + bool force_resend = false;
3741 + bool unpaused = false;
3742 +- bool legacy_change;
3743 ++ bool legacy_change = false;
3744 + bool split = false;
3745 + bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
3746 + bool recovery_deletes = ceph_osdmap_flag(osdc,
3747 +@@ -1511,15 +1511,14 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
3748 + t->osd = acting.primary;
3749 + }
3750 +
3751 +- if (unpaused || legacy_change || force_resend ||
3752 +- (split && con && CEPH_HAVE_FEATURE(con->peer_features,
3753 +- RESEND_ON_SPLIT)))
3754 ++ if (unpaused || legacy_change || force_resend || split)
3755 + ct_res = CALC_TARGET_NEED_RESEND;
3756 + else
3757 + ct_res = CALC_TARGET_NO_ACTION;
3758 +
3759 + out:
3760 +- dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
3761 ++ dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused,
3762 ++ legacy_change, force_resend, split, ct_res, t->osd);
3763 + return ct_res;
3764 + }
3765 +
3766 +diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
3767 +index 13ade5782847..4f01321e793c 100644
3768 +--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
3769 ++++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
3770 +@@ -230,7 +230,7 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
3771 +
3772 + e.id = ip_to_id(map, ip);
3773 +
3774 +- if (opt->flags & IPSET_DIM_ONE_SRC)
3775 ++ if (opt->flags & IPSET_DIM_TWO_SRC)
3776 + ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
3777 + else
3778 + ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
3779 +diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
3780 +index 1577f2f76060..e2538c578671 100644
3781 +--- a/net/netfilter/ipset/ip_set_core.c
3782 ++++ b/net/netfilter/ipset/ip_set_core.c
3783 +@@ -1157,7 +1157,7 @@ static int ip_set_rename(struct net *net, struct sock *ctnl,
3784 + return -ENOENT;
3785 +
3786 + write_lock_bh(&ip_set_ref_lock);
3787 +- if (set->ref != 0) {
3788 ++ if (set->ref != 0 || set->ref_netlink != 0) {
3789 + ret = -IPSET_ERR_REFERENCED;
3790 + goto out;
3791 + }
3792 +diff --git a/net/netfilter/ipset/ip_set_hash_ipmac.c b/net/netfilter/ipset/ip_set_hash_ipmac.c
3793 +index fd87de3ed55b..16ec822e4044 100644
3794 +--- a/net/netfilter/ipset/ip_set_hash_ipmac.c
3795 ++++ b/net/netfilter/ipset/ip_set_hash_ipmac.c
3796 +@@ -95,15 +95,11 @@ hash_ipmac4_kadt(struct ip_set *set, const struct sk_buff *skb,
3797 + struct hash_ipmac4_elem e = { .ip = 0, { .foo[0] = 0, .foo[1] = 0 } };
3798 + struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
3799 +
3800 +- /* MAC can be src only */
3801 +- if (!(opt->flags & IPSET_DIM_TWO_SRC))
3802 +- return 0;
3803 +-
3804 + if (skb_mac_header(skb) < skb->head ||
3805 + (skb_mac_header(skb) + ETH_HLEN) > skb->data)
3806 + return -EINVAL;
3807 +
3808 +- if (opt->flags & IPSET_DIM_ONE_SRC)
3809 ++ if (opt->flags & IPSET_DIM_TWO_SRC)
3810 + ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
3811 + else
3812 + ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
3813 +diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
3814 +index d76e5e58905d..7319d3ca30e9 100644
3815 +--- a/net/rxrpc/af_rxrpc.c
3816 ++++ b/net/rxrpc/af_rxrpc.c
3817 +@@ -195,7 +195,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
3818 +
3819 + service_in_use:
3820 + write_unlock(&local->services_lock);
3821 +- rxrpc_put_local(local);
3822 ++ rxrpc_unuse_local(local);
3823 + ret = -EADDRINUSE;
3824 + error_unlock:
3825 + release_sock(&rx->sk);
3826 +@@ -908,7 +908,7 @@ static int rxrpc_release_sock(struct sock *sk)
3827 + rxrpc_queue_work(&rxnet->service_conn_reaper);
3828 + rxrpc_queue_work(&rxnet->client_conn_reaper);
3829 +
3830 +- rxrpc_put_local(rx->local);
3831 ++ rxrpc_unuse_local(rx->local);
3832 + rx->local = NULL;
3833 + key_put(rx->key);
3834 + rx->key = NULL;
3835 +diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
3836 +index 03e0fc8c183f..dfd9eab77cc8 100644
3837 +--- a/net/rxrpc/ar-internal.h
3838 ++++ b/net/rxrpc/ar-internal.h
3839 +@@ -258,7 +258,8 @@ struct rxrpc_security {
3840 + */
3841 + struct rxrpc_local {
3842 + struct rcu_head rcu;
3843 +- atomic_t usage;
3844 ++ atomic_t active_users; /* Number of users of the local endpoint */
3845 ++ atomic_t usage; /* Number of references to the structure */
3846 + struct rxrpc_net *rxnet; /* The network ns in which this resides */
3847 + struct list_head link;
3848 + struct socket *socket; /* my UDP socket */
3849 +@@ -998,6 +999,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc
3850 + struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *);
3851 + struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *);
3852 + void rxrpc_put_local(struct rxrpc_local *);
3853 ++struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *);
3854 ++void rxrpc_unuse_local(struct rxrpc_local *);
3855 + void rxrpc_queue_local(struct rxrpc_local *);
3856 + void rxrpc_destroy_all_locals(struct rxrpc_net *);
3857 +
3858 +@@ -1057,6 +1060,7 @@ void rxrpc_destroy_all_peers(struct rxrpc_net *);
3859 + struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
3860 + struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
3861 + void rxrpc_put_peer(struct rxrpc_peer *);
3862 ++void rxrpc_put_peer_locked(struct rxrpc_peer *);
3863 +
3864 + /*
3865 + * proc.c
3866 +diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
3867 +index d591f54cb91f..7965600ee5de 100644
3868 +--- a/net/rxrpc/input.c
3869 ++++ b/net/rxrpc/input.c
3870 +@@ -1106,8 +1106,12 @@ static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
3871 + {
3872 + _enter("%p,%p", local, skb);
3873 +
3874 +- skb_queue_tail(&local->event_queue, skb);
3875 +- rxrpc_queue_local(local);
3876 ++ if (rxrpc_get_local_maybe(local)) {
3877 ++ skb_queue_tail(&local->event_queue, skb);
3878 ++ rxrpc_queue_local(local);
3879 ++ } else {
3880 ++ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
3881 ++ }
3882 + }
3883 +
3884 + /*
3885 +@@ -1117,8 +1121,12 @@ static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
3886 + {
3887 + CHECK_SLAB_OKAY(&local->usage);
3888 +
3889 +- skb_queue_tail(&local->reject_queue, skb);
3890 +- rxrpc_queue_local(local);
3891 ++ if (rxrpc_get_local_maybe(local)) {
3892 ++ skb_queue_tail(&local->reject_queue, skb);
3893 ++ rxrpc_queue_local(local);
3894 ++ } else {
3895 ++ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
3896 ++ }
3897 + }
3898 +
3899 + /*
3900 +diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
3901 +index 10317dbdab5f..c752ad487067 100644
3902 +--- a/net/rxrpc/local_object.c
3903 ++++ b/net/rxrpc/local_object.c
3904 +@@ -83,6 +83,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
3905 + local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
3906 + if (local) {
3907 + atomic_set(&local->usage, 1);
3908 ++ atomic_set(&local->active_users, 1);
3909 + local->rxnet = rxnet;
3910 + INIT_LIST_HEAD(&local->link);
3911 + INIT_WORK(&local->processor, rxrpc_local_processor);
3912 +@@ -96,7 +97,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
3913 + local->debug_id = atomic_inc_return(&rxrpc_debug_id);
3914 + memcpy(&local->srx, srx, sizeof(*srx));
3915 + local->srx.srx_service = 0;
3916 +- trace_rxrpc_local(local, rxrpc_local_new, 1, NULL);
3917 ++ trace_rxrpc_local(local->debug_id, rxrpc_local_new, 1, NULL);
3918 + }
3919 +
3920 + _leave(" = %p", local);
3921 +@@ -270,11 +271,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
3922 + * bind the transport socket may still fail if we're attempting
3923 + * to use a local address that the dying object is still using.
3924 + */
3925 +- if (!rxrpc_get_local_maybe(local)) {
3926 +- cursor = cursor->next;
3927 +- list_del_init(&local->link);
3928 ++ if (!rxrpc_use_local(local))
3929 + break;
3930 +- }
3931 +
3932 + age = "old";
3933 + goto found;
3934 +@@ -288,7 +286,10 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
3935 + if (ret < 0)
3936 + goto sock_error;
3937 +
3938 +- list_add_tail(&local->link, cursor);
3939 ++ if (cursor != &rxnet->local_endpoints)
3940 ++ list_replace_init(cursor, &local->link);
3941 ++ else
3942 ++ list_add_tail(&local->link, cursor);
3943 + age = "new";
3944 +
3945 + found:
3946 +@@ -324,7 +325,7 @@ struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local)
3947 + int n;
3948 +
3949 + n = atomic_inc_return(&local->usage);
3950 +- trace_rxrpc_local(local, rxrpc_local_got, n, here);
3951 ++ trace_rxrpc_local(local->debug_id, rxrpc_local_got, n, here);
3952 + return local;
3953 + }
3954 +
3955 +@@ -338,7 +339,8 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
3956 + if (local) {
3957 + int n = atomic_fetch_add_unless(&local->usage, 1, 0);
3958 + if (n > 0)
3959 +- trace_rxrpc_local(local, rxrpc_local_got, n + 1, here);
3960 ++ trace_rxrpc_local(local->debug_id, rxrpc_local_got,
3961 ++ n + 1, here);
3962 + else
3963 + local = NULL;
3964 + }
3965 +@@ -346,24 +348,18 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
3966 + }
3967 +
3968 + /*
3969 +- * Queue a local endpoint.
3970 ++ * Queue a local endpoint and pass the caller's reference to the work item.
3971 + */
3972 + void rxrpc_queue_local(struct rxrpc_local *local)
3973 + {
3974 + const void *here = __builtin_return_address(0);
3975 ++ unsigned int debug_id = local->debug_id;
3976 ++ int n = atomic_read(&local->usage);
3977 +
3978 + if (rxrpc_queue_work(&local->processor))
3979 +- trace_rxrpc_local(local, rxrpc_local_queued,
3980 +- atomic_read(&local->usage), here);
3981 +-}
3982 +-
3983 +-/*
3984 +- * A local endpoint reached its end of life.
3985 +- */
3986 +-static void __rxrpc_put_local(struct rxrpc_local *local)
3987 +-{
3988 +- _enter("%d", local->debug_id);
3989 +- rxrpc_queue_work(&local->processor);
3990 ++ trace_rxrpc_local(debug_id, rxrpc_local_queued, n, here);
3991 ++ else
3992 ++ rxrpc_put_local(local);
3993 + }
3994 +
3995 + /*
3996 +@@ -376,10 +372,47 @@ void rxrpc_put_local(struct rxrpc_local *local)
3997 +
3998 + if (local) {
3999 + n = atomic_dec_return(&local->usage);
4000 +- trace_rxrpc_local(local, rxrpc_local_put, n, here);
4001 ++ trace_rxrpc_local(local->debug_id, rxrpc_local_put, n, here);
4002 +
4003 + if (n == 0)
4004 +- __rxrpc_put_local(local);
4005 ++ call_rcu(&local->rcu, rxrpc_local_rcu);
4006 ++ }
4007 ++}
4008 ++
4009 ++/*
4010 ++ * Start using a local endpoint.
4011 ++ */
4012 ++struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local)
4013 ++{
4014 ++ unsigned int au;
4015 ++
4016 ++ local = rxrpc_get_local_maybe(local);
4017 ++ if (!local)
4018 ++ return NULL;
4019 ++
4020 ++ au = atomic_fetch_add_unless(&local->active_users, 1, 0);
4021 ++ if (au == 0) {
4022 ++ rxrpc_put_local(local);
4023 ++ return NULL;
4024 ++ }
4025 ++
4026 ++ return local;
4027 ++}
4028 ++
4029 ++/*
4030 ++ * Cease using a local endpoint. Once the number of active users reaches 0, we
4031 ++ * start the closure of the transport in the work processor.
4032 ++ */
4033 ++void rxrpc_unuse_local(struct rxrpc_local *local)
4034 ++{
4035 ++ unsigned int au;
4036 ++
4037 ++ if (local) {
4038 ++ au = atomic_dec_return(&local->active_users);
4039 ++ if (au == 0)
4040 ++ rxrpc_queue_local(local);
4041 ++ else
4042 ++ rxrpc_put_local(local);
4043 + }
4044 + }
4045 +
4046 +@@ -397,16 +430,6 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
4047 +
4048 + _enter("%d", local->debug_id);
4049 +
4050 +- /* We can get a race between an incoming call packet queueing the
4051 +- * processor again and the work processor starting the destruction
4052 +- * process which will shut down the UDP socket.
4053 +- */
4054 +- if (local->dead) {
4055 +- _leave(" [already dead]");
4056 +- return;
4057 +- }
4058 +- local->dead = true;
4059 +-
4060 + mutex_lock(&rxnet->local_mutex);
4061 + list_del_init(&local->link);
4062 + mutex_unlock(&rxnet->local_mutex);
4063 +@@ -426,13 +449,11 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
4064 + */
4065 + rxrpc_purge_queue(&local->reject_queue);
4066 + rxrpc_purge_queue(&local->event_queue);
4067 +-
4068 +- _debug("rcu local %d", local->debug_id);
4069 +- call_rcu(&local->rcu, rxrpc_local_rcu);
4070 + }
4071 +
4072 + /*
4073 +- * Process events on an endpoint
4074 ++ * Process events on an endpoint. The work item carries a ref which
4075 ++ * we must release.
4076 + */
4077 + static void rxrpc_local_processor(struct work_struct *work)
4078 + {
4079 +@@ -440,13 +461,15 @@ static void rxrpc_local_processor(struct work_struct *work)
4080 + container_of(work, struct rxrpc_local, processor);
4081 + bool again;
4082 +
4083 +- trace_rxrpc_local(local, rxrpc_local_processing,
4084 ++ trace_rxrpc_local(local->debug_id, rxrpc_local_processing,
4085 + atomic_read(&local->usage), NULL);
4086 +
4087 + do {
4088 + again = false;
4089 +- if (atomic_read(&local->usage) == 0)
4090 +- return rxrpc_local_destroyer(local);
4091 ++ if (atomic_read(&local->active_users) == 0) {
4092 ++ rxrpc_local_destroyer(local);
4093 ++ break;
4094 ++ }
4095 +
4096 + if (!skb_queue_empty(&local->reject_queue)) {
4097 + rxrpc_reject_packets(local);
4098 +@@ -458,6 +481,8 @@ static void rxrpc_local_processor(struct work_struct *work)
4099 + again = true;
4100 + }
4101 + } while (again);
4102 ++
4103 ++ rxrpc_put_local(local);
4104 + }
4105 +
4106 + /*
4107 +diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
4108 +index bd2fa3b7caa7..dc7fdaf20445 100644
4109 +--- a/net/rxrpc/peer_event.c
4110 ++++ b/net/rxrpc/peer_event.c
4111 +@@ -375,7 +375,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
4112 + spin_lock_bh(&rxnet->peer_hash_lock);
4113 + list_add_tail(&peer->keepalive_link,
4114 + &rxnet->peer_keepalive[slot & mask]);
4115 +- rxrpc_put_peer(peer);
4116 ++ rxrpc_put_peer_locked(peer);
4117 + }
4118 +
4119 + spin_unlock_bh(&rxnet->peer_hash_lock);
4120 +diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
4121 +index 5691b7d266ca..71547e8673b9 100644
4122 +--- a/net/rxrpc/peer_object.c
4123 ++++ b/net/rxrpc/peer_object.c
4124 +@@ -440,6 +440,24 @@ void rxrpc_put_peer(struct rxrpc_peer *peer)
4125 + }
4126 + }
4127 +
4128 ++/*
4129 ++ * Drop a ref on a peer record where the caller already holds the
4130 ++ * peer_hash_lock.
4131 ++ */
4132 ++void rxrpc_put_peer_locked(struct rxrpc_peer *peer)
4133 ++{
4134 ++ const void *here = __builtin_return_address(0);
4135 ++ int n;
4136 ++
4137 ++ n = atomic_dec_return(&peer->usage);
4138 ++ trace_rxrpc_peer(peer, rxrpc_peer_put, n, here);
4139 ++ if (n == 0) {
4140 ++ hash_del_rcu(&peer->hash_link);
4141 ++ list_del_init(&peer->keepalive_link);
4142 ++ kfree_rcu(peer, rcu);
4143 ++ }
4144 ++}
4145 ++
4146 + /*
4147 + * Make sure all peer records have been discarded.
4148 + */
4149 +diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
4150 +index be01f9c5d963..5d6ab4f6fd7a 100644
4151 +--- a/net/rxrpc/sendmsg.c
4152 ++++ b/net/rxrpc/sendmsg.c
4153 +@@ -230,6 +230,7 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
4154 + rxrpc_set_call_completion(call,
4155 + RXRPC_CALL_LOCAL_ERROR,
4156 + 0, ret);
4157 ++ rxrpc_notify_socket(call);
4158 + goto out;
4159 + }
4160 + _debug("need instant resend %d", ret);
4161 +diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
4162 +index 160b2764b2ad..6a8c279a4b20 100644
4163 +--- a/sound/soc/davinci/davinci-mcasp.c
4164 ++++ b/sound/soc/davinci/davinci-mcasp.c
4165 +@@ -1150,6 +1150,28 @@ static int davinci_mcasp_trigger(struct snd_pcm_substream *substream,
4166 + return ret;
4167 + }
4168 +
4169 ++static int davinci_mcasp_hw_rule_slot_width(struct snd_pcm_hw_params *params,
4170 ++ struct snd_pcm_hw_rule *rule)
4171 ++{
4172 ++ struct davinci_mcasp_ruledata *rd = rule->private;
4173 ++ struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
4174 ++ struct snd_mask nfmt;
4175 ++ int i, slot_width;
4176 ++
4177 ++ snd_mask_none(&nfmt);
4178 ++ slot_width = rd->mcasp->slot_width;
4179 ++
4180 ++ for (i = 0; i <= SNDRV_PCM_FORMAT_LAST; i++) {
4181 ++ if (snd_mask_test(fmt, i)) {
4182 ++ if (snd_pcm_format_width(i) <= slot_width) {
4183 ++ snd_mask_set(&nfmt, i);
4184 ++ }
4185 ++ }
4186 ++ }
4187 ++
4188 ++ return snd_mask_refine(fmt, &nfmt);
4189 ++}
4190 ++
4191 + static const unsigned int davinci_mcasp_dai_rates[] = {
4192 + 8000, 11025, 16000, 22050, 32000, 44100, 48000, 64000,
4193 + 88200, 96000, 176400, 192000,
4194 +@@ -1257,7 +1279,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
4195 + struct davinci_mcasp_ruledata *ruledata =
4196 + &mcasp->ruledata[substream->stream];
4197 + u32 max_channels = 0;
4198 +- int i, dir;
4199 ++ int i, dir, ret;
4200 + int tdm_slots = mcasp->tdm_slots;
4201 +
4202 + /* Do not allow more than one stream per direction */
4203 +@@ -1286,6 +1308,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
4204 + max_channels++;
4205 + }
4206 + ruledata->serializers = max_channels;
4207 ++ ruledata->mcasp = mcasp;
4208 + max_channels *= tdm_slots;
4209 + /*
4210 + * If the already active stream has less channels than the calculated
4211 +@@ -1311,20 +1334,22 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
4212 + 0, SNDRV_PCM_HW_PARAM_CHANNELS,
4213 + &mcasp->chconstr[substream->stream]);
4214 +
4215 +- if (mcasp->slot_width)
4216 +- snd_pcm_hw_constraint_minmax(substream->runtime,
4217 +- SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
4218 +- 8, mcasp->slot_width);
4219 ++ if (mcasp->slot_width) {
4220 ++ /* Only allow formats that require <= slot_width bits on the bus */
4221 ++ ret = snd_pcm_hw_rule_add(substream->runtime, 0,
4222 ++ SNDRV_PCM_HW_PARAM_FORMAT,
4223 ++ davinci_mcasp_hw_rule_slot_width,
4224 ++ ruledata,
4225 ++ SNDRV_PCM_HW_PARAM_FORMAT, -1);
4226 ++ if (ret)
4227 ++ return ret;
4228 ++ }
4229 +
4230 + /*
4231 + * If we rely on implicit BCLK divider setting we should
4232 + * set constraints based on what we can provide.
4233 + */
4234 + if (mcasp->bclk_master && mcasp->bclk_div == 0 && mcasp->sysclk_freq) {
4235 +- int ret;
4236 +-
4237 +- ruledata->mcasp = mcasp;
4238 +-
4239 + ret = snd_pcm_hw_rule_add(substream->runtime, 0,
4240 + SNDRV_PCM_HW_PARAM_RATE,
4241 + davinci_mcasp_hw_rule_rate,
4242 +diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
4243 +index 60d43d53a8f5..11399f81c92f 100644
4244 +--- a/sound/soc/rockchip/rockchip_i2s.c
4245 ++++ b/sound/soc/rockchip/rockchip_i2s.c
4246 +@@ -329,7 +329,6 @@ static int rockchip_i2s_hw_params(struct snd_pcm_substream *substream,
4247 + val |= I2S_CHN_4;
4248 + break;
4249 + case 2:
4250 +- case 1:
4251 + val |= I2S_CHN_2;
4252 + break;
4253 + default:
4254 +@@ -462,7 +461,7 @@ static struct snd_soc_dai_driver rockchip_i2s_dai = {
4255 + },
4256 + .capture = {
4257 + .stream_name = "Capture",
4258 +- .channels_min = 1,
4259 ++ .channels_min = 2,
4260 + .channels_max = 2,
4261 + .rates = SNDRV_PCM_RATE_8000_192000,
4262 + .formats = (SNDRV_PCM_FMTBIT_S8 |
4263 +@@ -662,7 +661,7 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
4264 + }
4265 +
4266 + if (!of_property_read_u32(node, "rockchip,capture-channels", &val)) {
4267 +- if (val >= 1 && val <= 8)
4268 ++ if (val >= 2 && val <= 8)
4269 + soc_dai->capture.channels_max = val;
4270 + }
4271 +
4272 +diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
4273 +index 62aa320c2070..dafc3b7f8d72 100644
4274 +--- a/sound/soc/soc-core.c
4275 ++++ b/sound/soc/soc-core.c
4276 +@@ -1513,8 +1513,11 @@ static int soc_probe_link_dais(struct snd_soc_card *card,
4277 + }
4278 + }
4279 +
4280 +- if (dai_link->dai_fmt)
4281 +- snd_soc_runtime_set_dai_fmt(rtd, dai_link->dai_fmt);
4282 ++ if (dai_link->dai_fmt) {
4283 ++ ret = snd_soc_runtime_set_dai_fmt(rtd, dai_link->dai_fmt);
4284 ++ if (ret)
4285 ++ return ret;
4286 ++ }
4287 +
4288 + ret = soc_post_component_init(rtd, dai_link->name);
4289 + if (ret)
4290 +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
4291 +index 3bfc788372f3..4ce57510b623 100644
4292 +--- a/sound/soc/soc-dapm.c
4293 ++++ b/sound/soc/soc-dapm.c
4294 +@@ -1145,8 +1145,8 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
4295 + list_add_tail(&widget->work_list, list);
4296 +
4297 + if (custom_stop_condition && custom_stop_condition(widget, dir)) {
4298 +- widget->endpoints[dir] = 1;
4299 +- return widget->endpoints[dir];
4300 ++ list = NULL;
4301 ++ custom_stop_condition = NULL;
4302 + }
4303 +
4304 + if ((widget->is_ep & SND_SOC_DAPM_DIR_TO_EP(dir)) && widget->connected) {
4305 +@@ -1183,8 +1183,8 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
4306 + *
4307 + * Optionally, can be supplied with a function acting as a stopping condition.
4308 + * This function takes the dapm widget currently being examined and the walk
4309 +- * direction as an arguments, it should return true if the walk should be
4310 +- * stopped and false otherwise.
4311 ++ * direction as an arguments, it should return true if widgets from that point
4312 ++ * in the graph onwards should not be added to the widget list.
4313 + */
4314 + static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
4315 + struct list_head *list,
4316 +diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
4317 +index fa56fde6e8d8..91c0a4434da2 100644
4318 +--- a/tools/perf/bench/numa.c
4319 ++++ b/tools/perf/bench/numa.c
4320 +@@ -378,8 +378,10 @@ static u8 *alloc_data(ssize_t bytes0, int map_flags,
4321 +
4322 + /* Allocate and initialize all memory on CPU#0: */
4323 + if (init_cpu0) {
4324 +- orig_mask = bind_to_node(0);
4325 +- bind_to_memnode(0);
4326 ++ int node = numa_node_of_cpu(0);
4327 ++
4328 ++ orig_mask = bind_to_node(node);
4329 ++ bind_to_memnode(node);
4330 + }
4331 +
4332 + bytes = bytes0 + HPSIZE;
4333 +diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c
4334 +index f42f228e8899..137955197ba8 100644
4335 +--- a/tools/perf/builtin-ftrace.c
4336 ++++ b/tools/perf/builtin-ftrace.c
4337 +@@ -174,7 +174,7 @@ static int set_tracing_cpumask(struct cpu_map *cpumap)
4338 + int last_cpu;
4339 +
4340 + last_cpu = cpu_map__cpu(cpumap, cpumap->nr - 1);
4341 +- mask_size = (last_cpu + 3) / 4 + 1;
4342 ++ mask_size = last_cpu / 4 + 2; /* one more byte for EOS */
4343 + mask_size += last_cpu / 32; /* ',' is needed for every 32th cpus */
4344 +
4345 + cpumask = malloc(mask_size);
4346 +diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
4347 +index 68c92bb599ee..6b36b7110669 100644
4348 +--- a/tools/perf/pmu-events/jevents.c
4349 ++++ b/tools/perf/pmu-events/jevents.c
4350 +@@ -450,6 +450,7 @@ static struct fixed {
4351 + { "inst_retired.any_p", "event=0xc0" },
4352 + { "cpu_clk_unhalted.ref", "event=0x0,umask=0x03" },
4353 + { "cpu_clk_unhalted.thread", "event=0x3c" },
4354 ++ { "cpu_clk_unhalted.core", "event=0x3c" },
4355 + { "cpu_clk_unhalted.thread_any", "event=0x3c,any=1" },
4356 + { NULL, NULL},
4357 + };
4358 +diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
4359 +index 383674f448fc..f93846edc1e0 100644
4360 +--- a/tools/perf/util/cpumap.c
4361 ++++ b/tools/perf/util/cpumap.c
4362 +@@ -701,7 +701,10 @@ size_t cpu_map__snprint_mask(struct cpu_map *map, char *buf, size_t size)
4363 + unsigned char *bitmap;
4364 + int last_cpu = cpu_map__cpu(map, map->nr - 1);
4365 +
4366 +- bitmap = zalloc((last_cpu + 7) / 8);
4367 ++ if (buf == NULL)
4368 ++ return 0;
4369 ++
4370 ++ bitmap = zalloc(last_cpu / 8 + 1);
4371 + if (bitmap == NULL) {
4372 + buf[0] = '\0';
4373 + return 0;
4374 +diff --git a/tools/testing/selftests/bpf/sendmsg6_prog.c b/tools/testing/selftests/bpf/sendmsg6_prog.c
4375 +index 5aeaa284fc47..a68062820410 100644
4376 +--- a/tools/testing/selftests/bpf/sendmsg6_prog.c
4377 ++++ b/tools/testing/selftests/bpf/sendmsg6_prog.c
4378 +@@ -41,8 +41,7 @@ int sendmsg_v6_prog(struct bpf_sock_addr *ctx)
4379 + }
4380 +
4381 + /* Rewrite destination. */
4382 +- if ((ctx->user_ip6[0] & 0xFFFF) == bpf_htons(0xFACE) &&
4383 +- ctx->user_ip6[0] >> 16 == bpf_htons(0xB00C)) {
4384 ++ if (ctx->user_ip6[0] == bpf_htonl(0xFACEB00C)) {
4385 + ctx->user_ip6[0] = bpf_htonl(DST_REWRITE_IP6_0);
4386 + ctx->user_ip6[1] = bpf_htonl(DST_REWRITE_IP6_1);
4387 + ctx->user_ip6[2] = bpf_htonl(DST_REWRITE_IP6_2);
4388 +diff --git a/tools/testing/selftests/kvm/config b/tools/testing/selftests/kvm/config
4389 +new file mode 100644
4390 +index 000000000000..63ed533f73d6
4391 +--- /dev/null
4392 ++++ b/tools/testing/selftests/kvm/config
4393 +@@ -0,0 +1,3 @@
4394 ++CONFIG_KVM=y
4395 ++CONFIG_KVM_INTEL=y
4396 ++CONFIG_KVM_AMD=y
4397 +diff --git a/tools/testing/selftests/net/forwarding/gre_multipath.sh b/tools/testing/selftests/net/forwarding/gre_multipath.sh
4398 +index cca2baa03fb8..a8d8e8b3dc81 100755
4399 +--- a/tools/testing/selftests/net/forwarding/gre_multipath.sh
4400 ++++ b/tools/testing/selftests/net/forwarding/gre_multipath.sh
4401 +@@ -93,18 +93,10 @@ sw1_create()
4402 + ip route add vrf v$ol1 192.0.2.16/28 \
4403 + nexthop dev g1a \
4404 + nexthop dev g1b
4405 +-
4406 +- tc qdisc add dev $ul1 clsact
4407 +- tc filter add dev $ul1 egress pref 111 prot ipv4 \
4408 +- flower dst_ip 192.0.2.66 action pass
4409 +- tc filter add dev $ul1 egress pref 222 prot ipv4 \
4410 +- flower dst_ip 192.0.2.82 action pass
4411 + }
4412 +
4413 + sw1_destroy()
4414 + {
4415 +- tc qdisc del dev $ul1 clsact
4416 +-
4417 + ip route del vrf v$ol1 192.0.2.16/28
4418 +
4419 + ip route del vrf v$ol1 192.0.2.82/32 via 192.0.2.146
4420 +@@ -139,10 +131,18 @@ sw2_create()
4421 + ip route add vrf v$ol2 192.0.2.0/28 \
4422 + nexthop dev g2a \
4423 + nexthop dev g2b
4424 ++
4425 ++ tc qdisc add dev $ul2 clsact
4426 ++ tc filter add dev $ul2 ingress pref 111 prot 802.1Q \
4427 ++ flower vlan_id 111 action pass
4428 ++ tc filter add dev $ul2 ingress pref 222 prot 802.1Q \
4429 ++ flower vlan_id 222 action pass
4430 + }
4431 +
4432 + sw2_destroy()
4433 + {
4434 ++ tc qdisc del dev $ul2 clsact
4435 ++
4436 + ip route del vrf v$ol2 192.0.2.0/28
4437 +
4438 + ip route del vrf v$ol2 192.0.2.81/32 via 192.0.2.145
4439 +@@ -187,12 +187,16 @@ setup_prepare()
4440 + sw1_create
4441 + sw2_create
4442 + h2_create
4443 ++
4444 ++ forwarding_enable
4445 + }
4446 +
4447 + cleanup()
4448 + {
4449 + pre_cleanup
4450 +
4451 ++ forwarding_restore
4452 ++
4453 + h2_destroy
4454 + sw2_destroy
4455 + sw1_destroy
4456 +@@ -211,15 +215,15 @@ multipath4_test()
4457 + nexthop dev g1a weight $weight1 \
4458 + nexthop dev g1b weight $weight2
4459 +
4460 +- local t0_111=$(tc_rule_stats_get $ul1 111 egress)
4461 +- local t0_222=$(tc_rule_stats_get $ul1 222 egress)
4462 ++ local t0_111=$(tc_rule_stats_get $ul2 111 ingress)
4463 ++ local t0_222=$(tc_rule_stats_get $ul2 222 ingress)
4464 +
4465 + ip vrf exec v$h1 \
4466 + $MZ $h1 -q -p 64 -A 192.0.2.1 -B 192.0.2.18 \
4467 + -d 1msec -t udp "sp=1024,dp=0-32768"
4468 +
4469 +- local t1_111=$(tc_rule_stats_get $ul1 111 egress)
4470 +- local t1_222=$(tc_rule_stats_get $ul1 222 egress)
4471 ++ local t1_111=$(tc_rule_stats_get $ul2 111 ingress)
4472 ++ local t1_222=$(tc_rule_stats_get $ul2 222 ingress)
4473 +
4474 + local d111=$((t1_111 - t0_111))
4475 + local d222=$((t1_222 - t0_222))