
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Wed, 05 Dec 2018 19:44:13
Message-Id: 1544039024.e7b7c443dae26e4044393b6d27d3102424bb61d8.mpagano@gentoo
1 commit: e7b7c443dae26e4044393b6d27d3102424bb61d8
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Dec 5 19:43:44 2018 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Dec 5 19:43:44 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e7b7c443
7
8 proj/linux-patches: Linux patch 4.9.143
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1142_linux-4.9.143.patch | 1835 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 1839 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 1aec075..d4392f7 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -611,6 +611,10 @@ Patch: 1141_linux-4.9.142.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.9.142
23
24 +Patch: 1142_linux-4.9.143.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.9.143
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1142_linux-4.9.143.patch b/1142_linux-4.9.143.patch
33 new file mode 100644
34 index 0000000..07af4fd
35 --- /dev/null
36 +++ b/1142_linux-4.9.143.patch
37 @@ -0,0 +1,1835 @@
38 +diff --git a/Makefile b/Makefile
39 +index 72ed8ff90329..8ec52cd19526 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,6 +1,6 @@
43 + VERSION = 4
44 + PATCHLEVEL = 9
45 +-SUBLEVEL = 142
46 ++SUBLEVEL = 143
47 + EXTRAVERSION =
48 + NAME = Roaring Lionus
49 +
50 +@@ -509,6 +509,39 @@ ifneq ($(filter install,$(MAKECMDGOALS)),)
51 + endif
52 + endif
53 +
54 ++ifeq ($(cc-name),clang)
55 ++ifneq ($(CROSS_COMPILE),)
56 ++CLANG_TARGET := -target $(notdir $(CROSS_COMPILE:%-=%))
57 ++GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..)
58 ++endif
59 ++ifneq ($(GCC_TOOLCHAIN),)
60 ++CLANG_GCC_TC := -gcc-toolchain $(GCC_TOOLCHAIN)
61 ++endif
62 ++KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
63 ++KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
64 ++KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
65 ++KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
66 ++KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
67 ++KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
68 ++KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
69 ++# Quiet clang warning: comparison of unsigned expression < 0 is always false
70 ++KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
71 ++# CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
72 ++# source of a reference will be _MergedGlobals and not on of the whitelisted names.
73 ++# See modpost pattern 2
74 ++KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
75 ++KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
76 ++KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
77 ++KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
78 ++else
79 ++
80 ++# These warnings generated too much noise in a regular build.
81 ++# Use make W=1 to enable them (see scripts/Makefile.build)
82 ++KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
83 ++KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
84 ++endif
85 ++
86 ++
87 + ifeq ($(mixed-targets),1)
88 + # ===========================================================================
89 + # We're called with mixed targets (*config and build targets).
90 +@@ -704,38 +737,6 @@ ifdef CONFIG_CC_STACKPROTECTOR
91 + endif
92 + KBUILD_CFLAGS += $(stackp-flag)
93 +
94 +-ifeq ($(cc-name),clang)
95 +-ifneq ($(CROSS_COMPILE),)
96 +-CLANG_TARGET := -target $(notdir $(CROSS_COMPILE:%-=%))
97 +-GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..)
98 +-endif
99 +-ifneq ($(GCC_TOOLCHAIN),)
100 +-CLANG_GCC_TC := -gcc-toolchain $(GCC_TOOLCHAIN)
101 +-endif
102 +-KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
103 +-KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
104 +-KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
105 +-KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
106 +-KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
107 +-KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
108 +-KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
109 +-# Quiet clang warning: comparison of unsigned expression < 0 is always false
110 +-KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
111 +-# CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
112 +-# source of a reference will be _MergedGlobals and not on of the whitelisted names.
113 +-# See modpost pattern 2
114 +-KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
115 +-KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
116 +-KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
117 +-KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
118 +-else
119 +-
120 +-# These warnings generated too much noise in a regular build.
121 +-# Use make W=1 to enable them (see scripts/Makefile.build)
122 +-KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
123 +-KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
124 +-endif
125 +-
126 + ifdef CONFIG_FRAME_POINTER
127 + KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
128 + else
129 +diff --git a/arch/arm/Makefile b/arch/arm/Makefile
130 +index 6be9ee148b78..e14ddca59d02 100644
131 +--- a/arch/arm/Makefile
132 ++++ b/arch/arm/Makefile
133 +@@ -104,7 +104,7 @@ tune-$(CONFIG_CPU_V6K) =$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)
134 + tune-y := $(tune-y)
135 +
136 + ifeq ($(CONFIG_AEABI),y)
137 +-CFLAGS_ABI :=-mabi=aapcs-linux -mno-thumb-interwork -mfpu=vfp
138 ++CFLAGS_ABI :=-mabi=aapcs-linux -mfpu=vfp
139 + else
140 + CFLAGS_ABI :=$(call cc-option,-mapcs-32,-mabi=apcs-gnu) $(call cc-option,-mno-thumb-interwork,)
141 + endif
142 +diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
143 +index d50430c40045..552c7d7f84ce 100644
144 +--- a/arch/arm/boot/compressed/Makefile
145 ++++ b/arch/arm/boot/compressed/Makefile
146 +@@ -112,7 +112,7 @@ CFLAGS_fdt_ro.o := $(nossp_flags)
147 + CFLAGS_fdt_rw.o := $(nossp_flags)
148 + CFLAGS_fdt_wip.o := $(nossp_flags)
149 +
150 +-ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
151 ++ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin -I$(obj)
152 + asflags-y := -DZIMAGE
153 +
154 + # Supply kernel BSS size to the decompressor via a linker symbol.
155 +diff --git a/arch/arm/firmware/trusted_foundations.c b/arch/arm/firmware/trusted_foundations.c
156 +index 3fb1b5a1dce9..689e6565abfc 100644
157 +--- a/arch/arm/firmware/trusted_foundations.c
158 ++++ b/arch/arm/firmware/trusted_foundations.c
159 +@@ -31,21 +31,25 @@
160 +
161 + static unsigned long cpu_boot_addr;
162 +
163 +-static void __naked tf_generic_smc(u32 type, u32 arg1, u32 arg2)
164 ++static void tf_generic_smc(u32 type, u32 arg1, u32 arg2)
165 + {
166 ++ register u32 r0 asm("r0") = type;
167 ++ register u32 r1 asm("r1") = arg1;
168 ++ register u32 r2 asm("r2") = arg2;
169 ++
170 + asm volatile(
171 + ".arch_extension sec\n\t"
172 +- "stmfd sp!, {r4 - r11, lr}\n\t"
173 ++ "stmfd sp!, {r4 - r11}\n\t"
174 + __asmeq("%0", "r0")
175 + __asmeq("%1", "r1")
176 + __asmeq("%2", "r2")
177 + "mov r3, #0\n\t"
178 + "mov r4, #0\n\t"
179 + "smc #0\n\t"
180 +- "ldmfd sp!, {r4 - r11, pc}"
181 ++ "ldmfd sp!, {r4 - r11}\n\t"
182 + :
183 +- : "r" (type), "r" (arg1), "r" (arg2)
184 +- : "memory");
185 ++ : "r" (r0), "r" (r1), "r" (r2)
186 ++ : "memory", "r3", "r12", "lr");
187 + }
188 +
189 + static int tf_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
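The hunk above replaces a __naked function, which clang does not support, with ordinary C that pins each argument to a fixed register and declares everything the SMC call scribbles on as a clobber. A minimal userspace sketch of the same register-variable technique, assuming Linux x86-64 with gcc or clang (the registers and the syscall are stand-ins for the ARM r0-r2/smc pair, not the kernel code):

    /* raw write(2) via explicit register variables, x86-64 SysV ABI */
    static long raw_write(int fd, const void *buf, unsigned long len)
    {
        register long rax asm("rax") = 1;           /* __NR_write */
        register long rdi asm("rdi") = fd;
        register long rsi asm("rsi") = (long)buf;
        register long rdx asm("rdx") = len;

        asm volatile("syscall"
                     : "+r"(rax)
                     : "r"(rdi), "r"(rsi), "r"(rdx)
                     : "rcx", "r11", "memory");     /* kernel clobbers these */
        return rax;
    }

    int main(void)
    {
        return raw_write(1, "hello\n", 6) == 6 ? 0 : 1;
    }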
190 +diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
191 +index 655a65eaf105..cadf99923600 100644
192 +--- a/arch/x86/events/core.c
193 ++++ b/arch/x86/events/core.c
194 +@@ -437,26 +437,6 @@ int x86_setup_perfctr(struct perf_event *event)
195 + if (config == -1LL)
196 + return -EINVAL;
197 +
198 +- /*
199 +- * Branch tracing:
200 +- */
201 +- if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
202 +- !attr->freq && hwc->sample_period == 1) {
203 +- /* BTS is not supported by this architecture. */
204 +- if (!x86_pmu.bts_active)
205 +- return -EOPNOTSUPP;
206 +-
207 +- /* BTS is currently only allowed for user-mode. */
208 +- if (!attr->exclude_kernel)
209 +- return -EOPNOTSUPP;
210 +-
211 +- /* disallow bts if conflicting events are present */
212 +- if (x86_add_exclusive(x86_lbr_exclusive_lbr))
213 +- return -EBUSY;
214 +-
215 +- event->destroy = hw_perf_lbr_event_destroy;
216 +- }
217 +-
218 + hwc->config |= config;
219 +
220 + return 0;
221 +diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
222 +index 815039327932..4f8560774082 100644
223 +--- a/arch/x86/events/intel/core.c
224 ++++ b/arch/x86/events/intel/core.c
225 +@@ -2198,16 +2198,7 @@ done:
226 + static struct event_constraint *
227 + intel_bts_constraints(struct perf_event *event)
228 + {
229 +- struct hw_perf_event *hwc = &event->hw;
230 +- unsigned int hw_event, bts_event;
231 +-
232 +- if (event->attr.freq)
233 +- return NULL;
234 +-
235 +- hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
236 +- bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
237 +-
238 +- if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
239 ++ if (unlikely(intel_pmu_has_bts(event)))
240 + return &bts_constraint;
241 +
242 + return NULL;
243 +@@ -2822,10 +2813,47 @@ static unsigned long intel_pmu_free_running_flags(struct perf_event *event)
244 + return flags;
245 + }
246 +
247 ++static int intel_pmu_bts_config(struct perf_event *event)
248 ++{
249 ++ struct perf_event_attr *attr = &event->attr;
250 ++
251 ++ if (unlikely(intel_pmu_has_bts(event))) {
252 ++ /* BTS is not supported by this architecture. */
253 ++ if (!x86_pmu.bts_active)
254 ++ return -EOPNOTSUPP;
255 ++
256 ++ /* BTS is currently only allowed for user-mode. */
257 ++ if (!attr->exclude_kernel)
258 ++ return -EOPNOTSUPP;
259 ++
260 ++ /* disallow bts if conflicting events are present */
261 ++ if (x86_add_exclusive(x86_lbr_exclusive_lbr))
262 ++ return -EBUSY;
263 ++
264 ++ event->destroy = hw_perf_lbr_event_destroy;
265 ++ }
266 ++
267 ++ return 0;
268 ++}
269 ++
270 ++static int core_pmu_hw_config(struct perf_event *event)
271 ++{
272 ++ int ret = x86_pmu_hw_config(event);
273 ++
274 ++ if (ret)
275 ++ return ret;
276 ++
277 ++ return intel_pmu_bts_config(event);
278 ++}
279 ++
280 + static int intel_pmu_hw_config(struct perf_event *event)
281 + {
282 + int ret = x86_pmu_hw_config(event);
283 +
284 ++ if (ret)
285 ++ return ret;
286 ++
287 ++ ret = intel_pmu_bts_config(event);
288 + if (ret)
289 + return ret;
290 +
291 +@@ -2848,7 +2876,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
292 + /*
293 + * BTS is set up earlier in this path, so don't account twice
294 + */
295 +- if (!intel_pmu_has_bts(event)) {
296 ++ if (!unlikely(intel_pmu_has_bts(event))) {
297 + /* disallow lbr if conflicting events are present */
298 + if (x86_add_exclusive(x86_lbr_exclusive_lbr))
299 + return -EBUSY;
300 +@@ -3265,7 +3293,7 @@ static __initconst const struct x86_pmu core_pmu = {
301 + .enable_all = core_pmu_enable_all,
302 + .enable = core_pmu_enable_event,
303 + .disable = x86_pmu_disable_event,
304 +- .hw_config = x86_pmu_hw_config,
305 ++ .hw_config = core_pmu_hw_config,
306 + .schedule_events = x86_schedule_events,
307 + .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
308 + .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
309 +diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
310 +index 1bfebbc4d156..7ace39c51ff7 100644
311 +--- a/arch/x86/events/perf_event.h
312 ++++ b/arch/x86/events/perf_event.h
313 +@@ -835,11 +835,16 @@ static inline int amd_pmu_init(void)
314 +
315 + static inline bool intel_pmu_has_bts(struct perf_event *event)
316 + {
317 +- if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
318 +- !event->attr.freq && event->hw.sample_period == 1)
319 +- return true;
320 ++ struct hw_perf_event *hwc = &event->hw;
321 ++ unsigned int hw_event, bts_event;
322 ++
323 ++ if (event->attr.freq)
324 ++ return false;
325 ++
326 ++ hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
327 ++ bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
328 +
329 +- return false;
330 ++ return hw_event == bts_event && hwc->sample_period == 1;
331 + }
332 +
333 + int intel_pmu_save_and_restart(struct perf_event *event);
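The predicate that used to be open-coded in intel_bts_constraints() is now the single source of truth: an event is a BTS candidate only if it is not frequency-based, its hardware event code maps to branch instructions, and it samples every single event. A compilable stand-in (the struct and event code below are illustrative, not the perf definitions):

    #include <stdbool.h>
    #include <stdio.h>

    struct ev { bool freq; unsigned int config; unsigned long sample_period; };

    #define BRANCH_EVENT 0xc4u          /* stand-in for the mapped BTS event */

    static bool has_bts(const struct ev *e)
    {
        if (e->freq)
            return false;               /* freq-based events never use BTS */
        /* branch instructions, counted one at a time */
        return e->config == BRANCH_EVENT && e->sample_period == 1;
    }

    int main(void)
    {
        struct ev e = { false, 0xc4, 1 };
        printf("bts: %d\n", has_bts(&e));
        return 0;
    }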
334 +diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
335 +index 8a4d6bc8fed0..676edfc19a95 100644
336 +--- a/arch/x86/kvm/mmu.c
337 ++++ b/arch/x86/kvm/mmu.c
338 +@@ -4297,9 +4297,9 @@ static bool need_remote_flush(u64 old, u64 new)
339 + }
340 +
341 + static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
342 +- const u8 *new, int *bytes)
343 ++ int *bytes)
344 + {
345 +- u64 gentry;
346 ++ u64 gentry = 0;
347 + int r;
348 +
349 + /*
350 +@@ -4311,22 +4311,12 @@ static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
351 + /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
352 + *gpa &= ~(gpa_t)7;
353 + *bytes = 8;
354 +- r = kvm_vcpu_read_guest(vcpu, *gpa, &gentry, 8);
355 +- if (r)
356 +- gentry = 0;
357 +- new = (const u8 *)&gentry;
358 + }
359 +
360 +- switch (*bytes) {
361 +- case 4:
362 +- gentry = *(const u32 *)new;
363 +- break;
364 +- case 8:
365 +- gentry = *(const u64 *)new;
366 +- break;
367 +- default:
368 +- gentry = 0;
369 +- break;
370 ++ if (*bytes == 4 || *bytes == 8) {
371 ++ r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
372 ++ if (r)
373 ++ gentry = 0;
374 + }
375 +
376 + return gentry;
377 +@@ -4437,8 +4427,6 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
378 +
379 + pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
380 +
381 +- gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, new, &bytes);
382 +-
383 + /*
384 + * No need to care whether allocation memory is successful
385 + * or not since pte prefetch is skiped if it does not have
386 +@@ -4447,6 +4435,9 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
387 + mmu_topup_memory_caches(vcpu);
388 +
389 + spin_lock(&vcpu->kvm->mmu_lock);
390 ++
391 ++ gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
392 ++
393 + ++vcpu->kvm->stat.mmu_pte_write;
394 + kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
395 +
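Two things change above: the guest PTE is now fetched after mmu_lock is taken, so the value cannot go stale between fetch and use, and it is read with the atomic guest-read helper only for the two sizes a PTE write can legally have. The size gate in isolation, with stand-in names and a plain array standing in for guest memory:

    #include <stdio.h>
    #include <string.h>

    static unsigned char guest_mem[16];   /* stand-in for guest memory */

    static unsigned long long fetch_gpte(unsigned long gpa, int bytes)
    {
        unsigned long long gentry = 0;

        /* only 4- and 8-byte PTE writes are meaningful; anything else
         * leaves gentry at 0, exactly like the reworked kernel helper */
        if (bytes == 4 || bytes == 8)
            memcpy(&gentry, &guest_mem[gpa], (size_t)bytes);
        return gentry;
    }

    int main(void)
    {
        memset(guest_mem, 0xab, sizeof(guest_mem));
        printf("%llx %llx\n", fetch_gpte(0, 8), fetch_gpte(0, 3));
        return 0;
    }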
396 +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
397 +index 5f44d63a9d69..4bc35ac28d11 100644
398 +--- a/arch/x86/kvm/svm.c
399 ++++ b/arch/x86/kvm/svm.c
400 +@@ -1672,21 +1672,31 @@ out:
401 + return ERR_PTR(err);
402 + }
403 +
404 ++static void svm_clear_current_vmcb(struct vmcb *vmcb)
405 ++{
406 ++ int i;
407 ++
408 ++ for_each_online_cpu(i)
409 ++ cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL);
410 ++}
411 ++
412 + static void svm_free_vcpu(struct kvm_vcpu *vcpu)
413 + {
414 + struct vcpu_svm *svm = to_svm(vcpu);
415 +
416 ++ /*
417 ++ * The vmcb page can be recycled, causing a false negative in
418 ++ * svm_vcpu_load(). So, ensure that no logical CPU has this
419 ++ * vmcb page recorded as its current vmcb.
420 ++ */
421 ++ svm_clear_current_vmcb(svm->vmcb);
422 ++
423 + __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
424 + __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
425 + __free_page(virt_to_page(svm->nested.hsave));
426 + __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
427 + kvm_vcpu_uninit(vcpu);
428 + kmem_cache_free(kvm_vcpu_cache, svm);
429 +- /*
430 +- * The vmcb page can be recycled, causing a false negative in
431 +- * svm_vcpu_load(). So do a full IBPB now.
432 +- */
433 +- indirect_branch_prediction_barrier();
434 + }
435 +
436 + static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
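Instead of a blanket IBPB on every vCPU free, the fix walks each CPU's cached current_vmcb pointer and NULLs it only if it still refers to the vmcb being freed, so a recycled page can never match a stale cache entry. The compare-and-swap idea in portable C11 (the fixed CPU count and types are stand-ins for the per-CPU machinery):

    #include <stdatomic.h>
    #include <stdio.h>

    #define NCPUS 4
    static _Atomic(void *) current_vmcb[NCPUS];

    static void clear_current(void *vmcb)
    {
        for (int i = 0; i < NCPUS; i++) {
            void *expected = vmcb;
            /* swing the slot to NULL only if it caches *this* vmcb */
            atomic_compare_exchange_strong(&current_vmcb[i], &expected, NULL);
        }
    }

    int main(void)
    {
        int a, b;
        atomic_store(&current_vmcb[0], &a);
        atomic_store(&current_vmcb[1], &b);
        clear_current(&a);            /* slot 0 cleared, slot 1 untouched */
        printf("%p %p\n", atomic_load(&current_vmcb[0]),
               atomic_load(&current_vmcb[1]));
        return 0;
    }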
437 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
438 +index 5013ef165f44..27d13b870e07 100644
439 +--- a/arch/x86/kvm/x86.c
440 ++++ b/arch/x86/kvm/x86.c
441 +@@ -6661,7 +6661,8 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
442 + else {
443 + if (vcpu->arch.apicv_active)
444 + kvm_x86_ops->sync_pir_to_irr(vcpu);
445 +- kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
446 ++ if (ioapic_in_kernel(vcpu->kvm))
447 ++ kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
448 + }
449 + bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors,
450 + vcpu_to_synic(vcpu)->vec_bitmap, 256);
451 +diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
452 +index 8e10e357ee32..f1af06b8f3cd 100644
453 +--- a/arch/xtensa/kernel/asm-offsets.c
454 ++++ b/arch/xtensa/kernel/asm-offsets.c
455 +@@ -91,14 +91,14 @@ int main(void)
456 + DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
457 + DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable));
458 + #if XTENSA_HAVE_COPROCESSORS
459 +- DEFINE(THREAD_XTREGS_CP0, offsetof (struct thread_info, xtregs_cp));
460 +- DEFINE(THREAD_XTREGS_CP1, offsetof (struct thread_info, xtregs_cp));
461 +- DEFINE(THREAD_XTREGS_CP2, offsetof (struct thread_info, xtregs_cp));
462 +- DEFINE(THREAD_XTREGS_CP3, offsetof (struct thread_info, xtregs_cp));
463 +- DEFINE(THREAD_XTREGS_CP4, offsetof (struct thread_info, xtregs_cp));
464 +- DEFINE(THREAD_XTREGS_CP5, offsetof (struct thread_info, xtregs_cp));
465 +- DEFINE(THREAD_XTREGS_CP6, offsetof (struct thread_info, xtregs_cp));
466 +- DEFINE(THREAD_XTREGS_CP7, offsetof (struct thread_info, xtregs_cp));
467 ++ DEFINE(THREAD_XTREGS_CP0, offsetof(struct thread_info, xtregs_cp.cp0));
468 ++ DEFINE(THREAD_XTREGS_CP1, offsetof(struct thread_info, xtregs_cp.cp1));
469 ++ DEFINE(THREAD_XTREGS_CP2, offsetof(struct thread_info, xtregs_cp.cp2));
470 ++ DEFINE(THREAD_XTREGS_CP3, offsetof(struct thread_info, xtregs_cp.cp3));
471 ++ DEFINE(THREAD_XTREGS_CP4, offsetof(struct thread_info, xtregs_cp.cp4));
472 ++ DEFINE(THREAD_XTREGS_CP5, offsetof(struct thread_info, xtregs_cp.cp5));
473 ++ DEFINE(THREAD_XTREGS_CP6, offsetof(struct thread_info, xtregs_cp.cp6));
474 ++ DEFINE(THREAD_XTREGS_CP7, offsetof(struct thread_info, xtregs_cp.cp7));
475 + #endif
476 + DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user));
477 + DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t));
478 +diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
479 +index e0ded48561db..570307c91846 100644
480 +--- a/arch/xtensa/kernel/process.c
481 ++++ b/arch/xtensa/kernel/process.c
482 +@@ -85,18 +85,21 @@ void coprocessor_release_all(struct thread_info *ti)
483 +
484 + void coprocessor_flush_all(struct thread_info *ti)
485 + {
486 +- unsigned long cpenable;
487 ++ unsigned long cpenable, old_cpenable;
488 + int i;
489 +
490 + preempt_disable();
491 +
492 ++ RSR_CPENABLE(old_cpenable);
493 + cpenable = ti->cpenable;
494 ++ WSR_CPENABLE(cpenable);
495 +
496 + for (i = 0; i < XCHAL_CP_MAX; i++) {
497 + if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti)
498 + coprocessor_flush(ti, i);
499 + cpenable >>= 1;
500 + }
501 ++ WSR_CPENABLE(old_cpenable);
502 +
503 + preempt_enable();
504 + }
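The bug here was that the loop flushed coprocessors that were not currently enabled in CPENABLE; the fix saves the live mask, writes the thread's own mask for the duration of the flush, and restores the original afterwards. The save/set/restore shape, with a plain variable standing in for the special register:

    #include <stdio.h>

    static unsigned long cpenable;                /* stand-in for CPENABLE */
    static unsigned long rsr(void) { return cpenable; }
    static void wsr(unsigned long v) { cpenable = v; }

    static void flush_all(unsigned long owned)
    {
        unsigned long old = rsr();

        wsr(owned);                               /* enable the owned units */
        for (unsigned long m = owned; m; m &= m - 1)
            printf("flush unit mask %#lx\n", m & -m);
        wsr(old);                                 /* put the old mask back */
    }

    int main(void) { wsr(0x1); flush_all(0x6); return 0; }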
505 +diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
506 +index 10f56133b281..8e08cb4fd7df 100644
507 +--- a/drivers/bus/arm-cci.c
508 ++++ b/drivers/bus/arm-cci.c
509 +@@ -2103,8 +2103,6 @@ asmlinkage void __naked cci_enable_port_for_self(void)
510 + [sizeof_struct_cpu_port] "i" (sizeof(struct cpu_port)),
511 + [sizeof_struct_ace_port] "i" (sizeof(struct cci_ace_port)),
512 + [offsetof_port_phys] "i" (offsetof(struct cci_ace_port, phys)) );
513 +-
514 +- unreachable();
515 + }
516 +
517 + /**
518 +diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
519 +index e2cec5b357fd..a32cd71f94bb 100644
520 +--- a/drivers/dma/at_hdmac.c
521 ++++ b/drivers/dma/at_hdmac.c
522 +@@ -1774,6 +1774,12 @@ static void atc_free_chan_resources(struct dma_chan *chan)
523 + atchan->descs_allocated = 0;
524 + atchan->status = 0;
525 +
526 ++ /*
527 ++ * Free atslave allocated in at_dma_xlate()
528 ++ */
529 ++ kfree(chan->private);
530 ++ chan->private = NULL;
531 ++
532 + dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
533 + }
534 +
535 +@@ -1808,7 +1814,7 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
536 + dma_cap_zero(mask);
537 + dma_cap_set(DMA_SLAVE, mask);
538 +
539 +- atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL);
540 ++ atslave = kzalloc(sizeof(*atslave), GFP_KERNEL);
541 + if (!atslave)
542 + return NULL;
543 +
544 +@@ -2139,6 +2145,8 @@ static int at_dma_remove(struct platform_device *pdev)
545 + struct resource *io;
546 +
547 + at_dma_off(atdma);
548 ++ if (pdev->dev.of_node)
549 ++ of_dma_controller_free(pdev->dev.of_node);
550 + dma_async_device_unregister(&atdma->dma_common);
551 +
552 + dma_pool_destroy(atdma->memset_pool);
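Three lifetime fixes in one driver: the per-request atslave moves from devm_kzalloc() (freed only at driver detach, so repeated xlate calls leaked) to plain kzalloc() with a matching kfree() when the channel is released, and remove() now unregisters the OF DMA controller before tearing the device down. The allocation/release pairing in miniature, with stand-in names:

    #include <stdlib.h>

    struct chan { void *private; };

    static int chan_request(struct chan *c)
    {
        c->private = calloc(1, 64);   /* per-request state, not per-device */
        return c->private ? 0 : -1;
    }

    static void chan_release(struct chan *c)
    {
        free(c->private);             /* freed where its use ends */
        c->private = NULL;            /* no dangling pointer on reuse */
    }

    int main(void)
    {
        struct chan c = { 0 };
        if (chan_request(&c) == 0)
            chan_release(&c);
        return 0;
    }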
553 +diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
554 +index 2cd9496eb696..310f8feb5174 100644
555 +--- a/drivers/firmware/efi/libstub/Makefile
556 ++++ b/drivers/firmware/efi/libstub/Makefile
557 +@@ -12,7 +12,8 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 \
558 +
559 + cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) -fpie
560 + cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
561 +- -fno-builtin -fpic -mno-single-pic-base
562 ++ -fno-builtin -fpic \
563 ++ $(call cc-option,-mno-single-pic-base)
564 +
565 + cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt
566 +
567 +diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
568 +index aded10662020..09d10dcf1fc6 100644
569 +--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
570 ++++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
571 +@@ -355,6 +355,14 @@ efi_status_t efi_parse_options(char *cmdline)
572 + {
573 + char *str;
574 +
575 ++ /*
576 ++ * Currently, the only efi= option we look for is 'nochunk', which
577 ++ * is intended to work around known issues on certain x86 UEFI
578 ++ * versions. So ignore for now on other architectures.
579 ++ */
580 ++ if (!IS_ENABLED(CONFIG_X86))
581 ++ return EFI_SUCCESS;
582 ++
583 + /*
584 + * If no EFI parameters were specified on the cmdline we've got
585 + * nothing to do.
586 +@@ -528,7 +536,8 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
587 + size = files[j].size;
588 + while (size) {
589 + unsigned long chunksize;
590 +- if (size > __chunk_size)
591 ++
592 ++ if (IS_ENABLED(CONFIG_X86) && size > __chunk_size)
593 + chunksize = __chunk_size;
594 + else
595 + chunksize = size;
596 +diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
597 +index 1606e7f08f4b..784c45484825 100644
598 +--- a/drivers/hv/channel.c
599 ++++ b/drivers/hv/channel.c
600 +@@ -448,6 +448,14 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
601 + }
602 + wait_for_completion(&msginfo->waitevent);
603 +
604 ++ if (msginfo->response.gpadl_created.creation_status != 0) {
605 ++ pr_err("Failed to establish GPADL: err = 0x%x\n",
606 ++ msginfo->response.gpadl_created.creation_status);
607 ++
608 ++ ret = -EDQUOT;
609 ++ goto cleanup;
610 ++ }
611 ++
612 + if (channel->rescind) {
613 + ret = -ENODEV;
614 + goto cleanup;
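Previously a GPADL whose creation the host rejected was still treated as established. The fix inspects the status word the host filled in before the completion fired; the structure below is a stand-in, and the -1 mirrors the patch's choice of -EDQUOT:

    #include <stdio.h>

    struct gpadl_created { int creation_status; };

    static int establish(const struct gpadl_created *resp)
    {
        if (resp->creation_status != 0) {
            fprintf(stderr, "GPADL failed: 0x%x\n", resp->creation_status);
            return -1;                /* the kernel returns -EDQUOT here */
        }
        return 0;
    }

    int main(void)
    {
        struct gpadl_created ok = { 0 }, bad = { 0x1 };
        printf("%d %d\n", establish(&ok), establish(&bad));
        return 0;
    }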
615 +diff --git a/drivers/iio/magnetometer/st_magn_buffer.c b/drivers/iio/magnetometer/st_magn_buffer.c
616 +index 0a9e8fadfa9d..37ab30566464 100644
617 +--- a/drivers/iio/magnetometer/st_magn_buffer.c
618 ++++ b/drivers/iio/magnetometer/st_magn_buffer.c
619 +@@ -30,11 +30,6 @@ int st_magn_trig_set_state(struct iio_trigger *trig, bool state)
620 + return st_sensors_set_dataready_irq(indio_dev, state);
621 + }
622 +
623 +-static int st_magn_buffer_preenable(struct iio_dev *indio_dev)
624 +-{
625 +- return st_sensors_set_enable(indio_dev, true);
626 +-}
627 +-
628 + static int st_magn_buffer_postenable(struct iio_dev *indio_dev)
629 + {
630 + int err;
631 +@@ -50,7 +45,7 @@ static int st_magn_buffer_postenable(struct iio_dev *indio_dev)
632 + if (err < 0)
633 + goto st_magn_buffer_postenable_error;
634 +
635 +- return err;
636 ++ return st_sensors_set_enable(indio_dev, true);
637 +
638 + st_magn_buffer_postenable_error:
639 + kfree(mdata->buffer_data);
640 +@@ -63,11 +58,11 @@ static int st_magn_buffer_predisable(struct iio_dev *indio_dev)
641 + int err;
642 + struct st_sensor_data *mdata = iio_priv(indio_dev);
643 +
644 +- err = iio_triggered_buffer_predisable(indio_dev);
645 ++ err = st_sensors_set_enable(indio_dev, false);
646 + if (err < 0)
647 + goto st_magn_buffer_predisable_error;
648 +
649 +- err = st_sensors_set_enable(indio_dev, false);
650 ++ err = iio_triggered_buffer_predisable(indio_dev);
651 +
652 + st_magn_buffer_predisable_error:
653 + kfree(mdata->buffer_data);
654 +@@ -75,7 +70,6 @@ st_magn_buffer_predisable_error:
655 + }
656 +
657 + static const struct iio_buffer_setup_ops st_magn_buffer_setup_ops = {
658 +- .preenable = &st_magn_buffer_preenable,
659 + .postenable = &st_magn_buffer_postenable,
660 + .predisable = &st_magn_buffer_predisable,
661 + };
662 +diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
663 +index 8cedef0daae4..b0aea48907b7 100644
664 +--- a/drivers/media/usb/em28xx/em28xx-dvb.c
665 ++++ b/drivers/media/usb/em28xx/em28xx-dvb.c
666 +@@ -2016,6 +2016,8 @@ static int em28xx_dvb_fini(struct em28xx *dev)
667 + }
668 + }
669 +
670 ++ em28xx_unregister_dvb(dvb);
671 ++
672 + /* remove I2C SEC */
673 + client = dvb->i2c_client_sec;
674 + if (client) {
675 +@@ -2037,7 +2039,6 @@ static int em28xx_dvb_fini(struct em28xx *dev)
676 + i2c_unregister_device(client);
677 + }
678 +
679 +- em28xx_unregister_dvb(dvb);
680 + kfree(dvb);
681 + dev->dvb = NULL;
682 + kref_put(&dev->ref, em28xx_free_device);
683 +diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
684 +index f806a4471eb9..32ab0f43f506 100644
685 +--- a/drivers/misc/mic/scif/scif_rma.c
686 ++++ b/drivers/misc/mic/scif/scif_rma.c
687 +@@ -414,7 +414,7 @@ static int scif_create_remote_lookup(struct scif_dev *remote_dev,
688 + if (err)
689 + goto error_window;
690 + err = scif_map_page(&window->num_pages_lookup.lookup[j],
691 +- vmalloc_dma_phys ?
692 ++ vmalloc_num_pages ?
693 + vmalloc_to_page(&window->num_pages[i]) :
694 + virt_to_page(&window->num_pages[i]),
695 + remote_dev);
696 +diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
697 +index a31f4610b493..2c2604e3f633 100644
698 +--- a/drivers/net/rionet.c
699 ++++ b/drivers/net/rionet.c
700 +@@ -216,9 +216,9 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
701 + * it just report sending a packet to the target
702 + * (without actual packet transfer).
703 + */
704 +- dev_kfree_skb_any(skb);
705 + ndev->stats.tx_packets++;
706 + ndev->stats.tx_bytes += skb->len;
707 ++ dev_kfree_skb_any(skb);
708 + }
709 + }
710 +
711 +diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
712 +index 76465b117b72..f1f8227e7342 100644
713 +--- a/drivers/net/usb/ipheth.c
714 ++++ b/drivers/net/usb/ipheth.c
715 +@@ -140,7 +140,6 @@ struct ipheth_device {
716 + struct usb_device *udev;
717 + struct usb_interface *intf;
718 + struct net_device *net;
719 +- struct sk_buff *tx_skb;
720 + struct urb *tx_urb;
721 + struct urb *rx_urb;
722 + unsigned char *tx_buf;
723 +@@ -229,6 +228,7 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
724 + case -ENOENT:
725 + case -ECONNRESET:
726 + case -ESHUTDOWN:
727 ++ case -EPROTO:
728 + return;
729 + case 0:
730 + break;
731 +@@ -280,7 +280,6 @@ static void ipheth_sndbulk_callback(struct urb *urb)
732 + dev_err(&dev->intf->dev, "%s: urb status: %d\n",
733 + __func__, status);
734 +
735 +- dev_kfree_skb_irq(dev->tx_skb);
736 + netif_wake_queue(dev->net);
737 + }
738 +
739 +@@ -410,7 +409,7 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
740 + if (skb->len > IPHETH_BUF_SIZE) {
741 + WARN(1, "%s: skb too large: %d bytes\n", __func__, skb->len);
742 + dev->net->stats.tx_dropped++;
743 +- dev_kfree_skb_irq(skb);
744 ++ dev_kfree_skb_any(skb);
745 + return NETDEV_TX_OK;
746 + }
747 +
748 +@@ -430,12 +429,11 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
749 + dev_err(&dev->intf->dev, "%s: usb_submit_urb: %d\n",
750 + __func__, retval);
751 + dev->net->stats.tx_errors++;
752 +- dev_kfree_skb_irq(skb);
753 ++ dev_kfree_skb_any(skb);
754 + } else {
755 +- dev->tx_skb = skb;
756 +-
757 + dev->net->stats.tx_packets++;
758 + dev->net->stats.tx_bytes += skb->len;
759 ++ dev_consume_skb_any(skb);
760 + netif_stop_queue(net);
761 + }
762 +
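The ipheth changes (and the rionet hunk earlier) fix the same ordering bug: statistics were bumped from skb->len after the skb had already been freed. Reading the field first, then freeing, is the whole fix; the *_any/consume variants additionally make the free safe from any context. In miniature, with stand-in types:

    #include <stdio.h>
    #include <stdlib.h>

    struct pkt { size_t len; unsigned char data[64]; };
    static struct { unsigned long packets, bytes; } stats;

    static void tx_done(struct pkt *p)
    {
        stats.packets++;
        stats.bytes += p->len;        /* read while p is still alive */
        free(p);                      /* only now give the packet back */
    }

    int main(void)
    {
        struct pkt *p = malloc(sizeof(*p));
        if (!p)
            return 1;
        p->len = 60;
        tx_done(p);
        printf("%lu packets, %lu bytes\n", stats.packets, stats.bytes);
        return 0;
    }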
763 +diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
764 +index 96f83f09b8c5..7f4da727bb7b 100644
765 +--- a/drivers/net/wireless/ti/wlcore/cmd.c
766 ++++ b/drivers/net/wireless/ti/wlcore/cmd.c
767 +@@ -35,7 +35,6 @@
768 + #include "wl12xx_80211.h"
769 + #include "cmd.h"
770 + #include "event.h"
771 +-#include "ps.h"
772 + #include "tx.h"
773 + #include "hw_ops.h"
774 +
775 +@@ -192,10 +191,6 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
776 +
777 + timeout_time = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);
778 +
779 +- ret = wl1271_ps_elp_wakeup(wl);
780 +- if (ret < 0)
781 +- return ret;
782 +-
783 + do {
784 + if (time_after(jiffies, timeout_time)) {
785 + wl1271_debug(DEBUG_CMD, "timeout waiting for event %d",
786 +@@ -227,7 +222,6 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
787 + } while (!event);
788 +
789 + out:
790 +- wl1271_ps_elp_sleep(wl);
791 + kfree(events_vector);
792 + return ret;
793 + }
794 +diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
795 +index a5e603062ee0..8f77fc0630ce 100644
796 +--- a/drivers/s390/net/qeth_core_main.c
797 ++++ b/drivers/s390/net/qeth_core_main.c
798 +@@ -4540,8 +4540,8 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
799 + {
800 + struct qeth_ipa_cmd *cmd;
801 + struct qeth_arp_query_info *qinfo;
802 +- struct qeth_snmp_cmd *snmp;
803 + unsigned char *data;
804 ++ void *snmp_data;
805 + __u16 data_len;
806 +
807 + QETH_CARD_TEXT(card, 3, "snpcmdcb");
808 +@@ -4549,7 +4549,6 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
809 + cmd = (struct qeth_ipa_cmd *) sdata;
810 + data = (unsigned char *)((char *)cmd - reply->offset);
811 + qinfo = (struct qeth_arp_query_info *) reply->param;
812 +- snmp = &cmd->data.setadapterparms.data.snmp;
813 +
814 + if (cmd->hdr.return_code) {
815 + QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
816 +@@ -4562,10 +4561,15 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
817 + return 0;
818 + }
819 + data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data));
820 +- if (cmd->data.setadapterparms.hdr.seq_no == 1)
821 +- data_len -= (__u16)((char *)&snmp->data - (char *)cmd);
822 +- else
823 +- data_len -= (__u16)((char *)&snmp->request - (char *)cmd);
824 ++ if (cmd->data.setadapterparms.hdr.seq_no == 1) {
825 ++ snmp_data = &cmd->data.setadapterparms.data.snmp;
826 ++ data_len -= offsetof(struct qeth_ipa_cmd,
827 ++ data.setadapterparms.data.snmp);
828 ++ } else {
829 ++ snmp_data = &cmd->data.setadapterparms.data.snmp.request;
830 ++ data_len -= offsetof(struct qeth_ipa_cmd,
831 ++ data.setadapterparms.data.snmp.request);
832 ++ }
833 +
834 + /* check if there is enough room in userspace */
835 + if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
836 +@@ -4578,16 +4582,9 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
837 + QETH_CARD_TEXT_(card, 4, "sseqn%i",
838 + cmd->data.setadapterparms.hdr.seq_no);
839 + /*copy entries to user buffer*/
840 +- if (cmd->data.setadapterparms.hdr.seq_no == 1) {
841 +- memcpy(qinfo->udata + qinfo->udata_offset,
842 +- (char *)snmp,
843 +- data_len + offsetof(struct qeth_snmp_cmd, data));
844 +- qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data);
845 +- } else {
846 +- memcpy(qinfo->udata + qinfo->udata_offset,
847 +- (char *)&snmp->request, data_len);
848 +- }
849 ++ memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
850 + qinfo->udata_offset += data_len;
851 ++
852 + /* check if all replies received ... */
853 + QETH_CARD_TEXT_(card, 4, "srtot%i",
854 + cmd->data.setadapterparms.hdr.used_total);
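The qeth rework replaces pointer subtraction against a separately computed struct address with offsetof() through a nested member, which yields the same header length as a constant expression and cannot drift if the layout changes. With illustrative stand-in structs:

    #include <stddef.h>
    #include <stdio.h>

    struct snmp_cmd { unsigned short request; unsigned char data[8]; };
    struct ipa_cmd  { unsigned int hdr; struct snmp_cmd snmp; };

    int main(void)
    {
        /* distance from the start of the command to each payload */
        size_t to_data    = offsetof(struct ipa_cmd, snmp.data);
        size_t to_request = offsetof(struct ipa_cmd, snmp.request);

        printf("%zu %zu\n", to_data, to_request);
        return 0;
    }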
855 +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
856 +index 1e8f68960014..808437c5ec49 100644
857 +--- a/drivers/usb/core/quirks.c
858 ++++ b/drivers/usb/core/quirks.c
859 +@@ -64,6 +64,9 @@ static const struct usb_device_id usb_quirk_list[] = {
860 + /* Microsoft LifeCam-VX700 v2.0 */
861 + { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
862 +
863 ++ /* Cherry Stream G230 2.0 (G85-231) and 3.0 (G85-232) */
864 ++ { USB_DEVICE(0x046a, 0x0023), .driver_info = USB_QUIRK_RESET_RESUME },
865 ++
866 + /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
867 + { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
868 + { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
869 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
870 +index 26efe8c7535f..ed6b9bfe3759 100644
871 +--- a/drivers/usb/dwc3/gadget.c
872 ++++ b/drivers/usb/dwc3/gadget.c
873 +@@ -1280,9 +1280,6 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
874 + unsigned transfer_in_flight;
875 + unsigned started;
876 +
877 +- if (dep->flags & DWC3_EP_STALL)
878 +- return 0;
879 +-
880 + if (dep->number > 1)
881 + trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
882 + else
883 +@@ -1307,8 +1304,6 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
884 + else
885 + dep->flags |= DWC3_EP_STALL;
886 + } else {
887 +- if (!(dep->flags & DWC3_EP_STALL))
888 +- return 0;
889 +
890 + ret = dwc3_send_clear_stall_ep_cmd(dep);
891 + if (ret)
892 +diff --git a/drivers/usb/storage/unusual_realtek.h b/drivers/usb/storage/unusual_realtek.h
893 +index 8fe624ad302a..7ca779493671 100644
894 +--- a/drivers/usb/storage/unusual_realtek.h
895 ++++ b/drivers/usb/storage/unusual_realtek.h
896 +@@ -39,4 +39,14 @@ UNUSUAL_DEV(0x0bda, 0x0159, 0x0000, 0x9999,
897 + "USB Card Reader",
898 + USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
899 +
900 ++UNUSUAL_DEV(0x0bda, 0x0177, 0x0000, 0x9999,
901 ++ "Realtek",
902 ++ "USB Card Reader",
903 ++ USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
904 ++
905 ++UNUSUAL_DEV(0x0bda, 0x0184, 0x0000, 0x9999,
906 ++ "Realtek",
907 ++ "USB Card Reader",
908 ++ USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
909 ++
910 + #endif /* defined(CONFIG_USB_STORAGE_REALTEK) || ... */
911 +diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
912 +index f6e111984ce2..a7b69deb6d70 100644
913 +--- a/fs/btrfs/super.c
914 ++++ b/fs/btrfs/super.c
915 +@@ -2226,6 +2226,7 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
916 + vol = memdup_user((void __user *)arg, sizeof(*vol));
917 + if (IS_ERR(vol))
918 + return PTR_ERR(vol);
919 ++ vol->name[BTRFS_PATH_NAME_MAX] = '\0';
920 +
921 + switch (cmd) {
922 + case BTRFS_IOC_SCAN_DEV:
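memdup_user() copies exactly sizeof(*vol) bytes, so nothing guarantees the name field ends in a NUL before the ioctl handlers treat it as a string; writing the terminator into the last slot closes the out-of-bounds read. The general pattern, with an illustrative size rather than the btrfs constant:

    #include <stdio.h>
    #include <string.h>

    #define PATH_NAME_MAX 15          /* illustrative, not the btrfs value */

    struct vol_args { char name[PATH_NAME_MAX + 1]; };

    int main(void)
    {
        struct vol_args v;

        memset(v.name, 'A', sizeof(v.name));  /* hostile: no terminator */
        v.name[PATH_NAME_MAX] = '\0';         /* the one-line fix */
        printf("%zu\n", strlen(v.name));      /* bounded: prints 15 */
        return 0;
    }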
923 +diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
924 +index 9517de0e668c..fd6c74662e9a 100644
925 +--- a/fs/btrfs/transaction.c
926 ++++ b/fs/btrfs/transaction.c
927 +@@ -1924,6 +1924,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
928 + return ret;
929 + }
930 +
931 ++ btrfs_trans_release_metadata(trans, root);
932 ++ trans->block_rsv = NULL;
933 ++
934 + /* make a pass through all the delayed refs we have so far
935 + * any runnings procs may add more while we are here
936 + */
937 +@@ -1933,9 +1936,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
938 + return ret;
939 + }
940 +
941 +- btrfs_trans_release_metadata(trans, root);
942 +- trans->block_rsv = NULL;
943 +-
944 + cur_trans = trans->transaction;
945 +
946 + /*
947 +diff --git a/fs/direct-io.c b/fs/direct-io.c
948 +index c6220a2daefd..07cc38ec66ca 100644
949 +--- a/fs/direct-io.c
950 ++++ b/fs/direct-io.c
951 +@@ -278,8 +278,8 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
952 + */
953 + dio->iocb->ki_pos += transferred;
954 +
955 +- if (dio->op == REQ_OP_WRITE)
956 +- ret = generic_write_sync(dio->iocb, transferred);
957 ++ if (ret > 0 && dio->op == REQ_OP_WRITE)
958 ++ ret = generic_write_sync(dio->iocb, ret);
959 + dio->iocb->ki_complete(dio->iocb, ret, 0);
960 + }
961 +
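Two corrections in one line above: sync only when the write actually transferred data (ret > 0), and sync the amount reported in ret rather than the raw transferred count, so an error from the write path is never replaced by a later success value. The control flow in isolation, with a stand-in sync function:

    #include <stdio.h>

    static long sync_bytes(long n) { return n; }  /* stand-in for the fsync */

    static long complete_write(long ret, int is_write)
    {
        if (ret > 0 && is_write)
            ret = sync_bytes(ret);    /* sync what was written; keep errors */
        return ret;
    }

    int main(void)
    {
        printf("%ld %ld\n", complete_write(4096, 1), complete_write(-5, 1));
        return 0;
    }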
962 +diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
963 +index fbdb8f171893..babef30d440b 100644
964 +--- a/fs/ext2/xattr.c
965 ++++ b/fs/ext2/xattr.c
966 +@@ -609,9 +609,9 @@ skip_replace:
967 + }
968 +
969 + cleanup:
970 +- brelse(bh);
971 + if (!(bh && header == HDR(bh)))
972 + kfree(header);
973 ++ brelse(bh);
974 + up_write(&EXT2_I(inode)->xattr_sem);
975 +
976 + return error;
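The cleanup check dereferences the buffer head (header == HDR(bh) reads bh->b_data), so releasing bh first turned the comparison into a use-after-free; the fix simply tests before releasing. The same ordering with heap objects standing in for the buffer head:

    #include <stdlib.h>

    struct bh { char *b_data; };

    static void cleanup(struct bh *bh, char *header)
    {
        /* test first: the comparison reads through bh */
        if (!(bh && header == bh->b_data))
            free(header);
        if (bh) {                     /* only now drop the buffer */
            free(bh->b_data);
            free(bh);
        }
    }

    int main(void)
    {
        struct bh *bh = malloc(sizeof(*bh));
        if (!bh)
            return 1;
        bh->b_data = malloc(16);
        cleanup(bh, bh->b_data);      /* header aliases b_data: one free */
        return 0;
    }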
977 +diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
978 +index 1def337b16d4..8e880f7f67b2 100644
979 +--- a/include/linux/workqueue.h
980 ++++ b/include/linux/workqueue.h
981 +@@ -106,9 +106,9 @@ struct work_struct {
982 + #endif
983 + };
984 +
985 +-#define WORK_DATA_INIT() ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL)
986 ++#define WORK_DATA_INIT() ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
987 + #define WORK_DATA_STATIC_INIT() \
988 +- ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC)
989 ++ ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))
990 +
991 + struct delayed_work {
992 + struct work_struct work;
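The added casts force the flag constants to unsigned long before they reach the atomic_long_t initializer, so a constant with high bits set never passes through int-width arithmetic. A userspace illustration of the hazard, assuming an LP64 two's-complement target (the flag value below is made up):

    #include <stdio.h>

    int main(void)
    {
        unsigned int flags = 0xC0000000u;   /* two high bits set, like the
                                               pool/static flag bits */

        long widened = (long)(unsigned long)flags;  /* what the cast gives */
        long via_int = (long)(int)flags;            /* sign-extends on LP64 */

        printf("%lx\n%lx\n", widened, via_int);     /* c0000000 vs
                                                       ffffffffc0000000 */
        return 0;
    }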
993 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
994 +index 9f7bba700e4e..7ea8da990b9d 100644
995 +--- a/mm/huge_memory.c
996 ++++ b/mm/huge_memory.c
997 +@@ -1839,7 +1839,7 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
998 + }
999 + }
1000 +
1001 +-static void freeze_page(struct page *page)
1002 ++static void unmap_page(struct page *page)
1003 + {
1004 + enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
1005 + TTU_RMAP_LOCKED;
1006 +@@ -1862,7 +1862,7 @@ static void freeze_page(struct page *page)
1007 + VM_BUG_ON_PAGE(ret, page + i - 1);
1008 + }
1009 +
1010 +-static void unfreeze_page(struct page *page)
1011 ++static void remap_page(struct page *page)
1012 + {
1013 + int i;
1014 +
1015 +@@ -1876,26 +1876,13 @@ static void __split_huge_page_tail(struct page *head, int tail,
1016 + struct page *page_tail = head + tail;
1017 +
1018 + VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
1019 +- VM_BUG_ON_PAGE(page_ref_count(page_tail) != 0, page_tail);
1020 +
1021 + /*
1022 +- * tail_page->_refcount is zero and not changing from under us. But
1023 +- * get_page_unless_zero() may be running from under us on the
1024 +- * tail_page. If we used atomic_set() below instead of atomic_inc() or
1025 +- * atomic_add(), we would then run atomic_set() concurrently with
1026 +- * get_page_unless_zero(), and atomic_set() is implemented in C not
1027 +- * using locked ops. spin_unlock on x86 sometime uses locked ops
1028 +- * because of PPro errata 66, 92, so unless somebody can guarantee
1029 +- * atomic_set() here would be safe on all archs (and not only on x86),
1030 +- * it's safer to use atomic_inc()/atomic_add().
1031 ++ * Clone page flags before unfreezing refcount.
1032 ++ *
1033 ++ * After successful get_page_unless_zero() might follow flags change,
1034 ++ * for exmaple lock_page() which set PG_waiters.
1035 + */
1036 +- if (PageAnon(head)) {
1037 +- page_ref_inc(page_tail);
1038 +- } else {
1039 +- /* Additional pin to radix tree */
1040 +- page_ref_add(page_tail, 2);
1041 +- }
1042 +-
1043 + page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1044 + page_tail->flags |= (head->flags &
1045 + ((1L << PG_referenced) |
1046 +@@ -1907,36 +1894,42 @@ static void __split_huge_page_tail(struct page *head, int tail,
1047 + (1L << PG_unevictable) |
1048 + (1L << PG_dirty)));
1049 +
1050 +- /*
1051 +- * After clearing PageTail the gup refcount can be released.
1052 +- * Page flags also must be visible before we make the page non-compound.
1053 +- */
1054 ++ /* ->mapping in first tail page is compound_mapcount */
1055 ++ VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
1056 ++ page_tail);
1057 ++ page_tail->mapping = head->mapping;
1058 ++ page_tail->index = head->index + tail;
1059 ++
1060 ++ /* Page flags must be visible before we make the page non-compound. */
1061 + smp_wmb();
1062 +
1063 ++ /*
1064 ++ * Clear PageTail before unfreezing page refcount.
1065 ++ *
1066 ++ * After successful get_page_unless_zero() might follow put_page()
1067 ++ * which needs correct compound_head().
1068 ++ */
1069 + clear_compound_head(page_tail);
1070 +
1071 ++ /* Finally unfreeze refcount. Additional reference from page cache. */
1072 ++ page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) ||
1073 ++ PageSwapCache(head)));
1074 ++
1075 + if (page_is_young(head))
1076 + set_page_young(page_tail);
1077 + if (page_is_idle(head))
1078 + set_page_idle(page_tail);
1079 +
1080 +- /* ->mapping in first tail page is compound_mapcount */
1081 +- VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
1082 +- page_tail);
1083 +- page_tail->mapping = head->mapping;
1084 +-
1085 +- page_tail->index = head->index + tail;
1086 + page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
1087 + lru_add_page_tail(head, page_tail, lruvec, list);
1088 + }
1089 +
1090 + static void __split_huge_page(struct page *page, struct list_head *list,
1091 +- unsigned long flags)
1092 ++ pgoff_t end, unsigned long flags)
1093 + {
1094 + struct page *head = compound_head(page);
1095 + struct zone *zone = page_zone(head);
1096 + struct lruvec *lruvec;
1097 +- pgoff_t end = -1;
1098 + int i;
1099 +
1100 + lruvec = mem_cgroup_page_lruvec(head, zone->zone_pgdat);
1101 +@@ -1944,9 +1937,6 @@ static void __split_huge_page(struct page *page, struct list_head *list,
1102 + /* complete memcg works before add pages to LRU */
1103 + mem_cgroup_split_huge_fixup(head);
1104 +
1105 +- if (!PageAnon(page))
1106 +- end = DIV_ROUND_UP(i_size_read(head->mapping->host), PAGE_SIZE);
1107 +-
1108 + for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
1109 + __split_huge_page_tail(head, i, lruvec, list);
1110 + /* Some pages can be beyond i_size: drop them from page cache */
1111 +@@ -1971,7 +1961,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
1112 +
1113 + spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
1114 +
1115 +- unfreeze_page(head);
1116 ++ remap_page(head);
1117 +
1118 + for (i = 0; i < HPAGE_PMD_NR; i++) {
1119 + struct page *subpage = head + i;
1120 +@@ -2099,6 +2089,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
1121 + int count, mapcount, extra_pins, ret;
1122 + bool mlocked;
1123 + unsigned long flags;
1124 ++ pgoff_t end;
1125 +
1126 + VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
1127 + VM_BUG_ON_PAGE(!PageLocked(page), page);
1128 +@@ -2120,6 +2111,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
1129 + goto out;
1130 + }
1131 + extra_pins = 0;
1132 ++ end = -1;
1133 + mapping = NULL;
1134 + anon_vma_lock_write(anon_vma);
1135 + } else {
1136 +@@ -2135,10 +2127,19 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
1137 + extra_pins = HPAGE_PMD_NR;
1138 + anon_vma = NULL;
1139 + i_mmap_lock_read(mapping);
1140 ++
1141 ++ /*
1142 ++ *__split_huge_page() may need to trim off pages beyond EOF:
1143 ++ * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
1144 ++ * which cannot be nested inside the page tree lock. So note
1145 ++ * end now: i_size itself may be changed at any moment, but
1146 ++ * head page lock is good enough to serialize the trimming.
1147 ++ */
1148 ++ end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
1149 + }
1150 +
1151 + /*
1152 +- * Racy check if we can split the page, before freeze_page() will
1153 ++ * Racy check if we can split the page, before unmap_page() will
1154 + * split PMDs
1155 + */
1156 + if (total_mapcount(head) != page_count(head) - extra_pins - 1) {
1157 +@@ -2147,7 +2148,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
1158 + }
1159 +
1160 + mlocked = PageMlocked(page);
1161 +- freeze_page(head);
1162 ++ unmap_page(head);
1163 + VM_BUG_ON_PAGE(compound_mapcount(head), head);
1164 +
1165 + /* Make sure the page is not on per-CPU pagevec as it takes pin */
1166 +@@ -2184,7 +2185,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
1167 + if (mapping)
1168 + __dec_node_page_state(page, NR_SHMEM_THPS);
1169 + spin_unlock(&pgdata->split_queue_lock);
1170 +- __split_huge_page(page, list, flags);
1171 ++ __split_huge_page(page, list, end, flags);
1172 + ret = 0;
1173 + } else {
1174 + if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
1175 +@@ -2199,7 +2200,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
1176 + fail: if (mapping)
1177 + spin_unlock(&mapping->tree_lock);
1178 + spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
1179 +- unfreeze_page(head);
1180 ++ remap_page(head);
1181 + ret = -EBUSY;
1182 + }
1183 +
1184 +diff --git a/mm/khugepaged.c b/mm/khugepaged.c
1185 +index 1df37ee996d5..e0cfc3a54b6a 100644
1186 +--- a/mm/khugepaged.c
1187 ++++ b/mm/khugepaged.c
1188 +@@ -1286,7 +1286,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1189 + * collapse_shmem - collapse small tmpfs/shmem pages into huge one.
1190 + *
1191 + * Basic scheme is simple, details are more complex:
1192 +- * - allocate and freeze a new huge page;
1193 ++ * - allocate and lock a new huge page;
1194 + * - scan over radix tree replacing old pages the new one
1195 + * + swap in pages if necessary;
1196 + * + fill in gaps;
1197 +@@ -1294,11 +1294,11 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1198 + * - if replacing succeed:
1199 + * + copy data over;
1200 + * + free old pages;
1201 +- * + unfreeze huge page;
1202 ++ * + unlock huge page;
1203 + * - if replacing failed;
1204 + * + put all pages back and unfreeze them;
1205 + * + restore gaps in the radix-tree;
1206 +- * + free huge page;
1207 ++ * + unlock and free huge page;
1208 + */
1209 + static void collapse_shmem(struct mm_struct *mm,
1210 + struct address_space *mapping, pgoff_t start,
1211 +@@ -1332,18 +1332,15 @@ static void collapse_shmem(struct mm_struct *mm,
1212 + goto out;
1213 + }
1214 +
1215 ++ __SetPageLocked(new_page);
1216 ++ __SetPageSwapBacked(new_page);
1217 + new_page->index = start;
1218 + new_page->mapping = mapping;
1219 +- __SetPageSwapBacked(new_page);
1220 +- __SetPageLocked(new_page);
1221 +- BUG_ON(!page_ref_freeze(new_page, 1));
1222 +-
1223 +
1224 + /*
1225 +- * At this point the new_page is 'frozen' (page_count() is zero), locked
1226 +- * and not up-to-date. It's safe to insert it into radix tree, because
1227 +- * nobody would be able to map it or use it in other way until we
1228 +- * unfreeze it.
1229 ++ * At this point the new_page is locked and not up-to-date.
1230 ++ * It's safe to insert it into the page cache, because nobody would
1231 ++ * be able to map it or use it in another way until we unlock it.
1232 + */
1233 +
1234 + index = start;
1235 +@@ -1351,19 +1348,29 @@ static void collapse_shmem(struct mm_struct *mm,
1236 + radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
1237 + int n = min(iter.index, end) - index;
1238 +
1239 ++ /*
1240 ++ * Stop if extent has been hole-punched, and is now completely
1241 ++ * empty (the more obvious i_size_read() check would take an
1242 ++ * irq-unsafe seqlock on 32-bit).
1243 ++ */
1244 ++ if (n >= HPAGE_PMD_NR) {
1245 ++ result = SCAN_TRUNCATED;
1246 ++ goto tree_locked;
1247 ++ }
1248 ++
1249 + /*
1250 + * Handle holes in the radix tree: charge it from shmem and
1251 + * insert relevant subpage of new_page into the radix-tree.
1252 + */
1253 + if (n && !shmem_charge(mapping->host, n)) {
1254 + result = SCAN_FAIL;
1255 +- break;
1256 ++ goto tree_locked;
1257 + }
1258 +- nr_none += n;
1259 + for (; index < min(iter.index, end); index++) {
1260 + radix_tree_insert(&mapping->page_tree, index,
1261 + new_page + (index % HPAGE_PMD_NR));
1262 + }
1263 ++ nr_none += n;
1264 +
1265 + /* We are done. */
1266 + if (index >= end)
1267 +@@ -1379,12 +1386,12 @@ static void collapse_shmem(struct mm_struct *mm,
1268 + result = SCAN_FAIL;
1269 + goto tree_unlocked;
1270 + }
1271 +- spin_lock_irq(&mapping->tree_lock);
1272 + } else if (trylock_page(page)) {
1273 + get_page(page);
1274 ++ spin_unlock_irq(&mapping->tree_lock);
1275 + } else {
1276 + result = SCAN_PAGE_LOCK;
1277 +- break;
1278 ++ goto tree_locked;
1279 + }
1280 +
1281 + /*
1282 +@@ -1393,17 +1400,24 @@ static void collapse_shmem(struct mm_struct *mm,
1283 + */
1284 + VM_BUG_ON_PAGE(!PageLocked(page), page);
1285 + VM_BUG_ON_PAGE(!PageUptodate(page), page);
1286 +- VM_BUG_ON_PAGE(PageTransCompound(page), page);
1287 ++
1288 ++ /*
1289 ++ * If file was truncated then extended, or hole-punched, before
1290 ++ * we locked the first page, then a THP might be there already.
1291 ++ */
1292 ++ if (PageTransCompound(page)) {
1293 ++ result = SCAN_PAGE_COMPOUND;
1294 ++ goto out_unlock;
1295 ++ }
1296 +
1297 + if (page_mapping(page) != mapping) {
1298 + result = SCAN_TRUNCATED;
1299 + goto out_unlock;
1300 + }
1301 +- spin_unlock_irq(&mapping->tree_lock);
1302 +
1303 + if (isolate_lru_page(page)) {
1304 + result = SCAN_DEL_PAGE_LRU;
1305 +- goto out_isolate_failed;
1306 ++ goto out_unlock;
1307 + }
1308 +
1309 + if (page_mapped(page))
1310 +@@ -1425,7 +1439,9 @@ static void collapse_shmem(struct mm_struct *mm,
1311 + */
1312 + if (!page_ref_freeze(page, 3)) {
1313 + result = SCAN_PAGE_COUNT;
1314 +- goto out_lru;
1315 ++ spin_unlock_irq(&mapping->tree_lock);
1316 ++ putback_lru_page(page);
1317 ++ goto out_unlock;
1318 + }
1319 +
1320 + /*
1321 +@@ -1441,17 +1457,10 @@ static void collapse_shmem(struct mm_struct *mm,
1322 + slot = radix_tree_iter_next(&iter);
1323 + index++;
1324 + continue;
1325 +-out_lru:
1326 +- spin_unlock_irq(&mapping->tree_lock);
1327 +- putback_lru_page(page);
1328 +-out_isolate_failed:
1329 +- unlock_page(page);
1330 +- put_page(page);
1331 +- goto tree_unlocked;
1332 + out_unlock:
1333 + unlock_page(page);
1334 + put_page(page);
1335 +- break;
1336 ++ goto tree_unlocked;
1337 + }
1338 +
1339 + /*
1340 +@@ -1459,14 +1468,18 @@ out_unlock:
1341 + * This code only triggers if there's nothing in radix tree
1342 + * beyond 'end'.
1343 + */
1344 +- if (result == SCAN_SUCCEED && index < end) {
1345 ++ if (index < end) {
1346 + int n = end - index;
1347 +
1348 ++ /* Stop if extent has been truncated, and is now empty */
1349 ++ if (n >= HPAGE_PMD_NR) {
1350 ++ result = SCAN_TRUNCATED;
1351 ++ goto tree_locked;
1352 ++ }
1353 + if (!shmem_charge(mapping->host, n)) {
1354 + result = SCAN_FAIL;
1355 + goto tree_locked;
1356 + }
1357 +-
1358 + for (; index < end; index++) {
1359 + radix_tree_insert(&mapping->page_tree, index,
1360 + new_page + (index % HPAGE_PMD_NR));
1361 +@@ -1474,57 +1487,62 @@ out_unlock:
1362 + nr_none += n;
1363 + }
1364 +
1365 ++ __inc_node_page_state(new_page, NR_SHMEM_THPS);
1366 ++ if (nr_none) {
1367 ++ struct zone *zone = page_zone(new_page);
1368 ++
1369 ++ __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
1370 ++ __mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
1371 ++ }
1372 ++
1373 + tree_locked:
1374 + spin_unlock_irq(&mapping->tree_lock);
1375 + tree_unlocked:
1376 +
1377 + if (result == SCAN_SUCCEED) {
1378 +- unsigned long flags;
1379 +- struct zone *zone = page_zone(new_page);
1380 +-
1381 + /*
1382 + * Replacing old pages with new one has succeed, now we need to
1383 + * copy the content and free old pages.
1384 + */
1385 ++ index = start;
1386 + list_for_each_entry_safe(page, tmp, &pagelist, lru) {
1387 ++ while (index < page->index) {
1388 ++ clear_highpage(new_page + (index % HPAGE_PMD_NR));
1389 ++ index++;
1390 ++ }
1391 + copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1392 + page);
1393 + list_del(&page->lru);
1394 +- unlock_page(page);
1395 +- page_ref_unfreeze(page, 1);
1396 + page->mapping = NULL;
1397 ++ page_ref_unfreeze(page, 1);
1398 + ClearPageActive(page);
1399 + ClearPageUnevictable(page);
1400 ++ unlock_page(page);
1401 + put_page(page);
1402 ++ index++;
1403 + }
1404 +-
1405 +- local_irq_save(flags);
1406 +- __inc_node_page_state(new_page, NR_SHMEM_THPS);
1407 +- if (nr_none) {
1408 +- __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
1409 +- __mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
1410 ++ while (index < end) {
1411 ++ clear_highpage(new_page + (index % HPAGE_PMD_NR));
1412 ++ index++;
1413 + }
1414 +- local_irq_restore(flags);
1415 +
1416 +- /*
1417 +- * Remove pte page tables, so we can re-faulti
1418 +- * the page as huge.
1419 +- */
1420 +- retract_page_tables(mapping, start);
1421 +-
1422 +- /* Everything is ready, let's unfreeze the new_page */
1423 +- set_page_dirty(new_page);
1424 + SetPageUptodate(new_page);
1425 +- page_ref_unfreeze(new_page, HPAGE_PMD_NR);
1426 ++ page_ref_add(new_page, HPAGE_PMD_NR - 1);
1427 ++ set_page_dirty(new_page);
1428 + mem_cgroup_commit_charge(new_page, memcg, false, true);
1429 + lru_cache_add_anon(new_page);
1430 +- unlock_page(new_page);
1431 +
1432 ++ /*
1433 ++ * Remove pte page tables, so we can re-fault the page as huge.
1434 ++ */
1435 ++ retract_page_tables(mapping, start);
1436 + *hpage = NULL;
1437 + } else {
1438 + /* Something went wrong: rollback changes to the radix-tree */
1439 +- shmem_uncharge(mapping->host, nr_none);
1440 + spin_lock_irq(&mapping->tree_lock);
1441 ++ mapping->nrpages -= nr_none;
1442 ++ shmem_uncharge(mapping->host, nr_none);
1443 ++
1444 + radix_tree_for_each_slot(slot, &mapping->page_tree, &iter,
1445 + start) {
1446 + if (iter.index >= end)
1447 +@@ -1549,20 +1567,19 @@ tree_unlocked:
1448 + page_ref_unfreeze(page, 2);
1449 + radix_tree_replace_slot(slot, page);
1450 + spin_unlock_irq(&mapping->tree_lock);
1451 +- putback_lru_page(page);
1452 + unlock_page(page);
1453 ++ putback_lru_page(page);
1454 + spin_lock_irq(&mapping->tree_lock);
1455 + slot = radix_tree_iter_next(&iter);
1456 + }
1457 + VM_BUG_ON(nr_none);
1458 + spin_unlock_irq(&mapping->tree_lock);
1459 +
1460 +- /* Unfreeze new_page, caller would take care about freeing it */
1461 +- page_ref_unfreeze(new_page, 1);
1462 + mem_cgroup_cancel_charge(new_page, memcg, true);
1463 +- unlock_page(new_page);
1464 + new_page->mapping = NULL;
1465 + }
1466 ++
1467 ++ unlock_page(new_page);
1468 + out:
1469 + VM_BUG_ON(!list_empty(&pagelist));
1470 + /* TODO: tracepoints */
1471 +diff --git a/mm/shmem.c b/mm/shmem.c
1472 +index 358a92be43eb..9b17bd4cbc5e 100644
1473 +--- a/mm/shmem.c
1474 ++++ b/mm/shmem.c
1475 +@@ -181,6 +181,38 @@ static inline void shmem_unacct_blocks(unsigned long flags, long pages)
1476 + vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
1477 + }
1478 +
1479 ++static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
1480 ++{
1481 ++ struct shmem_inode_info *info = SHMEM_I(inode);
1482 ++ struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1483 ++
1484 ++ if (shmem_acct_block(info->flags, pages))
1485 ++ return false;
1486 ++
1487 ++ if (sbinfo->max_blocks) {
1488 ++ if (percpu_counter_compare(&sbinfo->used_blocks,
1489 ++ sbinfo->max_blocks - pages) > 0)
1490 ++ goto unacct;
1491 ++ percpu_counter_add(&sbinfo->used_blocks, pages);
1492 ++ }
1493 ++
1494 ++ return true;
1495 ++
1496 ++unacct:
1497 ++ shmem_unacct_blocks(info->flags, pages);
1498 ++ return false;
1499 ++}
1500 ++
1501 ++static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
1502 ++{
1503 ++ struct shmem_inode_info *info = SHMEM_I(inode);
1504 ++ struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1505 ++
1506 ++ if (sbinfo->max_blocks)
1507 ++ percpu_counter_sub(&sbinfo->used_blocks, pages);
1508 ++ shmem_unacct_blocks(info->flags, pages);
1509 ++}
1510 ++
1511 + static const struct super_operations shmem_ops;
1512 + static const struct address_space_operations shmem_aops;
1513 + static const struct file_operations shmem_file_operations;
1514 +@@ -237,61 +269,46 @@ static void shmem_recalc_inode(struct inode *inode)
1515 +
1516 + freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
1517 + if (freed > 0) {
1518 +- struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1519 +- if (sbinfo->max_blocks)
1520 +- percpu_counter_add(&sbinfo->used_blocks, -freed);
1521 + info->alloced -= freed;
1522 + inode->i_blocks -= freed * BLOCKS_PER_PAGE;
1523 +- shmem_unacct_blocks(info->flags, freed);
1524 ++ shmem_inode_unacct_blocks(inode, freed);
1525 + }
1526 + }
1527 +
1528 + bool shmem_charge(struct inode *inode, long pages)
1529 + {
1530 + struct shmem_inode_info *info = SHMEM_I(inode);
1531 +- struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1532 + unsigned long flags;
1533 +
1534 +- if (shmem_acct_block(info->flags, pages))
1535 ++ if (!shmem_inode_acct_block(inode, pages))
1536 + return false;
1537 ++
1538 ++ /* nrpages adjustment first, then shmem_recalc_inode() when balanced */
1539 ++ inode->i_mapping->nrpages += pages;
1540 ++
1541 + spin_lock_irqsave(&info->lock, flags);
1542 + info->alloced += pages;
1543 + inode->i_blocks += pages * BLOCKS_PER_PAGE;
1544 + shmem_recalc_inode(inode);
1545 + spin_unlock_irqrestore(&info->lock, flags);
1546 +- inode->i_mapping->nrpages += pages;
1547 +
1548 +- if (!sbinfo->max_blocks)
1549 +- return true;
1550 +- if (percpu_counter_compare(&sbinfo->used_blocks,
1551 +- sbinfo->max_blocks - pages) > 0) {
1552 +- inode->i_mapping->nrpages -= pages;
1553 +- spin_lock_irqsave(&info->lock, flags);
1554 +- info->alloced -= pages;
1555 +- shmem_recalc_inode(inode);
1556 +- spin_unlock_irqrestore(&info->lock, flags);
1557 +- shmem_unacct_blocks(info->flags, pages);
1558 +- return false;
1559 +- }
1560 +- percpu_counter_add(&sbinfo->used_blocks, pages);
1561 + return true;
1562 + }
1563 +
1564 + void shmem_uncharge(struct inode *inode, long pages)
1565 + {
1566 + struct shmem_inode_info *info = SHMEM_I(inode);
1567 +- struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1568 + unsigned long flags;
1569 +
1570 ++ /* nrpages adjustment done by __delete_from_page_cache() or caller */
1571 ++
1572 + spin_lock_irqsave(&info->lock, flags);
1573 + info->alloced -= pages;
1574 + inode->i_blocks -= pages * BLOCKS_PER_PAGE;
1575 + shmem_recalc_inode(inode);
1576 + spin_unlock_irqrestore(&info->lock, flags);
1577 +
1578 +- if (sbinfo->max_blocks)
1579 +- percpu_counter_sub(&sbinfo->used_blocks, pages);
1580 +- shmem_unacct_blocks(info->flags, pages);
1581 ++ shmem_inode_unacct_blocks(inode, pages);
1582 + }
1583 +
1584 + /*
1585 +@@ -1424,9 +1441,10 @@ static struct page *shmem_alloc_page(gfp_t gfp,
1586 + }
1587 +
1588 + static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
1589 +- struct shmem_inode_info *info, struct shmem_sb_info *sbinfo,
1590 ++ struct inode *inode,
1591 + pgoff_t index, bool huge)
1592 + {
1593 ++ struct shmem_inode_info *info = SHMEM_I(inode);
1594 + struct page *page;
1595 + int nr;
1596 + int err = -ENOSPC;
1597 +@@ -1435,14 +1453,8 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
1598 + huge = false;
1599 + nr = huge ? HPAGE_PMD_NR : 1;
1600 +
1601 +- if (shmem_acct_block(info->flags, nr))
1602 ++ if (!shmem_inode_acct_block(inode, nr))
1603 + goto failed;
1604 +- if (sbinfo->max_blocks) {
1605 +- if (percpu_counter_compare(&sbinfo->used_blocks,
1606 +- sbinfo->max_blocks - nr) > 0)
1607 +- goto unacct;
1608 +- percpu_counter_add(&sbinfo->used_blocks, nr);
1609 +- }
1610 +
1611 + if (huge)
1612 + page = shmem_alloc_hugepage(gfp, info, index);
1613 +@@ -1455,10 +1467,7 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
1614 + }
1615 +
1616 + err = -ENOMEM;
1617 +- if (sbinfo->max_blocks)
1618 +- percpu_counter_add(&sbinfo->used_blocks, -nr);
1619 +-unacct:
1620 +- shmem_unacct_blocks(info->flags, nr);
1621 ++ shmem_inode_unacct_blocks(inode, nr);
1622 + failed:
1623 + return ERR_PTR(err);
1624 + }
1625 +@@ -1485,11 +1494,13 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1626 + {
1627 + struct page *oldpage, *newpage;
1628 + struct address_space *swap_mapping;
1629 ++ swp_entry_t entry;
1630 + pgoff_t swap_index;
1631 + int error;
1632 +
1633 + oldpage = *pagep;
1634 +- swap_index = page_private(oldpage);
1635 ++ entry.val = page_private(oldpage);
1636 ++ swap_index = swp_offset(entry);
1637 + swap_mapping = page_mapping(oldpage);
1638 +
1639 + /*
1640 +@@ -1508,7 +1519,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1641 + __SetPageLocked(newpage);
1642 + __SetPageSwapBacked(newpage);
1643 + SetPageUptodate(newpage);
1644 +- set_page_private(newpage, swap_index);
1645 ++ set_page_private(newpage, entry.val);
1646 + SetPageSwapCache(newpage);
1647 +
1648 + /*
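
The shmem_replace_page() hunks fix the value kept in page_private(): for a swap-cache page it holds a whole swp_entry_t, not a bare offset, so the index must be decoded with swp_offset() and the full entry value copied to the replacement page. Sketch, assuming the usual <linux/swapops.h> helpers:

    swp_entry_t entry;

    entry.val = page_private(oldpage);    /* full entry: type + offset */
    swap_index = swp_offset(entry);       /* offset within the swap area */
    /* ... set up newpage ... */
    set_page_private(newpage, entry.val); /* preserve the whole entry */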
1649 +@@ -1718,10 +1729,9 @@ repeat:
1650 + }
1651 +
1652 + alloc_huge:
1653 +- page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
1654 +- index, true);
1655 ++ page = shmem_alloc_and_acct_page(gfp, inode, index, true);
1656 + if (IS_ERR(page)) {
1657 +-alloc_nohuge: page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
1658 ++alloc_nohuge: page = shmem_alloc_and_acct_page(gfp, inode,
1659 + index, false);
1660 + }
1661 + if (IS_ERR(page)) {
1662 +@@ -1843,10 +1853,7 @@ clear:
1663 + * Error recovery.
1664 + */
1665 + unacct:
1666 +- if (sbinfo->max_blocks)
1667 +- percpu_counter_sub(&sbinfo->used_blocks,
1668 +- 1 << compound_order(page));
1669 +- shmem_unacct_blocks(info->flags, 1 << compound_order(page));
1670 ++ shmem_inode_unacct_blocks(inode, 1 << compound_order(page));
1671 +
1672 + if (PageTransHuge(page)) {
1673 + unlock_page(page);
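
The remaining mm/shmem.c hunks funnel all block accounting through the new shmem_inode_acct_block()/shmem_inode_unacct_blocks() pair, so shmem_charge(), shmem_uncharge() and shmem_alloc_and_acct_page() no longer open-code the sbinfo->used_blocks bookkeeping. A hedged sketch of the resulting caller pattern (do_real_work() is a hypothetical placeholder):

    static int shmem_do_something(struct inode *inode, long pages)
    {
            /* reserve quota and per-sb blocks in one call */
            if (!shmem_inode_acct_block(inode, pages))
                    return -ENOSPC;

            if (do_real_work(inode) < 0) {   /* hypothetical step */
                    /* every failure path releases via the matching helper */
                    shmem_inode_unacct_blocks(inode, pages);
                    return -ENOMEM;
            }
            return 0;
    }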
1674 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
1675 +index 68ecb7d71c2b..dca1fed0d7da 100644
1676 +--- a/net/core/skbuff.c
1677 ++++ b/net/core/skbuff.c
1678 +@@ -4421,6 +4421,10 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
1679 + nf_reset(skb);
1680 + nf_reset_trace(skb);
1681 +
1682 ++#ifdef CONFIG_NET_SWITCHDEV
1683 ++ skb->offload_fwd_mark = 0;
1684 ++#endif
1685 ++
1686 + if (!xnet)
1687 + return;
1688 +
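
The net/core/skbuff.c hunk makes skb_scrub_packet() forget any switchdev hardware-forwarding decision: skb->offload_fwd_mark is cleared before the early return for same-netns scrubbing, so the reset applies whether or not the skb crosses a namespace. Sketch of the placement:

    void skb_scrub_packet(struct sk_buff *skb, bool xnet)
    {
            /* ... existing resets: nf_reset(), nf_reset_trace(), ... */
    #ifdef CONFIG_NET_SWITCHDEV
            skb->offload_fwd_mark = 0;  /* stale mark must not survive scrub */
    #endif
            if (!xnet)
                    return;             /* namespace-local state is kept */
            /* ... cross-netns scrubbing ... */
    }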
1689 +diff --git a/sound/core/control.c b/sound/core/control.c
1690 +index 995cde48c1be..511368fe974e 100644
1691 +--- a/sound/core/control.c
1692 ++++ b/sound/core/control.c
1693 +@@ -346,6 +346,40 @@ static int snd_ctl_find_hole(struct snd_card *card, unsigned int count)
1694 + return 0;
1695 + }
1696 +
1697 ++/* add a new kcontrol object; call with card->controls_rwsem locked */
1698 ++static int __snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
1699 ++{
1700 ++ struct snd_ctl_elem_id id;
1701 ++ unsigned int idx;
1702 ++ unsigned int count;
1703 ++
1704 ++ id = kcontrol->id;
1705 ++ if (id.index > UINT_MAX - kcontrol->count)
1706 ++ return -EINVAL;
1707 ++
1708 ++ if (snd_ctl_find_id(card, &id)) {
1709 ++ dev_err(card->dev,
1710 ++ "control %i:%i:%i:%s:%i is already present\n",
1711 ++ id.iface, id.device, id.subdevice, id.name, id.index);
1712 ++ return -EBUSY;
1713 ++ }
1714 ++
1715 ++ if (snd_ctl_find_hole(card, kcontrol->count) < 0)
1716 ++ return -ENOMEM;
1717 ++
1718 ++ list_add_tail(&kcontrol->list, &card->controls);
1719 ++ card->controls_count += kcontrol->count;
1720 ++ kcontrol->id.numid = card->last_numid + 1;
1721 ++ card->last_numid += kcontrol->count;
1722 ++
1723 ++ id = kcontrol->id;
1724 ++ count = kcontrol->count;
1725 ++ for (idx = 0; idx < count; idx++, id.index++, id.numid++)
1726 ++ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
1727 ++
1728 ++ return 0;
1729 ++}
1730 ++
1731 + /**
1732 + * snd_ctl_add - add the control instance to the card
1733 + * @card: the card instance
1734 +@@ -362,45 +396,18 @@ static int snd_ctl_find_hole(struct snd_card *card, unsigned int count)
1735 + */
1736 + int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
1737 + {
1738 +- struct snd_ctl_elem_id id;
1739 +- unsigned int idx;
1740 +- unsigned int count;
1741 + int err = -EINVAL;
1742 +
1743 + if (! kcontrol)
1744 + return err;
1745 + if (snd_BUG_ON(!card || !kcontrol->info))
1746 + goto error;
1747 +- id = kcontrol->id;
1748 +- if (id.index > UINT_MAX - kcontrol->count)
1749 +- goto error;
1750 +
1751 + down_write(&card->controls_rwsem);
1752 +- if (snd_ctl_find_id(card, &id)) {
1753 +- up_write(&card->controls_rwsem);
1754 +- dev_err(card->dev, "control %i:%i:%i:%s:%i is already present\n",
1755 +- id.iface,
1756 +- id.device,
1757 +- id.subdevice,
1758 +- id.name,
1759 +- id.index);
1760 +- err = -EBUSY;
1761 +- goto error;
1762 +- }
1763 +- if (snd_ctl_find_hole(card, kcontrol->count) < 0) {
1764 +- up_write(&card->controls_rwsem);
1765 +- err = -ENOMEM;
1766 +- goto error;
1767 +- }
1768 +- list_add_tail(&kcontrol->list, &card->controls);
1769 +- card->controls_count += kcontrol->count;
1770 +- kcontrol->id.numid = card->last_numid + 1;
1771 +- card->last_numid += kcontrol->count;
1772 +- id = kcontrol->id;
1773 +- count = kcontrol->count;
1774 ++ err = __snd_ctl_add(card, kcontrol);
1775 + up_write(&card->controls_rwsem);
1776 +- for (idx = 0; idx < count; idx++, id.index++, id.numid++)
1777 +- snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
1778 ++ if (err < 0)
1779 ++ goto error;
1780 + return 0;
1781 +
1782 + error:
1783 +@@ -1354,9 +1361,12 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
1784 + kctl->tlv.c = snd_ctl_elem_user_tlv;
1785 +
1786 + /* This function manages to free the instance on failure. */
1787 +- err = snd_ctl_add(card, kctl);
1788 +- if (err < 0)
1789 +- return err;
1790 ++ down_write(&card->controls_rwsem);
1791 ++ err = __snd_ctl_add(card, kctl);
1792 ++ if (err < 0) {
1793 ++ snd_ctl_free_one(kctl);
1794 ++ goto unlock;
1795 ++ }
1796 + offset = snd_ctl_get_ioff(kctl, &info->id);
1797 + snd_ctl_build_ioff(&info->id, kctl, offset);
1798 + /*
1799 +@@ -1367,10 +1377,10 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
1800 + * which locks the element.
1801 + */
1802 +
1803 +- down_write(&card->controls_rwsem);
1804 + card->user_ctl_count++;
1805 +- up_write(&card->controls_rwsem);
1806 +
1807 ++ unlock:
1808 ++ up_write(&card->controls_rwsem);
1809 + return 0;
1810 + }
1811 +
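
The sound/core/control.c hunks split snd_ctl_add() into a thin locking wrapper around the new __snd_ctl_add(), which expects card->controls_rwsem to be write-held. snd_ctl_elem_add() can then perform the add and the user_ctl_count increment under a single hold of the rwsem instead of dropping and retaking it, and it frees the kcontrol itself when the add fails. Condensed sketch of that pattern:

    down_write(&card->controls_rwsem);
    err = __snd_ctl_add(card, kctl);    /* worker assumes rwsem held */
    if (err < 0) {
            snd_ctl_free_one(kctl);     /* caller owns kctl on failure */
            goto unlock;
    }
    card->user_ctl_count++;             /* same critical section as the add */
    unlock:
    up_write(&card->controls_rwsem);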
1812 +diff --git a/sound/isa/wss/wss_lib.c b/sound/isa/wss/wss_lib.c
1813 +index 913b731d2236..f40330ddb9b2 100644
1814 +--- a/sound/isa/wss/wss_lib.c
1815 ++++ b/sound/isa/wss/wss_lib.c
1816 +@@ -1531,7 +1531,6 @@ static int snd_wss_playback_open(struct snd_pcm_substream *substream)
1817 + if (err < 0) {
1818 + if (chip->release_dma)
1819 + chip->release_dma(chip, chip->dma_private_data, chip->dma1);
1820 +- snd_free_pages(runtime->dma_area, runtime->dma_bytes);
1821 + return err;
1822 + }
1823 + chip->playback_substream = substream;
1824 +@@ -1572,7 +1571,6 @@ static int snd_wss_capture_open(struct snd_pcm_substream *substream)
1825 + if (err < 0) {
1826 + if (chip->release_dma)
1827 + chip->release_dma(chip, chip->dma_private_data, chip->dma2);
1828 +- snd_free_pages(runtime->dma_area, runtime->dma_bytes);
1829 + return err;
1830 + }
1831 + chip->capture_substream = substream;
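
The wss_lib hunks (and the matching cs4231 hunks below) remove a snd_free_pages() call from the playback and capture open error paths. runtime->dma_area is owned by the PCM core, which releases it when the substream is torn down, so freeing it here risked a double free. Sketch of the corrected error path (the failing open step is a hypothetical placeholder):

    err = some_open_step(chip);          /* hypothetical */
    if (err < 0) {
            if (chip->release_dma)
                    chip->release_dma(chip, chip->dma_private_data,
                                      chip->dma1);
            /* no snd_free_pages(): the PCM core frees runtime->dma_area */
            return err;
    }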
1832 +diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
1833 +index 82259ca61e64..c4840fda44b4 100644
1834 +--- a/sound/pci/ac97/ac97_codec.c
1835 ++++ b/sound/pci/ac97/ac97_codec.c
1836 +@@ -824,7 +824,7 @@ static int snd_ac97_put_spsa(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_
1837 + {
1838 + struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
1839 + int reg = kcontrol->private_value & 0xff;
1840 +- int shift = (kcontrol->private_value >> 8) & 0xff;
1841 ++ int shift = (kcontrol->private_value >> 8) & 0x0f;
1842 + int mask = (kcontrol->private_value >> 16) & 0xff;
1843 + // int invert = (kcontrol->private_value >> 24) & 0xff;
1844 + unsigned short value, old, new;
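
The AC'97 hunk narrows the mask for the shift field packed into kcontrol->private_value from 0xff to 0x0f. The register value is a 16-bit unsigned short, so a shift of 16 or more (reachable through a crafted private_value) would be undefined; masking to 0..15 keeps it in range. Sketch of the unpacking, following the surrounding code:

    struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
    int reg   =  kcontrol->private_value        & 0xff;
    int shift = (kcontrol->private_value >> 8)  & 0x0f;  /* was 0xff */
    int mask  = (kcontrol->private_value >> 16) & 0xff;
    /* illustrative use: shift is now provably < 16 for the 16-bit reg */
    unsigned short value = (ac97->regs[reg] >> shift) & mask;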
1845 +diff --git a/sound/sparc/cs4231.c b/sound/sparc/cs4231.c
1846 +index 30bdc971883b..017e241b0ec9 100644
1847 +--- a/sound/sparc/cs4231.c
1848 ++++ b/sound/sparc/cs4231.c
1849 +@@ -1146,10 +1146,8 @@ static int snd_cs4231_playback_open(struct snd_pcm_substream *substream)
1850 + runtime->hw = snd_cs4231_playback;
1851 +
1852 + err = snd_cs4231_open(chip, CS4231_MODE_PLAY);
1853 +- if (err < 0) {
1854 +- snd_free_pages(runtime->dma_area, runtime->dma_bytes);
1855 ++ if (err < 0)
1856 + return err;
1857 +- }
1858 + chip->playback_substream = substream;
1859 + chip->p_periods_sent = 0;
1860 + snd_pcm_set_sync(substream);
1861 +@@ -1167,10 +1165,8 @@ static int snd_cs4231_capture_open(struct snd_pcm_substream *substream)
1862 + runtime->hw = snd_cs4231_capture;
1863 +
1864 + err = snd_cs4231_open(chip, CS4231_MODE_RECORD);
1865 +- if (err < 0) {
1866 +- snd_free_pages(runtime->dma_area, runtime->dma_bytes);
1867 ++ if (err < 0)
1868 + return err;
1869 +- }
1870 + chip->capture_substream = substream;
1871 + chip->c_periods_sent = 0;
1872 + snd_pcm_set_sync(substream);