Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Fri, 16 Aug 2019 12:13:22
Message-Id: 1565957567.f739056a9cd88ac7ae209434eb265b1a1dcb903e.mpagano@gentoo
commit:     f739056a9cd88ac7ae209434eb265b1a1dcb903e
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Aug 16 12:12:47 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Aug 16 12:12:47 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=f739056a

Linux patch 4.19.67

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1066_linux-4.19.67.patch | 2715 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2719 insertions(+)

16 diff --git a/0000_README b/0000_README
17 index 6813edb..142096b 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -307,6 +307,10 @@ Patch: 1065_linux-4.19.66.patch
21 From: https://www.kernel.org
22 Desc: Linux 4.19.66
23
24 +Patch: 1066_linux-4.19.67.patch
25 +From: https://www.kernel.org
26 +Desc: Linux 4.19.67
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1066_linux-4.19.67.patch b/1066_linux-4.19.67.patch
33 new file mode 100644
34 index 0000000..9ac1ae3
35 --- /dev/null
36 +++ b/1066_linux-4.19.67.patch
37 @@ -0,0 +1,2715 @@
38 +diff --git a/Makefile b/Makefile
39 +index 065e5b34dc02..b6aa6e8d4411 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 4
45 + PATCHLEVEL = 19
46 +-SUBLEVEL = 66
47 ++SUBLEVEL = 67
48 + EXTRAVERSION =
49 + NAME = "People's Front"
50 +
51 +diff --git a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
52 +index 36efe410dcd7..9e33c41f5411 100644
53 +--- a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
54 ++++ b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
55 +@@ -125,6 +125,9 @@
56 + };
57 +
58 + mdio-bus-mux {
59 ++ #address-cells = <1>;
60 ++ #size-cells = <0>;
61 ++
62 + /* BIT(9) = 1 => external mdio */
63 + mdio_ext: mdio@200 {
64 + reg = <0x200>;
65 +diff --git a/arch/arm/mach-davinci/sleep.S b/arch/arm/mach-davinci/sleep.S
66 +index cd350dee4df3..efcd400b2abb 100644
67 +--- a/arch/arm/mach-davinci/sleep.S
68 ++++ b/arch/arm/mach-davinci/sleep.S
69 +@@ -37,6 +37,7 @@
70 + #define DEEPSLEEP_SLEEPENABLE_BIT BIT(31)
71 +
72 + .text
73 ++ .arch armv5te
74 + /*
75 + * Move DaVinci into deep sleep state
76 + *
77 +diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
78 +index 578174a33d22..51cd66dc1bb0 100644
79 +--- a/arch/powerpc/kvm/powerpc.c
80 ++++ b/arch/powerpc/kvm/powerpc.c
81 +@@ -61,6 +61,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
82 + return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
83 + }
84 +
85 ++bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
86 ++{
87 ++ return kvm_arch_vcpu_runnable(vcpu);
88 ++}
89 ++
90 + bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
91 + {
92 + return false;
93 +diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
94 +index 41e3908b397f..0d753291c43c 100644
95 +--- a/arch/s390/include/asm/page.h
96 ++++ b/arch/s390/include/asm/page.h
97 +@@ -176,6 +176,8 @@ static inline int devmem_is_allowed(unsigned long pfn)
98 + #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
99 + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
100 +
101 ++#define ARCH_ZONE_DMA_BITS 31
102 ++
103 + #include <asm-generic/memory_model.h>
104 + #include <asm-generic/getorder.h>
105 +
106 +diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
107 +index c4428a176973..2622c0742c92 100644
108 +--- a/arch/x86/boot/string.c
109 ++++ b/arch/x86/boot/string.c
110 +@@ -34,6 +34,14 @@ int memcmp(const void *s1, const void *s2, size_t len)
111 + return diff;
112 + }
113 +
114 ++/*
115 ++ * Clang may lower `memcmp == 0` to `bcmp == 0`.
116 ++ */
117 ++int bcmp(const void *s1, const void *s2, size_t len)
118 ++{
119 ++ return memcmp(s1, s2, len);
120 ++}
121 ++
122 + int strcmp(const char *str1, const char *str2)
123 + {
124 + const unsigned char *s1 = (const unsigned char *)str1;
125 +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
126 +index 2877e1fbadd8..3245b95ad2d9 100644
127 +--- a/arch/x86/include/asm/kvm_host.h
128 ++++ b/arch/x86/include/asm/kvm_host.h
129 +@@ -1113,6 +1113,7 @@ struct kvm_x86_ops {
130 + int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
131 + uint32_t guest_irq, bool set);
132 + void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
133 ++ bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
134 +
135 + int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc);
136 + void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);
137 +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
138 +index ea454d3f7763..0f33f00aa4df 100644
139 +--- a/arch/x86/kvm/svm.c
140 ++++ b/arch/x86/kvm/svm.c
141 +@@ -5146,6 +5146,11 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
142 + kvm_vcpu_wake_up(vcpu);
143 + }
144 +
145 ++static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
146 ++{
147 ++ return false;
148 ++}
149 ++
150 + static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
151 + {
152 + unsigned long flags;
153 +@@ -7203,6 +7208,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
154 +
155 + .pmu_ops = &amd_pmu_ops,
156 + .deliver_posted_interrupt = svm_deliver_avic_intr,
157 ++ .dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
158 + .update_pi_irte = svm_update_pi_irte,
159 + .setup_mce = svm_setup_mce,
160 +
161 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
162 +index 4cf16378dffe..2e310ea62d60 100644
163 +--- a/arch/x86/kvm/vmx.c
164 ++++ b/arch/x86/kvm/vmx.c
165 +@@ -10411,6 +10411,11 @@ static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
166 + return ((rvi & 0xf0) > (vppr & 0xf0));
167 + }
168 +
169 ++static bool vmx_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
170 ++{
171 ++ return pi_test_on(vcpu_to_pi_desc(vcpu));
172 ++}
173 ++
174 + static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
175 + {
176 + if (!kvm_vcpu_apicv_active(vcpu))
177 +@@ -14387,6 +14392,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
178 + .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
179 + .sync_pir_to_irr = vmx_sync_pir_to_irr,
180 + .deliver_posted_interrupt = vmx_deliver_posted_interrupt,
181 ++ .dy_apicv_has_pending_interrupt = vmx_dy_apicv_has_pending_interrupt,
182 +
183 + .set_tss_addr = vmx_set_tss_addr,
184 + .set_identity_map_addr = vmx_set_identity_map_addr,
185 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
186 +index cea6568667c4..e10a7a42449b 100644
187 +--- a/arch/x86/kvm/x86.c
188 ++++ b/arch/x86/kvm/x86.c
189 +@@ -9336,6 +9336,22 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
190 + return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
191 + }
192 +
193 ++bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
194 ++{
195 ++ if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
196 ++ return true;
197 ++
198 ++ if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
199 ++ kvm_test_request(KVM_REQ_SMI, vcpu) ||
200 ++ kvm_test_request(KVM_REQ_EVENT, vcpu))
201 ++ return true;
202 ++
203 ++ if (vcpu->arch.apicv_active && kvm_x86_ops->dy_apicv_has_pending_interrupt(vcpu))
204 ++ return true;
205 ++
206 ++ return false;
207 ++}
208 ++
209 + bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
210 + {
211 + return vcpu->arch.preempted_in_kernel;
212 +diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
213 +index 9d9765e4d1ef..1bcb7242ad79 100644
214 +--- a/arch/x86/mm/fault.c
215 ++++ b/arch/x86/mm/fault.c
216 +@@ -261,13 +261,14 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
217 +
218 + pmd = pmd_offset(pud, address);
219 + pmd_k = pmd_offset(pud_k, address);
220 +- if (!pmd_present(*pmd_k))
221 +- return NULL;
222 +
223 +- if (!pmd_present(*pmd))
224 ++ if (pmd_present(*pmd) != pmd_present(*pmd_k))
225 + set_pmd(pmd, *pmd_k);
226 ++
227 ++ if (!pmd_present(*pmd_k))
228 ++ return NULL;
229 + else
230 +- BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
231 ++ BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));
232 +
233 + return pmd_k;
234 + }
235 +@@ -287,17 +288,13 @@ void vmalloc_sync_all(void)
236 + spin_lock(&pgd_lock);
237 + list_for_each_entry(page, &pgd_list, lru) {
238 + spinlock_t *pgt_lock;
239 +- pmd_t *ret;
240 +
241 + /* the pgt_lock only for Xen */
242 + pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
243 +
244 + spin_lock(pgt_lock);
245 +- ret = vmalloc_sync_one(page_address(page), address);
246 ++ vmalloc_sync_one(page_address(page), address);
247 + spin_unlock(pgt_lock);
248 +-
249 +- if (!ret)
250 +- break;
251 + }
252 + spin_unlock(&pgd_lock);
253 + }
254 +diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
255 +index 3cf302b26332..8901a1f89cf5 100644
256 +--- a/arch/x86/purgatory/Makefile
257 ++++ b/arch/x86/purgatory/Makefile
258 +@@ -6,6 +6,9 @@ purgatory-y := purgatory.o stack.o setup-x86_$(BITS).o sha256.o entry64.o string
259 + targets += $(purgatory-y)
260 + PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
261 +
262 ++$(obj)/string.o: $(srctree)/arch/x86/boot/compressed/string.c FORCE
263 ++ $(call if_changed_rule,cc_o_c)
264 ++
265 + $(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE
266 + $(call if_changed_rule,cc_o_c)
267 +
268 +@@ -17,11 +20,34 @@ KCOV_INSTRUMENT := n
269 +
270 + # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
271 + # in turn leaves some undefined symbols like __fentry__ in purgatory and not
272 +-# sure how to relocate those. Like kexec-tools, use custom flags.
273 +-
274 +-KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -Os -mcmodel=large
275 +-KBUILD_CFLAGS += -m$(BITS)
276 +-KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
277 ++# sure how to relocate those.
278 ++ifdef CONFIG_FUNCTION_TRACER
279 ++CFLAGS_REMOVE_sha256.o += $(CC_FLAGS_FTRACE)
280 ++CFLAGS_REMOVE_purgatory.o += $(CC_FLAGS_FTRACE)
281 ++CFLAGS_REMOVE_string.o += $(CC_FLAGS_FTRACE)
282 ++CFLAGS_REMOVE_kexec-purgatory.o += $(CC_FLAGS_FTRACE)
283 ++endif
284 ++
285 ++ifdef CONFIG_STACKPROTECTOR
286 ++CFLAGS_REMOVE_sha256.o += -fstack-protector
287 ++CFLAGS_REMOVE_purgatory.o += -fstack-protector
288 ++CFLAGS_REMOVE_string.o += -fstack-protector
289 ++CFLAGS_REMOVE_kexec-purgatory.o += -fstack-protector
290 ++endif
291 ++
292 ++ifdef CONFIG_STACKPROTECTOR_STRONG
293 ++CFLAGS_REMOVE_sha256.o += -fstack-protector-strong
294 ++CFLAGS_REMOVE_purgatory.o += -fstack-protector-strong
295 ++CFLAGS_REMOVE_string.o += -fstack-protector-strong
296 ++CFLAGS_REMOVE_kexec-purgatory.o += -fstack-protector-strong
297 ++endif
298 ++
299 ++ifdef CONFIG_RETPOLINE
300 ++CFLAGS_REMOVE_sha256.o += $(RETPOLINE_CFLAGS)
301 ++CFLAGS_REMOVE_purgatory.o += $(RETPOLINE_CFLAGS)
302 ++CFLAGS_REMOVE_string.o += $(RETPOLINE_CFLAGS)
303 ++CFLAGS_REMOVE_kexec-purgatory.o += $(RETPOLINE_CFLAGS)
304 ++endif
305 +
306 + $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
307 + $(call if_changed,ld)
308 +diff --git a/arch/x86/purgatory/purgatory.c b/arch/x86/purgatory/purgatory.c
309 +index 025c34ac0d84..7971f7a8af59 100644
310 +--- a/arch/x86/purgatory/purgatory.c
311 ++++ b/arch/x86/purgatory/purgatory.c
312 +@@ -70,3 +70,9 @@ void purgatory(void)
313 + }
314 + copy_backup_region();
315 + }
316 ++
317 ++/*
318 ++ * Defined in order to reuse memcpy() and memset() from
319 ++ * arch/x86/boot/compressed/string.c
320 ++ */
321 ++void warn(const char *msg) {}
322 +diff --git a/arch/x86/purgatory/string.c b/arch/x86/purgatory/string.c
323 +deleted file mode 100644
324 +index 795ca4f2cb3c..000000000000
325 +--- a/arch/x86/purgatory/string.c
326 ++++ /dev/null
327 +@@ -1,25 +0,0 @@
328 +-/*
329 +- * Simple string functions.
330 +- *
331 +- * Copyright (C) 2014 Red Hat Inc.
332 +- *
333 +- * Author:
334 +- * Vivek Goyal <vgoyal@××××××.com>
335 +- *
336 +- * This source code is licensed under the GNU General Public License,
337 +- * Version 2. See the file COPYING for more details.
338 +- */
339 +-
340 +-#include <linux/types.h>
341 +-
342 +-#include "../boot/string.c"
343 +-
344 +-void *memcpy(void *dst, const void *src, size_t len)
345 +-{
346 +- return __builtin_memcpy(dst, src, len);
347 +-}
348 +-
349 +-void *memset(void *dst, int c, size_t len)
350 +-{
351 +- return __builtin_memset(dst, c, len);
352 +-}
353 +diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
354 +index 43c2615434b4..e11b5da6f828 100644
355 +--- a/drivers/acpi/arm64/iort.c
356 ++++ b/drivers/acpi/arm64/iort.c
357 +@@ -616,8 +616,8 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id,
358 +
359 + /* Move to ITS specific data */
360 + its = (struct acpi_iort_its_group *)node->node_data;
361 +- if (idx > its->its_count) {
362 +- dev_err(dev, "requested ITS ID index [%d] is greater than available [%d]\n",
363 ++ if (idx >= its->its_count) {
364 ++ dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
365 + idx, its->its_count);
366 + return -ENXIO;
367 + }
368 +diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
369 +index cb919b964066..3cdadf75c82d 100644
370 +--- a/drivers/block/drbd/drbd_receiver.c
371 ++++ b/drivers/block/drbd/drbd_receiver.c
372 +@@ -5240,7 +5240,7 @@ static int drbd_do_auth(struct drbd_connection *connection)
373 + unsigned int key_len;
374 + char secret[SHARED_SECRET_MAX]; /* 64 byte */
375 + unsigned int resp_size;
376 +- SHASH_DESC_ON_STACK(desc, connection->cram_hmac_tfm);
377 ++ struct shash_desc *desc;
378 + struct packet_info pi;
379 + struct net_conf *nc;
380 + int err, rv;
381 +@@ -5253,6 +5253,13 @@ static int drbd_do_auth(struct drbd_connection *connection)
382 + memcpy(secret, nc->shared_secret, key_len);
383 + rcu_read_unlock();
384 +
385 ++ desc = kmalloc(sizeof(struct shash_desc) +
386 ++ crypto_shash_descsize(connection->cram_hmac_tfm),
387 ++ GFP_KERNEL);
388 ++ if (!desc) {
389 ++ rv = -1;
390 ++ goto fail;
391 ++ }
392 + desc->tfm = connection->cram_hmac_tfm;
393 + desc->flags = 0;
394 +
395 +@@ -5395,7 +5402,10 @@ static int drbd_do_auth(struct drbd_connection *connection)
396 + kfree(peers_ch);
397 + kfree(response);
398 + kfree(right_response);
399 +- shash_desc_zero(desc);
400 ++ if (desc) {
401 ++ shash_desc_zero(desc);
402 ++ kfree(desc);
403 ++ }
404 +
405 + return rv;
406 + }
407 +diff --git a/drivers/block/loop.c b/drivers/block/loop.c
408 +index f1e63eb7cbca..cef8e00c9d9d 100644
409 +--- a/drivers/block/loop.c
410 ++++ b/drivers/block/loop.c
411 +@@ -886,7 +886,7 @@ static void loop_unprepare_queue(struct loop_device *lo)
412 +
413 + static int loop_kthread_worker_fn(void *worker_ptr)
414 + {
415 +- current->flags |= PF_LESS_THROTTLE;
416 ++ current->flags |= PF_LESS_THROTTLE | PF_MEMALLOC_NOIO;
417 + return kthread_worker_fn(worker_ptr);
418 + }
419 +
420 +diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
421 +index c7710c149de8..a0620c9ec064 100644
422 +--- a/drivers/cpufreq/pasemi-cpufreq.c
423 ++++ b/drivers/cpufreq/pasemi-cpufreq.c
424 +@@ -145,10 +145,18 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
425 + int err = -ENODEV;
426 +
427 + cpu = of_get_cpu_node(policy->cpu, NULL);
428 ++ if (!cpu)
429 ++ goto out;
430 +
431 ++ max_freqp = of_get_property(cpu, "clock-frequency", NULL);
432 + of_node_put(cpu);
433 +- if (!cpu)
434 ++ if (!max_freqp) {
435 ++ err = -EINVAL;
436 + goto out;
437 ++ }
438 ++
439 ++ /* we need the freq in kHz */
440 ++ max_freq = *max_freqp / 1000;
441 +
442 + dn = of_find_compatible_node(NULL, NULL, "1682m-sdc");
443 + if (!dn)
444 +@@ -185,16 +193,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
445 + }
446 +
447 + pr_debug("init cpufreq on CPU %d\n", policy->cpu);
448 +-
449 +- max_freqp = of_get_property(cpu, "clock-frequency", NULL);
450 +- if (!max_freqp) {
451 +- err = -EINVAL;
452 +- goto out_unmap_sdcpwr;
453 +- }
454 +-
455 +- /* we need the freq in kHz */
456 +- max_freq = *max_freqp / 1000;
457 +-
458 + pr_debug("max clock-frequency is at %u kHz\n", max_freq);
459 + pr_debug("initializing frequency table\n");
460 +
461 +@@ -212,9 +210,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
462 +
463 + return cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency());
464 +
465 +-out_unmap_sdcpwr:
466 +- iounmap(sdcpwr_mapbase);
467 +-
468 + out_unmap_sdcasr:
469 + iounmap(sdcasr_mapbase);
470 + out:
471 +diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
472 +index ca1f0d780b61..e5dcb29b687f 100644
473 +--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
474 ++++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
475 +@@ -61,6 +61,19 @@ static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
476 + static int ccp_aes_gcm_setauthsize(struct crypto_aead *tfm,
477 + unsigned int authsize)
478 + {
479 ++ switch (authsize) {
480 ++ case 16:
481 ++ case 15:
482 ++ case 14:
483 ++ case 13:
484 ++ case 12:
485 ++ case 8:
486 ++ case 4:
487 ++ break;
488 ++ default:
489 ++ return -EINVAL;
490 ++ }
491 ++
492 + return 0;
493 + }
494 +
495 +@@ -107,6 +120,7 @@ static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt)
496 + memset(&rctx->cmd, 0, sizeof(rctx->cmd));
497 + INIT_LIST_HEAD(&rctx->cmd.entry);
498 + rctx->cmd.engine = CCP_ENGINE_AES;
499 ++ rctx->cmd.u.aes.authsize = crypto_aead_authsize(tfm);
500 + rctx->cmd.u.aes.type = ctx->u.aes.type;
501 + rctx->cmd.u.aes.mode = ctx->u.aes.mode;
502 + rctx->cmd.u.aes.action = encrypt;
503 +diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
504 +index e212badd39fa..1e2e42106dee 100644
505 +--- a/drivers/crypto/ccp/ccp-ops.c
506 ++++ b/drivers/crypto/ccp/ccp-ops.c
507 +@@ -625,6 +625,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
508 +
509 + unsigned long long *final;
510 + unsigned int dm_offset;
511 ++ unsigned int authsize;
512 + unsigned int jobid;
513 + unsigned int ilen;
514 + bool in_place = true; /* Default value */
515 +@@ -646,6 +647,21 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
516 + if (!aes->key) /* Gotta have a key SGL */
517 + return -EINVAL;
518 +
519 ++ /* Zero defaults to 16 bytes, the maximum size */
520 ++ authsize = aes->authsize ? aes->authsize : AES_BLOCK_SIZE;
521 ++ switch (authsize) {
522 ++ case 16:
523 ++ case 15:
524 ++ case 14:
525 ++ case 13:
526 ++ case 12:
527 ++ case 8:
528 ++ case 4:
529 ++ break;
530 ++ default:
531 ++ return -EINVAL;
532 ++ }
533 ++
534 + /* First, decompose the source buffer into AAD & PT,
535 + * and the destination buffer into AAD, CT & tag, or
536 + * the input into CT & tag.
537 +@@ -660,7 +676,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
538 + p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen);
539 + } else {
540 + /* Input length for decryption includes tag */
541 +- ilen = aes->src_len - AES_BLOCK_SIZE;
542 ++ ilen = aes->src_len - authsize;
543 + p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
544 + }
545 +
546 +@@ -769,8 +785,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
547 + while (src.sg_wa.bytes_left) {
548 + ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
549 + if (!src.sg_wa.bytes_left) {
550 +- unsigned int nbytes = aes->src_len
551 +- % AES_BLOCK_SIZE;
552 ++ unsigned int nbytes = ilen % AES_BLOCK_SIZE;
553 +
554 + if (nbytes) {
555 + op.eom = 1;
556 +@@ -842,19 +857,19 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
557 +
558 + if (aes->action == CCP_AES_ACTION_ENCRYPT) {
559 + /* Put the ciphered tag after the ciphertext. */
560 +- ccp_get_dm_area(&final_wa, 0, p_tag, 0, AES_BLOCK_SIZE);
561 ++ ccp_get_dm_area(&final_wa, 0, p_tag, 0, authsize);
562 + } else {
563 + /* Does this ciphered tag match the input? */
564 +- ret = ccp_init_dm_workarea(&tag, cmd_q, AES_BLOCK_SIZE,
565 ++ ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
566 + DMA_BIDIRECTIONAL);
567 + if (ret)
568 + goto e_tag;
569 +- ret = ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
570 ++ ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
571 + if (ret)
572 + goto e_tag;
573 +
574 + ret = crypto_memneq(tag.address, final_wa.address,
575 +- AES_BLOCK_SIZE) ? -EBADMSG : 0;
576 ++ authsize) ? -EBADMSG : 0;
577 + ccp_dm_free(&tag);
578 + }
579 +
580 +@@ -862,11 +877,11 @@ e_tag:
581 + ccp_dm_free(&final_wa);
582 +
583 + e_dst:
584 +- if (aes->src_len && !in_place)
585 ++ if (ilen > 0 && !in_place)
586 + ccp_free_data(&dst, cmd_q);
587 +
588 + e_src:
589 +- if (aes->src_len)
590 ++ if (ilen > 0)
591 + ccp_free_data(&src, cmd_q);
592 +
593 + e_aad:
594 +diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
595 +index 6e83880046d7..ed212c8b4108 100644
596 +--- a/drivers/firmware/Kconfig
597 ++++ b/drivers/firmware/Kconfig
598 +@@ -198,7 +198,7 @@ config DMI_SCAN_MACHINE_NON_EFI_FALLBACK
599 +
600 + config ISCSI_IBFT_FIND
601 + bool "iSCSI Boot Firmware Table Attributes"
602 +- depends on X86 && ACPI
603 ++ depends on X86 && ISCSI_IBFT
604 + default n
605 + help
606 + This option enables the kernel to find the region of memory
607 +@@ -209,7 +209,8 @@ config ISCSI_IBFT_FIND
608 + config ISCSI_IBFT
609 + tristate "iSCSI Boot Firmware Table Attributes module"
610 + select ISCSI_BOOT_SYSFS
611 +- depends on ISCSI_IBFT_FIND && SCSI && SCSI_LOWLEVEL
612 ++ select ISCSI_IBFT_FIND if X86
613 ++ depends on ACPI && SCSI && SCSI_LOWLEVEL
614 + default n
615 + help
616 + This option enables support for detection and exposing of iSCSI
617 +diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
618 +index c51462f5aa1e..966aef334c42 100644
619 +--- a/drivers/firmware/iscsi_ibft.c
620 ++++ b/drivers/firmware/iscsi_ibft.c
621 +@@ -93,6 +93,10 @@ MODULE_DESCRIPTION("sysfs interface to BIOS iBFT information");
622 + MODULE_LICENSE("GPL");
623 + MODULE_VERSION(IBFT_ISCSI_VERSION);
624 +
625 ++#ifndef CONFIG_ISCSI_IBFT_FIND
626 ++struct acpi_table_ibft *ibft_addr;
627 ++#endif
628 ++
629 + struct ibft_hdr {
630 + u8 id;
631 + u8 version;
632 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
633 +index e3f5e5d6f0c1..f4b89d1ea6f6 100644
634 +--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
635 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
636 +@@ -462,8 +462,10 @@ void dc_link_set_test_pattern(struct dc_link *link,
637 +
638 + static void destruct(struct dc *dc)
639 + {
640 +- dc_release_state(dc->current_state);
641 +- dc->current_state = NULL;
642 ++ if (dc->current_state) {
643 ++ dc_release_state(dc->current_state);
644 ++ dc->current_state = NULL;
645 ++ }
646 +
647 + destroy_links(dc);
648 +
649 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
650 +index e0a96abb3c46..f0d68aa7c8fc 100644
651 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
652 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
653 +@@ -222,7 +222,7 @@ bool resource_construct(
654 + * PORT_CONNECTIVITY == 1 (as instructed by HW team).
655 + */
656 + update_num_audio(&straps, &num_audio, &pool->audio_support);
657 +- for (i = 0; i < pool->pipe_count && i < num_audio; i++) {
658 ++ for (i = 0; i < caps->num_audio; i++) {
659 + struct audio *aud = create_funcs->create_audio(ctx, i);
660 +
661 + if (aud == NULL) {
662 +@@ -1713,6 +1713,12 @@ static struct audio *find_first_free_audio(
663 + return pool->audios[i];
664 + }
665 + }
666 ++
667 ++ /* use engine id to find free audio */
668 ++ if ((id < pool->audio_count) && (res_ctx->is_audio_acquired[id] == false)) {
669 ++ return pool->audios[id];
670 ++ }
671 ++
672 + /*not found the matching one, first come first serve*/
673 + for (i = 0; i < pool->audio_count; i++) {
674 + if (res_ctx->is_audio_acquired[i] == false) {
675 +@@ -1866,6 +1872,7 @@ static int get_norm_pix_clk(const struct dc_crtc_timing *timing)
676 + pix_clk /= 2;
677 + if (timing->pixel_encoding != PIXEL_ENCODING_YCBCR422) {
678 + switch (timing->display_color_depth) {
679 ++ case COLOR_DEPTH_666:
680 + case COLOR_DEPTH_888:
681 + normalized_pix_clk = pix_clk;
682 + break;
683 +@@ -1949,7 +1956,7 @@ enum dc_status resource_map_pool_resources(
684 + /* TODO: Add check if ASIC support and EDID audio */
685 + if (!stream->sink->converter_disable_audio &&
686 + dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
687 +- stream->audio_info.mode_count) {
688 ++ stream->audio_info.mode_count && stream->audio_info.flags.all) {
689 + pipe_ctx->stream_res.audio = find_first_free_audio(
690 + &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id);
691 +
692 +diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
693 +index 070ab56a8aca..da8b198538e5 100644
694 +--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
695 ++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
696 +@@ -242,6 +242,10 @@ static void dmcu_set_backlight_level(
697 + s2 |= (level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
698 +
699 + REG_WRITE(BIOS_SCRATCH_2, s2);
700 ++
701 ++ /* waitDMCUReadyForCmd */
702 ++ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT,
703 ++ 0, 1, 80000);
704 + }
705 +
706 + static void dce_abm_init(struct abm *abm)
707 +diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
708 +index c0b9ca13393b..f4469fa5afb5 100644
709 +--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
710 ++++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
711 +@@ -159,7 +159,7 @@ struct resource_pool {
712 + struct clock_source *clock_sources[MAX_CLOCK_SOURCES];
713 + unsigned int clk_src_count;
714 +
715 +- struct audio *audios[MAX_PIPES];
716 ++ struct audio *audios[MAX_AUDIOS];
717 + unsigned int audio_count;
718 + struct audio_support audio_support;
719 +
720 +diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
721 +index cf7433ebf91a..71901743a938 100644
722 +--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
723 ++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
724 +@@ -34,6 +34,7 @@
725 + * Data types shared between different Virtual HW blocks
726 + ******************************************************************************/
727 +
728 ++#define MAX_AUDIOS 7
729 + #define MAX_PIPES 6
730 +
731 + struct gamma_curve {
732 +diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
733 +index 781af1d42d76..b64a6ffc0aed 100644
734 +--- a/drivers/gpu/drm/drm_framebuffer.c
735 ++++ b/drivers/gpu/drm/drm_framebuffer.c
736 +@@ -793,7 +793,7 @@ static int atomic_remove_fb(struct drm_framebuffer *fb)
737 + struct drm_device *dev = fb->dev;
738 + struct drm_atomic_state *state;
739 + struct drm_plane *plane;
740 +- struct drm_connector *conn;
741 ++ struct drm_connector *conn __maybe_unused;
742 + struct drm_connector_state *conn_state;
743 + int i, ret;
744 + unsigned plane_mask;
745 +diff --git a/drivers/gpu/drm/i915/vlv_dsi_pll.c b/drivers/gpu/drm/i915/vlv_dsi_pll.c
746 +index a132a8037ecc..77df7903e071 100644
747 +--- a/drivers/gpu/drm/i915/vlv_dsi_pll.c
748 ++++ b/drivers/gpu/drm/i915/vlv_dsi_pll.c
749 +@@ -413,8 +413,8 @@ static void glk_dsi_program_esc_clock(struct drm_device *dev,
750 + else
751 + txesc2_div = 10;
752 +
753 +- I915_WRITE(MIPIO_TXESC_CLK_DIV1, txesc1_div & GLK_TX_ESC_CLK_DIV1_MASK);
754 +- I915_WRITE(MIPIO_TXESC_CLK_DIV2, txesc2_div & GLK_TX_ESC_CLK_DIV2_MASK);
755 ++ I915_WRITE(MIPIO_TXESC_CLK_DIV1, (1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK);
756 ++ I915_WRITE(MIPIO_TXESC_CLK_DIV2, (1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK);
757 + }
758 +
759 + /* Program BXT Mipi clocks and dividers */
760 +diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
761 +index 9671a4bad643..31f1023214d3 100644
762 +--- a/drivers/hid/hid-sony.c
763 ++++ b/drivers/hid/hid-sony.c
764 +@@ -587,10 +587,14 @@ static void sony_set_leds(struct sony_sc *sc);
765 + static inline void sony_schedule_work(struct sony_sc *sc,
766 + enum sony_worker which)
767 + {
768 ++ unsigned long flags;
769 ++
770 + switch (which) {
771 + case SONY_WORKER_STATE:
772 +- if (!sc->defer_initialization)
773 ++ spin_lock_irqsave(&sc->lock, flags);
774 ++ if (!sc->defer_initialization && sc->state_worker_initialized)
775 + schedule_work(&sc->state_worker);
776 ++ spin_unlock_irqrestore(&sc->lock, flags);
777 + break;
778 + case SONY_WORKER_HOTPLUG:
779 + if (sc->hotplug_worker_initialized)
780 +@@ -2553,13 +2557,18 @@ static inline void sony_init_output_report(struct sony_sc *sc,
781 +
782 + static inline void sony_cancel_work_sync(struct sony_sc *sc)
783 + {
784 ++ unsigned long flags;
785 ++
786 + if (sc->hotplug_worker_initialized)
787 + cancel_work_sync(&sc->hotplug_worker);
788 +- if (sc->state_worker_initialized)
789 ++ if (sc->state_worker_initialized) {
790 ++ spin_lock_irqsave(&sc->lock, flags);
791 ++ sc->state_worker_initialized = 0;
792 ++ spin_unlock_irqrestore(&sc->lock, flags);
793 + cancel_work_sync(&sc->state_worker);
794 ++ }
795 + }
796 +
797 +-
798 + static int sony_input_configured(struct hid_device *hdev,
799 + struct hid_input *hidinput)
800 + {
801 +diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
802 +index 78603b78cf41..eba692cddbde 100644
803 +--- a/drivers/hwmon/nct6775.c
804 ++++ b/drivers/hwmon/nct6775.c
805 +@@ -818,7 +818,7 @@ static const u16 NCT6106_REG_TARGET[] = { 0x111, 0x121, 0x131 };
806 + static const u16 NCT6106_REG_WEIGHT_TEMP_SEL[] = { 0x168, 0x178, 0x188 };
807 + static const u16 NCT6106_REG_WEIGHT_TEMP_STEP[] = { 0x169, 0x179, 0x189 };
808 + static const u16 NCT6106_REG_WEIGHT_TEMP_STEP_TOL[] = { 0x16a, 0x17a, 0x18a };
809 +-static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x17c };
810 ++static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x18b };
811 + static const u16 NCT6106_REG_WEIGHT_TEMP_BASE[] = { 0x16c, 0x17c, 0x18c };
812 + static const u16 NCT6106_REG_WEIGHT_DUTY_BASE[] = { 0x16d, 0x17d, 0x18d };
813 +
814 +@@ -3673,6 +3673,7 @@ static int nct6775_probe(struct platform_device *pdev)
815 + data->REG_FAN_TIME[0] = NCT6106_REG_FAN_STOP_TIME;
816 + data->REG_FAN_TIME[1] = NCT6106_REG_FAN_STEP_UP_TIME;
817 + data->REG_FAN_TIME[2] = NCT6106_REG_FAN_STEP_DOWN_TIME;
818 ++ data->REG_TOLERANCE_H = NCT6106_REG_TOLERANCE_H;
819 + data->REG_PWM[0] = NCT6106_REG_PWM;
820 + data->REG_PWM[1] = NCT6106_REG_FAN_START_OUTPUT;
821 + data->REG_PWM[2] = NCT6106_REG_FAN_STOP_OUTPUT;
822 +diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
823 +index 2876c18ed841..38ffbdb0a85f 100644
824 +--- a/drivers/hwmon/nct7802.c
825 ++++ b/drivers/hwmon/nct7802.c
826 +@@ -768,7 +768,7 @@ static struct attribute *nct7802_in_attrs[] = {
827 + &sensor_dev_attr_in3_alarm.dev_attr.attr,
828 + &sensor_dev_attr_in3_beep.dev_attr.attr,
829 +
830 +- &sensor_dev_attr_in4_input.dev_attr.attr, /* 17 */
831 ++ &sensor_dev_attr_in4_input.dev_attr.attr, /* 16 */
832 + &sensor_dev_attr_in4_min.dev_attr.attr,
833 + &sensor_dev_attr_in4_max.dev_attr.attr,
834 + &sensor_dev_attr_in4_alarm.dev_attr.attr,
835 +@@ -794,9 +794,9 @@ static umode_t nct7802_in_is_visible(struct kobject *kobj,
836 +
837 + if (index >= 6 && index < 11 && (reg & 0x03) != 0x03) /* VSEN1 */
838 + return 0;
839 +- if (index >= 11 && index < 17 && (reg & 0x0c) != 0x0c) /* VSEN2 */
840 ++ if (index >= 11 && index < 16 && (reg & 0x0c) != 0x0c) /* VSEN2 */
841 + return 0;
842 +- if (index >= 17 && (reg & 0x30) != 0x30) /* VSEN3 */
843 ++ if (index >= 16 && (reg & 0x30) != 0x30) /* VSEN3 */
844 + return 0;
845 +
846 + return attr->mode;
847 +diff --git a/drivers/iio/accel/cros_ec_accel_legacy.c b/drivers/iio/accel/cros_ec_accel_legacy.c
848 +index 063e89eff791..c776a3509a71 100644
849 +--- a/drivers/iio/accel/cros_ec_accel_legacy.c
850 ++++ b/drivers/iio/accel/cros_ec_accel_legacy.c
851 +@@ -328,7 +328,6 @@ static const struct iio_chan_spec_ext_info cros_ec_accel_legacy_ext_info[] = {
852 + .modified = 1, \
853 + .info_mask_separate = \
854 + BIT(IIO_CHAN_INFO_RAW) | \
855 +- BIT(IIO_CHAN_INFO_SCALE) | \
856 + BIT(IIO_CHAN_INFO_CALIBBIAS), \
857 + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE), \
858 + .ext_info = cros_ec_accel_legacy_ext_info, \
859 +diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
860 +index 0538ff8c4ac1..ce9af43fa2de 100644
861 +--- a/drivers/iio/adc/max9611.c
862 ++++ b/drivers/iio/adc/max9611.c
863 +@@ -86,7 +86,7 @@
864 + #define MAX9611_TEMP_MAX_POS 0x7f80
865 + #define MAX9611_TEMP_MAX_NEG 0xff80
866 + #define MAX9611_TEMP_MIN_NEG 0xd980
867 +-#define MAX9611_TEMP_MASK GENMASK(7, 15)
868 ++#define MAX9611_TEMP_MASK GENMASK(15, 7)
869 + #define MAX9611_TEMP_SHIFT 0x07
870 + #define MAX9611_TEMP_RAW(_r) ((_r) >> MAX9611_TEMP_SHIFT)
871 + #define MAX9611_TEMP_SCALE_NUM 1000000
872 +diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
873 +index 530142b5a115..eb9b9de47fd1 100644
874 +--- a/drivers/input/mouse/elantech.c
875 ++++ b/drivers/input/mouse/elantech.c
876 +@@ -1810,6 +1810,30 @@ static int elantech_create_smbus(struct psmouse *psmouse,
877 + leave_breadcrumbs);
878 + }
879 +
880 ++static bool elantech_use_host_notify(struct psmouse *psmouse,
881 ++ struct elantech_device_info *info)
882 ++{
883 ++ if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
884 ++ return true;
885 ++
886 ++ switch (info->bus) {
887 ++ case ETP_BUS_PS2_ONLY:
888 ++ /* expected case */
889 ++ break;
890 ++ case ETP_BUS_SMB_HST_NTFY_ONLY:
891 ++ case ETP_BUS_PS2_SMB_HST_NTFY:
892 ++ /* SMbus implementation is stable since 2018 */
893 ++ if (dmi_get_bios_year() >= 2018)
894 ++ return true;
895 ++ default:
896 ++ psmouse_dbg(psmouse,
897 ++ "Ignoring SMBus bus provider %d\n", info->bus);
898 ++ break;
899 ++ }
900 ++
901 ++ return false;
902 ++}
903 ++
904 + /**
905 + * elantech_setup_smbus - called once the PS/2 devices are enumerated
906 + * and decides to instantiate a SMBus InterTouch device.
907 +@@ -1829,7 +1853,7 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
908 + * i2c_blacklist_pnp_ids.
909 + * Old ICs are up to the user to decide.
910 + */
911 +- if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version) ||
912 ++ if (!elantech_use_host_notify(psmouse, info) ||
913 + psmouse_matches_pnp_id(psmouse, i2c_blacklist_pnp_ids))
914 + return -ENXIO;
915 + }
916 +@@ -1849,34 +1873,6 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
917 + return 0;
918 + }
919 +
920 +-static bool elantech_use_host_notify(struct psmouse *psmouse,
921 +- struct elantech_device_info *info)
922 +-{
923 +- if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
924 +- return true;
925 +-
926 +- switch (info->bus) {
927 +- case ETP_BUS_PS2_ONLY:
928 +- /* expected case */
929 +- break;
930 +- case ETP_BUS_SMB_ALERT_ONLY:
931 +- /* fall-through */
932 +- case ETP_BUS_PS2_SMB_ALERT:
933 +- psmouse_dbg(psmouse, "Ignoring SMBus provider through alert protocol.\n");
934 +- break;
935 +- case ETP_BUS_SMB_HST_NTFY_ONLY:
936 +- /* fall-through */
937 +- case ETP_BUS_PS2_SMB_HST_NTFY:
938 +- return true;
939 +- default:
940 +- psmouse_dbg(psmouse,
941 +- "Ignoring SMBus bus provider %d.\n",
942 +- info->bus);
943 +- }
944 +-
945 +- return false;
946 +-}
947 +-
948 + int elantech_init_smbus(struct psmouse *psmouse)
949 + {
950 + struct elantech_device_info info;
951 +diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
952 +index af7d48431b85..06cebde2422e 100644
953 +--- a/drivers/input/mouse/synaptics.c
954 ++++ b/drivers/input/mouse/synaptics.c
955 +@@ -185,6 +185,7 @@ static const char * const smbus_pnp_ids[] = {
956 + "LEN2055", /* E580 */
957 + "SYN3052", /* HP EliteBook 840 G4 */
958 + "SYN3221", /* HP 15-ay000 */
959 ++ "SYN323d", /* HP Spectre X360 13-w013dx */
960 + NULL
961 + };
962 +
963 +diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
964 +index d61570d64ee7..48304e26f988 100644
965 +--- a/drivers/input/touchscreen/usbtouchscreen.c
966 ++++ b/drivers/input/touchscreen/usbtouchscreen.c
967 +@@ -1672,6 +1672,8 @@ static int usbtouch_probe(struct usb_interface *intf,
968 + if (!usbtouch || !input_dev)
969 + goto out_free;
970 +
971 ++ mutex_init(&usbtouch->pm_mutex);
972 ++
973 + type = &usbtouch_dev_info[id->driver_info];
974 + usbtouch->type = type;
975 + if (!type->process_pkt)
976 +diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c
977 +index ed5cefb83768..89deb451e0ac 100644
978 +--- a/drivers/mmc/host/cavium.c
979 ++++ b/drivers/mmc/host/cavium.c
980 +@@ -374,6 +374,7 @@ static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
981 + {
982 + data->bytes_xfered = data->blocks * data->blksz;
983 + data->error = 0;
984 ++ dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
985 + return 1;
986 + }
987 +
988 +@@ -1046,7 +1047,8 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
989 + mmc->max_segs = 1;
990 +
991 + /* DMA size field can address up to 8 MB */
992 +- mmc->max_seg_size = 8 * 1024 * 1024;
993 ++ mmc->max_seg_size = min_t(unsigned int, 8 * 1024 * 1024,
994 ++ dma_get_max_seg_size(host->dev));
995 + mmc->max_req_size = mmc->max_seg_size;
996 + /* External DMA is in 512 byte blocks */
997 + mmc->max_blk_size = 512;
998 +diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
999 +index 602c19e23f05..786d852a70d5 100644
1000 +--- a/drivers/net/can/rcar/rcar_canfd.c
1001 ++++ b/drivers/net/can/rcar/rcar_canfd.c
1002 +@@ -1512,10 +1512,11 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
1003 +
1004 + /* All packets processed */
1005 + if (num_pkts < quota) {
1006 +- napi_complete_done(napi, num_pkts);
1007 +- /* Enable Rx FIFO interrupts */
1008 +- rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
1009 +- RCANFD_RFCC_RFIE);
1010 ++ if (napi_complete_done(napi, num_pkts)) {
1011 ++ /* Enable Rx FIFO interrupts */
1012 ++ rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
1013 ++ RCANFD_RFCC_RFIE);
1014 ++ }
1015 + }
1016 + return num_pkts;
1017 + }
1018 +diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
1019 +index 611f9d31be5d..740ef47eab01 100644
1020 +--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
1021 ++++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
1022 +@@ -576,16 +576,16 @@ static int peak_usb_ndo_stop(struct net_device *netdev)
1023 + dev->state &= ~PCAN_USB_STATE_STARTED;
1024 + netif_stop_queue(netdev);
1025 +
1026 ++ close_candev(netdev);
1027 ++
1028 ++ dev->can.state = CAN_STATE_STOPPED;
1029 ++
1030 + /* unlink all pending urbs and free used memory */
1031 + peak_usb_unlink_all_urbs(dev);
1032 +
1033 + if (dev->adapter->dev_stop)
1034 + dev->adapter->dev_stop(dev);
1035 +
1036 +- close_candev(netdev);
1037 +-
1038 +- dev->can.state = CAN_STATE_STOPPED;
1039 +-
1040 + /* can set bus off now */
1041 + if (dev->adapter->dev_set_bus) {
1042 + int err = dev->adapter->dev_set_bus(dev, 0);
1043 +diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
1044 +index dd161c5eea8e..41988358f63c 100644
1045 +--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
1046 ++++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
1047 +@@ -849,7 +849,7 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
1048 + goto err_out;
1049 +
1050 + /* allocate command buffer once for all for the interface */
1051 +- pdev->cmd_buffer_addr = kmalloc(PCAN_UFD_CMD_BUFFER_SIZE,
1052 ++ pdev->cmd_buffer_addr = kzalloc(PCAN_UFD_CMD_BUFFER_SIZE,
1053 + GFP_KERNEL);
1054 + if (!pdev->cmd_buffer_addr)
1055 + goto err_out_1;
1056 +diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
1057 +index d516def846ab..b304198f0b3a 100644
1058 +--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
1059 ++++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
1060 +@@ -502,7 +502,7 @@ static int pcan_usb_pro_drv_loaded(struct peak_usb_device *dev, int loaded)
1061 + u8 *buffer;
1062 + int err;
1063 +
1064 +- buffer = kmalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
1065 ++ buffer = kzalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
1066 + if (!buffer)
1067 + return -ENOMEM;
1068 +
1069 +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
1070 +index f2aba5b160c2..d45c435a599d 100644
1071 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
1072 ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
1073 +@@ -67,7 +67,8 @@ static struct ch_tc_pedit_fields pedits[] = {
1074 + static struct ch_tc_flower_entry *allocate_flower_entry(void)
1075 + {
1076 + struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);
1077 +- spin_lock_init(&new->lock);
1078 ++ if (new)
1079 ++ spin_lock_init(&new->lock);
1080 + return new;
1081 + }
1082 +
1083 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
1084 +index 8b7d70e3a379..3fe7605a2cca 100644
1085 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
1086 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
1087 +@@ -724,7 +724,7 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
1088 +
1089 + for (i = 0; i < n_profiles; i++) {
1090 + /* the tables start at element 3 */
1091 +- static int pos = 3;
1092 ++ int pos = 3;
1093 +
1094 + /* The EWRD profiles officially go from 2 to 4, but we
1095 + * save them in sar_profiles[1-3] (because we don't
1096 +@@ -836,6 +836,22 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
1097 + return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
1098 + }
1099 +
1100 ++static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm)
1101 ++{
1102 ++ /*
1103 ++ * The GEO_TX_POWER_LIMIT command is not supported on earlier
1104 ++ * firmware versions. Unfortunately, we don't have a TLV API
1105 ++ * flag to rely on, so rely on the major version which is in
1106 ++ * the first byte of ucode_ver. This was implemented
1107 ++ * initially on version 38 and then backported to 36, 29 and
1108 ++ * 17.
1109 ++ */
1110 ++ return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 ||
1111 ++ IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 36 ||
1112 ++ IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 ||
1113 ++ IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17;
1114 ++}
1115 ++
1116 + int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
1117 + {
1118 + struct iwl_geo_tx_power_profiles_resp *resp;
1119 +@@ -851,6 +867,9 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
1120 + .data = { &geo_cmd },
1121 + };
1122 +
1123 ++ if (!iwl_mvm_sar_geo_support(mvm))
1124 ++ return -EOPNOTSUPP;
1125 ++
1126 + ret = iwl_mvm_send_cmd(mvm, &cmd);
1127 + if (ret) {
1128 + IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
1129 +@@ -876,13 +895,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
1130 + int ret, i, j;
1131 + u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
1132 +
1133 +- /*
1134 +- * This command is not supported on earlier firmware versions.
1135 +- * Unfortunately, we don't have a TLV API flag to rely on, so
1136 +- * rely on the major version which is in the first byte of
1137 +- * ucode_ver.
1138 +- */
1139 +- if (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) < 41)
1140 ++ if (!iwl_mvm_sar_geo_support(mvm))
1141 + return 0;
1142 +
1143 + ret = iwl_mvm_sar_get_wgds_table(mvm);
1144 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
1145 +index 93f0d387688a..42fdb7970cfd 100644
1146 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
1147 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
1148 +@@ -403,6 +403,8 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
1149 + DMA_TO_DEVICE);
1150 + }
1151 +
1152 ++ meta->tbs = 0;
1153 ++
1154 + if (trans->cfg->use_tfh) {
1155 + struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
1156 +
1157 +diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
1158 +index b025ba164412..e39bb5c42c9a 100644
1159 +--- a/drivers/net/wireless/marvell/mwifiex/main.h
1160 ++++ b/drivers/net/wireless/marvell/mwifiex/main.h
1161 +@@ -124,6 +124,7 @@ enum {
1162 +
1163 + #define MWIFIEX_MAX_TOTAL_SCAN_TIME (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S)
1164 +
1165 ++#define WPA_GTK_OUI_OFFSET 2
1166 + #define RSN_GTK_OUI_OFFSET 2
1167 +
1168 + #define MWIFIEX_OUI_NOT_PRESENT 0
1169 +diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
1170 +index 6dd771ce68a3..ed27147efcb3 100644
1171 +--- a/drivers/net/wireless/marvell/mwifiex/scan.c
1172 ++++ b/drivers/net/wireless/marvell/mwifiex/scan.c
1173 +@@ -181,7 +181,8 @@ mwifiex_is_wpa_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher)
1174 + u8 ret = MWIFIEX_OUI_NOT_PRESENT;
1175 +
1176 + if (has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC)) {
1177 +- iebody = (struct ie_body *) bss_desc->bcn_wpa_ie->data;
1178 ++ iebody = (struct ie_body *)((u8 *)bss_desc->bcn_wpa_ie->data +
1179 ++ WPA_GTK_OUI_OFFSET);
1180 + oui = &mwifiex_wpa_oui[cipher][0];
1181 + ret = mwifiex_search_oui_in_ie(iebody, oui);
1182 + if (ret)
1183 +diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
1184 +index 260248fbb8fe..a11e210d173e 100644
1185 +--- a/drivers/nvme/host/multipath.c
1186 ++++ b/drivers/nvme/host/multipath.c
1187 +@@ -20,11 +20,6 @@ module_param(multipath, bool, 0444);
1188 + MODULE_PARM_DESC(multipath,
1189 + "turn on native support for multiple controllers per subsystem");
1190 +
1191 +-inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
1192 +-{
1193 +- return multipath && ctrl->subsys && (ctrl->subsys->cmic & (1 << 3));
1194 +-}
1195 +-
1196 + /*
1197 + * If multipathing is enabled we need to always use the subsystem instance
1198 + * number for numbering our devices to avoid conflicts between subsystems that
1199 +@@ -516,7 +511,8 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
1200 + {
1201 + int error;
1202 +
1203 +- if (!nvme_ctrl_use_ana(ctrl))
1204 ++ /* check if multipath is enabled and we have the capability */
1205 ++ if (!multipath || !ctrl->subsys || !(ctrl->subsys->cmic & (1 << 3)))
1206 + return 0;
1207 +
1208 + ctrl->anacap = id->anacap;
1209 +diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
1210 +index e82cdaec81c9..d5e29b57eb34 100644
1211 +--- a/drivers/nvme/host/nvme.h
1212 ++++ b/drivers/nvme/host/nvme.h
1213 +@@ -464,7 +464,11 @@ extern const struct attribute_group nvme_ns_id_attr_group;
1214 + extern const struct block_device_operations nvme_ns_head_ops;
1215 +
1216 + #ifdef CONFIG_NVME_MULTIPATH
1217 +-bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl);
1218 ++static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
1219 ++{
1220 ++ return ctrl->ana_log_buf != NULL;
1221 ++}
1222 ++
1223 + void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
1224 + struct nvme_ctrl *ctrl, int *flags);
1225 + void nvme_failover_req(struct request *req);
1226 +diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
1227 +index 4ac4a73037f5..4b7cc8d425b1 100644
1228 +--- a/drivers/s390/cio/qdio_main.c
1229 ++++ b/drivers/s390/cio/qdio_main.c
1230 +@@ -1569,13 +1569,13 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
1231 + rc = qdio_kick_outbound_q(q, phys_aob);
1232 + } else if (need_siga_sync(q)) {
1233 + rc = qdio_siga_sync_q(q);
1234 ++ } else if (count < QDIO_MAX_BUFFERS_PER_Q &&
1235 ++ get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 &&
1236 ++ state == SLSB_CU_OUTPUT_PRIMED) {
1237 ++ /* The previous buffer is not processed yet, tack on. */
1238 ++ qperf_inc(q, fast_requeue);
1239 + } else {
1240 +- /* try to fast requeue buffers */
1241 +- get_buf_state(q, prev_buf(bufnr), &state, 0);
1242 +- if (state != SLSB_CU_OUTPUT_PRIMED)
1243 +- rc = qdio_kick_outbound_q(q, 0);
1244 +- else
1245 +- qperf_inc(q, fast_requeue);
1246 ++ rc = qdio_kick_outbound_q(q, 0);
1247 + }
1248 +
1249 + /* in case of SIGA errors we must process the error immediately */
1250 +diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
1251 +index 70a006ba4d05..4fe06ff7b2c8 100644
1252 +--- a/drivers/s390/cio/vfio_ccw_cp.c
1253 ++++ b/drivers/s390/cio/vfio_ccw_cp.c
1254 +@@ -89,8 +89,10 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
1255 + sizeof(*pa->pa_iova_pfn) +
1256 + sizeof(*pa->pa_pfn),
1257 + GFP_KERNEL);
1258 +- if (unlikely(!pa->pa_iova_pfn))
1259 ++ if (unlikely(!pa->pa_iova_pfn)) {
1260 ++ pa->pa_nr = 0;
1261 + return -ENOMEM;
1262 ++ }
1263 + pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
1264 +
1265 + pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
1266 +diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
1267 +index d1154baa9436..9c21938ed67e 100644
1268 +--- a/drivers/scsi/device_handler/scsi_dh_alua.c
1269 ++++ b/drivers/scsi/device_handler/scsi_dh_alua.c
1270 +@@ -54,6 +54,7 @@
1271 + #define ALUA_FAILOVER_TIMEOUT 60
1272 + #define ALUA_FAILOVER_RETRIES 5
1273 + #define ALUA_RTPG_DELAY_MSECS 5
1274 ++#define ALUA_RTPG_RETRY_DELAY 2
1275 +
1276 + /* device handler flags */
1277 + #define ALUA_OPTIMIZE_STPG 0x01
1278 +@@ -696,7 +697,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
1279 + case SCSI_ACCESS_STATE_TRANSITIONING:
1280 + if (time_before(jiffies, pg->expiry)) {
1281 + /* State transition, retry */
1282 +- pg->interval = 2;
1283 ++ pg->interval = ALUA_RTPG_RETRY_DELAY;
1284 + err = SCSI_DH_RETRY;
1285 + } else {
1286 + struct alua_dh_data *h;
1287 +@@ -821,6 +822,8 @@ static void alua_rtpg_work(struct work_struct *work)
1288 + spin_lock_irqsave(&pg->lock, flags);
1289 + pg->flags &= ~ALUA_PG_RUNNING;
1290 + pg->flags |= ALUA_PG_RUN_RTPG;
1291 ++ if (!pg->interval)
1292 ++ pg->interval = ALUA_RTPG_RETRY_DELAY;
1293 + spin_unlock_irqrestore(&pg->lock, flags);
1294 + queue_delayed_work(kaluad_wq, &pg->rtpg_work,
1295 + pg->interval * HZ);
1296 +@@ -832,6 +835,8 @@ static void alua_rtpg_work(struct work_struct *work)
1297 + spin_lock_irqsave(&pg->lock, flags);
1298 + if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
1299 + pg->flags &= ~ALUA_PG_RUNNING;
1300 ++ if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG))
1301 ++ pg->interval = ALUA_RTPG_RETRY_DELAY;
1302 + pg->flags |= ALUA_PG_RUN_RTPG;
1303 + spin_unlock_irqrestore(&pg->lock, flags);
1304 + queue_delayed_work(kaluad_wq, &pg->rtpg_work,
1305 +diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
1306 +index b64ca977825d..71d53bb239e2 100644
1307 +--- a/drivers/scsi/ibmvscsi/ibmvfc.c
1308 ++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
1309 +@@ -4874,8 +4874,8 @@ static int ibmvfc_remove(struct vio_dev *vdev)
1310 +
1311 + spin_lock_irqsave(vhost->host->host_lock, flags);
1312 + ibmvfc_purge_requests(vhost, DID_ERROR);
1313 +- ibmvfc_free_event_pool(vhost);
1314 + spin_unlock_irqrestore(vhost->host->host_lock, flags);
1315 ++ ibmvfc_free_event_pool(vhost);
1316 +
1317 + ibmvfc_free_mem(vhost);
1318 + spin_lock(&ibmvfc_driver_lock);
1319 +diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
1320 +index e0c87228438d..806ceabcabc3 100644
1321 +--- a/drivers/scsi/megaraid/megaraid_sas_base.c
1322 ++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
1323 +@@ -3025,6 +3025,7 @@ megasas_fw_crash_buffer_show(struct device *cdev,
1324 + u32 size;
1325 + unsigned long buff_addr;
1326 + unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
1327 ++ unsigned long chunk_left_bytes;
1328 + unsigned long src_addr;
1329 + unsigned long flags;
1330 + u32 buff_offset;
1331 +@@ -3050,6 +3051,8 @@ megasas_fw_crash_buffer_show(struct device *cdev,
1332 + }
1333 +
1334 + size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
1335 ++ chunk_left_bytes = dmachunk - (buff_offset % dmachunk);
1336 ++ size = (size > chunk_left_bytes) ? chunk_left_bytes : size;
1337 + size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
1338 +
1339 + src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
1340 +diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
1341 +index 9bc56eb48d2a..890d264ac687 100644
1342 +--- a/drivers/staging/android/ion/ion_page_pool.c
1343 ++++ b/drivers/staging/android/ion/ion_page_pool.c
1344 +@@ -8,11 +8,14 @@
1345 + #include <linux/list.h>
1346 + #include <linux/slab.h>
1347 + #include <linux/swap.h>
1348 ++#include <linux/sched/signal.h>
1349 +
1350 + #include "ion.h"
1351 +
1352 + static inline struct page *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
1353 + {
1354 ++ if (fatal_signal_pending(current))
1355 ++ return NULL;
1356 + return alloc_pages(pool->gfp_mask, pool->order);
1357 + }
1358 +
1359 +diff --git a/drivers/staging/gasket/apex_driver.c b/drivers/staging/gasket/apex_driver.c
1360 +index c747e9ca4518..0cef1d6d2e2b 100644
1361 +--- a/drivers/staging/gasket/apex_driver.c
1362 ++++ b/drivers/staging/gasket/apex_driver.c
1363 +@@ -538,7 +538,7 @@ static ssize_t sysfs_show(struct device *device, struct device_attribute *attr,
1364 + break;
1365 + case ATTR_KERNEL_HIB_SIMPLE_PAGE_TABLE_SIZE:
1366 + ret = scnprintf(buf, PAGE_SIZE, "%u\n",
1367 +- gasket_page_table_num_entries(
1368 ++ gasket_page_table_num_simple_entries(
1369 + gasket_dev->page_table[0]));
1370 + break;
1371 + case ATTR_KERNEL_HIB_NUM_ACTIVE_PAGES:
1372 +diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
1373 +index b989ca26fc78..2f0372976459 100644
1374 +--- a/drivers/tty/tty_ldsem.c
1375 ++++ b/drivers/tty/tty_ldsem.c
1376 +@@ -116,8 +116,7 @@ static void __ldsem_wake_readers(struct ld_semaphore *sem)
1377 +
1378 + list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
1379 + tsk = waiter->task;
1380 +- smp_mb();
1381 +- waiter->task = NULL;
1382 ++ smp_store_release(&waiter->task, NULL);
1383 + wake_up_process(tsk);
1384 + put_task_struct(tsk);
1385 + }
1386 +@@ -217,7 +216,7 @@ down_read_failed(struct ld_semaphore *sem, long count, long timeout)
1387 + for (;;) {
1388 + set_current_state(TASK_UNINTERRUPTIBLE);
1389 +
1390 +- if (!waiter.task)
1391 ++ if (!smp_load_acquire(&waiter.task))
1392 + break;
1393 + if (!timeout)
1394 + break;
1395 +diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
1396 +index ffccd40ea67d..29c6414f48f1 100644
1397 +--- a/drivers/usb/core/devio.c
1398 ++++ b/drivers/usb/core/devio.c
1399 +@@ -1792,8 +1792,6 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
1400 + return 0;
1401 +
1402 + error:
1403 +- if (as && as->usbm)
1404 +- dec_usb_memory_use_count(as->usbm, &as->usbm->urb_use_count);
1405 + kfree(isopkt);
1406 + kfree(dr);
1407 + if (as)
1408 +diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
1409 +index 671bce18782c..8616c52849c6 100644
1410 +--- a/drivers/usb/host/xhci-rcar.c
1411 ++++ b/drivers/usb/host/xhci-rcar.c
1412 +@@ -238,10 +238,15 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd)
1413 + * pointers. So, this driver clears the AC64 bit of xhci->hcc_params
1414 + * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
1415 + * xhci_gen_setup().
1416 ++ *
1417 ++ * And, since the firmware/internal CPU control the USBSTS.STS_HALT
1418 ++ * and the process speed is down when the roothub port enters U3,
1419 ++ * long delay for the handshake of STS_HALT is neeed in xhci_suspend().
1420 + */
1421 + if (xhci_rcar_is_gen2(hcd->self.controller) ||
1422 +- xhci_rcar_is_gen3(hcd->self.controller))
1423 +- xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
1424 ++ xhci_rcar_is_gen3(hcd->self.controller)) {
1425 ++ xhci->quirks |= XHCI_NO_64BIT_SUPPORT | XHCI_SLOW_SUSPEND;
1426 ++ }
1427 +
1428 + if (!xhci_rcar_wait_for_pll_active(hcd))
1429 + return -ETIMEDOUT;
1430 +diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
1431 +index c2991b8a65ce..55db0fc87927 100644
1432 +--- a/drivers/usb/misc/iowarrior.c
1433 ++++ b/drivers/usb/misc/iowarrior.c
1434 +@@ -866,19 +866,20 @@ static void iowarrior_disconnect(struct usb_interface *interface)
1435 + dev = usb_get_intfdata(interface);
1436 + mutex_lock(&iowarrior_open_disc_lock);
1437 + usb_set_intfdata(interface, NULL);
1438 ++ /* prevent device read, write and ioctl */
1439 ++ dev->present = 0;
1440 +
1441 + minor = dev->minor;
1442 ++ mutex_unlock(&iowarrior_open_disc_lock);
1443 ++ /* give back our minor - this will call close() locks need to be dropped at this point*/
1444 +
1445 +- /* give back our minor */
1446 + usb_deregister_dev(interface, &iowarrior_class);
1447 +
1448 + mutex_lock(&dev->mutex);
1449 +
1450 + /* prevent device read, write and ioctl */
1451 +- dev->present = 0;
1452 +
1453 + mutex_unlock(&dev->mutex);
1454 +- mutex_unlock(&iowarrior_open_disc_lock);
1455 +
1456 + if (dev->opened) {
1457 + /* There is a process that holds a filedescriptor to the device ,
1458 +diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
1459 +index 7b306aa22d25..6715a128e6c8 100644
1460 +--- a/drivers/usb/misc/yurex.c
1461 ++++ b/drivers/usb/misc/yurex.c
1462 +@@ -92,7 +92,6 @@ static void yurex_delete(struct kref *kref)
1463 +
1464 + dev_dbg(&dev->interface->dev, "%s\n", __func__);
1465 +
1466 +- usb_put_dev(dev->udev);
1467 + if (dev->cntl_urb) {
1468 + usb_kill_urb(dev->cntl_urb);
1469 + kfree(dev->cntl_req);
1470 +@@ -108,6 +107,7 @@ static void yurex_delete(struct kref *kref)
1471 + dev->int_buffer, dev->urb->transfer_dma);
1472 + usb_free_urb(dev->urb);
1473 + }
1474 ++ usb_put_dev(dev->udev);
1475 + kfree(dev);
1476 + }
1477 +
1478 +diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c
1479 +index 3457c1fdebd1..5f29ce8d6c3f 100644
1480 +--- a/drivers/usb/typec/tcpm.c
1481 ++++ b/drivers/usb/typec/tcpm.c
1482 +@@ -378,7 +378,8 @@ static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
1483 + return SNK_UNATTACHED;
1484 + else if (port->try_role == TYPEC_SOURCE)
1485 + return SRC_UNATTACHED;
1486 +- else if (port->tcpc->config->default_role == TYPEC_SINK)
1487 ++ else if (port->tcpc->config &&
1488 ++ port->tcpc->config->default_role == TYPEC_SINK)
1489 + return SNK_UNATTACHED;
1490 + /* Fall through to return SRC_UNATTACHED */
1491 + } else if (port->port_type == TYPEC_PORT_SNK) {
1492 +@@ -585,7 +586,20 @@ static void tcpm_debugfs_init(struct tcpm_port *port)
1493 +
1494 + static void tcpm_debugfs_exit(struct tcpm_port *port)
1495 + {
1496 ++ int i;
1497 ++
1498 ++ mutex_lock(&port->logbuffer_lock);
1499 ++ for (i = 0; i < LOG_BUFFER_ENTRIES; i++) {
1500 ++ kfree(port->logbuffer[i]);
1501 ++ port->logbuffer[i] = NULL;
1502 ++ }
1503 ++ mutex_unlock(&port->logbuffer_lock);
1504 ++
1505 + debugfs_remove(port->dentry);
1506 ++ if (list_empty(&rootdir->d_subdirs)) {
1507 ++ debugfs_remove(rootdir);
1508 ++ rootdir = NULL;
1509 ++ }
1510 + }
1511 +
1512 + #else
1513 +@@ -1094,7 +1108,8 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
1514 + break;
1515 + case CMD_ATTENTION:
1516 + /* Attention command does not have response */
1517 +- typec_altmode_attention(adev, p[1]);
1518 ++ if (adev)
1519 ++ typec_altmode_attention(adev, p[1]);
1520 + return 0;
1521 + default:
1522 + break;
1523 +@@ -1146,20 +1161,26 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
1524 + }
1525 + break;
1526 + case CMD_ENTER_MODE:
1527 +- typec_altmode_update_active(pdev, true);
1528 +-
1529 +- if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
1530 +- response[0] = VDO(adev->svid, 1, CMD_EXIT_MODE);
1531 +- response[0] |= VDO_OPOS(adev->mode);
1532 +- return 1;
1533 ++ if (adev && pdev) {
1534 ++ typec_altmode_update_active(pdev, true);
1535 ++
1536 ++ if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
1537 ++ response[0] = VDO(adev->svid, 1,
1538 ++ CMD_EXIT_MODE);
1539 ++ response[0] |= VDO_OPOS(adev->mode);
1540 ++ return 1;
1541 ++ }
1542 + }
1543 + return 0;
1544 + case CMD_EXIT_MODE:
1545 +- typec_altmode_update_active(pdev, false);
1546 ++ if (adev && pdev) {
1547 ++ typec_altmode_update_active(pdev, false);
1548 +
1549 +- /* Back to USB Operation */
1550 +- WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB,
1551 +- NULL));
1552 ++ /* Back to USB Operation */
1553 ++ WARN_ON(typec_altmode_notify(adev,
1554 ++ TYPEC_STATE_USB,
1555 ++ NULL));
1556 ++ }
1557 + break;
1558 + default:
1559 + break;
1560 +@@ -1169,8 +1190,10 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
1561 + switch (cmd) {
1562 + case CMD_ENTER_MODE:
1563 + /* Back to USB Operation */
1564 +- WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB,
1565 +- NULL));
1566 ++ if (adev)
1567 ++ WARN_ON(typec_altmode_notify(adev,
1568 ++ TYPEC_STATE_USB,
1569 ++ NULL));
1570 + break;
1571 + default:
1572 + break;
1573 +@@ -1181,7 +1204,8 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
1574 + }
1575 +
1576 + /* Informing the alternate mode drivers about everything */
1577 +- typec_altmode_vdm(adev, p[0], &p[1], cnt);
1578 ++ if (adev)
1579 ++ typec_altmode_vdm(adev, p[0], &p[1], cnt);
1580 +
1581 + return rlen;
1582 + }
1583 +@@ -4083,7 +4107,7 @@ static int tcpm_try_role(const struct typec_capability *cap, int role)
1584 + mutex_lock(&port->lock);
1585 + if (tcpc->try_role)
1586 + ret = tcpc->try_role(tcpc, role);
1587 +- if (!ret && !tcpc->config->try_role_hw)
1588 ++ if (!ret && (!tcpc->config || !tcpc->config->try_role_hw))
1589 + port->try_role = role;
1590 + port->try_src_count = 0;
1591 + port->try_snk_count = 0;
1592 +@@ -4730,7 +4754,7 @@ static int tcpm_copy_caps(struct tcpm_port *port,
1593 + port->typec_caps.prefer_role = tcfg->default_role;
1594 + port->typec_caps.type = tcfg->type;
1595 + port->typec_caps.data = tcfg->data;
1596 +- port->self_powered = port->tcpc->config->self_powered;
1597 ++ port->self_powered = tcfg->self_powered;
1598 +
1599 + return 0;
1600 + }
1601 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
1602 +index c181f1621e1a..2bc47eb6215e 100644
1603 +--- a/fs/cifs/smb2pdu.c
1604 ++++ b/fs/cifs/smb2pdu.c
1605 +@@ -168,7 +168,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
1606 + if (tcon == NULL)
1607 + return 0;
1608 +
1609 +- if (smb2_command == SMB2_TREE_CONNECT)
1610 ++ if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL)
1611 + return 0;
1612 +
1613 + if (tcon->tidStatus == CifsExiting) {
1614 +@@ -1006,7 +1006,12 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
1615 + else
1616 + req->SecurityMode = 0;
1617 +
1618 ++#ifdef CONFIG_CIFS_DFS_UPCALL
1619 ++ req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS);
1620 ++#else
1621 + req->Capabilities = 0;
1622 ++#endif /* DFS_UPCALL */
1623 ++
1624 + req->Channel = 0; /* MBZ */
1625 +
1626 + sess_data->iov[0].iov_base = (char *)req;
1627 +diff --git a/fs/dax.c b/fs/dax.c
1628 +index 75a289c31c7e..f0d932fa39c2 100644
1629 +--- a/fs/dax.c
1630 ++++ b/fs/dax.c
1631 +@@ -659,7 +659,7 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
1632 + * guaranteed to either see new references or prevent new
1633 + * references from being established.
1634 + */
1635 +- unmap_mapping_range(mapping, 0, 0, 1);
1636 ++ unmap_mapping_range(mapping, 0, 0, 0);
1637 +
1638 + while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
1639 + min(end - index, (pgoff_t)PAGEVEC_SIZE),
1640 +diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
1641 +index 7f8bb0868c0f..d14d71d8d7ee 100644
1642 +--- a/fs/gfs2/bmap.c
1643 ++++ b/fs/gfs2/bmap.c
1644 +@@ -392,6 +392,19 @@ static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h)
1645 + return mp->mp_aheight - x - 1;
1646 + }
1647 +
1648 ++static sector_t metapath_to_block(struct gfs2_sbd *sdp, struct metapath *mp)
1649 ++{
1650 ++ sector_t factor = 1, block = 0;
1651 ++ int hgt;
1652 ++
1653 ++ for (hgt = mp->mp_fheight - 1; hgt >= 0; hgt--) {
1654 ++ if (hgt < mp->mp_aheight)
1655 ++ block += mp->mp_list[hgt] * factor;
1656 ++ factor *= sdp->sd_inptrs;
1657 ++ }
1658 ++ return block;
1659 ++}
1660 ++
1661 + static void release_metapath(struct metapath *mp)
1662 + {
1663 + int i;
1664 +@@ -432,60 +445,84 @@ static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *pt
1665 + return ptr - first;
1666 + }
1667 +
1668 +-typedef const __be64 *(*gfs2_metadata_walker)(
1669 +- struct metapath *mp,
1670 +- const __be64 *start, const __be64 *end,
1671 +- u64 factor, void *data);
1672 ++enum walker_status { WALK_STOP, WALK_FOLLOW, WALK_CONTINUE };
1673 +
1674 +-#define WALK_STOP ((__be64 *)0)
1675 +-#define WALK_NEXT ((__be64 *)1)
1676 ++/*
1677 ++ * gfs2_metadata_walker - walk an indirect block
1678 ++ * @mp: Metapath to indirect block
1679 ++ * @ptrs: Number of pointers to look at
1680 ++ *
1681 ++ * When returning WALK_FOLLOW, the walker must update @mp to point at the right
1682 ++ * indirect block to follow.
1683 ++ */
1684 ++typedef enum walker_status (*gfs2_metadata_walker)(struct metapath *mp,
1685 ++ unsigned int ptrs);
1686 ++
1687 ++/*
1688 ++ * gfs2_walk_metadata - walk a tree of indirect blocks
1689 ++ * @inode: The inode
1690 ++ * @mp: Starting point of walk
1691 ++ * @max_len: Maximum number of blocks to walk
1692 ++ * @walker: Called during the walk
1693 ++ *
1694 ++ * Returns 1 if the walk was stopped by @walker, 0 if we went past @max_len or
1695 ++ * past the end of metadata, and a negative error code otherwise.
1696 ++ */
1697 +
1698 +-static int gfs2_walk_metadata(struct inode *inode, sector_t lblock,
1699 +- u64 len, struct metapath *mp, gfs2_metadata_walker walker,
1700 +- void *data)
1701 ++static int gfs2_walk_metadata(struct inode *inode, struct metapath *mp,
1702 ++ u64 max_len, gfs2_metadata_walker walker)
1703 + {
1704 +- struct metapath clone;
1705 + struct gfs2_inode *ip = GFS2_I(inode);
1706 + struct gfs2_sbd *sdp = GFS2_SB(inode);
1707 +- const __be64 *start, *end, *ptr;
1708 + u64 factor = 1;
1709 + unsigned int hgt;
1710 +- int ret = 0;
1711 ++ int ret;
1712 +
1713 +- for (hgt = ip->i_height - 1; hgt >= mp->mp_aheight; hgt--)
1714 ++ /*
1715 ++ * The walk starts in the lowest allocated indirect block, which may be
1716 ++ * before the position indicated by @mp. Adjust @max_len accordingly
1717 ++ * to avoid a short walk.
1718 ++ */
1719 ++ for (hgt = mp->mp_fheight - 1; hgt >= mp->mp_aheight; hgt--) {
1720 ++ max_len += mp->mp_list[hgt] * factor;
1721 ++ mp->mp_list[hgt] = 0;
1722 + factor *= sdp->sd_inptrs;
1723 ++ }
1724 +
1725 + for (;;) {
1726 +- u64 step;
1727 ++ u16 start = mp->mp_list[hgt];
1728 ++ enum walker_status status;
1729 ++ unsigned int ptrs;
1730 ++ u64 len;
1731 +
1732 + /* Walk indirect block. */
1733 +- start = metapointer(hgt, mp);
1734 +- end = metaend(hgt, mp);
1735 +-
1736 +- step = (end - start) * factor;
1737 +- if (step > len)
1738 +- end = start + DIV_ROUND_UP_ULL(len, factor);
1739 +-
1740 +- ptr = walker(mp, start, end, factor, data);
1741 +- if (ptr == WALK_STOP)
1742 ++ ptrs = (hgt >= 1 ? sdp->sd_inptrs : sdp->sd_diptrs) - start;
1743 ++ len = ptrs * factor;
1744 ++ if (len > max_len)
1745 ++ ptrs = DIV_ROUND_UP_ULL(max_len, factor);
1746 ++ status = walker(mp, ptrs);
1747 ++ switch (status) {
1748 ++ case WALK_STOP:
1749 ++ return 1;
1750 ++ case WALK_FOLLOW:
1751 ++ BUG_ON(mp->mp_aheight == mp->mp_fheight);
1752 ++ ptrs = mp->mp_list[hgt] - start;
1753 ++ len = ptrs * factor;
1754 + break;
1755 +- if (step >= len)
1756 ++ case WALK_CONTINUE:
1757 + break;
1758 +- len -= step;
1759 +- if (ptr != WALK_NEXT) {
1760 +- BUG_ON(!*ptr);
1761 +- mp->mp_list[hgt] += ptr - start;
1762 +- goto fill_up_metapath;
1763 + }
1764 ++ if (len >= max_len)
1765 ++ break;
1766 ++ max_len -= len;
1767 ++ if (status == WALK_FOLLOW)
1768 ++ goto fill_up_metapath;
1769 +
1770 + lower_metapath:
1771 + /* Decrease height of metapath. */
1772 +- if (mp != &clone) {
1773 +- clone_metapath(&clone, mp);
1774 +- mp = &clone;
1775 +- }
1776 + brelse(mp->mp_bh[hgt]);
1777 + mp->mp_bh[hgt] = NULL;
1778 ++ mp->mp_list[hgt] = 0;
1779 + if (!hgt)
1780 + break;
1781 + hgt--;
1782 +@@ -493,10 +530,7 @@ lower_metapath:
1783 +
1784 + /* Advance in metadata tree. */
1785 + (mp->mp_list[hgt])++;
1786 +- start = metapointer(hgt, mp);
1787 +- end = metaend(hgt, mp);
1788 +- if (start >= end) {
1789 +- mp->mp_list[hgt] = 0;
1790 ++ if (mp->mp_list[hgt] >= sdp->sd_inptrs) {
1791 + if (!hgt)
1792 + break;
1793 + goto lower_metapath;
1794 +@@ -504,44 +538,36 @@ lower_metapath:
1795 +
1796 + fill_up_metapath:
1797 + /* Increase height of metapath. */
1798 +- if (mp != &clone) {
1799 +- clone_metapath(&clone, mp);
1800 +- mp = &clone;
1801 +- }
1802 + ret = fillup_metapath(ip, mp, ip->i_height - 1);
1803 + if (ret < 0)
1804 +- break;
1805 ++ return ret;
1806 + hgt += ret;
1807 + for (; ret; ret--)
1808 + do_div(factor, sdp->sd_inptrs);
1809 + mp->mp_aheight = hgt + 1;
1810 + }
1811 +- if (mp == &clone)
1812 +- release_metapath(mp);
1813 +- return ret;
1814 ++ return 0;
1815 + }
1816 +
1817 +-struct gfs2_hole_walker_args {
1818 +- u64 blocks;
1819 +-};
1820 +-
1821 +-static const __be64 *gfs2_hole_walker(struct metapath *mp,
1822 +- const __be64 *start, const __be64 *end,
1823 +- u64 factor, void *data)
1824 ++static enum walker_status gfs2_hole_walker(struct metapath *mp,
1825 ++ unsigned int ptrs)
1826 + {
1827 +- struct gfs2_hole_walker_args *args = data;
1828 +- const __be64 *ptr;
1829 ++ const __be64 *start, *ptr, *end;
1830 ++ unsigned int hgt;
1831 ++
1832 ++ hgt = mp->mp_aheight - 1;
1833 ++ start = metapointer(hgt, mp);
1834 ++ end = start + ptrs;
1835 +
1836 + for (ptr = start; ptr < end; ptr++) {
1837 + if (*ptr) {
1838 +- args->blocks += (ptr - start) * factor;
1839 ++ mp->mp_list[hgt] += ptr - start;
1840 + if (mp->mp_aheight == mp->mp_fheight)
1841 + return WALK_STOP;
1842 +- return ptr; /* increase height */
1843 ++ return WALK_FOLLOW;
1844 + }
1845 + }
1846 +- args->blocks += (end - start) * factor;
1847 +- return WALK_NEXT;
1848 ++ return WALK_CONTINUE;
1849 + }
1850 +
1851 + /**
1852 +@@ -559,12 +585,24 @@ static const __be64 *gfs2_hole_walker(struct metapath *mp,
1853 + static int gfs2_hole_size(struct inode *inode, sector_t lblock, u64 len,
1854 + struct metapath *mp, struct iomap *iomap)
1855 + {
1856 +- struct gfs2_hole_walker_args args = { };
1857 +- int ret = 0;
1858 ++ struct metapath clone;
1859 ++ u64 hole_size;
1860 ++ int ret;
1861 +
1862 +- ret = gfs2_walk_metadata(inode, lblock, len, mp, gfs2_hole_walker, &args);
1863 +- if (!ret)
1864 +- iomap->length = args.blocks << inode->i_blkbits;
1865 ++ clone_metapath(&clone, mp);
1866 ++ ret = gfs2_walk_metadata(inode, &clone, len, gfs2_hole_walker);
1867 ++ if (ret < 0)
1868 ++ goto out;
1869 ++
1870 ++ if (ret == 1)
1871 ++ hole_size = metapath_to_block(GFS2_SB(inode), &clone) - lblock;
1872 ++ else
1873 ++ hole_size = len;
1874 ++ iomap->length = hole_size << inode->i_blkbits;
1875 ++ ret = 0;
1876 ++
1877 ++out:
1878 ++ release_metapath(&clone);
1879 + return ret;
1880 + }
1881 +
1882 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
1883 +index 904e08bbb289..31ae3bd5d9d2 100644
1884 +--- a/fs/nfs/nfs4proc.c
1885 ++++ b/fs/nfs/nfs4proc.c
1886 +@@ -3133,7 +3133,7 @@ static int _nfs4_do_setattr(struct inode *inode,
1887 +
1888 + if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) {
1889 + /* Use that stateid */
1890 +- } else if (ctx != NULL) {
1891 ++ } else if (ctx != NULL && ctx->state) {
1892 + struct nfs_lock_context *l_ctx;
1893 + if (!nfs4_valid_open_stateid(ctx->state))
1894 + return -EBADF;
1895 +diff --git a/include/linux/ccp.h b/include/linux/ccp.h
1896 +index 7e9c991c95e0..43ed9e77cf81 100644
1897 +--- a/include/linux/ccp.h
1898 ++++ b/include/linux/ccp.h
1899 +@@ -173,6 +173,8 @@ struct ccp_aes_engine {
1900 + enum ccp_aes_mode mode;
1901 + enum ccp_aes_action action;
1902 +
1903 ++ u32 authsize;
1904 ++
1905 + struct scatterlist *key;
1906 + u32 key_len; /* In bytes */
1907 +
1908 +diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
1909 +index 30efb3663892..d42a36e4e6c2 100644
1910 +--- a/include/linux/kvm_host.h
1911 ++++ b/include/linux/kvm_host.h
1912 +@@ -818,6 +818,7 @@ void kvm_arch_check_processor_compat(void *rtn);
1913 + int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
1914 + bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
1915 + int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
1916 ++bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
1917 +
1918 + #ifndef __KVM_HAVE_ARCH_VM_ALLOC
1919 + /*
1920 +diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
1921 +index e87f2d5b3cc6..127c2713b543 100644
1922 +--- a/include/sound/compress_driver.h
1923 ++++ b/include/sound/compress_driver.h
1924 +@@ -171,10 +171,7 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
1925 + if (snd_BUG_ON(!stream))
1926 + return;
1927 +
1928 +- if (stream->direction == SND_COMPRESS_PLAYBACK)
1929 +- stream->runtime->state = SNDRV_PCM_STATE_SETUP;
1930 +- else
1931 +- stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
1932 ++ stream->runtime->state = SNDRV_PCM_STATE_SETUP;
1933 +
1934 + wake_up(&stream->runtime->sleep);
1935 + }
1936 +diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
1937 +index 7acc16f34942..fa43dd5a7b3d 100644
1938 +--- a/include/uapi/linux/nl80211.h
1939 ++++ b/include/uapi/linux/nl80211.h
1940 +@@ -2732,7 +2732,7 @@ enum nl80211_attrs {
1941 + #define NL80211_HT_CAPABILITY_LEN 26
1942 + #define NL80211_VHT_CAPABILITY_LEN 12
1943 + #define NL80211_HE_MIN_CAPABILITY_LEN 16
1944 +-#define NL80211_HE_MAX_CAPABILITY_LEN 51
1945 ++#define NL80211_HE_MAX_CAPABILITY_LEN 54
1946 + #define NL80211_MAX_NR_CIPHER_SUITES 5
1947 + #define NL80211_MAX_NR_AKM_SUITES 2
1948 +
1949 +diff --git a/kernel/events/core.c b/kernel/events/core.c
1950 +index e8979c72514b..7ca44b8523c8 100644
1951 +--- a/kernel/events/core.c
1952 ++++ b/kernel/events/core.c
1953 +@@ -10957,7 +10957,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
1954 + goto err_unlock;
1955 + }
1956 +
1957 +- perf_install_in_context(ctx, event, cpu);
1958 ++ perf_install_in_context(ctx, event, event->cpu);
1959 + perf_unpin_context(ctx);
1960 + mutex_unlock(&ctx->mutex);
1961 +
1962 +diff --git a/lib/test_firmware.c b/lib/test_firmware.c
1963 +index fd48a15a0710..a74b1aae7461 100644
1964 +--- a/lib/test_firmware.c
1965 ++++ b/lib/test_firmware.c
1966 +@@ -894,8 +894,11 @@ static int __init test_firmware_init(void)
1967 + return -ENOMEM;
1968 +
1969 + rc = __test_firmware_config_init();
1970 +- if (rc)
1971 ++ if (rc) {
1972 ++ kfree(test_fw_config);
1973 ++ pr_err("could not init firmware test config: %d\n", rc);
1974 + return rc;
1975 ++ }
1976 +
1977 + rc = misc_register(&test_fw_misc_device);
1978 + if (rc) {
1979 +diff --git a/mm/vmalloc.c b/mm/vmalloc.c
1980 +index a46ec261a44e..d8e877365f9f 100644
1981 +--- a/mm/vmalloc.c
1982 ++++ b/mm/vmalloc.c
1983 +@@ -1751,6 +1751,12 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
1984 + if (!addr)
1985 + return NULL;
1986 +
1987 ++ /*
1988 ++ * First make sure the mappings are removed from all page-tables
1989 ++ * before they are freed.
1990 ++ */
1991 ++ vmalloc_sync_all();
1992 ++
1993 + /*
1994 + * In this function, newly allocated vm_struct has VM_UNINITIALIZED
1995 + * flag. It means that vm_struct is not fully initialized.
1996 +@@ -2296,6 +2302,9 @@ EXPORT_SYMBOL(remap_vmalloc_range);
1997 + /*
1998 + * Implement a stub for vmalloc_sync_all() if the architecture chose not to
1999 + * have one.
2000 ++ *
2001 ++ * The purpose of this function is to make sure the vmalloc area
2002 ++ * mappings are identical in all page-tables in the system.
2003 + */
2004 + void __weak vmalloc_sync_all(void)
2005 + {
2006 +diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
2007 +index 12843c9ef142..74b19a5c572e 100644
2008 +--- a/net/ipv4/netfilter/ipt_rpfilter.c
2009 ++++ b/net/ipv4/netfilter/ipt_rpfilter.c
2010 +@@ -96,6 +96,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
2011 + flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
2012 + flow.flowi4_tos = RT_TOS(iph->tos);
2013 + flow.flowi4_scope = RT_SCOPE_UNIVERSE;
2014 ++ flow.flowi4_oif = l3mdev_master_ifindex_rcu(xt_in(par));
2015 +
2016 + return rpfilter_lookup_reverse(xt_net(par), &flow, xt_in(par), info->flags) ^ invert;
2017 + }
2018 +diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
2019 +index c3c6b09acdc4..0f3407f2851e 100644
2020 +--- a/net/ipv6/netfilter/ip6t_rpfilter.c
2021 ++++ b/net/ipv6/netfilter/ip6t_rpfilter.c
2022 +@@ -58,7 +58,9 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
2023 + if (rpfilter_addr_linklocal(&iph->saddr)) {
2024 + lookup_flags |= RT6_LOOKUP_F_IFACE;
2025 + fl6.flowi6_oif = dev->ifindex;
2026 +- } else if ((flags & XT_RPFILTER_LOOSE) == 0)
2027 ++ /* Set flowi6_oif for vrf devices to lookup route in l3mdev domain. */
2028 ++ } else if (netif_is_l3_master(dev) || netif_is_l3_slave(dev) ||
2029 ++ (flags & XT_RPFILTER_LOOSE) == 0)
2030 + fl6.flowi6_oif = dev->ifindex;
2031 +
2032 + rt = (void *)ip6_route_lookup(net, &fl6, skb, lookup_flags);
2033 +@@ -73,7 +75,9 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
2034 + goto out;
2035 + }
2036 +
2037 +- if (rt->rt6i_idev->dev == dev || (flags & XT_RPFILTER_LOOSE))
2038 ++ if (rt->rt6i_idev->dev == dev ||
2039 ++ l3mdev_master_ifindex_rcu(rt->rt6i_idev->dev) == dev->ifindex ||
2040 ++ (flags & XT_RPFILTER_LOOSE))
2041 + ret = true;
2042 + out:
2043 + ip6_rt_put(rt);
2044 +diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
2045 +index bb886e7db47f..f783d1377d9a 100644
2046 +--- a/net/mac80211/driver-ops.c
2047 ++++ b/net/mac80211/driver-ops.c
2048 +@@ -169,11 +169,16 @@ int drv_conf_tx(struct ieee80211_local *local,
2049 + if (!check_sdata_in_driver(sdata))
2050 + return -EIO;
2051 +
2052 +- if (WARN_ONCE(params->cw_min == 0 ||
2053 +- params->cw_min > params->cw_max,
2054 +- "%s: invalid CW_min/CW_max: %d/%d\n",
2055 +- sdata->name, params->cw_min, params->cw_max))
2056 ++ if (params->cw_min == 0 || params->cw_min > params->cw_max) {
2057 ++ /*
2058 ++ * If we can't configure hardware anyway, don't warn. We may
2059 ++ * never have initialized the CW parameters.
2060 ++ */
2061 ++ WARN_ONCE(local->ops->conf_tx,
2062 ++ "%s: invalid CW_min/CW_max: %d/%d\n",
2063 ++ sdata->name, params->cw_min, params->cw_max);
2064 + return -EINVAL;
2065 ++ }
2066 +
2067 + trace_drv_conf_tx(local, sdata, ac, params);
2068 + if (local->ops->conf_tx)
2069 +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
2070 +index 1aaa73fa308e..b5c06242a92e 100644
2071 +--- a/net/mac80211/mlme.c
2072 ++++ b/net/mac80211/mlme.c
2073 +@@ -1967,6 +1967,16 @@ ieee80211_sta_wmm_params(struct ieee80211_local *local,
2074 + ieee80211_regulatory_limit_wmm_params(sdata, &params[ac], ac);
2075 + }
2076 +
2077 ++ /* WMM specification requires all 4 ACIs. */
2078 ++ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
2079 ++ if (params[ac].cw_min == 0) {
2080 ++ sdata_info(sdata,
2081 ++ "AP has invalid WMM params (missing AC %d), using defaults\n",
2082 ++ ac);
2083 ++ return false;
2084 ++ }
2085 ++ }
2086 ++
2087 + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
2088 + mlme_dbg(sdata,
2089 + "WMM AC=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d, downgraded=%d\n",
2090 +diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
2091 +index 842f3f86fb2e..7011ab27c437 100644
2092 +--- a/net/netfilter/nf_conntrack_proto_tcp.c
2093 ++++ b/net/netfilter/nf_conntrack_proto_tcp.c
2094 +@@ -480,6 +480,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
2095 + struct ip_ct_tcp_state *receiver = &state->seen[!dir];
2096 + const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
2097 + __u32 seq, ack, sack, end, win, swin;
2098 ++ u16 win_raw;
2099 + s32 receiver_offset;
2100 + bool res, in_recv_win;
2101 +
2102 +@@ -488,7 +489,8 @@ static bool tcp_in_window(const struct nf_conn *ct,
2103 + */
2104 + seq = ntohl(tcph->seq);
2105 + ack = sack = ntohl(tcph->ack_seq);
2106 +- win = ntohs(tcph->window);
2107 ++ win_raw = ntohs(tcph->window);
2108 ++ win = win_raw;
2109 + end = segment_seq_plus_len(seq, skb->len, dataoff, tcph);
2110 +
2111 + if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
2112 +@@ -663,14 +665,14 @@ static bool tcp_in_window(const struct nf_conn *ct,
2113 + && state->last_seq == seq
2114 + && state->last_ack == ack
2115 + && state->last_end == end
2116 +- && state->last_win == win)
2117 ++ && state->last_win == win_raw)
2118 + state->retrans++;
2119 + else {
2120 + state->last_dir = dir;
2121 + state->last_seq = seq;
2122 + state->last_ack = ack;
2123 + state->last_end = end;
2124 +- state->last_win = win;
2125 ++ state->last_win = win_raw;
2126 + state->retrans = 0;
2127 + }
2128 + }
2129 +diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
2130 +index 916913454624..7f2c1915763f 100644
2131 +--- a/net/netfilter/nfnetlink.c
2132 ++++ b/net/netfilter/nfnetlink.c
2133 +@@ -575,7 +575,7 @@ static int nfnetlink_bind(struct net *net, int group)
2134 + ss = nfnetlink_get_subsys(type << 8);
2135 + rcu_read_unlock();
2136 + if (!ss)
2137 +- request_module("nfnetlink-subsys-%d", type);
2138 ++ request_module_nowait("nfnetlink-subsys-%d", type);
2139 + return 0;
2140 + }
2141 + #endif
2142 +diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
2143 +index c2d237144f74..b8f23f75aea6 100644
2144 +--- a/net/netfilter/nft_hash.c
2145 ++++ b/net/netfilter/nft_hash.c
2146 +@@ -196,7 +196,7 @@ static int nft_symhash_init(const struct nft_ctx *ctx,
2147 + priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]);
2148 +
2149 + priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS]));
2150 +- if (priv->modulus <= 1)
2151 ++ if (priv->modulus < 1)
2152 + return -ERANGE;
2153 +
2154 + if (priv->offset + priv->modulus - 1 < priv->offset)
2155 +diff --git a/scripts/sphinx-pre-install b/scripts/sphinx-pre-install
2156 +index 067459760a7b..3524dbc31316 100755
2157 +--- a/scripts/sphinx-pre-install
2158 ++++ b/scripts/sphinx-pre-install
2159 +@@ -301,7 +301,7 @@ sub give_redhat_hints()
2160 + #
2161 + # Checks valid for RHEL/CentOS version 7.x.
2162 + #
2163 +- if (! $system_release =~ /Fedora/) {
2164 ++ if (!($system_release =~ /Fedora/)) {
2165 + $map{"virtualenv"} = "python-virtualenv";
2166 + }
2167 +
2168 +diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
2169 +index 8b78ddffa509..516ec3587325 100644
2170 +--- a/sound/core/compress_offload.c
2171 ++++ b/sound/core/compress_offload.c
2172 +@@ -575,10 +575,7 @@ snd_compr_set_params(struct snd_compr_stream *stream, unsigned long arg)
2173 + stream->metadata_set = false;
2174 + stream->next_track = false;
2175 +
2176 +- if (stream->direction == SND_COMPRESS_PLAYBACK)
2177 +- stream->runtime->state = SNDRV_PCM_STATE_SETUP;
2178 +- else
2179 +- stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
2180 ++ stream->runtime->state = SNDRV_PCM_STATE_SETUP;
2181 + } else {
2182 + return -EPERM;
2183 + }
2184 +@@ -694,8 +691,17 @@ static int snd_compr_start(struct snd_compr_stream *stream)
2185 + {
2186 + int retval;
2187 +
2188 +- if (stream->runtime->state != SNDRV_PCM_STATE_PREPARED)
2189 ++ switch (stream->runtime->state) {
2190 ++ case SNDRV_PCM_STATE_SETUP:
2191 ++ if (stream->direction != SND_COMPRESS_CAPTURE)
2192 ++ return -EPERM;
2193 ++ break;
2194 ++ case SNDRV_PCM_STATE_PREPARED:
2195 ++ break;
2196 ++ default:
2197 + return -EPERM;
2198 ++ }
2199 ++
2200 + retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START);
2201 + if (!retval)
2202 + stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
2203 +@@ -706,9 +712,15 @@ static int snd_compr_stop(struct snd_compr_stream *stream)
2204 + {
2205 + int retval;
2206 +
2207 +- if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
2208 +- stream->runtime->state == SNDRV_PCM_STATE_SETUP)
2209 ++ switch (stream->runtime->state) {
2210 ++ case SNDRV_PCM_STATE_OPEN:
2211 ++ case SNDRV_PCM_STATE_SETUP:
2212 ++ case SNDRV_PCM_STATE_PREPARED:
2213 + return -EPERM;
2214 ++ default:
2215 ++ break;
2216 ++ }
2217 ++
2218 + retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
2219 + if (!retval) {
2220 + snd_compr_drain_notify(stream);
2221 +@@ -796,9 +808,17 @@ static int snd_compr_drain(struct snd_compr_stream *stream)
2222 + {
2223 + int retval;
2224 +
2225 +- if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
2226 +- stream->runtime->state == SNDRV_PCM_STATE_SETUP)
2227 ++ switch (stream->runtime->state) {
2228 ++ case SNDRV_PCM_STATE_OPEN:
2229 ++ case SNDRV_PCM_STATE_SETUP:
2230 ++ case SNDRV_PCM_STATE_PREPARED:
2231 ++ case SNDRV_PCM_STATE_PAUSED:
2232 + return -EPERM;
2233 ++ case SNDRV_PCM_STATE_XRUN:
2234 ++ return -EPIPE;
2235 ++ default:
2236 ++ break;
2237 ++ }
2238 +
2239 + retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
2240 + if (retval) {
2241 +@@ -818,6 +838,10 @@ static int snd_compr_next_track(struct snd_compr_stream *stream)
2242 + if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
2243 + return -EPERM;
2244 +
2245 ++ /* next track doesn't have any meaning for capture streams */
2246 ++ if (stream->direction == SND_COMPRESS_CAPTURE)
2247 ++ return -EPERM;
2248 ++
2249 + /* you can signal next track if this is intended to be a gapless stream
2250 + * and current track metadata is set
2251 + */
2252 +@@ -835,9 +859,23 @@ static int snd_compr_next_track(struct snd_compr_stream *stream)
2253 + static int snd_compr_partial_drain(struct snd_compr_stream *stream)
2254 + {
2255 + int retval;
2256 +- if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
2257 +- stream->runtime->state == SNDRV_PCM_STATE_SETUP)
2258 ++
2259 ++ switch (stream->runtime->state) {
2260 ++ case SNDRV_PCM_STATE_OPEN:
2261 ++ case SNDRV_PCM_STATE_SETUP:
2262 ++ case SNDRV_PCM_STATE_PREPARED:
2263 ++ case SNDRV_PCM_STATE_PAUSED:
2264 ++ return -EPERM;
2265 ++ case SNDRV_PCM_STATE_XRUN:
2266 ++ return -EPIPE;
2267 ++ default:
2268 ++ break;
2269 ++ }
2270 ++
2271 ++ /* partial drain doesn't have any meaning for capture streams */
2272 ++ if (stream->direction == SND_COMPRESS_CAPTURE)
2273 + return -EPERM;
2274 ++
2275 + /* stream can be drained only when next track has been signalled */
2276 + if (stream->next_track == false)
2277 + return -EPERM;
2278 +diff --git a/sound/firewire/packets-buffer.c b/sound/firewire/packets-buffer.c
2279 +index 1ebf00c83409..715cd99f28de 100644
2280 +--- a/sound/firewire/packets-buffer.c
2281 ++++ b/sound/firewire/packets-buffer.c
2282 +@@ -37,7 +37,7 @@ int iso_packets_buffer_init(struct iso_packets_buffer *b, struct fw_unit *unit,
2283 + packets_per_page = PAGE_SIZE / packet_size;
2284 + if (WARN_ON(!packets_per_page)) {
2285 + err = -EINVAL;
2286 +- goto error;
2287 ++ goto err_packets;
2288 + }
2289 + pages = DIV_ROUND_UP(count, packets_per_page);
2290 +
2291 +diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
2292 +index a12e594d4e3b..a41c1bec7c88 100644
2293 +--- a/sound/pci/hda/hda_controller.c
2294 ++++ b/sound/pci/hda/hda_controller.c
2295 +@@ -609,11 +609,9 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
2296 + }
2297 + runtime->private_data = azx_dev;
2298 +
2299 +- if (chip->gts_present)
2300 +- azx_pcm_hw.info = azx_pcm_hw.info |
2301 +- SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
2302 +-
2303 + runtime->hw = azx_pcm_hw;
2304 ++ if (chip->gts_present)
2305 ++ runtime->hw.info |= SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
2306 + runtime->hw.channels_min = hinfo->channels_min;
2307 + runtime->hw.channels_max = hinfo->channels_max;
2308 + runtime->hw.formats = hinfo->formats;
2309 +@@ -626,6 +624,13 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
2310 + 20,
2311 + 178000000);
2312 +
2313 ++ /* for some reason, the playback stream stalls on PulseAudio with
2314 ++ * tsched=1 when a capture stream triggers. Until we figure out the
2315 ++ * real cause, disable tsched mode by setting the PCM info BATCH flag.
2316 ++ */
2317 ++ if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND)
2318 ++ runtime->hw.info |= SNDRV_PCM_INFO_BATCH;
2319 ++
2320 + if (chip->align_buffer_size)
2321 + /* constrain buffer sizes to be multiple of 128
2322 + bytes. This is more efficient in terms of memory
2323 +diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
2324 +index 53c3cd28bc99..8a9dd4767b1e 100644
2325 +--- a/sound/pci/hda/hda_controller.h
2326 ++++ b/sound/pci/hda/hda_controller.h
2327 +@@ -40,7 +40,7 @@
2328 + /* 14 unused */
2329 + #define AZX_DCAPS_CTX_WORKAROUND (1 << 15) /* X-Fi workaround */
2330 + #define AZX_DCAPS_POSFIX_LPIB (1 << 16) /* Use LPIB as default */
2331 +-/* 17 unused */
2332 ++#define AZX_DCAPS_AMD_WORKAROUND (1 << 17) /* AMD-specific workaround */
2333 + #define AZX_DCAPS_NO_64BIT (1 << 18) /* No 64bit address */
2334 + #define AZX_DCAPS_SYNC_WRITE (1 << 19) /* sync each cmd write */
2335 + #define AZX_DCAPS_OLD_SSYNC (1 << 20) /* Old SSYNC reg for ICH */
2336 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
2337 +index 308ce76149cc..81cea34aff1c 100644
2338 +--- a/sound/pci/hda/hda_intel.c
2339 ++++ b/sound/pci/hda/hda_intel.c
2340 +@@ -78,6 +78,7 @@ enum {
2341 + POS_FIX_VIACOMBO,
2342 + POS_FIX_COMBO,
2343 + POS_FIX_SKL,
2344 ++ POS_FIX_FIFO,
2345 + };
2346 +
2347 + /* Defines for ATI HD Audio support in SB450 south bridge */
2348 +@@ -149,7 +150,7 @@ module_param_array(model, charp, NULL, 0444);
2349 + MODULE_PARM_DESC(model, "Use the given board model.");
2350 + module_param_array(position_fix, int, NULL, 0444);
2351 + MODULE_PARM_DESC(position_fix, "DMA pointer read method."
2352 +- "(-1 = system default, 0 = auto, 1 = LPIB, 2 = POSBUF, 3 = VIACOMBO, 4 = COMBO, 5 = SKL+).");
2353 ++ "(-1 = system default, 0 = auto, 1 = LPIB, 2 = POSBUF, 3 = VIACOMBO, 4 = COMBO, 5 = SKL+, 6 = FIFO).");
2354 + module_param_array(bdl_pos_adj, int, NULL, 0644);
2355 + MODULE_PARM_DESC(bdl_pos_adj, "BDL position adjustment offset.");
2356 + module_param_array(probe_mask, int, NULL, 0444);
2357 +@@ -350,6 +351,11 @@ enum {
2358 + #define AZX_DCAPS_PRESET_ATI_HDMI_NS \
2359 + (AZX_DCAPS_PRESET_ATI_HDMI | AZX_DCAPS_SNOOP_OFF)
2360 +
2361 ++/* quirks for AMD SB */
2362 ++#define AZX_DCAPS_PRESET_AMD_SB \
2363 ++ (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_AMD_WORKAROUND |\
2364 ++ AZX_DCAPS_SNOOP_TYPE(ATI) | AZX_DCAPS_PM_RUNTIME)
2365 ++
2366 + /* quirks for Nvidia */
2367 + #define AZX_DCAPS_PRESET_NVIDIA \
2368 + (AZX_DCAPS_NO_MSI | AZX_DCAPS_CORBRP_SELF_CLEAR |\
2369 +@@ -920,6 +926,49 @@ static unsigned int azx_via_get_position(struct azx *chip,
2370 + return bound_pos + mod_dma_pos;
2371 + }
2372 +
2373 ++#define AMD_FIFO_SIZE 32
2374 ++
2375 ++/* get the current DMA position with FIFO size correction */
2376 ++static unsigned int azx_get_pos_fifo(struct azx *chip, struct azx_dev *azx_dev)
2377 ++{
2378 ++ struct snd_pcm_substream *substream = azx_dev->core.substream;
2379 ++ struct snd_pcm_runtime *runtime = substream->runtime;
2380 ++ unsigned int pos, delay;
2381 ++
2382 ++ pos = snd_hdac_stream_get_pos_lpib(azx_stream(azx_dev));
2383 ++ if (!runtime)
2384 ++ return pos;
2385 ++
2386 ++ runtime->delay = AMD_FIFO_SIZE;
2387 ++ delay = frames_to_bytes(runtime, AMD_FIFO_SIZE);
2388 ++ if (azx_dev->insufficient) {
2389 ++ if (pos < delay) {
2390 ++ delay = pos;
2391 ++ runtime->delay = bytes_to_frames(runtime, pos);
2392 ++ } else {
2393 ++ azx_dev->insufficient = 0;
2394 ++ }
2395 ++ }
2396 ++
2397 ++ /* correct the DMA position for capture stream */
2398 ++ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
2399 ++ if (pos < delay)
2400 ++ pos += azx_dev->core.bufsize;
2401 ++ pos -= delay;
2402 ++ }
2403 ++
2404 ++ return pos;
2405 ++}
2406 ++
2407 ++static int azx_get_delay_from_fifo(struct azx *chip, struct azx_dev *azx_dev,
2408 ++ unsigned int pos)
2409 ++{
2410 ++ struct snd_pcm_substream *substream = azx_dev->core.substream;
2411 ++
2412 ++ /* just read back the calculated value in the above */
2413 ++ return substream->runtime->delay;
2414 ++}
2415 ++
2416 + static unsigned int azx_skl_get_dpib_pos(struct azx *chip,
2417 + struct azx_dev *azx_dev)
2418 + {
2419 +@@ -1528,6 +1577,7 @@ static int check_position_fix(struct azx *chip, int fix)
2420 + case POS_FIX_VIACOMBO:
2421 + case POS_FIX_COMBO:
2422 + case POS_FIX_SKL:
2423 ++ case POS_FIX_FIFO:
2424 + return fix;
2425 + }
2426 +
2427 +@@ -1544,6 +1594,10 @@ static int check_position_fix(struct azx *chip, int fix)
2428 + dev_dbg(chip->card->dev, "Using VIACOMBO position fix\n");
2429 + return POS_FIX_VIACOMBO;
2430 + }
2431 ++ if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND) {
2432 ++ dev_dbg(chip->card->dev, "Using FIFO position fix\n");
2433 ++ return POS_FIX_FIFO;
2434 ++ }
2435 + if (chip->driver_caps & AZX_DCAPS_POSFIX_LPIB) {
2436 + dev_dbg(chip->card->dev, "Using LPIB position fix\n");
2437 + return POS_FIX_LPIB;
2438 +@@ -1564,6 +1618,7 @@ static void assign_position_fix(struct azx *chip, int fix)
2439 + [POS_FIX_VIACOMBO] = azx_via_get_position,
2440 + [POS_FIX_COMBO] = azx_get_pos_lpib,
2441 + [POS_FIX_SKL] = azx_get_pos_skl,
2442 ++ [POS_FIX_FIFO] = azx_get_pos_fifo,
2443 + };
2444 +
2445 + chip->get_position[0] = chip->get_position[1] = callbacks[fix];
2446 +@@ -1578,6 +1633,9 @@ static void assign_position_fix(struct azx *chip, int fix)
2447 + azx_get_delay_from_lpib;
2448 + }
2449 +
2450 ++ if (fix == POS_FIX_FIFO)
2451 ++ chip->get_delay[0] = chip->get_delay[1] =
2452 ++ azx_get_delay_from_fifo;
2453 + }
2454 +
2455 + /*
2456 +@@ -2594,6 +2652,9 @@ static const struct pci_device_id azx_ids[] = {
2457 + /* AMD Hudson */
2458 + { PCI_DEVICE(0x1022, 0x780d),
2459 + .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
2460 ++ /* AMD, X370 & co */
2461 ++ { PCI_DEVICE(0x1022, 0x1457),
2462 ++ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
2463 + /* AMD Stoney */
2464 + { PCI_DEVICE(0x1022, 0x157a),
2465 + .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
2466 +diff --git a/sound/sound_core.c b/sound/sound_core.c
2467 +index 40ad000c2e3c..dd64c4b19f23 100644
2468 +--- a/sound/sound_core.c
2469 ++++ b/sound/sound_core.c
2470 +@@ -280,7 +280,8 @@ retry:
2471 + goto retry;
2472 + }
2473 + spin_unlock(&sound_loader_lock);
2474 +- return -EBUSY;
2475 ++ r = -EBUSY;
2476 ++ goto fail;
2477 + }
2478 + }
2479 +
2480 +diff --git a/sound/usb/hiface/pcm.c b/sound/usb/hiface/pcm.c
2481 +index e1fbb9cc9ea7..a197fc3b9ab0 100644
2482 +--- a/sound/usb/hiface/pcm.c
2483 ++++ b/sound/usb/hiface/pcm.c
2484 +@@ -604,14 +604,13 @@ int hiface_pcm_init(struct hiface_chip *chip, u8 extra_freq)
2485 + ret = hiface_pcm_init_urb(&rt->out_urbs[i], chip, OUT_EP,
2486 + hiface_pcm_out_urb_handler);
2487 + if (ret < 0)
2488 +- return ret;
2489 ++ goto error;
2490 + }
2491 +
2492 + ret = snd_pcm_new(chip->card, "USB-SPDIF Audio", 0, 1, 0, &pcm);
2493 + if (ret < 0) {
2494 +- kfree(rt);
2495 + dev_err(&chip->dev->dev, "Cannot create pcm instance\n");
2496 +- return ret;
2497 ++ goto error;
2498 + }
2499 +
2500 + pcm->private_data = rt;
2501 +@@ -624,4 +623,10 @@ int hiface_pcm_init(struct hiface_chip *chip, u8 extra_freq)
2502 +
2503 + chip->pcm = rt;
2504 + return 0;
2505 ++
2506 ++error:
2507 ++ for (i = 0; i < PCM_N_URBS; i++)
2508 ++ kfree(rt->out_urbs[i].buffer);
2509 ++ kfree(rt);
2510 ++ return ret;
2511 + }
2512 +diff --git a/sound/usb/stream.c b/sound/usb/stream.c
2513 +index d9e3de495c16..bc582202bd10 100644
2514 +--- a/sound/usb/stream.c
2515 ++++ b/sound/usb/stream.c
2516 +@@ -1053,6 +1053,7 @@ found_clock:
2517 +
2518 + pd = kzalloc(sizeof(*pd), GFP_KERNEL);
2519 + if (!pd) {
2520 ++ kfree(fp->chmap);
2521 + kfree(fp->rate_table);
2522 + kfree(fp);
2523 + return NULL;
2524 +diff --git a/tools/perf/arch/s390/util/machine.c b/tools/perf/arch/s390/util/machine.c
2525 +index a19690a17291..c8c86a0c9b79 100644
2526 +--- a/tools/perf/arch/s390/util/machine.c
2527 ++++ b/tools/perf/arch/s390/util/machine.c
2528 +@@ -6,8 +6,9 @@
2529 + #include "machine.h"
2530 + #include "api/fs/fs.h"
2531 + #include "debug.h"
2532 ++#include "symbol.h"
2533 +
2534 +-int arch__fix_module_text_start(u64 *start, const char *name)
2535 ++int arch__fix_module_text_start(u64 *start, u64 *size, const char *name)
2536 + {
2537 + u64 m_start = *start;
2538 + char path[PATH_MAX];
2539 +@@ -17,7 +18,35 @@ int arch__fix_module_text_start(u64 *start, const char *name)
2540 + if (sysfs__read_ull(path, (unsigned long long *)start) < 0) {
2541 + pr_debug2("Using module %s start:%#lx\n", path, m_start);
2542 + *start = m_start;
2543 ++ } else {
2544 ++ /* Successful read of the module's text segment start address.
2545 ++ * Calculate difference between module start address
2546 ++ * in memory and module text segment start address.
2547 ++ * For example module load address is 0x3ff8011b000
2548 ++ * (from /proc/modules) and module text segment start
2549 ++ * address is 0x3ff8011b870 (from file above).
2550 ++ *
2551 ++ * Adjust the module size and subtract the GOT table
2552 ++ * size located at the beginning of the module.
2553 ++ */
2554 ++ *size -= (*start - m_start);
2555 + }
2556 +
2557 + return 0;
2558 + }
2559 ++
2560 ++/* On s390 kernel text segment start is located at very low memory addresses,
2561 ++ * for example 0x10000. Modules are located at very high memory addresses,
2562 ++ * for example 0x3ff xxxx xxxx. The gap between end of kernel text segment
2563 ++ * and beginning of first module's text segment is very big.
2564 ++ * Therefore do not fill this gap and do not assign it to the kernel dso map.
2565 ++ */
2566 ++void arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
2567 ++{
2568 ++ if (strchr(p->name, '[') == NULL && strchr(c->name, '['))
2569 ++ /* Last kernel symbol mapped to end of page */
2570 ++ p->end = roundup(p->end, page_size);
2571 ++ else
2572 ++ p->end = c->start;
2573 ++ pr_debug4("%s sym:%s end:%#lx\n", __func__, p->name, p->end);
2574 ++}
2575 +diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
2576 +index 99de91698de1..0bdb34fee9d8 100644
2577 +--- a/tools/perf/builtin-probe.c
2578 ++++ b/tools/perf/builtin-probe.c
2579 +@@ -711,6 +711,16 @@ __cmd_probe(int argc, const char **argv)
2580 +
2581 + ret = perf_add_probe_events(params.events, params.nevents);
2582 + if (ret < 0) {
2583 ++
2584 ++ /*
2585 ++ * When perf_add_probe_events() fails it calls
2586 ++ * cleanup_perf_probe_events(pevs, npevs), i.e.
2587 ++ * cleanup_perf_probe_events(params.events, params.nevents), which
2588 ++ * will call clear_perf_probe_event(), so set nevents to zero
2589 ++ * to avoid cleanup_params() to call clear_perf_probe_event() again
2590 ++ * on the same pevs.
2591 ++ */
2592 ++ params.nevents = 0;
2593 + pr_err_with_code(" Error: Failed to add events.", ret);
2594 + return ret;
2595 + }
2596 +diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
2597 +index 7f2e3b1c746c..a94bd6850a0b 100644
2598 +--- a/tools/perf/util/header.c
2599 ++++ b/tools/perf/util/header.c
2600 +@@ -3472,7 +3472,7 @@ int perf_event__process_feature(struct perf_tool *tool,
2601 + return 0;
2602 +
2603 + ff.buf = (void *)fe->data;
2604 +- ff.size = event->header.size - sizeof(event->header);
2605 ++ ff.size = event->header.size - sizeof(*fe);
2606 + ff.ph = &session->header;
2607 +
2608 + if (feat_ops[feat].process(&ff, NULL))
2609 +diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
2610 +index 076718a7b3ea..003b70daf0bf 100644
2611 +--- a/tools/perf/util/machine.c
2612 ++++ b/tools/perf/util/machine.c
2613 +@@ -1295,6 +1295,7 @@ static int machine__set_modules_path(struct machine *machine)
2614 + return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
2615 + }
2616 + int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
2617 ++ u64 *size __maybe_unused,
2618 + const char *name __maybe_unused)
2619 + {
2620 + return 0;
2621 +@@ -1306,7 +1307,7 @@ static int machine__create_module(void *arg, const char *name, u64 start,
2622 + struct machine *machine = arg;
2623 + struct map *map;
2624 +
2625 +- if (arch__fix_module_text_start(&start, name) < 0)
2626 ++ if (arch__fix_module_text_start(&start, &size, name) < 0)
2627 + return -1;
2628 +
2629 + map = machine__findnew_module_map(machine, start, name);
2630 +diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
2631 +index ebde3ea70225..6f3767808bd9 100644
2632 +--- a/tools/perf/util/machine.h
2633 ++++ b/tools/perf/util/machine.h
2634 +@@ -219,7 +219,7 @@ struct symbol *machine__find_kernel_symbol_by_name(struct machine *machine,
2635 +
2636 + struct map *machine__findnew_module_map(struct machine *machine, u64 start,
2637 + const char *filename);
2638 +-int arch__fix_module_text_start(u64 *start, const char *name);
2639 ++int arch__fix_module_text_start(u64 *start, u64 *size, const char *name);
2640 +
2641 + int machine__load_kallsyms(struct machine *machine, const char *filename);
2642 +
2643 +diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
2644 +index 0715f972a275..91404bacc3df 100644
2645 +--- a/tools/perf/util/symbol.c
2646 ++++ b/tools/perf/util/symbol.c
2647 +@@ -86,6 +86,11 @@ static int prefix_underscores_count(const char *str)
2648 + return tail - str;
2649 + }
2650 +
2651 ++void __weak arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
2652 ++{
2653 ++ p->end = c->start;
2654 ++}
2655 ++
2656 + const char * __weak arch__normalize_symbol_name(const char *name)
2657 + {
2658 + return name;
2659 +@@ -212,7 +217,7 @@ void symbols__fixup_end(struct rb_root *symbols)
2660 + curr = rb_entry(nd, struct symbol, rb_node);
2661 +
2662 + if (prev->end == prev->start && prev->end != curr->start)
2663 +- prev->end = curr->start;
2664 ++ arch__symbols__fixup_end(prev, curr);
2665 + }
2666 +
2667 + /* Last entry */
2668 +diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
2669 +index f25fae4b5743..76ef2facd934 100644
2670 +--- a/tools/perf/util/symbol.h
2671 ++++ b/tools/perf/util/symbol.h
2672 +@@ -349,6 +349,7 @@ const char *arch__normalize_symbol_name(const char *name);
2673 + #define SYMBOL_A 0
2674 + #define SYMBOL_B 1
2675 +
2676 ++void arch__symbols__fixup_end(struct symbol *p, struct symbol *c);
2677 + int arch__compare_symbol_names(const char *namea, const char *nameb);
2678 + int arch__compare_symbol_names_n(const char *namea, const char *nameb,
2679 + unsigned int n);
2680 +diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
2681 +index 56007a7e0b4d..2c146d0c217b 100644
2682 +--- a/tools/perf/util/thread.c
2683 ++++ b/tools/perf/util/thread.c
2684 +@@ -192,14 +192,24 @@ struct comm *thread__comm(const struct thread *thread)
2685 +
2686 + struct comm *thread__exec_comm(const struct thread *thread)
2687 + {
2688 +- struct comm *comm, *last = NULL;
2689 ++ struct comm *comm, *last = NULL, *second_last = NULL;
2690 +
2691 + list_for_each_entry(comm, &thread->comm_list, list) {
2692 + if (comm->exec)
2693 + return comm;
2694 ++ second_last = last;
2695 + last = comm;
2696 + }
2697 +
2698 ++ /*
2699 ++ * 'last' with no start time might be the parent's comm of a synthesized
2700 ++ * thread (created by processing a synthesized fork event). For a main
2701 ++ * thread, that is very probably wrong. Prefer a later comm to avoid
2702 ++ * that case.
2703 ++ */
2704 ++ if (second_last && !last->start && thread->pid_ == thread->tid)
2705 ++ return second_last;
2706 ++
2707 + return last;
2708 + }
2709 +
2710 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
2711 +index 2b36a51afb57..4a584a575221 100644
2712 +--- a/virt/kvm/kvm_main.c
2713 ++++ b/virt/kvm/kvm_main.c
2714 +@@ -2317,6 +2317,29 @@ static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
2715 + #endif
2716 + }
2717 +
2718 ++/*
2719 ++ * Unlike kvm_arch_vcpu_runnable, this function is called outside
2720 ++ * a vcpu_load/vcpu_put pair. However, for most architectures
2721 ++ * kvm_arch_vcpu_runnable does not require vcpu_load.
2722 ++ */
2723 ++bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
2724 ++{
2725 ++ return kvm_arch_vcpu_runnable(vcpu);
2726 ++}
2727 ++
2728 ++static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
2729 ++{
2730 ++ if (kvm_arch_dy_runnable(vcpu))
2731 ++ return true;
2732 ++
2733 ++#ifdef CONFIG_KVM_ASYNC_PF
2734 ++ if (!list_empty_careful(&vcpu->async_pf.done))
2735 ++ return true;
2736 ++#endif
2737 ++
2738 ++ return false;
2739 ++}
2740 ++
2741 + void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
2742 + {
2743 + struct kvm *kvm = me->kvm;
2744 +@@ -2346,7 +2369,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
2745 + continue;
2746 + if (vcpu == me)
2747 + continue;
2748 +- if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
2749 ++ if (swait_active(&vcpu->wq) && !vcpu_dy_runnable(vcpu))
2750 + continue;
2751 + if (yield_to_kernel_mode && !kvm_arch_vcpu_in_kernel(vcpu))
2752 + continue;