
From: Mike Pagano <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Tue, 29 Oct 2019 14:00:23
Message-Id: 1572357542.678f732c84b8e5770563eb32bf4f3f87041ed174.mpagano@gentoo
commit: 678f732c84b8e5770563eb32bf4f3f87041ed174
Author: Mike Pagano <mpagano@gentoo.org>
AuthorDate: Fri Aug 16 12:14:29 2019 +0000
Commit: Mike Pagano <mpagano@gentoo.org>
CommitDate: Tue Oct 29 13:59:02 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=678f732c

Linux patch 4.14.139

Signed-off-by: Mike Pagano <mpagano@gentoo.org>

0000_README | 4 +
1138_linux-4.14.139.patch | 2094 +++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 2098 insertions(+)

diff --git a/0000_README b/0000_README
index 2b98c17..4659ab2 100644
--- a/0000_README
+++ b/0000_README
@@ -595,6 +595,10 @@ Patch: 1137_linux-4.14.138.patch
From: https://www.kernel.org
Desc: Linux 4.14.138

+Patch: 1138_linux-4.14.139.patch
+From: https://www.kernel.org
+Desc: Linux 4.14.139
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1138_linux-4.14.139.patch b/1138_linux-4.14.139.patch
new file mode 100644
index 0000000..ca1dda2
--- /dev/null
+++ b/1138_linux-4.14.139.patch
@@ -0,0 +1,2094 @@
+diff --git a/Makefile b/Makefile
+index 82ae13348266..3ccf48b2714a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 138
++SUBLEVEL = 139
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+diff --git a/arch/arm/mach-davinci/sleep.S b/arch/arm/mach-davinci/sleep.S
+index cd350dee4df3..efcd400b2abb 100644
+--- a/arch/arm/mach-davinci/sleep.S
++++ b/arch/arm/mach-davinci/sleep.S
+@@ -37,6 +37,7 @@
+ #define DEEPSLEEP_SLEEPENABLE_BIT BIT(31)
+
+ .text
++ .arch armv5te
+ /*
+ * Move DaVinci into deep sleep state
+ *
+diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
+index 47d45733a346..af1f065dc9f3 100644
+--- a/arch/powerpc/kvm/powerpc.c
++++ b/arch/powerpc/kvm/powerpc.c
+@@ -58,6 +58,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
+ return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
+ }
+
++bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
++{
++ return kvm_arch_vcpu_runnable(vcpu);
++}
++
+ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+ {
+ return false;
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 9f3eb334c818..94af073476ce 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1077,6 +1077,7 @@ struct kvm_x86_ops {
+ int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
+ uint32_t guest_irq, bool set);
+ void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
++ bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
+
+ int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc);
+ void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 3a7e79f6cc77..093e7f567e69 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -4637,6 +4637,11 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
+ kvm_vcpu_wake_up(vcpu);
+ }
+
++static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
++{
++ return false;
++}
++
+ static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
+ {
+ unsigned long flags;
+@@ -5746,6 +5751,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
+
+ .pmu_ops = &amd_pmu_ops,
+ .deliver_posted_interrupt = svm_deliver_avic_intr,
++ .dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
+ .update_pi_irte = svm_update_pi_irte,
+ .setup_mce = svm_setup_mce,
+ };
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index ae484edcf7a3..f467d85b0352 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -9431,6 +9431,11 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
+ return max_irr;
+ }
+
++static bool vmx_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
++{
++ return pi_test_on(vcpu_to_pi_desc(vcpu));
++}
++
+ static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+ {
+ if (!kvm_vcpu_apicv_active(vcpu))
+@@ -12756,6 +12761,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
+ .hwapic_isr_update = vmx_hwapic_isr_update,
+ .sync_pir_to_irr = vmx_sync_pir_to_irr,
+ .deliver_posted_interrupt = vmx_deliver_posted_interrupt,
++ .dy_apicv_has_pending_interrupt = vmx_dy_apicv_has_pending_interrupt,
+
+ .set_tss_addr = vmx_set_tss_addr,
+ .get_tdp_level = get_ept_level,
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index a8526042d176..a620936d97cf 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -8711,6 +8711,22 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
+ return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
+ }
+
++bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
++{
++ if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
++ return true;
++
++ if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
++ kvm_test_request(KVM_REQ_SMI, vcpu) ||
++ kvm_test_request(KVM_REQ_EVENT, vcpu))
++ return true;
++
++ if (vcpu->arch.apicv_active && kvm_x86_ops->dy_apicv_has_pending_interrupt(vcpu))
++ return true;
++
++ return false;
++}
++
+ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+ {
+ return vcpu->arch.preempted_in_kernel;
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index b162f92fd55c..27cab342a0b2 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -260,13 +260,14 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
+
+ pmd = pmd_offset(pud, address);
+ pmd_k = pmd_offset(pud_k, address);
+- if (!pmd_present(*pmd_k))
+- return NULL;
+
+- if (!pmd_present(*pmd))
++ if (pmd_present(*pmd) != pmd_present(*pmd_k))
+ set_pmd(pmd, *pmd_k);
++
++ if (!pmd_present(*pmd_k))
++ return NULL;
+ else
+- BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
++ BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));
+
+ return pmd_k;
+ }
+@@ -286,17 +287,13 @@ void vmalloc_sync_all(void)
+ spin_lock(&pgd_lock);
+ list_for_each_entry(page, &pgd_list, lru) {
+ spinlock_t *pgt_lock;
+- pmd_t *ret;
+
+ /* the pgt_lock only for Xen */
+ pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
+
+ spin_lock(pgt_lock);
+- ret = vmalloc_sync_one(page_address(page), address);
++ vmalloc_sync_one(page_address(page), address);
+ spin_unlock(pgt_lock);
+-
+- if (!ret)
+- break;
+ }
+ spin_unlock(&pgd_lock);
+ }
+diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
+index ca414910710e..b0a7afd4e7d3 100644
+--- a/drivers/acpi/arm64/iort.c
++++ b/drivers/acpi/arm64/iort.c
+@@ -506,8 +506,8 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id,
+
+ /* Move to ITS specific data */
+ its = (struct acpi_iort_its_group *)node->node_data;
+- if (idx > its->its_count) {
+- dev_err(dev, "requested ITS ID index [%d] is greater than available [%d]\n",
++ if (idx >= its->its_count) {
++ dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
+ idx, its->its_count);
+ return -ENXIO;
+ }
+diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
+index 1aad373da50e..8fbdfaacc222 100644
+--- a/drivers/block/drbd/drbd_receiver.c
++++ b/drivers/block/drbd/drbd_receiver.c
+@@ -5237,7 +5237,7 @@ static int drbd_do_auth(struct drbd_connection *connection)
+ unsigned int key_len;
+ char secret[SHARED_SECRET_MAX]; /* 64 byte */
+ unsigned int resp_size;
+- SHASH_DESC_ON_STACK(desc, connection->cram_hmac_tfm);
++ struct shash_desc *desc;
+ struct packet_info pi;
+ struct net_conf *nc;
+ int err, rv;
+@@ -5250,6 +5250,13 @@ static int drbd_do_auth(struct drbd_connection *connection)
+ memcpy(secret, nc->shared_secret, key_len);
+ rcu_read_unlock();
+
++ desc = kmalloc(sizeof(struct shash_desc) +
++ crypto_shash_descsize(connection->cram_hmac_tfm),
++ GFP_KERNEL);
++ if (!desc) {
++ rv = -1;
++ goto fail;
++ }
+ desc->tfm = connection->cram_hmac_tfm;
+ desc->flags = 0;
+
+@@ -5392,7 +5399,10 @@ static int drbd_do_auth(struct drbd_connection *connection)
+ kfree(peers_ch);
+ kfree(response);
+ kfree(right_response);
+- shash_desc_zero(desc);
++ if (desc) {
++ shash_desc_zero(desc);
++ kfree(desc);
++ }
+
+ return rv;
+ }
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index bd447de4a5b8..87d7c42affbc 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -857,7 +857,7 @@ static void loop_unprepare_queue(struct loop_device *lo)
+
+ static int loop_kthread_worker_fn(void *worker_ptr)
+{
+- current->flags |= PF_LESS_THROTTLE;
++ current->flags |= PF_LESS_THROTTLE | PF_MEMALLOC_NOIO;
+ return kthread_worker_fn(worker_ptr);
+ }
+
+diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
+index 8456492124f0..d1bdd8f62247 100644
+--- a/drivers/cpufreq/pasemi-cpufreq.c
++++ b/drivers/cpufreq/pasemi-cpufreq.c
+@@ -145,10 +145,18 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ int err = -ENODEV;
+
+ cpu = of_get_cpu_node(policy->cpu, NULL);
++ if (!cpu)
++ goto out;
+
++ max_freqp = of_get_property(cpu, "clock-frequency", NULL);
+ of_node_put(cpu);
+- if (!cpu)
++ if (!max_freqp) {
++ err = -EINVAL;
+ goto out;
++ }
++
++ /* we need the freq in kHz */
++ max_freq = *max_freqp / 1000;
+
+ dn = of_find_compatible_node(NULL, NULL, "1682m-sdc");
+ if (!dn)
+@@ -185,16 +193,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ }
+
+ pr_debug("init cpufreq on CPU %d\n", policy->cpu);
+-
+- max_freqp = of_get_property(cpu, "clock-frequency", NULL);
+- if (!max_freqp) {
+- err = -EINVAL;
+- goto out_unmap_sdcpwr;
+- }
+-
+- /* we need the freq in kHz */
+- max_freq = *max_freqp / 1000;
+-
+ pr_debug("max clock-frequency is at %u kHz\n", max_freq);
+ pr_debug("initializing frequency table\n");
+
+@@ -212,9 +210,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
+
+ return cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency());
+
+-out_unmap_sdcpwr:
+- iounmap(sdcpwr_mapbase);
+-
+ out_unmap_sdcasr:
+ iounmap(sdcasr_mapbase);
+ out:
+diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+index 52313524a4dd..2ab97ecd9a08 100644
+--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
++++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+@@ -63,6 +63,19 @@ static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
+ static int ccp_aes_gcm_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+ {
++ switch (authsize) {
++ case 16:
++ case 15:
++ case 14:
++ case 13:
++ case 12:
++ case 8:
++ case 4:
++ break;
++ default:
++ return -EINVAL;
++ }
++
+ return 0;
+ }
+
+@@ -109,6 +122,7 @@ static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt)
+ memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+ INIT_LIST_HEAD(&rctx->cmd.entry);
+ rctx->cmd.engine = CCP_ENGINE_AES;
++ rctx->cmd.u.aes.authsize = crypto_aead_authsize(tfm);
+ rctx->cmd.u.aes.type = ctx->u.aes.type;
+ rctx->cmd.u.aes.mode = ctx->u.aes.mode;
+ rctx->cmd.u.aes.action = encrypt;
+diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
+index 73e49840305b..1e2e42106dee 100644
+--- a/drivers/crypto/ccp/ccp-ops.c
++++ b/drivers/crypto/ccp/ccp-ops.c
+@@ -178,14 +178,18 @@ static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
+ return 0;
+ }
+
+-static void ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
+- struct scatterlist *sg, unsigned int sg_offset,
+- unsigned int len)
++static int ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
++ struct scatterlist *sg, unsigned int sg_offset,
++ unsigned int len)
+ {
+ WARN_ON(!wa->address);
+
++ if (len > (wa->length - wa_offset))
++ return -EINVAL;
++
+ scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
+ 0);
++ return 0;
+ }
+
+ static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
+@@ -205,8 +209,11 @@ static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
+ unsigned int len)
+ {
+ u8 *p, *q;
++ int rc;
+
+- ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);
++ rc = ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);
++ if (rc)
++ return rc;
+
+ p = wa->address + wa_offset;
+ q = p + len - 1;
+@@ -509,7 +516,9 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
+ return ret;
+
+ dm_offset = CCP_SB_BYTES - aes->key_len;
+- ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
++ ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
++ if (ret)
++ goto e_key;
+ ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+@@ -528,7 +537,9 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
+ goto e_key;
+
+ dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
+- ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
++ ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
++ if (ret)
++ goto e_ctx;
+ ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+@@ -556,8 +567,10 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
+ goto e_src;
+ }
+
+- ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
+- aes->cmac_key_len);
++ ret = ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
++ aes->cmac_key_len);
++ if (ret)
++ goto e_src;
+ ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+@@ -612,6 +625,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+
+ unsigned long long *final;
+ unsigned int dm_offset;
++ unsigned int authsize;
+ unsigned int jobid;
+ unsigned int ilen;
+ bool in_place = true; /* Default value */
+@@ -633,6 +647,21 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+ if (!aes->key) /* Gotta have a key SGL */
+ return -EINVAL;
+
++ /* Zero defaults to 16 bytes, the maximum size */
++ authsize = aes->authsize ? aes->authsize : AES_BLOCK_SIZE;
++ switch (authsize) {
++ case 16:
++ case 15:
++ case 14:
++ case 13:
++ case 12:
++ case 8:
++ case 4:
++ break;
++ default:
++ return -EINVAL;
++ }
++
+ /* First, decompose the source buffer into AAD & PT,
+ * and the destination buffer into AAD, CT & tag, or
+ * the input into CT & tag.
+@@ -647,7 +676,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+ p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen);
+ } else {
+ /* Input length for decryption includes tag */
+- ilen = aes->src_len - AES_BLOCK_SIZE;
++ ilen = aes->src_len - authsize;
+ p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
+ }
+
+@@ -669,7 +698,9 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+ return ret;
+
+ dm_offset = CCP_SB_BYTES - aes->key_len;
+- ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
++ ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
++ if (ret)
++ goto e_key;
+ ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+@@ -688,7 +719,9 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+ goto e_key;
+
+ dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
+- ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
++ ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
++ if (ret)
++ goto e_ctx;
+
+ ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+@@ -752,8 +785,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+ while (src.sg_wa.bytes_left) {
+ ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
+ if (!src.sg_wa.bytes_left) {
+- unsigned int nbytes = aes->src_len
+- % AES_BLOCK_SIZE;
++ unsigned int nbytes = ilen % AES_BLOCK_SIZE;
+
+ if (nbytes) {
+ op.eom = 1;
+@@ -780,7 +812,9 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+ goto e_dst;
+ }
+
+- ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
++ ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
++ if (ret)
++ goto e_dst;
+
+ ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+@@ -823,17 +857,19 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+
+ if (aes->action == CCP_AES_ACTION_ENCRYPT) {
+ /* Put the ciphered tag after the ciphertext. */
+- ccp_get_dm_area(&final_wa, 0, p_tag, 0, AES_BLOCK_SIZE);
++ ccp_get_dm_area(&final_wa, 0, p_tag, 0, authsize);
+ } else {
+ /* Does this ciphered tag match the input? */
+- ret = ccp_init_dm_workarea(&tag, cmd_q, AES_BLOCK_SIZE,
++ ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
+ DMA_BIDIRECTIONAL);
+ if (ret)
+ goto e_tag;
+- ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
++ ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
++ if (ret)
++ goto e_tag;
+
+ ret = crypto_memneq(tag.address, final_wa.address,
+- AES_BLOCK_SIZE) ? -EBADMSG : 0;
++ authsize) ? -EBADMSG : 0;
+ ccp_dm_free(&tag);
+ }
+
+@@ -841,11 +877,11 @@ e_tag:
+ ccp_dm_free(&final_wa);
+
+ e_dst:
+- if (aes->src_len && !in_place)
++ if (ilen > 0 && !in_place)
+ ccp_free_data(&dst, cmd_q);
+
+ e_src:
+- if (aes->src_len)
++ if (ilen > 0)
+ ccp_free_data(&src, cmd_q);
+
+ e_aad:
+@@ -925,7 +961,9 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+ return ret;
+
+ dm_offset = CCP_SB_BYTES - aes->key_len;
+- ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
++ ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
++ if (ret)
++ goto e_key;
+ ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+@@ -946,7 +984,9 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+ if (aes->mode != CCP_AES_MODE_ECB) {
+ /* Load the AES context - convert to LE */
+ dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
+- ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
++ ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
++ if (ret)
++ goto e_ctx;
+ ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+@@ -1124,8 +1164,12 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
+ * big endian to little endian.
+ */
+ dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
+- ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
+- ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
++ ret = ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
++ if (ret)
++ goto e_key;
++ ret = ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
++ if (ret)
++ goto e_key;
+ } else {
+ /* Version 5 CCPs use a 512-bit space for the key: each portion
+ * occupies 256 bits, or one entire slot, and is zero-padded.
+@@ -1134,9 +1178,13 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
+
+ dm_offset = CCP_SB_BYTES;
+ pad = dm_offset - xts->key_len;
+- ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
+- ccp_set_dm_area(&key, dm_offset + pad, xts->key, xts->key_len,
+- xts->key_len);
++ ret = ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
++ if (ret)
++ goto e_key;
++ ret = ccp_set_dm_area(&key, dm_offset + pad, xts->key,
++ xts->key_len, xts->key_len);
++ if (ret)
++ goto e_key;
+ }
+ ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+@@ -1155,7 +1203,9 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
+ if (ret)
+ goto e_key;
+
+- ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
++ ret = ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
++ if (ret)
++ goto e_ctx;
+ ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ CCP_PASSTHRU_BYTESWAP_NOOP);
+ if (ret) {
+@@ -1298,12 +1348,18 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+ dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */
+
+ len_singlekey = des3->key_len / 3;
+- ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
+- des3->key, 0, len_singlekey);
+- ccp_set_dm_area(&key, dm_offset + len_singlekey,
+- des3->key, len_singlekey, len_singlekey);
+- ccp_set_dm_area(&key, dm_offset,
+- des3->key, 2 * len_singlekey, len_singlekey);
++ ret = ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
++ des3->key, 0, len_singlekey);
++ if (ret)
++ goto e_key;
++ ret = ccp_set_dm_area(&key, dm_offset + len_singlekey,
++ des3->key, len_singlekey, len_singlekey);
++ if (ret)
++ goto e_key;
++ ret = ccp_set_dm_area(&key, dm_offset,
++ des3->key, 2 * len_singlekey, len_singlekey);
++ if (ret)
++ goto e_key;
+
+ /* Copy the key to the SB */
+ ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+@@ -1331,7 +1387,10 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+
+ /* Load the context into the LSB */
+ dm_offset = CCP_SB_BYTES - des3->iv_len;
+- ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0, des3->iv_len);
++ ret = ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0,
++ des3->iv_len);
++ if (ret)
++ goto e_ctx;
+
+ if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
+ load_mode = CCP_PASSTHRU_BYTESWAP_NOOP;
+@@ -1615,8 +1674,10 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+ }
+ } else {
+ /* Restore the context */
+- ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
+- sb_count * CCP_SB_BYTES);
++ ret = ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
++ sb_count * CCP_SB_BYTES);
++ if (ret)
++ goto e_ctx;
+ }
+
+ ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+@@ -1938,7 +1999,9 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
+ if (ret)
+ return ret;
+
+- ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
++ ret = ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
++ if (ret)
++ goto e_mask;
+ ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
+ CCP_PASSTHRU_BYTESWAP_NOOP);
+ if (ret) {
+diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
+index 6e4ed5a9c6fd..42c4ff75281b 100644
+--- a/drivers/firmware/Kconfig
++++ b/drivers/firmware/Kconfig
+@@ -156,7 +156,7 @@ config DMI_SCAN_MACHINE_NON_EFI_FALLBACK
+
+ config ISCSI_IBFT_FIND
+ bool "iSCSI Boot Firmware Table Attributes"
+- depends on X86 && ACPI
++ depends on X86 && ISCSI_IBFT
+ default n
+ help
+ This option enables the kernel to find the region of memory
+@@ -167,7 +167,8 @@ config ISCSI_IBFT_FIND
+ config ISCSI_IBFT
+ tristate "iSCSI Boot Firmware Table Attributes module"
+ select ISCSI_BOOT_SYSFS
+- depends on ISCSI_IBFT_FIND && SCSI && SCSI_LOWLEVEL
++ select ISCSI_IBFT_FIND if X86
++ depends on ACPI && SCSI && SCSI_LOWLEVEL
+ default n
+ help
+ This option enables support for detection and exposing of iSCSI
+diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
+index 132b9bae4b6a..220bbc91cebd 100644
+--- a/drivers/firmware/iscsi_ibft.c
++++ b/drivers/firmware/iscsi_ibft.c
+@@ -93,6 +93,10 @@ MODULE_DESCRIPTION("sysfs interface to BIOS iBFT information");
+ MODULE_LICENSE("GPL");
+ MODULE_VERSION(IBFT_ISCSI_VERSION);
+
++#ifndef CONFIG_ISCSI_IBFT_FIND
++struct acpi_table_ibft *ibft_addr;
++#endif
+
+ struct ibft_hdr {
+ u8 id;
+ u8 version;
+diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
+index c21e10c780ac..af40189cdb60 100644
+--- a/drivers/gpu/drm/drm_framebuffer.c
++++ b/drivers/gpu/drm/drm_framebuffer.c
+@@ -773,7 +773,7 @@ static int atomic_remove_fb(struct drm_framebuffer *fb)
+ struct drm_device *dev = fb->dev;
+ struct drm_atomic_state *state;
+ struct drm_plane *plane;
+- struct drm_connector *conn;
++ struct drm_connector *conn __maybe_unused;
+ struct drm_connector_state *conn_state;
+ int i, ret = 0;
+ unsigned plane_mask;
+diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
+index 2ff2ee7f3b78..03c592753fc3 100644
+--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
++++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
+@@ -422,8 +422,8 @@ static void glk_dsi_program_esc_clock(struct drm_device *dev,
+ else
+ txesc2_div = 10;
+
+- I915_WRITE(MIPIO_TXESC_CLK_DIV1, txesc1_div & GLK_TX_ESC_CLK_DIV1_MASK);
+- I915_WRITE(MIPIO_TXESC_CLK_DIV2, txesc2_div & GLK_TX_ESC_CLK_DIV2_MASK);
++ I915_WRITE(MIPIO_TXESC_CLK_DIV1, (1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK);
++ I915_WRITE(MIPIO_TXESC_CLK_DIV2, (1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK);
+ }
+
+ /* Program BXT Mipi clocks and dividers */
+diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
+index d03203a82e8f..51f7bcd799fa 100644
+--- a/drivers/hid/hid-sony.c
++++ b/drivers/hid/hid-sony.c
+@@ -578,10 +578,14 @@ static void sony_set_leds(struct sony_sc *sc);
+ static inline void sony_schedule_work(struct sony_sc *sc,
+ enum sony_worker which)
+ {
++ unsigned long flags;
++
+ switch (which) {
+ case SONY_WORKER_STATE:
+- if (!sc->defer_initialization)
++ spin_lock_irqsave(&sc->lock, flags);
++ if (!sc->defer_initialization && sc->state_worker_initialized)
+ schedule_work(&sc->state_worker);
++ spin_unlock_irqrestore(&sc->lock, flags);
+ break;
+ case SONY_WORKER_HOTPLUG:
+ if (sc->hotplug_worker_initialized)
+@@ -2488,13 +2492,18 @@ static inline void sony_init_output_report(struct sony_sc *sc,
+
+ static inline void sony_cancel_work_sync(struct sony_sc *sc)
+ {
++ unsigned long flags;
++
+ if (sc->hotplug_worker_initialized)
+ cancel_work_sync(&sc->hotplug_worker);
+- if (sc->state_worker_initialized)
++ if (sc->state_worker_initialized) {
++ spin_lock_irqsave(&sc->lock, flags);
++ sc->state_worker_initialized = 0;
++ spin_unlock_irqrestore(&sc->lock, flags);
+ cancel_work_sync(&sc->state_worker);
++ }
+ }
+
+-
+ static int sony_input_configured(struct hid_device *hdev,
+ struct hid_input *hidinput)
+ {
+diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
+index ca9941fa741b..7e14143ed119 100644
+--- a/drivers/hwmon/nct6775.c
++++ b/drivers/hwmon/nct6775.c
+@@ -769,7 +769,7 @@ static const u16 NCT6106_REG_TARGET[] = { 0x111, 0x121, 0x131 };
+ static const u16 NCT6106_REG_WEIGHT_TEMP_SEL[] = { 0x168, 0x178, 0x188 };
+ static const u16 NCT6106_REG_WEIGHT_TEMP_STEP[] = { 0x169, 0x179, 0x189 };
+ static const u16 NCT6106_REG_WEIGHT_TEMP_STEP_TOL[] = { 0x16a, 0x17a, 0x18a };
+-static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x17c };
++static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x18b };
+ static const u16 NCT6106_REG_WEIGHT_TEMP_BASE[] = { 0x16c, 0x17c, 0x18c };
+ static const u16 NCT6106_REG_WEIGHT_DUTY_BASE[] = { 0x16d, 0x17d, 0x18d };
+
+@@ -3592,6 +3592,7 @@ static int nct6775_probe(struct platform_device *pdev)
+ data->REG_FAN_TIME[0] = NCT6106_REG_FAN_STOP_TIME;
+ data->REG_FAN_TIME[1] = NCT6106_REG_FAN_STEP_UP_TIME;
+ data->REG_FAN_TIME[2] = NCT6106_REG_FAN_STEP_DOWN_TIME;
++ data->REG_TOLERANCE_H = NCT6106_REG_TOLERANCE_H;
+ data->REG_PWM[0] = NCT6106_REG_PWM;
+ data->REG_PWM[1] = NCT6106_REG_FAN_START_OUTPUT;
+ data->REG_PWM[2] = NCT6106_REG_FAN_STOP_OUTPUT;
+diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
+index 2876c18ed841..38ffbdb0a85f 100644
+--- a/drivers/hwmon/nct7802.c
++++ b/drivers/hwmon/nct7802.c
+@@ -768,7 +768,7 @@ static struct attribute *nct7802_in_attrs[] = {
+ &sensor_dev_attr_in3_alarm.dev_attr.attr,
+ &sensor_dev_attr_in3_beep.dev_attr.attr,
+
+- &sensor_dev_attr_in4_input.dev_attr.attr, /* 17 */
++ &sensor_dev_attr_in4_input.dev_attr.attr, /* 16 */
+ &sensor_dev_attr_in4_min.dev_attr.attr,
+ &sensor_dev_attr_in4_max.dev_attr.attr,
+ &sensor_dev_attr_in4_alarm.dev_attr.attr,
+@@ -794,9 +794,9 @@ static umode_t nct7802_in_is_visible(struct kobject *kobj,
+
+ if (index >= 6 && index < 11 && (reg & 0x03) != 0x03) /* VSEN1 */
+ return 0;
+- if (index >= 11 && index < 17 && (reg & 0x0c) != 0x0c) /* VSEN2 */
++ if (index >= 11 && index < 16 && (reg & 0x0c) != 0x0c) /* VSEN2 */
+ return 0;
+- if (index >= 17 && (reg & 0x30) != 0x30) /* VSEN3 */
++ if (index >= 16 && (reg & 0x30) != 0x30) /* VSEN3 */
+ return 0;
+
+ return attr->mode;
+diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
+index b1dd17cbce58..f8f298c33b28 100644
+--- a/drivers/iio/adc/max9611.c
++++ b/drivers/iio/adc/max9611.c
+@@ -86,7 +86,7 @@
+ #define MAX9611_TEMP_MAX_POS 0x7f80
+ #define MAX9611_TEMP_MAX_NEG 0xff80
+ #define MAX9611_TEMP_MIN_NEG 0xd980
+-#define MAX9611_TEMP_MASK GENMASK(7, 15)
++#define MAX9611_TEMP_MASK GENMASK(15, 7)
+ #define MAX9611_TEMP_SHIFT 0x07
+ #define MAX9611_TEMP_RAW(_r) ((_r) >> MAX9611_TEMP_SHIFT)
+ #define MAX9611_TEMP_SCALE_NUM 1000000
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 7c8d4baf647b..7db53eab7012 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -185,6 +185,7 @@ static const char * const smbus_pnp_ids[] = {
+ "LEN2055", /* E580 */
+ "SYN3052", /* HP EliteBook 840 G4 */
+ "SYN3221", /* HP 15-ay000 */
++ "SYN323d", /* HP Spectre X360 13-w013dx */
+ NULL
+ };
+
+diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
+index ad0e64fdba34..76f6a4f628b3 100644
+--- a/drivers/misc/Makefile
++++ b/drivers/misc/Makefile
+@@ -69,8 +69,7 @@ KCOV_INSTRUMENT_lkdtm_rodata.o := n
+
+ OBJCOPYFLAGS :=
+ OBJCOPYFLAGS_lkdtm_rodata_objcopy.o := \
+- --set-section-flags .text=alloc,readonly \
+- --rename-section .text=.rodata
++ --rename-section .text=.rodata,alloc,readonly,load
+ targets += lkdtm_rodata.o lkdtm_rodata_objcopy.o
+ $(obj)/lkdtm_rodata_objcopy.o: $(obj)/lkdtm_rodata.o FORCE
+ $(call if_changed,objcopy)
+diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c
+index fbd29f00fca0..d76fea1098e2 100644
+--- a/drivers/mmc/host/cavium.c
++++ b/drivers/mmc/host/cavium.c
+@@ -374,6 +374,7 @@ static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
+ {
+ data->bytes_xfered = data->blocks * data->blksz;
+ data->error = 0;
++ dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
+ return 1;
+ }
+
+@@ -1046,7 +1047,8 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
+ mmc->max_segs = 1;
+
+ /* DMA size field can address up to 8 MB */
+- mmc->max_seg_size = 8 * 1024 * 1024;
++ mmc->max_seg_size = min_t(unsigned int, 8 * 1024 * 1024,
++ dma_get_max_seg_size(host->dev));
+ mmc->max_req_size = mmc->max_seg_size;
+ /* External DMA is in 512 byte blocks */
+ mmc->max_blk_size = 512;
+diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
+index 602c19e23f05..786d852a70d5 100644
+--- a/drivers/net/can/rcar/rcar_canfd.c
++++ b/drivers/net/can/rcar/rcar_canfd.c
+@@ -1512,10 +1512,11 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
+
+ /* All packets processed */
+ if (num_pkts < quota) {
+- napi_complete_done(napi, num_pkts);
+- /* Enable Rx FIFO interrupts */
+- rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
+- RCANFD_RFCC_RFIE);
++ if (napi_complete_done(napi, num_pkts)) {
++ /* Enable Rx FIFO interrupts */
++ rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
++ RCANFD_RFCC_RFIE);
++ }
+ }
+ return num_pkts;
+ }
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+index 1ca76e03e965..d68c79f9a4b9 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+@@ -594,16 +594,16 @@ static int peak_usb_ndo_stop(struct net_device *netdev)
+ dev->state &= ~PCAN_USB_STATE_STARTED;
+ netif_stop_queue(netdev);
+
++ close_candev(netdev);
++
++ dev->can.state = CAN_STATE_STOPPED;
++
+ /* unlink all pending urbs and free used memory */
+ peak_usb_unlink_all_urbs(dev);
+
+ if (dev->adapter->dev_stop)
+ dev->adapter->dev_stop(dev);
+
+- close_candev(netdev);
+-
+- dev->can.state = CAN_STATE_STOPPED;
+-
+ /* can set bus off now */
+ if (dev->adapter->dev_set_bus) {
+ int err = dev->adapter->dev_set_bus(dev, 0);
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+index 53d6bb045e9e..773fc15ac3ab 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+@@ -852,7 +852,7 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
+ goto err_out;
+
+ /* allocate command buffer once for all for the interface */
+- pdev->cmd_buffer_addr = kmalloc(PCAN_UFD_CMD_BUFFER_SIZE,
++ pdev->cmd_buffer_addr = kzalloc(PCAN_UFD_CMD_BUFFER_SIZE,
+ GFP_KERNEL);
+ if (!pdev->cmd_buffer_addr)
+ goto err_out_1;
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+index bbdd6058cd2f..d85fdc6949c6 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+@@ -500,7 +500,7 @@ static int pcan_usb_pro_drv_loaded(struct peak_usb_device *dev, int loaded)
+ u8 *buffer;
+ int err;
+
+- buffer = kmalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
++ buffer = kzalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+index e9e466cae322..534c0ea7b232 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+@@ -778,7 +778,7 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
+
+ for (i = 0; i < n_profiles; i++) {
+ /* the tables start at element 3 */
+- static int pos = 3;
++ int pos = 3;
+
+ /* The EWRD profiles officially go from 2 to 4, but we
+ * save them in sar_profiles[1-3] (because we don't
+@@ -912,6 +912,22 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
+ return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
+ }
+
++static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm)
++{
++ /*
++ * The GEO_TX_POWER_LIMIT command is not supported on earlier
++ * firmware versions. Unfortunately, we don't have a TLV API
++ * flag to rely on, so rely on the major version which is in
++ * the first byte of ucode_ver. This was implemented
++ * initially on version 38 and then backported to 36, 29 and
++ * 17.
++ */
++ return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 ||
++ IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 36 ||
++ IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 ||
++ IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17;
++}
++
+ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
+ {
+ struct iwl_geo_tx_power_profiles_resp *resp;
+@@ -927,6 +943,9 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
+ .data = { &geo_cmd },
+ };
+
++ if (!iwl_mvm_sar_geo_support(mvm))
++ return -EOPNOTSUPP;
++
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
+@@ -952,13 +971,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
+ int ret, i, j;
+ u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
+
+- /*
+- * This command is not supported on earlier firmware versions.
+- * Unfortunately, we don't have a TLV API flag to rely on, so
+- * rely on the major version which is in the first byte of
+- * ucode_ver.
+- */
+- if (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) < 41)
++ if (!iwl_mvm_sar_geo_support(mvm))
+ return 0;
+
+ ret = iwl_mvm_sar_get_wgds_table(mvm);
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+index 4704137a26e0..c3a2e6b6da65 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+@@ -401,6 +401,8 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
+ DMA_TO_DEVICE);
+ }
+
++ meta->tbs = 0;
++
+ if (trans->cfg->use_tfh) {
+ struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
+
+diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
+index a76bd797e454..597af4e66325 100644
+--- a/drivers/net/wireless/marvell/mwifiex/main.h
++++ b/drivers/net/wireless/marvell/mwifiex/main.h
+@@ -122,6 +122,7 @@ enum {
+
+ #define MWIFIEX_MAX_TOTAL_SCAN_TIME (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S)
+
++#define WPA_GTK_OUI_OFFSET 2
+ #define RSN_GTK_OUI_OFFSET 2
+
+ #define MWIFIEX_OUI_NOT_PRESENT 0
+diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
+index 29284f9a0646..67c334221077 100644
+--- a/drivers/net/wireless/marvell/mwifiex/scan.c
++++ b/drivers/net/wireless/marvell/mwifiex/scan.c
+@@ -181,7 +181,8 @@ mwifiex_is_wpa_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher)
+ u8 ret = MWIFIEX_OUI_NOT_PRESENT;
+
+ if (has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC)) {
+- iebody = (struct ie_body *) bss_desc->bcn_wpa_ie->data;
++ iebody = (struct ie_body *)((u8 *)bss_desc->bcn_wpa_ie->data +
++ WPA_GTK_OUI_OFFSET);
+ oui = &mwifiex_wpa_oui[cipher][0];
+ ret = mwifiex_search_oui_in_ie(iebody, oui);
+ if (ret)
+diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
+index ab8dd81fbc2b..1a40c73961b8 100644
+--- a/drivers/s390/cio/qdio_main.c
++++ b/drivers/s390/cio/qdio_main.c
+@@ -1577,13 +1577,13 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
+ rc = qdio_kick_outbound_q(q, phys_aob);
+ } else if (need_siga_sync(q)) {
+ rc = qdio_siga_sync_q(q);
++ } else if (count < QDIO_MAX_BUFFERS_PER_Q &&
++ get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 &&
++ state == SLSB_CU_OUTPUT_PRIMED) {
++ /* The previous buffer is not processed yet, tack on. */
++ qperf_inc(q, fast_requeue);
+ } else {
+- /* try to fast requeue buffers */
+- get_buf_state(q, prev_buf(bufnr), &state, 0);
+- if (state != SLSB_CU_OUTPUT_PRIMED)
+- rc = qdio_kick_outbound_q(q, 0);
+- else
+- qperf_inc(q, fast_requeue);
++ rc = qdio_kick_outbound_q(q, 0);
+ }
+
+ /* in case of SIGA errors we must process the error immediately */
+diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
+index 1419eaea03d8..5a9e457caef3 100644
+--- a/drivers/s390/cio/vfio_ccw_cp.c
++++ b/drivers/s390/cio/vfio_ccw_cp.c
+@@ -119,8 +119,10 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
+ sizeof(*pa->pa_iova_pfn) +
+ sizeof(*pa->pa_pfn),
+ GFP_KERNEL);
+- if (unlikely(!pa->pa_iova_pfn))
++ if (unlikely(!pa->pa_iova_pfn)) {
++ pa->pa_nr = 0;
+ return -ENOMEM;
++ }
+ pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
+
+ ret = pfn_array_pin(pa, mdev);
+diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
+index 09c6a16fab93..41f5f6410163 100644
+--- a/drivers/scsi/device_handler/scsi_dh_alua.c
++++ b/drivers/scsi/device_handler/scsi_dh_alua.c
+@@ -53,6 +53,7 @@
+ #define ALUA_FAILOVER_TIMEOUT 60
+ #define ALUA_FAILOVER_RETRIES 5
+ #define ALUA_RTPG_DELAY_MSECS 5
++#define ALUA_RTPG_RETRY_DELAY 2
+
+ /* device handler flags */
+ #define ALUA_OPTIMIZE_STPG 0x01
+@@ -677,7 +678,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
+ case SCSI_ACCESS_STATE_TRANSITIONING:
+ if (time_before(jiffies, pg->expiry)) {
+ /* State transition, retry */
+- pg->interval = 2;
++ pg->interval = ALUA_RTPG_RETRY_DELAY;
+ err = SCSI_DH_RETRY;
+ } else {
+ struct alua_dh_data *h;
+@@ -802,6 +803,8 @@ static void alua_rtpg_work(struct work_struct *work)
+ spin_lock_irqsave(&pg->lock, flags);
+ pg->flags &= ~ALUA_PG_RUNNING;
+ pg->flags |= ALUA_PG_RUN_RTPG;
++ if (!pg->interval)
++ pg->interval = ALUA_RTPG_RETRY_DELAY;
+ spin_unlock_irqrestore(&pg->lock, flags);
+ queue_delayed_work(kaluad_wq, &pg->rtpg_work,
+ pg->interval * HZ);
+@@ -813,6 +816,8 @@ static void alua_rtpg_work(struct work_struct *work)
+ spin_lock_irqsave(&pg->lock, flags);
+ if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
+ pg->flags &= ~ALUA_PG_RUNNING;
++ if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG))
++ pg->interval = ALUA_RTPG_RETRY_DELAY;
+ pg->flags |= ALUA_PG_RUN_RTPG;
+ spin_unlock_irqrestore(&pg->lock, flags);
+ queue_delayed_work(kaluad_wq, &pg->rtpg_work,
+diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
+index a06b24a61622..34612add3829 100644
+--- a/drivers/scsi/ibmvscsi/ibmvfc.c
++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
+@@ -4876,8 +4876,8 @@ static int ibmvfc_remove(struct vio_dev *vdev)
+
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ ibmvfc_purge_requests(vhost, DID_ERROR);
+- ibmvfc_free_event_pool(vhost);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
++ ibmvfc_free_event_pool(vhost);
+
+ ibmvfc_free_mem(vhost);
+ spin_lock(&ibmvfc_driver_lock);
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 73acd3e9ded7..8595d83229b7 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -2976,6 +2976,7 @@ megasas_fw_crash_buffer_show(struct device *cdev,
+ u32 size;
+ unsigned long buff_addr;
+ unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
++ unsigned long chunk_left_bytes;
+ unsigned long src_addr;
+ unsigned long flags;
+ u32 buff_offset;
+@@ -3001,6 +3002,8 @@ megasas_fw_crash_buffer_show(struct device *cdev,
+ }
+
+ size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
++ chunk_left_bytes = dmachunk - (buff_offset % dmachunk);
++ size = (size > chunk_left_bytes) ? chunk_left_bytes : size;
+ size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
+
+ src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
+diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
+index 5c2cec298816..c6ce34161281 100644
+--- a/drivers/tty/tty_ldsem.c
++++ b/drivers/tty/tty_ldsem.c
+@@ -139,8 +139,7 @@ static void __ldsem_wake_readers(struct ld_semaphore *sem)
+
+ list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
+ tsk = waiter->task;
+- smp_mb();
+- waiter->task = NULL;
++ smp_store_release(&waiter->task, NULL);
+ wake_up_process(tsk);
+ put_task_struct(tsk);
+ }
+@@ -235,7 +234,7 @@ down_read_failed(struct ld_semaphore *sem, long count, long timeout)
+ for (;;) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+
+- if (!waiter.task)
++ if (!smp_load_acquire(&waiter.task))
+ break;
+ if (!timeout)
+ break;
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index 492977f78fde..62b2a7105f02 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -1811,8 +1811,6 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
+ return 0;
+
+ error:
+- if (as && as->usbm)
+- dec_usb_memory_use_count(as->usbm, &as->usbm->urb_use_count);
+ kfree(isopkt);
+ kfree(dr);
+ if (as)
+diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
+index 425c2edfd6ea..544e03452877 100644
+--- a/drivers/usb/host/xhci-rcar.c
++++ b/drivers/usb/host/xhci-rcar.c
+@@ -231,10 +231,15 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd)
+ * pointers. So, this driver clears the AC64 bit of xhci->hcc_params
+ * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
+ * xhci_gen_setup().
++ *
++ * And, since the firmware/internal CPU control the USBSTS.STS_HALT
++ * and the process speed is down when the roothub port enters U3,
++ * long delay for the handshake of STS_HALT is neeed in xhci_suspend().
+ */
+ if (xhci_rcar_is_gen2(hcd->self.controller) ||
+- xhci_rcar_is_gen3(hcd->self.controller))
+- xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
++ xhci_rcar_is_gen3(hcd->self.controller)) {
++ xhci->quirks |= XHCI_NO_64BIT_SUPPORT | XHCI_SLOW_SUSPEND;
++ }
+
+ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ return xhci_rcar_download_firmware(hcd);
+diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
+index be5881303681..43bee6dad5c9 100644
+--- a/drivers/usb/misc/iowarrior.c
++++ b/drivers/usb/misc/iowarrior.c
+@@ -870,19 +870,20 @@ static void iowarrior_disconnect(struct usb_interface *interface)
+ dev = usb_get_intfdata(interface);
+ mutex_lock(&iowarrior_open_disc_lock);
+ usb_set_intfdata(interface, NULL);
++ /* prevent device read, write and ioctl */
++ dev->present = 0;
+
+ minor = dev->minor;
++ mutex_unlock(&iowarrior_open_disc_lock);
++ /* give back our minor - this will call close() locks need to be dropped at this point*/
+
+- /* give back our minor */
+ usb_deregister_dev(interface, &iowarrior_class);
+
+ mutex_lock(&dev->mutex);
+
+ /* prevent device read, write and ioctl */
+- dev->present = 0;
+
+ mutex_unlock(&dev->mutex);
+- mutex_unlock(&iowarrior_open_disc_lock);
+
+ if (dev->opened) {
+ /* There is a process that holds a filedescriptor to the device ,
+diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
+index 8ee98bc6c468..081570677f24 100644
+--- a/drivers/usb/misc/yurex.c
++++ b/drivers/usb/misc/yurex.c
+@@ -96,7 +96,6 @@ static void yurex_delete(struct kref *kref)
+
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
+
+- usb_put_dev(dev->udev);
+ if (dev->cntl_urb) {
+ usb_kill_urb(dev->cntl_urb);
+ kfree(dev->cntl_req);
+@@ -112,6 +111,7 @@ static void yurex_delete(struct kref *kref)
+ dev->int_buffer, dev->urb->transfer_dma);
+ usb_free_urb(dev->urb);
+ }
++ usb_put_dev(dev->udev);
+ kfree(dev);
+ }
+
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index fd2d199dd413..0e1c36c92f60 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -166,7 +166,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
+ if (tcon == NULL)
+ return 0;
+
+- if (smb2_command == SMB2_TREE_CONNECT)
++ if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL)
+ return 0;
+
+ if (tcon->tidStatus == CifsExiting) {
+@@ -834,7 +834,12 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
+ else
+ req->SecurityMode = 0;
+
++#ifdef CONFIG_CIFS_DFS_UPCALL
++ req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS);
++#else
+ req->Capabilities = 0;
++#endif /* DFS_UPCALL */
++
+ req->Channel = 0; /* MBZ */
+
+ sess_data->iov[0].iov_base = (char *)req;
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 27deee5c8fa8..6409ff4876cb 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -2954,7 +2954,6 @@ static int _nfs4_do_setattr(struct inode *inode,
+ };
+ struct rpc_cred *delegation_cred = NULL;
+ unsigned long timestamp = jiffies;
+- fmode_t fmode;
+ bool truncate;
+ int status;
+
+@@ -2962,11 +2961,12 @@ static int _nfs4_do_setattr(struct inode *inode,
+
+ /* Servers should only apply open mode checks for file size changes */
+ truncate = (arg->iap->ia_valid & ATTR_SIZE) ? true : false;
+- fmode = truncate ? FMODE_WRITE : FMODE_READ;
++ if (!truncate)
++ goto zero_stateid;
+
+- if (nfs4_copy_delegation_stateid(inode, fmode, &arg->stateid, &delegation_cred)) {
++ if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) {
+ /* Use that stateid */
+- } else if (truncate && ctx != NULL) {
++ } else if (ctx != NULL && ctx->state) {
+ struct nfs_lock_context *l_ctx;
+ if (!nfs4_valid_open_stateid(ctx->state))
+ return -EBADF;
+@@ -2978,8 +2978,10 @@ static int _nfs4_do_setattr(struct inode *inode,
+ nfs_put_lock_context(l_ctx);
+ if (status == -EIO)
+ return -EBADF;
+- } else
++ } else {
++zero_stateid:
+ nfs4_stateid_copy(&arg->stateid, &zero_stateid);
++ }
+ if (delegation_cred)
+ msg.rpc_cred = delegation_cred;
+
1349 +diff --git a/include/linux/ccp.h b/include/linux/ccp.h
1350 +index 7e9c991c95e0..43ed9e77cf81 100644
1351 +--- a/include/linux/ccp.h
1352 ++++ b/include/linux/ccp.h
1353 +@@ -173,6 +173,8 @@ struct ccp_aes_engine {
1354 + enum ccp_aes_mode mode;
1355 + enum ccp_aes_action action;
1356 +
1357 ++ u32 authsize;
1358 ++
1359 + struct scatterlist *key;
1360 + u32 key_len; /* In bytes */
1361 +
1362 +diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
1363 +index 026615e242d8..c8b9d3519c8e 100644
1364 +--- a/include/linux/kvm_host.h
1365 ++++ b/include/linux/kvm_host.h
1366 +@@ -808,6 +808,7 @@ void kvm_arch_check_processor_compat(void *rtn);
1367 + int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
1368 + bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
1369 + int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
1370 ++bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
1371 +
1372 + #ifndef __KVM_HAVE_ARCH_VM_ALLOC
1373 + static inline struct kvm *kvm_arch_alloc_vm(void)
1374 +diff --git a/include/net/tcp.h b/include/net/tcp.h
1375 +index 7994e569644e..9de2c8cdcc51 100644
1376 +--- a/include/net/tcp.h
1377 ++++ b/include/net/tcp.h
1378 +@@ -1613,6 +1613,8 @@ static inline void tcp_init_send_head(struct sock *sk)
1379 + sk->sk_send_head = NULL;
1380 + }
1381 +
1382 ++static inline void tcp_init_send_head(struct sock *sk);
1383 ++
1384 + /* write queue abstraction */
1385 + static inline void tcp_write_queue_purge(struct sock *sk)
1386 + {
1387 +@@ -1621,6 +1623,7 @@ static inline void tcp_write_queue_purge(struct sock *sk)
1388 + tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
1389 + while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
1390 + sk_wmem_free_skb(sk, skb);
1391 ++ tcp_init_send_head(sk);
1392 + sk_mem_reclaim(sk);
1393 + tcp_clear_all_retrans_hints(tcp_sk(sk));
1394 + tcp_init_send_head(sk);
1395 +diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
1396 +index 392bac18398b..33a07c3badf0 100644
1397 +--- a/include/sound/compress_driver.h
1398 ++++ b/include/sound/compress_driver.h
1399 +@@ -186,10 +186,7 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
1400 + if (snd_BUG_ON(!stream))
1401 + return;
1402 +
1403 +- if (stream->direction == SND_COMPRESS_PLAYBACK)
1404 +- stream->runtime->state = SNDRV_PCM_STATE_SETUP;
1405 +- else
1406 +- stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
1407 ++ stream->runtime->state = SNDRV_PCM_STATE_SETUP;
1408 +
1409 + wake_up(&stream->runtime->sleep);
1410 + }
1411 +diff --git a/kernel/events/core.c b/kernel/events/core.c
1412 +index 3d4eb6f840eb..ea4f3f7a0c6f 100644
1413 +--- a/kernel/events/core.c
1414 ++++ b/kernel/events/core.c
1415 +@@ -10474,7 +10474,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
1416 + goto err_unlock;
1417 + }
1418 +
1419 +- perf_install_in_context(ctx, event, cpu);
1420 ++ perf_install_in_context(ctx, event, event->cpu);
1421 + perf_unpin_context(ctx);
1422 + mutex_unlock(&ctx->mutex);
1423 +
1424 +diff --git a/lib/test_firmware.c b/lib/test_firmware.c
1425 +index f978aebe60c5..2e5e18bbfd28 100644
1426 +--- a/lib/test_firmware.c
1427 ++++ b/lib/test_firmware.c
1428 +@@ -895,8 +895,11 @@ static int __init test_firmware_init(void)
1429 + return -ENOMEM;
1430 +
1431 + rc = __test_firmware_config_init();
1432 +- if (rc)
1433 ++ if (rc) {
1434 ++ kfree(test_fw_config);
1435 ++ pr_err("could not init firmware test config: %d\n", rc);
1436 + return rc;
1437 ++ }
1438 +
1439 + rc = misc_register(&test_fw_misc_device);
1440 + if (rc) {
1441 +diff --git a/mm/vmalloc.c b/mm/vmalloc.c
1442 +index 6c906f6f16cc..0b8852d80f44 100644
1443 +--- a/mm/vmalloc.c
1444 ++++ b/mm/vmalloc.c
1445 +@@ -1765,6 +1765,12 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
1446 + if (!addr)
1447 + return NULL;
1448 +
1449 ++ /*
1450 ++ * First make sure the mappings are removed from all page-tables
1451 ++ * before they are freed.
1452 ++ */
1453 ++ vmalloc_sync_all();
1454 ++
1455 + /*
1456 + * In this function, newly allocated vm_struct has VM_UNINITIALIZED
1457 + * flag. It means that vm_struct is not fully initialized.
1458 +@@ -2314,6 +2320,9 @@ EXPORT_SYMBOL(remap_vmalloc_range);
1459 + /*
1460 + * Implement a stub for vmalloc_sync_all() if the architecture chose not to
1461 + * have one.
1462 ++ *
1463 ++ * The purpose of this function is to make sure the vmalloc area
1464 ++ * mappings are identical in all page-tables in the system.
1465 + */
1466 + void __weak vmalloc_sync_all(void)
1467 + {
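
The __weak stub is what each architecture may override: the linker prefers a strong definition of the same name when one exists and falls back to the weak one otherwise. A freestanding sketch of the mechanism using the gcc/clang attribute (the function name is made up):

    /* Default, used when no architecture provides its own copy. */
    __attribute__((weak)) void sync_page_tables(void)
    {
        /* nothing to do where kernel mappings are already shared */
    }

    /* An arch file can supply a strong definition with the same name:
     *
     *     void sync_page_tables(void) { ... walk and sync pgds ... }
     *
     * and the linker silently prefers it over the weak stub.
     */
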
1468 +diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
1469 +index 37fb9552e858..341d1bd637af 100644
1470 +--- a/net/ipv4/netfilter/ipt_rpfilter.c
1471 ++++ b/net/ipv4/netfilter/ipt_rpfilter.c
1472 +@@ -96,6 +96,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
1473 + flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
1474 + flow.flowi4_tos = RT_TOS(iph->tos);
1475 + flow.flowi4_scope = RT_SCOPE_UNIVERSE;
1476 ++ flow.flowi4_oif = l3mdev_master_ifindex_rcu(xt_in(par));
1477 +
1478 + return rpfilter_lookup_reverse(xt_net(par), &flow, xt_in(par), info->flags) ^ invert;
1479 + }
1480 +diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
1481 +index 40eb16bd9786..d535768bea0f 100644
1482 +--- a/net/ipv6/netfilter/ip6t_rpfilter.c
1483 ++++ b/net/ipv6/netfilter/ip6t_rpfilter.c
1484 +@@ -58,7 +58,9 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
1485 + if (rpfilter_addr_linklocal(&iph->saddr)) {
1486 + lookup_flags |= RT6_LOOKUP_F_IFACE;
1487 + fl6.flowi6_oif = dev->ifindex;
1488 +- } else if ((flags & XT_RPFILTER_LOOSE) == 0)
1489 ++ /* Set flowi6_oif for vrf devices to lookup route in l3mdev domain. */
1490 ++ } else if (netif_is_l3_master(dev) || netif_is_l3_slave(dev) ||
1491 ++ (flags & XT_RPFILTER_LOOSE) == 0)
1492 + fl6.flowi6_oif = dev->ifindex;
1493 +
1494 + rt = (void *) ip6_route_lookup(net, &fl6, lookup_flags);
1495 +@@ -73,7 +75,9 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
1496 + goto out;
1497 + }
1498 +
1499 +- if (rt->rt6i_idev->dev == dev || (flags & XT_RPFILTER_LOOSE))
1500 ++ if (rt->rt6i_idev->dev == dev ||
1501 ++ l3mdev_master_ifindex_rcu(rt->rt6i_idev->dev) == dev->ifindex ||
1502 ++ (flags & XT_RPFILTER_LOOSE))
1503 + ret = true;
1504 + out:
1505 + ip6_rt_put(rt);
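
Both rpfilter hunks apply the same idea to IPv4 and IPv6: traffic arriving through a VRF (l3mdev) master or slave must have its reverse-path lookup steered into the VRF's routing domain via the output-interface key, while non-VRF traffic keeps the old behaviour. Compressed into a stand-alone helper (types heavily simplified; only l3mdev_master_ifindex_rcu() and the netif_is_l3_* predicates are real kernel names):

    struct netdev { int ifindex; int is_l3_master; int is_l3_slave; };

    /* Pick the ifindex to put into the flow key before the reverse
     * lookup; 0 means "unconstrained".
     */
    static int rpfilter_lookup_oif(const struct netdev *in,
                                   int master_ifindex, int loose)
    {
        /* VRF master or slave: force the lookup into the l3mdev domain. */
        if (in->is_l3_master || in->is_l3_slave)
            return master_ifindex ? master_ifindex : in->ifindex;
        /* Strict mode pins the lookup to the input device, as before. */
        return loose ? 0 : in->ifindex;
    }
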
1506 +diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
1507 +index bb886e7db47f..f783d1377d9a 100644
1508 +--- a/net/mac80211/driver-ops.c
1509 ++++ b/net/mac80211/driver-ops.c
1510 +@@ -169,11 +169,16 @@ int drv_conf_tx(struct ieee80211_local *local,
1511 + if (!check_sdata_in_driver(sdata))
1512 + return -EIO;
1513 +
1514 +- if (WARN_ONCE(params->cw_min == 0 ||
1515 +- params->cw_min > params->cw_max,
1516 +- "%s: invalid CW_min/CW_max: %d/%d\n",
1517 +- sdata->name, params->cw_min, params->cw_max))
1518 ++ if (params->cw_min == 0 || params->cw_min > params->cw_max) {
1519 ++ /*
1520 ++ * If we can't configure hardware anyway, don't warn. We may
1521 ++ * never have initialized the CW parameters.
1522 ++ */
1523 ++ WARN_ONCE(local->ops->conf_tx,
1524 ++ "%s: invalid CW_min/CW_max: %d/%d\n",
1525 ++ sdata->name, params->cw_min, params->cw_max);
1526 + return -EINVAL;
1527 ++ }
1528 +
1529 + trace_drv_conf_tx(local, sdata, ac, params);
1530 + if (local->ops->conf_tx)
1531 +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
1532 +index 960a57f7c983..d91db72b9e9e 100644
1533 +--- a/net/mac80211/mlme.c
1534 ++++ b/net/mac80211/mlme.c
1535 +@@ -1867,6 +1867,16 @@ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local,
1536 + }
1537 + }
1538 +
1539 ++ /* WMM specification requires all 4 ACIs. */
1540 ++ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
1541 ++ if (params[ac].cw_min == 0) {
1542 ++ sdata_info(sdata,
1543 ++ "AP has invalid WMM params (missing AC %d), using defaults\n",
1544 ++ ac);
1545 ++ return false;
1546 ++ }
1547 ++ }
1548 ++
1549 + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
1550 + mlme_dbg(sdata,
1551 + "WMM AC=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d, downgraded=%d\n",
1552 +diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
1553 +index 733d3e4a30d8..2cee032af46d 100644
1554 +--- a/net/netfilter/nfnetlink.c
1555 ++++ b/net/netfilter/nfnetlink.c
1556 +@@ -530,7 +530,7 @@ static int nfnetlink_bind(struct net *net, int group)
1557 + ss = nfnetlink_get_subsys(type << 8);
1558 + rcu_read_unlock();
1559 + if (!ss)
1560 +- request_module("nfnetlink-subsys-%d", type);
1561 ++ request_module_nowait("nfnetlink-subsys-%d", type);
1562 + return 0;
1563 + }
1564 + #endif
1565 +diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
1566 +index 24f2f7567ddb..010a565b4000 100644
1567 +--- a/net/netfilter/nft_hash.c
1568 ++++ b/net/netfilter/nft_hash.c
1569 +@@ -131,7 +131,7 @@ static int nft_symhash_init(const struct nft_ctx *ctx,
1570 + priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]);
1571 +
1572 + priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS]));
1573 +- if (priv->modulus <= 1)
1574 ++ if (priv->modulus < 1)
1575 + return -ERANGE;
1576 +
1577 + if (priv->offset + priv->modulus - 1 < priv->offset)
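
The untouched context line after the relaxed range check does real work: offset + modulus - 1 < offset is the usual unsigned-wraparound test, rejecting pairs whose largest possible hash value would not fit in 32 bits. In isolation (assumes modulus >= 1, which the preceding check guarantees):

    #include <stdint.h>
    #include <stdbool.h>

    /* Hash output lies in [offset, offset + modulus - 1]; reject the
     * pair if that upper bound wraps past UINT32_MAX.
     */
    static bool hash_range_fits(uint32_t offset, uint32_t modulus)
    {
        return offset + modulus - 1 >= offset;  /* false on wraparound */
    }
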
1578 +diff --git a/scripts/sphinx-pre-install b/scripts/sphinx-pre-install
1579 +index 067459760a7b..3524dbc31316 100755
1580 +--- a/scripts/sphinx-pre-install
1581 ++++ b/scripts/sphinx-pre-install
1582 +@@ -301,7 +301,7 @@ sub give_redhat_hints()
1583 + #
1584 + # Checks valid for RHEL/CentOS version 7.x.
1585 + #
1586 +- if (! $system_release =~ /Fedora/) {
1587 ++ if (!($system_release =~ /Fedora/)) {
1588 + $map{"virtualenv"} = "python-virtualenv";
1589 + }
1590 +
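
The Perl fix is a pure precedence repair: without parentheses the negation binds to $system_release first, so the pattern was matched against the wrong value. C has the identical trap with ! and comparison operators; a compilable demonstration:

    #include <stdio.h>

    int main(void)
    {
        int release = 7;

        /* ! binds tighter than ==, so this tests (!release) == 7,
         * which is never true - the same trap the sphinx-pre-install
         * hunk fixes in Perl with explicit parentheses.
         */
        if (!release == 7)
            printf("unreachable\n");
        if (!(release == 7))
            printf("correct negation\n");
        return 0;
    }
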
1591 +diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
1592 +index 555df64d46ff..2e2d18468491 100644
1593 +--- a/sound/core/compress_offload.c
1594 ++++ b/sound/core/compress_offload.c
1595 +@@ -575,10 +575,7 @@ snd_compr_set_params(struct snd_compr_stream *stream, unsigned long arg)
1596 + stream->metadata_set = false;
1597 + stream->next_track = false;
1598 +
1599 +- if (stream->direction == SND_COMPRESS_PLAYBACK)
1600 +- stream->runtime->state = SNDRV_PCM_STATE_SETUP;
1601 +- else
1602 +- stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
1603 ++ stream->runtime->state = SNDRV_PCM_STATE_SETUP;
1604 + } else {
1605 + return -EPERM;
1606 + }
1607 +@@ -694,8 +691,17 @@ static int snd_compr_start(struct snd_compr_stream *stream)
1608 + {
1609 + int retval;
1610 +
1611 +- if (stream->runtime->state != SNDRV_PCM_STATE_PREPARED)
1612 ++ switch (stream->runtime->state) {
1613 ++ case SNDRV_PCM_STATE_SETUP:
1614 ++ if (stream->direction != SND_COMPRESS_CAPTURE)
1615 ++ return -EPERM;
1616 ++ break;
1617 ++ case SNDRV_PCM_STATE_PREPARED:
1618 ++ break;
1619 ++ default:
1620 + return -EPERM;
1621 ++ }
1622 ++
1623 + retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START);
1624 + if (!retval)
1625 + stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
1626 +@@ -706,9 +712,15 @@ static int snd_compr_stop(struct snd_compr_stream *stream)
1627 + {
1628 + int retval;
1629 +
1630 +- if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
1631 +- stream->runtime->state == SNDRV_PCM_STATE_SETUP)
1632 ++ switch (stream->runtime->state) {
1633 ++ case SNDRV_PCM_STATE_OPEN:
1634 ++ case SNDRV_PCM_STATE_SETUP:
1635 ++ case SNDRV_PCM_STATE_PREPARED:
1636 + return -EPERM;
1637 ++ default:
1638 ++ break;
1639 ++ }
1640 ++
1641 + retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
1642 + if (!retval) {
1643 + snd_compr_drain_notify(stream);
1644 +@@ -796,9 +808,17 @@ static int snd_compr_drain(struct snd_compr_stream *stream)
1645 + {
1646 + int retval;
1647 +
1648 +- if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
1649 +- stream->runtime->state == SNDRV_PCM_STATE_SETUP)
1650 ++ switch (stream->runtime->state) {
1651 ++ case SNDRV_PCM_STATE_OPEN:
1652 ++ case SNDRV_PCM_STATE_SETUP:
1653 ++ case SNDRV_PCM_STATE_PREPARED:
1654 ++ case SNDRV_PCM_STATE_PAUSED:
1655 + return -EPERM;
1656 ++ case SNDRV_PCM_STATE_XRUN:
1657 ++ return -EPIPE;
1658 ++ default:
1659 ++ break;
1660 ++ }
1661 +
1662 + retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
1663 + if (retval) {
1664 +@@ -818,6 +838,10 @@ static int snd_compr_next_track(struct snd_compr_stream *stream)
1665 + if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
1666 + return -EPERM;
1667 +
1668 ++ /* next track doesn't have any meaning for capture streams */
1669 ++ if (stream->direction == SND_COMPRESS_CAPTURE)
1670 ++ return -EPERM;
1671 ++
1672 + /* you can signal next track if this is intended to be a gapless stream
1673 + * and current track metadata is set
1674 + */
1675 +@@ -835,9 +859,23 @@ static int snd_compr_next_track(struct snd_compr_stream *stream)
1676 + static int snd_compr_partial_drain(struct snd_compr_stream *stream)
1677 + {
1678 + int retval;
1679 +- if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
1680 +- stream->runtime->state == SNDRV_PCM_STATE_SETUP)
1681 ++
1682 ++ switch (stream->runtime->state) {
1683 ++ case SNDRV_PCM_STATE_OPEN:
1684 ++ case SNDRV_PCM_STATE_SETUP:
1685 ++ case SNDRV_PCM_STATE_PREPARED:
1686 ++ case SNDRV_PCM_STATE_PAUSED:
1687 ++ return -EPERM;
1688 ++ case SNDRV_PCM_STATE_XRUN:
1689 ++ return -EPIPE;
1690 ++ default:
1691 ++ break;
1692 ++ }
1693 ++
1694 ++ /* partial drain doesn't have any meaning for capture streams */
1695 ++ if (stream->direction == SND_COMPRESS_CAPTURE)
1696 + return -EPERM;
1697 ++
1698 + /* stream can be drained only when next track has been signalled */
1699 + if (stream->next_track == false)
1700 + return -EPERM;
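
Rewritten as switches, the trigger permissions become an explicit state table. Condensed from the hunks above into a stand-alone form (-1 standing in for -EPERM, -2 for -EPIPE):

    enum st { ST_OPEN, ST_SETUP, ST_PREPARED, ST_RUNNING,
              ST_PAUSED, ST_XRUN, ST_DRAINING };

    static int can_start(enum st s, int capture)
    {
        if (s == ST_PREPARED)
            return 0;
        if (s == ST_SETUP && capture)
            return 0;           /* capture skips PREPARED entirely */
        return -1;
    }

    /* drain and partial drain; snd_compr_stop() is similar but
     * rejects only OPEN/SETUP/PREPARED.
     */
    static int can_drain(enum st s)
    {
        switch (s) {
        case ST_OPEN: case ST_SETUP: case ST_PREPARED: case ST_PAUSED:
            return -1;
        case ST_XRUN:
            return -2;
        default:
            return 0;           /* RUNNING or DRAINING */
        }
    }
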
1701 +diff --git a/sound/firewire/packets-buffer.c b/sound/firewire/packets-buffer.c
1702 +index ea1506679c66..3b09b8ef3a09 100644
1703 +--- a/sound/firewire/packets-buffer.c
1704 ++++ b/sound/firewire/packets-buffer.c
1705 +@@ -37,7 +37,7 @@ int iso_packets_buffer_init(struct iso_packets_buffer *b, struct fw_unit *unit,
1706 + packets_per_page = PAGE_SIZE / packet_size;
1707 + if (WARN_ON(!packets_per_page)) {
1708 + err = -EINVAL;
1709 +- goto error;
1710 ++ goto err_packets;
1711 + }
1712 + pages = DIV_ROUND_UP(count, packets_per_page);
1713 +
1714 +diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
1715 +index a12e594d4e3b..a41c1bec7c88 100644
1716 +--- a/sound/pci/hda/hda_controller.c
1717 ++++ b/sound/pci/hda/hda_controller.c
1718 +@@ -609,11 +609,9 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
1719 + }
1720 + runtime->private_data = azx_dev;
1721 +
1722 +- if (chip->gts_present)
1723 +- azx_pcm_hw.info = azx_pcm_hw.info |
1724 +- SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
1725 +-
1726 + runtime->hw = azx_pcm_hw;
1727 ++ if (chip->gts_present)
1728 ++ runtime->hw.info |= SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
1729 + runtime->hw.channels_min = hinfo->channels_min;
1730 + runtime->hw.channels_max = hinfo->channels_max;
1731 + runtime->hw.formats = hinfo->formats;
1732 +@@ -626,6 +624,13 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
1733 + 20,
1734 + 178000000);
1735 +
1736 ++ /* by some reason, the playback stream stalls on PulseAudio with
1737 ++ * tsched=1 when a capture stream triggers. Until we figure out the
1738 ++ * real cause, disable tsched mode by telling the PCM info flag.
1739 ++ */
1740 ++ if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND)
1741 ++ runtime->hw.info |= SNDRV_PCM_INFO_BATCH;
1742 ++
1743 + if (chip->align_buffer_size)
1744 + /* constrain buffer sizes to be multiple of 128
1745 + bytes. This is more efficient in terms of memory
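
The reorder in azx_pcm_open() fixes shared-state leakage: OR-ing the flag into the global azx_pcm_hw template made it sticky for every stream opened afterwards; copying the template into runtime->hw first and flagging only the copy keeps the change per-stream. The pattern generically:

    struct hw_caps { unsigned info; };

    static const struct hw_caps default_caps = { .info = 0x3 };

    static void open_stream(struct hw_caps *rt, int wants_extra_flag)
    {
        *rt = default_caps;         /* copy the shared template... */
        if (wants_extra_flag)
            rt->info |= 0x100;      /* ...then mutate only the copy */
    }
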
1746 +diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
1747 +index 53c3cd28bc99..8a9dd4767b1e 100644
1748 +--- a/sound/pci/hda/hda_controller.h
1749 ++++ b/sound/pci/hda/hda_controller.h
1750 +@@ -40,7 +40,7 @@
1751 + /* 14 unused */
1752 + #define AZX_DCAPS_CTX_WORKAROUND (1 << 15) /* X-Fi workaround */
1753 + #define AZX_DCAPS_POSFIX_LPIB (1 << 16) /* Use LPIB as default */
1754 +-/* 17 unused */
1755 ++#define AZX_DCAPS_AMD_WORKAROUND (1 << 17) /* AMD-specific workaround */
1756 + #define AZX_DCAPS_NO_64BIT (1 << 18) /* No 64bit address */
1757 + #define AZX_DCAPS_SYNC_WRITE (1 << 19) /* sync each cmd write */
1758 + #define AZX_DCAPS_OLD_SSYNC (1 << 20) /* Old SSYNC reg for ICH */
1759 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
1760 +index d349f69ef03c..4631579e1e18 100644
1761 +--- a/sound/pci/hda/hda_intel.c
1762 ++++ b/sound/pci/hda/hda_intel.c
1763 +@@ -78,6 +78,7 @@ enum {
1764 + POS_FIX_VIACOMBO,
1765 + POS_FIX_COMBO,
1766 + POS_FIX_SKL,
1767 ++ POS_FIX_FIFO,
1768 + };
1769 +
1770 + /* Defines for ATI HD Audio support in SB450 south bridge */
1771 +@@ -149,7 +150,7 @@ module_param_array(model, charp, NULL, 0444);
1772 + MODULE_PARM_DESC(model, "Use the given board model.");
1773 + module_param_array(position_fix, int, NULL, 0444);
1774 + MODULE_PARM_DESC(position_fix, "DMA pointer read method."
1775 +- "(-1 = system default, 0 = auto, 1 = LPIB, 2 = POSBUF, 3 = VIACOMBO, 4 = COMBO, 5 = SKL+).");
1776 ++ "(-1 = system default, 0 = auto, 1 = LPIB, 2 = POSBUF, 3 = VIACOMBO, 4 = COMBO, 5 = SKL+, 6 = FIFO).");
1777 + module_param_array(bdl_pos_adj, int, NULL, 0644);
1778 + MODULE_PARM_DESC(bdl_pos_adj, "BDL position adjustment offset.");
1779 + module_param_array(probe_mask, int, NULL, 0444);
1780 +@@ -350,6 +351,11 @@ enum {
1781 + #define AZX_DCAPS_PRESET_ATI_HDMI_NS \
1782 + (AZX_DCAPS_PRESET_ATI_HDMI | AZX_DCAPS_SNOOP_OFF)
1783 +
1784 ++/* quirks for AMD SB */
1785 ++#define AZX_DCAPS_PRESET_AMD_SB \
1786 ++ (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_AMD_WORKAROUND |\
1787 ++ AZX_DCAPS_SNOOP_TYPE(ATI) | AZX_DCAPS_PM_RUNTIME)
1788 ++
1789 + /* quirks for Nvidia */
1790 + #define AZX_DCAPS_PRESET_NVIDIA \
1791 + (AZX_DCAPS_NO_MSI | AZX_DCAPS_CORBRP_SELF_CLEAR |\
1792 +@@ -917,6 +923,49 @@ static unsigned int azx_via_get_position(struct azx *chip,
1793 + return bound_pos + mod_dma_pos;
1794 + }
1795 +
1796 ++#define AMD_FIFO_SIZE 32
1797 ++
1798 ++/* get the current DMA position with FIFO size correction */
1799 ++static unsigned int azx_get_pos_fifo(struct azx *chip, struct azx_dev *azx_dev)
1800 ++{
1801 ++ struct snd_pcm_substream *substream = azx_dev->core.substream;
1802 ++ struct snd_pcm_runtime *runtime = substream->runtime;
1803 ++ unsigned int pos, delay;
1804 ++
1805 ++ pos = snd_hdac_stream_get_pos_lpib(azx_stream(azx_dev));
1806 ++ if (!runtime)
1807 ++ return pos;
1808 ++
1809 ++ runtime->delay = AMD_FIFO_SIZE;
1810 ++ delay = frames_to_bytes(runtime, AMD_FIFO_SIZE);
1811 ++ if (azx_dev->insufficient) {
1812 ++ if (pos < delay) {
1813 ++ delay = pos;
1814 ++ runtime->delay = bytes_to_frames(runtime, pos);
1815 ++ } else {
1816 ++ azx_dev->insufficient = 0;
1817 ++ }
1818 ++ }
1819 ++
1820 ++ /* correct the DMA position for capture stream */
1821 ++ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
1822 ++ if (pos < delay)
1823 ++ pos += azx_dev->core.bufsize;
1824 ++ pos -= delay;
1825 ++ }
1826 ++
1827 ++ return pos;
1828 ++}
1829 ++
1830 ++static int azx_get_delay_from_fifo(struct azx *chip, struct azx_dev *azx_dev,
1831 ++ unsigned int pos)
1832 ++{
1833 ++ struct snd_pcm_substream *substream = azx_dev->core.substream;
1834 ++
1835 ++ /* just read back the calculated value in the above */
1836 ++ return substream->runtime->delay;
1837 ++}
1838 ++
1839 + static unsigned int azx_skl_get_dpib_pos(struct azx *chip,
1840 + struct azx_dev *azx_dev)
1841 + {
1842 +@@ -1484,6 +1533,7 @@ static int check_position_fix(struct azx *chip, int fix)
1843 + case POS_FIX_VIACOMBO:
1844 + case POS_FIX_COMBO:
1845 + case POS_FIX_SKL:
1846 ++ case POS_FIX_FIFO:
1847 + return fix;
1848 + }
1849 +
1850 +@@ -1500,6 +1550,10 @@ static int check_position_fix(struct azx *chip, int fix)
1851 + dev_dbg(chip->card->dev, "Using VIACOMBO position fix\n");
1852 + return POS_FIX_VIACOMBO;
1853 + }
1854 ++ if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND) {
1855 ++ dev_dbg(chip->card->dev, "Using FIFO position fix\n");
1856 ++ return POS_FIX_FIFO;
1857 ++ }
1858 + if (chip->driver_caps & AZX_DCAPS_POSFIX_LPIB) {
1859 + dev_dbg(chip->card->dev, "Using LPIB position fix\n");
1860 + return POS_FIX_LPIB;
1861 +@@ -1520,6 +1574,7 @@ static void assign_position_fix(struct azx *chip, int fix)
1862 + [POS_FIX_VIACOMBO] = azx_via_get_position,
1863 + [POS_FIX_COMBO] = azx_get_pos_lpib,
1864 + [POS_FIX_SKL] = azx_get_pos_skl,
1865 ++ [POS_FIX_FIFO] = azx_get_pos_fifo,
1866 + };
1867 +
1868 + chip->get_position[0] = chip->get_position[1] = callbacks[fix];
1869 +@@ -1534,6 +1589,9 @@ static void assign_position_fix(struct azx *chip, int fix)
1870 + azx_get_delay_from_lpib;
1871 + }
1872 +
1873 ++ if (fix == POS_FIX_FIFO)
1874 ++ chip->get_delay[0] = chip->get_delay[1] =
1875 ++ azx_get_delay_from_fifo;
1876 + }
1877 +
1878 + /*
1879 +@@ -2516,6 +2574,9 @@ static const struct pci_device_id azx_ids[] = {
1880 + /* AMD Hudson */
1881 + { PCI_DEVICE(0x1022, 0x780d),
1882 + .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
1883 ++ /* AMD, X370 & co */
1884 ++ { PCI_DEVICE(0x1022, 0x1457),
1885 ++ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
1886 + /* AMD Stoney */
1887 + { PCI_DEVICE(0x1022, 0x157a),
1888 + .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
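
azx_get_pos_fifo() compensates for the controller FIFO: the link position register runs ahead of the data actually moved, so for capture the reported position is pulled back by the FIFO depth, with underflow folded back around the ring buffer. The core arithmetic on its own (a toy stand-in; positions and sizes in bytes):

    static unsigned int capture_pos_fifo(unsigned int lpib_pos,
                                         unsigned int fifo_bytes,
                                         unsigned int bufsize)
    {
        /* Step back by the FIFO depth, wrapping around the ring
         * buffer instead of underflowing the unsigned position.
         */
        if (lpib_pos < fifo_bytes)
            lpib_pos += bufsize;
        return lpib_pos - fifo_bytes;
    }
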
1889 +diff --git a/sound/sound_core.c b/sound/sound_core.c
1890 +index 99b73c675743..20d4e2e1bacf 100644
1891 +--- a/sound/sound_core.c
1892 ++++ b/sound/sound_core.c
1893 +@@ -287,7 +287,8 @@ retry:
1894 + goto retry;
1895 + }
1896 + spin_unlock(&sound_loader_lock);
1897 +- return -EBUSY;
1898 ++ r = -EBUSY;
1899 ++ goto fail;
1900 + }
1901 + }
1902 +
1903 +diff --git a/tools/perf/arch/s390/util/machine.c b/tools/perf/arch/s390/util/machine.c
1904 +index a19690a17291..c8c86a0c9b79 100644
1905 +--- a/tools/perf/arch/s390/util/machine.c
1906 ++++ b/tools/perf/arch/s390/util/machine.c
1907 +@@ -6,8 +6,9 @@
1908 + #include "machine.h"
1909 + #include "api/fs/fs.h"
1910 + #include "debug.h"
1911 ++#include "symbol.h"
1912 +
1913 +-int arch__fix_module_text_start(u64 *start, const char *name)
1914 ++int arch__fix_module_text_start(u64 *start, u64 *size, const char *name)
1915 + {
1916 + u64 m_start = *start;
1917 + char path[PATH_MAX];
1918 +@@ -17,7 +18,35 @@ int arch__fix_module_text_start(u64 *start, const char *name)
1919 + if (sysfs__read_ull(path, (unsigned long long *)start) < 0) {
1920 + pr_debug2("Using module %s start:%#lx\n", path, m_start);
1921 + *start = m_start;
1922 ++ } else {
1923 ++ /* Successful read of the modules segment text start address.
1924 ++ * Calculate difference between module start address
1925 ++ * in memory and module text segment start address.
1926 ++ * For example module load address is 0x3ff8011b000
1927 ++ * (from /proc/modules) and module text segment start
1928 ++ * address is 0x3ff8011b870 (from file above).
1929 ++ *
1930 ++ * Adjust the module size and subtract the GOT table
1931 ++ * size located at the beginning of the module.
1932 ++ */
1933 ++ *size -= (*start - m_start);
1934 + }
1935 +
1936 + return 0;
1937 + }
1938 ++
1939 ++/* On s390 kernel text segment start is located at very low memory addresses,
1940 ++ * for example 0x10000. Modules are located at very high memory addresses,
1941 ++ * for example 0x3ff xxxx xxxx. The gap between end of kernel text segment
1942 ++ * and beginning of first module's text segment is very big.
1943 ++ * Therefore do not fill this gap and do not assign it to the kernel dso map.
1944 ++ */
1945 ++void arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
1946 ++{
1947 ++ if (strchr(p->name, '[') == NULL && strchr(c->name, '['))
1948 ++ /* Last kernel symbol mapped to end of page */
1949 ++ p->end = roundup(p->end, page_size);
1950 ++ else
1951 ++ p->end = c->start;
1952 ++ pr_debug4("%s sym:%s end:%#lx\n", __func__, p->name, p->end);
1953 ++}
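
Given the module load address from /proc/modules (m_start) and the text segment start read from sysfs, the size correction is one subtraction: the bytes between the two, the GOT at the head of the module (0x870 in the comment's example), no longer count as text. As plain arithmetic:

    #include <stdint.h>

    /* e.g. load = 0x3ff8011b000, text = 0x3ff8011b870:
     * the text size shrinks by 0x870.
     */
    static uint64_t fixup_module_text_size(uint64_t load_addr,
                                           uint64_t text_start,
                                           uint64_t size)
    {
        return size - (text_start - load_addr);
    }
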
1954 +diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
1955 +index c0065923a525..e1ac51aaedcf 100644
1956 +--- a/tools/perf/builtin-probe.c
1957 ++++ b/tools/perf/builtin-probe.c
1958 +@@ -712,6 +712,16 @@ __cmd_probe(int argc, const char **argv)
1959 +
1960 + ret = perf_add_probe_events(params.events, params.nevents);
1961 + if (ret < 0) {
1962 ++
1963 ++ /*
1964 ++ * When perf_add_probe_events() fails it calls
1965 ++ * cleanup_perf_probe_events(pevs, npevs), i.e.
1966 ++ * cleanup_perf_probe_events(params.events, params.nevents), which
1967 ++ * will call clear_perf_probe_event(), so set nevents to zero
1968 ++ * to avoid cleanup_params() to call clear_perf_probe_event() again
1969 ++ * on the same pevs.
1970 ++ */
1971 ++ params.nevents = 0;
1972 + pr_err_with_code(" Error: Failed to add events.", ret);
1973 + return ret;
1974 + }
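
The fix encodes an ownership rule: once the callee's error path has freed the per-event data, the caller's own exit path must not walk the same array again, and zeroing the element count is the cheapest way to say so. In miniature (names hypothetical):

    #include <stdlib.h>

    struct params { void **events; int nevents; };

    static void cleanup_events(void **events, int n)
    {
        for (int i = 0; i < n; i++)
            free(events[i]);
    }

    static int add_events(struct params *p)
    {
        int ret = -1;           /* pretend the operation failed */

        if (ret < 0) {
            cleanup_events(p->events, p->nevents);
            p->nevents = 0;     /* later cleanup now walks nothing */
        }
        return ret;
    }
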
1975 +diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
1976 +index 26437143c940..c892a28e7b04 100644
1977 +--- a/tools/perf/util/header.c
1978 ++++ b/tools/perf/util/header.c
1979 +@@ -3081,7 +3081,7 @@ int perf_event__process_feature(struct perf_tool *tool,
1980 + return 0;
1981 +
1982 + ff.buf = (void *)fe->data;
1983 +- ff.size = event->header.size - sizeof(event->header);
1984 ++ ff.size = event->header.size - sizeof(*fe);
1985 + ff.ph = &session->header;
1986 +
1987 + if (feat_ops[feat].process(&ff, NULL))
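
The sizing bug is a flexible-array-member classic: the fixed part of the event is the header plus feat_id, so the payload length must subtract sizeof(*fe), not just the header. A stand-in struct shows the difference:

    #include <stddef.h>
    #include <stdint.h>

    struct feat_event {     /* stand-in for perf's feature event */
        struct { uint32_t type; uint16_t misc, size; } header;
        uint64_t feat_id;
        char data[];        /* payload starts after feat_id too */
    };

    /* sizeof(*fe) covers header *and* feat_id; subtracting only
     * sizeof(fe->header) overstates the payload by 8 bytes.
     */
    static size_t payload_len(const struct feat_event *fe)
    {
        return fe->header.size - sizeof(*fe);
    }
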
1988 +diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
1989 +index d246080cd85e..5145c6a84c47 100644
1990 +--- a/tools/perf/util/machine.c
1991 ++++ b/tools/perf/util/machine.c
1992 +@@ -1233,6 +1233,7 @@ static int machine__set_modules_path(struct machine *machine)
1993 + return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
1994 + }
1995 + int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
1996 ++ u64 *size __maybe_unused,
1997 + const char *name __maybe_unused)
1998 + {
1999 + return 0;
2000 +@@ -1244,7 +1245,7 @@ static int machine__create_module(void *arg, const char *name, u64 start,
2001 + struct machine *machine = arg;
2002 + struct map *map;
2003 +
2004 +- if (arch__fix_module_text_start(&start, name) < 0)
2005 ++ if (arch__fix_module_text_start(&start, &size, name) < 0)
2006 + return -1;
2007 +
2008 + map = machine__findnew_module_map(machine, start, name);
2009 +diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
2010 +index 13041b036a5b..ec339cdf854c 100644
2011 +--- a/tools/perf/util/machine.h
2012 ++++ b/tools/perf/util/machine.h
2013 +@@ -213,7 +213,7 @@ struct symbol *machine__find_kernel_function_by_name(struct machine *machine,
2014 +
2015 + struct map *machine__findnew_module_map(struct machine *machine, u64 start,
2016 + const char *filename);
2017 +-int arch__fix_module_text_start(u64 *start, const char *name);
2018 ++int arch__fix_module_text_start(u64 *start, u64 *size, const char *name);
2019 +
2020 + int __machine__load_kallsyms(struct machine *machine, const char *filename,
2021 + enum map_type type, bool no_kcore);
2022 +diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
2023 +index 3936f69f385c..27bffcb213eb 100644
2024 +--- a/tools/perf/util/symbol.c
2025 ++++ b/tools/perf/util/symbol.c
2026 +@@ -93,6 +93,11 @@ static int prefix_underscores_count(const char *str)
2027 + return tail - str;
2028 + }
2029 +
2030 ++void __weak arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
2031 ++{
2032 ++ p->end = c->start;
2033 ++}
2034 ++
2035 + const char * __weak arch__normalize_symbol_name(const char *name)
2036 + {
2037 + return name;
2038 +@@ -219,7 +224,7 @@ void symbols__fixup_end(struct rb_root *symbols)
2039 + curr = rb_entry(nd, struct symbol, rb_node);
2040 +
2041 + if (prev->end == prev->start && prev->end != curr->start)
2042 +- prev->end = curr->start;
2043 ++ arch__symbols__fixup_end(prev, curr);
2044 + }
2045 +
2046 + /* Last entry */
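
The default fixup still closes each gap by stretching a zero-sized symbol to the next symbol's start; only s390 overrides it, stopping at a page boundary instead of swallowing the huge kernel-to-module gap. The rounding used by that override, by itself:

    #include <stdint.h>

    #define PAGE_SIZE 4096u

    /* Align the last kernel symbol's end up to the next page rather
     * than extending it to the first module's start.
     */
    static uint64_t roundup_page(uint64_t addr)
    {
        return (addr + PAGE_SIZE - 1) / PAGE_SIZE * PAGE_SIZE;
    }
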
2047 +diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
2048 +index 698c65e603a8..95ac21be8481 100644
2049 +--- a/tools/perf/util/symbol.h
2050 ++++ b/tools/perf/util/symbol.h
2051 +@@ -351,6 +351,7 @@ const char *arch__normalize_symbol_name(const char *name);
2052 + #define SYMBOL_A 0
2053 + #define SYMBOL_B 1
2054 +
2055 ++void arch__symbols__fixup_end(struct symbol *p, struct symbol *c);
2056 + int arch__compare_symbol_names(const char *namea, const char *nameb);
2057 + int arch__compare_symbol_names_n(const char *namea, const char *nameb,
2058 + unsigned int n);
2059 +diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
2060 +index 1dbcd3c8dee0..177436c8e026 100644
2061 +--- a/tools/perf/util/thread.c
2062 ++++ b/tools/perf/util/thread.c
2063 +@@ -160,14 +160,24 @@ struct comm *thread__comm(const struct thread *thread)
2064 +
2065 + struct comm *thread__exec_comm(const struct thread *thread)
2066 + {
2067 +- struct comm *comm, *last = NULL;
2068 ++ struct comm *comm, *last = NULL, *second_last = NULL;
2069 +
2070 + list_for_each_entry(comm, &thread->comm_list, list) {
2071 + if (comm->exec)
2072 + return comm;
2073 ++ second_last = last;
2074 + last = comm;
2075 + }
2076 +
2077 ++ /*
2078 ++ * 'last' with no start time might be the parent's comm of a synthesized
2079 ++ * thread (created by processing a synthesized fork event). For a main
2080 ++ * thread, that is very probably wrong. Prefer a later comm to avoid
2081 ++ * that case.
2082 ++ */
2083 ++ if (second_last && !last->start && thread->pid_ == thread->tid)
2084 ++ return second_last;
2085 ++
2086 + return last;
2087 + }
2088 +
2089 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
2090 +index dbbfcd082513..89fd40e57cae 100644
2091 +--- a/virt/kvm/kvm_main.c
2092 ++++ b/virt/kvm/kvm_main.c
2093 +@@ -2314,6 +2314,29 @@ static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
2094 + #endif
2095 + }
2096 +
2097 ++/*
2098 ++ * Unlike kvm_arch_vcpu_runnable, this function is called outside
2099 ++ * a vcpu_load/vcpu_put pair. However, for most architectures
2100 ++ * kvm_arch_vcpu_runnable does not require vcpu_load.
2101 ++ */
2102 ++bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
2103 ++{
2104 ++ return kvm_arch_vcpu_runnable(vcpu);
2105 ++}
2106 ++
2107 ++static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
2108 ++{
2109 ++ if (kvm_arch_dy_runnable(vcpu))
2110 ++ return true;
2111 ++
2112 ++#ifdef CONFIG_KVM_ASYNC_PF
2113 ++ if (!list_empty_careful(&vcpu->async_pf.done))
2114 ++ return true;
2115 ++#endif
2116 ++
2117 ++ return false;
2118 ++}
2119 ++
2120 + void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
2121 + {
2122 + struct kvm *kvm = me->kvm;
2123 +@@ -2343,7 +2366,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
2124 + continue;
2125 + if (vcpu == me)
2126 + continue;
2127 +- if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
2128 ++ if (swait_active(&vcpu->wq) && !vcpu_dy_runnable(vcpu))
2129 + continue;
2130 + if (yield_to_kernel_mode && !kvm_arch_vcpu_in_kernel(vcpu))
2131 + continue;