From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.6 commit in: /
Date: Thu, 02 Jun 2016 19:39:28
Message-Id: 1464896351.dc34aa235aa823a2bb5b439479041daca6a93749.mpagano@gentoo
1 commit: dc34aa235aa823a2bb5b439479041daca6a93749
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Thu Jun 2 19:39:11 2016 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Thu Jun 2 19:39:11 2016 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=dc34aa23
7
8 Linux patch 4.6.1
9
10 0000_README | 4 +
11 1000_linux-4.6.1.patch | 4584 ++++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 4588 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index 8e70e78..220d627 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -43,6 +43,10 @@ EXPERIMENTAL
19 Individual Patch Descriptions:
20 --------------------------------------------------------------------------
21
22 +Patch: 1000_linux-4.6.1.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.6.1
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1000_linux-4.6.1.patch b/1000_linux-4.6.1.patch
31 new file mode 100644
32 index 0000000..7282235
33 --- /dev/null
34 +++ b/1000_linux-4.6.1.patch
35 @@ -0,0 +1,4584 @@
36 +diff --git a/Documentation/serial/tty.txt b/Documentation/serial/tty.txt
37 +index 798cba82c762..b48780977a68 100644
38 +--- a/Documentation/serial/tty.txt
39 ++++ b/Documentation/serial/tty.txt
40 +@@ -210,9 +210,6 @@ TTY_IO_ERROR If set, causes all subsequent userspace read/write
41 +
42 + TTY_OTHER_CLOSED Device is a pty and the other side has closed.
43 +
44 +-TTY_OTHER_DONE Device is a pty and the other side has closed and
45 +- all pending input processing has been completed.
46 +-
47 + TTY_NO_WRITE_SPLIT Prevent driver from splitting up writes into
48 + smaller chunks.
49 +
50 +diff --git a/Makefile b/Makefile
51 +index 0f9cb36d45c2..2fcc41ea99a3 100644
52 +--- a/Makefile
53 ++++ b/Makefile
54 +@@ -1,6 +1,6 @@
55 + VERSION = 4
56 + PATCHLEVEL = 6
57 +-SUBLEVEL = 0
58 ++SUBLEVEL = 1
59 + EXTRAVERSION =
60 + NAME = Charred Weasel
61 +
62 +@@ -697,9 +697,10 @@ KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
63 + KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
64 + else
65 +
66 +-# This warning generated too much noise in a regular build.
67 +-# Use make W=1 to enable this warning (see scripts/Makefile.build)
68 ++# These warnings generated too much noise in a regular build.
69 ++# Use make W=1 to enable them (see scripts/Makefile.build)
70 + KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
71 ++KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
72 + endif
73 +
74 + ifdef CONFIG_FRAME_POINTER
75 +diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
76 +index d6d4191e68f2..dea1452a8419 100644
77 +--- a/arch/arm/kvm/mmu.c
78 ++++ b/arch/arm/kvm/mmu.c
79 +@@ -893,11 +893,14 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
80 + VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));
81 +
82 + old_pmd = *pmd;
83 +- kvm_set_pmd(pmd, *new_pmd);
84 +- if (pmd_present(old_pmd))
85 ++ if (pmd_present(old_pmd)) {
86 ++ pmd_clear(pmd);
87 + kvm_tlb_flush_vmid_ipa(kvm, addr);
88 +- else
89 ++ } else {
90 + get_page(virt_to_page(pmd));
91 ++ }
92 ++
93 ++ kvm_set_pmd(pmd, *new_pmd);
94 + return 0;
95 + }
96 +
97 +@@ -946,12 +949,14 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
98 +
99 + /* Create 2nd stage page table mapping - Level 3 */
100 + old_pte = *pte;
101 +- kvm_set_pte(pte, *new_pte);
102 +- if (pte_present(old_pte))
103 ++ if (pte_present(old_pte)) {
104 ++ kvm_set_pte(pte, __pte(0));
105 + kvm_tlb_flush_vmid_ipa(kvm, addr);
106 +- else
107 ++ } else {
108 + get_page(virt_to_page(pte));
109 ++ }
110 +
111 ++ kvm_set_pte(pte, *new_pte);
112 + return 0;
113 + }
114 +
115 +diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
116 +index 5c25b831273d..9786f770088d 100644
117 +--- a/arch/arm64/include/asm/pgtable-hwdef.h
118 ++++ b/arch/arm64/include/asm/pgtable-hwdef.h
119 +@@ -133,7 +133,6 @@
120 + * Section
121 + */
122 + #define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0)
123 +-#define PMD_SECT_PROT_NONE (_AT(pmdval_t, 1) << 58)
124 + #define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */
125 + #define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */
126 + #define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
127 +diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
128 +index 989fef16d461..44430ce5819e 100644
129 +--- a/arch/arm64/include/asm/pgtable.h
130 ++++ b/arch/arm64/include/asm/pgtable.h
131 +@@ -280,6 +280,7 @@ static inline pgprot_t mk_sect_prot(pgprot_t prot)
132 + #define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
133 + #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
134 +
135 ++#define pmd_present(pmd) pte_present(pmd_pte(pmd))
136 + #define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd))
137 + #define pmd_young(pmd) pte_young(pmd_pte(pmd))
138 + #define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
139 +@@ -288,7 +289,7 @@ static inline pgprot_t mk_sect_prot(pgprot_t prot)
140 + #define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd)))
141 + #define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
142 + #define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
143 +-#define pmd_mknotpresent(pmd) (__pmd(pmd_val(pmd) & ~PMD_TYPE_MASK))
144 ++#define pmd_mknotpresent(pmd) (__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))
145 +
146 + #define __HAVE_ARCH_PMD_WRITE
147 + #define pmd_write(pmd) pte_write(pmd_pte(pmd))
148 +@@ -327,7 +328,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
149 + unsigned long size, pgprot_t vma_prot);
150 +
151 + #define pmd_none(pmd) (!pmd_val(pmd))
152 +-#define pmd_present(pmd) (pmd_val(pmd))
153 +
154 + #define pmd_bad(pmd) (!(pmd_val(pmd) & 2))
155 +
156 +@@ -526,6 +526,21 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
157 + }
158 +
159 + #ifdef CONFIG_ARM64_HW_AFDBM
160 ++#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
161 ++extern int ptep_set_access_flags(struct vm_area_struct *vma,
162 ++ unsigned long address, pte_t *ptep,
163 ++ pte_t entry, int dirty);
164 ++
165 ++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
166 ++#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
167 ++static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
168 ++ unsigned long address, pmd_t *pmdp,
169 ++ pmd_t entry, int dirty)
170 ++{
171 ++ return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
172 ++}
173 ++#endif
174 ++
175 + /*
176 + * Atomic pte/pmd modifications.
177 + */
178 +@@ -578,9 +593,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
179 + }
180 +
181 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE
182 +-#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
183 +-static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
184 +- unsigned long address, pmd_t *pmdp)
185 ++#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
186 ++static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
187 ++ unsigned long address, pmd_t *pmdp)
188 + {
189 + return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
190 + }
191 +diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
192 +index 84c8684431c7..f0c3fb7ec8cf 100644
193 +--- a/arch/arm64/kernel/cpuinfo.c
194 ++++ b/arch/arm64/kernel/cpuinfo.c
195 +@@ -87,7 +87,8 @@ static const char *const compat_hwcap_str[] = {
196 + "idivt",
197 + "vfpd32",
198 + "lpae",
199 +- "evtstrm"
200 ++ "evtstrm",
201 ++ NULL
202 + };
203 +
204 + static const char *const compat_hwcap2_str[] = {
205 +diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
206 +index 4d1ac81870d2..e9e0e6db73f6 100644
207 +--- a/arch/arm64/kvm/inject_fault.c
208 ++++ b/arch/arm64/kvm/inject_fault.c
209 +@@ -162,7 +162,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
210 + esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);
211 +
212 + if (!is_iabt)
213 +- esr |= ESR_ELx_EC_DABT_LOW;
214 ++ esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;
215 +
216 + vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_ELx_FSC_EXTABT;
217 + }
218 +diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
219 +index 95df28bc875f..3ae4a28c4aed 100644
220 +--- a/arch/arm64/mm/fault.c
221 ++++ b/arch/arm64/mm/fault.c
222 +@@ -81,6 +81,56 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
223 + printk("\n");
224 + }
225 +
226 ++#ifdef CONFIG_ARM64_HW_AFDBM
227 ++/*
228 ++ * This function sets the access flags (dirty, accessed), as well as write
229 ++ * permission, and only to a more permissive setting.
230 ++ *
231 ++ * It needs to cope with hardware update of the accessed/dirty state by other
232 ++ * agents in the system and can safely skip the __sync_icache_dcache() call as,
233 ++ * like set_pte_at(), the PTE is never changed from no-exec to exec here.
234 ++ *
235 ++ * Returns whether or not the PTE actually changed.
236 ++ */
237 ++int ptep_set_access_flags(struct vm_area_struct *vma,
238 ++ unsigned long address, pte_t *ptep,
239 ++ pte_t entry, int dirty)
240 ++{
241 ++ pteval_t old_pteval;
242 ++ unsigned int tmp;
243 ++
244 ++ if (pte_same(*ptep, entry))
245 ++ return 0;
246 ++
247 ++ /* only preserve the access flags and write permission */
248 ++ pte_val(entry) &= PTE_AF | PTE_WRITE | PTE_DIRTY;
249 ++
250 ++ /*
251 ++ * PTE_RDONLY is cleared by default in the asm below, so set it in
252 ++ * back if necessary (read-only or clean PTE).
253 ++ */
254 ++ if (!pte_write(entry) || !dirty)
255 ++ pte_val(entry) |= PTE_RDONLY;
256 ++
257 ++ /*
258 ++ * Setting the flags must be done atomically to avoid racing with the
259 ++ * hardware update of the access/dirty state.
260 ++ */
261 ++ asm volatile("// ptep_set_access_flags\n"
262 ++ " prfm pstl1strm, %2\n"
263 ++ "1: ldxr %0, %2\n"
264 ++ " and %0, %0, %3 // clear PTE_RDONLY\n"
265 ++ " orr %0, %0, %4 // set flags\n"
266 ++ " stxr %w1, %0, %2\n"
267 ++ " cbnz %w1, 1b\n"
268 ++ : "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep))
269 ++ : "L" (~PTE_RDONLY), "r" (pte_val(entry)));
270 ++
271 ++ flush_tlb_fix_spurious_fault(vma, address);
272 ++ return 1;
273 ++}
274 ++#endif
275 ++
276 + /*
277 + * The kernel tried to access some page that wasn't present.
278 + */
279 +diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
280 +index f6b12790716c..942b8f6bf35b 100644
281 +--- a/arch/mips/include/asm/kvm_host.h
282 ++++ b/arch/mips/include/asm/kvm_host.h
283 +@@ -747,7 +747,7 @@ extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
284 +
285 + uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu);
286 + void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count);
287 +-void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare);
288 ++void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack);
289 + void kvm_mips_init_count(struct kvm_vcpu *vcpu);
290 + int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
291 + int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
292 +diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
293 +index b37954cc880d..b8b7860ec1a8 100644
294 +--- a/arch/mips/kvm/emulate.c
295 ++++ b/arch/mips/kvm/emulate.c
296 +@@ -302,12 +302,31 @@ static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
297 + */
298 + static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
299 + {
300 +- ktime_t expires;
301 ++ struct mips_coproc *cop0 = vcpu->arch.cop0;
302 ++ ktime_t expires, threshold;
303 ++ uint32_t count, compare;
304 + int running;
305 +
306 +- /* Is the hrtimer pending? */
307 ++ /* Calculate the biased and scaled guest CP0_Count */
308 ++ count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
309 ++ compare = kvm_read_c0_guest_compare(cop0);
310 ++
311 ++ /*
312 ++ * Find whether CP0_Count has reached the closest timer interrupt. If
313 ++ * not, we shouldn't inject it.
314 ++ */
315 ++ if ((int32_t)(count - compare) < 0)
316 ++ return count;
317 ++
318 ++ /*
319 ++ * The CP0_Count we're going to return has already reached the closest
320 ++ * timer interrupt. Quickly check if it really is a new interrupt by
321 ++ * looking at whether the interval until the hrtimer expiry time is
322 ++ * less than 1/4 of the timer period.
323 ++ */
324 + expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
325 +- if (ktime_compare(now, expires) >= 0) {
326 ++ threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
327 ++ if (ktime_before(expires, threshold)) {
328 + /*
329 + * Cancel it while we handle it so there's no chance of
330 + * interference with the timeout handler.
331 +@@ -329,8 +348,7 @@ static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
332 + }
333 + }
334 +
335 +- /* Return the biased and scaled guest CP0_Count */
336 +- return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
337 ++ return count;
338 + }
339 +
340 + /**
341 +@@ -420,32 +438,6 @@ static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
342 + }
343 +
344 + /**
345 +- * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer.
346 +- * @vcpu: Virtual CPU.
347 +- *
348 +- * Recalculates and updates the expiry time of the hrtimer. This can be used
349 +- * after timer parameters have been altered which do not depend on the time that
350 +- * the change occurs (in those cases kvm_mips_freeze_hrtimer() and
351 +- * kvm_mips_resume_hrtimer() are used directly).
352 +- *
353 +- * It is guaranteed that no timer interrupts will be lost in the process.
354 +- *
355 +- * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
356 +- */
357 +-static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu)
358 +-{
359 +- ktime_t now;
360 +- uint32_t count;
361 +-
362 +- /*
363 +- * freeze_hrtimer takes care of a timer interrupts <= count, and
364 +- * resume_hrtimer the hrtimer takes care of a timer interrupts > count.
365 +- */
366 +- now = kvm_mips_freeze_hrtimer(vcpu, &count);
367 +- kvm_mips_resume_hrtimer(vcpu, now, count);
368 +-}
369 +-
370 +-/**
371 + * kvm_mips_write_count() - Modify the count and update timer.
372 + * @vcpu: Virtual CPU.
373 + * @count: Guest CP0_Count value to set.
374 +@@ -540,23 +532,42 @@ int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
375 + * kvm_mips_write_compare() - Modify compare and update timer.
376 + * @vcpu: Virtual CPU.
377 + * @compare: New CP0_Compare value.
378 ++ * @ack: Whether to acknowledge timer interrupt.
379 + *
380 + * Update CP0_Compare to a new value and update the timeout.
381 ++ * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
382 ++ * any pending timer interrupt is preserved.
383 + */
384 +-void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare)
385 ++void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack)
386 + {
387 + struct mips_coproc *cop0 = vcpu->arch.cop0;
388 ++ int dc;
389 ++ u32 old_compare = kvm_read_c0_guest_compare(cop0);
390 ++ ktime_t now;
391 ++ uint32_t count;
392 +
393 + /* if unchanged, must just be an ack */
394 +- if (kvm_read_c0_guest_compare(cop0) == compare)
395 ++ if (old_compare == compare) {
396 ++ if (!ack)
397 ++ return;
398 ++ kvm_mips_callbacks->dequeue_timer_int(vcpu);
399 ++ kvm_write_c0_guest_compare(cop0, compare);
400 + return;
401 ++ }
402 ++
403 ++ /* freeze_hrtimer() takes care of timer interrupts <= count */
404 ++ dc = kvm_mips_count_disabled(vcpu);
405 ++ if (!dc)
406 ++ now = kvm_mips_freeze_hrtimer(vcpu, &count);
407 ++
408 ++ if (ack)
409 ++ kvm_mips_callbacks->dequeue_timer_int(vcpu);
410 +
411 +- /* Update compare */
412 + kvm_write_c0_guest_compare(cop0, compare);
413 +
414 +- /* Update timeout if count enabled */
415 +- if (!kvm_mips_count_disabled(vcpu))
416 +- kvm_mips_update_hrtimer(vcpu);
417 ++ /* resume_hrtimer() takes care of timer interrupts > count */
418 ++ if (!dc)
419 ++ kvm_mips_resume_hrtimer(vcpu, now, count);
420 + }
421 +
422 + /**
423 +@@ -1095,9 +1106,9 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
424 +
425 + /* If we are writing to COMPARE */
426 + /* Clear pending timer interrupt, if any */
427 +- kvm_mips_callbacks->dequeue_timer_int(vcpu);
428 + kvm_mips_write_compare(vcpu,
429 +- vcpu->arch.gprs[rt]);
430 ++ vcpu->arch.gprs[rt],
431 ++ true);
432 + } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
433 + unsigned int old_val, val, change;
434 +
435 +diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
436 +index c4038d2a724c..caa5ea1038a0 100644
437 +--- a/arch/mips/kvm/trap_emul.c
438 ++++ b/arch/mips/kvm/trap_emul.c
439 +@@ -546,7 +546,7 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
440 + kvm_mips_write_count(vcpu, v);
441 + break;
442 + case KVM_REG_MIPS_CP0_COMPARE:
443 +- kvm_mips_write_compare(vcpu, v);
444 ++ kvm_mips_write_compare(vcpu, v, false);
445 + break;
446 + case KVM_REG_MIPS_CP0_CAUSE:
447 + /*
448 +diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
449 +index 84fb4fcfaa41..93243554cae9 100644
450 +--- a/arch/powerpc/kvm/book3s_hv.c
451 ++++ b/arch/powerpc/kvm/book3s_hv.c
452 +@@ -27,6 +27,7 @@
453 + #include <linux/export.h>
454 + #include <linux/fs.h>
455 + #include <linux/anon_inodes.h>
456 ++#include <linux/cpu.h>
457 + #include <linux/cpumask.h>
458 + #include <linux/spinlock.h>
459 + #include <linux/page-flags.h>
460 +diff --git a/arch/x86/crypto/sha-mb/sha1_x8_avx2.S b/arch/x86/crypto/sha-mb/sha1_x8_avx2.S
461 +index 8e1b47792b31..c9dae1cd2919 100644
462 +--- a/arch/x86/crypto/sha-mb/sha1_x8_avx2.S
463 ++++ b/arch/x86/crypto/sha-mb/sha1_x8_avx2.S
464 +@@ -296,7 +296,11 @@ W14 = TMP_
465 + #
466 + ENTRY(sha1_x8_avx2)
467 +
468 +- push RSP_SAVE
469 ++ # save callee-saved clobbered registers to comply with C function ABI
470 ++ push %r12
471 ++ push %r13
472 ++ push %r14
473 ++ push %r15
474 +
475 + #save rsp
476 + mov %rsp, RSP_SAVE
477 +@@ -446,7 +450,12 @@ lloop:
478 + ## Postamble
479 +
480 + mov RSP_SAVE, %rsp
481 +- pop RSP_SAVE
482 ++
483 ++ # restore callee-saved clobbered registers
484 ++ pop %r15
485 ++ pop %r14
486 ++ pop %r13
487 ++ pop %r12
488 +
489 + ret
490 + ENDPROC(sha1_x8_avx2)
491 +diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
492 +index 7012d18bb293..f921a1ed43f7 100644
493 +--- a/arch/x86/events/intel/uncore.c
494 ++++ b/arch/x86/events/intel/uncore.c
495 +@@ -888,7 +888,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
496 + return -ENODEV;
497 +
498 + pkg = topology_phys_to_logical_pkg(phys_id);
499 +- if (WARN_ON_ONCE(pkg < 0))
500 ++ if (pkg < 0)
501 + return -EINVAL;
502 +
503 + if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
504 +diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
505 +index 3636ec06c887..aeab47932933 100644
506 +--- a/arch/x86/include/asm/cpufeature.h
507 ++++ b/arch/x86/include/asm/cpufeature.h
508 +@@ -63,9 +63,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
509 + (((bit)>>5)==11 && (1UL<<((bit)&31) & REQUIRED_MASK11)) || \
510 + (((bit)>>5)==12 && (1UL<<((bit)&31) & REQUIRED_MASK12)) || \
511 + (((bit)>>5)==13 && (1UL<<((bit)&31) & REQUIRED_MASK13)) || \
512 +- (((bit)>>5)==13 && (1UL<<((bit)&31) & REQUIRED_MASK14)) || \
513 +- (((bit)>>5)==13 && (1UL<<((bit)&31) & REQUIRED_MASK15)) || \
514 +- (((bit)>>5)==14 && (1UL<<((bit)&31) & REQUIRED_MASK16)) )
515 ++ (((bit)>>5)==14 && (1UL<<((bit)&31) & REQUIRED_MASK14)) || \
516 ++ (((bit)>>5)==15 && (1UL<<((bit)&31) & REQUIRED_MASK15)) || \
517 ++ (((bit)>>5)==16 && (1UL<<((bit)&31) & REQUIRED_MASK16)) )
518 +
519 + #define DISABLED_MASK_BIT_SET(bit) \
520 + ( (((bit)>>5)==0 && (1UL<<((bit)&31) & DISABLED_MASK0 )) || \
521 +@@ -82,9 +82,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
522 + (((bit)>>5)==11 && (1UL<<((bit)&31) & DISABLED_MASK11)) || \
523 + (((bit)>>5)==12 && (1UL<<((bit)&31) & DISABLED_MASK12)) || \
524 + (((bit)>>5)==13 && (1UL<<((bit)&31) & DISABLED_MASK13)) || \
525 +- (((bit)>>5)==13 && (1UL<<((bit)&31) & DISABLED_MASK14)) || \
526 +- (((bit)>>5)==13 && (1UL<<((bit)&31) & DISABLED_MASK15)) || \
527 +- (((bit)>>5)==14 && (1UL<<((bit)&31) & DISABLED_MASK16)) )
528 ++ (((bit)>>5)==14 && (1UL<<((bit)&31) & DISABLED_MASK14)) || \
529 ++ (((bit)>>5)==15 && (1UL<<((bit)&31) & DISABLED_MASK15)) || \
530 ++ (((bit)>>5)==16 && (1UL<<((bit)&31) & DISABLED_MASK16)) )
531 +
532 + #define cpu_has(c, bit) \
533 + (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
534 +diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
535 +index 39343be7d4f4..911e9358ceb1 100644
536 +--- a/arch/x86/include/asm/disabled-features.h
537 ++++ b/arch/x86/include/asm/disabled-features.h
538 +@@ -29,11 +29,11 @@
539 + #endif /* CONFIG_X86_64 */
540 +
541 + #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
542 +-# define DISABLE_PKU (1<<(X86_FEATURE_PKU))
543 +-# define DISABLE_OSPKE (1<<(X86_FEATURE_OSPKE))
544 +-#else
545 + # define DISABLE_PKU 0
546 + # define DISABLE_OSPKE 0
547 ++#else
548 ++# define DISABLE_PKU (1<<(X86_FEATURE_PKU & 31))
549 ++# define DISABLE_OSPKE (1<<(X86_FEATURE_OSPKE & 31))
550 + #endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
551 +
552 + /*
553 +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
554 +index 8394b3d1f94f..f45a4b9d28c8 100644
555 +--- a/arch/x86/kernel/cpu/common.c
556 ++++ b/arch/x86/kernel/cpu/common.c
557 +@@ -310,6 +310,10 @@ static bool pku_disabled;
558 +
559 + static __always_inline void setup_pku(struct cpuinfo_x86 *c)
560 + {
561 ++ /* check the boot processor, plus compile options for PKU: */
562 ++ if (!cpu_feature_enabled(X86_FEATURE_PKU))
563 ++ return;
564 ++ /* checks the actual processor's cpuid bits: */
565 + if (!cpu_has(c, X86_FEATURE_PKU))
566 + return;
567 + if (pku_disabled)
568 +diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
569 +index 3f8c732117ec..c146f3c262c3 100644
570 +--- a/arch/x86/kvm/mtrr.c
571 ++++ b/arch/x86/kvm/mtrr.c
572 +@@ -44,8 +44,6 @@ static bool msr_mtrr_valid(unsigned msr)
573 + case MSR_MTRRdefType:
574 + case MSR_IA32_CR_PAT:
575 + return true;
576 +- case 0x2f8:
577 +- return true;
578 + }
579 + return false;
580 + }
581 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
582 +index 133679d520af..faf52bac1416 100644
583 +--- a/arch/x86/kvm/vmx.c
584 ++++ b/arch/x86/kvm/vmx.c
585 +@@ -5050,8 +5050,8 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
586 + vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
587 +
588 + cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
589 +- vmx_set_cr0(vcpu, cr0); /* enter rmode */
590 + vmx->vcpu.arch.cr0 = cr0;
591 ++ vmx_set_cr0(vcpu, cr0); /* enter rmode */
592 + vmx_set_cr4(vcpu, 0);
593 + vmx_set_efer(vcpu, 0);
594 + vmx_fpu_activate(vcpu);
595 +diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
596 +index beac4dfdade6..349b8ce92bf2 100644
597 +--- a/arch/x86/pci/xen.c
598 ++++ b/arch/x86/pci/xen.c
599 +@@ -491,8 +491,11 @@ int __init pci_xen_initial_domain(void)
600 + #endif
601 + __acpi_register_gsi = acpi_register_gsi_xen;
602 + __acpi_unregister_gsi = NULL;
603 +- /* Pre-allocate legacy irqs */
604 +- for (irq = 0; irq < nr_legacy_irqs(); irq++) {
605 ++ /*
606 ++ * Pre-allocate the legacy IRQs. Use NR_LEGACY_IRQS here
607 ++ * because we don't have a PIC and thus nr_legacy_irqs() is zero.
608 ++ */
609 ++ for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
610 + int trigger, polarity;
611 +
612 + if (acpi_get_override_irq(irq, &trigger, &polarity) == -1)
613 +diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
614 +index cd2c3d6d40e0..993fd31394c8 100644
615 +--- a/drivers/acpi/device_pm.c
616 ++++ b/drivers/acpi/device_pm.c
617 +@@ -319,6 +319,7 @@ int acpi_device_fix_up_power(struct acpi_device *device)
618 +
619 + return ret;
620 + }
621 ++EXPORT_SYMBOL_GPL(acpi_device_fix_up_power);
622 +
623 + int acpi_device_update_power(struct acpi_device *device, int *state_p)
624 + {
625 +diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
626 +index 814d5f83b75e..f03677588b9d 100644
627 +--- a/drivers/acpi/osl.c
628 ++++ b/drivers/acpi/osl.c
629 +@@ -135,7 +135,7 @@ static struct osi_linux {
630 + unsigned int enable:1;
631 + unsigned int dmi:1;
632 + unsigned int cmdline:1;
633 +- unsigned int default_disabling:1;
634 ++ u8 default_disabling;
635 + } osi_linux = {0, 0, 0, 0};
636 +
637 + static u32 acpi_osi_handler(acpi_string interface, u32 supported)
638 +@@ -1751,10 +1751,13 @@ void __init acpi_osi_setup(char *str)
639 + if (*str == '!') {
640 + str++;
641 + if (*str == '\0') {
642 +- osi_linux.default_disabling = 1;
643 ++ /* Do not override acpi_osi=!* */
644 ++ if (!osi_linux.default_disabling)
645 ++ osi_linux.default_disabling =
646 ++ ACPI_DISABLE_ALL_VENDOR_STRINGS;
647 + return;
648 + } else if (*str == '*') {
649 +- acpi_update_interfaces(ACPI_DISABLE_ALL_STRINGS);
650 ++ osi_linux.default_disabling = ACPI_DISABLE_ALL_STRINGS;
651 + for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
652 + osi = &osi_setup_entries[i];
653 + osi->enable = false;
654 +@@ -1827,10 +1830,13 @@ static void __init acpi_osi_setup_late(void)
655 + acpi_status status;
656 +
657 + if (osi_linux.default_disabling) {
658 +- status = acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
659 ++ status = acpi_update_interfaces(osi_linux.default_disabling);
660 +
661 + if (ACPI_SUCCESS(status))
662 +- printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors\n");
663 ++ printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors%s\n",
664 ++ osi_linux.default_disabling ==
665 ++ ACPI_DISABLE_ALL_STRINGS ?
666 ++ " and feature groups" : "");
667 + }
668 +
669 + for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
670 +diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
671 +index 80783dcb7f57..aba31210c802 100644
672 +--- a/drivers/bluetooth/hci_vhci.c
673 ++++ b/drivers/bluetooth/hci_vhci.c
674 +@@ -50,6 +50,7 @@ struct vhci_data {
675 + wait_queue_head_t read_wait;
676 + struct sk_buff_head readq;
677 +
678 ++ struct mutex open_mutex;
679 + struct delayed_work open_timeout;
680 + };
681 +
682 +@@ -87,12 +88,15 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
683 + return 0;
684 + }
685 +
686 +-static int vhci_create_device(struct vhci_data *data, __u8 opcode)
687 ++static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
688 + {
689 + struct hci_dev *hdev;
690 + struct sk_buff *skb;
691 + __u8 dev_type;
692 +
693 ++ if (data->hdev)
694 ++ return -EBADFD;
695 ++
696 + /* bits 0-1 are dev_type (BR/EDR or AMP) */
697 + dev_type = opcode & 0x03;
698 +
699 +@@ -151,6 +155,17 @@ static int vhci_create_device(struct vhci_data *data, __u8 opcode)
700 + return 0;
701 + }
702 +
703 ++static int vhci_create_device(struct vhci_data *data, __u8 opcode)
704 ++{
705 ++ int err;
706 ++
707 ++ mutex_lock(&data->open_mutex);
708 ++ err = __vhci_create_device(data, opcode);
709 ++ mutex_unlock(&data->open_mutex);
710 ++
711 ++ return err;
712 ++}
713 ++
714 + static inline ssize_t vhci_get_user(struct vhci_data *data,
715 + struct iov_iter *from)
716 + {
717 +@@ -189,11 +204,6 @@ static inline ssize_t vhci_get_user(struct vhci_data *data,
718 + break;
719 +
720 + case HCI_VENDOR_PKT:
721 +- if (data->hdev) {
722 +- kfree_skb(skb);
723 +- return -EBADFD;
724 +- }
725 +-
726 + cancel_delayed_work_sync(&data->open_timeout);
727 +
728 + opcode = *((__u8 *) skb->data);
729 +@@ -320,6 +330,7 @@ static int vhci_open(struct inode *inode, struct file *file)
730 + skb_queue_head_init(&data->readq);
731 + init_waitqueue_head(&data->read_wait);
732 +
733 ++ mutex_init(&data->open_mutex);
734 + INIT_DELAYED_WORK(&data->open_timeout, vhci_open_timeout);
735 +
736 + file->private_data = data;
737 +@@ -333,15 +344,18 @@ static int vhci_open(struct inode *inode, struct file *file)
738 + static int vhci_release(struct inode *inode, struct file *file)
739 + {
740 + struct vhci_data *data = file->private_data;
741 +- struct hci_dev *hdev = data->hdev;
742 ++ struct hci_dev *hdev;
743 +
744 + cancel_delayed_work_sync(&data->open_timeout);
745 +
746 ++ hdev = data->hdev;
747 ++
748 + if (hdev) {
749 + hci_unregister_dev(hdev);
750 + hci_free_dev(hdev);
751 + }
752 +
753 ++ skb_queue_purge(&data->readq);
754 + file->private_data = NULL;
755 + kfree(data);
756 +
757 +diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
758 +index c74ed3fd496d..90338c38e38a 100644
759 +--- a/drivers/clk/bcm/clk-bcm2835.c
760 ++++ b/drivers/clk/bcm/clk-bcm2835.c
761 +@@ -1079,10 +1079,12 @@ static void bcm2835_pll_divider_off(struct clk_hw *hw)
762 + struct bcm2835_cprman *cprman = divider->cprman;
763 + const struct bcm2835_pll_divider_data *data = divider->data;
764 +
765 ++ spin_lock(&cprman->regs_lock);
766 + cprman_write(cprman, data->cm_reg,
767 + (cprman_read(cprman, data->cm_reg) &
768 + ~data->load_mask) | data->hold_mask);
769 + cprman_write(cprman, data->a2w_reg, A2W_PLL_CHANNEL_DISABLE);
770 ++ spin_unlock(&cprman->regs_lock);
771 + }
772 +
773 + static int bcm2835_pll_divider_on(struct clk_hw *hw)
774 +@@ -1091,12 +1093,14 @@ static int bcm2835_pll_divider_on(struct clk_hw *hw)
775 + struct bcm2835_cprman *cprman = divider->cprman;
776 + const struct bcm2835_pll_divider_data *data = divider->data;
777 +
778 ++ spin_lock(&cprman->regs_lock);
779 + cprman_write(cprman, data->a2w_reg,
780 + cprman_read(cprman, data->a2w_reg) &
781 + ~A2W_PLL_CHANNEL_DISABLE);
782 +
783 + cprman_write(cprman, data->cm_reg,
784 + cprman_read(cprman, data->cm_reg) & ~data->hold_mask);
785 ++ spin_unlock(&cprman->regs_lock);
786 +
787 + return 0;
788 + }
789 +diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
790 +index 9c29080a84d8..5c4e193164d4 100644
791 +--- a/drivers/clk/qcom/gcc-msm8916.c
792 ++++ b/drivers/clk/qcom/gcc-msm8916.c
793 +@@ -2346,6 +2346,7 @@ static struct clk_branch gcc_crypto_ahb_clk = {
794 + "pcnoc_bfdcd_clk_src",
795 + },
796 + .num_parents = 1,
797 ++ .flags = CLK_SET_RATE_PARENT,
798 + .ops = &clk_branch2_ops,
799 + },
800 + },
801 +@@ -2381,6 +2382,7 @@ static struct clk_branch gcc_crypto_clk = {
802 + "crypto_clk_src",
803 + },
804 + .num_parents = 1,
805 ++ .flags = CLK_SET_RATE_PARENT,
806 + .ops = &clk_branch2_ops,
807 + },
808 + },
809 +diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
810 +index 6fd63a600614..5ef4be22eb80 100644
811 +--- a/drivers/crypto/caam/jr.c
812 ++++ b/drivers/crypto/caam/jr.c
813 +@@ -248,7 +248,7 @@ static void caam_jr_dequeue(unsigned long devarg)
814 + struct device *caam_jr_alloc(void)
815 + {
816 + struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL;
817 +- struct device *dev = NULL;
818 ++ struct device *dev = ERR_PTR(-ENODEV);
819 + int min_tfm_cnt = INT_MAX;
820 + int tfm_cnt;
821 +
822 +diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
823 +index 7be3fbcd8d78..3830d7c4e138 100644
824 +--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
825 ++++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
826 +@@ -35,6 +35,7 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
827 + unsigned int todo;
828 + struct sg_mapping_iter mi, mo;
829 + unsigned int oi, oo; /* offset for in and out */
830 ++ unsigned long flags;
831 +
832 + if (areq->nbytes == 0)
833 + return 0;
834 +@@ -49,7 +50,7 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
835 + return -EINVAL;
836 + }
837 +
838 +- spin_lock_bh(&ss->slock);
839 ++ spin_lock_irqsave(&ss->slock, flags);
840 +
841 + for (i = 0; i < op->keylen; i += 4)
842 + writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
843 +@@ -117,7 +118,7 @@ release_ss:
844 + sg_miter_stop(&mi);
845 + sg_miter_stop(&mo);
846 + writel(0, ss->base + SS_CTL);
847 +- spin_unlock_bh(&ss->slock);
848 ++ spin_unlock_irqrestore(&ss->slock, flags);
849 + return err;
850 + }
851 +
852 +@@ -149,6 +150,7 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
853 + unsigned int ob = 0; /* offset in buf */
854 + unsigned int obo = 0; /* offset in bufo*/
855 + unsigned int obl = 0; /* length of data in bufo */
856 ++ unsigned long flags;
857 +
858 + if (areq->nbytes == 0)
859 + return 0;
860 +@@ -181,7 +183,7 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
861 + if (no_chunk == 1)
862 + return sun4i_ss_opti_poll(areq);
863 +
864 +- spin_lock_bh(&ss->slock);
865 ++ spin_lock_irqsave(&ss->slock, flags);
866 +
867 + for (i = 0; i < op->keylen; i += 4)
868 + writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
869 +@@ -307,7 +309,7 @@ release_ss:
870 + sg_miter_stop(&mi);
871 + sg_miter_stop(&mo);
872 + writel(0, ss->base + SS_CTL);
873 +- spin_unlock_bh(&ss->slock);
874 ++ spin_unlock_irqrestore(&ss->slock, flags);
875 +
876 + return err;
877 + }
878 +diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
879 +index aae05547b924..b7ee8d30147d 100644
880 +--- a/drivers/crypto/talitos.c
881 ++++ b/drivers/crypto/talitos.c
882 +@@ -835,6 +835,16 @@ struct talitos_ahash_req_ctx {
883 + struct scatterlist *psrc;
884 + };
885 +
886 ++struct talitos_export_state {
887 ++ u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
888 ++ u8 buf[HASH_MAX_BLOCK_SIZE];
889 ++ unsigned int swinit;
890 ++ unsigned int first;
891 ++ unsigned int last;
892 ++ unsigned int to_hash_later;
893 ++ unsigned int nbuf;
894 ++};
895 ++
896 + static int aead_setkey(struct crypto_aead *authenc,
897 + const u8 *key, unsigned int keylen)
898 + {
899 +@@ -1981,6 +1991,46 @@ static int ahash_digest(struct ahash_request *areq)
900 + return ahash_process_req(areq, areq->nbytes);
901 + }
902 +
903 ++static int ahash_export(struct ahash_request *areq, void *out)
904 ++{
905 ++ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
906 ++ struct talitos_export_state *export = out;
907 ++
908 ++ memcpy(export->hw_context, req_ctx->hw_context,
909 ++ req_ctx->hw_context_size);
910 ++ memcpy(export->buf, req_ctx->buf, req_ctx->nbuf);
911 ++ export->swinit = req_ctx->swinit;
912 ++ export->first = req_ctx->first;
913 ++ export->last = req_ctx->last;
914 ++ export->to_hash_later = req_ctx->to_hash_later;
915 ++ export->nbuf = req_ctx->nbuf;
916 ++
917 ++ return 0;
918 ++}
919 ++
920 ++static int ahash_import(struct ahash_request *areq, const void *in)
921 ++{
922 ++ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
923 ++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
924 ++ const struct talitos_export_state *export = in;
925 ++
926 ++ memset(req_ctx, 0, sizeof(*req_ctx));
927 ++ req_ctx->hw_context_size =
928 ++ (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
929 ++ ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
930 ++ : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
931 ++ memcpy(req_ctx->hw_context, export->hw_context,
932 ++ req_ctx->hw_context_size);
933 ++ memcpy(req_ctx->buf, export->buf, export->nbuf);
934 ++ req_ctx->swinit = export->swinit;
935 ++ req_ctx->first = export->first;
936 ++ req_ctx->last = export->last;
937 ++ req_ctx->to_hash_later = export->to_hash_later;
938 ++ req_ctx->nbuf = export->nbuf;
939 ++
940 ++ return 0;
941 ++}
942 ++
943 + struct keyhash_result {
944 + struct completion completion;
945 + int err;
946 +@@ -2458,6 +2508,7 @@ static struct talitos_alg_template driver_algs[] = {
947 + { .type = CRYPTO_ALG_TYPE_AHASH,
948 + .alg.hash = {
949 + .halg.digestsize = MD5_DIGEST_SIZE,
950 ++ .halg.statesize = sizeof(struct talitos_export_state),
951 + .halg.base = {
952 + .cra_name = "md5",
953 + .cra_driver_name = "md5-talitos",
954 +@@ -2473,6 +2524,7 @@ static struct talitos_alg_template driver_algs[] = {
955 + { .type = CRYPTO_ALG_TYPE_AHASH,
956 + .alg.hash = {
957 + .halg.digestsize = SHA1_DIGEST_SIZE,
958 ++ .halg.statesize = sizeof(struct talitos_export_state),
959 + .halg.base = {
960 + .cra_name = "sha1",
961 + .cra_driver_name = "sha1-talitos",
962 +@@ -2488,6 +2540,7 @@ static struct talitos_alg_template driver_algs[] = {
963 + { .type = CRYPTO_ALG_TYPE_AHASH,
964 + .alg.hash = {
965 + .halg.digestsize = SHA224_DIGEST_SIZE,
966 ++ .halg.statesize = sizeof(struct talitos_export_state),
967 + .halg.base = {
968 + .cra_name = "sha224",
969 + .cra_driver_name = "sha224-talitos",
970 +@@ -2503,6 +2556,7 @@ static struct talitos_alg_template driver_algs[] = {
971 + { .type = CRYPTO_ALG_TYPE_AHASH,
972 + .alg.hash = {
973 + .halg.digestsize = SHA256_DIGEST_SIZE,
974 ++ .halg.statesize = sizeof(struct talitos_export_state),
975 + .halg.base = {
976 + .cra_name = "sha256",
977 + .cra_driver_name = "sha256-talitos",
978 +@@ -2518,6 +2572,7 @@ static struct talitos_alg_template driver_algs[] = {
979 + { .type = CRYPTO_ALG_TYPE_AHASH,
980 + .alg.hash = {
981 + .halg.digestsize = SHA384_DIGEST_SIZE,
982 ++ .halg.statesize = sizeof(struct talitos_export_state),
983 + .halg.base = {
984 + .cra_name = "sha384",
985 + .cra_driver_name = "sha384-talitos",
986 +@@ -2533,6 +2588,7 @@ static struct talitos_alg_template driver_algs[] = {
987 + { .type = CRYPTO_ALG_TYPE_AHASH,
988 + .alg.hash = {
989 + .halg.digestsize = SHA512_DIGEST_SIZE,
990 ++ .halg.statesize = sizeof(struct talitos_export_state),
991 + .halg.base = {
992 + .cra_name = "sha512",
993 + .cra_driver_name = "sha512-talitos",
994 +@@ -2548,6 +2604,7 @@ static struct talitos_alg_template driver_algs[] = {
995 + { .type = CRYPTO_ALG_TYPE_AHASH,
996 + .alg.hash = {
997 + .halg.digestsize = MD5_DIGEST_SIZE,
998 ++ .halg.statesize = sizeof(struct talitos_export_state),
999 + .halg.base = {
1000 + .cra_name = "hmac(md5)",
1001 + .cra_driver_name = "hmac-md5-talitos",
1002 +@@ -2563,6 +2620,7 @@ static struct talitos_alg_template driver_algs[] = {
1003 + { .type = CRYPTO_ALG_TYPE_AHASH,
1004 + .alg.hash = {
1005 + .halg.digestsize = SHA1_DIGEST_SIZE,
1006 ++ .halg.statesize = sizeof(struct talitos_export_state),
1007 + .halg.base = {
1008 + .cra_name = "hmac(sha1)",
1009 + .cra_driver_name = "hmac-sha1-talitos",
1010 +@@ -2578,6 +2636,7 @@ static struct talitos_alg_template driver_algs[] = {
1011 + { .type = CRYPTO_ALG_TYPE_AHASH,
1012 + .alg.hash = {
1013 + .halg.digestsize = SHA224_DIGEST_SIZE,
1014 ++ .halg.statesize = sizeof(struct talitos_export_state),
1015 + .halg.base = {
1016 + .cra_name = "hmac(sha224)",
1017 + .cra_driver_name = "hmac-sha224-talitos",
1018 +@@ -2593,6 +2652,7 @@ static struct talitos_alg_template driver_algs[] = {
1019 + { .type = CRYPTO_ALG_TYPE_AHASH,
1020 + .alg.hash = {
1021 + .halg.digestsize = SHA256_DIGEST_SIZE,
1022 ++ .halg.statesize = sizeof(struct talitos_export_state),
1023 + .halg.base = {
1024 + .cra_name = "hmac(sha256)",
1025 + .cra_driver_name = "hmac-sha256-talitos",
1026 +@@ -2608,6 +2668,7 @@ static struct talitos_alg_template driver_algs[] = {
1027 + { .type = CRYPTO_ALG_TYPE_AHASH,
1028 + .alg.hash = {
1029 + .halg.digestsize = SHA384_DIGEST_SIZE,
1030 ++ .halg.statesize = sizeof(struct talitos_export_state),
1031 + .halg.base = {
1032 + .cra_name = "hmac(sha384)",
1033 + .cra_driver_name = "hmac-sha384-talitos",
1034 +@@ -2623,6 +2684,7 @@ static struct talitos_alg_template driver_algs[] = {
1035 + { .type = CRYPTO_ALG_TYPE_AHASH,
1036 + .alg.hash = {
1037 + .halg.digestsize = SHA512_DIGEST_SIZE,
1038 ++ .halg.statesize = sizeof(struct talitos_export_state),
1039 + .halg.base = {
1040 + .cra_name = "hmac(sha512)",
1041 + .cra_driver_name = "hmac-sha512-talitos",
1042 +@@ -2814,6 +2876,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
1043 + t_alg->algt.alg.hash.finup = ahash_finup;
1044 + t_alg->algt.alg.hash.digest = ahash_digest;
1045 + t_alg->algt.alg.hash.setkey = ahash_setkey;
1046 ++ t_alg->algt.alg.hash.import = ahash_import;
1047 ++ t_alg->algt.alg.hash.export = ahash_export;
1048 +
1049 + if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
1050 + !strncmp(alg->cra_name, "hmac", 4)) {
1051 +diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
1052 +index b6bf20496021..845ce90c2885 100644
1053 +--- a/drivers/infiniband/ulp/srp/ib_srp.c
1054 ++++ b/drivers/infiniband/ulp/srp/ib_srp.c
1055 +@@ -448,16 +448,16 @@ static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
1056 +
1057 + /**
1058 + * srp_destroy_qp() - destroy an RDMA queue pair
1059 +- * @ch: SRP RDMA channel.
1060 ++ * @qp: RDMA queue pair.
1061 + *
1062 + * Drain the qp before destroying it. This avoids that the receive
1063 + * completion handler can access the queue pair while it is
1064 + * being destroyed.
1065 + */
1066 +-static void srp_destroy_qp(struct srp_rdma_ch *ch)
1067 ++static void srp_destroy_qp(struct ib_qp *qp)
1068 + {
1069 +- ib_drain_rq(ch->qp);
1070 +- ib_destroy_qp(ch->qp);
1071 ++ ib_drain_rq(qp);
1072 ++ ib_destroy_qp(qp);
1073 + }
1074 +
1075 + static int srp_create_ch_ib(struct srp_rdma_ch *ch)
1076 +@@ -530,7 +530,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
1077 + }
1078 +
1079 + if (ch->qp)
1080 +- srp_destroy_qp(ch);
1081 ++ srp_destroy_qp(ch->qp);
1082 + if (ch->recv_cq)
1083 + ib_free_cq(ch->recv_cq);
1084 + if (ch->send_cq)
1085 +@@ -554,7 +554,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
1086 + return 0;
1087 +
1088 + err_qp:
1089 +- srp_destroy_qp(ch);
1090 ++ srp_destroy_qp(qp);
1091 +
1092 + err_send_cq:
1093 + ib_free_cq(send_cq);
1094 +@@ -597,7 +597,7 @@ static void srp_free_ch_ib(struct srp_target_port *target,
1095 + ib_destroy_fmr_pool(ch->fmr_pool);
1096 + }
1097 +
1098 +- srp_destroy_qp(ch);
1099 ++ srp_destroy_qp(ch->qp);
1100 + ib_free_cq(ch->send_cq);
1101 + ib_free_cq(ch->recv_cq);
1102 +
1103 +@@ -1509,7 +1509,7 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1104 +
1105 + if (dev->use_fast_reg) {
1106 + state.sg = idb_sg;
1107 +- sg_set_buf(idb_sg, req->indirect_desc, idb_len);
1108 ++ sg_init_one(idb_sg, req->indirect_desc, idb_len);
1109 + idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
1110 + #ifdef CONFIG_NEED_SG_DMA_LENGTH
1111 + idb_sg->dma_length = idb_sg->length; /* hack^2 */
1112 +diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c
1113 +index f2261ab54701..18663d4edae5 100644
1114 +--- a/drivers/input/misc/pwm-beeper.c
1115 ++++ b/drivers/input/misc/pwm-beeper.c
1116 +@@ -20,21 +20,40 @@
1117 + #include <linux/platform_device.h>
1118 + #include <linux/pwm.h>
1119 + #include <linux/slab.h>
1120 ++#include <linux/workqueue.h>
1121 +
1122 + struct pwm_beeper {
1123 + struct input_dev *input;
1124 + struct pwm_device *pwm;
1125 ++ struct work_struct work;
1126 + unsigned long period;
1127 + };
1128 +
1129 + #define HZ_TO_NANOSECONDS(x) (1000000000UL/(x))
1130 +
1131 ++static void __pwm_beeper_set(struct pwm_beeper *beeper)
1132 ++{
1133 ++ unsigned long period = beeper->period;
1134 ++
1135 ++ if (period) {
1136 ++ pwm_config(beeper->pwm, period / 2, period);
1137 ++ pwm_enable(beeper->pwm);
1138 ++ } else
1139 ++ pwm_disable(beeper->pwm);
1140 ++}
1141 ++
1142 ++static void pwm_beeper_work(struct work_struct *work)
1143 ++{
1144 ++ struct pwm_beeper *beeper =
1145 ++ container_of(work, struct pwm_beeper, work);
1146 ++
1147 ++ __pwm_beeper_set(beeper);
1148 ++}
1149 ++
1150 + static int pwm_beeper_event(struct input_dev *input,
1151 + unsigned int type, unsigned int code, int value)
1152 + {
1153 +- int ret = 0;
1154 + struct pwm_beeper *beeper = input_get_drvdata(input);
1155 +- unsigned long period;
1156 +
1157 + if (type != EV_SND || value < 0)
1158 + return -EINVAL;
1159 +@@ -49,22 +68,31 @@ static int pwm_beeper_event(struct input_dev *input,
1160 + return -EINVAL;
1161 + }
1162 +
1163 +- if (value == 0) {
1164 +- pwm_disable(beeper->pwm);
1165 +- } else {
1166 +- period = HZ_TO_NANOSECONDS(value);
1167 +- ret = pwm_config(beeper->pwm, period / 2, period);
1168 +- if (ret)
1169 +- return ret;
1170 +- ret = pwm_enable(beeper->pwm);
1171 +- if (ret)
1172 +- return ret;
1173 +- beeper->period = period;
1174 +- }
1175 ++ if (value == 0)
1176 ++ beeper->period = 0;
1177 ++ else
1178 ++ beeper->period = HZ_TO_NANOSECONDS(value);
1179 ++
1180 ++ schedule_work(&beeper->work);
1181 +
1182 + return 0;
1183 + }
1184 +
1185 ++static void pwm_beeper_stop(struct pwm_beeper *beeper)
1186 ++{
1187 ++ cancel_work_sync(&beeper->work);
1188 ++
1189 ++ if (beeper->period)
1190 ++ pwm_disable(beeper->pwm);
1191 ++}
1192 ++
1193 ++static void pwm_beeper_close(struct input_dev *input)
1194 ++{
1195 ++ struct pwm_beeper *beeper = input_get_drvdata(input);
1196 ++
1197 ++ pwm_beeper_stop(beeper);
1198 ++}
1199 ++
1200 + static int pwm_beeper_probe(struct platform_device *pdev)
1201 + {
1202 + unsigned long pwm_id = (unsigned long)dev_get_platdata(&pdev->dev);
1203 +@@ -87,6 +115,8 @@ static int pwm_beeper_probe(struct platform_device *pdev)
1204 + goto err_free;
1205 + }
1206 +
1207 ++ INIT_WORK(&beeper->work, pwm_beeper_work);
1208 ++
1209 + beeper->input = input_allocate_device();
1210 + if (!beeper->input) {
1211 + dev_err(&pdev->dev, "Failed to allocate input device\n");
1212 +@@ -106,6 +136,7 @@ static int pwm_beeper_probe(struct platform_device *pdev)
1213 + beeper->input->sndbit[0] = BIT(SND_TONE) | BIT(SND_BELL);
1214 +
1215 + beeper->input->event = pwm_beeper_event;
1216 ++ beeper->input->close = pwm_beeper_close;
1217 +
1218 + input_set_drvdata(beeper->input, beeper);
1219 +
1220 +@@ -135,7 +166,6 @@ static int pwm_beeper_remove(struct platform_device *pdev)
1221 +
1222 + input_unregister_device(beeper->input);
1223 +
1224 +- pwm_disable(beeper->pwm);
1225 + pwm_free(beeper->pwm);
1226 +
1227 + kfree(beeper);
1228 +@@ -147,8 +177,7 @@ static int __maybe_unused pwm_beeper_suspend(struct device *dev)
1229 + {
1230 + struct pwm_beeper *beeper = dev_get_drvdata(dev);
1231 +
1232 +- if (beeper->period)
1233 +- pwm_disable(beeper->pwm);
1234 ++ pwm_beeper_stop(beeper);
1235 +
1236 + return 0;
1237 + }
1238 +@@ -157,10 +186,8 @@ static int __maybe_unused pwm_beeper_resume(struct device *dev)
1239 + {
1240 + struct pwm_beeper *beeper = dev_get_drvdata(dev);
1241 +
1242 +- if (beeper->period) {
1243 +- pwm_config(beeper->pwm, beeper->period / 2, beeper->period);
1244 +- pwm_enable(beeper->pwm);
1245 +- }
1246 ++ if (beeper->period)
1247 ++ __pwm_beeper_set(beeper);
1248 +
1249 + return 0;
1250 + }
1251 +diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
1252 +index 5b7d3c2129d8..390e0ece26dc 100644
1253 +--- a/drivers/irqchip/irq-gic-v3.c
1254 ++++ b/drivers/irqchip/irq-gic-v3.c
1255 +@@ -364,6 +364,13 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
1256 + if (static_key_true(&supports_deactivate))
1257 + gic_write_dir(irqnr);
1258 + #ifdef CONFIG_SMP
1259 ++ /*
1260 ++ * Unlike GICv2, we don't need an smp_rmb() here.
1261 ++ * The control dependency from gic_read_iar to
1262 ++ * the ISB in gic_write_eoir is enough to ensure
1263 ++ * that any shared data read by handle_IPI will
1264 ++ * be read after the ACK.
1265 ++ */
1266 + handle_IPI(irqnr, regs);
1267 + #else
1268 + WARN_ONCE(true, "Unexpected SGI received!\n");
1269 +@@ -383,6 +390,15 @@ static void __init gic_dist_init(void)
1270 + writel_relaxed(0, base + GICD_CTLR);
1271 + gic_dist_wait_for_rwp();
1272 +
1273 ++ /*
1274 ++ * Configure SPIs as non-secure Group-1. This will only matter
1275 ++ * if the GIC only has a single security state. This will not
1276 ++ * do the right thing if the kernel is running in secure mode,
1277 ++ * but that's not the intended use case anyway.
1278 ++ */
1279 ++ for (i = 32; i < gic_data.irq_nr; i += 32)
1280 ++ writel_relaxed(~0, base + GICD_IGROUPR + i / 8);
1281 ++
1282 + gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);
1283 +
1284 + /* Enable distributor with ARE, Group1 */
1285 +@@ -500,6 +516,9 @@ static void gic_cpu_init(void)
1286 +
1287 + rbase = gic_data_rdist_sgi_base();
1288 +
1289 ++ /* Configure SGIs/PPIs as non-secure Group-1 */
1290 ++ writel_relaxed(~0, rbase + GICR_IGROUPR0);
1291 ++
1292 + gic_cpu_config(rbase, gic_redist_wait_for_rwp);
1293 +
1294 + /* Give LPIs a spin */
1295 +diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
1296 +index 282344b95ec2..5c4da5808b15 100644
1297 +--- a/drivers/irqchip/irq-gic.c
1298 ++++ b/drivers/irqchip/irq-gic.c
1299 +@@ -344,6 +344,14 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
1300 + if (static_key_true(&supports_deactivate))
1301 + writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE);
1302 + #ifdef CONFIG_SMP
1303 ++ /*
1304 ++ * Ensure any shared data written by the CPU sending
1305 ++ * the IPI is read after we've read the ACK register
1306 ++ * on the GIC.
1307 ++ *
1308 ++ * Pairs with the write barrier in gic_raise_softirq
1309 ++ */
1310 ++ smp_rmb();
1311 + handle_IPI(irqnr, regs);
1312 + #endif
1313 + continue;
1314 +diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
1315 +index 004926955263..b0155b05cddb 100644
1316 +--- a/drivers/mcb/mcb-parse.c
1317 ++++ b/drivers/mcb/mcb-parse.c
1318 +@@ -57,7 +57,7 @@ static int chameleon_parse_gdd(struct mcb_bus *bus,
1319 + mdev->id = GDD_DEV(reg1);
1320 + mdev->rev = GDD_REV(reg1);
1321 + mdev->var = GDD_VAR(reg1);
1322 +- mdev->bar = GDD_BAR(reg1);
1323 ++ mdev->bar = GDD_BAR(reg2);
1324 + mdev->group = GDD_GRP(reg2);
1325 + mdev->inst = GDD_INS(reg2);
1326 +
1327 +diff --git a/drivers/md/md.c b/drivers/md/md.c
1328 +index 14d3b37944df..85b16aadd459 100644
1329 +--- a/drivers/md/md.c
1330 ++++ b/drivers/md/md.c
1331 +@@ -307,7 +307,7 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
1332 + */
1333 + void mddev_suspend(struct mddev *mddev)
1334 + {
1335 +- WARN_ON_ONCE(current == mddev->thread->tsk);
1336 ++ WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
1337 + if (mddev->suspended++)
1338 + return;
1339 + synchronize_rcu();
1340 +diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
1341 +index b7b3e8ee64f2..c30290f33430 100644
1342 +--- a/drivers/mfd/omap-usb-tll.c
1343 ++++ b/drivers/mfd/omap-usb-tll.c
1344 +@@ -269,6 +269,8 @@ static int usbtll_omap_probe(struct platform_device *pdev)
1345 +
1346 + if (IS_ERR(tll->ch_clk[i]))
1347 + dev_dbg(dev, "can't get clock : %s\n", clkname);
1348 ++ else
1349 ++ clk_prepare(tll->ch_clk[i]);
1350 + }
1351 +
1352 + pm_runtime_put_sync(dev);
1353 +@@ -301,9 +303,12 @@ static int usbtll_omap_remove(struct platform_device *pdev)
1354 + tll_dev = NULL;
1355 + spin_unlock(&tll_lock);
1356 +
1357 +- for (i = 0; i < tll->nch; i++)
1358 +- if (!IS_ERR(tll->ch_clk[i]))
1359 ++ for (i = 0; i < tll->nch; i++) {
1360 ++ if (!IS_ERR(tll->ch_clk[i])) {
1361 ++ clk_unprepare(tll->ch_clk[i]);
1362 + clk_put(tll->ch_clk[i]);
1363 ++ }
1364 ++ }
1365 +
1366 + pm_runtime_disable(&pdev->dev);
1367 + return 0;
1368 +@@ -420,7 +425,7 @@ int omap_tll_enable(struct usbhs_omap_platform_data *pdata)
1369 + if (IS_ERR(tll->ch_clk[i]))
1370 + continue;
1371 +
1372 +- r = clk_prepare_enable(tll->ch_clk[i]);
1373 ++ r = clk_enable(tll->ch_clk[i]);
1374 + if (r) {
1375 + dev_err(tll_dev,
1376 + "Error enabling ch %d clock: %d\n", i, r);
1377 +@@ -448,7 +453,7 @@ int omap_tll_disable(struct usbhs_omap_platform_data *pdata)
1378 + for (i = 0; i < tll->nch; i++) {
1379 + if (omap_usb_mode_needs_tll(pdata->port_mode[i])) {
1380 + if (!IS_ERR(tll->ch_clk[i]))
1381 +- clk_disable_unprepare(tll->ch_clk[i]);
1382 ++ clk_disable(tll->ch_clk[i]);
1383 + }
1384 + }
1385 +
1386 +diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
1387 +index 194360a5f782..a039a5df6f21 100644
1388 +--- a/drivers/misc/mei/amthif.c
1389 ++++ b/drivers/misc/mei/amthif.c
1390 +@@ -380,8 +380,10 @@ int mei_amthif_irq_read_msg(struct mei_cl *cl,
1391 +
1392 + dev = cl->dev;
1393 +
1394 +- if (dev->iamthif_state != MEI_IAMTHIF_READING)
1395 ++ if (dev->iamthif_state != MEI_IAMTHIF_READING) {
1396 ++ mei_irq_discard_msg(dev, mei_hdr);
1397 + return 0;
1398 ++ }
1399 +
1400 + ret = mei_cl_irq_read_msg(cl, mei_hdr, cmpl_list);
1401 + if (ret)
1402 +diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
1403 +index 5d5996e39a67..038b6dd24fb4 100644
1404 +--- a/drivers/misc/mei/bus.c
1405 ++++ b/drivers/misc/mei/bus.c
1406 +@@ -220,17 +220,23 @@ EXPORT_SYMBOL_GPL(mei_cldev_recv);
1407 + static void mei_cl_bus_event_work(struct work_struct *work)
1408 + {
1409 + struct mei_cl_device *cldev;
1410 ++ struct mei_device *bus;
1411 +
1412 + cldev = container_of(work, struct mei_cl_device, event_work);
1413 +
1414 ++ bus = cldev->bus;
1415 ++
1416 + if (cldev->event_cb)
1417 + cldev->event_cb(cldev, cldev->events, cldev->event_context);
1418 +
1419 + cldev->events = 0;
1420 +
1421 + /* Prepare for the next read */
1422 +- if (cldev->events_mask & BIT(MEI_CL_EVENT_RX))
1423 ++ if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) {
1424 ++ mutex_lock(&bus->device_lock);
1425 + mei_cl_read_start(cldev->cl, 0, NULL);
1426 ++ mutex_unlock(&bus->device_lock);
1427 ++ }
1428 + }
1429 +
1430 + /**
1431 +@@ -304,6 +310,7 @@ int mei_cldev_register_event_cb(struct mei_cl_device *cldev,
1432 + unsigned long events_mask,
1433 + mei_cldev_event_cb_t event_cb, void *context)
1434 + {
1435 ++ struct mei_device *bus = cldev->bus;
1436 + int ret;
1437 +
1438 + if (cldev->event_cb)
1439 +@@ -316,15 +323,17 @@ int mei_cldev_register_event_cb(struct mei_cl_device *cldev,
1440 + INIT_WORK(&cldev->event_work, mei_cl_bus_event_work);
1441 +
1442 + if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) {
1443 ++ mutex_lock(&bus->device_lock);
1444 + ret = mei_cl_read_start(cldev->cl, 0, NULL);
1445 ++ mutex_unlock(&bus->device_lock);
1446 + if (ret && ret != -EBUSY)
1447 + return ret;
1448 + }
1449 +
1450 + if (cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF)) {
1451 +- mutex_lock(&cldev->cl->dev->device_lock);
1452 ++ mutex_lock(&bus->device_lock);
1453 + ret = mei_cl_notify_request(cldev->cl, NULL, event_cb ? 1 : 0);
1454 +- mutex_unlock(&cldev->cl->dev->device_lock);
1455 ++ mutex_unlock(&bus->device_lock);
1456 + if (ret)
1457 + return ret;
1458 + }
1459 +diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
1460 +index bab17e4197b6..09f5280fa021 100644
1461 +--- a/drivers/misc/mei/client.c
1462 ++++ b/drivers/misc/mei/client.c
1463 +@@ -1767,6 +1767,10 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
1464 + wake_up(&cl->wait);
1465 +
1466 + break;
1467 ++ case MEI_FOP_DISCONNECT_RSP:
1468 ++ mei_io_cb_free(cb);
1469 ++ mei_cl_set_disconnected(cl);
1470 ++ break;
1471 + default:
1472 + BUG_ON(0);
1473 + }
1474 +diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
1475 +index 5e305d2605f3..8fe1ef8215c1 100644
1476 +--- a/drivers/misc/mei/hbm.c
1477 ++++ b/drivers/misc/mei/hbm.c
1478 +@@ -882,8 +882,7 @@ static int mei_hbm_fw_disconnect_req(struct mei_device *dev,
1479 + cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT_RSP, NULL);
1480 + if (!cb)
1481 + return -ENOMEM;
1482 +- cl_dbg(dev, cl, "add disconnect response as first\n");
1483 +- list_add(&cb->list, &dev->ctrl_wr_list.list);
1484 ++ list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
1485 + }
1486 + return 0;
1487 + }
1488 +diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
1489 +index 1e5cb1f704f8..3831a7ba2531 100644
1490 +--- a/drivers/misc/mei/interrupt.c
1491 ++++ b/drivers/misc/mei/interrupt.c
1492 +@@ -76,7 +76,6 @@ static inline int mei_cl_hbm_equal(struct mei_cl *cl,
1493 + * @dev: mei device
1494 + * @hdr: message header
1495 + */
1496 +-static inline
1497 + void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr)
1498 + {
1499 + /*
1500 +@@ -194,10 +193,7 @@ static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
1501 + return -EMSGSIZE;
1502 +
1503 + ret = mei_hbm_cl_disconnect_rsp(dev, cl);
1504 +- mei_cl_set_disconnected(cl);
1505 +- mei_io_cb_free(cb);
1506 +- mei_me_cl_put(cl->me_cl);
1507 +- cl->me_cl = NULL;
1508 ++ list_move_tail(&cb->list, &cmpl_list->list);
1509 +
1510 + return ret;
1511 + }
1512 +diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
1513 +index db78e6d99456..859bdac22352 100644
1514 +--- a/drivers/misc/mei/mei_dev.h
1515 ++++ b/drivers/misc/mei/mei_dev.h
1516 +@@ -704,6 +704,8 @@ bool mei_hbuf_acquire(struct mei_device *dev);
1517 +
1518 + bool mei_write_is_idle(struct mei_device *dev);
1519 +
1520 ++void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr);
1521 ++
1522 + #if IS_ENABLED(CONFIG_DEBUG_FS)
1523 + int mei_dbgfs_register(struct mei_device *dev, const char *name);
1524 + void mei_dbgfs_deregister(struct mei_device *dev);
1525 +diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
1526 +index 88e45234d527..af57d2caac75 100644
1527 +--- a/drivers/misc/mic/vop/vop_vringh.c
1528 ++++ b/drivers/misc/mic/vop/vop_vringh.c
1529 +@@ -950,6 +950,11 @@ static long vop_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
1530 + ret = -EINVAL;
1531 + goto free_ret;
1532 + }
1533 ++ /* Ensure desc has not changed between the two reads */
1534 ++ if (memcmp(&dd, dd_config, sizeof(dd))) {
1535 ++ ret = -EINVAL;
1536 ++ goto free_ret;
1537 ++ }
1538 + mutex_lock(&vdev->vdev_mutex);
1539 + mutex_lock(&vi->vop_mutex);
1540 + ret = vop_virtio_add_device(vdev, dd_config);
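
The vop_ioctl() hunk above copies the device descriptor out of shared memory twice and rejects the request if the two copies differ, closing the window in which another agent could rewrite it between validation and use. A minimal sketch of that double-fetch check, with an illustrative descriptor type rather than the real MIC/VOP structures:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct desc {                 /* illustrative stand-in for the device descriptor */
    uint32_t type;
    uint32_t config_len;
};

/* In the driver this is a copy out of memory another agent may be writing. */
static void read_shared(struct desc *dst, const struct desc *shared)
{
    memcpy(dst, shared, sizeof(*dst));
}

/* Returns true only if the descriptor was stable across both reads. */
static bool fetch_desc_stable(const struct desc *shared, struct desc *out)
{
    struct desc first;

    read_shared(&first, shared);  /* first read: used for validation/sizing */
    read_shared(out, shared);     /* second read: the copy actually used    */

    /* Reject if the producer changed it in between (double-fetch race). */
    return memcmp(&first, out, sizeof(first)) == 0;
}
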
1541 +diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
1542 +index 8a0147dfed27..b0a27413cb13 100644
1543 +--- a/drivers/mmc/card/block.c
1544 ++++ b/drivers/mmc/card/block.c
1545 +@@ -2494,11 +2494,12 @@ static const struct mmc_fixup blk_fixups[] =
1546 + MMC_QUIRK_BLK_NO_CMD23),
1547 +
1548 + /*
1549 +- * Some Micron MMC cards needs longer data read timeout than
1550 +- * indicated in CSD.
1551 ++ * Some MMC cards need longer data read timeout than indicated in CSD.
1552 + */
1553 + MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
1554 + MMC_QUIRK_LONG_READ_TIME),
1555 ++ MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
1556 ++ MMC_QUIRK_LONG_READ_TIME),
1557 +
1558 + /*
1559 + * On these Samsung MoviNAND parts, performing secure erase or
1560 +diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
1561 +index 41b1e761965f..9eba56c68ca1 100644
1562 +--- a/drivers/mmc/core/core.c
1563 ++++ b/drivers/mmc/core/core.c
1564 +@@ -868,11 +868,11 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
1565 + /*
1566 + * Some cards require longer data read timeout than indicated in CSD.
1567 + * Address this by setting the read timeout to a "reasonably high"
1568 +- * value. For the cards tested, 300ms has proven enough. If necessary,
1569 ++ * value. For the cards tested, 600ms has proven enough. If necessary,
1570 + * this value can be increased if other problematic cards require this.
1571 + */
1572 + if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
1573 +- data->timeout_ns = 300000000;
1574 ++ data->timeout_ns = 600000000;
1575 + data->timeout_clks = 0;
1576 + }
1577 +
1578 +diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
1579 +index 4dbe3df8024b..80169643d59f 100644
1580 +--- a/drivers/mmc/core/mmc.c
1581 ++++ b/drivers/mmc/core/mmc.c
1582 +@@ -333,6 +333,9 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
1583 + }
1584 + }
1585 +
1586 ++/* Minimum partition switch timeout in milliseconds */
1587 ++#define MMC_MIN_PART_SWITCH_TIME 300
1588 ++
1589 + /*
1590 + * Decode extended CSD.
1591 + */
1592 +@@ -397,6 +400,10 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
1593 +
1594 + /* EXT_CSD value is in units of 10ms, but we store in ms */
1595 + card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
1596 ++ /* Some eMMC set the value too low so set a minimum */
1597 ++ if (card->ext_csd.part_time &&
1598 ++ card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME)
1599 ++ card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME;
1600 +
1601 + /* Sleep / awake timeout in 100ns units */
1602 + if (sa_shift > 0 && sa_shift <= 0x17)
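
The EXT_CSD hunk above converts the reported partition switch time from 10 ms units and then raises it to a floor, because some eMMC parts advertise a value that is too small to be usable. A tiny sketch of the same convert-then-clamp step; the constant is taken from the hunk, the helper name is illustrative:

#include <stdint.h>

#define MMC_MIN_PART_SWITCH_TIME 300    /* ms, as defined in the hunk above */

/* The EXT_CSD byte is in 10 ms units; 0 means "not specified" and stays 0. */
static unsigned int part_switch_time_ms(uint8_t ext_csd_byte)
{
    unsigned int ms = 10u * ext_csd_byte;

    if (ms && ms < MMC_MIN_PART_SWITCH_TIME)
        ms = MMC_MIN_PART_SWITCH_TIME;
    return ms;
}
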
1603 +diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
1604 +index bed6a494f52c..c0e206d72153 100644
1605 +--- a/drivers/mmc/host/sdhci-acpi.c
1606 ++++ b/drivers/mmc/host/sdhci-acpi.c
1607 +@@ -277,7 +277,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
1608 + .chip = &sdhci_acpi_chip_int,
1609 + .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
1610 + MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
1611 +- MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY,
1612 ++ MMC_CAP_WAIT_WHILE_BUSY,
1613 + .caps2 = MMC_CAP2_HC_ERASE_SZ,
1614 + .flags = SDHCI_ACPI_RUNTIME_PM,
1615 + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
1616 +@@ -292,7 +292,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
1617 + SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
1618 + .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
1619 + .caps = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD |
1620 +- MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY,
1621 ++ MMC_CAP_WAIT_WHILE_BUSY,
1622 + .flags = SDHCI_ACPI_RUNTIME_PM,
1623 + .pm_caps = MMC_PM_KEEP_POWER,
1624 + .probe_slot = sdhci_acpi_sdio_probe_slot,
1625 +@@ -304,7 +304,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
1626 + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
1627 + .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
1628 + SDHCI_QUIRK2_STOP_WITH_TC,
1629 +- .caps = MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY,
1630 ++ .caps = MMC_CAP_WAIT_WHILE_BUSY,
1631 + .probe_slot = sdhci_acpi_sd_probe_slot,
1632 + };
1633 +
1634 +@@ -381,7 +381,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
1635 + {
1636 + struct device *dev = &pdev->dev;
1637 + acpi_handle handle = ACPI_HANDLE(dev);
1638 +- struct acpi_device *device;
1639 ++ struct acpi_device *device, *child;
1640 + struct sdhci_acpi_host *c;
1641 + struct sdhci_host *host;
1642 + struct resource *iomem;
1643 +@@ -393,6 +393,11 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
1644 + if (acpi_bus_get_device(handle, &device))
1645 + return -ENODEV;
1646 +
1647 ++ /* Power on the SDHCI controller and its children */
1648 ++ acpi_device_fix_up_power(device);
1649 ++ list_for_each_entry(child, &device->children, node)
1650 ++ acpi_device_fix_up_power(child);
1651 ++
1652 + if (acpi_bus_get_status(device) || !device->status.present)
1653 + return -ENODEV;
1654 +
1655 +diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
1656 +index 79e19017343e..b42dd6a1b639 100644
1657 +--- a/drivers/mmc/host/sdhci-pci-core.c
1658 ++++ b/drivers/mmc/host/sdhci-pci-core.c
1659 +@@ -361,7 +361,6 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
1660 + {
1661 + slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
1662 + MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
1663 +- MMC_CAP_BUS_WIDTH_TEST |
1664 + MMC_CAP_WAIT_WHILE_BUSY;
1665 + slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ;
1666 + slot->hw_reset = sdhci_pci_int_hw_reset;
1667 +@@ -377,15 +376,13 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
1668 + static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
1669 + {
1670 + slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
1671 +- MMC_CAP_BUS_WIDTH_TEST |
1672 + MMC_CAP_WAIT_WHILE_BUSY;
1673 + return 0;
1674 + }
1675 +
1676 + static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
1677 + {
1678 +- slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST |
1679 +- MMC_CAP_WAIT_WHILE_BUSY;
1680 ++ slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
1681 + slot->cd_con_id = NULL;
1682 + slot->cd_idx = 0;
1683 + slot->cd_override_level = true;
1684 +diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
1685 +index 5b9834cf2820..96fddb016bf1 100644
1686 +--- a/drivers/mtd/ubi/eba.c
1687 ++++ b/drivers/mtd/ubi/eba.c
1688 +@@ -426,8 +426,25 @@ retry:
1689 + pnum, vol_id, lnum);
1690 + err = -EBADMSG;
1691 + } else {
1692 +- err = -EINVAL;
1693 +- ubi_ro_mode(ubi);
1694 ++ /*
1695 ++ * Ending up here in the non-Fastmap case
1696 ++ * is a clear bug as the VID header had to
1697 ++ * be present at scan time to have it referenced.
1698 ++ * With fastmap the story is more complicated.
1699 ++ * Fastmap has the mapping info without the need
1700 ++ * of a full scan. So the LEB could have been
1701 ++ * unmapped, Fastmap cannot know this and keeps
1702 ++ * the LEB referenced.
1703 ++ * This is valid and works as the layer above UBI
1704 ++ * has to do bookkeeping about used/referenced
1705 ++ * LEBs in any case.
1706 ++ */
1707 ++ if (ubi->fast_attach) {
1708 ++ err = -EBADMSG;
1709 ++ } else {
1710 ++ err = -EINVAL;
1711 ++ ubi_ro_mode(ubi);
1712 ++ }
1713 + }
1714 + }
1715 + goto out_free;
1716 +diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
1717 +index 263b439e21a8..990898b9dc72 100644
1718 +--- a/drivers/mtd/ubi/fastmap.c
1719 ++++ b/drivers/mtd/ubi/fastmap.c
1720 +@@ -1058,6 +1058,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
1721 + ubi_msg(ubi, "fastmap WL pool size: %d",
1722 + ubi->fm_wl_pool.max_size);
1723 + ubi->fm_disabled = 0;
1724 ++ ubi->fast_attach = 1;
1725 +
1726 + ubi_free_vid_hdr(ubi, vh);
1727 + kfree(ech);
1728 +diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
1729 +index dadc6a9d5755..61d4e99755a4 100644
1730 +--- a/drivers/mtd/ubi/ubi.h
1731 ++++ b/drivers/mtd/ubi/ubi.h
1732 +@@ -466,6 +466,7 @@ struct ubi_debug_info {
1733 + * @fm_eba_sem: allows ubi_update_fastmap() to block EBA table changes
1734 + * @fm_work: fastmap work queue
1735 + * @fm_work_scheduled: non-zero if fastmap work was scheduled
1736 ++ * @fast_attach: non-zero if UBI was attached by fastmap
1737 + *
1738 + * @used: RB-tree of used physical eraseblocks
1739 + * @erroneous: RB-tree of erroneous used physical eraseblocks
1740 +@@ -574,6 +575,7 @@ struct ubi_device {
1741 + size_t fm_size;
1742 + struct work_struct fm_work;
1743 + int fm_work_scheduled;
1744 ++ int fast_attach;
1745 +
1746 + /* Wear-leveling sub-system's stuff */
1747 + struct rb_root used;
1748 +diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
1749 +index 141c2a42d7ed..910c12e2638e 100644
1750 +--- a/drivers/net/can/dev.c
1751 ++++ b/drivers/net/can/dev.c
1752 +@@ -696,11 +696,17 @@ int can_change_mtu(struct net_device *dev, int new_mtu)
1753 + /* allow change of MTU according to the CANFD ability of the device */
1754 + switch (new_mtu) {
1755 + case CAN_MTU:
1756 ++ /* 'CANFD-only' controllers can not switch to CAN_MTU */
1757 ++ if (priv->ctrlmode_static & CAN_CTRLMODE_FD)
1758 ++ return -EINVAL;
1759 ++
1760 + priv->ctrlmode &= ~CAN_CTRLMODE_FD;
1761 + break;
1762 +
1763 + case CANFD_MTU:
1764 +- if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD))
1765 ++ /* check for potential CANFD ability */
1766 ++ if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) &&
1767 ++ !(priv->ctrlmode_static & CAN_CTRLMODE_FD))
1768 + return -EINVAL;
1769 +
1770 + priv->ctrlmode |= CAN_CTRLMODE_FD;
1771 +@@ -782,6 +788,35 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
1772 + = { .len = sizeof(struct can_bittiming_const) },
1773 + };
1774 +
1775 ++static int can_validate(struct nlattr *tb[], struct nlattr *data[])
1776 ++{
1777 ++ bool is_can_fd = false;
1778 ++
1779 ++ /* Make sure that valid CAN FD configurations always consist of
1780 ++ * - nominal/arbitration bittiming
1781 ++ * - data bittiming
1782 ++ * - control mode with CAN_CTRLMODE_FD set
1783 ++ */
1784 ++
1785 ++ if (data[IFLA_CAN_CTRLMODE]) {
1786 ++ struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
1787 ++
1788 ++ is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
1789 ++ }
1790 ++
1791 ++ if (is_can_fd) {
1792 ++ if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING])
1793 ++ return -EOPNOTSUPP;
1794 ++ }
1795 ++
1796 ++ if (data[IFLA_CAN_DATA_BITTIMING]) {
1797 ++ if (!is_can_fd || !data[IFLA_CAN_BITTIMING])
1798 ++ return -EOPNOTSUPP;
1799 ++ }
1800 ++
1801 ++ return 0;
1802 ++}
1803 ++
1804 + static int can_changelink(struct net_device *dev,
1805 + struct nlattr *tb[], struct nlattr *data[])
1806 + {
1807 +@@ -813,19 +848,31 @@ static int can_changelink(struct net_device *dev,
1808 +
1809 + if (data[IFLA_CAN_CTRLMODE]) {
1810 + struct can_ctrlmode *cm;
1811 ++ u32 ctrlstatic;
1812 ++ u32 maskedflags;
1813 +
1814 + /* Do not allow changing controller mode while running */
1815 + if (dev->flags & IFF_UP)
1816 + return -EBUSY;
1817 + cm = nla_data(data[IFLA_CAN_CTRLMODE]);
1818 ++ ctrlstatic = priv->ctrlmode_static;
1819 ++ maskedflags = cm->flags & cm->mask;
1820 ++
1821 ++ /* check whether provided bits are allowed to be passed */
1822 ++ if (cm->mask & ~(priv->ctrlmode_supported | ctrlstatic))
1823 ++ return -EOPNOTSUPP;
1824 ++
1825 ++ /* do not check for static fd-non-iso if 'fd' is disabled */
1826 ++ if (!(maskedflags & CAN_CTRLMODE_FD))
1827 ++ ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
1828 +
1829 +- /* check whether changed bits are allowed to be modified */
1830 +- if (cm->mask & ~priv->ctrlmode_supported)
1831 ++ /* make sure static options are provided by configuration */
1832 ++ if ((maskedflags & ctrlstatic) != ctrlstatic)
1833 + return -EOPNOTSUPP;
1834 +
1835 + /* clear bits to be modified and copy the flag values */
1836 + priv->ctrlmode &= ~cm->mask;
1837 +- priv->ctrlmode |= (cm->flags & cm->mask);
1838 ++ priv->ctrlmode |= maskedflags;
1839 +
1840 + /* CAN_CTRLMODE_FD can only be set when driver supports FD */
1841 + if (priv->ctrlmode & CAN_CTRLMODE_FD)
1842 +@@ -966,6 +1013,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
1843 + .maxtype = IFLA_CAN_MAX,
1844 + .policy = can_policy,
1845 + .setup = can_setup,
1846 ++ .validate = can_validate,
1847 + .newlink = can_newlink,
1848 + .changelink = can_changelink,
1849 + .get_size = can_get_size,
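
The can_changelink() hunk above splits the control-mode bits into those a driver can toggle (ctrlmode_supported) and those that are fixed on (ctrlmode_static), rejects requests whose mask strays outside that union, and insists that every relevant static bit stays set in the requested flags. A compact sketch of the bit checks outside the netlink plumbing, with made-up flag values standing in for CAN_CTRLMODE_*:

#include <stdbool.h>
#include <stdint.h>

#define MODE_FD         0x01u   /* illustrative stand-ins for CAN_CTRLMODE_* */
#define MODE_FD_NON_ISO 0x02u

/* Request semantics: "for the bits in mask, set them to the values in flags". */
static bool ctrlmode_request_ok(uint32_t supported, uint32_t fixed_on,
                                uint32_t mask, uint32_t flags)
{
    uint32_t masked = flags & mask;
    uint32_t required = fixed_on;

    /* Touching a bit that is neither switchable nor static is an error. */
    if (mask & ~(supported | fixed_on))
        return false;

    /* A static FD_NON_ISO only matters while FD itself is requested. */
    if (!(masked & MODE_FD))
        required &= ~MODE_FD_NON_ISO;

    /* Every remaining static bit must be explicitly kept on. */
    return (masked & required) == required;
}
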
1850 +diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
1851 +index 39cf911f7a1e..195f15edb32e 100644
1852 +--- a/drivers/net/can/m_can/m_can.c
1853 ++++ b/drivers/net/can/m_can/m_can.c
1854 +@@ -955,7 +955,7 @@ static struct net_device *alloc_m_can_dev(void)
1855 + priv->can.do_get_berr_counter = m_can_get_berr_counter;
1856 +
1857 + /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.1 */
1858 +- priv->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO;
1859 ++ can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
1860 +
1861 + /* CAN_CTRLMODE_FD_NON_ISO can not be changed with M_CAN IP v3.0.1 */
1862 + priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
1863 +diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
1864 +index 0c5c22b84da8..7de5ab589e4e 100644
1865 +--- a/drivers/net/usb/asix_common.c
1866 ++++ b/drivers/net/usb/asix_common.c
1867 +@@ -66,7 +66,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
1868 + * buffer.
1869 + */
1870 + if (rx->remaining && (rx->remaining + sizeof(u32) <= skb->len)) {
1871 +- offset = ((rx->remaining + 1) & 0xfffe) + sizeof(u32);
1872 ++ offset = ((rx->remaining + 1) & 0xfffe);
1873 + rx->header = get_unaligned_le32(skb->data + offset);
1874 + offset = 0;
1875 +
1876 +diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
1877 +index 1f44a151d206..d5a099b022e4 100644
1878 +--- a/drivers/nfc/st21nfca/i2c.c
1879 ++++ b/drivers/nfc/st21nfca/i2c.c
1880 +@@ -524,8 +524,10 @@ static int st21nfca_hci_i2c_acpi_request_resources(struct i2c_client *client)
1881 + /* Get EN GPIO from ACPI */
1882 + gpiod_ena = devm_gpiod_get_index(dev, ST21NFCA_GPIO_NAME_EN, 1,
1883 + GPIOD_OUT_LOW);
1884 +- if (!IS_ERR(gpiod_ena))
1885 +- phy->gpio_ena = desc_to_gpio(gpiod_ena);
1886 ++ if (IS_ERR(gpiod_ena)) {
1887 ++ nfc_err(dev, "Unable to get ENABLE GPIO\n");
1888 ++ return -ENODEV;
1889 ++ }
1890 +
1891 + phy->gpio_ena = desc_to_gpio(gpiod_ena);
1892 +
1893 +diff --git a/drivers/platform/x86/dell-rbtn.c b/drivers/platform/x86/dell-rbtn.c
1894 +index b51a2008d782..dcd9f40a4b18 100644
1895 +--- a/drivers/platform/x86/dell-rbtn.c
1896 ++++ b/drivers/platform/x86/dell-rbtn.c
1897 +@@ -28,6 +28,7 @@ struct rbtn_data {
1898 + enum rbtn_type type;
1899 + struct rfkill *rfkill;
1900 + struct input_dev *input_dev;
1901 ++ bool suspended;
1902 + };
1903 +
1904 +
1905 +@@ -235,9 +236,55 @@ static const struct acpi_device_id rbtn_ids[] = {
1906 + { "", 0 },
1907 + };
1908 +
1909 ++#ifdef CONFIG_PM_SLEEP
1910 ++static void ACPI_SYSTEM_XFACE rbtn_clear_suspended_flag(void *context)
1911 ++{
1912 ++ struct rbtn_data *rbtn_data = context;
1913 ++
1914 ++ rbtn_data->suspended = false;
1915 ++}
1916 ++
1917 ++static int rbtn_suspend(struct device *dev)
1918 ++{
1919 ++ struct acpi_device *device = to_acpi_device(dev);
1920 ++ struct rbtn_data *rbtn_data = acpi_driver_data(device);
1921 ++
1922 ++ rbtn_data->suspended = true;
1923 ++
1924 ++ return 0;
1925 ++}
1926 ++
1927 ++static int rbtn_resume(struct device *dev)
1928 ++{
1929 ++ struct acpi_device *device = to_acpi_device(dev);
1930 ++ struct rbtn_data *rbtn_data = acpi_driver_data(device);
1931 ++ acpi_status status;
1932 ++
1933 ++ /*
1934 ++ * Upon resume, some BIOSes send an ACPI notification that triggers
1935 ++ * an unwanted input event. In order to ignore it, we use a flag
1936 ++ * that we set at suspend and clear once we have received the extra
1937 ++ * ACPI notification. Since ACPI notifications are delivered
1938 ++ * asynchronously to drivers, we clear the flag from the workqueue
1939 ++ * used to deliver the notifications. This should be enough
1940 ++ * to have the flag cleared only after we received the extra
1941 ++ * notification, if any.
1942 ++ */
1943 ++ status = acpi_os_execute(OSL_NOTIFY_HANDLER,
1944 ++ rbtn_clear_suspended_flag, rbtn_data);
1945 ++ if (ACPI_FAILURE(status))
1946 ++ rbtn_clear_suspended_flag(rbtn_data);
1947 ++
1948 ++ return 0;
1949 ++}
1950 ++#endif
1951 ++
1952 ++static SIMPLE_DEV_PM_OPS(rbtn_pm_ops, rbtn_suspend, rbtn_resume);
1953 ++
1954 + static struct acpi_driver rbtn_driver = {
1955 + .name = "dell-rbtn",
1956 + .ids = rbtn_ids,
1957 ++ .drv.pm = &rbtn_pm_ops,
1958 + .ops = {
1959 + .add = rbtn_add,
1960 + .remove = rbtn_remove,
1961 +@@ -399,6 +446,15 @@ static void rbtn_notify(struct acpi_device *device, u32 event)
1962 + {
1963 + struct rbtn_data *rbtn_data = device->driver_data;
1964 +
1965 ++ /*
1966 ++ * Some BIOSes send a notification at resume.
1967 ++ * Ignore it to prevent unwanted input events.
1968 ++ */
1969 ++ if (rbtn_data->suspended) {
1970 ++ dev_dbg(&device->dev, "ACPI notification ignored\n");
1971 ++ return;
1972 ++ }
1973 ++
1974 + if (event != 0x80) {
1975 + dev_info(&device->dev, "Received unknown event (0x%x)\n",
1976 + event);
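
The dell-rbtn hunks above set a suspended flag at suspend time and clear it from the same context that delivers ACPI notifications, so a notification the firmware fires around resume is still seen with the flag set and gets dropped, while later real events are handled. A minimal single-queue sketch of that ordering trick, with an in-process queue standing in for the ACPI notify workqueue (all names here are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* One ordered delivery queue standing in for the ACPI notify workqueue. */
typedef void (*work_fn)(void *ctx);
static struct { work_fn fn; void *ctx; } queue[16];
static int q_head, q_tail;

static void queue_work(work_fn fn, void *ctx)
{
    queue[q_tail].fn = fn;      /* deliberately tiny, no overflow handling */
    queue[q_tail].ctx = ctx;
    q_tail++;
}

static void run_queue(void)
{
    while (q_head < q_tail) {
        queue[q_head].fn(queue[q_head].ctx);
        q_head++;
    }
}

struct dev_state { bool suspended; };

static void clear_suspended(void *ctx)
{
    ((struct dev_state *)ctx)->suspended = false;
}

static void notify(void *ctx)
{
    struct dev_state *s = ctx;

    if (s->suspended) {         /* spurious resume-time event: drop it */
        puts("notification ignored");
        return;
    }
    puts("notification handled");
}

int main(void)
{
    struct dev_state s = { 0 };

    s.suspended = true;              /* suspend()                          */
    queue_work(notify, &s);          /* firmware event fired around resume */
    queue_work(clear_suspended, &s); /* resume() queues the flag clear     */
    queue_work(notify, &s);          /* a real event arriving later        */
    run_queue();                     /* prints: ignored, then handled      */
    return 0;
}

Because the clear runs on the same ordered queue as the notifications, any event already queued around resume is processed, and ignored, before the flag goes back to false.
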
1977 +diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
1978 +index 97074c91e328..6b5811de6519 100644
1979 +--- a/drivers/scsi/scsi_scan.c
1980 ++++ b/drivers/scsi/scsi_scan.c
1981 +@@ -316,6 +316,7 @@ static void scsi_target_destroy(struct scsi_target *starget)
1982 + struct Scsi_Host *shost = dev_to_shost(dev->parent);
1983 + unsigned long flags;
1984 +
1985 ++ BUG_ON(starget->state == STARGET_DEL);
1986 + starget->state = STARGET_DEL;
1987 + transport_destroy_device(dev);
1988 + spin_lock_irqsave(shost->host_lock, flags);
1989 +diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
1990 +index 2b642b145be1..c7e4b5e0bae3 100644
1991 +--- a/drivers/scsi/scsi_sysfs.c
1992 ++++ b/drivers/scsi/scsi_sysfs.c
1993 +@@ -1366,18 +1366,18 @@ static void __scsi_remove_target(struct scsi_target *starget)
1994 + void scsi_remove_target(struct device *dev)
1995 + {
1996 + struct Scsi_Host *shost = dev_to_shost(dev->parent);
1997 +- struct scsi_target *starget, *last_target = NULL;
1998 ++ struct scsi_target *starget;
1999 + unsigned long flags;
2000 +
2001 + restart:
2002 + spin_lock_irqsave(shost->host_lock, flags);
2003 + list_for_each_entry(starget, &shost->__targets, siblings) {
2004 + if (starget->state == STARGET_DEL ||
2005 +- starget == last_target)
2006 ++ starget->state == STARGET_REMOVE)
2007 + continue;
2008 + if (starget->dev.parent == dev || &starget->dev == dev) {
2009 + kref_get(&starget->reap_ref);
2010 +- last_target = starget;
2011 ++ starget->state = STARGET_REMOVE;
2012 + spin_unlock_irqrestore(shost->host_lock, flags);
2013 + __scsi_remove_target(starget);
2014 + scsi_target_reap(starget);
2015 +diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
2016 +index 940781183fac..3be10963f98b 100644
2017 +--- a/drivers/staging/comedi/drivers/das1800.c
2018 ++++ b/drivers/staging/comedi/drivers/das1800.c
2019 +@@ -567,14 +567,17 @@ static int das1800_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
2020 + struct comedi_isadma_desc *desc;
2021 + int i;
2022 +
2023 +- outb(0x0, dev->iobase + DAS1800_STATUS); /* disable conversions */
2024 +- outb(0x0, dev->iobase + DAS1800_CONTROL_B); /* disable interrupts and dma */
2025 +- outb(0x0, dev->iobase + DAS1800_CONTROL_A); /* disable and clear fifo and stop triggering */
2026 +-
2027 +- for (i = 0; i < 2; i++) {
2028 +- desc = &dma->desc[i];
2029 +- if (desc->chan)
2030 +- comedi_isadma_disable(desc->chan);
2031 ++ /* disable and stop conversions */
2032 ++ outb(0x0, dev->iobase + DAS1800_STATUS);
2033 ++ outb(0x0, dev->iobase + DAS1800_CONTROL_B);
2034 ++ outb(0x0, dev->iobase + DAS1800_CONTROL_A);
2035 ++
2036 ++ if (dma) {
2037 ++ for (i = 0; i < 2; i++) {
2038 ++ desc = &dma->desc[i];
2039 ++ if (desc->chan)
2040 ++ comedi_isadma_disable(desc->chan);
2041 ++ }
2042 + }
2043 +
2044 + return 0;
2045 +@@ -934,13 +937,14 @@ static void das1800_ai_setup_dma(struct comedi_device *dev,
2046 + {
2047 + struct das1800_private *devpriv = dev->private;
2048 + struct comedi_isadma *dma = devpriv->dma;
2049 +- struct comedi_isadma_desc *desc = &dma->desc[0];
2050 ++ struct comedi_isadma_desc *desc;
2051 + unsigned int bytes;
2052 +
2053 + if ((devpriv->irq_dma_bits & DMA_ENABLED) == 0)
2054 + return;
2055 +
2056 + dma->cur_dma = 0;
2057 ++ desc = &dma->desc[0];
2058 +
2059 + /* determine a dma transfer size to fill buffer in 0.3 sec */
2060 + bytes = das1800_ai_transfer_size(dev, s, desc->maxsize, 300000000);
2061 +diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
2062 +index 0dde34e3a7c5..545c60c826a1 100644
2063 +--- a/drivers/thunderbolt/eeprom.c
2064 ++++ b/drivers/thunderbolt/eeprom.c
2065 +@@ -444,6 +444,7 @@ int tb_drom_read(struct tb_switch *sw)
2066 + return tb_drom_parse_entries(sw);
2067 + err:
2068 + kfree(sw->drom);
2069 ++ sw->drom = NULL;
2070 + return -EIO;
2071 +
2072 + }
2073 +diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
2074 +index c01620780f5b..365dfd8bc42b 100644
2075 +--- a/drivers/tty/n_gsm.c
2076 ++++ b/drivers/tty/n_gsm.c
2077 +@@ -2045,7 +2045,9 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm)
2078 + }
2079 + }
2080 + spin_unlock(&gsm_mux_lock);
2081 +- WARN_ON(i == MAX_MUX);
2082 ++ /* open failed before registering => nothing to do */
2083 ++ if (i == MAX_MUX)
2084 ++ return;
2085 +
2086 + /* In theory disconnecting DLCI 0 is sufficient but for some
2087 + modems this is apparently not the case. */
2088 +diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
2089 +index bcaba17688f6..a7fa016f31eb 100644
2090 +--- a/drivers/tty/n_hdlc.c
2091 ++++ b/drivers/tty/n_hdlc.c
2092 +@@ -599,7 +599,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
2093 + add_wait_queue(&tty->read_wait, &wait);
2094 +
2095 + for (;;) {
2096 +- if (test_bit(TTY_OTHER_DONE, &tty->flags)) {
2097 ++ if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
2098 + ret = -EIO;
2099 + break;
2100 + }
2101 +@@ -827,7 +827,7 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
2102 + /* set bits for operations that won't block */
2103 + if (n_hdlc->rx_buf_list.head)
2104 + mask |= POLLIN | POLLRDNORM; /* readable */
2105 +- if (test_bit(TTY_OTHER_DONE, &tty->flags))
2106 ++ if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
2107 + mask |= POLLHUP;
2108 + if (tty_hung_up_p(filp))
2109 + mask |= POLLHUP;
2110 +diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
2111 +index fb76a7d80e7e..bdf0e6e89991 100644
2112 +--- a/drivers/tty/n_tty.c
2113 ++++ b/drivers/tty/n_tty.c
2114 +@@ -1917,18 +1917,6 @@ static inline int input_available_p(struct tty_struct *tty, int poll)
2115 + return ldata->commit_head - ldata->read_tail >= amt;
2116 + }
2117 +
2118 +-static inline int check_other_done(struct tty_struct *tty)
2119 +-{
2120 +- int done = test_bit(TTY_OTHER_DONE, &tty->flags);
2121 +- if (done) {
2122 +- /* paired with cmpxchg() in check_other_closed(); ensures
2123 +- * read buffer head index is not stale
2124 +- */
2125 +- smp_mb__after_atomic();
2126 +- }
2127 +- return done;
2128 +-}
2129 +-
2130 + /**
2131 + * copy_from_read_buf - copy read data directly
2132 + * @tty: terminal device
2133 +@@ -2124,7 +2112,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
2134 + struct n_tty_data *ldata = tty->disc_data;
2135 + unsigned char __user *b = buf;
2136 + DEFINE_WAIT_FUNC(wait, woken_wake_function);
2137 +- int c, done;
2138 ++ int c;
2139 + int minimum, time;
2140 + ssize_t retval = 0;
2141 + long timeout;
2142 +@@ -2183,32 +2171,35 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
2143 + break;
2144 + }
2145 +
2146 +- done = check_other_done(tty);
2147 +-
2148 + if (!input_available_p(tty, 0)) {
2149 +- if (done) {
2150 +- retval = -EIO;
2151 +- break;
2152 +- }
2153 +- if (tty_hung_up_p(file))
2154 +- break;
2155 +- if (!timeout)
2156 +- break;
2157 +- if (file->f_flags & O_NONBLOCK) {
2158 +- retval = -EAGAIN;
2159 +- break;
2160 +- }
2161 +- if (signal_pending(current)) {
2162 +- retval = -ERESTARTSYS;
2163 +- break;
2164 +- }
2165 + up_read(&tty->termios_rwsem);
2166 ++ tty_buffer_flush_work(tty->port);
2167 ++ down_read(&tty->termios_rwsem);
2168 ++ if (!input_available_p(tty, 0)) {
2169 ++ if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
2170 ++ retval = -EIO;
2171 ++ break;
2172 ++ }
2173 ++ if (tty_hung_up_p(file))
2174 ++ break;
2175 ++ if (!timeout)
2176 ++ break;
2177 ++ if (file->f_flags & O_NONBLOCK) {
2178 ++ retval = -EAGAIN;
2179 ++ break;
2180 ++ }
2181 ++ if (signal_pending(current)) {
2182 ++ retval = -ERESTARTSYS;
2183 ++ break;
2184 ++ }
2185 ++ up_read(&tty->termios_rwsem);
2186 +
2187 +- timeout = wait_woken(&wait, TASK_INTERRUPTIBLE,
2188 +- timeout);
2189 ++ timeout = wait_woken(&wait, TASK_INTERRUPTIBLE,
2190 ++ timeout);
2191 +
2192 +- down_read(&tty->termios_rwsem);
2193 +- continue;
2194 ++ down_read(&tty->termios_rwsem);
2195 ++ continue;
2196 ++ }
2197 + }
2198 +
2199 + if (ldata->icanon && !L_EXTPROC(tty)) {
2200 +@@ -2386,12 +2377,17 @@ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
2201 +
2202 + poll_wait(file, &tty->read_wait, wait);
2203 + poll_wait(file, &tty->write_wait, wait);
2204 +- if (check_other_done(tty))
2205 +- mask |= POLLHUP;
2206 + if (input_available_p(tty, 1))
2207 + mask |= POLLIN | POLLRDNORM;
2208 ++ else {
2209 ++ tty_buffer_flush_work(tty->port);
2210 ++ if (input_available_p(tty, 1))
2211 ++ mask |= POLLIN | POLLRDNORM;
2212 ++ }
2213 + if (tty->packet && tty->link->ctrl_status)
2214 + mask |= POLLPRI | POLLIN | POLLRDNORM;
2215 ++ if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
2216 ++ mask |= POLLHUP;
2217 + if (tty_hung_up_p(file))
2218 + mask |= POLLHUP;
2219 + if (tty->ops->write && !tty_is_writelocked(tty) &&
2220 +diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
2221 +index cf0dc51a2690..e198996c5b83 100644
2222 +--- a/drivers/tty/pty.c
2223 ++++ b/drivers/tty/pty.c
2224 +@@ -59,7 +59,7 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
2225 + if (!tty->link)
2226 + return;
2227 + set_bit(TTY_OTHER_CLOSED, &tty->link->flags);
2228 +- tty_flip_buffer_push(tty->link->port);
2229 ++ wake_up_interruptible(&tty->link->read_wait);
2230 + wake_up_interruptible(&tty->link->write_wait);
2231 + if (tty->driver->subtype == PTY_TYPE_MASTER) {
2232 + set_bit(TTY_OTHER_CLOSED, &tty->flags);
2233 +@@ -247,9 +247,7 @@ static int pty_open(struct tty_struct *tty, struct file *filp)
2234 + goto out;
2235 +
2236 + clear_bit(TTY_IO_ERROR, &tty->flags);
2237 +- /* TTY_OTHER_CLOSED must be cleared before TTY_OTHER_DONE */
2238 + clear_bit(TTY_OTHER_CLOSED, &tty->link->flags);
2239 +- clear_bit(TTY_OTHER_DONE, &tty->link->flags);
2240 + set_bit(TTY_THROTTLED, &tty->flags);
2241 + return 0;
2242 +
2243 +diff --git a/drivers/tty/serial/8250/8250_mid.c b/drivers/tty/serial/8250/8250_mid.c
2244 +index 88531a36b69c..ed489880e62b 100644
2245 +--- a/drivers/tty/serial/8250/8250_mid.c
2246 ++++ b/drivers/tty/serial/8250/8250_mid.c
2247 +@@ -14,6 +14,7 @@
2248 + #include <linux/pci.h>
2249 +
2250 + #include <linux/dma/hsu.h>
2251 ++#include <linux/8250_pci.h>
2252 +
2253 + #include "8250.h"
2254 +
2255 +@@ -24,6 +25,7 @@
2256 + #define PCI_DEVICE_ID_INTEL_DNV_UART 0x19d8
2257 +
2258 + /* Intel MID Specific registers */
2259 ++#define INTEL_MID_UART_DNV_FISR 0x08
2260 + #define INTEL_MID_UART_PS 0x30
2261 + #define INTEL_MID_UART_MUL 0x34
2262 + #define INTEL_MID_UART_DIV 0x38
2263 +@@ -31,6 +33,7 @@
2264 + struct mid8250;
2265 +
2266 + struct mid8250_board {
2267 ++ unsigned int flags;
2268 + unsigned long freq;
2269 + unsigned int base_baud;
2270 + int (*setup)(struct mid8250 *, struct uart_port *p);
2271 +@@ -88,16 +91,16 @@ static int tng_setup(struct mid8250 *mid, struct uart_port *p)
2272 + static int dnv_handle_irq(struct uart_port *p)
2273 + {
2274 + struct mid8250 *mid = p->private_data;
2275 +- int ret;
2276 +-
2277 +- ret = hsu_dma_irq(&mid->dma_chip, 0);
2278 +- ret |= hsu_dma_irq(&mid->dma_chip, 1);
2279 +-
2280 +- /* For now, letting the HW generate separate interrupt for the UART */
2281 +- if (ret)
2282 +- return ret;
2283 +-
2284 +- return serial8250_handle_irq(p, serial_port_in(p, UART_IIR));
2285 ++ unsigned int fisr = serial_port_in(p, INTEL_MID_UART_DNV_FISR);
2286 ++ int ret = IRQ_NONE;
2287 ++
2288 ++ if (fisr & BIT(2))
2289 ++ ret |= hsu_dma_irq(&mid->dma_chip, 1);
2290 ++ if (fisr & BIT(1))
2291 ++ ret |= hsu_dma_irq(&mid->dma_chip, 0);
2292 ++ if (fisr & BIT(0))
2293 ++ ret |= serial8250_handle_irq(p, serial_port_in(p, UART_IIR));
2294 ++ return ret;
2295 + }
2296 +
2297 + #define DNV_DMA_CHAN_OFFSET 0x80
2298 +@@ -106,12 +109,13 @@ static int dnv_setup(struct mid8250 *mid, struct uart_port *p)
2299 + {
2300 + struct hsu_dma_chip *chip = &mid->dma_chip;
2301 + struct pci_dev *pdev = to_pci_dev(p->dev);
2302 ++ unsigned int bar = FL_GET_BASE(mid->board->flags);
2303 + int ret;
2304 +
2305 + chip->dev = &pdev->dev;
2306 + chip->irq = pdev->irq;
2307 + chip->regs = p->membase;
2308 +- chip->length = pci_resource_len(pdev, 0);
2309 ++ chip->length = pci_resource_len(pdev, bar);
2310 + chip->offset = DNV_DMA_CHAN_OFFSET;
2311 +
2312 + /* Falling back to PIO mode if DMA probing fails */
2313 +@@ -217,6 +221,7 @@ static int mid8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2314 + {
2315 + struct uart_8250_port uart;
2316 + struct mid8250 *mid;
2317 ++ unsigned int bar;
2318 + int ret;
2319 +
2320 + ret = pcim_enable_device(pdev);
2321 +@@ -230,6 +235,7 @@ static int mid8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2322 + return -ENOMEM;
2323 +
2324 + mid->board = (struct mid8250_board *)id->driver_data;
2325 ++ bar = FL_GET_BASE(mid->board->flags);
2326 +
2327 + memset(&uart, 0, sizeof(struct uart_8250_port));
2328 +
2329 +@@ -242,8 +248,8 @@ static int mid8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2330 + uart.port.flags = UPF_SHARE_IRQ | UPF_FIXED_PORT | UPF_FIXED_TYPE;
2331 + uart.port.set_termios = mid8250_set_termios;
2332 +
2333 +- uart.port.mapbase = pci_resource_start(pdev, 0);
2334 +- uart.port.membase = pcim_iomap(pdev, 0, 0);
2335 ++ uart.port.mapbase = pci_resource_start(pdev, bar);
2336 ++ uart.port.membase = pcim_iomap(pdev, bar, 0);
2337 + if (!uart.port.membase)
2338 + return -ENOMEM;
2339 +
2340 +@@ -282,18 +288,21 @@ static void mid8250_remove(struct pci_dev *pdev)
2341 + }
2342 +
2343 + static const struct mid8250_board pnw_board = {
2344 ++ .flags = FL_BASE0,
2345 + .freq = 50000000,
2346 + .base_baud = 115200,
2347 + .setup = pnw_setup,
2348 + };
2349 +
2350 + static const struct mid8250_board tng_board = {
2351 ++ .flags = FL_BASE0,
2352 + .freq = 38400000,
2353 + .base_baud = 1843200,
2354 + .setup = tng_setup,
2355 + };
2356 +
2357 + static const struct mid8250_board dnv_board = {
2358 ++ .flags = FL_BASE1,
2359 + .freq = 133333333,
2360 + .base_baud = 115200,
2361 + .setup = dnv_setup,
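
The dnv_handle_irq() hunk above stops unconditionally polling every possible interrupt source and instead reads one combined status register (FISR) and services only the sources whose bits are set. A small sketch of that dispatch shape; the bit layout and handler hooks are illustrative, not the HSU/8250 API:

#include <stdint.h>

#define SRC_UART    (1u << 0)   /* illustrative FISR-style bit layout */
#define SRC_DMA_RX  (1u << 1)
#define SRC_DMA_TX  (1u << 2)

struct irq_ops {
    uint32_t (*read_status)(void *dev);     /* one read of the shared status reg */
    int (*handle_uart)(void *dev);
    int (*handle_dma)(void *dev, int chan); /* 0 = rx, 1 = tx */
};

/* Returns non-zero if any source was serviced (IRQ_HANDLED-like). */
static int shared_irq_dispatch(const struct irq_ops *ops, void *dev)
{
    uint32_t status = ops->read_status(dev);
    int handled = 0;

    if (status & SRC_DMA_TX)
        handled |= ops->handle_dma(dev, 1);
    if (status & SRC_DMA_RX)
        handled |= ops->handle_dma(dev, 0);
    if (status & SRC_UART)
        handled |= ops->handle_uart(dev);

    return handled;
}
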
2362 +diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
2363 +index 98862aa5bb58..4eedd1da32e6 100644
2364 +--- a/drivers/tty/serial/8250/8250_pci.c
2365 ++++ b/drivers/tty/serial/8250/8250_pci.c
2366 +@@ -1377,6 +1377,9 @@ byt_set_termios(struct uart_port *p, struct ktermios *termios,
2367 + unsigned long m, n;
2368 + u32 reg;
2369 +
2370 ++ /* Gracefully handle the B0 case: fall back to B9600 */
2371 ++ fuart = fuart ? fuart : 9600 * 16;
2372 ++
2373 + /* Get Fuart closer to Fref */
2374 + fuart *= rounddown_pow_of_two(fref / fuart);
2375 +
2376 +diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
2377 +index d9439e6ab719..954941dd8124 100644
2378 +--- a/drivers/tty/serial/atmel_serial.c
2379 ++++ b/drivers/tty/serial/atmel_serial.c
2380 +@@ -274,6 +274,13 @@ static bool atmel_use_dma_rx(struct uart_port *port)
2381 + return atmel_port->use_dma_rx;
2382 + }
2383 +
2384 ++static bool atmel_use_fifo(struct uart_port *port)
2385 ++{
2386 ++ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2387 ++
2388 ++ return atmel_port->fifo_size;
2389 ++}
2390 ++
2391 + static unsigned int atmel_get_lines_status(struct uart_port *port)
2392 + {
2393 + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2394 +@@ -2090,7 +2097,12 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
2395 + mode |= ATMEL_US_USMODE_RS485;
2396 + } else if (termios->c_cflag & CRTSCTS) {
2397 + /* RS232 with hardware handshake (RTS/CTS) */
2398 +- mode |= ATMEL_US_USMODE_HWHS;
2399 ++ if (atmel_use_dma_rx(port) && !atmel_use_fifo(port)) {
2400 ++ dev_info(port->dev, "not enabling hardware flow control because DMA is used");
2401 ++ termios->c_cflag &= ~CRTSCTS;
2402 ++ } else {
2403 ++ mode |= ATMEL_US_USMODE_HWHS;
2404 ++ }
2405 + } else {
2406 + /* RS232 without hardware handshake */
2407 + mode |= ATMEL_US_USMODE_NORMAL;
2408 +diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
2409 +index ac7f8df54406..99bb23161dd6 100644
2410 +--- a/drivers/tty/serial/samsung.c
2411 ++++ b/drivers/tty/serial/samsung.c
2412 +@@ -1271,6 +1271,8 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
2413 + /* check to see if we need to change clock source */
2414 +
2415 + if (ourport->baudclk != clk) {
2416 ++ clk_prepare_enable(clk);
2417 ++
2418 + s3c24xx_serial_setsource(port, clk_sel);
2419 +
2420 + if (!IS_ERR(ourport->baudclk)) {
2421 +@@ -1278,8 +1280,6 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
2422 + ourport->baudclk = ERR_PTR(-EINVAL);
2423 + }
2424 +
2425 +- clk_prepare_enable(clk);
2426 +-
2427 + ourport->baudclk = clk;
2428 + ourport->baudclk_rate = clk ? clk_get_rate(clk) : 0;
2429 + }
2430 +diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
2431 +index a946e49a2626..aa80dc94ddc2 100644
2432 +--- a/drivers/tty/tty_buffer.c
2433 ++++ b/drivers/tty/tty_buffer.c
2434 +@@ -37,29 +37,6 @@
2435 +
2436 + #define TTY_BUFFER_PAGE (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF)
2437 +
2438 +-/*
2439 +- * If all tty flip buffers have been processed by flush_to_ldisc() or
2440 +- * dropped by tty_buffer_flush(), check if the linked pty has been closed.
2441 +- * If so, wake the reader/poll to process
2442 +- */
2443 +-static inline void check_other_closed(struct tty_struct *tty)
2444 +-{
2445 +- unsigned long flags, old;
2446 +-
2447 +- /* transition from TTY_OTHER_CLOSED => TTY_OTHER_DONE must be atomic */
2448 +- for (flags = ACCESS_ONCE(tty->flags);
2449 +- test_bit(TTY_OTHER_CLOSED, &flags);
2450 +- ) {
2451 +- old = flags;
2452 +- __set_bit(TTY_OTHER_DONE, &flags);
2453 +- flags = cmpxchg(&tty->flags, old, flags);
2454 +- if (old == flags) {
2455 +- wake_up_interruptible(&tty->read_wait);
2456 +- break;
2457 +- }
2458 +- }
2459 +-}
2460 +-
2461 + /**
2462 + * tty_buffer_lock_exclusive - gain exclusive access to buffer
2463 + * tty_buffer_unlock_exclusive - release exclusive access
2464 +@@ -254,8 +231,6 @@ void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld)
2465 + if (ld && ld->ops->flush_buffer)
2466 + ld->ops->flush_buffer(tty);
2467 +
2468 +- check_other_closed(tty);
2469 +-
2470 + atomic_dec(&buf->priority);
2471 + mutex_unlock(&buf->lock);
2472 + }
2473 +@@ -522,10 +497,8 @@ static void flush_to_ldisc(struct work_struct *work)
2474 + */
2475 + count = smp_load_acquire(&head->commit) - head->read;
2476 + if (!count) {
2477 +- if (next == NULL) {
2478 +- check_other_closed(tty);
2479 ++ if (next == NULL)
2480 + break;
2481 +- }
2482 + buf->head = next;
2483 + tty_buffer_free(port, head);
2484 + continue;
2485 +@@ -614,3 +587,8 @@ bool tty_buffer_cancel_work(struct tty_port *port)
2486 + {
2487 + return cancel_work_sync(&port->buf.work);
2488 + }
2489 ++
2490 ++void tty_buffer_flush_work(struct tty_port *port)
2491 ++{
2492 ++ flush_work(&port->buf.work);
2493 ++}
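
Taken together, the n_tty, pty and tty_buffer hunks above drop the TTY_OTHER_DONE handshake: a reader that finds no input first flushes any pending flip-buffer work via the new tty_buffer_flush_work() and looks again, and only reports EOF/hangup if there is still nothing and the peer is closed. A condensed POSIX-threads sketch of that "flush deferred work, then decide" pattern, with illustrative names for the port state:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct port {
    pthread_mutex_t lock;
    size_t pending;     /* bytes queued but not yet pushed to readers */
    size_t available;   /* bytes readers can consume right now        */
    bool other_closed;  /* peer has hung up                           */
};

/* Stand-in for flush_work(&port->buf.work): push queued data synchronously. */
static void flush_buffer_work(struct port *p)
{
    pthread_mutex_lock(&p->lock);
    p->available += p->pending;
    p->pending = 0;
    pthread_mutex_unlock(&p->lock);
}

enum read_status { HAVE_DATA, WOULD_BLOCK, HUNG_UP };

static enum read_status reader_poll(struct port *p)
{
    enum read_status st;

    pthread_mutex_lock(&p->lock);
    st = p->available ? HAVE_DATA : WOULD_BLOCK;
    pthread_mutex_unlock(&p->lock);
    if (st == HAVE_DATA)
        return st;

    /* Nothing visible yet: drain deferred work, then look again. */
    flush_buffer_work(p);

    pthread_mutex_lock(&p->lock);
    if (p->available)
        st = HAVE_DATA;
    else if (p->other_closed)
        st = HUNG_UP;   /* peer closed and truly nothing left */
    pthread_mutex_unlock(&p->lock);
    return st;
}
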
2494 +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
2495 +index 3e3c7575e92d..bd523adb9643 100644
2496 +--- a/drivers/tty/vt/vt.c
2497 ++++ b/drivers/tty/vt/vt.c
2498 +@@ -3583,9 +3583,10 @@ static int do_register_con_driver(const struct consw *csw, int first, int last)
2499 + goto err;
2500 +
2501 + desc = csw->con_startup();
2502 +-
2503 +- if (!desc)
2504 ++ if (!desc) {
2505 ++ retval = -ENODEV;
2506 + goto err;
2507 ++ }
2508 +
2509 + retval = -EINVAL;
2510 +
2511 +diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
2512 +index 2057d91d8336..dadd1e8dfe09 100644
2513 +--- a/drivers/usb/core/driver.c
2514 ++++ b/drivers/usb/core/driver.c
2515 +@@ -284,7 +284,7 @@ static int usb_probe_interface(struct device *dev)
2516 + struct usb_device *udev = interface_to_usbdev(intf);
2517 + const struct usb_device_id *id;
2518 + int error = -ENODEV;
2519 +- int lpm_disable_error;
2520 ++ int lpm_disable_error = -ENODEV;
2521 +
2522 + dev_dbg(dev, "%s\n", __func__);
2523 +
2524 +@@ -336,12 +336,14 @@ static int usb_probe_interface(struct device *dev)
2525 + * setting during probe, that should also be fine. usb_set_interface()
2526 + * will attempt to disable LPM, and fail if it can't disable it.
2527 + */
2528 +- lpm_disable_error = usb_unlocked_disable_lpm(udev);
2529 +- if (lpm_disable_error && driver->disable_hub_initiated_lpm) {
2530 +- dev_err(&intf->dev, "%s Failed to disable LPM for driver %s\n.",
2531 +- __func__, driver->name);
2532 +- error = lpm_disable_error;
2533 +- goto err;
2534 ++ if (driver->disable_hub_initiated_lpm) {
2535 ++ lpm_disable_error = usb_unlocked_disable_lpm(udev);
2536 ++ if (lpm_disable_error) {
2537 ++ dev_err(&intf->dev, "%s Failed to disable LPM for driver %s\n.",
2538 ++ __func__, driver->name);
2539 ++ error = lpm_disable_error;
2540 ++ goto err;
2541 ++ }
2542 + }
2543 +
2544 + /* Carry out a deferred switch to altsetting 0 */
2545 +@@ -391,7 +393,8 @@ static int usb_unbind_interface(struct device *dev)
2546 + struct usb_interface *intf = to_usb_interface(dev);
2547 + struct usb_host_endpoint *ep, **eps = NULL;
2548 + struct usb_device *udev;
2549 +- int i, j, error, r, lpm_disable_error;
2550 ++ int i, j, error, r;
2551 ++ int lpm_disable_error = -ENODEV;
2552 +
2553 + intf->condition = USB_INTERFACE_UNBINDING;
2554 +
2555 +@@ -399,12 +402,13 @@ static int usb_unbind_interface(struct device *dev)
2556 + udev = interface_to_usbdev(intf);
2557 + error = usb_autoresume_device(udev);
2558 +
2559 +- /* Hub-initiated LPM policy may change, so attempt to disable LPM until
2560 ++ /* If hub-initiated LPM policy may change, attempt to disable LPM until
2561 + * the driver is unbound. If LPM isn't disabled, that's fine because it
2562 + * wouldn't be enabled unless all the bound interfaces supported
2563 + * hub-initiated LPM.
2564 + */
2565 +- lpm_disable_error = usb_unlocked_disable_lpm(udev);
2566 ++ if (driver->disable_hub_initiated_lpm)
2567 ++ lpm_disable_error = usb_unlocked_disable_lpm(udev);
2568 +
2569 + /*
2570 + * Terminate all URBs for this interface unless the driver
2571 +@@ -505,7 +509,7 @@ int usb_driver_claim_interface(struct usb_driver *driver,
2572 + struct device *dev;
2573 + struct usb_device *udev;
2574 + int retval = 0;
2575 +- int lpm_disable_error;
2576 ++ int lpm_disable_error = -ENODEV;
2577 +
2578 + if (!iface)
2579 + return -ENODEV;
2580 +@@ -526,12 +530,14 @@ int usb_driver_claim_interface(struct usb_driver *driver,
2581 +
2582 + iface->condition = USB_INTERFACE_BOUND;
2583 +
2584 +- /* Disable LPM until this driver is bound. */
2585 +- lpm_disable_error = usb_unlocked_disable_lpm(udev);
2586 +- if (lpm_disable_error && driver->disable_hub_initiated_lpm) {
2587 +- dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n.",
2588 +- __func__, driver->name);
2589 +- return -ENOMEM;
2590 ++ /* See the comment about disabling LPM in usb_probe_interface(). */
2591 ++ if (driver->disable_hub_initiated_lpm) {
2592 ++ lpm_disable_error = usb_unlocked_disable_lpm(udev);
2593 ++ if (lpm_disable_error) {
2594 ++ dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n.",
2595 ++ __func__, driver->name);
2596 ++ return -ENOMEM;
2597 ++ }
2598 + }
2599 +
2600 + /* Claimed interfaces are initially inactive (suspended) and
2601 +diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
2602 +index 2ca2cef7f681..980fc5774151 100644
2603 +--- a/drivers/usb/core/hcd.c
2604 ++++ b/drivers/usb/core/hcd.c
2605 +@@ -994,7 +994,7 @@ static void usb_bus_init (struct usb_bus *bus)
2606 + bus->bandwidth_allocated = 0;
2607 + bus->bandwidth_int_reqs = 0;
2608 + bus->bandwidth_isoc_reqs = 0;
2609 +- mutex_init(&bus->usb_address0_mutex);
2610 ++ mutex_init(&bus->devnum_next_mutex);
2611 + }
2612 +
2613 + /*-------------------------------------------------------------------------*/
2614 +@@ -2521,6 +2521,14 @@ struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
2615 + return NULL;
2616 + }
2617 + if (primary_hcd == NULL) {
2618 ++ hcd->address0_mutex = kmalloc(sizeof(*hcd->address0_mutex),
2619 ++ GFP_KERNEL);
2620 ++ if (!hcd->address0_mutex) {
2621 ++ kfree(hcd);
2622 ++ dev_dbg(dev, "hcd address0 mutex alloc failed\n");
2623 ++ return NULL;
2624 ++ }
2625 ++ mutex_init(hcd->address0_mutex);
2626 + hcd->bandwidth_mutex = kmalloc(sizeof(*hcd->bandwidth_mutex),
2627 + GFP_KERNEL);
2628 + if (!hcd->bandwidth_mutex) {
2629 +@@ -2532,6 +2540,7 @@ struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
2630 + dev_set_drvdata(dev, hcd);
2631 + } else {
2632 + mutex_lock(&usb_port_peer_mutex);
2633 ++ hcd->address0_mutex = primary_hcd->address0_mutex;
2634 + hcd->bandwidth_mutex = primary_hcd->bandwidth_mutex;
2635 + hcd->primary_hcd = primary_hcd;
2636 + primary_hcd->primary_hcd = primary_hcd;
2637 +@@ -2598,8 +2607,10 @@ static void hcd_release(struct kref *kref)
2638 + struct usb_hcd *hcd = container_of (kref, struct usb_hcd, kref);
2639 +
2640 + mutex_lock(&usb_port_peer_mutex);
2641 +- if (usb_hcd_is_primary_hcd(hcd))
2642 ++ if (usb_hcd_is_primary_hcd(hcd)) {
2643 ++ kfree(hcd->address0_mutex);
2644 + kfree(hcd->bandwidth_mutex);
2645 ++ }
2646 + if (hcd->shared_hcd) {
2647 + struct usb_hcd *peer = hcd->shared_hcd;
2648 +
2649 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
2650 +index 38cc4bae0a82..1ab42bfbecaf 100644
2651 +--- a/drivers/usb/core/hub.c
2652 ++++ b/drivers/usb/core/hub.c
2653 +@@ -2080,7 +2080,7 @@ static void choose_devnum(struct usb_device *udev)
2654 + struct usb_bus *bus = udev->bus;
2655 +
2656 + /* be safe when more hub events are processed in parallel */
2657 +- mutex_lock(&bus->usb_address0_mutex);
2658 ++ mutex_lock(&bus->devnum_next_mutex);
2659 + if (udev->wusb) {
2660 + devnum = udev->portnum + 1;
2661 + BUG_ON(test_bit(devnum, bus->devmap.devicemap));
2662 +@@ -2098,7 +2098,7 @@ static void choose_devnum(struct usb_device *udev)
2663 + set_bit(devnum, bus->devmap.devicemap);
2664 + udev->devnum = devnum;
2665 + }
2666 +- mutex_unlock(&bus->usb_address0_mutex);
2667 ++ mutex_unlock(&bus->devnum_next_mutex);
2668 + }
2669 +
2670 + static void release_devnum(struct usb_device *udev)
2671 +@@ -4364,7 +4364,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
2672 + if (oldspeed == USB_SPEED_LOW)
2673 + delay = HUB_LONG_RESET_TIME;
2674 +
2675 +- mutex_lock(&hdev->bus->usb_address0_mutex);
2676 ++ mutex_lock(hcd->address0_mutex);
2677 +
2678 + /* Reset the device; full speed may morph to high speed */
2679 + /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
2680 +@@ -4650,7 +4650,7 @@ fail:
2681 + hub_port_disable(hub, port1, 0);
2682 + update_devnum(udev, devnum); /* for disconnect processing */
2683 + }
2684 +- mutex_unlock(&hdev->bus->usb_address0_mutex);
2685 ++ mutex_unlock(hcd->address0_mutex);
2686 + return retval;
2687 + }
2688 +
2689 +diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
2690 +index 15b648cbc75c..73515d54e1cc 100644
2691 +--- a/drivers/usb/gadget/function/f_fs.c
2692 ++++ b/drivers/usb/gadget/function/f_fs.c
2693 +@@ -651,7 +651,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
2694 + if (io_data->read && ret > 0) {
2695 + use_mm(io_data->mm);
2696 + ret = copy_to_iter(io_data->buf, ret, &io_data->data);
2697 +- if (iov_iter_count(&io_data->data))
2698 ++ if (ret != io_data->req->actual && iov_iter_count(&io_data->data))
2699 + ret = -EFAULT;
2700 + unuse_mm(io_data->mm);
2701 + }
2702 +diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
2703 +index acf210f16328..5c6d4d7ca605 100644
2704 +--- a/drivers/usb/gadget/function/f_mass_storage.c
2705 ++++ b/drivers/usb/gadget/function/f_mass_storage.c
2706 +@@ -2977,25 +2977,6 @@ void fsg_common_set_inquiry_string(struct fsg_common *common, const char *vn,
2707 + }
2708 + EXPORT_SYMBOL_GPL(fsg_common_set_inquiry_string);
2709 +
2710 +-int fsg_common_run_thread(struct fsg_common *common)
2711 +-{
2712 +- common->state = FSG_STATE_IDLE;
2713 +- /* Tell the thread to start working */
2714 +- common->thread_task =
2715 +- kthread_create(fsg_main_thread, common, "file-storage");
2716 +- if (IS_ERR(common->thread_task)) {
2717 +- common->state = FSG_STATE_TERMINATED;
2718 +- return PTR_ERR(common->thread_task);
2719 +- }
2720 +-
2721 +- DBG(common, "I/O thread pid: %d\n", task_pid_nr(common->thread_task));
2722 +-
2723 +- wake_up_process(common->thread_task);
2724 +-
2725 +- return 0;
2726 +-}
2727 +-EXPORT_SYMBOL_GPL(fsg_common_run_thread);
2728 +-
2729 + static void fsg_common_release(struct kref *ref)
2730 + {
2731 + struct fsg_common *common = container_of(ref, struct fsg_common, ref);
2732 +@@ -3005,6 +2986,7 @@ static void fsg_common_release(struct kref *ref)
2733 + if (common->state != FSG_STATE_TERMINATED) {
2734 + raise_exception(common, FSG_STATE_EXIT);
2735 + wait_for_completion(&common->thread_notifier);
2736 ++ common->thread_task = NULL;
2737 + }
2738 +
2739 + for (i = 0; i < ARRAY_SIZE(common->luns); ++i) {
2740 +@@ -3050,9 +3032,21 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
2741 + if (ret)
2742 + return ret;
2743 + fsg_common_set_inquiry_string(fsg->common, NULL, NULL);
2744 +- ret = fsg_common_run_thread(fsg->common);
2745 +- if (ret)
2746 ++ }
2747 ++
2748 ++ if (!common->thread_task) {
2749 ++ common->state = FSG_STATE_IDLE;
2750 ++ common->thread_task =
2751 ++ kthread_create(fsg_main_thread, common, "file-storage");
2752 ++ if (IS_ERR(common->thread_task)) {
2753 ++ int ret = PTR_ERR(common->thread_task);
2754 ++ common->thread_task = NULL;
2755 ++ common->state = FSG_STATE_TERMINATED;
2756 + return ret;
2757 ++ }
2758 ++ DBG(common, "I/O thread pid: %d\n",
2759 ++ task_pid_nr(common->thread_task));
2760 ++ wake_up_process(common->thread_task);
2761 + }
2762 +
2763 + fsg->gadget = gadget;
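
The f_mass_storage hunk above moves worker-thread creation out of the removed fsg_common_run_thread() and into fsg_bind(), where the thread is created only if one does not already exist, so repeated binds reuse it. A minimal sketch of that create-on-first-bind pattern using POSIX threads; the structure and function names are illustrative:

#include <pthread.h>
#include <stddef.h>

struct common {
    pthread_t worker;
    int have_worker;    /* worker already created by an earlier bind */
};

static void *worker_main(void *arg) { (void)arg; return NULL; }

/* Called on every bind; the thread is only created the first time. */
static int bind_one(struct common *c)
{
    int err;

    if (c->have_worker)
        return 0;

    err = pthread_create(&c->worker, NULL, worker_main, c);
    if (err)
        return -err;    /* leave have_worker clear so a later bind retries */
    c->have_worker = 1;
    return 0;
}
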
2764 +diff --git a/drivers/usb/gadget/function/f_mass_storage.h b/drivers/usb/gadget/function/f_mass_storage.h
2765 +index 445df6775609..b6a9918eaefb 100644
2766 +--- a/drivers/usb/gadget/function/f_mass_storage.h
2767 ++++ b/drivers/usb/gadget/function/f_mass_storage.h
2768 +@@ -153,8 +153,6 @@ int fsg_common_create_luns(struct fsg_common *common, struct fsg_config *cfg);
2769 + void fsg_common_set_inquiry_string(struct fsg_common *common, const char *vn,
2770 + const char *pn);
2771 +
2772 +-int fsg_common_run_thread(struct fsg_common *common);
2773 +-
2774 + void fsg_config_from_params(struct fsg_config *cfg,
2775 + const struct fsg_module_parameters *params,
2776 + unsigned int fsg_num_buffers);
2777 +diff --git a/drivers/usb/gadget/legacy/acm_ms.c b/drivers/usb/gadget/legacy/acm_ms.c
2778 +index c16089efc322..c39de65a448b 100644
2779 +--- a/drivers/usb/gadget/legacy/acm_ms.c
2780 ++++ b/drivers/usb/gadget/legacy/acm_ms.c
2781 +@@ -133,10 +133,6 @@ static int acm_ms_do_config(struct usb_configuration *c)
2782 + if (status < 0)
2783 + goto put_msg;
2784 +
2785 +- status = fsg_common_run_thread(opts->common);
2786 +- if (status)
2787 +- goto remove_acm;
2788 +-
2789 + status = usb_add_function(c, f_msg);
2790 + if (status)
2791 + goto remove_acm;
2792 +diff --git a/drivers/usb/gadget/legacy/mass_storage.c b/drivers/usb/gadget/legacy/mass_storage.c
2793 +index e61af53c7d2b..125974f32f50 100644
2794 +--- a/drivers/usb/gadget/legacy/mass_storage.c
2795 ++++ b/drivers/usb/gadget/legacy/mass_storage.c
2796 +@@ -132,10 +132,6 @@ static int msg_do_config(struct usb_configuration *c)
2797 + if (IS_ERR(f_msg))
2798 + return PTR_ERR(f_msg);
2799 +
2800 +- ret = fsg_common_run_thread(opts->common);
2801 +- if (ret)
2802 +- goto put_func;
2803 +-
2804 + ret = usb_add_function(c, f_msg);
2805 + if (ret)
2806 + goto put_func;
2807 +diff --git a/drivers/usb/gadget/legacy/multi.c b/drivers/usb/gadget/legacy/multi.c
2808 +index 229d704a620b..a70a406580ea 100644
2809 +--- a/drivers/usb/gadget/legacy/multi.c
2810 ++++ b/drivers/usb/gadget/legacy/multi.c
2811 +@@ -137,7 +137,6 @@ static struct usb_function *f_msg_rndis;
2812 +
2813 + static int rndis_do_config(struct usb_configuration *c)
2814 + {
2815 +- struct fsg_opts *fsg_opts;
2816 + int ret;
2817 +
2818 + if (gadget_is_otg(c->cdev->gadget)) {
2819 +@@ -169,11 +168,6 @@ static int rndis_do_config(struct usb_configuration *c)
2820 + goto err_fsg;
2821 + }
2822 +
2823 +- fsg_opts = fsg_opts_from_func_inst(fi_msg);
2824 +- ret = fsg_common_run_thread(fsg_opts->common);
2825 +- if (ret)
2826 +- goto err_run;
2827 +-
2828 + ret = usb_add_function(c, f_msg_rndis);
2829 + if (ret)
2830 + goto err_run;
2831 +@@ -225,7 +219,6 @@ static struct usb_function *f_msg_multi;
2832 +
2833 + static int cdc_do_config(struct usb_configuration *c)
2834 + {
2835 +- struct fsg_opts *fsg_opts;
2836 + int ret;
2837 +
2838 + if (gadget_is_otg(c->cdev->gadget)) {
2839 +@@ -258,11 +251,6 @@ static int cdc_do_config(struct usb_configuration *c)
2840 + goto err_fsg;
2841 + }
2842 +
2843 +- fsg_opts = fsg_opts_from_func_inst(fi_msg);
2844 +- ret = fsg_common_run_thread(fsg_opts->common);
2845 +- if (ret)
2846 +- goto err_run;
2847 +-
2848 + ret = usb_add_function(c, f_msg_multi);
2849 + if (ret)
2850 + goto err_run;
2851 +diff --git a/drivers/usb/gadget/legacy/nokia.c b/drivers/usb/gadget/legacy/nokia.c
2852 +index 09975046c694..b1e535f4022e 100644
2853 +--- a/drivers/usb/gadget/legacy/nokia.c
2854 ++++ b/drivers/usb/gadget/legacy/nokia.c
2855 +@@ -152,7 +152,6 @@ static int nokia_bind_config(struct usb_configuration *c)
2856 + struct usb_function *f_ecm;
2857 + struct usb_function *f_obex2 = NULL;
2858 + struct usb_function *f_msg;
2859 +- struct fsg_opts *fsg_opts;
2860 + int status = 0;
2861 + int obex1_stat = -1;
2862 + int obex2_stat = -1;
2863 +@@ -222,12 +221,6 @@ static int nokia_bind_config(struct usb_configuration *c)
2864 + goto err_ecm;
2865 + }
2866 +
2867 +- fsg_opts = fsg_opts_from_func_inst(fi_msg);
2868 +-
2869 +- status = fsg_common_run_thread(fsg_opts->common);
2870 +- if (status)
2871 +- goto err_msg;
2872 +-
2873 + status = usb_add_function(c, f_msg);
2874 + if (status)
2875 + goto err_msg;
2876 +diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
2877 +index e4e70e11d0f6..c6e76465065a 100644
2878 +--- a/drivers/usb/gadget/udc/udc-core.c
2879 ++++ b/drivers/usb/gadget/udc/udc-core.c
2880 +@@ -75,7 +75,7 @@ int usb_gadget_map_request(struct usb_gadget *gadget,
2881 + mapped = dma_map_sg(dev, req->sg, req->num_sgs,
2882 + is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
2883 + if (mapped == 0) {
2884 +- dev_err(&gadget->dev, "failed to map SGs\n");
2885 ++ dev_err(dev, "failed to map SGs\n");
2886 + return -EFAULT;
2887 + }
2888 +
2889 +diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
2890 +index 3050b18b2447..e9d4dde3e9b3 100644
2891 +--- a/drivers/usb/host/Kconfig
2892 ++++ b/drivers/usb/host/Kconfig
2893 +@@ -35,6 +35,7 @@ config USB_XHCI_PCI
2894 +
2895 + config USB_XHCI_PLATFORM
2896 + tristate "Generic xHCI driver for a platform device"
2897 ++ select USB_XHCI_RCAR if ARCH_RENESAS
2898 + ---help---
2899 + Adds an xHCI host driver for a generic platform device, which
2900 + provides a memory space and an irq.
2901 +@@ -63,7 +64,7 @@ config USB_XHCI_MVEBU
2902 +
2903 + config USB_XHCI_RCAR
2904 + tristate "xHCI support for Renesas R-Car SoCs"
2905 +- select USB_XHCI_PLATFORM
2906 ++ depends on USB_XHCI_PLATFORM
2907 + depends on ARCH_RENESAS || COMPILE_TEST
2908 + ---help---
2909 + Say 'Y' to enable the support for the xHCI host controller
2910 +diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
2911 +index 92fdb6e9faff..c78ff95a43be 100644
2912 +--- a/drivers/usb/misc/usbtest.c
2913 ++++ b/drivers/usb/misc/usbtest.c
2914 +@@ -529,6 +529,7 @@ static struct scatterlist *
2915 + alloc_sglist(int nents, int max, int vary, struct usbtest_dev *dev, int pipe)
2916 + {
2917 + struct scatterlist *sg;
2918 ++ unsigned int n_size = 0;
2919 + unsigned i;
2920 + unsigned size = max;
2921 + unsigned maxpacket =
2922 +@@ -561,7 +562,8 @@ alloc_sglist(int nents, int max, int vary, struct usbtest_dev *dev, int pipe)
2923 + break;
2924 + case 1:
2925 + for (j = 0; j < size; j++)
2926 +- *buf++ = (u8) ((j % maxpacket) % 63);
2927 ++ *buf++ = (u8) (((j + n_size) % maxpacket) % 63);
2928 ++ n_size += size;
2929 + break;
2930 + }
2931 +
2932 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
2933 +index 7c9f25e9c422..a7c8d26a322b 100644
2934 +--- a/drivers/usb/serial/cp210x.c
2935 ++++ b/drivers/usb/serial/cp210x.c
2936 +@@ -971,8 +971,7 @@ static void cp210x_set_termios(struct tty_struct *tty,
2937 + } else {
2938 + modem_ctl[0] &= ~0x7B;
2939 + modem_ctl[0] |= 0x01;
2940 +- /* FIXME - OR here instead of assignment looks wrong */
2941 +- modem_ctl[4] |= 0x40;
2942 ++ modem_ctl[4] = 0x40;
2943 + dev_dbg(dev, "%s - flow control = NONE\n", __func__);
2944 + }
2945 +
2946 +diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
2947 +index f3007ecdd1b4..11c05ce2f35f 100644
2948 +--- a/drivers/usb/serial/io_edgeport.c
2949 ++++ b/drivers/usb/serial/io_edgeport.c
2950 +@@ -2849,14 +2849,16 @@ static int edge_startup(struct usb_serial *serial)
2951 + /* not set up yet, so do it now */
2952 + edge_serial->interrupt_read_urb =
2953 + usb_alloc_urb(0, GFP_KERNEL);
2954 +- if (!edge_serial->interrupt_read_urb)
2955 +- return -ENOMEM;
2956 ++ if (!edge_serial->interrupt_read_urb) {
2957 ++ response = -ENOMEM;
2958 ++ break;
2959 ++ }
2960 +
2961 + edge_serial->interrupt_in_buffer =
2962 + kmalloc(buffer_size, GFP_KERNEL);
2963 + if (!edge_serial->interrupt_in_buffer) {
2964 +- usb_free_urb(edge_serial->interrupt_read_urb);
2965 +- return -ENOMEM;
2966 ++ response = -ENOMEM;
2967 ++ break;
2968 + }
2969 + edge_serial->interrupt_in_endpoint =
2970 + endpoint->bEndpointAddress;
2971 +@@ -2884,14 +2886,16 @@ static int edge_startup(struct usb_serial *serial)
2972 + /* not set up yet, so do it now */
2973 + edge_serial->read_urb =
2974 + usb_alloc_urb(0, GFP_KERNEL);
2975 +- if (!edge_serial->read_urb)
2976 +- return -ENOMEM;
2977 ++ if (!edge_serial->read_urb) {
2978 ++ response = -ENOMEM;
2979 ++ break;
2980 ++ }
2981 +
2982 + edge_serial->bulk_in_buffer =
2983 + kmalloc(buffer_size, GFP_KERNEL);
2984 + if (!edge_serial->bulk_in_buffer) {
2985 +- usb_free_urb(edge_serial->read_urb);
2986 +- return -ENOMEM;
2987 ++ response = -ENOMEM;
2988 ++ break;
2989 + }
2990 + edge_serial->bulk_in_endpoint =
2991 + endpoint->bEndpointAddress;
2992 +@@ -2917,9 +2921,22 @@ static int edge_startup(struct usb_serial *serial)
2993 + }
2994 + }
2995 +
2996 +- if (!interrupt_in_found || !bulk_in_found || !bulk_out_found) {
2997 +- dev_err(ddev, "Error - the proper endpoints were not found!\n");
2998 +- return -ENODEV;
2999 ++ if (response || !interrupt_in_found || !bulk_in_found ||
3000 ++ !bulk_out_found) {
3001 ++ if (!response) {
3002 ++ dev_err(ddev, "expected endpoints not found\n");
3003 ++ response = -ENODEV;
3004 ++ }
3005 ++
3006 ++ usb_free_urb(edge_serial->interrupt_read_urb);
3007 ++ kfree(edge_serial->interrupt_in_buffer);
3008 ++
3009 ++ usb_free_urb(edge_serial->read_urb);
3010 ++ kfree(edge_serial->bulk_in_buffer);
3011 ++
3012 ++ kfree(edge_serial);
3013 ++
3014 ++ return response;
3015 + }
3016 +
3017 + /* start interrupt read for this edgeport this interrupt will
3018 +@@ -2942,16 +2959,9 @@ static void edge_disconnect(struct usb_serial *serial)
3019 + {
3020 + struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
3021 +
3022 +- /* stop reads and writes on all ports */
3023 +- /* free up our endpoint stuff */
3024 + if (edge_serial->is_epic) {
3025 + usb_kill_urb(edge_serial->interrupt_read_urb);
3026 +- usb_free_urb(edge_serial->interrupt_read_urb);
3027 +- kfree(edge_serial->interrupt_in_buffer);
3028 +-
3029 + usb_kill_urb(edge_serial->read_urb);
3030 +- usb_free_urb(edge_serial->read_urb);
3031 +- kfree(edge_serial->bulk_in_buffer);
3032 + }
3033 + }
3034 +
3035 +@@ -2964,6 +2974,16 @@ static void edge_release(struct usb_serial *serial)
3036 + {
3037 + struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
3038 +
3039 ++ if (edge_serial->is_epic) {
3040 ++ usb_kill_urb(edge_serial->interrupt_read_urb);
3041 ++ usb_free_urb(edge_serial->interrupt_read_urb);
3042 ++ kfree(edge_serial->interrupt_in_buffer);
3043 ++
3044 ++ usb_kill_urb(edge_serial->read_urb);
3045 ++ usb_free_urb(edge_serial->read_urb);
3046 ++ kfree(edge_serial->bulk_in_buffer);
3047 ++ }
3048 ++
3049 + kfree(edge_serial);
3050 + }
3051 +
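The edge_startup() rework above stops returning early from inside the endpoint loop: it records the first error in response, breaks out, and releases every URB and buffer allocated so far in one block (the later hunks then move the matching frees from disconnect to release). A minimal user-space sketch of that single-exit cleanup style, with hypothetical names rather than the driver's:

#include <errno.h>
#include <stdlib.h>

/* Hypothetical resource pair standing in for the URBs/buffers. */
struct resources {
	void *urb_like;
	void *buf_like;
};

static int setup(struct resources *r)
{
	int response = 0;	/* first error wins, as in edge_startup() */

	r->urb_like = NULL;
	r->buf_like = NULL;

	r->urb_like = malloc(64);
	if (!r->urb_like) {
		response = -ENOMEM;
		goto cleanup;
	}

	r->buf_like = malloc(4096);
	if (!r->buf_like) {
		response = -ENOMEM;
		goto cleanup;
	}

	return 0;

cleanup:
	/* One teardown block: free(NULL) is a no-op, so everything can be
	 * released unconditionally, like the combined usb_free_urb()/kfree()
	 * block in the patch. */
	free(r->urb_like);
	free(r->buf_like);
	return response;
}

int main(void)
{
	struct resources r;

	return setup(&r) ? EXIT_FAILURE : EXIT_SUCCESS;
}
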
3052 +diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
3053 +index b6bd8e4a6486..1cf05883f48c 100644
3054 +--- a/drivers/usb/serial/keyspan.c
3055 ++++ b/drivers/usb/serial/keyspan.c
3056 +@@ -2376,6 +2376,10 @@ static void keyspan_release(struct usb_serial *serial)
3057 +
3058 + s_priv = usb_get_serial_data(serial);
3059 +
3060 ++ /* Make sure to unlink the URBs submitted in attach. */
3061 ++ usb_kill_urb(s_priv->instat_urb);
3062 ++ usb_kill_urb(s_priv->indat_urb);
3063 ++
3064 + usb_free_urb(s_priv->instat_urb);
3065 + usb_free_urb(s_priv->indat_urb);
3066 + usb_free_urb(s_priv->glocont_urb);
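This keyspan hunk, like the quatech2 one further down, kills the URBs submitted at attach time before freeing them in release, so a late completion cannot run against memory that is about to be freed. A rough user-space analogy of the same ordering rule (stop the asynchronous worker, wait for it to finish, and only then free what it touches); this is illustration only, not USB code:

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>
#include <unistd.h>

static atomic_bool stop;

static void *worker(void *arg)
{
	char *buf = arg;

	while (!atomic_load(&stop))
		buf[0]++;	/* the "completion handler" touching the buffer */
	return NULL;
}

int main(void)
{
	pthread_t tid;
	char *buf = calloc(1, 64);

	if (!buf)
		return 1;

	pthread_create(&tid, NULL, worker, buf);
	usleep(1000);

	/* Correct teardown order: quiesce first, free second. */
	atomic_store(&stop, true);	/* analogous to usb_kill_urb()        */
	pthread_join(tid, NULL);
	free(buf);			/* analogous to usb_free_urb()/kfree() */
	return 0;
}
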
3067 +diff --git a/drivers/usb/serial/mxuport.c b/drivers/usb/serial/mxuport.c
3068 +index 31a8b47f1ac6..c6596cbcc4b6 100644
3069 +--- a/drivers/usb/serial/mxuport.c
3070 ++++ b/drivers/usb/serial/mxuport.c
3071 +@@ -1259,6 +1259,15 @@ static int mxuport_attach(struct usb_serial *serial)
3072 + return 0;
3073 + }
3074 +
3075 ++static void mxuport_release(struct usb_serial *serial)
3076 ++{
3077 ++ struct usb_serial_port *port0 = serial->port[0];
3078 ++ struct usb_serial_port *port1 = serial->port[1];
3079 ++
3080 ++ usb_serial_generic_close(port1);
3081 ++ usb_serial_generic_close(port0);
3082 ++}
3083 ++
3084 + static int mxuport_open(struct tty_struct *tty, struct usb_serial_port *port)
3085 + {
3086 + struct mxuport_port *mxport = usb_get_serial_port_data(port);
3087 +@@ -1361,6 +1370,7 @@ static struct usb_serial_driver mxuport_device = {
3088 + .probe = mxuport_probe,
3089 + .port_probe = mxuport_port_probe,
3090 + .attach = mxuport_attach,
3091 ++ .release = mxuport_release,
3092 + .calc_num_ports = mxuport_calc_num_ports,
3093 + .open = mxuport_open,
3094 + .close = mxuport_close,
3095 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
3096 +index c6f497f16526..d96d423d00e6 100644
3097 +--- a/drivers/usb/serial/option.c
3098 ++++ b/drivers/usb/serial/option.c
3099 +@@ -375,18 +375,22 @@ static void option_instat_callback(struct urb *urb);
3100 + #define HAIER_PRODUCT_CE81B 0x10f8
3101 + #define HAIER_PRODUCT_CE100 0x2009
3102 +
3103 +-/* Cinterion (formerly Siemens) products */
3104 +-#define SIEMENS_VENDOR_ID 0x0681
3105 +-#define CINTERION_VENDOR_ID 0x1e2d
3106 ++/* Gemalto's Cinterion products (formerly Siemens) */
3107 ++#define SIEMENS_VENDOR_ID 0x0681
3108 ++#define CINTERION_VENDOR_ID 0x1e2d
3109 ++#define CINTERION_PRODUCT_HC25_MDMNET 0x0040
3110 + #define CINTERION_PRODUCT_HC25_MDM 0x0047
3111 +-#define CINTERION_PRODUCT_HC25_MDMNET 0x0040
3112 ++#define CINTERION_PRODUCT_HC28_MDMNET 0x004A /* same for HC28J */
3113 + #define CINTERION_PRODUCT_HC28_MDM 0x004C
3114 +-#define CINTERION_PRODUCT_HC28_MDMNET 0x004A /* same for HC28J */
3115 + #define CINTERION_PRODUCT_EU3_E 0x0051
3116 + #define CINTERION_PRODUCT_EU3_P 0x0052
3117 + #define CINTERION_PRODUCT_PH8 0x0053
3118 + #define CINTERION_PRODUCT_AHXX 0x0055
3119 + #define CINTERION_PRODUCT_PLXX 0x0060
3120 ++#define CINTERION_PRODUCT_PH8_2RMNET 0x0082
3121 ++#define CINTERION_PRODUCT_PH8_AUDIO 0x0083
3122 ++#define CINTERION_PRODUCT_AHXX_2RMNET 0x0084
3123 ++#define CINTERION_PRODUCT_AHXX_AUDIO 0x0085
3124 +
3125 + /* Olivetti products */
3126 + #define OLIVETTI_VENDOR_ID 0x0b3c
3127 +@@ -633,6 +637,10 @@ static const struct option_blacklist_info telit_le922_blacklist_usbcfg3 = {
3128 + .reserved = BIT(1) | BIT(2) | BIT(3),
3129 + };
3130 +
3131 ++static const struct option_blacklist_info cinterion_rmnet2_blacklist = {
3132 ++ .reserved = BIT(4) | BIT(5),
3133 ++};
3134 ++
3135 + static const struct usb_device_id option_ids[] = {
3136 + { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
3137 + { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
3138 +@@ -1602,7 +1610,79 @@ static const struct usb_device_id option_ids[] = {
3139 + .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
3140 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
3141 + .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
3142 +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffe9, 0xff, 0xff, 0xff) },
3143 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff42, 0xff, 0xff, 0xff) },
3144 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff43, 0xff, 0xff, 0xff) },
3145 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff44, 0xff, 0xff, 0xff) },
3146 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff45, 0xff, 0xff, 0xff) },
3147 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff46, 0xff, 0xff, 0xff) },
3148 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff47, 0xff, 0xff, 0xff) },
3149 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff48, 0xff, 0xff, 0xff) },
3150 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff49, 0xff, 0xff, 0xff) },
3151 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4a, 0xff, 0xff, 0xff) },
3152 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4b, 0xff, 0xff, 0xff) },
3153 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4c, 0xff, 0xff, 0xff) },
3154 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4d, 0xff, 0xff, 0xff) },
3155 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4e, 0xff, 0xff, 0xff) },
3156 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4f, 0xff, 0xff, 0xff) },
3157 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff50, 0xff, 0xff, 0xff) },
3158 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff51, 0xff, 0xff, 0xff) },
3159 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff52, 0xff, 0xff, 0xff) },
3160 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff53, 0xff, 0xff, 0xff) },
3161 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff54, 0xff, 0xff, 0xff) },
3162 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff55, 0xff, 0xff, 0xff) },
3163 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff56, 0xff, 0xff, 0xff) },
3164 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff57, 0xff, 0xff, 0xff) },
3165 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff58, 0xff, 0xff, 0xff) },
3166 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff59, 0xff, 0xff, 0xff) },
3167 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5a, 0xff, 0xff, 0xff) },
3168 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5b, 0xff, 0xff, 0xff) },
3169 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5c, 0xff, 0xff, 0xff) },
3170 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5d, 0xff, 0xff, 0xff) },
3171 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5e, 0xff, 0xff, 0xff) },
3172 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5f, 0xff, 0xff, 0xff) },
3173 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff60, 0xff, 0xff, 0xff) },
3174 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff61, 0xff, 0xff, 0xff) },
3175 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff62, 0xff, 0xff, 0xff) },
3176 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff63, 0xff, 0xff, 0xff) },
3177 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff64, 0xff, 0xff, 0xff) },
3178 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff65, 0xff, 0xff, 0xff) },
3179 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff66, 0xff, 0xff, 0xff) },
3180 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff67, 0xff, 0xff, 0xff) },
3181 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff68, 0xff, 0xff, 0xff) },
3182 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff69, 0xff, 0xff, 0xff) },
3183 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6a, 0xff, 0xff, 0xff) },
3184 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6b, 0xff, 0xff, 0xff) },
3185 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6c, 0xff, 0xff, 0xff) },
3186 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6d, 0xff, 0xff, 0xff) },
3187 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6e, 0xff, 0xff, 0xff) },
3188 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6f, 0xff, 0xff, 0xff) },
3189 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff70, 0xff, 0xff, 0xff) },
3190 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff71, 0xff, 0xff, 0xff) },
3191 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff72, 0xff, 0xff, 0xff) },
3192 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff73, 0xff, 0xff, 0xff) },
3193 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff74, 0xff, 0xff, 0xff) },
3194 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff75, 0xff, 0xff, 0xff) },
3195 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff76, 0xff, 0xff, 0xff) },
3196 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff77, 0xff, 0xff, 0xff) },
3197 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff78, 0xff, 0xff, 0xff) },
3198 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff79, 0xff, 0xff, 0xff) },
3199 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7a, 0xff, 0xff, 0xff) },
3200 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7b, 0xff, 0xff, 0xff) },
3201 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7c, 0xff, 0xff, 0xff) },
3202 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7d, 0xff, 0xff, 0xff) },
3203 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7e, 0xff, 0xff, 0xff) },
3204 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7f, 0xff, 0xff, 0xff) },
3205 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff80, 0xff, 0xff, 0xff) },
3206 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff81, 0xff, 0xff, 0xff) },
3207 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff82, 0xff, 0xff, 0xff) },
3208 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff83, 0xff, 0xff, 0xff) },
3209 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff84, 0xff, 0xff, 0xff) },
3210 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff85, 0xff, 0xff, 0xff) },
3211 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff86, 0xff, 0xff, 0xff) },
3212 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff87, 0xff, 0xff, 0xff) },
3213 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff88, 0xff, 0xff, 0xff) },
3214 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff89, 0xff, 0xff, 0xff) },
3215 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8a, 0xff, 0xff, 0xff) },
3216 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8b, 0xff, 0xff, 0xff) },
3217 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8c, 0xff, 0xff, 0xff) },
3218 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8d, 0xff, 0xff, 0xff) },
3219 +@@ -1613,6 +1693,61 @@ static const struct usb_device_id option_ids[] = {
3220 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff92, 0xff, 0xff, 0xff) },
3221 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff93, 0xff, 0xff, 0xff) },
3222 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff94, 0xff, 0xff, 0xff) },
3223 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff9f, 0xff, 0xff, 0xff) },
3224 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa0, 0xff, 0xff, 0xff) },
3225 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa1, 0xff, 0xff, 0xff) },
3226 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa2, 0xff, 0xff, 0xff) },
3227 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa3, 0xff, 0xff, 0xff) },
3228 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa4, 0xff, 0xff, 0xff) },
3229 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa5, 0xff, 0xff, 0xff) },
3230 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa6, 0xff, 0xff, 0xff) },
3231 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa7, 0xff, 0xff, 0xff) },
3232 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa8, 0xff, 0xff, 0xff) },
3233 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa9, 0xff, 0xff, 0xff) },
3234 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffaa, 0xff, 0xff, 0xff) },
3235 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffab, 0xff, 0xff, 0xff) },
3236 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffac, 0xff, 0xff, 0xff) },
3237 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffae, 0xff, 0xff, 0xff) },
3238 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffaf, 0xff, 0xff, 0xff) },
3239 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb0, 0xff, 0xff, 0xff) },
3240 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb1, 0xff, 0xff, 0xff) },
3241 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb2, 0xff, 0xff, 0xff) },
3242 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb3, 0xff, 0xff, 0xff) },
3243 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb4, 0xff, 0xff, 0xff) },
3244 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb5, 0xff, 0xff, 0xff) },
3245 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb6, 0xff, 0xff, 0xff) },
3246 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb7, 0xff, 0xff, 0xff) },
3247 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb8, 0xff, 0xff, 0xff) },
3248 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb9, 0xff, 0xff, 0xff) },
3249 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffba, 0xff, 0xff, 0xff) },
3250 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbb, 0xff, 0xff, 0xff) },
3251 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbc, 0xff, 0xff, 0xff) },
3252 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbd, 0xff, 0xff, 0xff) },
3253 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbe, 0xff, 0xff, 0xff) },
3254 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbf, 0xff, 0xff, 0xff) },
3255 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc0, 0xff, 0xff, 0xff) },
3256 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc1, 0xff, 0xff, 0xff) },
3257 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc2, 0xff, 0xff, 0xff) },
3258 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc3, 0xff, 0xff, 0xff) },
3259 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc4, 0xff, 0xff, 0xff) },
3260 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc5, 0xff, 0xff, 0xff) },
3261 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc6, 0xff, 0xff, 0xff) },
3262 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc7, 0xff, 0xff, 0xff) },
3263 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc8, 0xff, 0xff, 0xff) },
3264 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc9, 0xff, 0xff, 0xff) },
3265 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffca, 0xff, 0xff, 0xff) },
3266 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcb, 0xff, 0xff, 0xff) },
3267 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcc, 0xff, 0xff, 0xff) },
3268 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcd, 0xff, 0xff, 0xff) },
3269 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffce, 0xff, 0xff, 0xff) },
3270 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcf, 0xff, 0xff, 0xff) },
3271 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd0, 0xff, 0xff, 0xff) },
3272 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd1, 0xff, 0xff, 0xff) },
3273 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd2, 0xff, 0xff, 0xff) },
3274 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd3, 0xff, 0xff, 0xff) },
3275 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd4, 0xff, 0xff, 0xff) },
3276 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd5, 0xff, 0xff, 0xff) },
3277 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffe9, 0xff, 0xff, 0xff) },
3278 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffec, 0xff, 0xff, 0xff) },
3279 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffee, 0xff, 0xff, 0xff) },
3280 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xfff6, 0xff, 0xff, 0xff) },
3281 +@@ -1712,7 +1847,13 @@ static const struct usb_device_id option_ids[] = {
3282 + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) },
3283 + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
3284 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
3285 +- { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
3286 ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_2RMNET, 0xff),
3287 ++ .driver_info = (kernel_ulong_t)&cinterion_rmnet2_blacklist },
3288 ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_AUDIO, 0xff),
3289 ++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
3290 ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) },
3291 ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) },
3292 ++ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
3293 + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
3294 + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
3295 + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
3296 +diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
3297 +index 2df8ad5ede89..85acb50a7ee2 100644
3298 +--- a/drivers/usb/serial/quatech2.c
3299 ++++ b/drivers/usb/serial/quatech2.c
3300 +@@ -141,6 +141,7 @@ static void qt2_release(struct usb_serial *serial)
3301 +
3302 + serial_priv = usb_get_serial_data(serial);
3303 +
3304 ++ usb_kill_urb(serial_priv->read_urb);
3305 + usb_free_urb(serial_priv->read_urb);
3306 + kfree(serial_priv->read_buffer);
3307 + kfree(serial_priv);
3308 +diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
3309 +index 6467b91f2245..028618c5eeba 100644
3310 +--- a/drivers/watchdog/sp5100_tco.c
3311 ++++ b/drivers/watchdog/sp5100_tco.c
3312 +@@ -73,6 +73,13 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started."
3313 + /*
3314 + * Some TCO specific functions
3315 + */
3316 ++
3317 ++static bool tco_has_sp5100_reg_layout(struct pci_dev *dev)
3318 ++{
3319 ++ return dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
3320 ++ dev->revision < 0x40;
3321 ++}
3322 ++
3323 + static void tco_timer_start(void)
3324 + {
3325 + u32 val;
3326 +@@ -129,7 +136,7 @@ static void tco_timer_enable(void)
3327 + {
3328 + int val;
3329 +
3330 +- if (sp5100_tco_pci->revision >= 0x40) {
3331 ++ if (!tco_has_sp5100_reg_layout(sp5100_tco_pci)) {
3332 + /* For SB800 or later */
3333 + /* Set the Watchdog timer resolution to 1 sec */
3334 + outb(SB800_PM_WATCHDOG_CONFIG, SB800_IO_PM_INDEX_REG);
3335 +@@ -342,8 +349,7 @@ static unsigned char sp5100_tco_setupdevice(void)
3336 + /*
3337 + * Determine type of southbridge chipset.
3338 + */
3339 +- if (sp5100_tco_pci->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
3340 +- sp5100_tco_pci->revision < 0x40) {
3341 ++ if (tco_has_sp5100_reg_layout(sp5100_tco_pci)) {
3342 + dev_name = SP5100_DEVNAME;
3343 + index_reg = SP5100_IO_PM_INDEX_REG;
3344 + data_reg = SP5100_IO_PM_DATA_REG;
3345 +@@ -388,8 +394,7 @@ static unsigned char sp5100_tco_setupdevice(void)
3346 + * Secondly, Find the watchdog timer MMIO address
3347 + * from SBResource_MMIO register.
3348 + */
3349 +- if (sp5100_tco_pci->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
3350 +- sp5100_tco_pci->revision < 0x40) {
3351 ++ if (tco_has_sp5100_reg_layout(sp5100_tco_pci)) {
3352 + /* Read SBResource_MMIO from PCI config(PCI_Reg: 9Ch) */
3353 + pci_read_config_dword(sp5100_tco_pci,
3354 + SP5100_SB_RESOURCE_MMIO_BASE, &val);
3355 +diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
3356 +index e2c5abbb45ff..3595cffa24ea 100644
3357 +--- a/drivers/watchdog/watchdog_dev.c
3358 ++++ b/drivers/watchdog/watchdog_dev.c
3359 +@@ -736,7 +736,6 @@ static int watchdog_release(struct inode *inode, struct file *file)
3360 + watchdog_ping(wdd);
3361 + }
3362 +
3363 +- cancel_delayed_work_sync(&wd_data->work);
3364 + watchdog_update_worker(wdd);
3365 +
3366 + /* make sure that /dev/watchdog can be re-opened */
3367 +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
3368 +index 84a6a5b3384a..208d19938fdf 100644
3369 +--- a/fs/btrfs/ctree.h
3370 ++++ b/fs/btrfs/ctree.h
3371 +@@ -4122,6 +4122,7 @@ void btrfs_test_inode_set_ops(struct inode *inode);
3372 +
3373 + /* ioctl.c */
3374 + long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
3375 ++long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
3376 + int btrfs_ioctl_get_supported_features(void __user *arg);
3377 + void btrfs_update_iflags(struct inode *inode);
3378 + void btrfs_inherit_iflags(struct inode *inode, struct inode *dir);
3379 +diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
3380 +index 8d7b5a45c005..af5c7fa22e0e 100644
3381 +--- a/fs/btrfs/file.c
3382 ++++ b/fs/btrfs/file.c
3383 +@@ -1596,6 +1596,13 @@ again:
3384 +
3385 + copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
3386 +
3387 ++ num_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
3388 ++ reserve_bytes);
3389 ++ dirty_sectors = round_up(copied + sector_offset,
3390 ++ root->sectorsize);
3391 ++ dirty_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
3392 ++ dirty_sectors);
3393 ++
3394 + /*
3395 + * if we have trouble faulting in the pages, fall
3396 + * back to one page at a time
3397 +@@ -1605,6 +1612,7 @@ again:
3398 +
3399 + if (copied == 0) {
3400 + force_page_uptodate = true;
3401 ++ dirty_sectors = 0;
3402 + dirty_pages = 0;
3403 + } else {
3404 + force_page_uptodate = false;
3405 +@@ -1615,20 +1623,19 @@ again:
3406 + /*
3407 + * If we had a short copy we need to release the excess delaloc
3408 + * bytes we reserved. We need to increment outstanding_extents
3409 +- * because btrfs_delalloc_release_space will decrement it, but
3410 ++ * because btrfs_delalloc_release_space and
3411 ++ * btrfs_delalloc_release_metadata will decrement it, but
3412 + * we still have an outstanding extent for the chunk we actually
3413 + * managed to copy.
3414 + */
3415 +- num_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
3416 +- reserve_bytes);
3417 +- dirty_sectors = round_up(copied + sector_offset,
3418 +- root->sectorsize);
3419 +- dirty_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
3420 +- dirty_sectors);
3421 +-
3422 + if (num_sectors > dirty_sectors) {
3423 +- release_bytes = (write_bytes - copied)
3424 +- & ~((u64)root->sectorsize - 1);
3425 ++ /*
3426 ++ * we round down because we don't want to count
3427 ++ * any partial blocks actually sent through the
3428 ++ * IO machines
3429 ++ */
3430 ++ release_bytes = round_down(release_bytes - copied,
3431 ++ root->sectorsize);
3432 + if (copied > 0) {
3433 + spin_lock(&BTRFS_I(inode)->lock);
3434 + BTRFS_I(inode)->outstanding_extents++;
3435 +@@ -2956,7 +2963,7 @@ const struct file_operations btrfs_file_operations = {
3436 + .fallocate = btrfs_fallocate,
3437 + .unlocked_ioctl = btrfs_ioctl,
3438 + #ifdef CONFIG_COMPAT
3439 +- .compat_ioctl = btrfs_ioctl,
3440 ++ .compat_ioctl = btrfs_compat_ioctl,
3441 + #endif
3442 + .copy_file_range = btrfs_copy_file_range,
3443 + .clone_file_range = btrfs_clone_file_range,
3444 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
3445 +index 2aaba58b4856..167fc3d49450 100644
3446 +--- a/fs/btrfs/inode.c
3447 ++++ b/fs/btrfs/inode.c
3448 +@@ -10184,7 +10184,7 @@ static const struct file_operations btrfs_dir_file_operations = {
3449 + .iterate = btrfs_real_readdir,
3450 + .unlocked_ioctl = btrfs_ioctl,
3451 + #ifdef CONFIG_COMPAT
3452 +- .compat_ioctl = btrfs_ioctl,
3453 ++ .compat_ioctl = btrfs_compat_ioctl,
3454 + #endif
3455 + .release = btrfs_release_file,
3456 + .fsync = btrfs_sync_file,
3457 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
3458 +index 5a23806ae418..f545f81f642d 100644
3459 +--- a/fs/btrfs/ioctl.c
3460 ++++ b/fs/btrfs/ioctl.c
3461 +@@ -5552,3 +5552,24 @@ long btrfs_ioctl(struct file *file, unsigned int
3462 +
3463 + return -ENOTTY;
3464 + }
3465 ++
3466 ++#ifdef CONFIG_COMPAT
3467 ++long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3468 ++{
3469 ++ switch (cmd) {
3470 ++ case FS_IOC32_GETFLAGS:
3471 ++ cmd = FS_IOC_GETFLAGS;
3472 ++ break;
3473 ++ case FS_IOC32_SETFLAGS:
3474 ++ cmd = FS_IOC_SETFLAGS;
3475 ++ break;
3476 ++ case FS_IOC32_GETVERSION:
3477 ++ cmd = FS_IOC_GETVERSION;
3478 ++ break;
3479 ++ default:
3480 ++ return -ENOIOCTLCMD;
3481 ++ }
3482 ++
3483 ++ return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
3484 ++}
3485 ++#endif
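The new btrfs_compat_ioctl() above only remaps the 32-bit FS_IOC32_GETFLAGS/SETFLAGS/GETVERSION numbers onto their native counterparts and forwards the pointer through compat_ptr(); anything else returns -ENOIOCTLCMD. The native side of that path is the ordinary VFS flags ioctl, which can be exercised from user space like this (standard Linux API, nothing btrfs-specific):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>	/* FS_IOC_GETFLAGS, FS_IMMUTABLE_FL, ... */

int main(int argc, char **argv)
{
	int flags, fd;

	if (argc < 2)
		return 1;

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0) {
		perror("FS_IOC_GETFLAGS");
		close(fd);
		return 1;
	}

	printf("flags: 0x%x%s\n", flags,
	       (flags & FS_IMMUTABLE_FL) ? " (immutable)" : "");
	close(fd);
	return 0;
}

A 32-bit build of the same program running on a 64-bit kernel goes through the FS_IOC32_GETFLAGS value, which is exactly the case the compat handler now translates for btrfs.
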
3486 +diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
3487 +index 59727e32ed0f..af0ec2d5ad0e 100644
3488 +--- a/fs/cifs/sess.c
3489 ++++ b/fs/cifs/sess.c
3490 +@@ -400,19 +400,27 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
3491 + sec_blob->LmChallengeResponse.MaximumLength = 0;
3492 +
3493 + sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
3494 +- rc = setup_ntlmv2_rsp(ses, nls_cp);
3495 +- if (rc) {
3496 +- cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
3497 +- goto setup_ntlmv2_ret;
3498 ++ if (ses->user_name != NULL) {
3499 ++ rc = setup_ntlmv2_rsp(ses, nls_cp);
3500 ++ if (rc) {
3501 ++ cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
3502 ++ goto setup_ntlmv2_ret;
3503 ++ }
3504 ++ memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
3505 ++ ses->auth_key.len - CIFS_SESS_KEY_SIZE);
3506 ++ tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
3507 ++
3508 ++ sec_blob->NtChallengeResponse.Length =
3509 ++ cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
3510 ++ sec_blob->NtChallengeResponse.MaximumLength =
3511 ++ cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
3512 ++ } else {
3513 ++ /*
3514 ++ * don't send an NT Response for anonymous access
3515 ++ */
3516 ++ sec_blob->NtChallengeResponse.Length = 0;
3517 ++ sec_blob->NtChallengeResponse.MaximumLength = 0;
3518 + }
3519 +- memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
3520 +- ses->auth_key.len - CIFS_SESS_KEY_SIZE);
3521 +- tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
3522 +-
3523 +- sec_blob->NtChallengeResponse.Length =
3524 +- cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
3525 +- sec_blob->NtChallengeResponse.MaximumLength =
3526 +- cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
3527 +
3528 + if (ses->domainName == NULL) {
3529 + sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
3530 +@@ -670,20 +678,24 @@ sess_auth_lanman(struct sess_data *sess_data)
3531 +
3532 + pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE;
3533 +
3534 +- /* no capabilities flags in old lanman negotiation */
3535 +- pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
3536 +-
3537 +- /* Calculate hash with password and copy into bcc_ptr.
3538 +- * Encryption Key (stored as in cryptkey) gets used if the
3539 +- * security mode bit in Negottiate Protocol response states
3540 +- * to use challenge/response method (i.e. Password bit is 1).
3541 +- */
3542 +- rc = calc_lanman_hash(ses->password, ses->server->cryptkey,
3543 +- ses->server->sec_mode & SECMODE_PW_ENCRYPT ?
3544 +- true : false, lnm_session_key);
3545 +-
3546 +- memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE);
3547 +- bcc_ptr += CIFS_AUTH_RESP_SIZE;
3548 ++ if (ses->user_name != NULL) {
3549 ++ /* no capabilities flags in old lanman negotiation */
3550 ++ pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
3551 ++
3552 ++ /* Calculate hash with password and copy into bcc_ptr.
3553 ++ * Encryption Key (stored as in cryptkey) gets used if the
3554 ++ * security mode bit in Negottiate Protocol response states
3555 ++ * to use challenge/response method (i.e. Password bit is 1).
3556 ++ */
3557 ++ rc = calc_lanman_hash(ses->password, ses->server->cryptkey,
3558 ++ ses->server->sec_mode & SECMODE_PW_ENCRYPT ?
3559 ++ true : false, lnm_session_key);
3560 ++
3561 ++ memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE);
3562 ++ bcc_ptr += CIFS_AUTH_RESP_SIZE;
3563 ++ } else {
3564 ++ pSMB->old_req.PasswordLength = 0;
3565 ++ }
3566 +
3567 + /*
3568 + * can not sign if LANMAN negotiated so no need
3569 +@@ -769,26 +781,31 @@ sess_auth_ntlm(struct sess_data *sess_data)
3570 + capabilities = cifs_ssetup_hdr(ses, pSMB);
3571 +
3572 + pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
3573 +- pSMB->req_no_secext.CaseInsensitivePasswordLength =
3574 +- cpu_to_le16(CIFS_AUTH_RESP_SIZE);
3575 +- pSMB->req_no_secext.CaseSensitivePasswordLength =
3576 +- cpu_to_le16(CIFS_AUTH_RESP_SIZE);
3577 +-
3578 +- /* calculate ntlm response and session key */
3579 +- rc = setup_ntlm_response(ses, sess_data->nls_cp);
3580 +- if (rc) {
3581 +- cifs_dbg(VFS, "Error %d during NTLM authentication\n",
3582 +- rc);
3583 +- goto out;
3584 +- }
3585 ++ if (ses->user_name != NULL) {
3586 ++ pSMB->req_no_secext.CaseInsensitivePasswordLength =
3587 ++ cpu_to_le16(CIFS_AUTH_RESP_SIZE);
3588 ++ pSMB->req_no_secext.CaseSensitivePasswordLength =
3589 ++ cpu_to_le16(CIFS_AUTH_RESP_SIZE);
3590 ++
3591 ++ /* calculate ntlm response and session key */
3592 ++ rc = setup_ntlm_response(ses, sess_data->nls_cp);
3593 ++ if (rc) {
3594 ++ cifs_dbg(VFS, "Error %d during NTLM authentication\n",
3595 ++ rc);
3596 ++ goto out;
3597 ++ }
3598 +
3599 +- /* copy ntlm response */
3600 +- memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
3601 +- CIFS_AUTH_RESP_SIZE);
3602 +- bcc_ptr += CIFS_AUTH_RESP_SIZE;
3603 +- memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
3604 +- CIFS_AUTH_RESP_SIZE);
3605 +- bcc_ptr += CIFS_AUTH_RESP_SIZE;
3606 ++ /* copy ntlm response */
3607 ++ memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
3608 ++ CIFS_AUTH_RESP_SIZE);
3609 ++ bcc_ptr += CIFS_AUTH_RESP_SIZE;
3610 ++ memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
3611 ++ CIFS_AUTH_RESP_SIZE);
3612 ++ bcc_ptr += CIFS_AUTH_RESP_SIZE;
3613 ++ } else {
3614 ++ pSMB->req_no_secext.CaseInsensitivePasswordLength = 0;
3615 ++ pSMB->req_no_secext.CaseSensitivePasswordLength = 0;
3616 ++ }
3617 +
3618 + if (ses->capabilities & CAP_UNICODE) {
3619 + /* unicode strings must be word aligned */
3620 +@@ -878,22 +895,26 @@ sess_auth_ntlmv2(struct sess_data *sess_data)
3621 + /* LM2 password would be here if we supported it */
3622 + pSMB->req_no_secext.CaseInsensitivePasswordLength = 0;
3623 +
3624 +- /* calculate nlmv2 response and session key */
3625 +- rc = setup_ntlmv2_rsp(ses, sess_data->nls_cp);
3626 +- if (rc) {
3627 +- cifs_dbg(VFS, "Error %d during NTLMv2 authentication\n", rc);
3628 +- goto out;
3629 +- }
3630 ++ if (ses->user_name != NULL) {
3631 ++ /* calculate nlmv2 response and session key */
3632 ++ rc = setup_ntlmv2_rsp(ses, sess_data->nls_cp);
3633 ++ if (rc) {
3634 ++ cifs_dbg(VFS, "Error %d during NTLMv2 authentication\n", rc);
3635 ++ goto out;
3636 ++ }
3637 +
3638 +- memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
3639 +- ses->auth_key.len - CIFS_SESS_KEY_SIZE);
3640 +- bcc_ptr += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
3641 ++ memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
3642 ++ ses->auth_key.len - CIFS_SESS_KEY_SIZE);
3643 ++ bcc_ptr += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
3644 +
3645 +- /* set case sensitive password length after tilen may get
3646 +- * assigned, tilen is 0 otherwise.
3647 +- */
3648 +- pSMB->req_no_secext.CaseSensitivePasswordLength =
3649 +- cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
3650 ++ /* set case sensitive password length after tilen may get
3651 ++ * assigned, tilen is 0 otherwise.
3652 ++ */
3653 ++ pSMB->req_no_secext.CaseSensitivePasswordLength =
3654 ++ cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
3655 ++ } else {
3656 ++ pSMB->req_no_secext.CaseSensitivePasswordLength = 0;
3657 ++ }
3658 +
3659 + if (ses->capabilities & CAP_UNICODE) {
3660 + if (sess_data->iov[0].iov_len % 2) {
3661 +diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
3662 +index bc0bb9c34f72..0ffa18094335 100644
3663 +--- a/fs/cifs/smb2glob.h
3664 ++++ b/fs/cifs/smb2glob.h
3665 +@@ -44,6 +44,7 @@
3666 + #define SMB2_OP_DELETE 7
3667 + #define SMB2_OP_HARDLINK 8
3668 + #define SMB2_OP_SET_EOF 9
3669 ++#define SMB2_OP_RMDIR 10
3670 +
3671 + /* Used when constructing chained read requests. */
3672 + #define CHAINED_REQUEST 1
3673 +diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
3674 +index 899bbc86f73e..4f0231e685a9 100644
3675 +--- a/fs/cifs/smb2inode.c
3676 ++++ b/fs/cifs/smb2inode.c
3677 +@@ -80,6 +80,10 @@ smb2_open_op_close(const unsigned int xid, struct cifs_tcon *tcon,
3678 + * SMB2_open() call.
3679 + */
3680 + break;
3681 ++ case SMB2_OP_RMDIR:
3682 ++ tmprc = SMB2_rmdir(xid, tcon, fid.persistent_fid,
3683 ++ fid.volatile_fid);
3684 ++ break;
3685 + case SMB2_OP_RENAME:
3686 + tmprc = SMB2_rename(xid, tcon, fid.persistent_fid,
3687 + fid.volatile_fid, (__le16 *)data);
3688 +@@ -191,8 +195,8 @@ smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
3689 + struct cifs_sb_info *cifs_sb)
3690 + {
3691 + return smb2_open_op_close(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
3692 +- CREATE_NOT_FILE | CREATE_DELETE_ON_CLOSE,
3693 +- NULL, SMB2_OP_DELETE);
3694 ++ CREATE_NOT_FILE,
3695 ++ NULL, SMB2_OP_RMDIR);
3696 + }
3697 +
3698 + int
3699 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
3700 +index 42e1f440eb1e..8f38e33d365b 100644
3701 +--- a/fs/cifs/smb2pdu.c
3702 ++++ b/fs/cifs/smb2pdu.c
3703 +@@ -2575,6 +2575,22 @@ SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
3704 + }
3705 +
3706 + int
3707 ++SMB2_rmdir(const unsigned int xid, struct cifs_tcon *tcon,
3708 ++ u64 persistent_fid, u64 volatile_fid)
3709 ++{
3710 ++ __u8 delete_pending = 1;
3711 ++ void *data;
3712 ++ unsigned int size;
3713 ++
3714 ++ data = &delete_pending;
3715 ++ size = 1; /* sizeof __u8 */
3716 ++
3717 ++ return send_set_info(xid, tcon, persistent_fid, volatile_fid,
3718 ++ current->tgid, FILE_DISPOSITION_INFORMATION, 1, &data,
3719 ++ &size);
3720 ++}
3721 ++
3722 ++int
3723 + SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
3724 + u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
3725 + {
3726 +diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
3727 +index 4f07dc93608d..eb2cde2f64ba 100644
3728 +--- a/fs/cifs/smb2proto.h
3729 ++++ b/fs/cifs/smb2proto.h
3730 +@@ -141,6 +141,8 @@ extern int SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
3731 + extern int SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
3732 + u64 persistent_fid, u64 volatile_fid,
3733 + __le16 *target_file);
3734 ++extern int SMB2_rmdir(const unsigned int xid, struct cifs_tcon *tcon,
3735 ++ u64 persistent_fid, u64 volatile_fid);
3736 + extern int SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
3737 + u64 persistent_fid, u64 volatile_fid,
3738 + __le16 *target_file);
3739 +diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
3740 +index 06f5aa478bf2..1ac263eddc4e 100644
3741 +--- a/fs/crypto/keyinfo.c
3742 ++++ b/fs/crypto/keyinfo.c
3743 +@@ -78,6 +78,67 @@ out:
3744 + return res;
3745 + }
3746 +
3747 ++static int validate_user_key(struct fscrypt_info *crypt_info,
3748 ++ struct fscrypt_context *ctx, u8 *raw_key,
3749 ++ u8 *prefix, int prefix_size)
3750 ++{
3751 ++ u8 *full_key_descriptor;
3752 ++ struct key *keyring_key;
3753 ++ struct fscrypt_key *master_key;
3754 ++ const struct user_key_payload *ukp;
3755 ++ int full_key_len = prefix_size + (FS_KEY_DESCRIPTOR_SIZE * 2) + 1;
3756 ++ int res;
3757 ++
3758 ++ full_key_descriptor = kmalloc(full_key_len, GFP_NOFS);
3759 ++ if (!full_key_descriptor)
3760 ++ return -ENOMEM;
3761 ++
3762 ++ memcpy(full_key_descriptor, prefix, prefix_size);
3763 ++ sprintf(full_key_descriptor + prefix_size,
3764 ++ "%*phN", FS_KEY_DESCRIPTOR_SIZE,
3765 ++ ctx->master_key_descriptor);
3766 ++ full_key_descriptor[full_key_len - 1] = '\0';
3767 ++ keyring_key = request_key(&key_type_logon, full_key_descriptor, NULL);
3768 ++ kfree(full_key_descriptor);
3769 ++ if (IS_ERR(keyring_key))
3770 ++ return PTR_ERR(keyring_key);
3771 ++
3772 ++ if (keyring_key->type != &key_type_logon) {
3773 ++ printk_once(KERN_WARNING
3774 ++ "%s: key type must be logon\n", __func__);
3775 ++ res = -ENOKEY;
3776 ++ goto out;
3777 ++ }
3778 ++ down_read(&keyring_key->sem);
3779 ++ ukp = user_key_payload(keyring_key);
3780 ++ if (ukp->datalen != sizeof(struct fscrypt_key)) {
3781 ++ res = -EINVAL;
3782 ++ up_read(&keyring_key->sem);
3783 ++ goto out;
3784 ++ }
3785 ++ master_key = (struct fscrypt_key *)ukp->data;
3786 ++ BUILD_BUG_ON(FS_AES_128_ECB_KEY_SIZE != FS_KEY_DERIVATION_NONCE_SIZE);
3787 ++
3788 ++ if (master_key->size != FS_AES_256_XTS_KEY_SIZE) {
3789 ++ printk_once(KERN_WARNING
3790 ++ "%s: key size incorrect: %d\n",
3791 ++ __func__, master_key->size);
3792 ++ res = -ENOKEY;
3793 ++ up_read(&keyring_key->sem);
3794 ++ goto out;
3795 ++ }
3796 ++ res = derive_key_aes(ctx->nonce, master_key->raw, raw_key);
3797 ++ up_read(&keyring_key->sem);
3798 ++ if (res)
3799 ++ goto out;
3800 ++
3801 ++ crypt_info->ci_keyring_key = keyring_key;
3802 ++ return 0;
3803 ++out:
3804 ++ key_put(keyring_key);
3805 ++ return res;
3806 ++}
3807 ++
3808 + static void put_crypt_info(struct fscrypt_info *ci)
3809 + {
3810 + if (!ci)
3811 +@@ -91,12 +152,7 @@ static void put_crypt_info(struct fscrypt_info *ci)
3812 + int get_crypt_info(struct inode *inode)
3813 + {
3814 + struct fscrypt_info *crypt_info;
3815 +- u8 full_key_descriptor[FS_KEY_DESC_PREFIX_SIZE +
3816 +- (FS_KEY_DESCRIPTOR_SIZE * 2) + 1];
3817 +- struct key *keyring_key = NULL;
3818 +- struct fscrypt_key *master_key;
3819 + struct fscrypt_context ctx;
3820 +- const struct user_key_payload *ukp;
3821 + struct crypto_skcipher *ctfm;
3822 + const char *cipher_str;
3823 + u8 raw_key[FS_MAX_KEY_SIZE];
3824 +@@ -167,48 +223,24 @@ retry:
3825 + memset(raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE);
3826 + goto got_key;
3827 + }
3828 +- memcpy(full_key_descriptor, FS_KEY_DESC_PREFIX,
3829 +- FS_KEY_DESC_PREFIX_SIZE);
3830 +- sprintf(full_key_descriptor + FS_KEY_DESC_PREFIX_SIZE,
3831 +- "%*phN", FS_KEY_DESCRIPTOR_SIZE,
3832 +- ctx.master_key_descriptor);
3833 +- full_key_descriptor[FS_KEY_DESC_PREFIX_SIZE +
3834 +- (2 * FS_KEY_DESCRIPTOR_SIZE)] = '\0';
3835 +- keyring_key = request_key(&key_type_logon, full_key_descriptor, NULL);
3836 +- if (IS_ERR(keyring_key)) {
3837 +- res = PTR_ERR(keyring_key);
3838 +- keyring_key = NULL;
3839 +- goto out;
3840 +- }
3841 +- crypt_info->ci_keyring_key = keyring_key;
3842 +- if (keyring_key->type != &key_type_logon) {
3843 +- printk_once(KERN_WARNING
3844 +- "%s: key type must be logon\n", __func__);
3845 +- res = -ENOKEY;
3846 +- goto out;
3847 +- }
3848 +- down_read(&keyring_key->sem);
3849 +- ukp = user_key_payload(keyring_key);
3850 +- if (ukp->datalen != sizeof(struct fscrypt_key)) {
3851 +- res = -EINVAL;
3852 +- up_read(&keyring_key->sem);
3853 +- goto out;
3854 +- }
3855 +- master_key = (struct fscrypt_key *)ukp->data;
3856 +- BUILD_BUG_ON(FS_AES_128_ECB_KEY_SIZE != FS_KEY_DERIVATION_NONCE_SIZE);
3857 +
3858 +- if (master_key->size != FS_AES_256_XTS_KEY_SIZE) {
3859 +- printk_once(KERN_WARNING
3860 +- "%s: key size incorrect: %d\n",
3861 +- __func__, master_key->size);
3862 +- res = -ENOKEY;
3863 +- up_read(&keyring_key->sem);
3864 ++ res = validate_user_key(crypt_info, &ctx, raw_key,
3865 ++ FS_KEY_DESC_PREFIX, FS_KEY_DESC_PREFIX_SIZE);
3866 ++ if (res && inode->i_sb->s_cop->key_prefix) {
3867 ++ u8 *prefix = NULL;
3868 ++ int prefix_size, res2;
3869 ++
3870 ++ prefix_size = inode->i_sb->s_cop->key_prefix(inode, &prefix);
3871 ++ res2 = validate_user_key(crypt_info, &ctx, raw_key,
3872 ++ prefix, prefix_size);
3873 ++ if (res2) {
3874 ++ if (res2 == -ENOKEY)
3875 ++ res = -ENOKEY;
3876 ++ goto out;
3877 ++ }
3878 ++ } else if (res) {
3879 + goto out;
3880 + }
3881 +- res = derive_key_aes(ctx.nonce, master_key->raw, raw_key);
3882 +- up_read(&keyring_key->sem);
3883 +- if (res)
3884 +- goto out;
3885 + got_key:
3886 + ctfm = crypto_alloc_skcipher(cipher_str, 0, 0);
3887 + if (!ctfm || IS_ERR(ctfm)) {
3888 +diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
3889 +index 7a4558d17f36..2effb79b5f3e 100644
3890 +--- a/fs/f2fs/f2fs.h
3891 ++++ b/fs/f2fs/f2fs.h
3892 +@@ -680,6 +680,10 @@ enum {
3893 + MAX_TIME,
3894 + };
3895 +
3896 ++#ifdef CONFIG_F2FS_FS_ENCRYPTION
3897 ++#define F2FS_KEY_DESC_PREFIX "f2fs:"
3898 ++#define F2FS_KEY_DESC_PREFIX_SIZE 5
3899 ++#endif
3900 + struct f2fs_sb_info {
3901 + struct super_block *sb; /* pointer to VFS super block */
3902 + struct proc_dir_entry *s_proc; /* proc entry */
3903 +@@ -687,6 +691,10 @@ struct f2fs_sb_info {
3904 + int valid_super_block; /* valid super block no */
3905 + int s_flag; /* flags for sbi */
3906 +
3907 ++#ifdef CONFIG_F2FS_FS_ENCRYPTION
3908 ++ u8 key_prefix[F2FS_KEY_DESC_PREFIX_SIZE];
3909 ++ u8 key_prefix_size;
3910 ++#endif
3911 + /* for node-related operations */
3912 + struct f2fs_nm_info *nm_info; /* node manager */
3913 + struct inode *node_inode; /* cache node blocks */
3914 +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
3915 +index 006f87d69921..e11385bbd4c6 100644
3916 +--- a/fs/f2fs/super.c
3917 ++++ b/fs/f2fs/super.c
3918 +@@ -893,6 +893,12 @@ static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
3919 + ctx, len, NULL);
3920 + }
3921 +
3922 ++static int f2fs_key_prefix(struct inode *inode, u8 **key)
3923 ++{
3924 ++ *key = F2FS_I_SB(inode)->key_prefix;
3925 ++ return F2FS_I_SB(inode)->key_prefix_size;
3926 ++}
3927 ++
3928 + static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
3929 + void *fs_data)
3930 + {
3931 +@@ -909,6 +915,7 @@ static unsigned f2fs_max_namelen(struct inode *inode)
3932 +
3933 + static struct fscrypt_operations f2fs_cryptops = {
3934 + .get_context = f2fs_get_context,
3935 ++ .key_prefix = f2fs_key_prefix,
3936 + .set_context = f2fs_set_context,
3937 + .is_encrypted = f2fs_encrypted_inode,
3938 + .empty_dir = f2fs_empty_dir,
3939 +@@ -1231,6 +1238,12 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
3940 +
3941 + INIT_LIST_HEAD(&sbi->s_list);
3942 + mutex_init(&sbi->umount_mutex);
3943 ++
3944 ++#ifdef CONFIG_F2FS_FS_ENCRYPTION
3945 ++ memcpy(sbi->key_prefix, F2FS_KEY_DESC_PREFIX,
3946 ++ F2FS_KEY_DESC_PREFIX_SIZE);
3947 ++ sbi->key_prefix_size = F2FS_KEY_DESC_PREFIX_SIZE;
3948 ++#endif
3949 + }
3950 +
3951 + /*
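Taken together, the keyinfo.c and f2fs hunks make fscrypt look up a logon key under the generic FS_KEY_DESC_PREFIX first and then fall back to the filesystem-supplied prefix ("f2fs:" here), each followed by the 8-byte master_key_descriptor rendered as 16 hex digits (the "%*phN" format in the kernel code). A small user-space sketch of how that key description string is assembled; the descriptor bytes below are invented for illustration:

#include <stdio.h>

#define FS_KEY_DESCRIPTOR_SIZE 8

/* Build "<prefix><descriptor as hex>", the logon key description that
 * validate_user_key() requests. */
static void build_description(const char *prefix,
			      const unsigned char *desc, char *out, size_t len)
{
	int n = snprintf(out, len, "%s", prefix);

	for (int i = 0; i < FS_KEY_DESCRIPTOR_SIZE; i++)
		n += snprintf(out + n, len - n, "%02x", desc[i]);
}

int main(void)
{
	const unsigned char desc[FS_KEY_DESCRIPTOR_SIZE] = {
		0xde, 0xad, 0xbe, 0xef, 0x01, 0x23, 0x45, 0x67
	};
	char name[64];

	/* The generic FS_KEY_DESC_PREFIX is tried the same way; "f2fs:" is
	 * the per-filesystem fallback added by the hunks above. */
	build_description("f2fs:", desc, name, sizeof(name));
	printf("request_key(\"logon\", \"%s\")\n", name);
	return 0;
}
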
3952 +diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
3953 +index 458cf463047b..82067ca22f2b 100644
3954 +--- a/fs/hpfs/super.c
3955 ++++ b/fs/hpfs/super.c
3956 +@@ -15,6 +15,7 @@
3957 + #include <linux/sched.h>
3958 + #include <linux/bitmap.h>
3959 + #include <linux/slab.h>
3960 ++#include <linux/seq_file.h>
3961 +
3962 + /* Mark the filesystem dirty, so that chkdsk checks it when os/2 booted */
3963 +
3964 +@@ -453,10 +454,6 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
3965 + int lowercase, eas, chk, errs, chkdsk, timeshift;
3966 + int o;
3967 + struct hpfs_sb_info *sbi = hpfs_sb(s);
3968 +- char *new_opts = kstrdup(data, GFP_KERNEL);
3969 +-
3970 +- if (!new_opts)
3971 +- return -ENOMEM;
3972 +
3973 + sync_filesystem(s);
3974 +
3975 +@@ -493,17 +490,44 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
3976 +
3977 + if (!(*flags & MS_RDONLY)) mark_dirty(s, 1);
3978 +
3979 +- replace_mount_options(s, new_opts);
3980 +-
3981 + hpfs_unlock(s);
3982 + return 0;
3983 +
3984 + out_err:
3985 + hpfs_unlock(s);
3986 +- kfree(new_opts);
3987 + return -EINVAL;
3988 + }
3989 +
3990 ++static int hpfs_show_options(struct seq_file *seq, struct dentry *root)
3991 ++{
3992 ++ struct hpfs_sb_info *sbi = hpfs_sb(root->d_sb);
3993 ++
3994 ++ seq_printf(seq, ",uid=%u", from_kuid_munged(&init_user_ns, sbi->sb_uid));
3995 ++ seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, sbi->sb_gid));
3996 ++ seq_printf(seq, ",umask=%03o", (~sbi->sb_mode & 0777));
3997 ++ if (sbi->sb_lowercase)
3998 ++ seq_printf(seq, ",case=lower");
3999 ++ if (!sbi->sb_chk)
4000 ++ seq_printf(seq, ",check=none");
4001 ++ if (sbi->sb_chk == 2)
4002 ++ seq_printf(seq, ",check=strict");
4003 ++ if (!sbi->sb_err)
4004 ++ seq_printf(seq, ",errors=continue");
4005 ++ if (sbi->sb_err == 2)
4006 ++ seq_printf(seq, ",errors=panic");
4007 ++ if (!sbi->sb_chkdsk)
4008 ++ seq_printf(seq, ",chkdsk=no");
4009 ++ if (sbi->sb_chkdsk == 2)
4010 ++ seq_printf(seq, ",chkdsk=always");
4011 ++ if (!sbi->sb_eas)
4012 ++ seq_printf(seq, ",eas=no");
4013 ++ if (sbi->sb_eas == 1)
4014 ++ seq_printf(seq, ",eas=ro");
4015 ++ if (sbi->sb_timeshift)
4016 ++ seq_printf(seq, ",timeshift=%d", sbi->sb_timeshift);
4017 ++ return 0;
4018 ++}
4019 ++
4020 + /* Super operations */
4021 +
4022 + static const struct super_operations hpfs_sops =
4023 +@@ -514,7 +538,7 @@ static const struct super_operations hpfs_sops =
4024 + .put_super = hpfs_put_super,
4025 + .statfs = hpfs_statfs,
4026 + .remount_fs = hpfs_remount_fs,
4027 +- .show_options = generic_show_options,
4028 ++ .show_options = hpfs_show_options,
4029 + };
4030 +
4031 + static int hpfs_fill_super(struct super_block *s, void *options, int silent)
4032 +@@ -537,8 +561,6 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
4033 +
4034 + int o;
4035 +
4036 +- save_mount_options(s, options);
4037 +-
4038 + sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
4039 + if (!sbi) {
4040 + return -ENOMEM;
4041 +diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
4042 +index 618ced381a14..7c9fbf504f07 100644
4043 +--- a/fs/nfs/callback_proc.c
4044 ++++ b/fs/nfs/callback_proc.c
4045 +@@ -500,8 +500,10 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
4046 + cps->slot = slot;
4047 +
4048 + /* The ca_maxresponsesize_cached is 0 with no DRC */
4049 +- if (args->csa_cachethis != 0)
4050 +- return htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
4051 ++ if (args->csa_cachethis != 0) {
4052 ++ status = htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
4053 ++ goto out_unlock;
4054 ++ }
4055 +
4056 + /*
4057 + * Check for pending referring calls. If a match is found, a
4058 +diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
4059 +index 35a52a880b2f..6bd05700d8c9 100644
4060 +--- a/include/asm-generic/qspinlock.h
4061 ++++ b/include/asm-generic/qspinlock.h
4062 +@@ -28,7 +28,30 @@
4063 + */
4064 + static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
4065 + {
4066 +- return atomic_read(&lock->val);
4067 ++ /*
4068 ++ * queued_spin_lock_slowpath() can ACQUIRE the lock before
4069 ++ * issuing the unordered store that sets _Q_LOCKED_VAL.
4070 ++ *
4071 ++ * See both smp_cond_acquire() sites for more detail.
4072 ++ *
4073 ++ * This however means that in code like:
4074 ++ *
4075 ++ * spin_lock(A) spin_lock(B)
4076 ++ * spin_unlock_wait(B) spin_is_locked(A)
4077 ++ * do_something() do_something()
4078 ++ *
4079 ++ * Both CPUs can end up running do_something() because the store
4080 ++ * setting _Q_LOCKED_VAL will pass through the loads in
4081 ++ * spin_unlock_wait() and/or spin_is_locked().
4082 ++ *
4083 ++ * Avoid this by issuing a full memory barrier between the spin_lock()
4084 ++ * and the loads in spin_unlock_wait() and spin_is_locked().
4085 ++ *
4086 ++ * Note that regular mutual exclusion doesn't care about this
4087 ++ * delayed store.
4088 ++ */
4089 ++ smp_mb();
4090 ++ return atomic_read(&lock->val) & _Q_LOCKED_MASK;
4091 + }
4092 +
4093 + /**
4094 +@@ -108,6 +131,8 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
4095 + */
4096 + static inline void queued_spin_unlock_wait(struct qspinlock *lock)
4097 + {
4098 ++ /* See queued_spin_is_locked() */
4099 ++ smp_mb();
4100 + while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
4101 + cpu_relax();
4102 + }
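The comment added to queued_spin_is_locked() describes the classic store-buffering case: each CPU's store that takes its own lock can still be sitting in a store buffer when the other CPU loads the lock word, so both loads can miss both stores unless a full barrier separates the store from the load. The hazard itself can be stated with relaxed C11 atomics; this is illustration only, not the qspinlock code, and with this naive per-iteration thread harness the reordering may be observed only rarely:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int x, y;
static int r0, r1;

static void *t0(void *arg)
{
	atomic_store_explicit(&x, 1, memory_order_relaxed);	/* "take lock A" */
	r0 = atomic_load_explicit(&y, memory_order_relaxed);	/* "is B locked?" */
	return NULL;
}

static void *t1(void *arg)
{
	atomic_store_explicit(&y, 1, memory_order_relaxed);	/* "take lock B" */
	r1 = atomic_load_explicit(&x, memory_order_relaxed);	/* "is A locked?" */
	return NULL;
}

int main(void)
{
	int hits = 0;

	for (int i = 0; i < 100000; i++) {
		pthread_t a, b;

		atomic_store(&x, 0);
		atomic_store(&y, 0);
		pthread_create(&a, NULL, t0, NULL);
		pthread_create(&b, NULL, t1, NULL);
		pthread_join(a, NULL);
		pthread_join(b, NULL);
		if (r0 == 0 && r1 == 0)
			hits++;	/* both "saw the other lock free" */
	}
	printf("r0==r1==0 observed %d times\n", hits);
	return 0;
}

Replacing memory_order_relaxed with memory_order_seq_cst (the analogue of the smp_mb() the patch adds) makes the r0==r1==0 outcome unobservable.
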
4103 +diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h
4104 +index 3d1a3af5cf59..a2508a8f9a9c 100644
4105 +--- a/include/asm-generic/siginfo.h
4106 ++++ b/include/asm-generic/siginfo.h
4107 +@@ -17,21 +17,6 @@
4108 + struct siginfo;
4109 + void do_schedule_next_timer(struct siginfo *info);
4110 +
4111 +-#ifndef HAVE_ARCH_COPY_SIGINFO
4112 +-
4113 +-#include <linux/string.h>
4114 +-
4115 +-static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
4116 +-{
4117 +- if (from->si_code < 0)
4118 +- memcpy(to, from, sizeof(*to));
4119 +- else
4120 +- /* _sigchld is currently the largest know union member */
4121 +- memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
4122 +-}
4123 +-
4124 +-#endif
4125 +-
4126 + extern int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from);
4127 +
4128 + #endif
4129 +diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
4130 +index 735f9f8c4e43..5261751f6bd4 100644
4131 +--- a/include/linux/can/dev.h
4132 ++++ b/include/linux/can/dev.h
4133 +@@ -40,8 +40,11 @@ struct can_priv {
4134 + struct can_clock clock;
4135 +
4136 + enum can_state state;
4137 +- u32 ctrlmode;
4138 +- u32 ctrlmode_supported;
4139 ++
4140 ++ /* CAN controller features - see include/uapi/linux/can/netlink.h */
4141 ++ u32 ctrlmode; /* current options setting */
4142 ++ u32 ctrlmode_supported; /* options that can be modified by netlink */
4143 ++ u32 ctrlmode_static; /* static enabled options for driver/hardware */
4144 +
4145 + int restart_ms;
4146 + struct timer_list restart_timer;
4147 +@@ -108,6 +111,21 @@ static inline bool can_is_canfd_skb(const struct sk_buff *skb)
4148 + return skb->len == CANFD_MTU;
4149 + }
4150 +
4151 ++/* helper to define static CAN controller features at device creation time */
4152 ++static inline void can_set_static_ctrlmode(struct net_device *dev,
4153 ++ u32 static_mode)
4154 ++{
4155 ++ struct can_priv *priv = netdev_priv(dev);
4156 ++
4157 ++ /* alloc_candev() succeeded => netdev_priv() is valid at this point */
4158 ++ priv->ctrlmode = static_mode;
4159 ++ priv->ctrlmode_static = static_mode;
4160 ++
4161 ++ /* override MTU which was set by default in can_setup()? */
4162 ++ if (static_mode & CAN_CTRLMODE_FD)
4163 ++ dev->mtu = CANFD_MTU;
4164 ++}
4165 ++
4166 + /* get data length from can_dlc with sanitized can_dlc */
4167 + u8 can_dlc2len(u8 can_dlc);
4168 +
4169 +diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h
4170 +index 6027f6bbb061..cfa6cde25f8e 100644
4171 +--- a/include/linux/fscrypto.h
4172 ++++ b/include/linux/fscrypto.h
4173 +@@ -175,6 +175,7 @@ struct fscrypt_name {
4174 + */
4175 + struct fscrypt_operations {
4176 + int (*get_context)(struct inode *, void *, size_t);
4177 ++ int (*key_prefix)(struct inode *, u8 **);
4178 + int (*prepare_context)(struct inode *);
4179 + int (*set_context)(struct inode *, const void *, size_t, void *);
4180 + int (*dummy_context)(struct inode *);
4181 +diff --git a/include/linux/signal.h b/include/linux/signal.h
4182 +index 92557bbce7e7..d80259afb9e5 100644
4183 +--- a/include/linux/signal.h
4184 ++++ b/include/linux/signal.h
4185 +@@ -28,6 +28,21 @@ struct sigpending {
4186 + sigset_t signal;
4187 + };
4188 +
4189 ++#ifndef HAVE_ARCH_COPY_SIGINFO
4190 ++
4191 ++#include <linux/string.h>
4192 ++
4193 ++static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
4194 ++{
4195 ++ if (from->si_code < 0)
4196 ++ memcpy(to, from, sizeof(*to));
4197 ++ else
4198 ++ /* _sigchld is currently the largest know union member */
4199 ++ memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
4200 ++}
4201 ++
4202 ++#endif
4203 ++
4204 + /*
4205 + * Define some primitives to manipulate sigset_t.
4206 + */
4207 +diff --git a/include/linux/tty.h b/include/linux/tty.h
4208 +index 3b09f235db66..a34442031aae 100644
4209 +--- a/include/linux/tty.h
4210 ++++ b/include/linux/tty.h
4211 +@@ -338,7 +338,6 @@ struct tty_file_private {
4212 + #define TTY_OTHER_CLOSED 2 /* Other side (if any) has closed */
4213 + #define TTY_EXCLUSIVE 3 /* Exclusive open mode */
4214 + #define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */
4215 +-#define TTY_OTHER_DONE 6 /* Closed pty has completed input processing */
4216 + #define TTY_LDISC_OPEN 11 /* Line discipline is open */
4217 + #define TTY_PTY_LOCK 16 /* pty private */
4218 + #define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */
4219 +@@ -457,6 +456,7 @@ extern void tty_buffer_init(struct tty_port *port);
4220 + extern void tty_buffer_set_lock_subclass(struct tty_port *port);
4221 + extern bool tty_buffer_restart_work(struct tty_port *port);
4222 + extern bool tty_buffer_cancel_work(struct tty_port *port);
4223 ++extern void tty_buffer_flush_work(struct tty_port *port);
4224 + extern speed_t tty_termios_baud_rate(struct ktermios *termios);
4225 + extern speed_t tty_termios_input_baud_rate(struct ktermios *termios);
4226 + extern void tty_termios_encode_baud_rate(struct ktermios *termios,
4227 +diff --git a/include/linux/usb.h b/include/linux/usb.h
4228 +index 6a9a0c28415d..818bf7087996 100644
4229 +--- a/include/linux/usb.h
4230 ++++ b/include/linux/usb.h
4231 +@@ -374,13 +374,12 @@ struct usb_bus {
4232 +
4233 + int devnum_next; /* Next open device number in
4234 + * round-robin allocation */
4235 ++ struct mutex devnum_next_mutex; /* devnum_next mutex */
4236 +
4237 + struct usb_devmap devmap; /* device address allocation map */
4238 + struct usb_device *root_hub; /* Root hub */
4239 + struct usb_bus *hs_companion; /* Companion EHCI bus, if any */
4240 +
4241 +- struct mutex usb_address0_mutex; /* unaddressed device mutex */
4242 +-
4243 + int bandwidth_allocated; /* on this bus: how much of the time
4244 + * reserved for periodic (intr/iso)
4245 + * requests is used, on average?
4246 +@@ -1069,7 +1068,7 @@ struct usbdrv_wrap {
4247 + * for interfaces bound to this driver.
4248 + * @soft_unbind: if set to 1, the USB core will not kill URBs and disable
4249 + * endpoints before calling the driver's disconnect method.
4250 +- * @disable_hub_initiated_lpm: if set to 0, the USB core will not allow hubs
4251 ++ * @disable_hub_initiated_lpm: if set to 1, the USB core will not allow hubs
4252 + * to initiate lower power link state transitions when an idle timeout
4253 + * occurs. Device-initiated USB 3.0 link PM will still be allowed.
4254 + *
4255 +diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
4256 +index b98f831dcda3..66fc13705ab7 100644
4257 +--- a/include/linux/usb/hcd.h
4258 ++++ b/include/linux/usb/hcd.h
4259 +@@ -181,6 +181,7 @@ struct usb_hcd {
4260 + * bandwidth_mutex should be dropped after a successful control message
4261 + * to the device, or resetting the bandwidth after a failed attempt.
4262 + */
4263 ++ struct mutex *address0_mutex;
4264 + struct mutex *bandwidth_mutex;
4265 + struct usb_hcd *shared_hcd;
4266 + struct usb_hcd *primary_hcd;
4267 +diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
4268 +index 74d79bde7075..a505079dcf88 100644
4269 +--- a/include/scsi/scsi_device.h
4270 ++++ b/include/scsi/scsi_device.h
4271 +@@ -242,6 +242,7 @@ scmd_printk(const char *, const struct scsi_cmnd *, const char *, ...);
4272 + enum scsi_target_state {
4273 + STARGET_CREATED = 1,
4274 + STARGET_RUNNING,
4275 ++ STARGET_REMOVE,
4276 + STARGET_DEL,
4277 + };
4278 +
4279 +diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
4280 +index ef7159012cf3..b0b93fd33af9 100644
4281 +--- a/kernel/sched/loadavg.c
4282 ++++ b/kernel/sched/loadavg.c
4283 +@@ -99,10 +99,13 @@ long calc_load_fold_active(struct rq *this_rq)
4284 + static unsigned long
4285 + calc_load(unsigned long load, unsigned long exp, unsigned long active)
4286 + {
4287 +- load *= exp;
4288 +- load += active * (FIXED_1 - exp);
4289 +- load += 1UL << (FSHIFT - 1);
4290 +- return load >> FSHIFT;
4291 ++ unsigned long newload;
4292 ++
4293 ++ newload = load * exp + active * (FIXED_1 - exp);
4294 ++ if (active >= load)
4295 ++ newload += FIXED_1-1;
4296 ++
4297 ++ return newload / FIXED_1;
4298 + }
4299 +
4300 + #ifdef CONFIG_NO_HZ_COMMON
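The loadavg hunk changes how calc_load() rounds its fixed-point average: the old code always added half a unit (1UL << (FSHIFT - 1), i.e. round to nearest), while the new code rounds up only while the average is rising (active >= load) and truncates while it is falling, so an idle system's average can actually decay to zero instead of stalling just above it. The userspace comparison below reproduces the effect; FSHIFT, FIXED_1 and EXP_15 are the kernel's usual load-average constants and are assumed here, they are not part of this hunk.

#include <stdio.h>

#define FSHIFT  11
#define FIXED_1 (1UL << FSHIFT)         /* 1.0 in fixed point */
#define EXP_15  2037                    /* 1/exp(5sec/15min), kernel constant */

/* Old behaviour: unconditional round-to-nearest. */
static unsigned long calc_load_old(unsigned long load, unsigned long exp,
                                   unsigned long active)
{
        load *= exp;
        load += active * (FIXED_1 - exp);
        load += 1UL << (FSHIFT - 1);
        return load >> FSHIFT;
}

/* New behaviour: round up only while rising, truncate while falling. */
static unsigned long calc_load_new(unsigned long load, unsigned long exp,
                                   unsigned long active)
{
        unsigned long newload = load * exp + active * (FIXED_1 - exp);

        if (active >= load)
                newload += FIXED_1 - 1;
        return newload / FIXED_1;
}

int main(void)
{
        unsigned long old_avg = FIXED_1, new_avg = FIXED_1;  /* start at load 1.00 */
        int i;

        for (i = 0; i < 2000; i++) {                         /* system is now idle */
                old_avg = calc_load_old(old_avg, EXP_15, 0);
                new_avg = calc_load_new(new_avg, EXP_15, 0);
        }
        printf("old: %lu/2048 (%.2f)  new: %lu/2048 (%.2f)\n",
               old_avg, old_avg / 2048.0, new_avg, new_avg / 2048.0);
        return 0;
}

After a couple of thousand idle 5-second ticks the old rounding settles at 93/2048, a small phantom load, while the new rounding reaches 0.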
4301 +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
4302 +index 95181e36891a..9c143739b8d7 100644
4303 +--- a/kernel/trace/ring_buffer.c
4304 ++++ b/kernel/trace/ring_buffer.c
4305 +@@ -437,7 +437,7 @@ struct ring_buffer_per_cpu {
4306 + raw_spinlock_t reader_lock; /* serialize readers */
4307 + arch_spinlock_t lock;
4308 + struct lock_class_key lock_key;
4309 +- unsigned int nr_pages;
4310 ++ unsigned long nr_pages;
4311 + unsigned int current_context;
4312 + struct list_head *pages;
4313 + struct buffer_page *head_page; /* read from head */
4314 +@@ -458,7 +458,7 @@ struct ring_buffer_per_cpu {
4315 + u64 write_stamp;
4316 + u64 read_stamp;
4317 + /* ring buffer pages to update, > 0 to add, < 0 to remove */
4318 +- int nr_pages_to_update;
4319 ++ long nr_pages_to_update;
4320 + struct list_head new_pages; /* new pages to add */
4321 + struct work_struct update_pages_work;
4322 + struct completion update_done;
4323 +@@ -1128,10 +1128,10 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
4324 + return 0;
4325 + }
4326 +
4327 +-static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
4328 ++static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
4329 + {
4330 +- int i;
4331 + struct buffer_page *bpage, *tmp;
4332 ++ long i;
4333 +
4334 + for (i = 0; i < nr_pages; i++) {
4335 + struct page *page;
4336 +@@ -1168,7 +1168,7 @@ free_pages:
4337 + }
4338 +
4339 + static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
4340 +- unsigned nr_pages)
4341 ++ unsigned long nr_pages)
4342 + {
4343 + LIST_HEAD(pages);
4344 +
4345 +@@ -1193,7 +1193,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
4346 + }
4347 +
4348 + static struct ring_buffer_per_cpu *
4349 +-rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
4350 ++rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu)
4351 + {
4352 + struct ring_buffer_per_cpu *cpu_buffer;
4353 + struct buffer_page *bpage;
4354 +@@ -1293,8 +1293,9 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
4355 + struct lock_class_key *key)
4356 + {
4357 + struct ring_buffer *buffer;
4358 ++ long nr_pages;
4359 + int bsize;
4360 +- int cpu, nr_pages;
4361 ++ int cpu;
4362 +
4363 + /* keep it in its own cache line */
4364 + buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
4365 +@@ -1420,12 +1421,12 @@ static inline unsigned long rb_page_write(struct buffer_page *bpage)
4366 + }
4367 +
4368 + static int
4369 +-rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
4370 ++rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
4371 + {
4372 + struct list_head *tail_page, *to_remove, *next_page;
4373 + struct buffer_page *to_remove_page, *tmp_iter_page;
4374 + struct buffer_page *last_page, *first_page;
4375 +- unsigned int nr_removed;
4376 ++ unsigned long nr_removed;
4377 + unsigned long head_bit;
4378 + int page_entries;
4379 +
4380 +@@ -1642,7 +1643,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
4381 + int cpu_id)
4382 + {
4383 + struct ring_buffer_per_cpu *cpu_buffer;
4384 +- unsigned nr_pages;
4385 ++ unsigned long nr_pages;
4386 + int cpu, err = 0;
4387 +
4388 + /*
4389 +@@ -1656,14 +1657,13 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
4390 + !cpumask_test_cpu(cpu_id, buffer->cpumask))
4391 + return size;
4392 +
4393 +- size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
4394 +- size *= BUF_PAGE_SIZE;
4395 ++ nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
4396 +
4397 + /* we need a minimum of two pages */
4398 +- if (size < BUF_PAGE_SIZE * 2)
4399 +- size = BUF_PAGE_SIZE * 2;
4400 ++ if (nr_pages < 2)
4401 ++ nr_pages = 2;
4402 +
4403 +- nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
4404 ++ size = nr_pages * BUF_PAGE_SIZE;
4405 +
4406 + /*
4407 + * Don't succeed if resizing is disabled, as a reader might be
4408 +@@ -4640,8 +4640,9 @@ static int rb_cpu_notify(struct notifier_block *self,
4409 + struct ring_buffer *buffer =
4410 + container_of(self, struct ring_buffer, cpu_notify);
4411 + long cpu = (long)hcpu;
4412 +- int cpu_i, nr_pages_same;
4413 +- unsigned int nr_pages;
4414 ++ long nr_pages_same;
4415 ++ int cpu_i;
4416 ++ unsigned long nr_pages;
4417 +
4418 + switch (action) {
4419 + case CPU_UP_PREPARE:
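The ring-buffer changes above are a width fix: the page counts (nr_pages, nr_pages_to_update, the allocation loop counter) move from int/unsigned int to long/unsigned long, and ring_buffer_resize() now derives nr_pages from the requested size first and recomputes size from nr_pages, instead of rounding size up and multiplying it back, so a huge requested size can no longer truncate or overflow in a 32-bit intermediate. The standalone program below shows the kind of truncation the wider type prevents; BUF_PAGE_SIZE and an LP64 system (32-bit int, 64-bit long) are assumptions made for the illustration.

#include <stdio.h>

#define BUF_PAGE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        /* A resize request of 16 TiB worth of buffer pages. */
        unsigned long size = 1UL << 44;

        int  narrow = DIV_ROUND_UP(size, BUF_PAGE_SIZE);  /* old: int  */
        long wide   = DIV_ROUND_UP(size, BUF_PAGE_SIZE);  /* new: long */

        printf("pages as int:  %d\n", narrow);  /* typically wraps to 0 on LP64 */
        printf("pages as long: %ld\n", wide);   /* 4294967296 as expected       */
        return 0;
}

With the narrow type a request like this can collapse to a tiny or nonsensical page count before it is ever range-checked; keeping the count in a long preserves the value so the resize path can reject or honour it correctly.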
4420 +diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
4421 +index f9e47a70509c..53449a6ff6aa 100644
4422 +--- a/scripts/Makefile.extrawarn
4423 ++++ b/scripts/Makefile.extrawarn
4424 +@@ -24,6 +24,7 @@ warning-1 += $(call cc-option, -Wmissing-prototypes)
4425 + warning-1 += -Wold-style-definition
4426 + warning-1 += $(call cc-option, -Wmissing-include-dirs)
4427 + warning-1 += $(call cc-option, -Wunused-but-set-variable)
4428 ++warning-1 += $(call cc-option, -Wunused-const-variable)
4429 + warning-1 += $(call cc-disable-warning, missing-field-initializers)
4430 + warning-1 += $(call cc-disable-warning, sign-compare)
4431 +
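The Makefile.extrawarn hunk adds -Wunused-const-variable (via cc-option, so only when the compiler supports it) to the warning-1 group, so `make W=1` now also reports file-scope constants that nothing references. A minimal example of the construct it flags; the file name and identifier are made up.

/* Compile with a recent gcc: gcc -Wunused-const-variable -c example.c */
static const int default_timeout_ms = 500;   /* defined but never used: warns */

int example(void)
{
        return 0;
}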
4432 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4433 +index 4918ffa5ba68..d53c25e7a1c1 100644
4434 +--- a/sound/pci/hda/patch_realtek.c
4435 ++++ b/sound/pci/hda/patch_realtek.c
4436 +@@ -335,6 +335,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
4437 + case 0x10ec0283:
4438 + case 0x10ec0286:
4439 + case 0x10ec0288:
4440 ++ case 0x10ec0295:
4441 + case 0x10ec0298:
4442 + alc_update_coef_idx(codec, 0x10, 1<<9, 0);
4443 + break;
4444 +@@ -342,6 +343,11 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
4445 + case 0x10ec0293:
4446 + alc_update_coef_idx(codec, 0xa, 1<<13, 0);
4447 + break;
4448 ++ case 0x10ec0234:
4449 ++ case 0x10ec0274:
4450 ++ case 0x10ec0294:
4451 ++ alc_update_coef_idx(codec, 0x10, 1<<15, 0);
4452 ++ break;
4453 + case 0x10ec0662:
4454 + if ((coef & 0x00f0) == 0x0030)
4455 + alc_update_coef_idx(codec, 0x4, 1<<10, 0); /* EAPD Ctrl */
4456 +@@ -902,6 +908,7 @@ static struct alc_codec_rename_pci_table rename_pci_tbl[] = {
4457 + { 0x10ec0298, 0x1028, 0, "ALC3266" },
4458 + { 0x10ec0256, 0x1028, 0, "ALC3246" },
4459 + { 0x10ec0225, 0x1028, 0, "ALC3253" },
4460 ++ { 0x10ec0295, 0x1028, 0, "ALC3254" },
4461 + { 0x10ec0670, 0x1025, 0, "ALC669X" },
4462 + { 0x10ec0676, 0x1025, 0, "ALC679X" },
4463 + { 0x10ec0282, 0x1043, 0, "ALC3229" },
4464 +@@ -2647,6 +2654,7 @@ enum {
4465 + ALC269_TYPE_ALC255,
4466 + ALC269_TYPE_ALC256,
4467 + ALC269_TYPE_ALC225,
4468 ++ ALC269_TYPE_ALC294,
4469 + };
4470 +
4471 + /*
4472 +@@ -2677,6 +2685,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
4473 + case ALC269_TYPE_ALC255:
4474 + case ALC269_TYPE_ALC256:
4475 + case ALC269_TYPE_ALC225:
4476 ++ case ALC269_TYPE_ALC294:
4477 + ssids = alc269_ssids;
4478 + break;
4479 + default:
4480 +@@ -3690,6 +3699,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
4481 + alc_process_coef_fw(codec, coef0668);
4482 + break;
4483 + case 0x10ec0225:
4484 ++ case 0x10ec0295:
4485 + alc_process_coef_fw(codec, coef0225);
4486 + break;
4487 + }
4488 +@@ -3790,6 +3800,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
4489 + snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
4490 + break;
4491 + case 0x10ec0225:
4492 ++ case 0x10ec0295:
4493 + alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x31<<10);
4494 + snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
4495 + alc_process_coef_fw(codec, coef0225);
4496 +@@ -3847,6 +3858,7 @@ static void alc_headset_mode_default(struct hda_codec *codec)
4497 +
4498 + switch (codec->core.vendor_id) {
4499 + case 0x10ec0225:
4500 ++ case 0x10ec0295:
4501 + alc_process_coef_fw(codec, coef0225);
4502 + break;
4503 + case 0x10ec0255:
4504 +@@ -3950,6 +3962,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
4505 + alc_process_coef_fw(codec, coef0688);
4506 + break;
4507 + case 0x10ec0225:
4508 ++ case 0x10ec0295:
4509 + alc_process_coef_fw(codec, coef0225);
4510 + break;
4511 + }
4512 +@@ -4031,6 +4044,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
4513 + alc_process_coef_fw(codec, coef0688);
4514 + break;
4515 + case 0x10ec0225:
4516 ++ case 0x10ec0295:
4517 + alc_process_coef_fw(codec, coef0225);
4518 + break;
4519 + }
4520 +@@ -4114,6 +4128,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
4521 + is_ctia = (val & 0x1c02) == 0x1c02;
4522 + break;
4523 + case 0x10ec0225:
4524 ++ case 0x10ec0295:
4525 + alc_process_coef_fw(codec, coef0225);
4526 + msleep(800);
4527 + val = alc_read_coef_idx(codec, 0x46);
4528 +@@ -5459,8 +5474,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4529 + SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
4530 + SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
4531 + SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
4532 +- SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
4533 ++ SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
4534 + SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
4535 ++ SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
4536 + SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
4537 + SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
4538 + SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
4539 +@@ -5704,6 +5720,9 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
4540 + {0x14, 0x90170110},
4541 + {0x21, 0x02211020}),
4542 + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
4543 ++ {0x14, 0x90170130},
4544 ++ {0x21, 0x02211040}),
4545 ++ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
4546 + {0x12, 0x90a60140},
4547 + {0x14, 0x90170110},
4548 + {0x21, 0x02211020}),
4549 +@@ -6026,8 +6045,14 @@ static int patch_alc269(struct hda_codec *codec)
4550 + alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
4551 + break;
4552 + case 0x10ec0225:
4553 ++ case 0x10ec0295:
4554 + spec->codec_variant = ALC269_TYPE_ALC225;
4555 + break;
4556 ++ case 0x10ec0234:
4557 ++ case 0x10ec0274:
4558 ++ case 0x10ec0294:
4559 ++ spec->codec_variant = ALC269_TYPE_ALC294;
4560 ++ break;
4561 + }
4562 +
4563 + if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) {
4564 +@@ -6942,6 +6967,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
4565 + HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269),
4566 + HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269),
4567 + HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
4568 ++ HDA_CODEC_ENTRY(0x10ec0234, "ALC234", patch_alc269),
4569 + HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269),
4570 + HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269),
4571 + HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269),
4572 +@@ -6952,6 +6978,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
4573 + HDA_CODEC_ENTRY(0x10ec0269, "ALC269", patch_alc269),
4574 + HDA_CODEC_ENTRY(0x10ec0270, "ALC270", patch_alc269),
4575 + HDA_CODEC_ENTRY(0x10ec0272, "ALC272", patch_alc662),
4576 ++ HDA_CODEC_ENTRY(0x10ec0274, "ALC274", patch_alc269),
4577 + HDA_CODEC_ENTRY(0x10ec0275, "ALC275", patch_alc269),
4578 + HDA_CODEC_ENTRY(0x10ec0276, "ALC276", patch_alc269),
4579 + HDA_CODEC_ENTRY(0x10ec0280, "ALC280", patch_alc269),
4580 +@@ -6964,6 +6991,8 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
4581 + HDA_CODEC_ENTRY(0x10ec0290, "ALC290", patch_alc269),
4582 + HDA_CODEC_ENTRY(0x10ec0292, "ALC292", patch_alc269),
4583 + HDA_CODEC_ENTRY(0x10ec0293, "ALC293", patch_alc269),
4584 ++ HDA_CODEC_ENTRY(0x10ec0294, "ALC294", patch_alc269),
4585 ++ HDA_CODEC_ENTRY(0x10ec0295, "ALC295", patch_alc269),
4586 + HDA_CODEC_ENTRY(0x10ec0298, "ALC298", patch_alc269),
4587 + HDA_CODEC_REV_ENTRY(0x10ec0861, 0x100340, "ALC660", patch_alc861),
4588 + HDA_CODEC_ENTRY(0x10ec0660, "ALC660-VD", patch_alc861vd),
4589 +diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
4590 +index 6765c7e949f3..f094f3c4ed84 100644
4591 +--- a/tools/objtool/Makefile
4592 ++++ b/tools/objtool/Makefile
4593 +@@ -30,6 +30,10 @@ INCLUDES := -I$(srctree)/tools/include
4594 + CFLAGS += -Wall -Werror $(EXTRA_WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES)
4595 + LDFLAGS += -lelf $(LIBSUBCMD)
4596 +
4597 ++# Allow old libelf to be used:
4598 ++elfshdr := $(shell echo '\#include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr)
4599 ++CFLAGS += $(if $(elfshdr),,-DLIBELF_USE_DEPRECATED)
4600 ++
4601 + AWK = awk
4602 + export srctree OUTPUT CFLAGS ARCH AWK
4603 + include $(srctree)/tools/build/Makefile.include
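The objtool Makefile hunk probes the installed libelf by preprocessing <libelf.h> and, if the newer elf_getshdr* interface is absent, builds with -DLIBELF_USE_DEPRECATED; the elf.h hunk that follows then maps elf_getshdrnum and elf_getshdrstrndx onto the older elf_getshnum and elf_getshstrndx, so the rest of the tool can be written against the modern names. A small, illustrative user of the same pattern (not objtool's actual code) is sketched below; it assumes the old and new entry points share the int (Elf *, size_t *) signature, which is what the macro aliasing relies on.

/* Illustrative only: count ELF section headers via the modern API name.
 * Define LIBELF_USE_DEPRECATED when libelf predates elf_getshdrnum;
 * link with -lelf. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <gelf.h>

#ifdef LIBELF_USE_DEPRECATED
# define elf_getshdrnum elf_getshnum
#endif

int count_sections(const char *path)
{
        size_t nr = 0;
        Elf *elf;
        int fd;

        if (elf_version(EV_CURRENT) == EV_NONE)
                return -1;
        fd = open(path, O_RDONLY);
        if (fd < 0)
                return -1;
        elf = elf_begin(fd, ELF_C_READ, NULL);
        if (elf && !elf_getshdrnum(elf, &nr))
                printf("%s: %zu section headers\n", path, nr);
        if (elf)
                elf_end(elf);
        close(fd);
        return (int)nr;
}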
4604 +diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h
4605 +index 7f3e00a2f907..aa1ff6596684 100644
4606 +--- a/tools/objtool/elf.h
4607 ++++ b/tools/objtool/elf.h
4608 +@@ -23,6 +23,11 @@
4609 + #include <linux/list.h>
4610 + #include <linux/hashtable.h>
4611 +
4612 ++#ifdef LIBELF_USE_DEPRECATED
4613 ++# define elf_getshdrnum elf_getshnum
4614 ++# define elf_getshdrstrndx elf_getshstrndx
4615 ++#endif
4616 ++
4617 + struct section {
4618 + struct list_head list;
4619 + GElf_Shdr sh;