From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Thu, 02 Jun 2016 20:19:41
Message-Id: 1464891857.30d88c13b3112cc9067e5257e134f59d2f131742.mpagano@gentoo
1 commit: 30d88c13b3112cc9067e5257e134f59d2f131742
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Thu Jun 2 18:24:17 2016 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Thu Jun 2 18:24:17 2016 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=30d88c13
7
8 Linux patch 4.4.12
9
10 0000_README | 4 +
11 1011_linux-4.4.12.patch | 3995 +++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 3999 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index 8270b5e..6faf02f 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -87,6 +87,10 @@ Patch: 1010_linux-4.4.11.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.4.11
21
22 +Patch: 1011_linux-4.4.12.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.4.12
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1011_linux-4.4.12.patch b/1011_linux-4.4.12.patch
31 new file mode 100644
32 index 0000000..0fc9067
33 --- /dev/null
34 +++ b/1011_linux-4.4.12.patch
35 @@ -0,0 +1,3995 @@
36 +diff --git a/Documentation/serial/tty.txt b/Documentation/serial/tty.txt
37 +index bc3842dc323a..e2dea3dc4307 100644
38 +--- a/Documentation/serial/tty.txt
39 ++++ b/Documentation/serial/tty.txt
40 +@@ -213,9 +213,6 @@ TTY_IO_ERROR If set, causes all subsequent userspace read/write
41 +
42 + TTY_OTHER_CLOSED Device is a pty and the other side has closed.
43 +
44 +-TTY_OTHER_DONE Device is a pty and the other side has closed and
45 +- all pending input processing has been completed.
46 +-
47 + TTY_NO_WRITE_SPLIT Prevent driver from splitting up writes into
48 + smaller chunks.
49 +
50 +diff --git a/Makefile b/Makefile
51 +index aad86274b61b..a1fbd691a36e 100644
52 +--- a/Makefile
53 ++++ b/Makefile
54 +@@ -1,6 +1,6 @@
55 + VERSION = 4
56 + PATCHLEVEL = 4
57 +-SUBLEVEL = 11
58 ++SUBLEVEL = 12
59 + EXTRAVERSION =
60 + NAME = Blurry Fish Butt
61 +
62 +@@ -682,9 +682,10 @@ KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
63 + KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
64 + else
65 +
66 +-# This warning generated too much noise in a regular build.
67 +-# Use make W=1 to enable this warning (see scripts/Makefile.build)
68 ++# These warnings generated too much noise in a regular build.
69 ++# Use make W=1 to enable them (see scripts/Makefile.build)
70 + KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
71 ++KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
72 + endif
73 +
74 + ifdef CONFIG_FRAME_POINTER
75 +diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
76 +index 61d96a645ff3..12d727fae0a7 100644
77 +--- a/arch/arm/kvm/mmu.c
78 ++++ b/arch/arm/kvm/mmu.c
79 +@@ -886,11 +886,14 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
80 + VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));
81 +
82 + old_pmd = *pmd;
83 +- kvm_set_pmd(pmd, *new_pmd);
84 +- if (pmd_present(old_pmd))
85 ++ if (pmd_present(old_pmd)) {
86 ++ pmd_clear(pmd);
87 + kvm_tlb_flush_vmid_ipa(kvm, addr);
88 +- else
89 ++ } else {
90 + get_page(virt_to_page(pmd));
91 ++ }
92 ++
93 ++ kvm_set_pmd(pmd, *new_pmd);
94 + return 0;
95 + }
96 +
97 +@@ -939,12 +942,14 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
98 +
99 + /* Create 2nd stage page table mapping - Level 3 */
100 + old_pte = *pte;
101 +- kvm_set_pte(pte, *new_pte);
102 +- if (pte_present(old_pte))
103 ++ if (pte_present(old_pte)) {
104 ++ kvm_set_pte(pte, __pte(0));
105 + kvm_tlb_flush_vmid_ipa(kvm, addr);
106 +- else
107 ++ } else {
108 + get_page(virt_to_page(pte));
109 ++ }
110 +
111 ++ kvm_set_pte(pte, *new_pte);
112 + return 0;
113 + }
114 +
115 +diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
116 +index d6739e836f7b..b9da9545b442 100644
117 +--- a/arch/arm64/include/asm/pgtable-hwdef.h
118 ++++ b/arch/arm64/include/asm/pgtable-hwdef.h
119 +@@ -117,7 +117,6 @@
120 + * Section
121 + */
122 + #define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0)
123 +-#define PMD_SECT_PROT_NONE (_AT(pmdval_t, 1) << 58)
124 + #define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */
125 + #define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */
126 + #define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
127 +diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
128 +index c63868ae9a4a..67c2ad6d33b7 100644
129 +--- a/arch/arm64/include/asm/pgtable.h
130 ++++ b/arch/arm64/include/asm/pgtable.h
131 +@@ -347,6 +347,7 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
132 + #endif /* CONFIG_HAVE_RCU_TABLE_FREE */
133 + #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
134 +
135 ++#define pmd_present(pmd) pte_present(pmd_pte(pmd))
136 + #define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd))
137 + #define pmd_young(pmd) pte_young(pmd_pte(pmd))
138 + #define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
139 +@@ -355,7 +356,7 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
140 + #define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
141 + #define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
142 + #define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
143 +-#define pmd_mknotpresent(pmd) (__pmd(pmd_val(pmd) & ~PMD_TYPE_MASK))
144 ++#define pmd_mknotpresent(pmd) (__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))
145 +
146 + #define __HAVE_ARCH_PMD_WRITE
147 + #define pmd_write(pmd) pte_write(pmd_pte(pmd))
148 +@@ -394,7 +395,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
149 + unsigned long size, pgprot_t vma_prot);
150 +
151 + #define pmd_none(pmd) (!pmd_val(pmd))
152 +-#define pmd_present(pmd) (pmd_val(pmd))
153 +
154 + #define pmd_bad(pmd) (!(pmd_val(pmd) & 2))
155 +
156 +@@ -538,6 +538,21 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
157 + }
158 +
159 + #ifdef CONFIG_ARM64_HW_AFDBM
160 ++#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
161 ++extern int ptep_set_access_flags(struct vm_area_struct *vma,
162 ++ unsigned long address, pte_t *ptep,
163 ++ pte_t entry, int dirty);
164 ++
165 ++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
166 ++#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
167 ++static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
168 ++ unsigned long address, pmd_t *pmdp,
169 ++ pmd_t entry, int dirty)
170 ++{
171 ++ return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
172 ++}
173 ++#endif
174 ++
175 + /*
176 + * Atomic pte/pmd modifications.
177 + */
178 +@@ -590,9 +605,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
179 + }
180 +
181 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE
182 +-#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
183 +-static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
184 +- unsigned long address, pmd_t *pmdp)
185 ++#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
186 ++static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
187 ++ unsigned long address, pmd_t *pmdp)
188 + {
189 + return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
190 + }
191 +diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
192 +index 212ae6361d8b..a5f234039616 100644
193 +--- a/arch/arm64/kernel/cpuinfo.c
194 ++++ b/arch/arm64/kernel/cpuinfo.c
195 +@@ -85,7 +85,8 @@ static const char *const compat_hwcap_str[] = {
196 + "idivt",
197 + "vfpd32",
198 + "lpae",
199 +- "evtstrm"
200 ++ "evtstrm",
201 ++ NULL
202 + };
203 +
204 + static const char *const compat_hwcap2_str[] = {
205 +diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
206 +index 648112e90ed5..3972e65fbd5a 100644
207 +--- a/arch/arm64/kvm/inject_fault.c
208 ++++ b/arch/arm64/kvm/inject_fault.c
209 +@@ -130,7 +130,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
210 + esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);
211 +
212 + if (!is_iabt)
213 +- esr |= ESR_ELx_EC_DABT_LOW;
214 ++ esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;
215 +
216 + vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_ELx_FSC_EXTABT;
217 + }
218 +diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
219 +index 92ddac1e8ca2..40f5522245a2 100644
220 +--- a/arch/arm64/mm/fault.c
221 ++++ b/arch/arm64/mm/fault.c
222 +@@ -81,6 +81,56 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
223 + printk("\n");
224 + }
225 +
226 ++#ifdef CONFIG_ARM64_HW_AFDBM
227 ++/*
228 ++ * This function sets the access flags (dirty, accessed), as well as write
229 ++ * permission, and only to a more permissive setting.
230 ++ *
231 ++ * It needs to cope with hardware update of the accessed/dirty state by other
232 ++ * agents in the system and can safely skip the __sync_icache_dcache() call as,
233 ++ * like set_pte_at(), the PTE is never changed from no-exec to exec here.
234 ++ *
235 ++ * Returns whether or not the PTE actually changed.
236 ++ */
237 ++int ptep_set_access_flags(struct vm_area_struct *vma,
238 ++ unsigned long address, pte_t *ptep,
239 ++ pte_t entry, int dirty)
240 ++{
241 ++ pteval_t old_pteval;
242 ++ unsigned int tmp;
243 ++
244 ++ if (pte_same(*ptep, entry))
245 ++ return 0;
246 ++
247 ++ /* only preserve the access flags and write permission */
248 ++ pte_val(entry) &= PTE_AF | PTE_WRITE | PTE_DIRTY;
249 ++
250 ++ /*
251 ++ * PTE_RDONLY is cleared by default in the asm below, so set it in
252 ++ * back if necessary (read-only or clean PTE).
253 ++ */
254 ++ if (!pte_write(entry) || !dirty)
255 ++ pte_val(entry) |= PTE_RDONLY;
256 ++
257 ++ /*
258 ++ * Setting the flags must be done atomically to avoid racing with the
259 ++ * hardware update of the access/dirty state.
260 ++ */
261 ++ asm volatile("// ptep_set_access_flags\n"
262 ++ " prfm pstl1strm, %2\n"
263 ++ "1: ldxr %0, %2\n"
264 ++ " and %0, %0, %3 // clear PTE_RDONLY\n"
265 ++ " orr %0, %0, %4 // set flags\n"
266 ++ " stxr %w1, %0, %2\n"
267 ++ " cbnz %w1, 1b\n"
268 ++ : "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep))
269 ++ : "L" (~PTE_RDONLY), "r" (pte_val(entry)));
270 ++
271 ++ flush_tlb_fix_spurious_fault(vma, address);
272 ++ return 1;
273 ++}
274 ++#endif
275 ++
276 + /*
277 + * The kernel tried to access some page that wasn't present.
278 + */
279 +diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
280 +index 6ded8d347af9..4e956b3e16f5 100644
281 +--- a/arch/mips/include/asm/kvm_host.h
282 ++++ b/arch/mips/include/asm/kvm_host.h
283 +@@ -784,7 +784,7 @@ extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
284 +
285 + uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu);
286 + void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count);
287 +-void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare);
288 ++void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack);
289 + void kvm_mips_init_count(struct kvm_vcpu *vcpu);
290 + int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
291 + int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
292 +diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
293 +index 41b1b090f56f..dc10c77b7500 100644
294 +--- a/arch/mips/kvm/emulate.c
295 ++++ b/arch/mips/kvm/emulate.c
296 +@@ -302,12 +302,31 @@ static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
297 + */
298 + static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
299 + {
300 +- ktime_t expires;
301 ++ struct mips_coproc *cop0 = vcpu->arch.cop0;
302 ++ ktime_t expires, threshold;
303 ++ uint32_t count, compare;
304 + int running;
305 +
306 +- /* Is the hrtimer pending? */
307 ++ /* Calculate the biased and scaled guest CP0_Count */
308 ++ count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
309 ++ compare = kvm_read_c0_guest_compare(cop0);
310 ++
311 ++ /*
312 ++ * Find whether CP0_Count has reached the closest timer interrupt. If
313 ++ * not, we shouldn't inject it.
314 ++ */
315 ++ if ((int32_t)(count - compare) < 0)
316 ++ return count;
317 ++
318 ++ /*
319 ++ * The CP0_Count we're going to return has already reached the closest
320 ++ * timer interrupt. Quickly check if it really is a new interrupt by
321 ++ * looking at whether the interval until the hrtimer expiry time is
322 ++ * less than 1/4 of the timer period.
323 ++ */
324 + expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
325 +- if (ktime_compare(now, expires) >= 0) {
326 ++ threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
327 ++ if (ktime_before(expires, threshold)) {
328 + /*
329 + * Cancel it while we handle it so there's no chance of
330 + * interference with the timeout handler.
331 +@@ -329,8 +348,7 @@ static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
332 + }
333 + }
334 +
335 +- /* Return the biased and scaled guest CP0_Count */
336 +- return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
337 ++ return count;
338 + }
339 +
340 + /**
341 +@@ -420,32 +438,6 @@ static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
342 + }
343 +
344 + /**
345 +- * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer.
346 +- * @vcpu: Virtual CPU.
347 +- *
348 +- * Recalculates and updates the expiry time of the hrtimer. This can be used
349 +- * after timer parameters have been altered which do not depend on the time that
350 +- * the change occurs (in those cases kvm_mips_freeze_hrtimer() and
351 +- * kvm_mips_resume_hrtimer() are used directly).
352 +- *
353 +- * It is guaranteed that no timer interrupts will be lost in the process.
354 +- *
355 +- * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
356 +- */
357 +-static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu)
358 +-{
359 +- ktime_t now;
360 +- uint32_t count;
361 +-
362 +- /*
363 +- * freeze_hrtimer takes care of a timer interrupts <= count, and
364 +- * resume_hrtimer the hrtimer takes care of a timer interrupts > count.
365 +- */
366 +- now = kvm_mips_freeze_hrtimer(vcpu, &count);
367 +- kvm_mips_resume_hrtimer(vcpu, now, count);
368 +-}
369 +-
370 +-/**
371 + * kvm_mips_write_count() - Modify the count and update timer.
372 + * @vcpu: Virtual CPU.
373 + * @count: Guest CP0_Count value to set.
374 +@@ -540,23 +532,42 @@ int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
375 + * kvm_mips_write_compare() - Modify compare and update timer.
376 + * @vcpu: Virtual CPU.
377 + * @compare: New CP0_Compare value.
378 ++ * @ack: Whether to acknowledge timer interrupt.
379 + *
380 + * Update CP0_Compare to a new value and update the timeout.
381 ++ * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
382 ++ * any pending timer interrupt is preserved.
383 + */
384 +-void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare)
385 ++void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack)
386 + {
387 + struct mips_coproc *cop0 = vcpu->arch.cop0;
388 ++ int dc;
389 ++ u32 old_compare = kvm_read_c0_guest_compare(cop0);
390 ++ ktime_t now;
391 ++ uint32_t count;
392 +
393 + /* if unchanged, must just be an ack */
394 +- if (kvm_read_c0_guest_compare(cop0) == compare)
395 ++ if (old_compare == compare) {
396 ++ if (!ack)
397 ++ return;
398 ++ kvm_mips_callbacks->dequeue_timer_int(vcpu);
399 ++ kvm_write_c0_guest_compare(cop0, compare);
400 + return;
401 ++ }
402 ++
403 ++ /* freeze_hrtimer() takes care of timer interrupts <= count */
404 ++ dc = kvm_mips_count_disabled(vcpu);
405 ++ if (!dc)
406 ++ now = kvm_mips_freeze_hrtimer(vcpu, &count);
407 ++
408 ++ if (ack)
409 ++ kvm_mips_callbacks->dequeue_timer_int(vcpu);
410 +
411 +- /* Update compare */
412 + kvm_write_c0_guest_compare(cop0, compare);
413 +
414 +- /* Update timeout if count enabled */
415 +- if (!kvm_mips_count_disabled(vcpu))
416 +- kvm_mips_update_hrtimer(vcpu);
417 ++ /* resume_hrtimer() takes care of timer interrupts > count */
418 ++ if (!dc)
419 ++ kvm_mips_resume_hrtimer(vcpu, now, count);
420 + }
421 +
422 + /**
423 +@@ -1095,9 +1106,9 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
424 +
425 + /* If we are writing to COMPARE */
426 + /* Clear pending timer interrupt, if any */
427 +- kvm_mips_callbacks->dequeue_timer_int(vcpu);
428 + kvm_mips_write_compare(vcpu,
429 +- vcpu->arch.gprs[rt]);
430 ++ vcpu->arch.gprs[rt],
431 ++ true);
432 + } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
433 + unsigned int old_val, val, change;
434 +
435 +diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
436 +index d836ed5b0bc7..307cc4c98bdd 100644
437 +--- a/arch/mips/kvm/trap_emul.c
438 ++++ b/arch/mips/kvm/trap_emul.c
439 +@@ -547,7 +547,7 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
440 + kvm_mips_write_count(vcpu, v);
441 + break;
442 + case KVM_REG_MIPS_CP0_COMPARE:
443 +- kvm_mips_write_compare(vcpu, v);
444 ++ kvm_mips_write_compare(vcpu, v, false);
445 + break;
446 + case KVM_REG_MIPS_CP0_CAUSE:
447 + /*
448 +diff --git a/arch/x86/kernel/cpu/perf_event_intel_pt.c b/arch/x86/kernel/cpu/perf_event_intel_pt.c
449 +index 868e1194337f..49e35d003b74 100644
450 +--- a/arch/x86/kernel/cpu/perf_event_intel_pt.c
451 ++++ b/arch/x86/kernel/cpu/perf_event_intel_pt.c
452 +@@ -694,6 +694,7 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
453 +
454 + /* clear STOP and INT from current entry */
455 + buf->topa_index[buf->stop_pos]->stop = 0;
456 ++ buf->topa_index[buf->stop_pos]->intr = 0;
457 + buf->topa_index[buf->intr_pos]->intr = 0;
458 +
459 + /* how many pages till the STOP marker */
460 +@@ -718,6 +719,7 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
461 + buf->intr_pos = idx;
462 +
463 + buf->topa_index[buf->stop_pos]->stop = 1;
464 ++ buf->topa_index[buf->stop_pos]->intr = 1;
465 + buf->topa_index[buf->intr_pos]->intr = 1;
466 +
467 + return 0;
468 +diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
469 +index 6525e926f566..2e1fd586b895 100644
470 +--- a/arch/x86/kvm/cpuid.c
471 ++++ b/arch/x86/kvm/cpuid.c
472 +@@ -509,6 +509,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
473 + do_cpuid_1_ent(&entry[i], function, idx);
474 + if (idx == 1) {
475 + entry[i].eax &= kvm_supported_word10_x86_features;
476 ++ cpuid_mask(&entry[i].eax, 10);
477 + entry[i].ebx = 0;
478 + if (entry[i].eax & (F(XSAVES)|F(XSAVEC)))
479 + entry[i].ebx =
480 +diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
481 +index 3f8c732117ec..c146f3c262c3 100644
482 +--- a/arch/x86/kvm/mtrr.c
483 ++++ b/arch/x86/kvm/mtrr.c
484 +@@ -44,8 +44,6 @@ static bool msr_mtrr_valid(unsigned msr)
485 + case MSR_MTRRdefType:
486 + case MSR_IA32_CR_PAT:
487 + return true;
488 +- case 0x2f8:
489 +- return true;
490 + }
491 + return false;
492 + }
493 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
494 +index f34ab71dfd57..f314e9b9660b 100644
495 +--- a/arch/x86/kvm/vmx.c
496 ++++ b/arch/x86/kvm/vmx.c
497 +@@ -4954,8 +4954,8 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
498 + vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
499 +
500 + cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
501 +- vmx_set_cr0(vcpu, cr0); /* enter rmode */
502 + vmx->vcpu.arch.cr0 = cr0;
503 ++ vmx_set_cr0(vcpu, cr0); /* enter rmode */
504 + vmx_set_cr4(vcpu, 0);
505 + vmx_set_efer(vcpu, 0);
506 + vmx_fpu_activate(vcpu);
507 +diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
508 +index ff31ab464213..c6d6efed392a 100644
509 +--- a/arch/x86/pci/xen.c
510 ++++ b/arch/x86/pci/xen.c
511 +@@ -488,8 +488,11 @@ int __init pci_xen_initial_domain(void)
512 + #endif
513 + __acpi_register_gsi = acpi_register_gsi_xen;
514 + __acpi_unregister_gsi = NULL;
515 +- /* Pre-allocate legacy irqs */
516 +- for (irq = 0; irq < nr_legacy_irqs(); irq++) {
517 ++ /*
518 ++ * Pre-allocate the legacy IRQs. Use NR_LEGACY_IRQS here
519 ++ * because we don't have a PIC and thus nr_legacy_irqs() is zero.
520 ++ */
521 ++ for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
522 + int trigger, polarity;
523 +
524 + if (acpi_get_override_irq(irq, &trigger, &polarity) == -1)
525 +diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
526 +index 32d684af0ec7..a000ecb995e6 100644
527 +--- a/drivers/acpi/osl.c
528 ++++ b/drivers/acpi/osl.c
529 +@@ -135,7 +135,7 @@ static struct osi_linux {
530 + unsigned int enable:1;
531 + unsigned int dmi:1;
532 + unsigned int cmdline:1;
533 +- unsigned int default_disabling:1;
534 ++ u8 default_disabling;
535 + } osi_linux = {0, 0, 0, 0};
536 +
537 + static u32 acpi_osi_handler(acpi_string interface, u32 supported)
538 +@@ -1444,10 +1444,13 @@ void __init acpi_osi_setup(char *str)
539 + if (*str == '!') {
540 + str++;
541 + if (*str == '\0') {
542 +- osi_linux.default_disabling = 1;
543 ++ /* Do not override acpi_osi=!* */
544 ++ if (!osi_linux.default_disabling)
545 ++ osi_linux.default_disabling =
546 ++ ACPI_DISABLE_ALL_VENDOR_STRINGS;
547 + return;
548 + } else if (*str == '*') {
549 +- acpi_update_interfaces(ACPI_DISABLE_ALL_STRINGS);
550 ++ osi_linux.default_disabling = ACPI_DISABLE_ALL_STRINGS;
551 + for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
552 + osi = &osi_setup_entries[i];
553 + osi->enable = false;
554 +@@ -1520,10 +1523,13 @@ static void __init acpi_osi_setup_late(void)
555 + acpi_status status;
556 +
557 + if (osi_linux.default_disabling) {
558 +- status = acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
559 ++ status = acpi_update_interfaces(osi_linux.default_disabling);
560 +
561 + if (ACPI_SUCCESS(status))
562 +- printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors\n");
563 ++ printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors%s\n",
564 ++ osi_linux.default_disabling ==
565 ++ ACPI_DISABLE_ALL_STRINGS ?
566 ++ " and feature groups" : "");
567 + }
568 +
569 + for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
570 +diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
571 +index ed888e302bc3..597b2d16b775 100644
572 +--- a/drivers/bluetooth/hci_vhci.c
573 ++++ b/drivers/bluetooth/hci_vhci.c
574 +@@ -50,6 +50,7 @@ struct vhci_data {
575 + wait_queue_head_t read_wait;
576 + struct sk_buff_head readq;
577 +
578 ++ struct mutex open_mutex;
579 + struct delayed_work open_timeout;
580 + };
581 +
582 +@@ -87,12 +88,15 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
583 + return 0;
584 + }
585 +
586 +-static int vhci_create_device(struct vhci_data *data, __u8 opcode)
587 ++static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
588 + {
589 + struct hci_dev *hdev;
590 + struct sk_buff *skb;
591 + __u8 dev_type;
592 +
593 ++ if (data->hdev)
594 ++ return -EBADFD;
595 ++
596 + /* bits 0-1 are dev_type (BR/EDR or AMP) */
597 + dev_type = opcode & 0x03;
598 +
599 +@@ -151,6 +155,17 @@ static int vhci_create_device(struct vhci_data *data, __u8 opcode)
600 + return 0;
601 + }
602 +
603 ++static int vhci_create_device(struct vhci_data *data, __u8 opcode)
604 ++{
605 ++ int err;
606 ++
607 ++ mutex_lock(&data->open_mutex);
608 ++ err = __vhci_create_device(data, opcode);
609 ++ mutex_unlock(&data->open_mutex);
610 ++
611 ++ return err;
612 ++}
613 ++
614 + static inline ssize_t vhci_get_user(struct vhci_data *data,
615 + struct iov_iter *from)
616 + {
617 +@@ -189,11 +204,6 @@ static inline ssize_t vhci_get_user(struct vhci_data *data,
618 + break;
619 +
620 + case HCI_VENDOR_PKT:
621 +- if (data->hdev) {
622 +- kfree_skb(skb);
623 +- return -EBADFD;
624 +- }
625 +-
626 + cancel_delayed_work_sync(&data->open_timeout);
627 +
628 + opcode = *((__u8 *) skb->data);
629 +@@ -320,6 +330,7 @@ static int vhci_open(struct inode *inode, struct file *file)
630 + skb_queue_head_init(&data->readq);
631 + init_waitqueue_head(&data->read_wait);
632 +
633 ++ mutex_init(&data->open_mutex);
634 + INIT_DELAYED_WORK(&data->open_timeout, vhci_open_timeout);
635 +
636 + file->private_data = data;
637 +@@ -333,15 +344,18 @@ static int vhci_open(struct inode *inode, struct file *file)
638 + static int vhci_release(struct inode *inode, struct file *file)
639 + {
640 + struct vhci_data *data = file->private_data;
641 +- struct hci_dev *hdev = data->hdev;
642 ++ struct hci_dev *hdev;
643 +
644 + cancel_delayed_work_sync(&data->open_timeout);
645 +
646 ++ hdev = data->hdev;
647 ++
648 + if (hdev) {
649 + hci_unregister_dev(hdev);
650 + hci_free_dev(hdev);
651 + }
652 +
653 ++ skb_queue_purge(&data->readq);
654 + file->private_data = NULL;
655 + kfree(data);
656 +
657 +diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
658 +index 4f9830c1b121..185a4d859638 100644
659 +--- a/drivers/clk/bcm/clk-bcm2835.c
660 ++++ b/drivers/clk/bcm/clk-bcm2835.c
661 +@@ -1068,10 +1068,12 @@ static void bcm2835_pll_divider_off(struct clk_hw *hw)
662 + struct bcm2835_cprman *cprman = divider->cprman;
663 + const struct bcm2835_pll_divider_data *data = divider->data;
664 +
665 ++ spin_lock(&cprman->regs_lock);
666 + cprman_write(cprman, data->cm_reg,
667 + (cprman_read(cprman, data->cm_reg) &
668 + ~data->load_mask) | data->hold_mask);
669 + cprman_write(cprman, data->a2w_reg, A2W_PLL_CHANNEL_DISABLE);
670 ++ spin_unlock(&cprman->regs_lock);
671 + }
672 +
673 + static int bcm2835_pll_divider_on(struct clk_hw *hw)
674 +@@ -1080,12 +1082,14 @@ static int bcm2835_pll_divider_on(struct clk_hw *hw)
675 + struct bcm2835_cprman *cprman = divider->cprman;
676 + const struct bcm2835_pll_divider_data *data = divider->data;
677 +
678 ++ spin_lock(&cprman->regs_lock);
679 + cprman_write(cprman, data->a2w_reg,
680 + cprman_read(cprman, data->a2w_reg) &
681 + ~A2W_PLL_CHANNEL_DISABLE);
682 +
683 + cprman_write(cprman, data->cm_reg,
684 + cprman_read(cprman, data->cm_reg) & ~data->hold_mask);
685 ++ spin_unlock(&cprman->regs_lock);
686 +
687 + return 0;
688 + }
689 +diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
690 +index d0a0313d6bef..2e7f03d50f4e 100644
691 +--- a/drivers/clk/qcom/gcc-msm8916.c
692 ++++ b/drivers/clk/qcom/gcc-msm8916.c
693 +@@ -2346,6 +2346,7 @@ static struct clk_branch gcc_crypto_ahb_clk = {
694 + "pcnoc_bfdcd_clk_src",
695 + },
696 + .num_parents = 1,
697 ++ .flags = CLK_SET_RATE_PARENT,
698 + .ops = &clk_branch2_ops,
699 + },
700 + },
701 +@@ -2381,6 +2382,7 @@ static struct clk_branch gcc_crypto_clk = {
702 + "crypto_clk_src",
703 + },
704 + .num_parents = 1,
705 ++ .flags = CLK_SET_RATE_PARENT,
706 + .ops = &clk_branch2_ops,
707 + },
708 + },
709 +diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
710 +index f7e0d8d4c3da..8f50a02ff68d 100644
711 +--- a/drivers/crypto/caam/jr.c
712 ++++ b/drivers/crypto/caam/jr.c
713 +@@ -248,7 +248,7 @@ static void caam_jr_dequeue(unsigned long devarg)
714 + struct device *caam_jr_alloc(void)
715 + {
716 + struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL;
717 +- struct device *dev = NULL;
718 ++ struct device *dev = ERR_PTR(-ENODEV);
719 + int min_tfm_cnt = INT_MAX;
720 + int tfm_cnt;
721 +
722 +diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
723 +index a19ee127edca..e72fea737a0d 100644
724 +--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
725 ++++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
726 +@@ -35,6 +35,7 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
727 + unsigned int todo;
728 + struct sg_mapping_iter mi, mo;
729 + unsigned int oi, oo; /* offset for in and out */
730 ++ unsigned long flags;
731 +
732 + if (areq->nbytes == 0)
733 + return 0;
734 +@@ -49,7 +50,7 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
735 + return -EINVAL;
736 + }
737 +
738 +- spin_lock_bh(&ss->slock);
739 ++ spin_lock_irqsave(&ss->slock, flags);
740 +
741 + for (i = 0; i < op->keylen; i += 4)
742 + writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
743 +@@ -117,7 +118,7 @@ release_ss:
744 + sg_miter_stop(&mi);
745 + sg_miter_stop(&mo);
746 + writel(0, ss->base + SS_CTL);
747 +- spin_unlock_bh(&ss->slock);
748 ++ spin_unlock_irqrestore(&ss->slock, flags);
749 + return err;
750 + }
751 +
752 +@@ -149,6 +150,7 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
753 + unsigned int ob = 0; /* offset in buf */
754 + unsigned int obo = 0; /* offset in bufo*/
755 + unsigned int obl = 0; /* length of data in bufo */
756 ++ unsigned long flags;
757 +
758 + if (areq->nbytes == 0)
759 + return 0;
760 +@@ -181,7 +183,7 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
761 + if (no_chunk == 1)
762 + return sun4i_ss_opti_poll(areq);
763 +
764 +- spin_lock_bh(&ss->slock);
765 ++ spin_lock_irqsave(&ss->slock, flags);
766 +
767 + for (i = 0; i < op->keylen; i += 4)
768 + writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
769 +@@ -308,7 +310,7 @@ release_ss:
770 + sg_miter_stop(&mi);
771 + sg_miter_stop(&mo);
772 + writel(0, ss->base + SS_CTL);
773 +- spin_unlock_bh(&ss->slock);
774 ++ spin_unlock_irqrestore(&ss->slock, flags);
775 +
776 + return err;
777 + }
778 +diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
779 +index a04fea4d0063..9a8a18aafd5c 100644
780 +--- a/drivers/crypto/talitos.c
781 ++++ b/drivers/crypto/talitos.c
782 +@@ -835,6 +835,16 @@ struct talitos_ahash_req_ctx {
783 + struct scatterlist *psrc;
784 + };
785 +
786 ++struct talitos_export_state {
787 ++ u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
788 ++ u8 buf[HASH_MAX_BLOCK_SIZE];
789 ++ unsigned int swinit;
790 ++ unsigned int first;
791 ++ unsigned int last;
792 ++ unsigned int to_hash_later;
793 ++ unsigned int nbuf;
794 ++};
795 ++
796 + static int aead_setkey(struct crypto_aead *authenc,
797 + const u8 *key, unsigned int keylen)
798 + {
799 +@@ -1954,6 +1964,46 @@ static int ahash_digest(struct ahash_request *areq)
800 + return ahash_process_req(areq, areq->nbytes);
801 + }
802 +
803 ++static int ahash_export(struct ahash_request *areq, void *out)
804 ++{
805 ++ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
806 ++ struct talitos_export_state *export = out;
807 ++
808 ++ memcpy(export->hw_context, req_ctx->hw_context,
809 ++ req_ctx->hw_context_size);
810 ++ memcpy(export->buf, req_ctx->buf, req_ctx->nbuf);
811 ++ export->swinit = req_ctx->swinit;
812 ++ export->first = req_ctx->first;
813 ++ export->last = req_ctx->last;
814 ++ export->to_hash_later = req_ctx->to_hash_later;
815 ++ export->nbuf = req_ctx->nbuf;
816 ++
817 ++ return 0;
818 ++}
819 ++
820 ++static int ahash_import(struct ahash_request *areq, const void *in)
821 ++{
822 ++ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
823 ++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
824 ++ const struct talitos_export_state *export = in;
825 ++
826 ++ memset(req_ctx, 0, sizeof(*req_ctx));
827 ++ req_ctx->hw_context_size =
828 ++ (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
829 ++ ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
830 ++ : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
831 ++ memcpy(req_ctx->hw_context, export->hw_context,
832 ++ req_ctx->hw_context_size);
833 ++ memcpy(req_ctx->buf, export->buf, export->nbuf);
834 ++ req_ctx->swinit = export->swinit;
835 ++ req_ctx->first = export->first;
836 ++ req_ctx->last = export->last;
837 ++ req_ctx->to_hash_later = export->to_hash_later;
838 ++ req_ctx->nbuf = export->nbuf;
839 ++
840 ++ return 0;
841 ++}
842 ++
843 + struct keyhash_result {
844 + struct completion completion;
845 + int err;
846 +@@ -2348,6 +2398,7 @@ static struct talitos_alg_template driver_algs[] = {
847 + { .type = CRYPTO_ALG_TYPE_AHASH,
848 + .alg.hash = {
849 + .halg.digestsize = MD5_DIGEST_SIZE,
850 ++ .halg.statesize = sizeof(struct talitos_export_state),
851 + .halg.base = {
852 + .cra_name = "md5",
853 + .cra_driver_name = "md5-talitos",
854 +@@ -2363,6 +2414,7 @@ static struct talitos_alg_template driver_algs[] = {
855 + { .type = CRYPTO_ALG_TYPE_AHASH,
856 + .alg.hash = {
857 + .halg.digestsize = SHA1_DIGEST_SIZE,
858 ++ .halg.statesize = sizeof(struct talitos_export_state),
859 + .halg.base = {
860 + .cra_name = "sha1",
861 + .cra_driver_name = "sha1-talitos",
862 +@@ -2378,6 +2430,7 @@ static struct talitos_alg_template driver_algs[] = {
863 + { .type = CRYPTO_ALG_TYPE_AHASH,
864 + .alg.hash = {
865 + .halg.digestsize = SHA224_DIGEST_SIZE,
866 ++ .halg.statesize = sizeof(struct talitos_export_state),
867 + .halg.base = {
868 + .cra_name = "sha224",
869 + .cra_driver_name = "sha224-talitos",
870 +@@ -2393,6 +2446,7 @@ static struct talitos_alg_template driver_algs[] = {
871 + { .type = CRYPTO_ALG_TYPE_AHASH,
872 + .alg.hash = {
873 + .halg.digestsize = SHA256_DIGEST_SIZE,
874 ++ .halg.statesize = sizeof(struct talitos_export_state),
875 + .halg.base = {
876 + .cra_name = "sha256",
877 + .cra_driver_name = "sha256-talitos",
878 +@@ -2408,6 +2462,7 @@ static struct talitos_alg_template driver_algs[] = {
879 + { .type = CRYPTO_ALG_TYPE_AHASH,
880 + .alg.hash = {
881 + .halg.digestsize = SHA384_DIGEST_SIZE,
882 ++ .halg.statesize = sizeof(struct talitos_export_state),
883 + .halg.base = {
884 + .cra_name = "sha384",
885 + .cra_driver_name = "sha384-talitos",
886 +@@ -2423,6 +2478,7 @@ static struct talitos_alg_template driver_algs[] = {
887 + { .type = CRYPTO_ALG_TYPE_AHASH,
888 + .alg.hash = {
889 + .halg.digestsize = SHA512_DIGEST_SIZE,
890 ++ .halg.statesize = sizeof(struct talitos_export_state),
891 + .halg.base = {
892 + .cra_name = "sha512",
893 + .cra_driver_name = "sha512-talitos",
894 +@@ -2438,6 +2494,7 @@ static struct talitos_alg_template driver_algs[] = {
895 + { .type = CRYPTO_ALG_TYPE_AHASH,
896 + .alg.hash = {
897 + .halg.digestsize = MD5_DIGEST_SIZE,
898 ++ .halg.statesize = sizeof(struct talitos_export_state),
899 + .halg.base = {
900 + .cra_name = "hmac(md5)",
901 + .cra_driver_name = "hmac-md5-talitos",
902 +@@ -2453,6 +2510,7 @@ static struct talitos_alg_template driver_algs[] = {
903 + { .type = CRYPTO_ALG_TYPE_AHASH,
904 + .alg.hash = {
905 + .halg.digestsize = SHA1_DIGEST_SIZE,
906 ++ .halg.statesize = sizeof(struct talitos_export_state),
907 + .halg.base = {
908 + .cra_name = "hmac(sha1)",
909 + .cra_driver_name = "hmac-sha1-talitos",
910 +@@ -2468,6 +2526,7 @@ static struct talitos_alg_template driver_algs[] = {
911 + { .type = CRYPTO_ALG_TYPE_AHASH,
912 + .alg.hash = {
913 + .halg.digestsize = SHA224_DIGEST_SIZE,
914 ++ .halg.statesize = sizeof(struct talitos_export_state),
915 + .halg.base = {
916 + .cra_name = "hmac(sha224)",
917 + .cra_driver_name = "hmac-sha224-talitos",
918 +@@ -2483,6 +2542,7 @@ static struct talitos_alg_template driver_algs[] = {
919 + { .type = CRYPTO_ALG_TYPE_AHASH,
920 + .alg.hash = {
921 + .halg.digestsize = SHA256_DIGEST_SIZE,
922 ++ .halg.statesize = sizeof(struct talitos_export_state),
923 + .halg.base = {
924 + .cra_name = "hmac(sha256)",
925 + .cra_driver_name = "hmac-sha256-talitos",
926 +@@ -2498,6 +2558,7 @@ static struct talitos_alg_template driver_algs[] = {
927 + { .type = CRYPTO_ALG_TYPE_AHASH,
928 + .alg.hash = {
929 + .halg.digestsize = SHA384_DIGEST_SIZE,
930 ++ .halg.statesize = sizeof(struct talitos_export_state),
931 + .halg.base = {
932 + .cra_name = "hmac(sha384)",
933 + .cra_driver_name = "hmac-sha384-talitos",
934 +@@ -2513,6 +2574,7 @@ static struct talitos_alg_template driver_algs[] = {
935 + { .type = CRYPTO_ALG_TYPE_AHASH,
936 + .alg.hash = {
937 + .halg.digestsize = SHA512_DIGEST_SIZE,
938 ++ .halg.statesize = sizeof(struct talitos_export_state),
939 + .halg.base = {
940 + .cra_name = "hmac(sha512)",
941 + .cra_driver_name = "hmac-sha512-talitos",
942 +@@ -2704,6 +2766,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
943 + t_alg->algt.alg.hash.finup = ahash_finup;
944 + t_alg->algt.alg.hash.digest = ahash_digest;
945 + t_alg->algt.alg.hash.setkey = ahash_setkey;
946 ++ t_alg->algt.alg.hash.import = ahash_import;
947 ++ t_alg->algt.alg.hash.export = ahash_export;
948 +
949 + if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
950 + !strncmp(alg->cra_name, "hmac", 4)) {
951 +diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
952 +index 3db9a659719b..5f0f4fc58f43 100644
953 +--- a/drivers/infiniband/ulp/srp/ib_srp.c
954 ++++ b/drivers/infiniband/ulp/srp/ib_srp.c
955 +@@ -1519,7 +1519,7 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
956 +
957 + if (dev->use_fast_reg) {
958 + state.sg = idb_sg;
959 +- sg_set_buf(idb_sg, req->indirect_desc, idb_len);
960 ++ sg_init_one(idb_sg, req->indirect_desc, idb_len);
961 + idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
962 + #ifdef CONFIG_NEED_SG_DMA_LENGTH
963 + idb_sg->dma_length = idb_sg->length; /* hack^2 */
964 +diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c
965 +index f2261ab54701..18663d4edae5 100644
966 +--- a/drivers/input/misc/pwm-beeper.c
967 ++++ b/drivers/input/misc/pwm-beeper.c
968 +@@ -20,21 +20,40 @@
969 + #include <linux/platform_device.h>
970 + #include <linux/pwm.h>
971 + #include <linux/slab.h>
972 ++#include <linux/workqueue.h>
973 +
974 + struct pwm_beeper {
975 + struct input_dev *input;
976 + struct pwm_device *pwm;
977 ++ struct work_struct work;
978 + unsigned long period;
979 + };
980 +
981 + #define HZ_TO_NANOSECONDS(x) (1000000000UL/(x))
982 +
983 ++static void __pwm_beeper_set(struct pwm_beeper *beeper)
984 ++{
985 ++ unsigned long period = beeper->period;
986 ++
987 ++ if (period) {
988 ++ pwm_config(beeper->pwm, period / 2, period);
989 ++ pwm_enable(beeper->pwm);
990 ++ } else
991 ++ pwm_disable(beeper->pwm);
992 ++}
993 ++
994 ++static void pwm_beeper_work(struct work_struct *work)
995 ++{
996 ++ struct pwm_beeper *beeper =
997 ++ container_of(work, struct pwm_beeper, work);
998 ++
999 ++ __pwm_beeper_set(beeper);
1000 ++}
1001 ++
1002 + static int pwm_beeper_event(struct input_dev *input,
1003 + unsigned int type, unsigned int code, int value)
1004 + {
1005 +- int ret = 0;
1006 + struct pwm_beeper *beeper = input_get_drvdata(input);
1007 +- unsigned long period;
1008 +
1009 + if (type != EV_SND || value < 0)
1010 + return -EINVAL;
1011 +@@ -49,22 +68,31 @@ static int pwm_beeper_event(struct input_dev *input,
1012 + return -EINVAL;
1013 + }
1014 +
1015 +- if (value == 0) {
1016 +- pwm_disable(beeper->pwm);
1017 +- } else {
1018 +- period = HZ_TO_NANOSECONDS(value);
1019 +- ret = pwm_config(beeper->pwm, period / 2, period);
1020 +- if (ret)
1021 +- return ret;
1022 +- ret = pwm_enable(beeper->pwm);
1023 +- if (ret)
1024 +- return ret;
1025 +- beeper->period = period;
1026 +- }
1027 ++ if (value == 0)
1028 ++ beeper->period = 0;
1029 ++ else
1030 ++ beeper->period = HZ_TO_NANOSECONDS(value);
1031 ++
1032 ++ schedule_work(&beeper->work);
1033 +
1034 + return 0;
1035 + }
1036 +
1037 ++static void pwm_beeper_stop(struct pwm_beeper *beeper)
1038 ++{
1039 ++ cancel_work_sync(&beeper->work);
1040 ++
1041 ++ if (beeper->period)
1042 ++ pwm_disable(beeper->pwm);
1043 ++}
1044 ++
1045 ++static void pwm_beeper_close(struct input_dev *input)
1046 ++{
1047 ++ struct pwm_beeper *beeper = input_get_drvdata(input);
1048 ++
1049 ++ pwm_beeper_stop(beeper);
1050 ++}
1051 ++
1052 + static int pwm_beeper_probe(struct platform_device *pdev)
1053 + {
1054 + unsigned long pwm_id = (unsigned long)dev_get_platdata(&pdev->dev);
1055 +@@ -87,6 +115,8 @@ static int pwm_beeper_probe(struct platform_device *pdev)
1056 + goto err_free;
1057 + }
1058 +
1059 ++ INIT_WORK(&beeper->work, pwm_beeper_work);
1060 ++
1061 + beeper->input = input_allocate_device();
1062 + if (!beeper->input) {
1063 + dev_err(&pdev->dev, "Failed to allocate input device\n");
1064 +@@ -106,6 +136,7 @@ static int pwm_beeper_probe(struct platform_device *pdev)
1065 + beeper->input->sndbit[0] = BIT(SND_TONE) | BIT(SND_BELL);
1066 +
1067 + beeper->input->event = pwm_beeper_event;
1068 ++ beeper->input->close = pwm_beeper_close;
1069 +
1070 + input_set_drvdata(beeper->input, beeper);
1071 +
1072 +@@ -135,7 +166,6 @@ static int pwm_beeper_remove(struct platform_device *pdev)
1073 +
1074 + input_unregister_device(beeper->input);
1075 +
1076 +- pwm_disable(beeper->pwm);
1077 + pwm_free(beeper->pwm);
1078 +
1079 + kfree(beeper);
1080 +@@ -147,8 +177,7 @@ static int __maybe_unused pwm_beeper_suspend(struct device *dev)
1081 + {
1082 + struct pwm_beeper *beeper = dev_get_drvdata(dev);
1083 +
1084 +- if (beeper->period)
1085 +- pwm_disable(beeper->pwm);
1086 ++ pwm_beeper_stop(beeper);
1087 +
1088 + return 0;
1089 + }
1090 +@@ -157,10 +186,8 @@ static int __maybe_unused pwm_beeper_resume(struct device *dev)
1091 + {
1092 + struct pwm_beeper *beeper = dev_get_drvdata(dev);
1093 +
1094 +- if (beeper->period) {
1095 +- pwm_config(beeper->pwm, beeper->period / 2, beeper->period);
1096 +- pwm_enable(beeper->pwm);
1097 +- }
1098 ++ if (beeper->period)
1099 ++ __pwm_beeper_set(beeper);
1100 +
1101 + return 0;
1102 + }
1103 +diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
1104 +index d7be6ddc34f6..2fc499a2207e 100644
1105 +--- a/drivers/irqchip/irq-gic-v3.c
1106 ++++ b/drivers/irqchip/irq-gic-v3.c
1107 +@@ -361,6 +361,13 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
1108 + if (static_key_true(&supports_deactivate))
1109 + gic_write_dir(irqnr);
1110 + #ifdef CONFIG_SMP
1111 ++ /*
1112 ++ * Unlike GICv2, we don't need an smp_rmb() here.
1113 ++ * The control dependency from gic_read_iar to
1114 ++ * the ISB in gic_write_eoir is enough to ensure
1115 ++ * that any shared data read by handle_IPI will
1116 ++ * be read after the ACK.
1117 ++ */
1118 + handle_IPI(irqnr, regs);
1119 + #else
1120 + WARN_ONCE(true, "Unexpected SGI received!\n");
1121 +@@ -380,6 +387,15 @@ static void __init gic_dist_init(void)
1122 + writel_relaxed(0, base + GICD_CTLR);
1123 + gic_dist_wait_for_rwp();
1124 +
1125 ++ /*
1126 ++ * Configure SPIs as non-secure Group-1. This will only matter
1127 ++ * if the GIC only has a single security state. This will not
1128 ++ * do the right thing if the kernel is running in secure mode,
1129 ++ * but that's not the intended use case anyway.
1130 ++ */
1131 ++ for (i = 32; i < gic_data.irq_nr; i += 32)
1132 ++ writel_relaxed(~0, base + GICD_IGROUPR + i / 8);
1133 ++
1134 + gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);
1135 +
1136 + /* Enable distributor with ARE, Group1 */
1137 +@@ -494,6 +510,9 @@ static void gic_cpu_init(void)
1138 +
1139 + rbase = gic_data_rdist_sgi_base();
1140 +
1141 ++ /* Configure SGIs/PPIs as non-secure Group-1 */
1142 ++ writel_relaxed(~0, rbase + GICR_IGROUPR0);
1143 ++
1144 + gic_cpu_config(rbase, gic_redist_wait_for_rwp);
1145 +
1146 + /* Give LPIs a spin */
1147 +diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
1148 +index abf2ffaed392..cebd8efe651a 100644
1149 +--- a/drivers/irqchip/irq-gic.c
1150 ++++ b/drivers/irqchip/irq-gic.c
1151 +@@ -347,6 +347,14 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
1152 + if (static_key_true(&supports_deactivate))
1153 + writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE);
1154 + #ifdef CONFIG_SMP
1155 ++ /*
1156 ++ * Ensure any shared data written by the CPU sending
1157 ++ * the IPI is read after we've read the ACK register
1158 ++ * on the GIC.
1159 ++ *
1160 ++ * Pairs with the write barrier in gic_raise_softirq
1161 ++ */
1162 ++ smp_rmb();
1163 + handle_IPI(irqnr, regs);
1164 + #endif
1165 + continue;
1166 +diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
1167 +index 004926955263..b0155b05cddb 100644
1168 +--- a/drivers/mcb/mcb-parse.c
1169 ++++ b/drivers/mcb/mcb-parse.c
1170 +@@ -57,7 +57,7 @@ static int chameleon_parse_gdd(struct mcb_bus *bus,
1171 + mdev->id = GDD_DEV(reg1);
1172 + mdev->rev = GDD_REV(reg1);
1173 + mdev->var = GDD_VAR(reg1);
1174 +- mdev->bar = GDD_BAR(reg1);
1175 ++ mdev->bar = GDD_BAR(reg2);
1176 + mdev->group = GDD_GRP(reg2);
1177 + mdev->inst = GDD_INS(reg2);
1178 +
1179 +diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
1180 +index b7b3e8ee64f2..c30290f33430 100644
1181 +--- a/drivers/mfd/omap-usb-tll.c
1182 ++++ b/drivers/mfd/omap-usb-tll.c
1183 +@@ -269,6 +269,8 @@ static int usbtll_omap_probe(struct platform_device *pdev)
1184 +
1185 + if (IS_ERR(tll->ch_clk[i]))
1186 + dev_dbg(dev, "can't get clock : %s\n", clkname);
1187 ++ else
1188 ++ clk_prepare(tll->ch_clk[i]);
1189 + }
1190 +
1191 + pm_runtime_put_sync(dev);
1192 +@@ -301,9 +303,12 @@ static int usbtll_omap_remove(struct platform_device *pdev)
1193 + tll_dev = NULL;
1194 + spin_unlock(&tll_lock);
1195 +
1196 +- for (i = 0; i < tll->nch; i++)
1197 +- if (!IS_ERR(tll->ch_clk[i]))
1198 ++ for (i = 0; i < tll->nch; i++) {
1199 ++ if (!IS_ERR(tll->ch_clk[i])) {
1200 ++ clk_unprepare(tll->ch_clk[i]);
1201 + clk_put(tll->ch_clk[i]);
1202 ++ }
1203 ++ }
1204 +
1205 + pm_runtime_disable(&pdev->dev);
1206 + return 0;
1207 +@@ -420,7 +425,7 @@ int omap_tll_enable(struct usbhs_omap_platform_data *pdata)
1208 + if (IS_ERR(tll->ch_clk[i]))
1209 + continue;
1210 +
1211 +- r = clk_prepare_enable(tll->ch_clk[i]);
1212 ++ r = clk_enable(tll->ch_clk[i]);
1213 + if (r) {
1214 + dev_err(tll_dev,
1215 + "Error enabling ch %d clock: %d\n", i, r);
1216 +@@ -448,7 +453,7 @@ int omap_tll_disable(struct usbhs_omap_platform_data *pdata)
1217 + for (i = 0; i < tll->nch; i++) {
1218 + if (omap_usb_mode_needs_tll(pdata->port_mode[i])) {
1219 + if (!IS_ERR(tll->ch_clk[i]))
1220 +- clk_disable_unprepare(tll->ch_clk[i]);
1221 ++ clk_disable(tll->ch_clk[i]);
1222 + }
1223 + }
1224 +
1225 +diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
1226 +index cd0403f09267..e79c0371ee6f 100644
1227 +--- a/drivers/misc/mei/amthif.c
1228 ++++ b/drivers/misc/mei/amthif.c
1229 +@@ -417,8 +417,10 @@ int mei_amthif_irq_read_msg(struct mei_cl *cl,
1230 +
1231 + dev = cl->dev;
1232 +
1233 +- if (dev->iamthif_state != MEI_IAMTHIF_READING)
1234 ++ if (dev->iamthif_state != MEI_IAMTHIF_READING) {
1235 ++ mei_irq_discard_msg(dev, mei_hdr);
1236 + return 0;
1237 ++ }
1238 +
1239 + ret = mei_cl_irq_read_msg(cl, mei_hdr, cmpl_list);
1240 + if (ret)
1241 +diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
1242 +index 1a173d0af694..a77643954523 100644
1243 +--- a/drivers/misc/mei/bus.c
1244 ++++ b/drivers/misc/mei/bus.c
1245 +@@ -222,17 +222,23 @@ EXPORT_SYMBOL_GPL(mei_cldev_recv);
1246 + static void mei_cl_bus_event_work(struct work_struct *work)
1247 + {
1248 + struct mei_cl_device *cldev;
1249 ++ struct mei_device *bus;
1250 +
1251 + cldev = container_of(work, struct mei_cl_device, event_work);
1252 +
1253 ++ bus = cldev->bus;
1254 ++
1255 + if (cldev->event_cb)
1256 + cldev->event_cb(cldev, cldev->events, cldev->event_context);
1257 +
1258 + cldev->events = 0;
1259 +
1260 + /* Prepare for the next read */
1261 +- if (cldev->events_mask & BIT(MEI_CL_EVENT_RX))
1262 ++ if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) {
1263 ++ mutex_lock(&bus->device_lock);
1264 + mei_cl_read_start(cldev->cl, 0, NULL);
1265 ++ mutex_unlock(&bus->device_lock);
1266 ++ }
1267 + }
1268 +
1269 + /**
1270 +@@ -296,6 +302,7 @@ int mei_cldev_register_event_cb(struct mei_cl_device *cldev,
1271 + unsigned long events_mask,
1272 + mei_cldev_event_cb_t event_cb, void *context)
1273 + {
1274 ++ struct mei_device *bus = cldev->bus;
1275 + int ret;
1276 +
1277 + if (cldev->event_cb)
1278 +@@ -308,15 +315,17 @@ int mei_cldev_register_event_cb(struct mei_cl_device *cldev,
1279 + INIT_WORK(&cldev->event_work, mei_cl_bus_event_work);
1280 +
1281 + if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) {
1282 ++ mutex_lock(&bus->device_lock);
1283 + ret = mei_cl_read_start(cldev->cl, 0, NULL);
1284 ++ mutex_unlock(&bus->device_lock);
1285 + if (ret && ret != -EBUSY)
1286 + return ret;
1287 + }
1288 +
1289 + if (cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF)) {
1290 +- mutex_lock(&cldev->cl->dev->device_lock);
1291 ++ mutex_lock(&bus->device_lock);
1292 + ret = mei_cl_notify_request(cldev->cl, NULL, event_cb ? 1 : 0);
1293 +- mutex_unlock(&cldev->cl->dev->device_lock);
1294 ++ mutex_unlock(&bus->device_lock);
1295 + if (ret)
1296 + return ret;
1297 + }
1298 +diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
1299 +index a6c87c713193..958af84884b5 100644
1300 +--- a/drivers/misc/mei/client.c
1301 ++++ b/drivers/misc/mei/client.c
1302 +@@ -1735,6 +1735,10 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
1303 + wake_up(&cl->wait);
1304 +
1305 + break;
1306 ++ case MEI_FOP_DISCONNECT_RSP:
1307 ++ mei_io_cb_free(cb);
1308 ++ mei_cl_set_disconnected(cl);
1309 ++ break;
1310 + default:
1311 + BUG_ON(0);
1312 + }
1313 +diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
1314 +index e7b7aad0999b..fd8a9f057ea6 100644
1315 +--- a/drivers/misc/mei/hbm.c
1316 ++++ b/drivers/misc/mei/hbm.c
1317 +@@ -873,8 +873,7 @@ static int mei_hbm_fw_disconnect_req(struct mei_device *dev,
1318 + cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT_RSP, NULL);
1319 + if (!cb)
1320 + return -ENOMEM;
1321 +- cl_dbg(dev, cl, "add disconnect response as first\n");
1322 +- list_add(&cb->list, &dev->ctrl_wr_list.list);
1323 ++ list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
1324 + }
1325 + return 0;
1326 + }
1327 +diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
1328 +index 64b568a0268d..d1df797c7568 100644
1329 +--- a/drivers/misc/mei/interrupt.c
1330 ++++ b/drivers/misc/mei/interrupt.c
1331 +@@ -76,7 +76,6 @@ static inline int mei_cl_hbm_equal(struct mei_cl *cl,
1332 + * @dev: mei device
1333 + * @hdr: message header
1334 + */
1335 +-static inline
1336 + void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr)
1337 + {
1338 + /*
1339 +@@ -184,10 +183,7 @@ static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
1340 + return -EMSGSIZE;
1341 +
1342 + ret = mei_hbm_cl_disconnect_rsp(dev, cl);
1343 +- mei_cl_set_disconnected(cl);
1344 +- mei_io_cb_free(cb);
1345 +- mei_me_cl_put(cl->me_cl);
1346 +- cl->me_cl = NULL;
1347 ++ list_move_tail(&cb->list, &cmpl_list->list);
1348 +
1349 + return ret;
1350 + }
1351 +diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
1352 +index 4250555d5e72..1b06e2fd6858 100644
1353 +--- a/drivers/misc/mei/mei_dev.h
1354 ++++ b/drivers/misc/mei/mei_dev.h
1355 +@@ -782,6 +782,8 @@ bool mei_hbuf_acquire(struct mei_device *dev);
1356 +
1357 + bool mei_write_is_idle(struct mei_device *dev);
1358 +
1359 ++void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr);
1360 ++
1361 + #if IS_ENABLED(CONFIG_DEBUG_FS)
1362 + int mei_dbgfs_register(struct mei_device *dev, const char *name);
1363 + void mei_dbgfs_deregister(struct mei_device *dev);
1364 +diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
1365 +index 553113eb1bdb..c641c202fe7e 100644
1366 +--- a/drivers/mmc/card/block.c
1367 ++++ b/drivers/mmc/card/block.c
1368 +@@ -2514,11 +2514,12 @@ static const struct mmc_fixup blk_fixups[] =
1369 + MMC_QUIRK_BLK_NO_CMD23),
1370 +
1371 + /*
1372 +- * Some Micron MMC cards needs longer data read timeout than
1373 +- * indicated in CSD.
1374 ++ * Some MMC cards need longer data read timeout than indicated in CSD.
1375 + */
1376 + MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
1377 + MMC_QUIRK_LONG_READ_TIME),
1378 ++ MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
1379 ++ MMC_QUIRK_LONG_READ_TIME),
1380 +
1381 + /*
1382 + * On these Samsung MoviNAND parts, performing secure erase or
1383 +diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
1384 +index 5ae89e48fd85..5f7d10ba498a 100644
1385 +--- a/drivers/mmc/core/core.c
1386 ++++ b/drivers/mmc/core/core.c
1387 +@@ -874,11 +874,11 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
1388 + /*
1389 + * Some cards require longer data read timeout than indicated in CSD.
1390 + * Address this by setting the read timeout to a "reasonably high"
1391 +- * value. For the cards tested, 300ms has proven enough. If necessary,
1392 ++ * value. For the cards tested, 600ms has proven enough. If necessary,
1393 + * this value can be increased if other problematic cards require this.
1394 + */
1395 + if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
1396 +- data->timeout_ns = 300000000;
1397 ++ data->timeout_ns = 600000000;
1398 + data->timeout_clks = 0;
1399 + }
1400 +
1401 +diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
1402 +index 3d5087b03999..78187699467a 100644
1403 +--- a/drivers/mmc/core/mmc.c
1404 ++++ b/drivers/mmc/core/mmc.c
1405 +@@ -333,6 +333,9 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
1406 + }
1407 + }
1408 +
1409 ++/* Minimum partition switch timeout in milliseconds */
1410 ++#define MMC_MIN_PART_SWITCH_TIME 300
1411 ++
1412 + /*
1413 + * Decode extended CSD.
1414 + */
1415 +@@ -397,6 +400,10 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
1416 +
1417 + /* EXT_CSD value is in units of 10ms, but we store in ms */
1418 + card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
1419 ++ /* Some eMMC set the value too low so set a minimum */
1420 ++ if (card->ext_csd.part_time &&
1421 ++ card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME)
1422 ++ card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME;
1423 +
1424 + /* Sleep / awake timeout in 100ns units */
1425 + if (sa_shift > 0 && sa_shift <= 0x17)
1426 +diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
1427 +index a5cda926d38e..8aea3fa6938b 100644
1428 +--- a/drivers/mmc/host/sdhci-acpi.c
1429 ++++ b/drivers/mmc/host/sdhci-acpi.c
1430 +@@ -233,7 +233,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
1431 + .chip = &sdhci_acpi_chip_int,
1432 + .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
1433 + MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
1434 +- MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY,
1435 ++ MMC_CAP_WAIT_WHILE_BUSY,
1436 + .caps2 = MMC_CAP2_HC_ERASE_SZ,
1437 + .flags = SDHCI_ACPI_RUNTIME_PM,
1438 + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
1439 +@@ -248,7 +248,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
1440 + SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
1441 + .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
1442 + .caps = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD |
1443 +- MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY,
1444 ++ MMC_CAP_WAIT_WHILE_BUSY,
1445 + .flags = SDHCI_ACPI_RUNTIME_PM,
1446 + .pm_caps = MMC_PM_KEEP_POWER,
1447 + .probe_slot = sdhci_acpi_sdio_probe_slot,
1448 +@@ -260,7 +260,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
1449 + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
1450 + .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
1451 + SDHCI_QUIRK2_STOP_WITH_TC,
1452 +- .caps = MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY,
1453 ++ .caps = MMC_CAP_WAIT_WHILE_BUSY,
1454 + .probe_slot = sdhci_acpi_sd_probe_slot,
1455 + };
1456 +
1457 +diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
1458 +index 610154836d79..5ebe6eb6b89e 100644
1459 +--- a/drivers/mmc/host/sdhci-pci-core.c
1460 ++++ b/drivers/mmc/host/sdhci-pci-core.c
1461 +@@ -361,7 +361,6 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
1462 + {
1463 + slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
1464 + MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
1465 +- MMC_CAP_BUS_WIDTH_TEST |
1466 + MMC_CAP_WAIT_WHILE_BUSY;
1467 + slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ;
1468 + slot->hw_reset = sdhci_pci_int_hw_reset;
1469 +@@ -377,15 +376,13 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
1470 + static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
1471 + {
1472 + slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
1473 +- MMC_CAP_BUS_WIDTH_TEST |
1474 + MMC_CAP_WAIT_WHILE_BUSY;
1475 + return 0;
1476 + }
1477 +
1478 + static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
1479 + {
1480 +- slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST |
1481 +- MMC_CAP_WAIT_WHILE_BUSY;
1482 ++ slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
1483 + slot->cd_con_id = NULL;
1484 + slot->cd_idx = 0;
1485 + slot->cd_override_level = true;
1486 +diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
1487 +index 5b9834cf2820..96fddb016bf1 100644
1488 +--- a/drivers/mtd/ubi/eba.c
1489 ++++ b/drivers/mtd/ubi/eba.c
1490 +@@ -426,8 +426,25 @@ retry:
1491 + pnum, vol_id, lnum);
1492 + err = -EBADMSG;
1493 + } else {
1494 +- err = -EINVAL;
1495 +- ubi_ro_mode(ubi);
1496 ++ /*
1497 ++ * Ending up here in the non-Fastmap case
1498 ++ * is a clear bug as the VID header had to
1499 ++ * be present at scan time to have it referenced.
1500 ++ * With fastmap the story is more complicated.
1501 ++ * Fastmap has the mapping info without the need
1502 ++ * of a full scan. So the LEB could have been
1503 ++ * unmapped, Fastmap cannot know this and keeps
1504 ++ * the LEB referenced.
1505 ++ * This is valid and works as the layer above UBI
1506 ++ * has to do bookkeeping about used/referenced
1507 ++ * LEBs in any case.
1508 ++ */
1509 ++ if (ubi->fast_attach) {
1510 ++ err = -EBADMSG;
1511 ++ } else {
1512 ++ err = -EINVAL;
1513 ++ ubi_ro_mode(ubi);
1514 ++ }
1515 + }
1516 + }
1517 + goto out_free;
1518 +diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
1519 +index 263b439e21a8..990898b9dc72 100644
1520 +--- a/drivers/mtd/ubi/fastmap.c
1521 ++++ b/drivers/mtd/ubi/fastmap.c
1522 +@@ -1058,6 +1058,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
1523 + ubi_msg(ubi, "fastmap WL pool size: %d",
1524 + ubi->fm_wl_pool.max_size);
1525 + ubi->fm_disabled = 0;
1526 ++ ubi->fast_attach = 1;
1527 +
1528 + ubi_free_vid_hdr(ubi, vh);
1529 + kfree(ech);
1530 +diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
1531 +index 2974b67f6c6c..de1ea2e4c37d 100644
1532 +--- a/drivers/mtd/ubi/ubi.h
1533 ++++ b/drivers/mtd/ubi/ubi.h
1534 +@@ -462,6 +462,7 @@ struct ubi_debug_info {
1535 + * @fm_eba_sem: allows ubi_update_fastmap() to block EBA table changes
1536 + * @fm_work: fastmap work queue
1537 + * @fm_work_scheduled: non-zero if fastmap work was scheduled
1538 ++ * @fast_attach: non-zero if UBI was attached by fastmap
1539 + *
1540 + * @used: RB-tree of used physical eraseblocks
1541 + * @erroneous: RB-tree of erroneous used physical eraseblocks
1542 +@@ -570,6 +571,7 @@ struct ubi_device {
1543 + size_t fm_size;
1544 + struct work_struct fm_work;
1545 + int fm_work_scheduled;
1546 ++ int fast_attach;
1547 +
1548 + /* Wear-leveling sub-system's stuff */
1549 + struct rb_root used;
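
The eba.c/fastmap.c/ubi.h hunks above make a missing VID header fatal only when the device was attached by a full scan: under fastmap the LEB may simply have been unmapped, so the read reports -EBADMSG instead of forcing read-only mode. A minimal userspace sketch of that decision; struct ubi_ctx and read_leb_missing_vid_hdr() are illustrative stand-ins, not the UBI internals.

#include <stdio.h>

#define EINVAL  22
#define EBADMSG 74

struct ubi_ctx {
	int fast_attach;   /* set when the volume was attached via fastmap */
	int ro_mode;       /* sticky read-only flag */
};

/* What to do when a referenced LEB turns out to have no VID header. */
static int read_leb_missing_vid_hdr(struct ubi_ctx *ubi)
{
	if (ubi->fast_attach) {
		/* Fastmap may legitimately reference an unmapped LEB:
		 * report a correctable read error, stay read-write. */
		return -EBADMSG;
	}
	/* After a full scan this cannot happen; treat it as corruption. */
	ubi->ro_mode = 1;
	return -EINVAL;
}

int main(void)
{
	struct ubi_ctx scanned = { 0, 0 };
	struct ubi_ctx fastmapped = { 1, 0 };
	int err;

	err = read_leb_missing_vid_hdr(&scanned);
	printf("full scan: err=%d ro=%d\n", err, scanned.ro_mode);

	err = read_leb_missing_vid_hdr(&fastmapped);
	printf("fastmap:   err=%d ro=%d\n", err, fastmapped.ro_mode);
	return 0;
}
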
1550 +diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
1551 +index 141c2a42d7ed..910c12e2638e 100644
1552 +--- a/drivers/net/can/dev.c
1553 ++++ b/drivers/net/can/dev.c
1554 +@@ -696,11 +696,17 @@ int can_change_mtu(struct net_device *dev, int new_mtu)
1555 + /* allow change of MTU according to the CANFD ability of the device */
1556 + switch (new_mtu) {
1557 + case CAN_MTU:
1558 ++ /* 'CANFD-only' controllers can not switch to CAN_MTU */
1559 ++ if (priv->ctrlmode_static & CAN_CTRLMODE_FD)
1560 ++ return -EINVAL;
1561 ++
1562 + priv->ctrlmode &= ~CAN_CTRLMODE_FD;
1563 + break;
1564 +
1565 + case CANFD_MTU:
1566 +- if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD))
1567 ++ /* check for potential CANFD ability */
1568 ++ if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) &&
1569 ++ !(priv->ctrlmode_static & CAN_CTRLMODE_FD))
1570 + return -EINVAL;
1571 +
1572 + priv->ctrlmode |= CAN_CTRLMODE_FD;
1573 +@@ -782,6 +788,35 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
1574 + = { .len = sizeof(struct can_bittiming_const) },
1575 + };
1576 +
1577 ++static int can_validate(struct nlattr *tb[], struct nlattr *data[])
1578 ++{
1579 ++ bool is_can_fd = false;
1580 ++
1581 ++ /* Make sure that valid CAN FD configurations always consist of
1582 ++ * - nominal/arbitration bittiming
1583 ++ * - data bittiming
1584 ++ * - control mode with CAN_CTRLMODE_FD set
1585 ++ */
1586 ++
1587 ++ if (data[IFLA_CAN_CTRLMODE]) {
1588 ++ struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
1589 ++
1590 ++ is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
1591 ++ }
1592 ++
1593 ++ if (is_can_fd) {
1594 ++ if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING])
1595 ++ return -EOPNOTSUPP;
1596 ++ }
1597 ++
1598 ++ if (data[IFLA_CAN_DATA_BITTIMING]) {
1599 ++ if (!is_can_fd || !data[IFLA_CAN_BITTIMING])
1600 ++ return -EOPNOTSUPP;
1601 ++ }
1602 ++
1603 ++ return 0;
1604 ++}
1605 ++
1606 + static int can_changelink(struct net_device *dev,
1607 + struct nlattr *tb[], struct nlattr *data[])
1608 + {
1609 +@@ -813,19 +848,31 @@ static int can_changelink(struct net_device *dev,
1610 +
1611 + if (data[IFLA_CAN_CTRLMODE]) {
1612 + struct can_ctrlmode *cm;
1613 ++ u32 ctrlstatic;
1614 ++ u32 maskedflags;
1615 +
1616 + /* Do not allow changing controller mode while running */
1617 + if (dev->flags & IFF_UP)
1618 + return -EBUSY;
1619 + cm = nla_data(data[IFLA_CAN_CTRLMODE]);
1620 ++ ctrlstatic = priv->ctrlmode_static;
1621 ++ maskedflags = cm->flags & cm->mask;
1622 ++
1623 ++ /* check whether provided bits are allowed to be passed */
1624 ++ if (cm->mask & ~(priv->ctrlmode_supported | ctrlstatic))
1625 ++ return -EOPNOTSUPP;
1626 ++
1627 ++ /* do not check for static fd-non-iso if 'fd' is disabled */
1628 ++ if (!(maskedflags & CAN_CTRLMODE_FD))
1629 ++ ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
1630 +
1631 +- /* check whether changed bits are allowed to be modified */
1632 +- if (cm->mask & ~priv->ctrlmode_supported)
1633 ++ /* make sure static options are provided by configuration */
1634 ++ if ((maskedflags & ctrlstatic) != ctrlstatic)
1635 + return -EOPNOTSUPP;
1636 +
1637 + /* clear bits to be modified and copy the flag values */
1638 + priv->ctrlmode &= ~cm->mask;
1639 +- priv->ctrlmode |= (cm->flags & cm->mask);
1640 ++ priv->ctrlmode |= maskedflags;
1641 +
1642 + /* CAN_CTRLMODE_FD can only be set when driver supports FD */
1643 + if (priv->ctrlmode & CAN_CTRLMODE_FD)
1644 +@@ -966,6 +1013,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
1645 + .maxtype = IFLA_CAN_MAX,
1646 + .policy = can_policy,
1647 + .setup = can_setup,
1648 ++ .validate = can_validate,
1649 + .newlink = can_newlink,
1650 + .changelink = can_changelink,
1651 + .get_size = can_get_size,
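
The dev.c hunks above add can_validate() and tighten can_changelink(): a CAN FD configuration must carry both the nominal and the data bittiming together with CAN_CTRLMODE_FD, and a data bittiming on its own is rejected. A standalone sketch of the validate rule, with plain booleans in place of the netlink attribute arrays (struct fd_config and its fields are illustrative only):

#include <stdio.h>

#define EOPNOTSUPP 95
#define CTRLMODE_FD (1u << 5)   /* stand-in for CAN_CTRLMODE_FD */

struct fd_config {
	unsigned int ctrl_flags;   /* requested control-mode flags */
	unsigned int ctrl_mask;    /* which flags the request touches */
	int has_bittiming;         /* nominal/arbitration bittiming given? */
	int has_data_bittiming;    /* data-phase bittiming given? */
};

/* Mirror of the validation rule: FD needs both bittimings, and a data
 * bittiming without FD (or without a nominal bittiming) is rejected. */
static int validate_fd_config(const struct fd_config *c)
{
	int is_can_fd = (c->ctrl_flags & c->ctrl_mask & CTRLMODE_FD) != 0;

	if (is_can_fd && (!c->has_bittiming || !c->has_data_bittiming))
		return -EOPNOTSUPP;

	if (c->has_data_bittiming && (!is_can_fd || !c->has_bittiming))
		return -EOPNOTSUPP;

	return 0;
}

int main(void)
{
	struct fd_config ok   = { CTRLMODE_FD, CTRLMODE_FD, 1, 1 };
	struct fd_config bad  = { CTRLMODE_FD, CTRLMODE_FD, 1, 0 };
	struct fd_config bad2 = { 0, 0, 1, 1 };   /* data bittiming without FD */

	printf("%d %d %d\n", validate_fd_config(&ok),
	       validate_fd_config(&bad), validate_fd_config(&bad2));
	return 0;
}
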
1652 +diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
1653 +index 39cf911f7a1e..195f15edb32e 100644
1654 +--- a/drivers/net/can/m_can/m_can.c
1655 ++++ b/drivers/net/can/m_can/m_can.c
1656 +@@ -955,7 +955,7 @@ static struct net_device *alloc_m_can_dev(void)
1657 + priv->can.do_get_berr_counter = m_can_get_berr_counter;
1658 +
1659 + /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.1 */
1660 +- priv->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO;
1661 ++ can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
1662 +
1663 + /* CAN_CTRLMODE_FD_NON_ISO can not be changed with M_CAN IP v3.0.1 */
1664 + priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
1665 +diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
1666 +index bd9acff1eb7b..7fbd8f044207 100644
1667 +--- a/drivers/net/usb/asix_common.c
1668 ++++ b/drivers/net/usb/asix_common.c
1669 +@@ -66,7 +66,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
1670 + * buffer.
1671 + */
1672 + if (rx->remaining && (rx->remaining + sizeof(u32) <= skb->len)) {
1673 +- offset = ((rx->remaining + 1) & 0xfffe) + sizeof(u32);
1674 ++ offset = ((rx->remaining + 1) & 0xfffe);
1675 + rx->header = get_unaligned_le32(skb->data + offset);
1676 + offset = 0;
1677 +
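
The asix_common.c hunk drops an extra sizeof(u32) when locating the next packet header after a split packet: the leftover byte count only needs rounding up to 16-bit alignment, since the header itself sits right at that boundary. A small demo of the two formulas (values purely illustrative):

#include <stdio.h>

/* Old and new ways of locating the next 32-bit packet header once
 * 'remaining' bytes of the previous packet have been consumed. */
static unsigned int offset_old(unsigned int remaining)
{
	return ((remaining + 1) & 0xfffe) + 4;   /* + sizeof(u32): one word too far */
}

static unsigned int offset_new(unsigned int remaining)
{
	return (remaining + 1) & 0xfffe;         /* just round up to even */
}

int main(void)
{
	unsigned int r;

	for (r = 1; r <= 5; r++)
		printf("remaining=%u  old=%u  new=%u\n",
		       r, offset_old(r), offset_new(r));
	return 0;
}
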
1678 +diff --git a/drivers/platform/x86/dell-rbtn.c b/drivers/platform/x86/dell-rbtn.c
1679 +index cd410e392550..d33e9ad3218f 100644
1680 +--- a/drivers/platform/x86/dell-rbtn.c
1681 ++++ b/drivers/platform/x86/dell-rbtn.c
1682 +@@ -28,6 +28,7 @@ struct rbtn_data {
1683 + enum rbtn_type type;
1684 + struct rfkill *rfkill;
1685 + struct input_dev *input_dev;
1686 ++ bool suspended;
1687 + };
1688 +
1689 +
1690 +@@ -220,9 +221,55 @@ static const struct acpi_device_id rbtn_ids[] = {
1691 + { "", 0 },
1692 + };
1693 +
1694 ++#ifdef CONFIG_PM_SLEEP
1695 ++static void ACPI_SYSTEM_XFACE rbtn_clear_suspended_flag(void *context)
1696 ++{
1697 ++ struct rbtn_data *rbtn_data = context;
1698 ++
1699 ++ rbtn_data->suspended = false;
1700 ++}
1701 ++
1702 ++static int rbtn_suspend(struct device *dev)
1703 ++{
1704 ++ struct acpi_device *device = to_acpi_device(dev);
1705 ++ struct rbtn_data *rbtn_data = acpi_driver_data(device);
1706 ++
1707 ++ rbtn_data->suspended = true;
1708 ++
1709 ++ return 0;
1710 ++}
1711 ++
1712 ++static int rbtn_resume(struct device *dev)
1713 ++{
1714 ++ struct acpi_device *device = to_acpi_device(dev);
1715 ++ struct rbtn_data *rbtn_data = acpi_driver_data(device);
1716 ++ acpi_status status;
1717 ++
1718 ++ /*
1719 ++ * Upon resume, some BIOSes send an ACPI notification that triggers
1720 ++ * an unwanted input event. In order to ignore it, we use a flag
1721 ++ * that we set at suspend and clear once we have received the extra
1722 ++ * ACPI notification. Since ACPI notifications are delivered
1723 ++ * asynchronously to drivers, we clear the flag from the workqueue
1724 ++ * used to deliver the notifications. This should be enough
1725 ++ * to have the flag cleared only after we received the extra
1726 ++ * notification, if any.
1727 ++ */
1728 ++ status = acpi_os_execute(OSL_NOTIFY_HANDLER,
1729 ++ rbtn_clear_suspended_flag, rbtn_data);
1730 ++ if (ACPI_FAILURE(status))
1731 ++ rbtn_clear_suspended_flag(rbtn_data);
1732 ++
1733 ++ return 0;
1734 ++}
1735 ++#endif
1736 ++
1737 ++static SIMPLE_DEV_PM_OPS(rbtn_pm_ops, rbtn_suspend, rbtn_resume);
1738 ++
1739 + static struct acpi_driver rbtn_driver = {
1740 + .name = "dell-rbtn",
1741 + .ids = rbtn_ids,
1742 ++ .drv.pm = &rbtn_pm_ops,
1743 + .ops = {
1744 + .add = rbtn_add,
1745 + .remove = rbtn_remove,
1746 +@@ -384,6 +431,15 @@ static void rbtn_notify(struct acpi_device *device, u32 event)
1747 + {
1748 + struct rbtn_data *rbtn_data = device->driver_data;
1749 +
1750 ++ /*
1751 ++ * Some BIOSes send a notification at resume.
1752 ++ * Ignore it to prevent unwanted input events.
1753 ++ */
1754 ++ if (rbtn_data->suspended) {
1755 ++ dev_dbg(&device->dev, "ACPI notification ignored\n");
1756 ++ return;
1757 ++ }
1758 ++
1759 + if (event != 0x80) {
1760 + dev_info(&device->dev, "Received unknown event (0x%x)\n",
1761 + event);
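
The dell-rbtn hunks set a suspended flag in rbtn_suspend() and clear it from the ACPI notify workqueue in rbtn_resume(), so any spurious notification the BIOS queues at resume is processed, and ignored, before the flag is cleared. A toy model of that ordering; the FIFO of callbacks stands in for the ACPI notify workqueue and all names are made up:

#include <stdio.h>

struct rbtn_state { int suspended; };

/* Tiny stand-in for the ACPI notify workqueue: callbacks run in FIFO order. */
typedef void (*work_fn)(struct rbtn_state *);
static work_fn queue[8];
static int qlen;

static void queue_work(work_fn fn) { queue[qlen++] = fn; }

static void run_queue(struct rbtn_state *s)
{
	int i;

	for (i = 0; i < qlen; i++)
		queue[i](s);
	qlen = 0;
}

/* Like rbtn_notify(): drop events that arrive while 'suspended' is set. */
static void notify(struct rbtn_state *s)
{
	if (s->suspended) {
		printf("notification ignored (resume artifact)\n");
		return;
	}
	printf("notification delivered as input event\n");
}

/* Like rbtn_clear_suspended_flag(), queued from resume. */
static void clear_suspended(struct rbtn_state *s) { s->suspended = 0; }

int main(void)
{
	struct rbtn_state s = { 1 };     /* suspend set the flag */

	queue_work(notify);              /* spurious BIOS notification at resume */
	queue_work(clear_suspended);     /* resume defers the clear */
	queue_work(notify);              /* a real button press afterwards */
	run_queue(&s);
	return 0;
}
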
1762 +diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
1763 +index 054923e3393c..f0cfaacbfabd 100644
1764 +--- a/drivers/scsi/scsi_scan.c
1765 ++++ b/drivers/scsi/scsi_scan.c
1766 +@@ -314,6 +314,7 @@ static void scsi_target_destroy(struct scsi_target *starget)
1767 + struct Scsi_Host *shost = dev_to_shost(dev->parent);
1768 + unsigned long flags;
1769 +
1770 ++ BUG_ON(starget->state == STARGET_DEL);
1771 + starget->state = STARGET_DEL;
1772 + transport_destroy_device(dev);
1773 + spin_lock_irqsave(shost->host_lock, flags);
1774 +diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
1775 +index c8115b4fe474..f7ae898833dd 100644
1776 +--- a/drivers/scsi/scsi_sysfs.c
1777 ++++ b/drivers/scsi/scsi_sysfs.c
1778 +@@ -1192,18 +1192,18 @@ static void __scsi_remove_target(struct scsi_target *starget)
1779 + void scsi_remove_target(struct device *dev)
1780 + {
1781 + struct Scsi_Host *shost = dev_to_shost(dev->parent);
1782 +- struct scsi_target *starget, *last_target = NULL;
1783 ++ struct scsi_target *starget;
1784 + unsigned long flags;
1785 +
1786 + restart:
1787 + spin_lock_irqsave(shost->host_lock, flags);
1788 + list_for_each_entry(starget, &shost->__targets, siblings) {
1789 + if (starget->state == STARGET_DEL ||
1790 +- starget == last_target)
1791 ++ starget->state == STARGET_REMOVE)
1792 + continue;
1793 + if (starget->dev.parent == dev || &starget->dev == dev) {
1794 + kref_get(&starget->reap_ref);
1795 +- last_target = starget;
1796 ++ starget->state = STARGET_REMOVE;
1797 + spin_unlock_irqrestore(shost->host_lock, flags);
1798 + __scsi_remove_target(starget);
1799 + scsi_target_reap(starget);
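
The scsi_sysfs.c hunk replaces the last_target bookkeeping with a STARGET_REMOVE state set under the host lock, so a restarted scan of the target list skips entries that are already being torn down. A rough sketch of that pattern on a plain array, with the locking reduced to comments (not the SCSI midlayer code):

#include <stdio.h>

enum tstate { RUNNING, REMOVING, DELETED };

struct target { enum tstate state; };

/* Mark a target REMOVING before dropping the "lock" and doing the slow
 * teardown, so a restarted scan skips it instead of relying on a single
 * remembered last_target pointer. */
static void remove_all(struct target *t, int n)
{
	int i, restart;

	do {
		restart = 0;
		for (i = 0; i < n; i++) {
			if (t[i].state != RUNNING)
				continue;
			t[i].state = REMOVING;   /* done while "holding the lock" */
			/* lock dropped here; slow removal happens */
			t[i].state = DELETED;
			restart = 1;             /* list may have changed: rescan */
			break;
		}
	} while (restart);
}

int main(void)
{
	struct target targets[3] = { { RUNNING }, { RUNNING }, { RUNNING } };
	int i;

	remove_all(targets, 3);
	for (i = 0; i < 3; i++)
		printf("target %d state %d\n", i, targets[i].state);
	return 0;
}
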
1800 +diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
1801 +index 940781183fac..3be10963f98b 100644
1802 +--- a/drivers/staging/comedi/drivers/das1800.c
1803 ++++ b/drivers/staging/comedi/drivers/das1800.c
1804 +@@ -567,14 +567,17 @@ static int das1800_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
1805 + struct comedi_isadma_desc *desc;
1806 + int i;
1807 +
1808 +- outb(0x0, dev->iobase + DAS1800_STATUS); /* disable conversions */
1809 +- outb(0x0, dev->iobase + DAS1800_CONTROL_B); /* disable interrupts and dma */
1810 +- outb(0x0, dev->iobase + DAS1800_CONTROL_A); /* disable and clear fifo and stop triggering */
1811 +-
1812 +- for (i = 0; i < 2; i++) {
1813 +- desc = &dma->desc[i];
1814 +- if (desc->chan)
1815 +- comedi_isadma_disable(desc->chan);
1816 ++ /* disable and stop conversions */
1817 ++ outb(0x0, dev->iobase + DAS1800_STATUS);
1818 ++ outb(0x0, dev->iobase + DAS1800_CONTROL_B);
1819 ++ outb(0x0, dev->iobase + DAS1800_CONTROL_A);
1820 ++
1821 ++ if (dma) {
1822 ++ for (i = 0; i < 2; i++) {
1823 ++ desc = &dma->desc[i];
1824 ++ if (desc->chan)
1825 ++ comedi_isadma_disable(desc->chan);
1826 ++ }
1827 + }
1828 +
1829 + return 0;
1830 +@@ -934,13 +937,14 @@ static void das1800_ai_setup_dma(struct comedi_device *dev,
1831 + {
1832 + struct das1800_private *devpriv = dev->private;
1833 + struct comedi_isadma *dma = devpriv->dma;
1834 +- struct comedi_isadma_desc *desc = &dma->desc[0];
1835 ++ struct comedi_isadma_desc *desc;
1836 + unsigned int bytes;
1837 +
1838 + if ((devpriv->irq_dma_bits & DMA_ENABLED) == 0)
1839 + return;
1840 +
1841 + dma->cur_dma = 0;
1842 ++ desc = &dma->desc[0];
1843 +
1844 + /* determine a dma transfer size to fill buffer in 0.3 sec */
1845 + bytes = das1800_ai_transfer_size(dev, s, desc->maxsize, 300000000);
1846 +diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
1847 +index 0dde34e3a7c5..545c60c826a1 100644
1848 +--- a/drivers/thunderbolt/eeprom.c
1849 ++++ b/drivers/thunderbolt/eeprom.c
1850 +@@ -444,6 +444,7 @@ int tb_drom_read(struct tb_switch *sw)
1851 + return tb_drom_parse_entries(sw);
1852 + err:
1853 + kfree(sw->drom);
1854 ++ sw->drom = NULL;
1855 + return -EIO;
1856 +
1857 + }
1858 +diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
1859 +index c3fe026d3168..9aff37186246 100644
1860 +--- a/drivers/tty/n_gsm.c
1861 ++++ b/drivers/tty/n_gsm.c
1862 +@@ -2045,7 +2045,9 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm)
1863 + }
1864 + }
1865 + spin_unlock(&gsm_mux_lock);
1866 +- WARN_ON(i == MAX_MUX);
1867 ++ /* open failed before registering => nothing to do */
1868 ++ if (i == MAX_MUX)
1869 ++ return;
1870 +
1871 + /* In theory disconnecting DLCI 0 is sufficient but for some
1872 + modems this is apparently not the case. */
1873 +diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
1874 +index bbc4ce66c2c1..644ddb841d9f 100644
1875 +--- a/drivers/tty/n_hdlc.c
1876 ++++ b/drivers/tty/n_hdlc.c
1877 +@@ -600,7 +600,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
1878 + add_wait_queue(&tty->read_wait, &wait);
1879 +
1880 + for (;;) {
1881 +- if (test_bit(TTY_OTHER_DONE, &tty->flags)) {
1882 ++ if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
1883 + ret = -EIO;
1884 + break;
1885 + }
1886 +@@ -828,7 +828,7 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
1887 + /* set bits for operations that won't block */
1888 + if (n_hdlc->rx_buf_list.head)
1889 + mask |= POLLIN | POLLRDNORM; /* readable */
1890 +- if (test_bit(TTY_OTHER_DONE, &tty->flags))
1891 ++ if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
1892 + mask |= POLLHUP;
1893 + if (tty_hung_up_p(filp))
1894 + mask |= POLLHUP;
1895 +diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
1896 +index cf000b331eed..84e71bd19082 100644
1897 +--- a/drivers/tty/n_tty.c
1898 ++++ b/drivers/tty/n_tty.c
1899 +@@ -1955,18 +1955,6 @@ static inline int input_available_p(struct tty_struct *tty, int poll)
1900 + return ldata->commit_head - ldata->read_tail >= amt;
1901 + }
1902 +
1903 +-static inline int check_other_done(struct tty_struct *tty)
1904 +-{
1905 +- int done = test_bit(TTY_OTHER_DONE, &tty->flags);
1906 +- if (done) {
1907 +- /* paired with cmpxchg() in check_other_closed(); ensures
1908 +- * read buffer head index is not stale
1909 +- */
1910 +- smp_mb__after_atomic();
1911 +- }
1912 +- return done;
1913 +-}
1914 +-
1915 + /**
1916 + * copy_from_read_buf - copy read data directly
1917 + * @tty: terminal device
1918 +@@ -2171,7 +2159,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
1919 + struct n_tty_data *ldata = tty->disc_data;
1920 + unsigned char __user *b = buf;
1921 + DEFINE_WAIT_FUNC(wait, woken_wake_function);
1922 +- int c, done;
1923 ++ int c;
1924 + int minimum, time;
1925 + ssize_t retval = 0;
1926 + long timeout;
1927 +@@ -2239,32 +2227,35 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
1928 + ((minimum - (b - buf)) >= 1))
1929 + ldata->minimum_to_wake = (minimum - (b - buf));
1930 +
1931 +- done = check_other_done(tty);
1932 +-
1933 + if (!input_available_p(tty, 0)) {
1934 +- if (done) {
1935 +- retval = -EIO;
1936 +- break;
1937 +- }
1938 +- if (tty_hung_up_p(file))
1939 +- break;
1940 +- if (!timeout)
1941 +- break;
1942 +- if (file->f_flags & O_NONBLOCK) {
1943 +- retval = -EAGAIN;
1944 +- break;
1945 +- }
1946 +- if (signal_pending(current)) {
1947 +- retval = -ERESTARTSYS;
1948 +- break;
1949 +- }
1950 + up_read(&tty->termios_rwsem);
1951 ++ tty_buffer_flush_work(tty->port);
1952 ++ down_read(&tty->termios_rwsem);
1953 ++ if (!input_available_p(tty, 0)) {
1954 ++ if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
1955 ++ retval = -EIO;
1956 ++ break;
1957 ++ }
1958 ++ if (tty_hung_up_p(file))
1959 ++ break;
1960 ++ if (!timeout)
1961 ++ break;
1962 ++ if (file->f_flags & O_NONBLOCK) {
1963 ++ retval = -EAGAIN;
1964 ++ break;
1965 ++ }
1966 ++ if (signal_pending(current)) {
1967 ++ retval = -ERESTARTSYS;
1968 ++ break;
1969 ++ }
1970 ++ up_read(&tty->termios_rwsem);
1971 +
1972 +- timeout = wait_woken(&wait, TASK_INTERRUPTIBLE,
1973 +- timeout);
1974 ++ timeout = wait_woken(&wait, TASK_INTERRUPTIBLE,
1975 ++ timeout);
1976 +
1977 +- down_read(&tty->termios_rwsem);
1978 +- continue;
1979 ++ down_read(&tty->termios_rwsem);
1980 ++ continue;
1981 ++ }
1982 + }
1983 +
1984 + if (ldata->icanon && !L_EXTPROC(tty)) {
1985 +@@ -2446,12 +2437,17 @@ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
1986 +
1987 + poll_wait(file, &tty->read_wait, wait);
1988 + poll_wait(file, &tty->write_wait, wait);
1989 +- if (check_other_done(tty))
1990 +- mask |= POLLHUP;
1991 + if (input_available_p(tty, 1))
1992 + mask |= POLLIN | POLLRDNORM;
1993 ++ else {
1994 ++ tty_buffer_flush_work(tty->port);
1995 ++ if (input_available_p(tty, 1))
1996 ++ mask |= POLLIN | POLLRDNORM;
1997 ++ }
1998 + if (tty->packet && tty->link->ctrl_status)
1999 + mask |= POLLPRI | POLLIN | POLLRDNORM;
2000 ++ if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
2001 ++ mask |= POLLHUP;
2002 + if (tty_hung_up_p(file))
2003 + mask |= POLLHUP;
2004 + if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) {
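
The n_tty.c hunks replace the TTY_OTHER_DONE machinery: when no input is buffered, the reader now flushes pending flip-buffer work via tty_buffer_flush_work() and looks again before treating TTY_OTHER_CLOSED as EOF, so data written just before the peer closed is still delivered. A toy model of that read path; the counters and flush_pending_work() are stand-ins, not the ldisc code:

#include <stdio.h>

static int ldisc_bytes;        /* bytes already visible to the reader */
static int pending_bytes;      /* bytes still sitting in the flip buffer */
static int other_closed;       /* peer (pty master/slave) has closed */

/* Stand-in for tty_buffer_flush_work(): push pending data to the ldisc. */
static void flush_pending_work(void)
{
	ldisc_bytes += pending_bytes;
	pending_bytes = 0;
}

static int read_once(void)
{
	if (!ldisc_bytes) {
		flush_pending_work();
		if (!ldisc_bytes) {
			if (other_closed)
				return -1;   /* EOF/-EIO: peer gone, nothing queued */
			return 0;        /* would block */
		}
	}
	ldisc_bytes--;
	return 1;                        /* one byte consumed */
}

int main(void)
{
	int i;

	pending_bytes = 2;
	other_closed = 1;                /* peer closed right after writing */

	for (i = 0; i < 4; i++)
		printf("read -> %d\n", read_once());
	return 0;
}
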
2005 +diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
2006 +index 78e983677339..7865228f664f 100644
2007 +--- a/drivers/tty/pty.c
2008 ++++ b/drivers/tty/pty.c
2009 +@@ -59,7 +59,7 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
2010 + if (!tty->link)
2011 + return;
2012 + set_bit(TTY_OTHER_CLOSED, &tty->link->flags);
2013 +- tty_flip_buffer_push(tty->link->port);
2014 ++ wake_up_interruptible(&tty->link->read_wait);
2015 + wake_up_interruptible(&tty->link->write_wait);
2016 + if (tty->driver->subtype == PTY_TYPE_MASTER) {
2017 + set_bit(TTY_OTHER_CLOSED, &tty->flags);
2018 +@@ -247,9 +247,7 @@ static int pty_open(struct tty_struct *tty, struct file *filp)
2019 + goto out;
2020 +
2021 + clear_bit(TTY_IO_ERROR, &tty->flags);
2022 +- /* TTY_OTHER_CLOSED must be cleared before TTY_OTHER_DONE */
2023 + clear_bit(TTY_OTHER_CLOSED, &tty->link->flags);
2024 +- clear_bit(TTY_OTHER_DONE, &tty->link->flags);
2025 + set_bit(TTY_THROTTLED, &tty->flags);
2026 + return 0;
2027 +
2028 +diff --git a/drivers/tty/serial/8250/8250_mid.c b/drivers/tty/serial/8250/8250_mid.c
2029 +index 88531a36b69c..ed489880e62b 100644
2030 +--- a/drivers/tty/serial/8250/8250_mid.c
2031 ++++ b/drivers/tty/serial/8250/8250_mid.c
2032 +@@ -14,6 +14,7 @@
2033 + #include <linux/pci.h>
2034 +
2035 + #include <linux/dma/hsu.h>
2036 ++#include <linux/8250_pci.h>
2037 +
2038 + #include "8250.h"
2039 +
2040 +@@ -24,6 +25,7 @@
2041 + #define PCI_DEVICE_ID_INTEL_DNV_UART 0x19d8
2042 +
2043 + /* Intel MID Specific registers */
2044 ++#define INTEL_MID_UART_DNV_FISR 0x08
2045 + #define INTEL_MID_UART_PS 0x30
2046 + #define INTEL_MID_UART_MUL 0x34
2047 + #define INTEL_MID_UART_DIV 0x38
2048 +@@ -31,6 +33,7 @@
2049 + struct mid8250;
2050 +
2051 + struct mid8250_board {
2052 ++ unsigned int flags;
2053 + unsigned long freq;
2054 + unsigned int base_baud;
2055 + int (*setup)(struct mid8250 *, struct uart_port *p);
2056 +@@ -88,16 +91,16 @@ static int tng_setup(struct mid8250 *mid, struct uart_port *p)
2057 + static int dnv_handle_irq(struct uart_port *p)
2058 + {
2059 + struct mid8250 *mid = p->private_data;
2060 +- int ret;
2061 +-
2062 +- ret = hsu_dma_irq(&mid->dma_chip, 0);
2063 +- ret |= hsu_dma_irq(&mid->dma_chip, 1);
2064 +-
2065 +- /* For now, letting the HW generate separate interrupt for the UART */
2066 +- if (ret)
2067 +- return ret;
2068 +-
2069 +- return serial8250_handle_irq(p, serial_port_in(p, UART_IIR));
2070 ++ unsigned int fisr = serial_port_in(p, INTEL_MID_UART_DNV_FISR);
2071 ++ int ret = IRQ_NONE;
2072 ++
2073 ++ if (fisr & BIT(2))
2074 ++ ret |= hsu_dma_irq(&mid->dma_chip, 1);
2075 ++ if (fisr & BIT(1))
2076 ++ ret |= hsu_dma_irq(&mid->dma_chip, 0);
2077 ++ if (fisr & BIT(0))
2078 ++ ret |= serial8250_handle_irq(p, serial_port_in(p, UART_IIR));
2079 ++ return ret;
2080 + }
2081 +
2082 + #define DNV_DMA_CHAN_OFFSET 0x80
2083 +@@ -106,12 +109,13 @@ static int dnv_setup(struct mid8250 *mid, struct uart_port *p)
2084 + {
2085 + struct hsu_dma_chip *chip = &mid->dma_chip;
2086 + struct pci_dev *pdev = to_pci_dev(p->dev);
2087 ++ unsigned int bar = FL_GET_BASE(mid->board->flags);
2088 + int ret;
2089 +
2090 + chip->dev = &pdev->dev;
2091 + chip->irq = pdev->irq;
2092 + chip->regs = p->membase;
2093 +- chip->length = pci_resource_len(pdev, 0);
2094 ++ chip->length = pci_resource_len(pdev, bar);
2095 + chip->offset = DNV_DMA_CHAN_OFFSET;
2096 +
2097 + /* Falling back to PIO mode if DMA probing fails */
2098 +@@ -217,6 +221,7 @@ static int mid8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2099 + {
2100 + struct uart_8250_port uart;
2101 + struct mid8250 *mid;
2102 ++ unsigned int bar;
2103 + int ret;
2104 +
2105 + ret = pcim_enable_device(pdev);
2106 +@@ -230,6 +235,7 @@ static int mid8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2107 + return -ENOMEM;
2108 +
2109 + mid->board = (struct mid8250_board *)id->driver_data;
2110 ++ bar = FL_GET_BASE(mid->board->flags);
2111 +
2112 + memset(&uart, 0, sizeof(struct uart_8250_port));
2113 +
2114 +@@ -242,8 +248,8 @@ static int mid8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2115 + uart.port.flags = UPF_SHARE_IRQ | UPF_FIXED_PORT | UPF_FIXED_TYPE;
2116 + uart.port.set_termios = mid8250_set_termios;
2117 +
2118 +- uart.port.mapbase = pci_resource_start(pdev, 0);
2119 +- uart.port.membase = pcim_iomap(pdev, 0, 0);
2120 ++ uart.port.mapbase = pci_resource_start(pdev, bar);
2121 ++ uart.port.membase = pcim_iomap(pdev, bar, 0);
2122 + if (!uart.port.membase)
2123 + return -ENOMEM;
2124 +
2125 +@@ -282,18 +288,21 @@ static void mid8250_remove(struct pci_dev *pdev)
2126 + }
2127 +
2128 + static const struct mid8250_board pnw_board = {
2129 ++ .flags = FL_BASE0,
2130 + .freq = 50000000,
2131 + .base_baud = 115200,
2132 + .setup = pnw_setup,
2133 + };
2134 +
2135 + static const struct mid8250_board tng_board = {
2136 ++ .flags = FL_BASE0,
2137 + .freq = 38400000,
2138 + .base_baud = 1843200,
2139 + .setup = tng_setup,
2140 + };
2141 +
2142 + static const struct mid8250_board dnv_board = {
2143 ++ .flags = FL_BASE1,
2144 + .freq = 133333333,
2145 + .base_baud = 115200,
2146 + .setup = dnv_setup,
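
The 8250_mid.c hunks make dnv_handle_irq() read a combined status register and dispatch only the sources whose bits are set, instead of unconditionally polling both DMA channels and the UART. A compact sketch of that dispatch with stub handlers; the bit layout (bit 0 UART, bits 1-2 DMA) follows the patched code, everything else is made up:

#include <stdio.h>

#define IRQ_NONE    0
#define IRQ_HANDLED 1
#define BIT(n)      (1u << (n))

/* Stub handlers standing in for hsu_dma_irq() and serial8250_handle_irq(). */
static int dma_irq(int chan)  { printf("dma chan %d\n", chan); return IRQ_HANDLED; }
static int uart_irq(void)     { printf("uart\n");             return IRQ_HANDLED; }

/* FISR layout assumed by the patch: bit 0 = UART, bits 1/2 = DMA channels. */
static int handle_irq(unsigned int fisr)
{
	int ret = IRQ_NONE;

	if (fisr & BIT(2))
		ret |= dma_irq(1);
	if (fisr & BIT(1))
		ret |= dma_irq(0);
	if (fisr & BIT(0))
		ret |= uart_irq();
	return ret;
}

int main(void)
{
	printf("ret=%d\n", handle_irq(BIT(0) | BIT(2)));
	printf("ret=%d\n", handle_irq(0));   /* spurious: nothing claimed */
	return 0;
}
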
2147 +diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
2148 +index 7cd6f9a90542..c1d4a8fa9be8 100644
2149 +--- a/drivers/tty/serial/8250/8250_pci.c
2150 ++++ b/drivers/tty/serial/8250/8250_pci.c
2151 +@@ -1401,6 +1401,9 @@ byt_set_termios(struct uart_port *p, struct ktermios *termios,
2152 + unsigned long m, n;
2153 + u32 reg;
2154 +
2155 ++ /* Gracefully handle the B0 case: fall back to B9600 */
2156 ++ fuart = fuart ? fuart : 9600 * 16;
2157 ++
2158 + /* Get Fuart closer to Fref */
2159 + fuart *= rounddown_pow_of_two(fref / fuart);
2160 +
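
The 8250_pci.c hunk guards the Baytrail clock setup against a requested rate of zero (B0): fuart would be 0 and fref / fuart would divide by zero, so it falls back to 9600 baud times the 16x oversampling factor before scaling. A sketch of that arithmetic with a toy rounddown_pow_of_two() (the kernel has its own helper; fref here is an arbitrary example value):

#include <stdio.h>

/* Toy helper; the kernel provides its own rounddown_pow_of_two(). */
static unsigned long rounddown_pow_of_two(unsigned long n)
{
	unsigned long p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

/* Scale the UART clock the way the patched code does, falling back to
 * 9600 * 16 when the caller asked for B0 (baud == 0). */
static unsigned long scale_fuart(unsigned long fref, unsigned int baud)
{
	unsigned long fuart = baud * 16UL;

	fuart = fuart ? fuart : 9600 * 16;       /* B0 fallback */
	fuart *= rounddown_pow_of_two(fref / fuart);
	return fuart;
}

int main(void)
{
	unsigned long fref = 100000000;          /* illustrative reference clock */

	printf("115200 -> %lu\n", scale_fuart(fref, 115200));
	printf("B0     -> %lu\n", scale_fuart(fref, 0));
	return 0;
}
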
2161 +diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
2162 +index 94294558943c..7bbadd176c74 100644
2163 +--- a/drivers/tty/serial/atmel_serial.c
2164 ++++ b/drivers/tty/serial/atmel_serial.c
2165 +@@ -277,6 +277,13 @@ static bool atmel_use_dma_rx(struct uart_port *port)
2166 + return atmel_port->use_dma_rx;
2167 + }
2168 +
2169 ++static bool atmel_use_fifo(struct uart_port *port)
2170 ++{
2171 ++ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2172 ++
2173 ++ return atmel_port->fifo_size;
2174 ++}
2175 ++
2176 + static unsigned int atmel_get_lines_status(struct uart_port *port)
2177 + {
2178 + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2179 +@@ -2169,7 +2176,12 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
2180 + mode |= ATMEL_US_USMODE_RS485;
2181 + } else if (termios->c_cflag & CRTSCTS) {
2182 + /* RS232 with hardware handshake (RTS/CTS) */
2183 +- mode |= ATMEL_US_USMODE_HWHS;
2184 ++ if (atmel_use_dma_rx(port) && !atmel_use_fifo(port)) {
2185 ++ dev_info(port->dev, "not enabling hardware flow control because DMA is used");
2186 ++ termios->c_cflag &= ~CRTSCTS;
2187 ++ } else {
2188 ++ mode |= ATMEL_US_USMODE_HWHS;
2189 ++ }
2190 + } else {
2191 + /* RS232 without hardware handshake */
2192 + mode |= ATMEL_US_USMODE_NORMAL;
2193 +diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
2194 +index d72cd736bdc6..8320173af846 100644
2195 +--- a/drivers/tty/serial/samsung.c
2196 ++++ b/drivers/tty/serial/samsung.c
2197 +@@ -1263,6 +1263,8 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
2198 + /* check to see if we need to change clock source */
2199 +
2200 + if (ourport->baudclk != clk) {
2201 ++ clk_prepare_enable(clk);
2202 ++
2203 + s3c24xx_serial_setsource(port, clk_sel);
2204 +
2205 + if (!IS_ERR(ourport->baudclk)) {
2206 +@@ -1270,8 +1272,6 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
2207 + ourport->baudclk = ERR_PTR(-EINVAL);
2208 + }
2209 +
2210 +- clk_prepare_enable(clk);
2211 +-
2212 + ourport->baudclk = clk;
2213 + ourport->baudclk_rate = clk ? clk_get_rate(clk) : 0;
2214 + }
2215 +diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
2216 +index 3cd31e0d4bd9..fb31eecb708d 100644
2217 +--- a/drivers/tty/tty_buffer.c
2218 ++++ b/drivers/tty/tty_buffer.c
2219 +@@ -37,29 +37,6 @@
2220 +
2221 + #define TTY_BUFFER_PAGE (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF)
2222 +
2223 +-/*
2224 +- * If all tty flip buffers have been processed by flush_to_ldisc() or
2225 +- * dropped by tty_buffer_flush(), check if the linked pty has been closed.
2226 +- * If so, wake the reader/poll to process
2227 +- */
2228 +-static inline void check_other_closed(struct tty_struct *tty)
2229 +-{
2230 +- unsigned long flags, old;
2231 +-
2232 +- /* transition from TTY_OTHER_CLOSED => TTY_OTHER_DONE must be atomic */
2233 +- for (flags = ACCESS_ONCE(tty->flags);
2234 +- test_bit(TTY_OTHER_CLOSED, &flags);
2235 +- ) {
2236 +- old = flags;
2237 +- __set_bit(TTY_OTHER_DONE, &flags);
2238 +- flags = cmpxchg(&tty->flags, old, flags);
2239 +- if (old == flags) {
2240 +- wake_up_interruptible(&tty->read_wait);
2241 +- break;
2242 +- }
2243 +- }
2244 +-}
2245 +-
2246 + /**
2247 + * tty_buffer_lock_exclusive - gain exclusive access to buffer
2248 + * tty_buffer_unlock_exclusive - release exclusive access
2249 +@@ -254,8 +231,6 @@ void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld)
2250 + if (ld && ld->ops->flush_buffer)
2251 + ld->ops->flush_buffer(tty);
2252 +
2253 +- check_other_closed(tty);
2254 +-
2255 + atomic_dec(&buf->priority);
2256 + mutex_unlock(&buf->lock);
2257 + }
2258 +@@ -505,10 +480,8 @@ static void flush_to_ldisc(struct work_struct *work)
2259 + */
2260 + count = smp_load_acquire(&head->commit) - head->read;
2261 + if (!count) {
2262 +- if (next == NULL) {
2263 +- check_other_closed(tty);
2264 ++ if (next == NULL)
2265 + break;
2266 +- }
2267 + buf->head = next;
2268 + tty_buffer_free(port, head);
2269 + continue;
2270 +@@ -597,3 +570,8 @@ bool tty_buffer_cancel_work(struct tty_port *port)
2271 + {
2272 + return cancel_work_sync(&port->buf.work);
2273 + }
2274 ++
2275 ++void tty_buffer_flush_work(struct tty_port *port)
2276 ++{
2277 ++ flush_work(&port->buf.work);
2278 ++}
2279 +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
2280 +index 4462d167900c..cf20282f79f0 100644
2281 +--- a/drivers/tty/vt/vt.c
2282 ++++ b/drivers/tty/vt/vt.c
2283 +@@ -3583,9 +3583,10 @@ static int do_register_con_driver(const struct consw *csw, int first, int last)
2284 + goto err;
2285 +
2286 + desc = csw->con_startup();
2287 +-
2288 +- if (!desc)
2289 ++ if (!desc) {
2290 ++ retval = -ENODEV;
2291 + goto err;
2292 ++ }
2293 +
2294 + retval = -EINVAL;
2295 +
2296 +diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
2297 +index 2057d91d8336..dadd1e8dfe09 100644
2298 +--- a/drivers/usb/core/driver.c
2299 ++++ b/drivers/usb/core/driver.c
2300 +@@ -284,7 +284,7 @@ static int usb_probe_interface(struct device *dev)
2301 + struct usb_device *udev = interface_to_usbdev(intf);
2302 + const struct usb_device_id *id;
2303 + int error = -ENODEV;
2304 +- int lpm_disable_error;
2305 ++ int lpm_disable_error = -ENODEV;
2306 +
2307 + dev_dbg(dev, "%s\n", __func__);
2308 +
2309 +@@ -336,12 +336,14 @@ static int usb_probe_interface(struct device *dev)
2310 + * setting during probe, that should also be fine. usb_set_interface()
2311 + * will attempt to disable LPM, and fail if it can't disable it.
2312 + */
2313 +- lpm_disable_error = usb_unlocked_disable_lpm(udev);
2314 +- if (lpm_disable_error && driver->disable_hub_initiated_lpm) {
2315 +- dev_err(&intf->dev, "%s Failed to disable LPM for driver %s\n.",
2316 +- __func__, driver->name);
2317 +- error = lpm_disable_error;
2318 +- goto err;
2319 ++ if (driver->disable_hub_initiated_lpm) {
2320 ++ lpm_disable_error = usb_unlocked_disable_lpm(udev);
2321 ++ if (lpm_disable_error) {
2322 ++ dev_err(&intf->dev, "%s Failed to disable LPM for driver %s\n.",
2323 ++ __func__, driver->name);
2324 ++ error = lpm_disable_error;
2325 ++ goto err;
2326 ++ }
2327 + }
2328 +
2329 + /* Carry out a deferred switch to altsetting 0 */
2330 +@@ -391,7 +393,8 @@ static int usb_unbind_interface(struct device *dev)
2331 + struct usb_interface *intf = to_usb_interface(dev);
2332 + struct usb_host_endpoint *ep, **eps = NULL;
2333 + struct usb_device *udev;
2334 +- int i, j, error, r, lpm_disable_error;
2335 ++ int i, j, error, r;
2336 ++ int lpm_disable_error = -ENODEV;
2337 +
2338 + intf->condition = USB_INTERFACE_UNBINDING;
2339 +
2340 +@@ -399,12 +402,13 @@ static int usb_unbind_interface(struct device *dev)
2341 + udev = interface_to_usbdev(intf);
2342 + error = usb_autoresume_device(udev);
2343 +
2344 +- /* Hub-initiated LPM policy may change, so attempt to disable LPM until
2345 ++ /* If hub-initiated LPM policy may change, attempt to disable LPM until
2346 + * the driver is unbound. If LPM isn't disabled, that's fine because it
2347 + * wouldn't be enabled unless all the bound interfaces supported
2348 + * hub-initiated LPM.
2349 + */
2350 +- lpm_disable_error = usb_unlocked_disable_lpm(udev);
2351 ++ if (driver->disable_hub_initiated_lpm)
2352 ++ lpm_disable_error = usb_unlocked_disable_lpm(udev);
2353 +
2354 + /*
2355 + * Terminate all URBs for this interface unless the driver
2356 +@@ -505,7 +509,7 @@ int usb_driver_claim_interface(struct usb_driver *driver,
2357 + struct device *dev;
2358 + struct usb_device *udev;
2359 + int retval = 0;
2360 +- int lpm_disable_error;
2361 ++ int lpm_disable_error = -ENODEV;
2362 +
2363 + if (!iface)
2364 + return -ENODEV;
2365 +@@ -526,12 +530,14 @@ int usb_driver_claim_interface(struct usb_driver *driver,
2366 +
2367 + iface->condition = USB_INTERFACE_BOUND;
2368 +
2369 +- /* Disable LPM until this driver is bound. */
2370 +- lpm_disable_error = usb_unlocked_disable_lpm(udev);
2371 +- if (lpm_disable_error && driver->disable_hub_initiated_lpm) {
2372 +- dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n.",
2373 +- __func__, driver->name);
2374 +- return -ENOMEM;
2375 ++ /* See the comment about disabling LPM in usb_probe_interface(). */
2376 ++ if (driver->disable_hub_initiated_lpm) {
2377 ++ lpm_disable_error = usb_unlocked_disable_lpm(udev);
2378 ++ if (lpm_disable_error) {
2379 ++ dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n.",
2380 ++ __func__, driver->name);
2381 ++ return -ENOMEM;
2382 ++ }
2383 + }
2384 +
2385 + /* Claimed interfaces are initially inactive (suspended) and
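
The driver.c hunks invert the LPM handling: LPM is now only disabled for drivers that set disable_hub_initiated_lpm, and lpm_disable_error starts at -ENODEV so the later re-enable path knows nothing was disabled. A condensed sketch of that control flow with stubbed USB calls; struct usb_driver_stub and probe_like() are illustrative only:

#include <stdio.h>

#define ENODEV 19

struct usb_driver_stub {
	const char *name;
	int disable_hub_initiated_lpm;
};

/* Stub for usb_unlocked_disable_lpm(); pretend it always succeeds. */
static int disable_lpm_stub(void) { return 0; }

/* Patched pattern: only drivers that ask for it get LPM disabled; the
 * sentinel -ENODEV means "we never disabled it, so don't re-enable it". */
static int probe_like(const struct usb_driver_stub *drv)
{
	int lpm_disable_error = -ENODEV;

	if (drv->disable_hub_initiated_lpm) {
		lpm_disable_error = disable_lpm_stub();
		if (lpm_disable_error) {
			fprintf(stderr, "failed to disable LPM for %s\n", drv->name);
			return lpm_disable_error;
		}
	}

	/* ... bind the interface ... */

	if (!lpm_disable_error)
		printf("%s: re-enabling LPM after bind\n", drv->name);
	return 0;
}

int main(void)
{
	struct usb_driver_stub plain = { "plain", 0 };
	struct usb_driver_stub picky = { "picky", 1 };

	probe_like(&plain);
	probe_like(&picky);
	return 0;
}
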
2386 +diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
2387 +index 79d895c2dd71..97ef75af9632 100644
2388 +--- a/drivers/usb/gadget/function/f_fs.c
2389 ++++ b/drivers/usb/gadget/function/f_fs.c
2390 +@@ -651,7 +651,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
2391 + if (io_data->read && ret > 0) {
2392 + use_mm(io_data->mm);
2393 + ret = copy_to_iter(io_data->buf, ret, &io_data->data);
2394 +- if (iov_iter_count(&io_data->data))
2395 ++ if (ret != io_data->req->actual && iov_iter_count(&io_data->data))
2396 + ret = -EFAULT;
2397 + unuse_mm(io_data->mm);
2398 + }
2399 +diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
2400 +index 223ccf89d226..a4f664062e0c 100644
2401 +--- a/drivers/usb/gadget/function/f_mass_storage.c
2402 ++++ b/drivers/usb/gadget/function/f_mass_storage.c
2403 +@@ -2977,25 +2977,6 @@ void fsg_common_set_inquiry_string(struct fsg_common *common, const char *vn,
2404 + }
2405 + EXPORT_SYMBOL_GPL(fsg_common_set_inquiry_string);
2406 +
2407 +-int fsg_common_run_thread(struct fsg_common *common)
2408 +-{
2409 +- common->state = FSG_STATE_IDLE;
2410 +- /* Tell the thread to start working */
2411 +- common->thread_task =
2412 +- kthread_create(fsg_main_thread, common, "file-storage");
2413 +- if (IS_ERR(common->thread_task)) {
2414 +- common->state = FSG_STATE_TERMINATED;
2415 +- return PTR_ERR(common->thread_task);
2416 +- }
2417 +-
2418 +- DBG(common, "I/O thread pid: %d\n", task_pid_nr(common->thread_task));
2419 +-
2420 +- wake_up_process(common->thread_task);
2421 +-
2422 +- return 0;
2423 +-}
2424 +-EXPORT_SYMBOL_GPL(fsg_common_run_thread);
2425 +-
2426 + static void fsg_common_release(struct kref *ref)
2427 + {
2428 + struct fsg_common *common = container_of(ref, struct fsg_common, ref);
2429 +@@ -3005,6 +2986,7 @@ static void fsg_common_release(struct kref *ref)
2430 + if (common->state != FSG_STATE_TERMINATED) {
2431 + raise_exception(common, FSG_STATE_EXIT);
2432 + wait_for_completion(&common->thread_notifier);
2433 ++ common->thread_task = NULL;
2434 + }
2435 +
2436 + for (i = 0; i < ARRAY_SIZE(common->luns); ++i) {
2437 +@@ -3050,9 +3032,21 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
2438 + if (ret)
2439 + return ret;
2440 + fsg_common_set_inquiry_string(fsg->common, NULL, NULL);
2441 +- ret = fsg_common_run_thread(fsg->common);
2442 +- if (ret)
2443 ++ }
2444 ++
2445 ++ if (!common->thread_task) {
2446 ++ common->state = FSG_STATE_IDLE;
2447 ++ common->thread_task =
2448 ++ kthread_create(fsg_main_thread, common, "file-storage");
2449 ++ if (IS_ERR(common->thread_task)) {
2450 ++ int ret = PTR_ERR(common->thread_task);
2451 ++ common->thread_task = NULL;
2452 ++ common->state = FSG_STATE_TERMINATED;
2453 + return ret;
2454 ++ }
2455 ++ DBG(common, "I/O thread pid: %d\n",
2456 ++ task_pid_nr(common->thread_task));
2457 ++ wake_up_process(common->thread_task);
2458 + }
2459 +
2460 + fsg->gadget = gadget;
2461 +diff --git a/drivers/usb/gadget/function/f_mass_storage.h b/drivers/usb/gadget/function/f_mass_storage.h
2462 +index 445df6775609..b6a9918eaefb 100644
2463 +--- a/drivers/usb/gadget/function/f_mass_storage.h
2464 ++++ b/drivers/usb/gadget/function/f_mass_storage.h
2465 +@@ -153,8 +153,6 @@ int fsg_common_create_luns(struct fsg_common *common, struct fsg_config *cfg);
2466 + void fsg_common_set_inquiry_string(struct fsg_common *common, const char *vn,
2467 + const char *pn);
2468 +
2469 +-int fsg_common_run_thread(struct fsg_common *common);
2470 +-
2471 + void fsg_config_from_params(struct fsg_config *cfg,
2472 + const struct fsg_module_parameters *params,
2473 + unsigned int fsg_num_buffers);
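
The f_mass_storage hunks drop fsg_common_run_thread() and start the I/O thread lazily from fsg_bind() the first time the function is bound, so the legacy gadgets changed below no longer have to call the helper themselves. A pthread-based sketch of that lazy-start pattern (a userspace stand-in, not the kthread code); build with -lpthread:

#include <pthread.h>
#include <stdio.h>

struct common {
	pthread_t thread;
	int thread_started;
};

static void *main_thread(void *arg)
{
	(void)arg;
	printf("I/O thread running\n");
	return NULL;
}

/* Create the worker only on the first bind; later binds are no-ops. */
static int bind_like(struct common *c)
{
	if (!c->thread_started) {
		if (pthread_create(&c->thread, NULL, main_thread, NULL))
			return -1;
		c->thread_started = 1;
		printf("started worker on first bind\n");
	}
	return 0;
}

int main(void)
{
	struct common c = { 0 };

	bind_like(&c);     /* starts the thread */
	bind_like(&c);     /* already running: nothing to do */
	pthread_join(c.thread, NULL);
	return 0;
}
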
2474 +diff --git a/drivers/usb/gadget/legacy/acm_ms.c b/drivers/usb/gadget/legacy/acm_ms.c
2475 +index 4b158e2d1e57..64b2cbb0bc6b 100644
2476 +--- a/drivers/usb/gadget/legacy/acm_ms.c
2477 ++++ b/drivers/usb/gadget/legacy/acm_ms.c
2478 +@@ -133,10 +133,6 @@ static int acm_ms_do_config(struct usb_configuration *c)
2479 + if (status < 0)
2480 + goto put_msg;
2481 +
2482 +- status = fsg_common_run_thread(opts->common);
2483 +- if (status)
2484 +- goto remove_acm;
2485 +-
2486 + status = usb_add_function(c, f_msg);
2487 + if (status)
2488 + goto remove_acm;
2489 +diff --git a/drivers/usb/gadget/legacy/mass_storage.c b/drivers/usb/gadget/legacy/mass_storage.c
2490 +index bda3c519110f..99aa22c81770 100644
2491 +--- a/drivers/usb/gadget/legacy/mass_storage.c
2492 ++++ b/drivers/usb/gadget/legacy/mass_storage.c
2493 +@@ -132,10 +132,6 @@ static int msg_do_config(struct usb_configuration *c)
2494 + if (IS_ERR(f_msg))
2495 + return PTR_ERR(f_msg);
2496 +
2497 +- ret = fsg_common_run_thread(opts->common);
2498 +- if (ret)
2499 +- goto put_func;
2500 +-
2501 + ret = usb_add_function(c, f_msg);
2502 + if (ret)
2503 + goto put_func;
2504 +diff --git a/drivers/usb/gadget/legacy/multi.c b/drivers/usb/gadget/legacy/multi.c
2505 +index 4fe794ddcd49..09c7c28f32f7 100644
2506 +--- a/drivers/usb/gadget/legacy/multi.c
2507 ++++ b/drivers/usb/gadget/legacy/multi.c
2508 +@@ -137,7 +137,6 @@ static struct usb_function *f_msg_rndis;
2509 +
2510 + static int rndis_do_config(struct usb_configuration *c)
2511 + {
2512 +- struct fsg_opts *fsg_opts;
2513 + int ret;
2514 +
2515 + if (gadget_is_otg(c->cdev->gadget)) {
2516 +@@ -169,11 +168,6 @@ static int rndis_do_config(struct usb_configuration *c)
2517 + goto err_fsg;
2518 + }
2519 +
2520 +- fsg_opts = fsg_opts_from_func_inst(fi_msg);
2521 +- ret = fsg_common_run_thread(fsg_opts->common);
2522 +- if (ret)
2523 +- goto err_run;
2524 +-
2525 + ret = usb_add_function(c, f_msg_rndis);
2526 + if (ret)
2527 + goto err_run;
2528 +@@ -225,7 +219,6 @@ static struct usb_function *f_msg_multi;
2529 +
2530 + static int cdc_do_config(struct usb_configuration *c)
2531 + {
2532 +- struct fsg_opts *fsg_opts;
2533 + int ret;
2534 +
2535 + if (gadget_is_otg(c->cdev->gadget)) {
2536 +@@ -258,11 +251,6 @@ static int cdc_do_config(struct usb_configuration *c)
2537 + goto err_fsg;
2538 + }
2539 +
2540 +- fsg_opts = fsg_opts_from_func_inst(fi_msg);
2541 +- ret = fsg_common_run_thread(fsg_opts->common);
2542 +- if (ret)
2543 +- goto err_run;
2544 +-
2545 + ret = usb_add_function(c, f_msg_multi);
2546 + if (ret)
2547 + goto err_run;
2548 +diff --git a/drivers/usb/gadget/legacy/nokia.c b/drivers/usb/gadget/legacy/nokia.c
2549 +index 8b3f6fb1825d..05d3f79e768d 100644
2550 +--- a/drivers/usb/gadget/legacy/nokia.c
2551 ++++ b/drivers/usb/gadget/legacy/nokia.c
2552 +@@ -152,7 +152,6 @@ static int nokia_bind_config(struct usb_configuration *c)
2553 + struct usb_function *f_ecm;
2554 + struct usb_function *f_obex2 = NULL;
2555 + struct usb_function *f_msg;
2556 +- struct fsg_opts *fsg_opts;
2557 + int status = 0;
2558 + int obex1_stat = -1;
2559 + int obex2_stat = -1;
2560 +@@ -222,12 +221,6 @@ static int nokia_bind_config(struct usb_configuration *c)
2561 + goto err_ecm;
2562 + }
2563 +
2564 +- fsg_opts = fsg_opts_from_func_inst(fi_msg);
2565 +-
2566 +- status = fsg_common_run_thread(fsg_opts->common);
2567 +- if (status)
2568 +- goto err_msg;
2569 +-
2570 + status = usb_add_function(c, f_msg);
2571 + if (status)
2572 + goto err_msg;
2573 +diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
2574 +index f660afba715d..89f7cd66f5e6 100644
2575 +--- a/drivers/usb/gadget/udc/udc-core.c
2576 ++++ b/drivers/usb/gadget/udc/udc-core.c
2577 +@@ -71,7 +71,7 @@ int usb_gadget_map_request(struct usb_gadget *gadget,
2578 + mapped = dma_map_sg(dev, req->sg, req->num_sgs,
2579 + is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
2580 + if (mapped == 0) {
2581 +- dev_err(&gadget->dev, "failed to map SGs\n");
2582 ++ dev_err(dev, "failed to map SGs\n");
2583 + return -EFAULT;
2584 + }
2585 +
2586 +diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
2587 +index 637f3f7cfce8..1a812eafe670 100644
2588 +--- a/drivers/usb/misc/usbtest.c
2589 ++++ b/drivers/usb/misc/usbtest.c
2590 +@@ -505,6 +505,7 @@ static struct scatterlist *
2591 + alloc_sglist(int nents, int max, int vary, struct usbtest_dev *dev, int pipe)
2592 + {
2593 + struct scatterlist *sg;
2594 ++ unsigned int n_size = 0;
2595 + unsigned i;
2596 + unsigned size = max;
2597 + unsigned maxpacket =
2598 +@@ -537,7 +538,8 @@ alloc_sglist(int nents, int max, int vary, struct usbtest_dev *dev, int pipe)
2599 + break;
2600 + case 1:
2601 + for (j = 0; j < size; j++)
2602 +- *buf++ = (u8) ((j % maxpacket) % 63);
2603 ++ *buf++ = (u8) (((j + n_size) % maxpacket) % 63);
2604 ++ n_size += size;
2605 + break;
2606 + }
2607 +
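
The usbtest.c hunk makes test pattern 1 continuous across scatter-gather entries: the running total n_size is added to the per-entry index so the modulo pattern no longer restarts at each buffer. A small demonstration of the old versus new fill (MAXPACKET and the entry sizes are illustrative):

#include <stdio.h>

#define MAXPACKET 8   /* illustrative; the real value comes from the endpoint */

static void fill(unsigned char *buf, int nents, int size, int continuous)
{
	unsigned int n_size = 0;
	int i, j;

	for (i = 0; i < nents; i++) {
		for (j = 0; j < size; j++)
			buf[i * size + j] = continuous
				? (unsigned char)(((j + n_size) % MAXPACKET) % 63)
				: (unsigned char)((j % MAXPACKET) % 63);
		n_size += size;
	}
}

int main(void)
{
	unsigned char old_buf[12], new_buf[12];
	int i;

	fill(old_buf, 3, 4, 0);   /* pattern restarts per entry */
	fill(new_buf, 3, 4, 1);   /* pattern runs across entries */

	for (i = 0; i < 12; i++)
		printf("%d%c", old_buf[i], i == 11 ? '\n' : ' ');
	for (i = 0; i < 12; i++)
		printf("%d%c", new_buf[i], i == 11 ? '\n' : ' ');
	return 0;
}
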
2608 +diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
2609 +index c0866971db2b..1947ea0e0988 100644
2610 +--- a/drivers/usb/serial/io_edgeport.c
2611 ++++ b/drivers/usb/serial/io_edgeport.c
2612 +@@ -2856,14 +2856,16 @@ static int edge_startup(struct usb_serial *serial)
2613 + /* not set up yet, so do it now */
2614 + edge_serial->interrupt_read_urb =
2615 + usb_alloc_urb(0, GFP_KERNEL);
2616 +- if (!edge_serial->interrupt_read_urb)
2617 +- return -ENOMEM;
2618 ++ if (!edge_serial->interrupt_read_urb) {
2619 ++ response = -ENOMEM;
2620 ++ break;
2621 ++ }
2622 +
2623 + edge_serial->interrupt_in_buffer =
2624 + kmalloc(buffer_size, GFP_KERNEL);
2625 + if (!edge_serial->interrupt_in_buffer) {
2626 +- usb_free_urb(edge_serial->interrupt_read_urb);
2627 +- return -ENOMEM;
2628 ++ response = -ENOMEM;
2629 ++ break;
2630 + }
2631 + edge_serial->interrupt_in_endpoint =
2632 + endpoint->bEndpointAddress;
2633 +@@ -2891,14 +2893,16 @@ static int edge_startup(struct usb_serial *serial)
2634 + /* not set up yet, so do it now */
2635 + edge_serial->read_urb =
2636 + usb_alloc_urb(0, GFP_KERNEL);
2637 +- if (!edge_serial->read_urb)
2638 +- return -ENOMEM;
2639 ++ if (!edge_serial->read_urb) {
2640 ++ response = -ENOMEM;
2641 ++ break;
2642 ++ }
2643 +
2644 + edge_serial->bulk_in_buffer =
2645 + kmalloc(buffer_size, GFP_KERNEL);
2646 + if (!edge_serial->bulk_in_buffer) {
2647 +- usb_free_urb(edge_serial->read_urb);
2648 +- return -ENOMEM;
2649 ++ response = -ENOMEM;
2650 ++ break;
2651 + }
2652 + edge_serial->bulk_in_endpoint =
2653 + endpoint->bEndpointAddress;
2654 +@@ -2924,9 +2928,22 @@ static int edge_startup(struct usb_serial *serial)
2655 + }
2656 + }
2657 +
2658 +- if (!interrupt_in_found || !bulk_in_found || !bulk_out_found) {
2659 +- dev_err(ddev, "Error - the proper endpoints were not found!\n");
2660 +- return -ENODEV;
2661 ++ if (response || !interrupt_in_found || !bulk_in_found ||
2662 ++ !bulk_out_found) {
2663 ++ if (!response) {
2664 ++ dev_err(ddev, "expected endpoints not found\n");
2665 ++ response = -ENODEV;
2666 ++ }
2667 ++
2668 ++ usb_free_urb(edge_serial->interrupt_read_urb);
2669 ++ kfree(edge_serial->interrupt_in_buffer);
2670 ++
2671 ++ usb_free_urb(edge_serial->read_urb);
2672 ++ kfree(edge_serial->bulk_in_buffer);
2673 ++
2674 ++ kfree(edge_serial);
2675 ++
2676 ++ return response;
2677 + }
2678 +
2679 + /* start interrupt read for this edgeport this interrupt will
2680 +@@ -2949,16 +2966,9 @@ static void edge_disconnect(struct usb_serial *serial)
2681 + {
2682 + struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
2683 +
2684 +- /* stop reads and writes on all ports */
2685 +- /* free up our endpoint stuff */
2686 + if (edge_serial->is_epic) {
2687 + usb_kill_urb(edge_serial->interrupt_read_urb);
2688 +- usb_free_urb(edge_serial->interrupt_read_urb);
2689 +- kfree(edge_serial->interrupt_in_buffer);
2690 +-
2691 + usb_kill_urb(edge_serial->read_urb);
2692 +- usb_free_urb(edge_serial->read_urb);
2693 +- kfree(edge_serial->bulk_in_buffer);
2694 + }
2695 + }
2696 +
2697 +@@ -2971,6 +2981,16 @@ static void edge_release(struct usb_serial *serial)
2698 + {
2699 + struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
2700 +
2701 ++ if (edge_serial->is_epic) {
2702 ++ usb_kill_urb(edge_serial->interrupt_read_urb);
2703 ++ usb_free_urb(edge_serial->interrupt_read_urb);
2704 ++ kfree(edge_serial->interrupt_in_buffer);
2705 ++
2706 ++ usb_kill_urb(edge_serial->read_urb);
2707 ++ usb_free_urb(edge_serial->read_urb);
2708 ++ kfree(edge_serial->bulk_in_buffer);
2709 ++ }
2710 ++
2711 + kfree(edge_serial);
2712 + }
2713 +
2714 +diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
2715 +index e07b15ed5814..7faa901ee47f 100644
2716 +--- a/drivers/usb/serial/keyspan.c
2717 ++++ b/drivers/usb/serial/keyspan.c
2718 +@@ -2376,6 +2376,10 @@ static void keyspan_release(struct usb_serial *serial)
2719 +
2720 + s_priv = usb_get_serial_data(serial);
2721 +
2722 ++ /* Make sure to unlink the URBs submitted in attach. */
2723 ++ usb_kill_urb(s_priv->instat_urb);
2724 ++ usb_kill_urb(s_priv->indat_urb);
2725 ++
2726 + usb_free_urb(s_priv->instat_urb);
2727 + usb_free_urb(s_priv->indat_urb);
2728 + usb_free_urb(s_priv->glocont_urb);
2729 +diff --git a/drivers/usb/serial/mxuport.c b/drivers/usb/serial/mxuport.c
2730 +index 31a8b47f1ac6..c6596cbcc4b6 100644
2731 +--- a/drivers/usb/serial/mxuport.c
2732 ++++ b/drivers/usb/serial/mxuport.c
2733 +@@ -1259,6 +1259,15 @@ static int mxuport_attach(struct usb_serial *serial)
2734 + return 0;
2735 + }
2736 +
2737 ++static void mxuport_release(struct usb_serial *serial)
2738 ++{
2739 ++ struct usb_serial_port *port0 = serial->port[0];
2740 ++ struct usb_serial_port *port1 = serial->port[1];
2741 ++
2742 ++ usb_serial_generic_close(port1);
2743 ++ usb_serial_generic_close(port0);
2744 ++}
2745 ++
2746 + static int mxuport_open(struct tty_struct *tty, struct usb_serial_port *port)
2747 + {
2748 + struct mxuport_port *mxport = usb_get_serial_port_data(port);
2749 +@@ -1361,6 +1370,7 @@ static struct usb_serial_driver mxuport_device = {
2750 + .probe = mxuport_probe,
2751 + .port_probe = mxuport_port_probe,
2752 + .attach = mxuport_attach,
2753 ++ .release = mxuport_release,
2754 + .calc_num_ports = mxuport_calc_num_ports,
2755 + .open = mxuport_open,
2756 + .close = mxuport_close,
2757 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
2758 +index c6f497f16526..d96d423d00e6 100644
2759 +--- a/drivers/usb/serial/option.c
2760 ++++ b/drivers/usb/serial/option.c
2761 +@@ -375,18 +375,22 @@ static void option_instat_callback(struct urb *urb);
2762 + #define HAIER_PRODUCT_CE81B 0x10f8
2763 + #define HAIER_PRODUCT_CE100 0x2009
2764 +
2765 +-/* Cinterion (formerly Siemens) products */
2766 +-#define SIEMENS_VENDOR_ID 0x0681
2767 +-#define CINTERION_VENDOR_ID 0x1e2d
2768 ++/* Gemalto's Cinterion products (formerly Siemens) */
2769 ++#define SIEMENS_VENDOR_ID 0x0681
2770 ++#define CINTERION_VENDOR_ID 0x1e2d
2771 ++#define CINTERION_PRODUCT_HC25_MDMNET 0x0040
2772 + #define CINTERION_PRODUCT_HC25_MDM 0x0047
2773 +-#define CINTERION_PRODUCT_HC25_MDMNET 0x0040
2774 ++#define CINTERION_PRODUCT_HC28_MDMNET 0x004A /* same for HC28J */
2775 + #define CINTERION_PRODUCT_HC28_MDM 0x004C
2776 +-#define CINTERION_PRODUCT_HC28_MDMNET 0x004A /* same for HC28J */
2777 + #define CINTERION_PRODUCT_EU3_E 0x0051
2778 + #define CINTERION_PRODUCT_EU3_P 0x0052
2779 + #define CINTERION_PRODUCT_PH8 0x0053
2780 + #define CINTERION_PRODUCT_AHXX 0x0055
2781 + #define CINTERION_PRODUCT_PLXX 0x0060
2782 ++#define CINTERION_PRODUCT_PH8_2RMNET 0x0082
2783 ++#define CINTERION_PRODUCT_PH8_AUDIO 0x0083
2784 ++#define CINTERION_PRODUCT_AHXX_2RMNET 0x0084
2785 ++#define CINTERION_PRODUCT_AHXX_AUDIO 0x0085
2786 +
2787 + /* Olivetti products */
2788 + #define OLIVETTI_VENDOR_ID 0x0b3c
2789 +@@ -633,6 +637,10 @@ static const struct option_blacklist_info telit_le922_blacklist_usbcfg3 = {
2790 + .reserved = BIT(1) | BIT(2) | BIT(3),
2791 + };
2792 +
2793 ++static const struct option_blacklist_info cinterion_rmnet2_blacklist = {
2794 ++ .reserved = BIT(4) | BIT(5),
2795 ++};
2796 ++
2797 + static const struct usb_device_id option_ids[] = {
2798 + { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
2799 + { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
2800 +@@ -1602,7 +1610,79 @@ static const struct usb_device_id option_ids[] = {
2801 + .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
2802 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
2803 + .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
2804 +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffe9, 0xff, 0xff, 0xff) },
2805 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff42, 0xff, 0xff, 0xff) },
2806 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff43, 0xff, 0xff, 0xff) },
2807 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff44, 0xff, 0xff, 0xff) },
2808 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff45, 0xff, 0xff, 0xff) },
2809 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff46, 0xff, 0xff, 0xff) },
2810 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff47, 0xff, 0xff, 0xff) },
2811 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff48, 0xff, 0xff, 0xff) },
2812 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff49, 0xff, 0xff, 0xff) },
2813 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4a, 0xff, 0xff, 0xff) },
2814 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4b, 0xff, 0xff, 0xff) },
2815 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4c, 0xff, 0xff, 0xff) },
2816 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4d, 0xff, 0xff, 0xff) },
2817 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4e, 0xff, 0xff, 0xff) },
2818 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4f, 0xff, 0xff, 0xff) },
2819 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff50, 0xff, 0xff, 0xff) },
2820 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff51, 0xff, 0xff, 0xff) },
2821 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff52, 0xff, 0xff, 0xff) },
2822 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff53, 0xff, 0xff, 0xff) },
2823 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff54, 0xff, 0xff, 0xff) },
2824 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff55, 0xff, 0xff, 0xff) },
2825 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff56, 0xff, 0xff, 0xff) },
2826 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff57, 0xff, 0xff, 0xff) },
2827 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff58, 0xff, 0xff, 0xff) },
2828 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff59, 0xff, 0xff, 0xff) },
2829 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5a, 0xff, 0xff, 0xff) },
2830 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5b, 0xff, 0xff, 0xff) },
2831 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5c, 0xff, 0xff, 0xff) },
2832 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5d, 0xff, 0xff, 0xff) },
2833 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5e, 0xff, 0xff, 0xff) },
2834 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5f, 0xff, 0xff, 0xff) },
2835 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff60, 0xff, 0xff, 0xff) },
2836 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff61, 0xff, 0xff, 0xff) },
2837 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff62, 0xff, 0xff, 0xff) },
2838 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff63, 0xff, 0xff, 0xff) },
2839 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff64, 0xff, 0xff, 0xff) },
2840 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff65, 0xff, 0xff, 0xff) },
2841 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff66, 0xff, 0xff, 0xff) },
2842 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff67, 0xff, 0xff, 0xff) },
2843 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff68, 0xff, 0xff, 0xff) },
2844 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff69, 0xff, 0xff, 0xff) },
2845 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6a, 0xff, 0xff, 0xff) },
2846 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6b, 0xff, 0xff, 0xff) },
2847 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6c, 0xff, 0xff, 0xff) },
2848 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6d, 0xff, 0xff, 0xff) },
2849 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6e, 0xff, 0xff, 0xff) },
2850 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6f, 0xff, 0xff, 0xff) },
2851 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff70, 0xff, 0xff, 0xff) },
2852 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff71, 0xff, 0xff, 0xff) },
2853 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff72, 0xff, 0xff, 0xff) },
2854 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff73, 0xff, 0xff, 0xff) },
2855 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff74, 0xff, 0xff, 0xff) },
2856 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff75, 0xff, 0xff, 0xff) },
2857 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff76, 0xff, 0xff, 0xff) },
2858 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff77, 0xff, 0xff, 0xff) },
2859 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff78, 0xff, 0xff, 0xff) },
2860 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff79, 0xff, 0xff, 0xff) },
2861 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7a, 0xff, 0xff, 0xff) },
2862 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7b, 0xff, 0xff, 0xff) },
2863 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7c, 0xff, 0xff, 0xff) },
2864 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7d, 0xff, 0xff, 0xff) },
2865 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7e, 0xff, 0xff, 0xff) },
2866 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7f, 0xff, 0xff, 0xff) },
2867 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff80, 0xff, 0xff, 0xff) },
2868 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff81, 0xff, 0xff, 0xff) },
2869 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff82, 0xff, 0xff, 0xff) },
2870 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff83, 0xff, 0xff, 0xff) },
2871 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff84, 0xff, 0xff, 0xff) },
2872 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff85, 0xff, 0xff, 0xff) },
2873 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff86, 0xff, 0xff, 0xff) },
2874 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff87, 0xff, 0xff, 0xff) },
2875 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff88, 0xff, 0xff, 0xff) },
2876 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff89, 0xff, 0xff, 0xff) },
2877 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8a, 0xff, 0xff, 0xff) },
2878 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8b, 0xff, 0xff, 0xff) },
2879 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8c, 0xff, 0xff, 0xff) },
2880 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8d, 0xff, 0xff, 0xff) },
2881 +@@ -1613,6 +1693,61 @@ static const struct usb_device_id option_ids[] = {
2882 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff92, 0xff, 0xff, 0xff) },
2883 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff93, 0xff, 0xff, 0xff) },
2884 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff94, 0xff, 0xff, 0xff) },
2885 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff9f, 0xff, 0xff, 0xff) },
2886 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa0, 0xff, 0xff, 0xff) },
2887 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa1, 0xff, 0xff, 0xff) },
2888 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa2, 0xff, 0xff, 0xff) },
2889 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa3, 0xff, 0xff, 0xff) },
2890 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa4, 0xff, 0xff, 0xff) },
2891 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa5, 0xff, 0xff, 0xff) },
2892 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa6, 0xff, 0xff, 0xff) },
2893 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa7, 0xff, 0xff, 0xff) },
2894 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa8, 0xff, 0xff, 0xff) },
2895 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa9, 0xff, 0xff, 0xff) },
2896 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffaa, 0xff, 0xff, 0xff) },
2897 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffab, 0xff, 0xff, 0xff) },
2898 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffac, 0xff, 0xff, 0xff) },
2899 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffae, 0xff, 0xff, 0xff) },
2900 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffaf, 0xff, 0xff, 0xff) },
2901 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb0, 0xff, 0xff, 0xff) },
2902 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb1, 0xff, 0xff, 0xff) },
2903 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb2, 0xff, 0xff, 0xff) },
2904 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb3, 0xff, 0xff, 0xff) },
2905 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb4, 0xff, 0xff, 0xff) },
2906 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb5, 0xff, 0xff, 0xff) },
2907 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb6, 0xff, 0xff, 0xff) },
2908 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb7, 0xff, 0xff, 0xff) },
2909 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb8, 0xff, 0xff, 0xff) },
2910 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb9, 0xff, 0xff, 0xff) },
2911 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffba, 0xff, 0xff, 0xff) },
2912 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbb, 0xff, 0xff, 0xff) },
2913 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbc, 0xff, 0xff, 0xff) },
2914 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbd, 0xff, 0xff, 0xff) },
2915 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbe, 0xff, 0xff, 0xff) },
2916 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbf, 0xff, 0xff, 0xff) },
2917 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc0, 0xff, 0xff, 0xff) },
2918 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc1, 0xff, 0xff, 0xff) },
2919 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc2, 0xff, 0xff, 0xff) },
2920 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc3, 0xff, 0xff, 0xff) },
2921 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc4, 0xff, 0xff, 0xff) },
2922 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc5, 0xff, 0xff, 0xff) },
2923 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc6, 0xff, 0xff, 0xff) },
2924 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc7, 0xff, 0xff, 0xff) },
2925 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc8, 0xff, 0xff, 0xff) },
2926 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc9, 0xff, 0xff, 0xff) },
2927 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffca, 0xff, 0xff, 0xff) },
2928 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcb, 0xff, 0xff, 0xff) },
2929 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcc, 0xff, 0xff, 0xff) },
2930 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcd, 0xff, 0xff, 0xff) },
2931 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffce, 0xff, 0xff, 0xff) },
2932 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcf, 0xff, 0xff, 0xff) },
2933 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd0, 0xff, 0xff, 0xff) },
2934 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd1, 0xff, 0xff, 0xff) },
2935 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd2, 0xff, 0xff, 0xff) },
2936 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd3, 0xff, 0xff, 0xff) },
2937 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd4, 0xff, 0xff, 0xff) },
2938 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd5, 0xff, 0xff, 0xff) },
2939 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffe9, 0xff, 0xff, 0xff) },
2940 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffec, 0xff, 0xff, 0xff) },
2941 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffee, 0xff, 0xff, 0xff) },
2942 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xfff6, 0xff, 0xff, 0xff) },
2943 +@@ -1712,7 +1847,13 @@ static const struct usb_device_id option_ids[] = {
2944 + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) },
2945 + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
2946 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
2947 +- { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
2948 ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_2RMNET, 0xff),
2949 ++ .driver_info = (kernel_ulong_t)&cinterion_rmnet2_blacklist },
2950 ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_AUDIO, 0xff),
2951 ++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
2952 ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) },
2953 ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) },
2954 ++ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
2955 + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
2956 + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
2957 + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
2958 +diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
2959 +index 504f5bff79c0..b18974cbd995 100644
2960 +--- a/drivers/usb/serial/quatech2.c
2961 ++++ b/drivers/usb/serial/quatech2.c
2962 +@@ -141,6 +141,7 @@ static void qt2_release(struct usb_serial *serial)
2963 +
2964 + serial_priv = usb_get_serial_data(serial);
2965 +
2966 ++ usb_kill_urb(serial_priv->read_urb);
2967 + usb_free_urb(serial_priv->read_urb);
2968 + kfree(serial_priv->read_buffer);
2969 + kfree(serial_priv);
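
The qt2_release() hunk above cancels the port's read URB before freeing the buffer it completes into, so the completion path can no longer touch freed memory. A minimal userspace sketch of the same shutdown ordering, using pthreads; every name below is illustrative and not taken from the driver:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

struct reader {
	pthread_t thread;
	atomic_bool stop;
	char *buffer;		/* written by the worker, like read_buffer */
};

static void *read_worker(void *arg)
{
	struct reader *r = arg;

	while (!atomic_load(&r->stop)) {	/* stands in for URB resubmission */
		memset(r->buffer, 0xff, 64);
		usleep(1000);
	}
	return NULL;
}

static void reader_release(struct reader *r)
{
	/* stop the worker first, as the fix does with usb_kill_urb() ... */
	atomic_store(&r->stop, true);
	pthread_join(r->thread, NULL);

	/* ... and only then free the memory it was writing into */
	free(r->buffer);
	free(r);
}

int main(void)
{
	struct reader *r = calloc(1, sizeof(*r));

	if (!r)
		return 1;
	r->buffer = malloc(64);
	if (!r->buffer)
		return 1;
	atomic_init(&r->stop, false);
	pthread_create(&r->thread, NULL, read_worker, r);
	usleep(10000);
	reader_release(r);
	puts("released after the worker stopped");
	return 0;
}
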
2970 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
2971 +index f07d01bc4875..bfcd87ee8ff5 100644
2972 +--- a/fs/btrfs/ioctl.c
2973 ++++ b/fs/btrfs/ioctl.c
2974 +@@ -1648,7 +1648,7 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
2975 +
2976 + src_inode = file_inode(src.file);
2977 + if (src_inode->i_sb != file_inode(file)->i_sb) {
2978 +- btrfs_info(BTRFS_I(src_inode)->root->fs_info,
2979 ++ btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
2980 + "Snapshot src from another FS");
2981 + ret = -EXDEV;
2982 + } else if (!inode_owner_or_capable(src_inode)) {
2983 +diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
2984 +index 59727e32ed0f..af0ec2d5ad0e 100644
2985 +--- a/fs/cifs/sess.c
2986 ++++ b/fs/cifs/sess.c
2987 +@@ -400,19 +400,27 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
2988 + sec_blob->LmChallengeResponse.MaximumLength = 0;
2989 +
2990 + sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
2991 +- rc = setup_ntlmv2_rsp(ses, nls_cp);
2992 +- if (rc) {
2993 +- cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
2994 +- goto setup_ntlmv2_ret;
2995 ++ if (ses->user_name != NULL) {
2996 ++ rc = setup_ntlmv2_rsp(ses, nls_cp);
2997 ++ if (rc) {
2998 ++ cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
2999 ++ goto setup_ntlmv2_ret;
3000 ++ }
3001 ++ memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
3002 ++ ses->auth_key.len - CIFS_SESS_KEY_SIZE);
3003 ++ tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
3004 ++
3005 ++ sec_blob->NtChallengeResponse.Length =
3006 ++ cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
3007 ++ sec_blob->NtChallengeResponse.MaximumLength =
3008 ++ cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
3009 ++ } else {
3010 ++ /*
3011 ++ * don't send an NT Response for anonymous access
3012 ++ */
3013 ++ sec_blob->NtChallengeResponse.Length = 0;
3014 ++ sec_blob->NtChallengeResponse.MaximumLength = 0;
3015 + }
3016 +- memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
3017 +- ses->auth_key.len - CIFS_SESS_KEY_SIZE);
3018 +- tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
3019 +-
3020 +- sec_blob->NtChallengeResponse.Length =
3021 +- cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
3022 +- sec_blob->NtChallengeResponse.MaximumLength =
3023 +- cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
3024 +
3025 + if (ses->domainName == NULL) {
3026 + sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
3027 +@@ -670,20 +678,24 @@ sess_auth_lanman(struct sess_data *sess_data)
3028 +
3029 + pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE;
3030 +
3031 +- /* no capabilities flags in old lanman negotiation */
3032 +- pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
3033 +-
3034 +- /* Calculate hash with password and copy into bcc_ptr.
3035 +- * Encryption Key (stored as in cryptkey) gets used if the
3036 +- * security mode bit in Negottiate Protocol response states
3037 +- * to use challenge/response method (i.e. Password bit is 1).
3038 +- */
3039 +- rc = calc_lanman_hash(ses->password, ses->server->cryptkey,
3040 +- ses->server->sec_mode & SECMODE_PW_ENCRYPT ?
3041 +- true : false, lnm_session_key);
3042 +-
3043 +- memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE);
3044 +- bcc_ptr += CIFS_AUTH_RESP_SIZE;
3045 ++ if (ses->user_name != NULL) {
3046 ++ /* no capabilities flags in old lanman negotiation */
3047 ++ pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
3048 ++
3049 ++ /* Calculate hash with password and copy into bcc_ptr.
3050 ++ * Encryption Key (stored as in cryptkey) gets used if the
3051 ++ * security mode bit in Negottiate Protocol response states
3052 ++ * to use challenge/response method (i.e. Password bit is 1).
3053 ++ */
3054 ++ rc = calc_lanman_hash(ses->password, ses->server->cryptkey,
3055 ++ ses->server->sec_mode & SECMODE_PW_ENCRYPT ?
3056 ++ true : false, lnm_session_key);
3057 ++
3058 ++ memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE);
3059 ++ bcc_ptr += CIFS_AUTH_RESP_SIZE;
3060 ++ } else {
3061 ++ pSMB->old_req.PasswordLength = 0;
3062 ++ }
3063 +
3064 + /*
3065 + * can not sign if LANMAN negotiated so no need
3066 +@@ -769,26 +781,31 @@ sess_auth_ntlm(struct sess_data *sess_data)
3067 + capabilities = cifs_ssetup_hdr(ses, pSMB);
3068 +
3069 + pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
3070 +- pSMB->req_no_secext.CaseInsensitivePasswordLength =
3071 +- cpu_to_le16(CIFS_AUTH_RESP_SIZE);
3072 +- pSMB->req_no_secext.CaseSensitivePasswordLength =
3073 +- cpu_to_le16(CIFS_AUTH_RESP_SIZE);
3074 +-
3075 +- /* calculate ntlm response and session key */
3076 +- rc = setup_ntlm_response(ses, sess_data->nls_cp);
3077 +- if (rc) {
3078 +- cifs_dbg(VFS, "Error %d during NTLM authentication\n",
3079 +- rc);
3080 +- goto out;
3081 +- }
3082 ++ if (ses->user_name != NULL) {
3083 ++ pSMB->req_no_secext.CaseInsensitivePasswordLength =
3084 ++ cpu_to_le16(CIFS_AUTH_RESP_SIZE);
3085 ++ pSMB->req_no_secext.CaseSensitivePasswordLength =
3086 ++ cpu_to_le16(CIFS_AUTH_RESP_SIZE);
3087 ++
3088 ++ /* calculate ntlm response and session key */
3089 ++ rc = setup_ntlm_response(ses, sess_data->nls_cp);
3090 ++ if (rc) {
3091 ++ cifs_dbg(VFS, "Error %d during NTLM authentication\n",
3092 ++ rc);
3093 ++ goto out;
3094 ++ }
3095 +
3096 +- /* copy ntlm response */
3097 +- memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
3098 +- CIFS_AUTH_RESP_SIZE);
3099 +- bcc_ptr += CIFS_AUTH_RESP_SIZE;
3100 +- memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
3101 +- CIFS_AUTH_RESP_SIZE);
3102 +- bcc_ptr += CIFS_AUTH_RESP_SIZE;
3103 ++ /* copy ntlm response */
3104 ++ memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
3105 ++ CIFS_AUTH_RESP_SIZE);
3106 ++ bcc_ptr += CIFS_AUTH_RESP_SIZE;
3107 ++ memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
3108 ++ CIFS_AUTH_RESP_SIZE);
3109 ++ bcc_ptr += CIFS_AUTH_RESP_SIZE;
3110 ++ } else {
3111 ++ pSMB->req_no_secext.CaseInsensitivePasswordLength = 0;
3112 ++ pSMB->req_no_secext.CaseSensitivePasswordLength = 0;
3113 ++ }
3114 +
3115 + if (ses->capabilities & CAP_UNICODE) {
3116 + /* unicode strings must be word aligned */
3117 +@@ -878,22 +895,26 @@ sess_auth_ntlmv2(struct sess_data *sess_data)
3118 + /* LM2 password would be here if we supported it */
3119 + pSMB->req_no_secext.CaseInsensitivePasswordLength = 0;
3120 +
3121 +- /* calculate nlmv2 response and session key */
3122 +- rc = setup_ntlmv2_rsp(ses, sess_data->nls_cp);
3123 +- if (rc) {
3124 +- cifs_dbg(VFS, "Error %d during NTLMv2 authentication\n", rc);
3125 +- goto out;
3126 +- }
3127 ++ if (ses->user_name != NULL) {
3128 ++ /* calculate nlmv2 response and session key */
3129 ++ rc = setup_ntlmv2_rsp(ses, sess_data->nls_cp);
3130 ++ if (rc) {
3131 ++ cifs_dbg(VFS, "Error %d during NTLMv2 authentication\n", rc);
3132 ++ goto out;
3133 ++ }
3134 +
3135 +- memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
3136 +- ses->auth_key.len - CIFS_SESS_KEY_SIZE);
3137 +- bcc_ptr += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
3138 ++ memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
3139 ++ ses->auth_key.len - CIFS_SESS_KEY_SIZE);
3140 ++ bcc_ptr += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
3141 +
3142 +- /* set case sensitive password length after tilen may get
3143 +- * assigned, tilen is 0 otherwise.
3144 +- */
3145 +- pSMB->req_no_secext.CaseSensitivePasswordLength =
3146 +- cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
3147 ++ /* set case sensitive password length after tilen may get
3148 ++ * assigned, tilen is 0 otherwise.
3149 ++ */
3150 ++ pSMB->req_no_secext.CaseSensitivePasswordLength =
3151 ++ cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
3152 ++ } else {
3153 ++ pSMB->req_no_secext.CaseSensitivePasswordLength = 0;
3154 ++ }
3155 +
3156 + if (ses->capabilities & CAP_UNICODE) {
3157 + if (sess_data->iov[0].iov_len % 2) {
3158 +diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
3159 +index bc0bb9c34f72..0ffa18094335 100644
3160 +--- a/fs/cifs/smb2glob.h
3161 ++++ b/fs/cifs/smb2glob.h
3162 +@@ -44,6 +44,7 @@
3163 + #define SMB2_OP_DELETE 7
3164 + #define SMB2_OP_HARDLINK 8
3165 + #define SMB2_OP_SET_EOF 9
3166 ++#define SMB2_OP_RMDIR 10
3167 +
3168 + /* Used when constructing chained read requests. */
3169 + #define CHAINED_REQUEST 1
3170 +diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
3171 +index 899bbc86f73e..4f0231e685a9 100644
3172 +--- a/fs/cifs/smb2inode.c
3173 ++++ b/fs/cifs/smb2inode.c
3174 +@@ -80,6 +80,10 @@ smb2_open_op_close(const unsigned int xid, struct cifs_tcon *tcon,
3175 + * SMB2_open() call.
3176 + */
3177 + break;
3178 ++ case SMB2_OP_RMDIR:
3179 ++ tmprc = SMB2_rmdir(xid, tcon, fid.persistent_fid,
3180 ++ fid.volatile_fid);
3181 ++ break;
3182 + case SMB2_OP_RENAME:
3183 + tmprc = SMB2_rename(xid, tcon, fid.persistent_fid,
3184 + fid.volatile_fid, (__le16 *)data);
3185 +@@ -191,8 +195,8 @@ smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
3186 + struct cifs_sb_info *cifs_sb)
3187 + {
3188 + return smb2_open_op_close(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
3189 +- CREATE_NOT_FILE | CREATE_DELETE_ON_CLOSE,
3190 +- NULL, SMB2_OP_DELETE);
3191 ++ CREATE_NOT_FILE,
3192 ++ NULL, SMB2_OP_RMDIR);
3193 + }
3194 +
3195 + int
3196 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
3197 +index 373b5cd1c913..82c5f57382b2 100644
3198 +--- a/fs/cifs/smb2pdu.c
3199 ++++ b/fs/cifs/smb2pdu.c
3200 +@@ -2577,6 +2577,22 @@ SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
3201 + }
3202 +
3203 + int
3204 ++SMB2_rmdir(const unsigned int xid, struct cifs_tcon *tcon,
3205 ++ u64 persistent_fid, u64 volatile_fid)
3206 ++{
3207 ++ __u8 delete_pending = 1;
3208 ++ void *data;
3209 ++ unsigned int size;
3210 ++
3211 ++ data = &delete_pending;
3212 ++ size = 1; /* sizeof __u8 */
3213 ++
3214 ++ return send_set_info(xid, tcon, persistent_fid, volatile_fid,
3215 ++ current->tgid, FILE_DISPOSITION_INFORMATION, 1, &data,
3216 ++ &size);
3217 ++}
3218 ++
3219 ++int
3220 + SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
3221 + u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
3222 + {
3223 +diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
3224 +index 79dc650c18b2..9bc59f9c12fb 100644
3225 +--- a/fs/cifs/smb2proto.h
3226 ++++ b/fs/cifs/smb2proto.h
3227 +@@ -140,6 +140,8 @@ extern int SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
3228 + extern int SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
3229 + u64 persistent_fid, u64 volatile_fid,
3230 + __le16 *target_file);
3231 ++extern int SMB2_rmdir(const unsigned int xid, struct cifs_tcon *tcon,
3232 ++ u64 persistent_fid, u64 volatile_fid);
3233 + extern int SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
3234 + u64 persistent_fid, u64 volatile_fid,
3235 + __le16 *target_file);
3236 +diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
3237 +index 9bdbf98240a0..796ff0eafd3c 100644
3238 +--- a/fs/ext4/move_extent.c
3239 ++++ b/fs/ext4/move_extent.c
3240 +@@ -390,6 +390,7 @@ data_copy:
3241 + *err = ext4_get_block(orig_inode, orig_blk_offset + i, bh, 0);
3242 + if (*err < 0)
3243 + break;
3244 ++ bh = bh->b_this_page;
3245 + }
3246 + if (!*err)
3247 + *err = block_commit_write(pagep[0], from, from + replaced_size);
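
The ext4 hunk above fixes a loop that kept mapping the same buffer head for every block in the page because it never advanced along the page's circular b_this_page list. A small userspace sketch of that traversal, with a local struct that only mimics the kernel field names:

#include <stdio.h>

struct buffer_head {
	int blocknr;
	struct buffer_head *b_this_page;	/* circular list of a page's buffers */
};

int main(void)
{
	enum { BLOCKS_PER_PAGE = 4 };
	struct buffer_head bh_array[BLOCKS_PER_PAGE];
	struct buffer_head *bh = &bh_array[0];
	int i;

	/* build the circular per-page list */
	for (i = 0; i < BLOCKS_PER_PAGE; i++) {
		bh_array[i].blocknr = 100 + i;
		bh_array[i].b_this_page = &bh_array[(i + 1) % BLOCKS_PER_PAGE];
	}

	/* the fixed loop: handle one buffer, then advance to the next */
	for (i = 0; i < BLOCKS_PER_PAGE; i++) {
		printf("mapping block %d\n", bh->blocknr);
		bh = bh->b_this_page;	/* the line the patch adds */
	}
	return 0;
}
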
3248 +diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
3249 +index a561591896bd..3713fd52b44b 100644
3250 +--- a/fs/hpfs/super.c
3251 ++++ b/fs/hpfs/super.c
3252 +@@ -15,6 +15,7 @@
3253 + #include <linux/sched.h>
3254 + #include <linux/bitmap.h>
3255 + #include <linux/slab.h>
3256 ++#include <linux/seq_file.h>
3257 +
3258 + /* Mark the filesystem dirty, so that chkdsk checks it when os/2 booted */
3259 +
3260 +@@ -453,10 +454,6 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
3261 + int lowercase, eas, chk, errs, chkdsk, timeshift;
3262 + int o;
3263 + struct hpfs_sb_info *sbi = hpfs_sb(s);
3264 +- char *new_opts = kstrdup(data, GFP_KERNEL);
3265 +-
3266 +- if (!new_opts)
3267 +- return -ENOMEM;
3268 +
3269 + sync_filesystem(s);
3270 +
3271 +@@ -493,17 +490,44 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
3272 +
3273 + if (!(*flags & MS_RDONLY)) mark_dirty(s, 1);
3274 +
3275 +- replace_mount_options(s, new_opts);
3276 +-
3277 + hpfs_unlock(s);
3278 + return 0;
3279 +
3280 + out_err:
3281 + hpfs_unlock(s);
3282 +- kfree(new_opts);
3283 + return -EINVAL;
3284 + }
3285 +
3286 ++static int hpfs_show_options(struct seq_file *seq, struct dentry *root)
3287 ++{
3288 ++ struct hpfs_sb_info *sbi = hpfs_sb(root->d_sb);
3289 ++
3290 ++ seq_printf(seq, ",uid=%u", from_kuid_munged(&init_user_ns, sbi->sb_uid));
3291 ++ seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, sbi->sb_gid));
3292 ++ seq_printf(seq, ",umask=%03o", (~sbi->sb_mode & 0777));
3293 ++ if (sbi->sb_lowercase)
3294 ++ seq_printf(seq, ",case=lower");
3295 ++ if (!sbi->sb_chk)
3296 ++ seq_printf(seq, ",check=none");
3297 ++ if (sbi->sb_chk == 2)
3298 ++ seq_printf(seq, ",check=strict");
3299 ++ if (!sbi->sb_err)
3300 ++ seq_printf(seq, ",errors=continue");
3301 ++ if (sbi->sb_err == 2)
3302 ++ seq_printf(seq, ",errors=panic");
3303 ++ if (!sbi->sb_chkdsk)
3304 ++ seq_printf(seq, ",chkdsk=no");
3305 ++ if (sbi->sb_chkdsk == 2)
3306 ++ seq_printf(seq, ",chkdsk=always");
3307 ++ if (!sbi->sb_eas)
3308 ++ seq_printf(seq, ",eas=no");
3309 ++ if (sbi->sb_eas == 1)
3310 ++ seq_printf(seq, ",eas=ro");
3311 ++ if (sbi->sb_timeshift)
3312 ++ seq_printf(seq, ",timeshift=%d", sbi->sb_timeshift);
3313 ++ return 0;
3314 ++}
3315 ++
3316 + /* Super operations */
3317 +
3318 + static const struct super_operations hpfs_sops =
3319 +@@ -514,7 +538,7 @@ static const struct super_operations hpfs_sops =
3320 + .put_super = hpfs_put_super,
3321 + .statfs = hpfs_statfs,
3322 + .remount_fs = hpfs_remount_fs,
3323 +- .show_options = generic_show_options,
3324 ++ .show_options = hpfs_show_options,
3325 + };
3326 +
3327 + static int hpfs_fill_super(struct super_block *s, void *options, int silent)
3328 +@@ -537,8 +561,6 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
3329 +
3330 + int o;
3331 +
3332 +- save_mount_options(s, options);
3333 +-
3334 + sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
3335 + if (!sbi) {
3336 + return -ENOMEM;
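
The hpfs change above drops the saved mount string and generic_show_options in favour of hpfs_show_options(), which rebuilds the option list from the superblock state, so what is shown always matches what is actually in effect after a remount. A userspace sketch of the same idea, deriving the options from a settings struct rather than echoing the string the user passed in (struct and option names are made up for this example):

#include <stdio.h>

struct fs_settings {
	unsigned int uid, gid, mode;
	int lowercase;
	int timeshift;
};

/* derive the option string from live state, not from saved mount data */
static void show_options(FILE *out, const struct fs_settings *s)
{
	fprintf(out, ",uid=%u", s->uid);
	fprintf(out, ",gid=%u", s->gid);
	fprintf(out, ",umask=%03o", ~s->mode & 0777);
	if (s->lowercase)
		fprintf(out, ",case=lower");
	if (s->timeshift)
		fprintf(out, ",timeshift=%d", s->timeshift);
	fputc('\n', out);
}

int main(void)
{
	struct fs_settings s = { .uid = 1000, .gid = 1000, .mode = 0755,
				 .lowercase = 1, .timeshift = 0 };

	show_options(stdout, &s);
	return 0;
}
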
3337 +diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
3338 +index e2aadbc7151f..7d633f19e38a 100644
3339 +--- a/include/asm-generic/qspinlock.h
3340 ++++ b/include/asm-generic/qspinlock.h
3341 +@@ -27,7 +27,30 @@
3342 + */
3343 + static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
3344 + {
3345 +- return atomic_read(&lock->val);
3346 ++ /*
3347 ++ * queued_spin_lock_slowpath() can ACQUIRE the lock before
3348 ++ * issuing the unordered store that sets _Q_LOCKED_VAL.
3349 ++ *
3350 ++ * See both smp_cond_acquire() sites for more detail.
3351 ++ *
3352 ++ * This however means that in code like:
3353 ++ *
3354 ++ * spin_lock(A) spin_lock(B)
3355 ++ * spin_unlock_wait(B) spin_is_locked(A)
3356 ++ * do_something() do_something()
3357 ++ *
3358 ++ * Both CPUs can end up running do_something() because the store
3359 ++ * setting _Q_LOCKED_VAL will pass through the loads in
3360 ++ * spin_unlock_wait() and/or spin_is_locked().
3361 ++ *
3362 ++ * Avoid this by issuing a full memory barrier between the spin_lock()
3363 ++ * and the loads in spin_unlock_wait() and spin_is_locked().
3364 ++ *
3365 ++ * Note that regular mutual exclusion doesn't care about this
3366 ++ * delayed store.
3367 ++ */
3368 ++ smp_mb();
3369 ++ return atomic_read(&lock->val) & _Q_LOCKED_MASK;
3370 + }
3371 +
3372 + /**
3373 +@@ -107,6 +130,8 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
3374 + */
3375 + static inline void queued_spin_unlock_wait(struct qspinlock *lock)
3376 + {
3377 ++ /* See queued_spin_is_locked() */
3378 ++ smp_mb();
3379 + while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
3380 + cpu_relax();
3381 + }
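
The comment added to queued_spin_is_locked() above describes a store-buffering hazard: each CPU's lock store may still be sitting in its store buffer when the other CPU's spin_is_locked()/spin_unlock_wait() load runs, so both can observe "unlocked" unless a full barrier separates the store from the load. The classic litmus test for that pattern, sketched with C11 atomics purely as a userspace illustration (the seq_cst fences play the role of the added smp_mb(); whether the bad outcome ever shows up without them depends on the hardware):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int x, y;
static int r1, r2;

static void *cpu0(void *arg)
{
	(void)arg;
	atomic_store_explicit(&x, 1, memory_order_relaxed);	/* "take lock A"  */
	atomic_thread_fence(memory_order_seq_cst);		/* the smp_mb()   */
	r1 = atomic_load_explicit(&y, memory_order_relaxed);	/* "is B locked?" */
	return NULL;
}

static void *cpu1(void *arg)
{
	(void)arg;
	atomic_store_explicit(&y, 1, memory_order_relaxed);	/* "take lock B"  */
	atomic_thread_fence(memory_order_seq_cst);
	r2 = atomic_load_explicit(&x, memory_order_relaxed);	/* "is A locked?" */
	return NULL;
}

int main(void)
{
	pthread_t t0, t1;

	pthread_create(&t0, NULL, cpu0, NULL);
	pthread_create(&t1, NULL, cpu1, NULL);
	pthread_join(t0, NULL);
	pthread_join(t1, NULL);

	/* with the fences, r1 == 0 && r2 == 0 cannot happen; drop them and
	   both threads may miss the other's store */
	printf("r1=%d r2=%d\n", r1, r2);
	return 0;
}
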
3382 +diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h
3383 +index 3d1a3af5cf59..a2508a8f9a9c 100644
3384 +--- a/include/asm-generic/siginfo.h
3385 ++++ b/include/asm-generic/siginfo.h
3386 +@@ -17,21 +17,6 @@
3387 + struct siginfo;
3388 + void do_schedule_next_timer(struct siginfo *info);
3389 +
3390 +-#ifndef HAVE_ARCH_COPY_SIGINFO
3391 +-
3392 +-#include <linux/string.h>
3393 +-
3394 +-static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
3395 +-{
3396 +- if (from->si_code < 0)
3397 +- memcpy(to, from, sizeof(*to));
3398 +- else
3399 +- /* _sigchld is currently the largest know union member */
3400 +- memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
3401 +-}
3402 +-
3403 +-#endif
3404 +-
3405 + extern int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from);
3406 +
3407 + #endif
3408 +diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
3409 +index 735f9f8c4e43..5261751f6bd4 100644
3410 +--- a/include/linux/can/dev.h
3411 ++++ b/include/linux/can/dev.h
3412 +@@ -40,8 +40,11 @@ struct can_priv {
3413 + struct can_clock clock;
3414 +
3415 + enum can_state state;
3416 +- u32 ctrlmode;
3417 +- u32 ctrlmode_supported;
3418 ++
3419 ++ /* CAN controller features - see include/uapi/linux/can/netlink.h */
3420 ++ u32 ctrlmode; /* current options setting */
3421 ++ u32 ctrlmode_supported; /* options that can be modified by netlink */
3422 ++ u32 ctrlmode_static; /* static enabled options for driver/hardware */
3423 +
3424 + int restart_ms;
3425 + struct timer_list restart_timer;
3426 +@@ -108,6 +111,21 @@ static inline bool can_is_canfd_skb(const struct sk_buff *skb)
3427 + return skb->len == CANFD_MTU;
3428 + }
3429 +
3430 ++/* helper to define static CAN controller features at device creation time */
3431 ++static inline void can_set_static_ctrlmode(struct net_device *dev,
3432 ++ u32 static_mode)
3433 ++{
3434 ++ struct can_priv *priv = netdev_priv(dev);
3435 ++
3436 ++ /* alloc_candev() succeeded => netdev_priv() is valid at this point */
3437 ++ priv->ctrlmode = static_mode;
3438 ++ priv->ctrlmode_static = static_mode;
3439 ++
3440 ++ /* override MTU which was set by default in can_setup()? */
3441 ++ if (static_mode & CAN_CTRLMODE_FD)
3442 ++ dev->mtu = CANFD_MTU;
3443 ++}
3444 ++
3445 + /* get data length from can_dlc with sanitized can_dlc */
3446 + u8 can_dlc2len(u8 can_dlc);
3447 +
3448 +diff --git a/include/linux/signal.h b/include/linux/signal.h
3449 +index 92557bbce7e7..d80259afb9e5 100644
3450 +--- a/include/linux/signal.h
3451 ++++ b/include/linux/signal.h
3452 +@@ -28,6 +28,21 @@ struct sigpending {
3453 + sigset_t signal;
3454 + };
3455 +
3456 ++#ifndef HAVE_ARCH_COPY_SIGINFO
3457 ++
3458 ++#include <linux/string.h>
3459 ++
3460 ++static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
3461 ++{
3462 ++ if (from->si_code < 0)
3463 ++ memcpy(to, from, sizeof(*to));
3464 ++ else
3465 ++ /* _sigchld is currently the largest know union member */
3466 ++ memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
3467 ++}
3468 ++
3469 ++#endif
3470 ++
3471 + /*
3472 + * Define some primitives to manipulate sigset_t.
3473 + */
3474 +diff --git a/include/linux/tty.h b/include/linux/tty.h
3475 +index 3bf03b6b52e9..83b264c52898 100644
3476 +--- a/include/linux/tty.h
3477 ++++ b/include/linux/tty.h
3478 +@@ -338,7 +338,6 @@ struct tty_file_private {
3479 + #define TTY_EXCLUSIVE 3 /* Exclusive open mode */
3480 + #define TTY_DEBUG 4 /* Debugging */
3481 + #define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */
3482 +-#define TTY_OTHER_DONE 6 /* Closed pty has completed input processing */
3483 + #define TTY_LDISC_OPEN 11 /* Line discipline is open */
3484 + #define TTY_PTY_LOCK 16 /* pty private */
3485 + #define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */
3486 +@@ -469,6 +468,7 @@ extern void tty_buffer_init(struct tty_port *port);
3487 + extern void tty_buffer_set_lock_subclass(struct tty_port *port);
3488 + extern bool tty_buffer_restart_work(struct tty_port *port);
3489 + extern bool tty_buffer_cancel_work(struct tty_port *port);
3490 ++extern void tty_buffer_flush_work(struct tty_port *port);
3491 + extern speed_t tty_termios_baud_rate(struct ktermios *termios);
3492 + extern speed_t tty_termios_input_baud_rate(struct ktermios *termios);
3493 + extern void tty_termios_encode_baud_rate(struct ktermios *termios,
3494 +diff --git a/include/linux/usb.h b/include/linux/usb.h
3495 +index b79925dd2b41..12891ffd4bf0 100644
3496 +--- a/include/linux/usb.h
3497 ++++ b/include/linux/usb.h
3498 +@@ -1068,7 +1068,7 @@ struct usbdrv_wrap {
3499 + * for interfaces bound to this driver.
3500 + * @soft_unbind: if set to 1, the USB core will not kill URBs and disable
3501 + * endpoints before calling the driver's disconnect method.
3502 +- * @disable_hub_initiated_lpm: if set to 0, the USB core will not allow hubs
3503 ++ * @disable_hub_initiated_lpm: if set to 1, the USB core will not allow hubs
3504 + * to initiate lower power link state transitions when an idle timeout
3505 + * occurs. Device-initiated USB 3.0 link PM will still be allowed.
3506 + *
3507 +diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
3508 +index fe89d7cd67b9..4f6ba34cdee6 100644
3509 +--- a/include/scsi/scsi_device.h
3510 ++++ b/include/scsi/scsi_device.h
3511 +@@ -239,6 +239,7 @@ scmd_printk(const char *, const struct scsi_cmnd *, const char *, ...);
3512 + enum scsi_target_state {
3513 + STARGET_CREATED = 1,
3514 + STARGET_RUNNING,
3515 ++ STARGET_REMOVE,
3516 + STARGET_DEL,
3517 + };
3518 +
3519 +diff --git a/kernel/events/core.c b/kernel/events/core.c
3520 +index 1e889a078dbc..95e47d2f2c67 100644
3521 +--- a/kernel/events/core.c
3522 ++++ b/kernel/events/core.c
3523 +@@ -946,6 +946,7 @@ static void put_ctx(struct perf_event_context *ctx)
3524 + * function.
3525 + *
3526 + * Lock order:
3527 ++ * cred_guard_mutex
3528 + * task_struct::perf_event_mutex
3529 + * perf_event_context::mutex
3530 + * perf_event_context::lock
3531 +@@ -3418,7 +3419,6 @@ static struct task_struct *
3532 + find_lively_task_by_vpid(pid_t vpid)
3533 + {
3534 + struct task_struct *task;
3535 +- int err;
3536 +
3537 + rcu_read_lock();
3538 + if (!vpid)
3539 +@@ -3432,16 +3432,7 @@ find_lively_task_by_vpid(pid_t vpid)
3540 + if (!task)
3541 + return ERR_PTR(-ESRCH);
3542 +
3543 +- /* Reuse ptrace permission checks for now. */
3544 +- err = -EACCES;
3545 +- if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
3546 +- goto errout;
3547 +-
3548 + return task;
3549 +-errout:
3550 +- put_task_struct(task);
3551 +- return ERR_PTR(err);
3552 +-
3553 + }
3554 +
3555 + /*
3556 +@@ -8328,6 +8319,24 @@ SYSCALL_DEFINE5(perf_event_open,
3557 +
3558 + get_online_cpus();
3559 +
3560 ++ if (task) {
3561 ++ err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
3562 ++ if (err)
3563 ++ goto err_cpus;
3564 ++
3565 ++ /*
3566 ++ * Reuse ptrace permission checks for now.
3567 ++ *
3568 ++ * We must hold cred_guard_mutex across this and any potential
3569 ++ * perf_install_in_context() call for this new event to
3570 ++ * serialize against exec() altering our credentials (and the
3571 ++ * perf_event_exit_task() that could imply).
3572 ++ */
3573 ++ err = -EACCES;
3574 ++ if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
3575 ++ goto err_cred;
3576 ++ }
3577 ++
3578 + if (flags & PERF_FLAG_PID_CGROUP)
3579 + cgroup_fd = pid;
3580 +
3581 +@@ -8335,7 +8344,7 @@ SYSCALL_DEFINE5(perf_event_open,
3582 + NULL, NULL, cgroup_fd);
3583 + if (IS_ERR(event)) {
3584 + err = PTR_ERR(event);
3585 +- goto err_cpus;
3586 ++ goto err_cred;
3587 + }
3588 +
3589 + if (is_sampling_event(event)) {
3590 +@@ -8394,11 +8403,6 @@ SYSCALL_DEFINE5(perf_event_open,
3591 + goto err_context;
3592 + }
3593 +
3594 +- if (task) {
3595 +- put_task_struct(task);
3596 +- task = NULL;
3597 +- }
3598 +-
3599 + /*
3600 + * Look up the group leader (we will attach this event to it):
3601 + */
3602 +@@ -8486,6 +8490,11 @@ SYSCALL_DEFINE5(perf_event_open,
3603 +
3604 + WARN_ON_ONCE(ctx->parent_ctx);
3605 +
3606 ++ /*
3607 ++ * This is the point on no return; we cannot fail hereafter. This is
3608 ++ * where we start modifying current state.
3609 ++ */
3610 ++
3611 + if (move_group) {
3612 + /*
3613 + * See perf_event_ctx_lock() for comments on the details
3614 +@@ -8555,6 +8564,11 @@ SYSCALL_DEFINE5(perf_event_open,
3615 + mutex_unlock(&gctx->mutex);
3616 + mutex_unlock(&ctx->mutex);
3617 +
3618 ++ if (task) {
3619 ++ mutex_unlock(&task->signal->cred_guard_mutex);
3620 ++ put_task_struct(task);
3621 ++ }
3622 ++
3623 + put_online_cpus();
3624 +
3625 + event->owner = current;
3626 +@@ -8589,6 +8603,9 @@ err_alloc:
3627 + */
3628 + if (!event_file)
3629 + free_event(event);
3630 ++err_cred:
3631 ++ if (task)
3632 ++ mutex_unlock(&task->signal->cred_guard_mutex);
3633 + err_cpus:
3634 + put_online_cpus();
3635 + err_task:
3636 +@@ -8868,6 +8885,9 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
3637 +
3638 + /*
3639 + * When a child task exits, feed back event values to parent events.
3640 ++ *
3641 ++ * Can be called with cred_guard_mutex held when called from
3642 ++ * install_exec_creds().
3643 + */
3644 + void perf_event_exit_task(struct task_struct *child)
3645 + {
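
The perf_event_open() rework above moves the ptrace-style permission check under the target task's cred_guard_mutex and keeps that mutex held until the event is installed, so an exec() cannot change the task's credentials between the check and the use. A minimal userspace sketch of that check-under-lock pattern with pthreads; the struct and function names here are invented for the illustration:

#include <pthread.h>
#include <stdio.h>

struct task {
	pthread_mutex_t cred_guard;	/* serializes credential changes */
	unsigned int uid;
};

static int may_attach(const struct task *t, unsigned int caller_uid)
{
	return t->uid == caller_uid;	/* stand-in for the ptrace check */
}

/* hold the guard across the check *and* the attach, so a concurrent
 * credential change cannot slip in between them */
static int attach_event(struct task *t, unsigned int caller_uid)
{
	int err = 0;

	pthread_mutex_lock(&t->cred_guard);
	if (!may_attach(t, caller_uid))
		err = -1;		/* -EACCES in the kernel */
	else
		printf("event installed on task with uid %u\n", t->uid);
	pthread_mutex_unlock(&t->cred_guard);
	return err;
}

int main(void)
{
	struct task t = { PTHREAD_MUTEX_INITIALIZER, 1000 };

	return attach_event(&t, 1000) ? 1 : 0;
}
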
3646 +diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
3647 +index ef7159012cf3..b0b93fd33af9 100644
3648 +--- a/kernel/sched/loadavg.c
3649 ++++ b/kernel/sched/loadavg.c
3650 +@@ -99,10 +99,13 @@ long calc_load_fold_active(struct rq *this_rq)
3651 + static unsigned long
3652 + calc_load(unsigned long load, unsigned long exp, unsigned long active)
3653 + {
3654 +- load *= exp;
3655 +- load += active * (FIXED_1 - exp);
3656 +- load += 1UL << (FSHIFT - 1);
3657 +- return load >> FSHIFT;
3658 ++ unsigned long newload;
3659 ++
3660 ++ newload = load * exp + active * (FIXED_1 - exp);
3661 ++ if (active >= load)
3662 ++ newload += FIXED_1-1;
3663 ++
3664 ++ return newload / FIXED_1;
3665 + }
3666 +
3667 + #ifdef CONFIG_NO_HZ_COMMON
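
The loadavg hunk above switches calc_load() from round-to-nearest to rounding toward the sampled active count, so an idle system can decay all the way to 0.00 instead of sticking at the smallest nonzero fixed-point value. A userspace rerun of both formulas with the kernel's 11-bit fixed-point constants shows the difference; the constants mirror the kernel headers, while the loop is only this example's harness:

#include <stdio.h>

#define FSHIFT	11
#define FIXED_1	(1UL << FSHIFT)
#define EXP_1	1884UL			/* 1-minute decay factor */

static unsigned long calc_load_old(unsigned long load, unsigned long exp,
				   unsigned long active)
{
	load *= exp;
	load += active * (FIXED_1 - exp);
	load += 1UL << (FSHIFT - 1);	/* round to nearest */
	return load >> FSHIFT;
}

static unsigned long calc_load_new(unsigned long load, unsigned long exp,
				   unsigned long active)
{
	unsigned long newload = load * exp + active * (FIXED_1 - exp);

	if (active >= load)		/* only round up toward activity */
		newload += FIXED_1 - 1;
	return newload / FIXED_1;
}

int main(void)
{
	unsigned long old_load = FIXED_1 / 2, new_load = FIXED_1 / 2;	/* 0.50 */
	int tick;

	/* nothing runnable any more: active == 0 every sample */
	for (tick = 0; tick < 200; tick++) {
		old_load = calc_load_old(old_load, EXP_1, 0);
		new_load = calc_load_new(new_load, EXP_1, 0);
	}
	/* old formula bottoms out at 1 and never reaches 0.00, new one hits 0 */
	printf("old=%lu new=%lu\n", old_load, new_load);
	return 0;
}
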
3668 +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
3669 +index 9c6045a27ba3..acbb0e73d3a2 100644
3670 +--- a/kernel/trace/ring_buffer.c
3671 ++++ b/kernel/trace/ring_buffer.c
3672 +@@ -437,7 +437,7 @@ struct ring_buffer_per_cpu {
3673 + raw_spinlock_t reader_lock; /* serialize readers */
3674 + arch_spinlock_t lock;
3675 + struct lock_class_key lock_key;
3676 +- unsigned int nr_pages;
3677 ++ unsigned long nr_pages;
3678 + unsigned int current_context;
3679 + struct list_head *pages;
3680 + struct buffer_page *head_page; /* read from head */
3681 +@@ -458,7 +458,7 @@ struct ring_buffer_per_cpu {
3682 + u64 write_stamp;
3683 + u64 read_stamp;
3684 + /* ring buffer pages to update, > 0 to add, < 0 to remove */
3685 +- int nr_pages_to_update;
3686 ++ long nr_pages_to_update;
3687 + struct list_head new_pages; /* new pages to add */
3688 + struct work_struct update_pages_work;
3689 + struct completion update_done;
3690 +@@ -1137,10 +1137,10 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
3691 + return 0;
3692 + }
3693 +
3694 +-static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
3695 ++static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
3696 + {
3697 +- int i;
3698 + struct buffer_page *bpage, *tmp;
3699 ++ long i;
3700 +
3701 + for (i = 0; i < nr_pages; i++) {
3702 + struct page *page;
3703 +@@ -1177,7 +1177,7 @@ free_pages:
3704 + }
3705 +
3706 + static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
3707 +- unsigned nr_pages)
3708 ++ unsigned long nr_pages)
3709 + {
3710 + LIST_HEAD(pages);
3711 +
3712 +@@ -1202,7 +1202,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
3713 + }
3714 +
3715 + static struct ring_buffer_per_cpu *
3716 +-rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
3717 ++rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu)
3718 + {
3719 + struct ring_buffer_per_cpu *cpu_buffer;
3720 + struct buffer_page *bpage;
3721 +@@ -1302,8 +1302,9 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
3722 + struct lock_class_key *key)
3723 + {
3724 + struct ring_buffer *buffer;
3725 ++ long nr_pages;
3726 + int bsize;
3727 +- int cpu, nr_pages;
3728 ++ int cpu;
3729 +
3730 + /* keep it in its own cache line */
3731 + buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
3732 +@@ -1429,12 +1430,12 @@ static inline unsigned long rb_page_write(struct buffer_page *bpage)
3733 + }
3734 +
3735 + static int
3736 +-rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
3737 ++rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
3738 + {
3739 + struct list_head *tail_page, *to_remove, *next_page;
3740 + struct buffer_page *to_remove_page, *tmp_iter_page;
3741 + struct buffer_page *last_page, *first_page;
3742 +- unsigned int nr_removed;
3743 ++ unsigned long nr_removed;
3744 + unsigned long head_bit;
3745 + int page_entries;
3746 +
3747 +@@ -1651,7 +1652,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
3748 + int cpu_id)
3749 + {
3750 + struct ring_buffer_per_cpu *cpu_buffer;
3751 +- unsigned nr_pages;
3752 ++ unsigned long nr_pages;
3753 + int cpu, err = 0;
3754 +
3755 + /*
3756 +@@ -1665,14 +1666,13 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
3757 + !cpumask_test_cpu(cpu_id, buffer->cpumask))
3758 + return size;
3759 +
3760 +- size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
3761 +- size *= BUF_PAGE_SIZE;
3762 ++ nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
3763 +
3764 + /* we need a minimum of two pages */
3765 +- if (size < BUF_PAGE_SIZE * 2)
3766 +- size = BUF_PAGE_SIZE * 2;
3767 ++ if (nr_pages < 2)
3768 ++ nr_pages = 2;
3769 +
3770 +- nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
3771 ++ size = nr_pages * BUF_PAGE_SIZE;
3772 +
3773 + /*
3774 + * Don't succeed if resizing is disabled, as a reader might be
3775 +@@ -4645,8 +4645,9 @@ static int rb_cpu_notify(struct notifier_block *self,
3776 + struct ring_buffer *buffer =
3777 + container_of(self, struct ring_buffer, cpu_notify);
3778 + long cpu = (long)hcpu;
3779 +- int cpu_i, nr_pages_same;
3780 +- unsigned int nr_pages;
3781 ++ long nr_pages_same;
3782 ++ int cpu_i;
3783 ++ unsigned long nr_pages;
3784 +
3785 + switch (action) {
3786 + case CPU_UP_PREPARE:
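
The ring-buffer changes above widen nr_pages and related counters from int to long because the page count is derived from a user-supplied byte size and can exceed what 32 bits hold. A quick userspace check of that arithmetic, assuming a 64-bit unsigned long and using 4080 as a stand-in page payload size (the real BUF_PAGE_SIZE is PAGE_SIZE minus a small header):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define BUF_PAGE_SIZE 4080UL	/* illustrative value only */

int main(void)
{
	unsigned long size = 16UL << 40;	/* a 16 TiB resize request */
	unsigned long nr_pages_long = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	unsigned int nr_pages_32 = DIV_ROUND_UP(size, BUF_PAGE_SIZE);	/* 32-bit, as before the fix */

	printf("as long  : %lu pages\n", nr_pages_long);
	printf("as 32-bit: %u pages (truncated)\n", nr_pages_32);
	return 0;
}
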
3787 +diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
3788 +index 4efedcbe4165..da3386a9d244 100644
3789 +--- a/scripts/Makefile.extrawarn
3790 ++++ b/scripts/Makefile.extrawarn
3791 +@@ -24,6 +24,7 @@ warning-1 += $(call cc-option, -Wmissing-prototypes)
3792 + warning-1 += -Wold-style-definition
3793 + warning-1 += $(call cc-option, -Wmissing-include-dirs)
3794 + warning-1 += $(call cc-option, -Wunused-but-set-variable)
3795 ++warning-1 += $(call cc-option, -Wunused-const-variable)
3796 + warning-1 += $(call cc-disable-warning, missing-field-initializers)
3797 +
3798 + warning-2 := -Waggregate-return
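
The Makefile.extrawarn hunk above adds -Wunused-const-variable to the W=1 warning set. What it flags is simply a file-scope const that nothing references; a contrived example (not from the kernel) that warns when built with a sufficiently recent gcc as "gcc -Wunused-const-variable -c unused-const.c", or through "make W=1" in a kernel tree:

/* unused-const.c */
static const int default_timeouts[] = { 10, 30, 60 };	/* never referenced */

int main(void)
{
	return 0;	/* the table above is dead weight the warning points at */
}
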
3799 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3800 +index 4918ffa5ba68..d53c25e7a1c1 100644
3801 +--- a/sound/pci/hda/patch_realtek.c
3802 ++++ b/sound/pci/hda/patch_realtek.c
3803 +@@ -335,6 +335,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
3804 + case 0x10ec0283:
3805 + case 0x10ec0286:
3806 + case 0x10ec0288:
3807 ++ case 0x10ec0295:
3808 + case 0x10ec0298:
3809 + alc_update_coef_idx(codec, 0x10, 1<<9, 0);
3810 + break;
3811 +@@ -342,6 +343,11 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
3812 + case 0x10ec0293:
3813 + alc_update_coef_idx(codec, 0xa, 1<<13, 0);
3814 + break;
3815 ++ case 0x10ec0234:
3816 ++ case 0x10ec0274:
3817 ++ case 0x10ec0294:
3818 ++ alc_update_coef_idx(codec, 0x10, 1<<15, 0);
3819 ++ break;
3820 + case 0x10ec0662:
3821 + if ((coef & 0x00f0) == 0x0030)
3822 + alc_update_coef_idx(codec, 0x4, 1<<10, 0); /* EAPD Ctrl */
3823 +@@ -902,6 +908,7 @@ static struct alc_codec_rename_pci_table rename_pci_tbl[] = {
3824 + { 0x10ec0298, 0x1028, 0, "ALC3266" },
3825 + { 0x10ec0256, 0x1028, 0, "ALC3246" },
3826 + { 0x10ec0225, 0x1028, 0, "ALC3253" },
3827 ++ { 0x10ec0295, 0x1028, 0, "ALC3254" },
3828 + { 0x10ec0670, 0x1025, 0, "ALC669X" },
3829 + { 0x10ec0676, 0x1025, 0, "ALC679X" },
3830 + { 0x10ec0282, 0x1043, 0, "ALC3229" },
3831 +@@ -2647,6 +2654,7 @@ enum {
3832 + ALC269_TYPE_ALC255,
3833 + ALC269_TYPE_ALC256,
3834 + ALC269_TYPE_ALC225,
3835 ++ ALC269_TYPE_ALC294,
3836 + };
3837 +
3838 + /*
3839 +@@ -2677,6 +2685,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
3840 + case ALC269_TYPE_ALC255:
3841 + case ALC269_TYPE_ALC256:
3842 + case ALC269_TYPE_ALC225:
3843 ++ case ALC269_TYPE_ALC294:
3844 + ssids = alc269_ssids;
3845 + break;
3846 + default:
3847 +@@ -3690,6 +3699,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
3848 + alc_process_coef_fw(codec, coef0668);
3849 + break;
3850 + case 0x10ec0225:
3851 ++ case 0x10ec0295:
3852 + alc_process_coef_fw(codec, coef0225);
3853 + break;
3854 + }
3855 +@@ -3790,6 +3800,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
3856 + snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
3857 + break;
3858 + case 0x10ec0225:
3859 ++ case 0x10ec0295:
3860 + alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x31<<10);
3861 + snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
3862 + alc_process_coef_fw(codec, coef0225);
3863 +@@ -3847,6 +3858,7 @@ static void alc_headset_mode_default(struct hda_codec *codec)
3864 +
3865 + switch (codec->core.vendor_id) {
3866 + case 0x10ec0225:
3867 ++ case 0x10ec0295:
3868 + alc_process_coef_fw(codec, coef0225);
3869 + break;
3870 + case 0x10ec0255:
3871 +@@ -3950,6 +3962,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
3872 + alc_process_coef_fw(codec, coef0688);
3873 + break;
3874 + case 0x10ec0225:
3875 ++ case 0x10ec0295:
3876 + alc_process_coef_fw(codec, coef0225);
3877 + break;
3878 + }
3879 +@@ -4031,6 +4044,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
3880 + alc_process_coef_fw(codec, coef0688);
3881 + break;
3882 + case 0x10ec0225:
3883 ++ case 0x10ec0295:
3884 + alc_process_coef_fw(codec, coef0225);
3885 + break;
3886 + }
3887 +@@ -4114,6 +4128,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
3888 + is_ctia = (val & 0x1c02) == 0x1c02;
3889 + break;
3890 + case 0x10ec0225:
3891 ++ case 0x10ec0295:
3892 + alc_process_coef_fw(codec, coef0225);
3893 + msleep(800);
3894 + val = alc_read_coef_idx(codec, 0x46);
3895 +@@ -5459,8 +5474,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3896 + SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
3897 + SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
3898 + SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
3899 +- SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
3900 ++ SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
3901 + SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
3902 ++ SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
3903 + SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
3904 + SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
3905 + SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
3906 +@@ -5704,6 +5720,9 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
3907 + {0x14, 0x90170110},
3908 + {0x21, 0x02211020}),
3909 + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
3910 ++ {0x14, 0x90170130},
3911 ++ {0x21, 0x02211040}),
3912 ++ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
3913 + {0x12, 0x90a60140},
3914 + {0x14, 0x90170110},
3915 + {0x21, 0x02211020}),
3916 +@@ -6026,8 +6045,14 @@ static int patch_alc269(struct hda_codec *codec)
3917 + alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
3918 + break;
3919 + case 0x10ec0225:
3920 ++ case 0x10ec0295:
3921 + spec->codec_variant = ALC269_TYPE_ALC225;
3922 + break;
3923 ++ case 0x10ec0234:
3924 ++ case 0x10ec0274:
3925 ++ case 0x10ec0294:
3926 ++ spec->codec_variant = ALC269_TYPE_ALC294;
3927 ++ break;
3928 + }
3929 +
3930 + if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) {
3931 +@@ -6942,6 +6967,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
3932 + HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269),
3933 + HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269),
3934 + HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
3935 ++ HDA_CODEC_ENTRY(0x10ec0234, "ALC234", patch_alc269),
3936 + HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269),
3937 + HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269),
3938 + HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269),
3939 +@@ -6952,6 +6978,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
3940 + HDA_CODEC_ENTRY(0x10ec0269, "ALC269", patch_alc269),
3941 + HDA_CODEC_ENTRY(0x10ec0270, "ALC270", patch_alc269),
3942 + HDA_CODEC_ENTRY(0x10ec0272, "ALC272", patch_alc662),
3943 ++ HDA_CODEC_ENTRY(0x10ec0274, "ALC274", patch_alc269),
3944 + HDA_CODEC_ENTRY(0x10ec0275, "ALC275", patch_alc269),
3945 + HDA_CODEC_ENTRY(0x10ec0276, "ALC276", patch_alc269),
3946 + HDA_CODEC_ENTRY(0x10ec0280, "ALC280", patch_alc269),
3947 +@@ -6964,6 +6991,8 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
3948 + HDA_CODEC_ENTRY(0x10ec0290, "ALC290", patch_alc269),
3949 + HDA_CODEC_ENTRY(0x10ec0292, "ALC292", patch_alc269),
3950 + HDA_CODEC_ENTRY(0x10ec0293, "ALC293", patch_alc269),
3951 ++ HDA_CODEC_ENTRY(0x10ec0294, "ALC294", patch_alc269),
3952 ++ HDA_CODEC_ENTRY(0x10ec0295, "ALC295", patch_alc269),
3953 + HDA_CODEC_ENTRY(0x10ec0298, "ALC298", patch_alc269),
3954 + HDA_CODEC_REV_ENTRY(0x10ec0861, 0x100340, "ALC660", patch_alc861),
3955 + HDA_CODEC_ENTRY(0x10ec0660, "ALC660-VD", patch_alc861vd),
3956 +diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c
3957 +index ec16f7812c8b..6ebfdee3e2c6 100644
3958 +--- a/tools/perf/tests/bpf.c
3959 ++++ b/tools/perf/tests/bpf.c
3960 +@@ -146,7 +146,7 @@ prepare_bpf(void *obj_buf, size_t obj_buf_sz, const char *name)
3961 + return obj;
3962 + }
3963 +
3964 +-static int __test__bpf(int index)
3965 ++static int __test__bpf(int idx)
3966 + {
3967 + int ret;
3968 + void *obj_buf;
3969 +@@ -154,27 +154,27 @@ static int __test__bpf(int index)
3970 + struct bpf_object *obj;
3971 +
3972 + ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
3973 +- bpf_testcase_table[index].prog_id,
3974 ++ bpf_testcase_table[idx].prog_id,
3975 + true);
3976 + if (ret != TEST_OK || !obj_buf || !obj_buf_sz) {
3977 + pr_debug("Unable to get BPF object, %s\n",
3978 +- bpf_testcase_table[index].msg_compile_fail);
3979 +- if (index == 0)
3980 ++ bpf_testcase_table[idx].msg_compile_fail);
3981 ++ if (idx == 0)
3982 + return TEST_SKIP;
3983 + else
3984 + return TEST_FAIL;
3985 + }
3986 +
3987 + obj = prepare_bpf(obj_buf, obj_buf_sz,
3988 +- bpf_testcase_table[index].name);
3989 ++ bpf_testcase_table[idx].name);
3990 + if (!obj) {
3991 + ret = TEST_FAIL;
3992 + goto out;
3993 + }
3994 +
3995 + ret = do_test(obj,
3996 +- bpf_testcase_table[index].target_func,
3997 +- bpf_testcase_table[index].expect_result);
3998 ++ bpf_testcase_table[idx].target_func,
3999 ++ bpf_testcase_table[idx].expect_result);
4000 + out:
4001 + bpf__clear();
4002 + return ret;
4003 +diff --git a/tools/perf/tests/llvm.c b/tools/perf/tests/llvm.c
4004 +index bc4cf507cde5..366e38ba8b49 100644
4005 +--- a/tools/perf/tests/llvm.c
4006 ++++ b/tools/perf/tests/llvm.c
4007 +@@ -50,7 +50,7 @@ static struct {
4008 + int
4009 + test_llvm__fetch_bpf_obj(void **p_obj_buf,
4010 + size_t *p_obj_buf_sz,
4011 +- enum test_llvm__testcase index,
4012 ++ enum test_llvm__testcase idx,
4013 + bool force)
4014 + {
4015 + const char *source;
4016 +@@ -59,11 +59,11 @@ test_llvm__fetch_bpf_obj(void **p_obj_buf,
4017 + char *tmpl_new = NULL, *clang_opt_new = NULL;
4018 + int err, old_verbose, ret = TEST_FAIL;
4019 +
4020 +- if (index >= __LLVM_TESTCASE_MAX)
4021 ++ if (idx >= __LLVM_TESTCASE_MAX)
4022 + return TEST_FAIL;
4023 +
4024 +- source = bpf_source_table[index].source;
4025 +- desc = bpf_source_table[index].desc;
4026 ++ source = bpf_source_table[idx].source;
4027 ++ desc = bpf_source_table[idx].desc;
4028 +
4029 + perf_config(perf_config_cb, NULL);
4030 +