From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Thu, 09 Feb 2017 11:11:54
Message-Id: 1486638694.b6cc76d171506ea5f54f296e60c2abad9a0ad920.alicef@gentoo
1 commit: b6cc76d171506ea5f54f296e60c2abad9a0ad920
2 Author: Alice Ferrazzi <alicef@gentoo.org>
3 AuthorDate: Thu Feb 9 11:11:34 2017 +0000
4 Commit: Alice Ferrazzi <alicef@gentoo.org>
5 CommitDate: Thu Feb 9 11:11:34 2017 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b6cc76d1
7
8 Linux patch 4.9.9
9
10 0000_README | 4 +
11 1008_linux-4.9.9.patch | 2333 ++++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 2337 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index c9cccee..3bab163 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -75,6 +75,10 @@ Patch: 1007_linux-4.9.8.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.9.8
21
22 +Patch: 1008_linux-4.9.9.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.9.9
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1008_linux-4.9.9.patch b/1008_linux-4.9.9.patch
31 new file mode 100644
32 index 0000000..393ec22
33 --- /dev/null
34 +++ b/1008_linux-4.9.9.patch
35 @@ -0,0 +1,2333 @@
36 +diff --git a/Makefile b/Makefile
37 +index 1130803ab93c..c0c41c9fac0c 100644
38 +--- a/Makefile
39 ++++ b/Makefile
40 +@@ -1,6 +1,6 @@
41 + VERSION = 4
42 + PATCHLEVEL = 9
43 +-SUBLEVEL = 8
44 ++SUBLEVEL = 9
45 + EXTRAVERSION =
46 + NAME = Roaring Lionus
47 +
48 +diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
49 +index c53dbeae79f2..838dad5c209f 100644
50 +--- a/arch/arm64/crypto/aes-modes.S
51 ++++ b/arch/arm64/crypto/aes-modes.S
52 +@@ -193,15 +193,16 @@ AES_ENTRY(aes_cbc_encrypt)
53 + cbz w6, .Lcbcencloop
54 +
55 + ld1 {v0.16b}, [x5] /* get iv */
56 +- enc_prepare w3, x2, x5
57 ++ enc_prepare w3, x2, x6
58 +
59 + .Lcbcencloop:
60 + ld1 {v1.16b}, [x1], #16 /* get next pt block */
61 + eor v0.16b, v0.16b, v1.16b /* ..and xor with iv */
62 +- encrypt_block v0, w3, x2, x5, w6
63 ++ encrypt_block v0, w3, x2, x6, w7
64 + st1 {v0.16b}, [x0], #16
65 + subs w4, w4, #1
66 + bne .Lcbcencloop
67 ++ st1 {v0.16b}, [x5] /* return iv */
68 + ret
69 + AES_ENDPROC(aes_cbc_encrypt)
70 +
71 +@@ -211,7 +212,7 @@ AES_ENTRY(aes_cbc_decrypt)
72 + cbz w6, .LcbcdecloopNx
73 +
74 + ld1 {v7.16b}, [x5] /* get iv */
75 +- dec_prepare w3, x2, x5
76 ++ dec_prepare w3, x2, x6
77 +
78 + .LcbcdecloopNx:
79 + #if INTERLEAVE >= 2
80 +@@ -248,7 +249,7 @@ AES_ENTRY(aes_cbc_decrypt)
81 + .Lcbcdecloop:
82 + ld1 {v1.16b}, [x1], #16 /* get next ct block */
83 + mov v0.16b, v1.16b /* ...and copy to v0 */
84 +- decrypt_block v0, w3, x2, x5, w6
85 ++ decrypt_block v0, w3, x2, x6, w7
86 + eor v0.16b, v0.16b, v7.16b /* xor with iv => pt */
87 + mov v7.16b, v1.16b /* ct is next iv */
88 + st1 {v0.16b}, [x0], #16
89 +@@ -256,6 +257,7 @@ AES_ENTRY(aes_cbc_decrypt)
90 + bne .Lcbcdecloop
91 + .Lcbcdecout:
92 + FRAME_POP
93 ++ st1 {v7.16b}, [x5] /* return iv */
94 + ret
95 + AES_ENDPROC(aes_cbc_decrypt)
96 +
97 +@@ -267,24 +269,15 @@ AES_ENDPROC(aes_cbc_decrypt)
98 +
99 + AES_ENTRY(aes_ctr_encrypt)
100 + FRAME_PUSH
101 +- cbnz w6, .Lctrfirst /* 1st time around? */
102 +- umov x5, v4.d[1] /* keep swabbed ctr in reg */
103 +- rev x5, x5
104 +-#if INTERLEAVE >= 2
105 +- cmn w5, w4 /* 32 bit overflow? */
106 +- bcs .Lctrinc
107 +- add x5, x5, #1 /* increment BE ctr */
108 +- b .LctrincNx
109 +-#else
110 +- b .Lctrinc
111 +-#endif
112 +-.Lctrfirst:
113 ++ cbz w6, .Lctrnotfirst /* 1st time around? */
114 + enc_prepare w3, x2, x6
115 + ld1 {v4.16b}, [x5]
116 +- umov x5, v4.d[1] /* keep swabbed ctr in reg */
117 +- rev x5, x5
118 ++
119 ++.Lctrnotfirst:
120 ++ umov x8, v4.d[1] /* keep swabbed ctr in reg */
121 ++ rev x8, x8
122 + #if INTERLEAVE >= 2
123 +- cmn w5, w4 /* 32 bit overflow? */
124 ++ cmn w8, w4 /* 32 bit overflow? */
125 + bcs .Lctrloop
126 + .LctrloopNx:
127 + subs w4, w4, #INTERLEAVE
128 +@@ -292,11 +285,11 @@ AES_ENTRY(aes_ctr_encrypt)
129 + #if INTERLEAVE == 2
130 + mov v0.8b, v4.8b
131 + mov v1.8b, v4.8b
132 +- rev x7, x5
133 +- add x5, x5, #1
134 ++ rev x7, x8
135 ++ add x8, x8, #1
136 + ins v0.d[1], x7
137 +- rev x7, x5
138 +- add x5, x5, #1
139 ++ rev x7, x8
140 ++ add x8, x8, #1
141 + ins v1.d[1], x7
142 + ld1 {v2.16b-v3.16b}, [x1], #32 /* get 2 input blocks */
143 + do_encrypt_block2x
144 +@@ -305,7 +298,7 @@ AES_ENTRY(aes_ctr_encrypt)
145 + st1 {v0.16b-v1.16b}, [x0], #32
146 + #else
147 + ldr q8, =0x30000000200000001 /* addends 1,2,3[,0] */
148 +- dup v7.4s, w5
149 ++ dup v7.4s, w8
150 + mov v0.16b, v4.16b
151 + add v7.4s, v7.4s, v8.4s
152 + mov v1.16b, v4.16b
153 +@@ -323,18 +316,12 @@ AES_ENTRY(aes_ctr_encrypt)
154 + eor v2.16b, v7.16b, v2.16b
155 + eor v3.16b, v5.16b, v3.16b
156 + st1 {v0.16b-v3.16b}, [x0], #64
157 +- add x5, x5, #INTERLEAVE
158 ++ add x8, x8, #INTERLEAVE
159 + #endif
160 +- cbz w4, .LctroutNx
161 +-.LctrincNx:
162 +- rev x7, x5
163 ++ rev x7, x8
164 + ins v4.d[1], x7
165 ++ cbz w4, .Lctrout
166 + b .LctrloopNx
167 +-.LctroutNx:
168 +- sub x5, x5, #1
169 +- rev x7, x5
170 +- ins v4.d[1], x7
171 +- b .Lctrout
172 + .Lctr1x:
173 + adds w4, w4, #INTERLEAVE
174 + beq .Lctrout
175 +@@ -342,30 +329,39 @@ AES_ENTRY(aes_ctr_encrypt)
176 + .Lctrloop:
177 + mov v0.16b, v4.16b
178 + encrypt_block v0, w3, x2, x6, w7
179 ++
180 ++ adds x8, x8, #1 /* increment BE ctr */
181 ++ rev x7, x8
182 ++ ins v4.d[1], x7
183 ++ bcs .Lctrcarry /* overflow? */
184 ++
185 ++.Lctrcarrydone:
186 + subs w4, w4, #1
187 + bmi .Lctrhalfblock /* blocks < 0 means 1/2 block */
188 + ld1 {v3.16b}, [x1], #16
189 + eor v3.16b, v0.16b, v3.16b
190 + st1 {v3.16b}, [x0], #16
191 +- beq .Lctrout
192 +-.Lctrinc:
193 +- adds x5, x5, #1 /* increment BE ctr */
194 +- rev x7, x5
195 +- ins v4.d[1], x7
196 +- bcc .Lctrloop /* no overflow? */
197 +- umov x7, v4.d[0] /* load upper word of ctr */
198 +- rev x7, x7 /* ... to handle the carry */
199 +- add x7, x7, #1
200 +- rev x7, x7
201 +- ins v4.d[0], x7
202 +- b .Lctrloop
203 ++ bne .Lctrloop
204 ++
205 ++.Lctrout:
206 ++ st1 {v4.16b}, [x5] /* return next CTR value */
207 ++ FRAME_POP
208 ++ ret
209 ++
210 + .Lctrhalfblock:
211 + ld1 {v3.8b}, [x1]
212 + eor v3.8b, v0.8b, v3.8b
213 + st1 {v3.8b}, [x0]
214 +-.Lctrout:
215 + FRAME_POP
216 + ret
217 ++
218 ++.Lctrcarry:
219 ++ umov x7, v4.d[0] /* load upper word of ctr */
220 ++ rev x7, x7 /* ... to handle the carry */
221 ++ add x7, x7, #1
222 ++ rev x7, x7
223 ++ ins v4.d[0], x7
224 ++ b .Lctrcarrydone
225 + AES_ENDPROC(aes_ctr_encrypt)
226 + .ltorg
227 +
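[Editorial note: the aes-modes.S rework above moves the byte-swapped CTR counter from x5 to x8 so x5 can keep pointing at the IV buffer, and adds an explicit .Lctrcarry path that propagates a carry out of the low 64 bits into the high word. A minimal userspace C sketch of that carry handling, assuming the 128-bit counter is stored big-endian as two 64-bit words (names here are illustrative, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    /* Increment a 128-bit big-endian counter held as two BE 64-bit words. */
    static void ctr_inc(uint64_t be_ctr[2])
    {
        uint64_t lo = __builtin_bswap64(be_ctr[1]); /* swab low word into CPU order */
        lo++;
        be_ctr[1] = __builtin_bswap64(lo);
        if (lo == 0) {                              /* carry into the high word */
            uint64_t hi = __builtin_bswap64(be_ctr[0]) + 1;
            be_ctr[0] = __builtin_bswap64(hi);
        }
    }

    int main(void)
    {
        uint64_t ctr[2] = { 0, __builtin_bswap64(UINT64_MAX) };
        ctr_inc(ctr);   /* low word wraps to 0, high word becomes 1 */
        printf("%016llx%016llx\n",
               (unsigned long long)__builtin_bswap64(ctr[0]),
               (unsigned long long)__builtin_bswap64(ctr[1]));
        return 0;
    }

The final st1 {v4.16b}, [x5] in .Lctrout is what writes the incremented counter back, so a multi-call CTR operation resumes from the right value.]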
228 +diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h
229 +index b312b152461b..6e834caa3720 100644
230 +--- a/arch/powerpc/include/asm/cpu_has_feature.h
231 ++++ b/arch/powerpc/include/asm/cpu_has_feature.h
232 +@@ -23,7 +23,9 @@ static __always_inline bool cpu_has_feature(unsigned long feature)
233 + {
234 + int i;
235 +
236 ++#ifndef __clang__ /* clang can't cope with this */
237 + BUILD_BUG_ON(!__builtin_constant_p(feature));
238 ++#endif
239 +
240 + #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
241 + if (!static_key_initialized) {
242 +diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
243 +index e311c25751a4..a244e09d2d88 100644
244 +--- a/arch/powerpc/include/asm/mmu.h
245 ++++ b/arch/powerpc/include/asm/mmu.h
246 +@@ -160,7 +160,9 @@ static __always_inline bool mmu_has_feature(unsigned long feature)
247 + {
248 + int i;
249 +
250 ++#ifndef __clang__ /* clang can't cope with this */
251 + BUILD_BUG_ON(!__builtin_constant_p(feature));
252 ++#endif
253 +
254 + #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
255 + if (!static_key_initialized) {
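[Editorial note: both powerpc hunks above wrap BUILD_BUG_ON(!__builtin_constant_p(feature)) in #ifndef __clang__ because clang evaluates __builtin_constant_p earlier than GCC and may report 0 for an inlined parameter. A small sketch of that compiler difference; the printed value is deliberately compiler- and optimization-level-dependent:

    #include <stdio.h>

    static inline __attribute__((always_inline)) int has_feature(unsigned long f)
    {
        /* GCC typically folds this to 1 after inlining with a literal
         * argument; clang decides earlier and may report 0, which is why
         * the kernel guards the BUILD_BUG_ON with #ifndef __clang__. */
        return __builtin_constant_p(f);
    }

    int main(void)
    {
        printf("constant_p = %d\n", has_feature(42));
        return 0;
    }
]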
256 +diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
257 +index 5c31369435f2..a5dd493670a0 100644
258 +--- a/arch/powerpc/kernel/eeh_driver.c
259 ++++ b/arch/powerpc/kernel/eeh_driver.c
260 +@@ -545,7 +545,7 @@ static void *eeh_pe_detach_dev(void *data, void *userdata)
261 + static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
262 + {
263 + struct eeh_pe *pe = (struct eeh_pe *)data;
264 +- bool *clear_sw_state = flag;
265 ++ bool clear_sw_state = *(bool *)flag;
266 + int i, rc = 1;
267 +
268 + for (i = 0; rc && i < 3; i++)
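[Editorial note: the eeh_driver.c fix is a classic void*-callback bug: the old code bound the callback argument to a bool pointer and then tested the pointer, which is true whenever the caller passed any address, regardless of the pointed-to value. A standalone C demo of the difference (illustrative names):

    #include <stdbool.h>
    #include <stdio.h>

    static void walker(void *data, void *flag)
    {
        bool as_pointer_truth = (flag != NULL);  /* old code's effective test */
        bool clear_sw_state   = *(bool *)flag;   /* fixed code: dereference */
        printf("pointer-as-flag=%d dereferenced=%d\n",
               as_pointer_truth, clear_sw_state);
        (void)data;
    }

    int main(void)
    {
        bool flag = false;
        walker(NULL, &flag); /* old logic sees "true", fixed logic sees false */
        return 0;
    }
]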
269 +diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
270 +index 88ac964f4858..1e8c57207346 100644
271 +--- a/arch/powerpc/kernel/prom_init.c
272 ++++ b/arch/powerpc/kernel/prom_init.c
273 +@@ -2747,6 +2747,9 @@ static void __init prom_find_boot_cpu(void)
274 +
275 + cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
276 +
277 ++ if (!PHANDLE_VALID(cpu_pkg))
278 ++ return;
279 ++
280 + prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
281 + prom.cpu = be32_to_cpu(rval);
282 +
283 +diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
284 +index ebb7f46f0532..9a25dce87875 100644
285 +--- a/arch/powerpc/mm/pgtable-radix.c
286 ++++ b/arch/powerpc/mm/pgtable-radix.c
287 +@@ -65,7 +65,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
288 + if (!pmdp)
289 + return -ENOMEM;
290 + if (map_page_size == PMD_SIZE) {
291 +- ptep = (pte_t *)pudp;
292 ++ ptep = pmdp_ptep(pmdp);
293 + goto set_the_pte;
294 + }
295 + ptep = pte_alloc_kernel(pmdp, ea);
296 +@@ -90,7 +90,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
297 + }
298 + pmdp = pmd_offset(pudp, ea);
299 + if (map_page_size == PMD_SIZE) {
300 +- ptep = (pte_t *)pudp;
301 ++ ptep = pmdp_ptep(pmdp);
302 + goto set_the_pte;
303 + }
304 + if (!pmd_present(*pmdp)) {
305 +diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
306 +index dbaaf7dc8373..19d646a783fd 100644
307 +--- a/arch/x86/events/intel/uncore.c
308 ++++ b/arch/x86/events/intel/uncore.c
309 +@@ -763,30 +763,6 @@ static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
310 + pmu->registered = false;
311 + }
312 +
313 +-static void __uncore_exit_boxes(struct intel_uncore_type *type, int cpu)
314 +-{
315 +- struct intel_uncore_pmu *pmu = type->pmus;
316 +- struct intel_uncore_box *box;
317 +- int i, pkg;
318 +-
319 +- if (pmu) {
320 +- pkg = topology_physical_package_id(cpu);
321 +- for (i = 0; i < type->num_boxes; i++, pmu++) {
322 +- box = pmu->boxes[pkg];
323 +- if (box)
324 +- uncore_box_exit(box);
325 +- }
326 +- }
327 +-}
328 +-
329 +-static void uncore_exit_boxes(void *dummy)
330 +-{
331 +- struct intel_uncore_type **types;
332 +-
333 +- for (types = uncore_msr_uncores; *types; types++)
334 +- __uncore_exit_boxes(*types++, smp_processor_id());
335 +-}
336 +-
337 + static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
338 + {
339 + int pkg;
340 +@@ -1077,22 +1053,12 @@ static int uncore_cpu_dying(unsigned int cpu)
341 + return 0;
342 + }
343 +
344 +-static int first_init;
345 +-
346 + static int uncore_cpu_starting(unsigned int cpu)
347 + {
348 + struct intel_uncore_type *type, **types = uncore_msr_uncores;
349 + struct intel_uncore_pmu *pmu;
350 + struct intel_uncore_box *box;
351 +- int i, pkg, ncpus = 1;
352 +-
353 +- if (first_init) {
354 +- /*
355 +- * On init we get the number of online cpus in the package
356 +- * and set refcount for all of them.
357 +- */
358 +- ncpus = cpumask_weight(topology_core_cpumask(cpu));
359 +- }
360 ++ int i, pkg;
361 +
362 + pkg = topology_logical_package_id(cpu);
363 + for (; *types; types++) {
364 +@@ -1103,7 +1069,7 @@ static int uncore_cpu_starting(unsigned int cpu)
365 + if (!box)
366 + continue;
367 + /* The first cpu on a package activates the box */
368 +- if (atomic_add_return(ncpus, &box->refcnt) == ncpus)
369 ++ if (atomic_inc_return(&box->refcnt) == 1)
370 + uncore_box_init(box);
371 + }
372 + }
373 +@@ -1407,19 +1373,17 @@ static int __init intel_uncore_init(void)
374 + "PERF_X86_UNCORE_PREP",
375 + uncore_cpu_prepare, NULL);
376 + }
377 +- first_init = 1;
378 ++
379 + cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
380 + "AP_PERF_X86_UNCORE_STARTING",
381 + uncore_cpu_starting, uncore_cpu_dying);
382 +- first_init = 0;
383 ++
384 + cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
385 + "AP_PERF_X86_UNCORE_ONLINE",
386 + uncore_event_cpu_online, uncore_event_cpu_offline);
387 + return 0;
388 +
389 + err:
390 +- /* Undo box->init_box() */
391 +- on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
392 + uncore_types_exit(uncore_msr_uncores);
393 + uncore_pci_exit();
394 + return ret;
395 +diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
396 +index 3d8ff40ecc6f..7249f1500bcb 100644
397 +--- a/arch/x86/kernel/apic/io_apic.c
398 ++++ b/arch/x86/kernel/apic/io_apic.c
399 +@@ -2118,6 +2118,7 @@ static inline void __init check_timer(void)
400 + if (idx != -1 && irq_trigger(idx))
401 + unmask_ioapic_irq(irq_get_chip_data(0));
402 + }
403 ++ irq_domain_deactivate_irq(irq_data);
404 + irq_domain_activate_irq(irq_data);
405 + if (timer_irq_works()) {
406 + if (disable_timer_pin_1 > 0)
407 +@@ -2139,6 +2140,7 @@ static inline void __init check_timer(void)
408 + * legacy devices should be connected to IO APIC #0
409 + */
410 + replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
411 ++ irq_domain_deactivate_irq(irq_data);
412 + irq_domain_activate_irq(irq_data);
413 + legacy_pic->unmask(0);
414 + if (timer_irq_works()) {
415 +diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
416 +index 274fab99169d..932348fbb6ea 100644
417 +--- a/arch/x86/kernel/hpet.c
418 ++++ b/arch/x86/kernel/hpet.c
419 +@@ -352,6 +352,7 @@ static int hpet_resume(struct clock_event_device *evt, int timer)
420 + } else {
421 + struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
422 +
423 ++ irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq));
424 + irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
425 + disable_irq(hdev->irq);
426 + irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
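[Editorial note: the two io_apic.c hunks and this hpet.c hunk apply the same pattern: irq_domain_activate_irq() is a no-op once the IRQ is already marked active, so the retry/resume path never reprograms the hardware unless the IRQ is deactivated first. A toy C sketch of that idempotent-activate pitfall (illustrative names, not the kernel API):

    #include <stdbool.h>
    #include <stdio.h>

    struct irqd { bool activated; int programmed; };

    static void activate(struct irqd *d)
    {
        if (d->activated)      /* idempotent: already active => no-op */
            return;
        d->activated = true;
        d->programmed++;       /* stands in for writing the hardware */
    }

    static void deactivate(struct irqd *d) { d->activated = false; }

    int main(void)
    {
        struct irqd d = { 0 };
        activate(&d);          /* programs once */
        activate(&d);          /* silently skipped: hardware may be stale */
        deactivate(&d);        /* the fix: force a clean state first */
        activate(&d);          /* reprograms */
        printf("programmed %d times\n", d.programmed); /* 2 */
        return 0;
    }
]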
427 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
428 +index 487b957e7802..731044efb195 100644
429 +--- a/arch/x86/kvm/x86.c
430 ++++ b/arch/x86/kvm/x86.c
431 +@@ -3148,6 +3148,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
432 + memcpy(dest, xsave, XSAVE_HDR_OFFSET);
433 +
434 + /* Set XSTATE_BV */
435 ++ xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE;
436 + *(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
437 +
438 + /*
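[Editorial note: the x86.c hunk sanitizes a guest-influenced feature bitmap by masking it against what the guest is actually allowed to use before writing it into the XSAVE header. A minimal sketch of that masking idiom; the bit values are illustrative, not the real XCR0 layout:

    #include <stdint.h>
    #include <stdio.h>

    #define XFEATURE_MASK_FPSSE 0x3ULL  /* x87 + SSE, always permitted */

    int main(void)
    {
        uint64_t guest_supported_xcr0 = 0x7; /* e.g. FP, SSE, AVX */
        uint64_t xstate_bv = 0xff;           /* bits an untrusted source set */

        /* Only let through features the guest may actually use. */
        xstate_bv &= guest_supported_xcr0 | XFEATURE_MASK_FPSSE;
        printf("sanitized XSTATE_BV = %#llx\n",
               (unsigned long long)xstate_bv); /* 0x7 */
        return 0;
    }
]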
439 +diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
440 +index 319148bd4b05..2f25a363068c 100644
441 +--- a/arch/x86/platform/efi/efi_64.c
442 ++++ b/arch/x86/platform/efi/efi_64.c
443 +@@ -269,6 +269,22 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
444 + efi_scratch.use_pgd = true;
445 +
446 + /*
447 ++ * Certain firmware versions are way too sentimential and still believe
448 ++ * they are exclusive and unquestionable owners of the first physical page,
449 ++ * even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY
450 ++ * (but then write-access it later during SetVirtualAddressMap()).
451 ++ *
452 ++ * Create a 1:1 mapping for this page, to avoid triple faults during early
453 ++ * boot with such firmware. We are free to hand this page to the BIOS,
454 ++ * as trim_bios_range() will reserve the first page and isolate it away
455 ++ * from memory allocators anyway.
456 ++ */
457 ++ if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, _PAGE_RW)) {
458 ++ pr_err("Failed to create 1:1 mapping for the first page!\n");
459 ++ return 1;
460 ++ }
461 ++
462 ++ /*
463 + * When making calls to the firmware everything needs to be 1:1
464 + * mapped and addressable with 32-bit pointers. Map the kernel
465 + * text and allocate a new stack because we can't rely on the
466 +diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
467 +index 88a044af7504..32cdc2c52e98 100644
468 +--- a/arch/xtensa/kernel/setup.c
469 ++++ b/arch/xtensa/kernel/setup.c
470 +@@ -540,7 +540,7 @@ subsys_initcall(topology_init);
471 +
472 + void cpu_reset(void)
473 + {
474 +-#if XCHAL_HAVE_PTP_MMU
475 ++#if XCHAL_HAVE_PTP_MMU && IS_ENABLED(CONFIG_MMU)
476 + local_irq_disable();
477 + /*
478 + * We have full MMU: all autoload ways, ways 7, 8 and 9 of DTLB must
479 +diff --git a/crypto/algapi.c b/crypto/algapi.c
480 +index df939b54b09f..1fad2a6b3bbb 100644
481 +--- a/crypto/algapi.c
482 ++++ b/crypto/algapi.c
483 +@@ -356,6 +356,7 @@ int crypto_register_alg(struct crypto_alg *alg)
484 + struct crypto_larval *larval;
485 + int err;
486 +
487 ++ alg->cra_flags &= ~CRYPTO_ALG_DEAD;
488 + err = crypto_check_alg(alg);
489 + if (err)
490 + return err;
491 +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
492 +index 223a770f78f3..33e363dcc63b 100644
493 +--- a/drivers/ata/libata-core.c
494 ++++ b/drivers/ata/libata-core.c
495 +@@ -1695,6 +1695,8 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
496 +
497 + if (qc->err_mask & ~AC_ERR_OTHER)
498 + qc->err_mask &= ~AC_ERR_OTHER;
499 ++ } else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
500 ++ qc->result_tf.command |= ATA_SENSE;
501 + }
502 +
503 + /* finish up */
504 +@@ -4316,10 +4318,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
505 + { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
506 +
507 + /*
508 +- * Device times out with higher max sects.
509 ++ * These devices time out with higher max sects.
510 + * https://bugzilla.kernel.org/show_bug.cgi?id=121671
511 + */
512 +- { "LITEON CX1-JB256-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
513 ++ { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
514 +
515 + /* Devices we expect to fail diagnostics */
516 +
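[Editorial note: the blacklist change widens one exact model string to a glob so every LITEON CX1-JB capacity variant gets the MAX_SEC_1024 horkage. libata matches model strings with its own glob_match(), but fnmatch(3) shows the same idea; the extra model strings below are illustrative:

    #include <fnmatch.h>
    #include <stdio.h>

    int main(void)
    {
        const char *pattern = "LITEON CX1-JB*-HP";
        const char *models[] = { "LITEON CX1-JB256-HP",
                                 "LITEON CX1-JB512-HP",
                                 "LITEON CX2-8B256-HP" };
        for (int i = 0; i < 3; i++)
            printf("%-22s %s\n", models[i],
                   fnmatch(pattern, models[i], 0) == 0 ? "quirk applies"
                                                       : "no match");
        return 0;
    }
]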
517 +diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
518 +index 823e938c9a78..2f32782cea6d 100644
519 +--- a/drivers/ata/sata_mv.c
520 ++++ b/drivers/ata/sata_mv.c
521 +@@ -4132,6 +4132,9 @@ static int mv_platform_probe(struct platform_device *pdev)
522 + host->iomap = NULL;
523 + hpriv->base = devm_ioremap(&pdev->dev, res->start,
524 + resource_size(res));
525 ++ if (!hpriv->base)
526 ++ return -ENOMEM;
527 ++
528 + hpriv->base -= SATAHC0_REG_BASE;
529 +
530 + hpriv->clk = clk_get(&pdev->dev, NULL);
531 +diff --git a/drivers/base/memory.c b/drivers/base/memory.c
532 +index e7f86a8887d2..c5cdd190b781 100644
533 +--- a/drivers/base/memory.c
534 ++++ b/drivers/base/memory.c
535 +@@ -391,33 +391,33 @@ static ssize_t show_valid_zones(struct device *dev,
536 + {
537 + struct memory_block *mem = to_memory_block(dev);
538 + unsigned long start_pfn, end_pfn;
539 ++ unsigned long valid_start, valid_end, valid_pages;
540 + unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
541 +- struct page *first_page;
542 + struct zone *zone;
543 + int zone_shift = 0;
544 +
545 + start_pfn = section_nr_to_pfn(mem->start_section_nr);
546 + end_pfn = start_pfn + nr_pages;
547 +- first_page = pfn_to_page(start_pfn);
548 +
549 + /* The block contains more than one zone can not be offlined. */
550 +- if (!test_pages_in_a_zone(start_pfn, end_pfn))
551 ++ if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
552 + return sprintf(buf, "none\n");
553 +
554 +- zone = page_zone(first_page);
555 ++ zone = page_zone(pfn_to_page(valid_start));
556 ++ valid_pages = valid_end - valid_start;
557 +
558 + /* MMOP_ONLINE_KEEP */
559 + sprintf(buf, "%s", zone->name);
560 +
561 + /* MMOP_ONLINE_KERNEL */
562 +- zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL, &zone_shift);
563 ++ zone_can_shift(valid_start, valid_pages, ZONE_NORMAL, &zone_shift);
564 + if (zone_shift) {
565 + strcat(buf, " ");
566 + strcat(buf, (zone + zone_shift)->name);
567 + }
568 +
569 + /* MMOP_ONLINE_MOVABLE */
570 +- zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE, &zone_shift);
571 ++ zone_can_shift(valid_start, valid_pages, ZONE_MOVABLE, &zone_shift);
572 + if (zone_shift) {
573 + strcat(buf, " ");
574 + strcat(buf, (zone + zone_shift)->name);
575 +diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
576 +index f642c4264c27..168fa175d65a 100644
577 +--- a/drivers/bcma/bcma_private.h
578 ++++ b/drivers/bcma/bcma_private.h
579 +@@ -45,6 +45,9 @@ int bcma_sprom_get(struct bcma_bus *bus);
580 + void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
581 + void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
582 + void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
583 ++#ifdef CONFIG_BCMA_DRIVER_MIPS
584 ++void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
585 ++#endif /* CONFIG_BCMA_DRIVER_MIPS */
586 +
587 + /* driver_chipcommon_b.c */
588 + int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb);
589 +diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
590 +index b4f6520e74f0..62f5bfa5065d 100644
591 +--- a/drivers/bcma/driver_chipcommon.c
592 ++++ b/drivers/bcma/driver_chipcommon.c
593 +@@ -15,8 +15,6 @@
594 + #include <linux/platform_device.h>
595 + #include <linux/bcma/bcma.h>
596 +
597 +-static void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
598 +-
599 + static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
600 + u32 mask, u32 value)
601 + {
602 +@@ -186,9 +184,6 @@ void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
603 + if (cc->capabilities & BCMA_CC_CAP_PMU)
604 + bcma_pmu_early_init(cc);
605 +
606 +- if (IS_BUILTIN(CONFIG_BCM47XX) && bus->hosttype == BCMA_HOSTTYPE_SOC)
607 +- bcma_chipco_serial_init(cc);
608 +-
609 + if (bus->hosttype == BCMA_HOSTTYPE_SOC)
610 + bcma_core_chipcommon_flash_detect(cc);
611 +
612 +@@ -378,9 +373,9 @@ u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value)
613 + return res;
614 + }
615 +
616 +-static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
617 ++#ifdef CONFIG_BCMA_DRIVER_MIPS
618 ++void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
619 + {
620 +-#if IS_BUILTIN(CONFIG_BCM47XX)
621 + unsigned int irq;
622 + u32 baud_base;
623 + u32 i;
624 +@@ -422,5 +417,5 @@ static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
625 + ports[i].baud_base = baud_base;
626 + ports[i].reg_shift = 0;
627 + }
628 +-#endif /* CONFIG_BCM47XX */
629 + }
630 ++#endif /* CONFIG_BCMA_DRIVER_MIPS */
631 +diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
632 +index 96f171328200..89af807cf29c 100644
633 +--- a/drivers/bcma/driver_mips.c
634 ++++ b/drivers/bcma/driver_mips.c
635 +@@ -278,9 +278,12 @@ static void bcma_core_mips_nvram_init(struct bcma_drv_mips *mcore)
636 +
637 + void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
638 + {
639 ++ struct bcma_bus *bus = mcore->core->bus;
640 ++
641 + if (mcore->early_setup_done)
642 + return;
643 +
644 ++ bcma_chipco_serial_init(&bus->drv_cc);
645 + bcma_core_mips_nvram_init(mcore);
646 +
647 + mcore->early_setup_done = true;
648 +diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
649 +index d5ba43a87a68..55c1782e3623 100644
650 +--- a/drivers/dma/cppi41.c
651 ++++ b/drivers/dma/cppi41.c
652 +@@ -153,6 +153,8 @@ struct cppi41_dd {
653 +
654 + /* context for suspend/resume */
655 + unsigned int dma_tdfdq;
656 ++
657 ++ bool is_suspended;
658 + };
659 +
660 + #define FIST_COMPLETION_QUEUE 93
661 +@@ -257,6 +259,10 @@ static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc)
662 + BUG_ON(desc_num >= ALLOC_DECS_NUM);
663 + c = cdd->chan_busy[desc_num];
664 + cdd->chan_busy[desc_num] = NULL;
665 ++
666 ++ /* Usecount for chan_busy[], paired with push_desc_queue() */
667 ++ pm_runtime_put(cdd->ddev.dev);
668 ++
669 + return c;
670 + }
671 +
672 +@@ -447,6 +453,15 @@ static void push_desc_queue(struct cppi41_channel *c)
673 + */
674 + __iowmb();
675 +
676 ++ /*
677 ++ * DMA transfers can take at least 200ms to complete with USB mass
678 ++ * storage connected. To prevent autosuspend timeouts, we must use
679 ++ * pm_runtime_get/put() when chan_busy[] is modified. This will get
680 ++ * cleared in desc_to_chan() or cppi41_stop_chan() depending on the
681 ++ * outcome of the transfer.
682 ++ */
683 ++ pm_runtime_get(cdd->ddev.dev);
684 ++
685 + desc_phys = lower_32_bits(c->desc_phys);
686 + desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
687 + WARN_ON(cdd->chan_busy[desc_num]);
688 +@@ -457,20 +472,26 @@ static void push_desc_queue(struct cppi41_channel *c)
689 + cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
690 + }
691 +
692 +-static void pending_desc(struct cppi41_channel *c)
693 ++/*
694 ++ * Caller must hold cdd->lock to prevent push_desc_queue()
695 ++ * getting called out of order. We have both cppi41_dma_issue_pending()
696 ++ * and cppi41_runtime_resume() call this function.
697 ++ */
698 ++static void cppi41_run_queue(struct cppi41_dd *cdd)
699 + {
700 +- struct cppi41_dd *cdd = c->cdd;
701 +- unsigned long flags;
702 ++ struct cppi41_channel *c, *_c;
703 +
704 +- spin_lock_irqsave(&cdd->lock, flags);
705 +- list_add_tail(&c->node, &cdd->pending);
706 +- spin_unlock_irqrestore(&cdd->lock, flags);
707 ++ list_for_each_entry_safe(c, _c, &cdd->pending, node) {
708 ++ push_desc_queue(c);
709 ++ list_del(&c->node);
710 ++ }
711 + }
712 +
713 + static void cppi41_dma_issue_pending(struct dma_chan *chan)
714 + {
715 + struct cppi41_channel *c = to_cpp41_chan(chan);
716 + struct cppi41_dd *cdd = c->cdd;
717 ++ unsigned long flags;
718 + int error;
719 +
720 + error = pm_runtime_get(cdd->ddev.dev);
721 +@@ -482,10 +503,11 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan)
722 + return;
723 + }
724 +
725 +- if (likely(pm_runtime_active(cdd->ddev.dev)))
726 +- push_desc_queue(c);
727 +- else
728 +- pending_desc(c);
729 ++ spin_lock_irqsave(&cdd->lock, flags);
730 ++ list_add_tail(&c->node, &cdd->pending);
731 ++ if (!cdd->is_suspended)
732 ++ cppi41_run_queue(cdd);
733 ++ spin_unlock_irqrestore(&cdd->lock, flags);
734 +
735 + pm_runtime_mark_last_busy(cdd->ddev.dev);
736 + pm_runtime_put_autosuspend(cdd->ddev.dev);
737 +@@ -705,6 +727,9 @@ static int cppi41_stop_chan(struct dma_chan *chan)
738 + WARN_ON(!cdd->chan_busy[desc_num]);
739 + cdd->chan_busy[desc_num] = NULL;
740 +
741 ++ /* Usecount for chan_busy[], paired with push_desc_queue() */
742 ++ pm_runtime_put(cdd->ddev.dev);
743 ++
744 + return 0;
745 + }
746 +
747 +@@ -1150,8 +1175,12 @@ static int __maybe_unused cppi41_resume(struct device *dev)
748 + static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
749 + {
750 + struct cppi41_dd *cdd = dev_get_drvdata(dev);
751 ++ unsigned long flags;
752 +
753 ++ spin_lock_irqsave(&cdd->lock, flags);
754 ++ cdd->is_suspended = true;
755 + WARN_ON(!list_empty(&cdd->pending));
756 ++ spin_unlock_irqrestore(&cdd->lock, flags);
757 +
758 + return 0;
759 + }
760 +@@ -1159,14 +1188,11 @@ static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
761 + static int __maybe_unused cppi41_runtime_resume(struct device *dev)
762 + {
763 + struct cppi41_dd *cdd = dev_get_drvdata(dev);
764 +- struct cppi41_channel *c, *_c;
765 + unsigned long flags;
766 +
767 + spin_lock_irqsave(&cdd->lock, flags);
768 +- list_for_each_entry_safe(c, _c, &cdd->pending, node) {
769 +- push_desc_queue(c);
770 +- list_del(&c->node);
771 +- }
772 ++ cdd->is_suspended = false;
773 ++ cppi41_run_queue(cdd);
774 + spin_unlock_irqrestore(&cdd->lock, flags);
775 +
776 + return 0;
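[Editorial note: the cppi41 comments spell out the discipline the patch enforces: every push_desc_queue() takes a runtime-PM reference and every completion or teardown drops it, so the device cannot autosuspend while chan_busy[] holds in-flight descriptors. A userspace sketch of that get/put pairing, with a plain counter standing in for the pm_runtime usecount:

    #include <stdio.h>

    static int usecount;

    static void get(void) { usecount++; }
    static void put(void) { usecount--; }
    static int  may_suspend(void) { return usecount == 0; }

    static void push_desc(void)     { get(); /* descriptor now in flight */ }
    static void complete_desc(void) { put(); /* paired with push_desc() */ }

    int main(void)
    {
        push_desc();
        push_desc();
        printf("in flight, may_suspend=%d\n", may_suspend()); /* 0 */
        complete_desc();
        complete_desc();
        printf("drained,   may_suspend=%d\n", may_suspend()); /* 1 */
        return 0;
    }
]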
777 +diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
778 +index 921dfa047202..260c4b4b492e 100644
779 +--- a/drivers/firmware/efi/libstub/fdt.c
780 ++++ b/drivers/firmware/efi/libstub/fdt.c
781 +@@ -187,6 +187,7 @@ static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
782 + struct exit_boot_struct {
783 + efi_memory_desc_t *runtime_map;
784 + int *runtime_entry_count;
785 ++ void *new_fdt_addr;
786 + };
787 +
788 + static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
789 +@@ -202,7 +203,7 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
790 + efi_get_virtmap(*map->map, *map->map_size, *map->desc_size,
791 + p->runtime_map, p->runtime_entry_count);
792 +
793 +- return EFI_SUCCESS;
794 ++ return update_fdt_memmap(p->new_fdt_addr, map);
795 + }
796 +
797 + /*
798 +@@ -300,22 +301,13 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
799 +
800 + priv.runtime_map = runtime_map;
801 + priv.runtime_entry_count = &runtime_entry_count;
802 ++ priv.new_fdt_addr = (void *)*new_fdt_addr;
803 + status = efi_exit_boot_services(sys_table, handle, &map, &priv,
804 + exit_boot_func);
805 +
806 + if (status == EFI_SUCCESS) {
807 + efi_set_virtual_address_map_t *svam;
808 +
809 +- status = update_fdt_memmap((void *)*new_fdt_addr, &map);
810 +- if (status != EFI_SUCCESS) {
811 +- /*
812 +- * The kernel won't get far without the memory map, but
813 +- * may still be able to print something meaningful so
814 +- * return success here.
815 +- */
816 +- return EFI_SUCCESS;
817 +- }
818 +-
819 + /* Install the new virtual address map */
820 + svam = sys_table->runtime->set_virtual_address_map;
821 + status = svam(runtime_entry_count * desc_size, desc_size,
822 +diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
823 +index b13c8aaec078..6df924f72f29 100644
824 +--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
825 ++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
826 +@@ -227,6 +227,9 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
827 + }
828 + WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
829 +
830 ++ if (adev->mode_info.num_crtc)
831 ++ amdgpu_display_set_vga_render_state(adev, false);
832 ++
833 + gmc_v6_0_mc_stop(adev, &save);
834 +
835 + if (gmc_v6_0_wait_for_idle((void *)adev)) {
836 +@@ -256,7 +259,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
837 + dev_warn(adev->dev, "Wait for MC idle timedout !\n");
838 + }
839 + gmc_v6_0_mc_resume(adev, &save);
840 +- amdgpu_display_set_vga_render_state(adev, false);
841 + }
842 +
843 + static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
844 +diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
845 +index 67db1577ee49..4147e51cf893 100644
846 +--- a/drivers/gpu/drm/i915/intel_lrc.c
847 ++++ b/drivers/gpu/drm/i915/intel_lrc.c
848 +@@ -2152,30 +2152,42 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
849 +
850 + void intel_lr_context_resume(struct drm_i915_private *dev_priv)
851 + {
852 +- struct i915_gem_context *ctx = dev_priv->kernel_context;
853 + struct intel_engine_cs *engine;
854 ++ struct i915_gem_context *ctx;
855 ++
856 ++ /* Because we emit WA_TAIL_DWORDS there may be a disparity
857 ++ * between our bookkeeping in ce->ring->head and ce->ring->tail and
858 ++ * that stored in context. As we only write new commands from
859 ++ * ce->ring->tail onwards, everything before that is junk. If the GPU
860 ++ * starts reading from its RING_HEAD from the context, it may try to
861 ++ * execute that junk and die.
862 ++ *
863 ++ * So to avoid that we reset the context images upon resume. For
864 ++ * simplicity, we just zero everything out.
865 ++ */
866 ++ list_for_each_entry(ctx, &dev_priv->context_list, link) {
867 ++ for_each_engine(engine, dev_priv) {
868 ++ struct intel_context *ce = &ctx->engine[engine->id];
869 ++ u32 *reg;
870 +
871 +- for_each_engine(engine, dev_priv) {
872 +- struct intel_context *ce = &ctx->engine[engine->id];
873 +- void *vaddr;
874 +- uint32_t *reg_state;
875 +-
876 +- if (!ce->state)
877 +- continue;
878 +-
879 +- vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
880 +- if (WARN_ON(IS_ERR(vaddr)))
881 +- continue;
882 ++ if (!ce->state)
883 ++ continue;
884 +
885 +- reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
886 ++ reg = i915_gem_object_pin_map(ce->state->obj,
887 ++ I915_MAP_WB);
888 ++ if (WARN_ON(IS_ERR(reg)))
889 ++ continue;
890 +
891 +- reg_state[CTX_RING_HEAD+1] = 0;
892 +- reg_state[CTX_RING_TAIL+1] = 0;
893 ++ reg += LRC_STATE_PN * PAGE_SIZE / sizeof(*reg);
894 ++ reg[CTX_RING_HEAD+1] = 0;
895 ++ reg[CTX_RING_TAIL+1] = 0;
896 +
897 +- ce->state->obj->dirty = true;
898 +- i915_gem_object_unpin_map(ce->state->obj);
899 ++ ce->state->obj->dirty = true;
900 ++ i915_gem_object_unpin_map(ce->state->obj);
901 +
902 +- ce->ring->head = 0;
903 +- ce->ring->tail = 0;
904 ++ ce->ring->head = ce->ring->tail = 0;
905 ++ ce->ring->last_retired_head = -1;
906 ++ intel_ring_update_space(ce->ring);
907 ++ }
908 + }
909 + }
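[Editorial note: the intel_lrc.c comment explains the rationale: after resume, everything before ring->tail in a context image is junk, so head and tail are zeroed and the free space recomputed for every context, not just the kernel context. A simplified wrap-aware free-space formula, assuming a power-of-two ring size; this is a sketch of the idea, not the i915 implementation:

    #include <stdio.h>

    struct ring { unsigned head, tail, size; };

    static unsigned ring_space(const struct ring *r)
    {
        /* free bytes between tail (producer) and head (consumer) */
        return (r->head - r->tail - 1) & (r->size - 1);
    }

    int main(void)
    {
        struct ring r = { .head = 0x300, .tail = 0x480, .size = 0x1000 };
        r.head = r.tail = 0;  /* contents are junk after resume: restart */
        printf("space after reset: %#x\n", ring_space(&r)); /* size - 1 */
        return 0;
    }
]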
910 +diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
911 +index 74856a8b8f35..e64f52464ecf 100644
912 +--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
913 ++++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
914 +@@ -222,6 +222,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
915 + uint32_t mpllP;
916 +
917 + pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
918 ++ mpllP = (mpllP >> 8) & 0xf;
919 + if (!mpllP)
920 + mpllP = 4;
921 +
922 +@@ -232,7 +233,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
923 + uint32_t clock;
924 +
925 + pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
926 +- return clock;
927 ++ return clock / 1000;
928 + }
929 +
930 + ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals);
931 +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
932 +index 6f0436df0219..f8f2f16c22a2 100644
933 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
934 ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
935 +@@ -59,7 +59,7 @@ gt215_hda_eld(NV50_DISP_MTHD_V1)
936 + );
937 + }
938 + for (i = 0; i < size; i++)
939 +- nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[0]);
940 ++ nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[i]);
941 + for (; i < 0x60; i++)
942 + nvkm_wr32(device, 0x61c440 + soff, (i << 8));
943 + nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000003);
944 +diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
945 +index 60d30203a5fa..e06c1344c913 100644
946 +--- a/drivers/hid/hid-cp2112.c
947 ++++ b/drivers/hid/hid-cp2112.c
948 +@@ -167,7 +167,7 @@ struct cp2112_device {
949 + atomic_t xfer_avail;
950 + struct gpio_chip gc;
951 + u8 *in_out_buffer;
952 +- spinlock_t lock;
953 ++ struct mutex lock;
954 + };
955 +
956 + static int gpio_push_pull = 0xFF;
957 +@@ -179,10 +179,9 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
958 + struct cp2112_device *dev = gpiochip_get_data(chip);
959 + struct hid_device *hdev = dev->hdev;
960 + u8 *buf = dev->in_out_buffer;
961 +- unsigned long flags;
962 + int ret;
963 +
964 +- spin_lock_irqsave(&dev->lock, flags);
965 ++ mutex_lock(&dev->lock);
966 +
967 + ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
968 + CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
969 +@@ -206,8 +205,8 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
970 + ret = 0;
971 +
972 + exit:
973 +- spin_unlock_irqrestore(&dev->lock, flags);
974 +- return ret <= 0 ? ret : -EIO;
975 ++ mutex_unlock(&dev->lock);
976 ++ return ret < 0 ? ret : -EIO;
977 + }
978 +
979 + static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
980 +@@ -215,10 +214,9 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
981 + struct cp2112_device *dev = gpiochip_get_data(chip);
982 + struct hid_device *hdev = dev->hdev;
983 + u8 *buf = dev->in_out_buffer;
984 +- unsigned long flags;
985 + int ret;
986 +
987 +- spin_lock_irqsave(&dev->lock, flags);
988 ++ mutex_lock(&dev->lock);
989 +
990 + buf[0] = CP2112_GPIO_SET;
991 + buf[1] = value ? 0xff : 0;
992 +@@ -230,7 +228,7 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
993 + if (ret < 0)
994 + hid_err(hdev, "error setting GPIO values: %d\n", ret);
995 +
996 +- spin_unlock_irqrestore(&dev->lock, flags);
997 ++ mutex_unlock(&dev->lock);
998 + }
999 +
1000 + static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset)
1001 +@@ -238,10 +236,9 @@ static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset)
1002 + struct cp2112_device *dev = gpiochip_get_data(chip);
1003 + struct hid_device *hdev = dev->hdev;
1004 + u8 *buf = dev->in_out_buffer;
1005 +- unsigned long flags;
1006 + int ret;
1007 +
1008 +- spin_lock_irqsave(&dev->lock, flags);
1009 ++ mutex_lock(&dev->lock);
1010 +
1011 + ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf,
1012 + CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT,
1013 +@@ -255,7 +252,7 @@ static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset)
1014 + ret = (buf[1] >> offset) & 1;
1015 +
1016 + exit:
1017 +- spin_unlock_irqrestore(&dev->lock, flags);
1018 ++ mutex_unlock(&dev->lock);
1019 +
1020 + return ret;
1021 + }
1022 +@@ -266,10 +263,9 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
1023 + struct cp2112_device *dev = gpiochip_get_data(chip);
1024 + struct hid_device *hdev = dev->hdev;
1025 + u8 *buf = dev->in_out_buffer;
1026 +- unsigned long flags;
1027 + int ret;
1028 +
1029 +- spin_lock_irqsave(&dev->lock, flags);
1030 ++ mutex_lock(&dev->lock);
1031 +
1032 + ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
1033 + CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
1034 +@@ -290,7 +286,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
1035 + goto fail;
1036 + }
1037 +
1038 +- spin_unlock_irqrestore(&dev->lock, flags);
1039 ++ mutex_unlock(&dev->lock);
1040 +
1041 + /*
1042 + * Set gpio value when output direction is already set,
1043 +@@ -301,7 +297,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
1044 + return 0;
1045 +
1046 + fail:
1047 +- spin_unlock_irqrestore(&dev->lock, flags);
1048 ++ mutex_unlock(&dev->lock);
1049 + return ret < 0 ? ret : -EIO;
1050 + }
1051 +
1052 +@@ -1057,7 +1053,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
1053 + if (!dev->in_out_buffer)
1054 + return -ENOMEM;
1055 +
1056 +- spin_lock_init(&dev->lock);
1057 ++ mutex_init(&dev->lock);
1058 +
1059 + ret = hid_parse(hdev);
1060 + if (ret) {
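[Editorial note: the cp2112 conversion replaces a spinlock with a mutex around hid_hw_raw_request() calls, presumably because the HID transport can sleep, which must never happen under a spinlock with interrupts disabled; a mutex holder is allowed to sleep. A pthread sketch of the pattern (userspace stand-in, not the kernel locking API):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static char shared_buf[8];

    static void do_transfer(void)
    {
        pthread_mutex_lock(&lock); /* a mutex may block while contended... */
        usleep(1000);              /* ...and its holder may sleep, as the
                                    * raw HID request does; a spinlock with
                                    * IRQs off must never allow either */
        shared_buf[0] = 1;
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        do_transfer();
        printf("buf[0]=%d\n", shared_buf[0]);
        return 0;
    }
]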
1061 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
1062 +index 575aa65436d1..9845189fae92 100644
1063 +--- a/drivers/hid/hid-ids.h
1064 ++++ b/drivers/hid/hid-ids.h
1065 +@@ -76,6 +76,9 @@
1066 + #define USB_VENDOR_ID_ALPS_JP 0x044E
1067 + #define HID_DEVICE_ID_ALPS_U1_DUAL 0x120B
1068 +
1069 ++#define USB_VENDOR_ID_AMI 0x046b
1070 ++#define USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE 0xff10
1071 ++
1072 + #define USB_VENDOR_ID_ANTON 0x1130
1073 + #define USB_DEVICE_ID_ANTON_TOUCH_PAD 0x3101
1074 +
1075 +diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
1076 +index c5c5fbe9d605..52026dc94d5c 100644
1077 +--- a/drivers/hid/hid-lg.c
1078 ++++ b/drivers/hid/hid-lg.c
1079 +@@ -872,7 +872,7 @@ static const struct hid_device_id lg_devices[] = {
1080 + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG),
1081 + .driver_data = LG_NOGET | LG_FF4 },
1082 + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2),
1083 +- .driver_data = LG_FF2 },
1084 ++ .driver_data = LG_NOGET | LG_FF2 },
1085 + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940),
1086 + .driver_data = LG_FF3 },
1087 + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR),
1088 +diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
1089 +index e6cfd323babc..cde060fefa91 100644
1090 +--- a/drivers/hid/usbhid/hid-quirks.c
1091 ++++ b/drivers/hid/usbhid/hid-quirks.c
1092 +@@ -57,6 +57,7 @@ static const struct hid_blacklist {
1093 + { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
1094 + { USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS },
1095 + { USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS },
1096 ++ { USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE, HID_QUIRK_ALWAYS_POLL },
1097 + { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
1098 + { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
1099 + { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
1100 +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
1101 +index 1cb79925730d..623be90704ab 100644
1102 +--- a/drivers/hid/wacom_wac.c
1103 ++++ b/drivers/hid/wacom_wac.c
1104 +@@ -164,19 +164,21 @@ static int wacom_pl_irq(struct wacom_wac *wacom)
1105 + wacom->id[0] = STYLUS_DEVICE_ID;
1106 + }
1107 +
1108 +- pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
1109 +- if (features->pressure_max > 255)
1110 +- pressure = (pressure << 1) | ((data[4] >> 6) & 1);
1111 +- pressure += (features->pressure_max + 1) / 2;
1112 +-
1113 +- input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
1114 +- input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
1115 +- input_report_abs(input, ABS_PRESSURE, pressure);
1116 +-
1117 +- input_report_key(input, BTN_TOUCH, data[4] & 0x08);
1118 +- input_report_key(input, BTN_STYLUS, data[4] & 0x10);
1119 +- /* Only allow the stylus2 button to be reported for the pen tool. */
1120 +- input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20));
1121 ++ if (prox) {
1122 ++ pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
1123 ++ if (features->pressure_max > 255)
1124 ++ pressure = (pressure << 1) | ((data[4] >> 6) & 1);
1125 ++ pressure += (features->pressure_max + 1) / 2;
1126 ++
1127 ++ input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
1128 ++ input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
1129 ++ input_report_abs(input, ABS_PRESSURE, pressure);
1130 ++
1131 ++ input_report_key(input, BTN_TOUCH, data[4] & 0x08);
1132 ++ input_report_key(input, BTN_STYLUS, data[4] & 0x10);
1133 ++ /* Only allow the stylus2 button to be reported for the pen tool. */
1134 ++ input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20));
1135 ++ }
1136 +
1137 + if (!prox)
1138 + wacom->id[0] = 0;
1139 +diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
1140 +index 2bbf0c521beb..7d61b566e148 100644
1141 +--- a/drivers/iio/adc/palmas_gpadc.c
1142 ++++ b/drivers/iio/adc/palmas_gpadc.c
1143 +@@ -775,7 +775,7 @@ static int palmas_adc_wakeup_reset(struct palmas_gpadc *adc)
1144 +
1145 + static int palmas_gpadc_suspend(struct device *dev)
1146 + {
1147 +- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1148 ++ struct iio_dev *indio_dev = dev_get_drvdata(dev);
1149 + struct palmas_gpadc *adc = iio_priv(indio_dev);
1150 + int wakeup = adc->wakeup1_enable || adc->wakeup2_enable;
1151 + int ret;
1152 +@@ -798,7 +798,7 @@ static int palmas_gpadc_suspend(struct device *dev)
1153 +
1154 + static int palmas_gpadc_resume(struct device *dev)
1155 + {
1156 +- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1157 ++ struct iio_dev *indio_dev = dev_get_drvdata(dev);
1158 + struct palmas_gpadc *adc = iio_priv(indio_dev);
1159 + int wakeup = adc->wakeup1_enable || adc->wakeup2_enable;
1160 + int ret;
1161 +diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
1162 +index 9a081465c42f..6bb23a49e81e 100644
1163 +--- a/drivers/iio/health/afe4403.c
1164 ++++ b/drivers/iio/health/afe4403.c
1165 +@@ -422,7 +422,7 @@ MODULE_DEVICE_TABLE(of, afe4403_of_match);
1166 +
1167 + static int __maybe_unused afe4403_suspend(struct device *dev)
1168 + {
1169 +- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1170 ++ struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
1171 + struct afe4403_data *afe = iio_priv(indio_dev);
1172 + int ret;
1173 +
1174 +@@ -443,7 +443,7 @@ static int __maybe_unused afe4403_suspend(struct device *dev)
1175 +
1176 + static int __maybe_unused afe4403_resume(struct device *dev)
1177 + {
1178 +- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1179 ++ struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
1180 + struct afe4403_data *afe = iio_priv(indio_dev);
1181 + int ret;
1182 +
1183 +diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
1184 +index 45266404f7e3..964f5231a831 100644
1185 +--- a/drivers/iio/health/afe4404.c
1186 ++++ b/drivers/iio/health/afe4404.c
1187 +@@ -428,7 +428,7 @@ MODULE_DEVICE_TABLE(of, afe4404_of_match);
1188 +
1189 + static int __maybe_unused afe4404_suspend(struct device *dev)
1190 + {
1191 +- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1192 ++ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
1193 + struct afe4404_data *afe = iio_priv(indio_dev);
1194 + int ret;
1195 +
1196 +@@ -449,7 +449,7 @@ static int __maybe_unused afe4404_suspend(struct device *dev)
1197 +
1198 + static int __maybe_unused afe4404_resume(struct device *dev)
1199 + {
1200 +- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1201 ++ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
1202 + struct afe4404_data *afe = iio_priv(indio_dev);
1203 + int ret;
1204 +
1205 +diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c
1206 +index 90ab8a2d2846..183c14329d6e 100644
1207 +--- a/drivers/iio/health/max30100.c
1208 ++++ b/drivers/iio/health/max30100.c
1209 +@@ -238,7 +238,7 @@ static irqreturn_t max30100_interrupt_handler(int irq, void *private)
1210 +
1211 + mutex_lock(&data->lock);
1212 +
1213 +- while (cnt || (cnt = max30100_fifo_count(data) > 0)) {
1214 ++ while (cnt || (cnt = max30100_fifo_count(data)) > 0) {
1215 + ret = max30100_read_measurement(data);
1216 + if (ret)
1217 + break;
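[Editorial note: the max30100 fix is pure operator precedence: cnt = max30100_fifo_count(data) > 0 assigns the comparison result (0 or 1) to cnt, so the loop drained at most one sample per interrupt; the fix parenthesizes the assignment so cnt gets the real count. A standalone demo:

    #include <stdio.h>

    static int remaining = 3;                  /* samples sitting in the FIFO */
    static int fifo_count(void)  { return remaining; }
    static void read_sample(void) { remaining--; }

    int main(void)
    {
        int cnt, reads = 0;

        /* buggy shape: cnt = (fifo_count() > 0), i.e. always 0 or 1 */
        cnt = fifo_count() > 0;
        printf("buggy assignment leaves cnt=%d\n", cnt);

        /* fixed shape from the patch: assign first, then compare */
        cnt = 0;
        while (cnt || (cnt = fifo_count()) > 0) {
            read_sample();
            reads++;
            cnt--;
        }
        printf("fixed loop drained %d samples\n", reads); /* 3 */
        return 0;
    }
]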
1218 +diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
1219 +index 9c47bc98f3ac..2a22ad920333 100644
1220 +--- a/drivers/iio/humidity/dht11.c
1221 ++++ b/drivers/iio/humidity/dht11.c
1222 +@@ -71,7 +71,8 @@
1223 + * a) select an implementation using busy loop polling on those systems
1224 + * b) use the checksum to do some probabilistic decoding
1225 + */
1226 +-#define DHT11_START_TRANSMISSION 18 /* ms */
1227 ++#define DHT11_START_TRANSMISSION_MIN 18000 /* us */
1228 ++#define DHT11_START_TRANSMISSION_MAX 20000 /* us */
1229 + #define DHT11_MIN_TIMERES 34000 /* ns */
1230 + #define DHT11_THRESHOLD 49000 /* ns */
1231 + #define DHT11_AMBIG_LOW 23000 /* ns */
1232 +@@ -228,7 +229,8 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
1233 + ret = gpio_direction_output(dht11->gpio, 0);
1234 + if (ret)
1235 + goto err;
1236 +- msleep(DHT11_START_TRANSMISSION);
1237 ++ usleep_range(DHT11_START_TRANSMISSION_MIN,
1238 ++ DHT11_START_TRANSMISSION_MAX);
1239 + ret = gpio_direction_input(dht11->gpio);
1240 + if (ret)
1241 + goto err;
1242 +diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
1243 +index bb0fde6e2047..cc2243f6cc7f 100644
1244 +--- a/drivers/infiniband/hw/cxgb4/qp.c
1245 ++++ b/drivers/infiniband/hw/cxgb4/qp.c
1246 +@@ -321,7 +321,8 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
1247 + FW_RI_RES_WR_DCAEN_V(0) |
1248 + FW_RI_RES_WR_DCACPU_V(0) |
1249 + FW_RI_RES_WR_FBMIN_V(2) |
1250 +- FW_RI_RES_WR_FBMAX_V(2) |
1251 ++ (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_FBMAX_V(2) :
1252 ++ FW_RI_RES_WR_FBMAX_V(3)) |
1253 + FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
1254 + FW_RI_RES_WR_CIDXFTHRESH_V(0) |
1255 + FW_RI_RES_WR_EQSIZE_V(eqsize));
1256 +@@ -345,7 +346,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
1257 + FW_RI_RES_WR_DCAEN_V(0) |
1258 + FW_RI_RES_WR_DCACPU_V(0) |
1259 + FW_RI_RES_WR_FBMIN_V(2) |
1260 +- FW_RI_RES_WR_FBMAX_V(2) |
1261 ++ FW_RI_RES_WR_FBMAX_V(3) |
1262 + FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
1263 + FW_RI_RES_WR_CIDXFTHRESH_V(0) |
1264 + FW_RI_RES_WR_EQSIZE_V(eqsize));
1265 +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
1266 +index e1e274a0a34f..ba637ff8aa7e 100644
1267 +--- a/drivers/mmc/host/sdhci.c
1268 ++++ b/drivers/mmc/host/sdhci.c
1269 +@@ -2719,7 +2719,8 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
1270 + if (intmask & SDHCI_INT_RETUNE)
1271 + mmc_retune_needed(host->mmc);
1272 +
1273 +- if (intmask & SDHCI_INT_CARD_INT) {
1274 ++ if ((intmask & SDHCI_INT_CARD_INT) &&
1275 ++ (host->ier & SDHCI_INT_CARD_INT)) {
1276 + sdhci_enable_sdio_irq_nolock(host, false);
1277 + host->thread_isr |= SDHCI_INT_CARD_INT;
1278 + result = IRQ_WAKE_THREAD;
1279 +diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
1280 +index d02ca1491d16..8d3e53fac1da 100644
1281 +--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
1282 ++++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
1283 +@@ -91,7 +91,7 @@
1284 +
1285 + #define IWL8000_FW_PRE "iwlwifi-8000C-"
1286 + #define IWL8000_MODULE_FIRMWARE(api) \
1287 +- IWL8000_FW_PRE "-" __stringify(api) ".ucode"
1288 ++ IWL8000_FW_PRE __stringify(api) ".ucode"
1289 +
1290 + #define IWL8265_FW_PRE "iwlwifi-8265-"
1291 + #define IWL8265_MODULE_FIRMWARE(api) \
1292 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
1293 +index fc771885e383..52de3c6d760c 100644
1294 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
1295 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
1296 +@@ -1144,9 +1144,10 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
1297 + .frame_limit = IWL_FRAME_LIMIT,
1298 + };
1299 +
1300 +- /* Make sure reserved queue is still marked as such (or allocated) */
1301 +- mvm->queue_info[mvm_sta->reserved_queue].status =
1302 +- IWL_MVM_QUEUE_RESERVED;
1303 ++ /* Make sure reserved queue is still marked as such (if allocated) */
1304 ++ if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
1305 ++ mvm->queue_info[mvm_sta->reserved_queue].status =
1306 ++ IWL_MVM_QUEUE_RESERVED;
1307 +
1308 + for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1309 + struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
1310 +diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
1311 +index 0ec649d961d7..b0916b126923 100644
1312 +--- a/drivers/pci/pcie/aspm.c
1313 ++++ b/drivers/pci/pcie/aspm.c
1314 +@@ -518,25 +518,32 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
1315 + link = kzalloc(sizeof(*link), GFP_KERNEL);
1316 + if (!link)
1317 + return NULL;
1318 ++
1319 + INIT_LIST_HEAD(&link->sibling);
1320 + INIT_LIST_HEAD(&link->children);
1321 + INIT_LIST_HEAD(&link->link);
1322 + link->pdev = pdev;
1323 +- if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) {
1324 ++
1325 ++ /*
1326 ++ * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
1327 ++ * hierarchies.
1328 ++ */
1329 ++ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
1330 ++ pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE) {
1331 ++ link->root = link;
1332 ++ } else {
1333 + struct pcie_link_state *parent;
1334 ++
1335 + parent = pdev->bus->parent->self->link_state;
1336 + if (!parent) {
1337 + kfree(link);
1338 + return NULL;
1339 + }
1340 ++
1341 + link->parent = parent;
1342 ++ link->root = link->parent->root;
1343 + list_add(&link->link, &parent->children);
1344 + }
1345 +- /* Setup a pointer to the root port link */
1346 +- if (!link->parent)
1347 +- link->root = link;
1348 +- else
1349 +- link->root = link->parent->root;
1350 +
1351 + list_add(&link->sibling, &link_list);
1352 + pdev->link_state = link;
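[Editorial note: the aspm.c rework makes PCI/PCI-X-to-PCIe bridges hierarchy roots alongside Root Ports; each link's root pointer is either itself (for root types) or inherited from its parent. A minimal tree sketch of that propagation rule (illustrative names):

    #include <stdio.h>

    struct link { struct link *parent, *root; const char *name; };

    static void init_link(struct link *l, struct link *parent, const char *name)
    {
        l->name = name;
        l->parent = parent;
        /* a link with no parent is its own root; otherwise inherit */
        l->root = parent ? parent->root : l;
    }

    int main(void)
    {
        struct link root_port, downstream, endpoint;
        init_link(&root_port, NULL, "root port");
        init_link(&downstream, &root_port, "downstream");
        init_link(&endpoint, &downstream, "endpoint");
        printf("%s -> root %s\n", endpoint.name, endpoint.root->name);
        return 0;
    }
]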
1353 +diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
1354 +index 079015385fd8..583ae3f38fc0 100644
1355 +--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
1356 ++++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
1357 +@@ -731,16 +731,23 @@ static void __iomem *byt_gpio_reg(struct byt_gpio *vg, unsigned int offset,
1358 + int reg)
1359 + {
1360 + struct byt_community *comm = byt_get_community(vg, offset);
1361 +- u32 reg_offset = 0;
1362 ++ u32 reg_offset;
1363 +
1364 + if (!comm)
1365 + return NULL;
1366 +
1367 + offset -= comm->pin_base;
1368 +- if (reg == BYT_INT_STAT_REG)
1369 ++ switch (reg) {
1370 ++ case BYT_INT_STAT_REG:
1371 + reg_offset = (offset / 32) * 4;
1372 +- else
1373 ++ break;
1374 ++ case BYT_DEBOUNCE_REG:
1375 ++ reg_offset = 0;
1376 ++ break;
1377 ++ default:
1378 + reg_offset = comm->pad_map[offset] * 16;
1379 ++ break;
1380 ++ }
1381 +
1382 + return comm->reg_base + reg_offset + reg;
1383 + }
1384 +@@ -1612,7 +1619,9 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
1385 + continue;
1386 + }
1387 +
1388 ++ raw_spin_lock(&vg->lock);
1389 + pending = readl(reg);
1390 ++ raw_spin_unlock(&vg->lock);
1391 + for_each_set_bit(pin, &pending, 32) {
1392 + virq = irq_find_mapping(vg->chip.irqdomain, base + pin);
1393 + generic_handle_irq(virq);
1394 +diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
1395 +index 7826c7f0cb7c..9931be6af0ca 100644
1396 +--- a/drivers/pinctrl/intel/pinctrl-merrifield.c
1397 ++++ b/drivers/pinctrl/intel/pinctrl-merrifield.c
1398 +@@ -794,6 +794,9 @@ static int mrfld_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
1399 + unsigned int i;
1400 + int ret;
1401 +
1402 ++ if (!mrfld_buf_available(mp, pin))
1403 ++ return -ENOTSUPP;
1404 ++
1405 + for (i = 0; i < nconfigs; i++) {
1406 + switch (pinconf_to_config_param(configs[i])) {
1407 + case PIN_CONFIG_BIAS_DISABLE:
1408 +diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
1409 +index e6a512ebeae2..a3ade9e4ef47 100644
1410 +--- a/drivers/regulator/axp20x-regulator.c
1411 ++++ b/drivers/regulator/axp20x-regulator.c
1412 +@@ -272,7 +272,7 @@ static const struct regulator_desc axp806_regulators[] = {
1413 + 64, AXP806_DCDCD_V_CTRL, 0x3f, AXP806_PWR_OUT_CTRL1,
1414 + BIT(3)),
1415 + AXP_DESC(AXP806, DCDCE, "dcdce", "vine", 1100, 3400, 100,
1416 +- AXP806_DCDCB_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)),
1417 ++ AXP806_DCDCE_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)),
1418 + AXP_DESC(AXP806, ALDO1, "aldo1", "aldoin", 700, 3300, 100,
1419 + AXP806_ALDO1_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(5)),
1420 + AXP_DESC(AXP806, ALDO2, "aldo2", "aldoin", 700, 3400, 100,
1421 +diff --git a/drivers/staging/greybus/timesync_platform.c b/drivers/staging/greybus/timesync_platform.c
1422 +index 113f3d6c4b3a..27f75b17679b 100644
1423 +--- a/drivers/staging/greybus/timesync_platform.c
1424 ++++ b/drivers/staging/greybus/timesync_platform.c
1425 +@@ -45,12 +45,18 @@ u32 gb_timesync_platform_get_clock_rate(void)
1426 +
1427 + int gb_timesync_platform_lock_bus(struct gb_timesync_svc *pdata)
1428 + {
1429 ++ if (!arche_platform_change_state_cb)
1430 ++ return 0;
1431 ++
1432 + return arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_TIME_SYNC,
1433 + pdata);
1434 + }
1435 +
1436 + void gb_timesync_platform_unlock_bus(void)
1437 + {
1438 ++ if (!arche_platform_change_state_cb)
1439 ++ return;
1440 ++
1441 + arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_ACTIVE, NULL);
1442 + }
1443 +
1444 +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
1445 +index d2e50a27140c..24f9f98968a5 100644
1446 +--- a/drivers/usb/core/quirks.c
1447 ++++ b/drivers/usb/core/quirks.c
1448 +@@ -37,6 +37,10 @@ static const struct usb_device_id usb_quirk_list[] = {
1449 + /* CBM - Flash disk */
1450 + { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME },
1451 +
1452 ++ /* WORLDE easy key (easykey.25) MIDI controller */
1453 ++ { USB_DEVICE(0x0218, 0x0401), .driver_info =
1454 ++ USB_QUIRK_CONFIG_INTF_STRINGS },
1455 ++
1456 + /* HP 5300/5370C scanner */
1457 + { USB_DEVICE(0x03f0, 0x0701), .driver_info =
1458 + USB_QUIRK_STRING_FETCH_255 },
1459 +diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
1460 +index 17989b72cdae..8d412d8b1f29 100644
1461 +--- a/drivers/usb/gadget/function/f_fs.c
1462 ++++ b/drivers/usb/gadget/function/f_fs.c
1463 +@@ -2269,6 +2269,8 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
1464 + if (len < sizeof(*d) || h->interface >= ffs->interfaces_count)
1465 + return -EINVAL;
1466 + length = le32_to_cpu(d->dwSize);
1467 ++ if (len < length)
1468 ++ return -EINVAL;
1469 + type = le32_to_cpu(d->dwPropertyDataType);
1470 + if (type < USB_EXT_PROP_UNICODE ||
1471 + type > USB_EXT_PROP_UNICODE_MULTI) {
1472 +@@ -2277,6 +2279,11 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
1473 + return -EINVAL;
1474 + }
1475 + pnl = le16_to_cpu(d->wPropertyNameLength);
1476 ++ if (length < 14 + pnl) {
1477 ++ pr_vdebug("invalid os descriptor length: %d pnl:%d (descriptor %d)\n",
1478 ++ length, pnl, type);
1479 ++ return -EINVAL;
1480 ++ }
1481 + pdl = le32_to_cpu(*(u32 *)((u8 *)data + 10 + pnl));
1482 + if (length != 14 + pnl + pdl) {
1483 + pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n",
1484 +@@ -2363,6 +2370,9 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
1485 + }
1486 + }
1487 + if (flags & (1 << i)) {
1488 ++ if (len < 4) {
1489 ++ goto error;
1490 ++ }
1491 + os_descs_count = get_unaligned_le32(data);
1492 + data += 4;
1493 + len -= 4;
1494 +@@ -2435,7 +2445,8 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
1495 +
1496 + ENTER();
1497 +
1498 +- if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
1499 ++ if (unlikely(len < 16 ||
1500 ++ get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
1501 + get_unaligned_le32(data + 4) != len))
1502 + goto error;
1503 + str_count = get_unaligned_le32(data + 8);
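The f_fs hunks all harden descriptor parsing against undersized buffers: dwSize is checked against the bytes actually received, wPropertyNameLength is validated before it is used to locate the property-data length, the os-descriptor count is read only once at least 4 bytes remain, and the strings blob must be at least 16 bytes before its magic and length fields are trusted. A compilable sketch of the validation order for the ext-prop layout above (offsets per the hunk: dwSize at 0, wPropertyNameLength at 8, pdl at 10 + pnl, total 14 + pnl + pdl; little-endian conversion elided for brevity):

        #include <stdint.h>
        #include <string.h>

        static int parse_ext_prop(const uint8_t *data, size_t len)
        {
                uint32_t length, pdl;
                uint16_t pnl;

                if (len < 14)                   /* fixed part must fit */
                        return -1;
                memcpy(&length, data, 4);       /* dwSize */
                if (len < length)               /* claimed size must fit in buffer */
                        return -1;
                memcpy(&pnl, data + 8, 2);      /* wPropertyNameLength */
                if (length < 14u + pnl)         /* name must fit before reading pdl */
                        return -1;
                memcpy(&pdl, data + 10 + pnl, 4);
                if (length != 14u + pnl + pdl)
                        return -1;
                return 0;
        }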
1504 +diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
1505 +index c3e172e15ec3..338575fb2d27 100644
1506 +--- a/drivers/usb/musb/musb_core.c
1507 ++++ b/drivers/usb/musb/musb_core.c
1508 +@@ -578,11 +578,11 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
1509 + | MUSB_PORT_STAT_RESUME;
1510 + musb->rh_timer = jiffies
1511 + + msecs_to_jiffies(USB_RESUME_TIMEOUT);
1512 +- musb->need_finish_resume = 1;
1513 +-
1514 + musb->xceiv->otg->state = OTG_STATE_A_HOST;
1515 + musb->is_active = 1;
1516 + musb_host_resume_root_hub(musb);
1517 ++ schedule_delayed_work(&musb->finish_resume_work,
1518 ++ msecs_to_jiffies(USB_RESUME_TIMEOUT));
1519 + break;
1520 + case OTG_STATE_B_WAIT_ACON:
1521 + musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
1522 +@@ -2691,11 +2691,6 @@ static int musb_resume(struct device *dev)
1523 + mask = MUSB_DEVCTL_BDEVICE | MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV;
1524 + if ((devctl & mask) != (musb->context.devctl & mask))
1525 + musb->port1_status = 0;
1526 +- if (musb->need_finish_resume) {
1527 +- musb->need_finish_resume = 0;
1528 +- schedule_delayed_work(&musb->finish_resume_work,
1529 +- msecs_to_jiffies(USB_RESUME_TIMEOUT));
1530 +- }
1531 +
1532 + /*
1533 + * The USB HUB code expects the device to be in RPM_ACTIVE once it came
1534 +@@ -2747,12 +2742,6 @@ static int musb_runtime_resume(struct device *dev)
1535 +
1536 + musb_restore_context(musb);
1537 +
1538 +- if (musb->need_finish_resume) {
1539 +- musb->need_finish_resume = 0;
1540 +- schedule_delayed_work(&musb->finish_resume_work,
1541 +- msecs_to_jiffies(USB_RESUME_TIMEOUT));
1542 +- }
1543 +-
1544 + spin_lock_irqsave(&musb->lock, flags);
1545 + error = musb_run_resume_work(musb);
1546 + if (error)
1547 +diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
1548 +index 47331dbdde29..854fbf7b6b23 100644
1549 +--- a/drivers/usb/musb/musb_core.h
1550 ++++ b/drivers/usb/musb/musb_core.h
1551 +@@ -410,7 +410,6 @@ struct musb {
1552 +
1553 + /* is_suspended means USB B_PERIPHERAL suspend */
1554 + unsigned is_suspended:1;
1555 +- unsigned need_finish_resume :1;
1556 +
1557 + /* may_wakeup means remote wakeup is enabled */
1558 + unsigned may_wakeup:1;
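The musb change schedules finish_resume_work directly from the interrupt path that detects resume, instead of setting need_finish_resume and relying on a later system- or runtime-resume callback to notice it; that deferral was racy and duplicated the same flag handling in two resume paths. With the work queued at the source, the bitfield in musb_core.h can be dropped.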
1559 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1560 +index 7ce31a4c7e7f..42cc72e54c05 100644
1561 +--- a/drivers/usb/serial/option.c
1562 ++++ b/drivers/usb/serial/option.c
1563 +@@ -2007,6 +2007,7 @@ static const struct usb_device_id option_ids[] = {
1564 + { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
1565 + { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
1566 + { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
1567 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
1568 + { } /* Terminating entry */
1569 + };
1570 + MODULE_DEVICE_TABLE(usb, option_ids);
1571 +diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
1572 +index 46fca6b75846..1db4b61bdf7b 100644
1573 +--- a/drivers/usb/serial/pl2303.c
1574 ++++ b/drivers/usb/serial/pl2303.c
1575 +@@ -49,6 +49,7 @@ static const struct usb_device_id id_table[] = {
1576 + { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
1577 + { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
1578 + { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
1579 ++ { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) },
1580 + { USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) },
1581 + { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
1582 + { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID_UCSGT) },
1583 +diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
1584 +index e3b7af8adfb7..09d9be88209e 100644
1585 +--- a/drivers/usb/serial/pl2303.h
1586 ++++ b/drivers/usb/serial/pl2303.h
1587 +@@ -27,6 +27,7 @@
1588 + #define ATEN_VENDOR_ID 0x0557
1589 + #define ATEN_VENDOR_ID2 0x0547
1590 + #define ATEN_PRODUCT_ID 0x2008
1591 ++#define ATEN_PRODUCT_ID2 0x2118
1592 +
1593 + #define IODATA_VENDOR_ID 0x04bb
1594 + #define IODATA_PRODUCT_ID 0x0a03
1595 +diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
1596 +index 1bc6089b9008..696458db7e3c 100644
1597 +--- a/drivers/usb/serial/qcserial.c
1598 ++++ b/drivers/usb/serial/qcserial.c
1599 +@@ -124,6 +124,7 @@ static const struct usb_device_id id_table[] = {
1600 + {USB_DEVICE(0x1410, 0xa021)}, /* Novatel Gobi 3000 Composite */
1601 + {USB_DEVICE(0x413c, 0x8193)}, /* Dell Gobi 3000 QDL */
1602 + {USB_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */
1603 ++ {USB_DEVICE(0x413c, 0x81a6)}, /* Dell DW5570 QDL (MC8805) */
1604 + {USB_DEVICE(0x1199, 0x68a4)}, /* Sierra Wireless QDL */
1605 + {USB_DEVICE(0x1199, 0x68a5)}, /* Sierra Wireless Modem */
1606 + {USB_DEVICE(0x1199, 0x68a8)}, /* Sierra Wireless QDL */
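The remaining USB hunks above are pure device-ID table additions: a WORLDE easykey.25 MIDI controller quirk in quirks.c, the HP lt2523 (Novatel E371) modem in option.c, a second ATEN ID 0x0557:0x2118 in pl2303.c/pl2303.h, and the Dell DW5570 QDL mode in qcserial.c. No code paths change; the drivers simply start binding to these IDs.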
1607 +diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
1608 +index c6f2d89c0e97..64613fbf5cf8 100644
1609 +--- a/drivers/vhost/vhost.c
1610 ++++ b/drivers/vhost/vhost.c
1611 +@@ -130,14 +130,14 @@ static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
1612 +
1613 + static void vhost_init_is_le(struct vhost_virtqueue *vq)
1614 + {
1615 +- if (vhost_has_feature(vq, VIRTIO_F_VERSION_1))
1616 +- vq->is_le = true;
1617 ++ vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
1618 ++ || virtio_legacy_is_little_endian();
1619 + }
1620 + #endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
1621 +
1622 + static void vhost_reset_is_le(struct vhost_virtqueue *vq)
1623 + {
1624 +- vq->is_le = virtio_legacy_is_little_endian();
1625 ++ vhost_init_is_le(vq);
1626 + }
1627 +
1628 + struct vhost_flush_struct {
1629 +@@ -1713,10 +1713,8 @@ int vhost_vq_init_access(struct vhost_virtqueue *vq)
1630 + int r;
1631 + bool is_le = vq->is_le;
1632 +
1633 +- if (!vq->private_data) {
1634 +- vhost_reset_is_le(vq);
1635 ++ if (!vq->private_data)
1636 + return 0;
1637 +- }
1638 +
1639 + vhost_init_is_le(vq);
1640 +
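The vhost hunks unify endianness initialization: vhost_init_is_le() previously only ever set is_le to true (when VIRTIO_F_VERSION_1 was negotiated) while vhost_reset_is_le() forced the legacy default independently, so the two paths could disagree. Computing is_le in one place as "VERSION_1 negotiated, or the legacy byte order is already little-endian", and having both the reset path and vhost_vq_init_access() reuse it, keeps every caller consistent.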
1641 +diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
1642 +index f1360487a594..489bfc61cf30 100644
1643 +--- a/drivers/virtio/virtio_ring.c
1644 ++++ b/drivers/virtio/virtio_ring.c
1645 +@@ -159,13 +159,6 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
1646 + if (xen_domain())
1647 + return true;
1648 +
1649 +- /*
1650 +- * On ARM-based machines, the DMA ops will do the right thing,
1651 +- * so always use them with legacy devices.
1652 +- */
1653 +- if (IS_ENABLED(CONFIG_ARM) || IS_ENABLED(CONFIG_ARM64))
1654 +- return !virtio_has_feature(vdev, VIRTIO_F_VERSION_1);
1655 +-
1656 + return false;
1657 + }
1658 +
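The virtio_ring hunk reverts the ARM special case in vring_use_dma_api(): unconditionally routing legacy devices through the DMA ops regressed some ARM/arm64 configurations, so the special case is removed and such devices fall back to the generic default unless another rule (such as the Xen check above) applies.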
1659 +diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
1660 +index 8f6a2a5863b9..a27fc8791551 100644
1661 +--- a/fs/cifs/readdir.c
1662 ++++ b/fs/cifs/readdir.c
1663 +@@ -285,6 +285,7 @@ initiate_cifs_search(const unsigned int xid, struct file *file)
1664 + rc = -ENOMEM;
1665 + goto error_exit;
1666 + }
1667 ++ spin_lock_init(&cifsFile->file_info_lock);
1668 + file->private_data = cifsFile;
1669 + cifsFile->tlink = cifs_get_tlink(tlink);
1670 + tcon = tlink_tcon(tlink);
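The cifs hunk adds a missing spin_lock_init() on the readdir path: initiate_cifs_search() built a cifsFileInfo by hand without initializing file_info_lock, so the first acquisition operated on an uninitialized spinlock.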
1671 +diff --git a/fs/dax.c b/fs/dax.c
1672 +index 014defd2e744..bf6218da7928 100644
1673 +--- a/fs/dax.c
1674 ++++ b/fs/dax.c
1675 +@@ -1270,6 +1270,11 @@ iomap_dax_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
1676 + struct blk_dax_ctl dax = { 0 };
1677 + ssize_t map_len;
1678 +
1679 ++ if (fatal_signal_pending(current)) {
1680 ++ ret = -EINTR;
1681 ++ break;
1682 ++ }
1683 ++
1684 + dax.sector = iomap->blkno +
1685 + (((pos & PAGE_MASK) - iomap->offset) >> 9);
1686 + dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
1687 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
1688 +index 478630af0d19..bbc316db9495 100644
1689 +--- a/fs/ext4/super.c
1690 ++++ b/fs/ext4/super.c
1691 +@@ -3827,6 +3827,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
1692 + (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
1693 + db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
1694 + EXT4_DESC_PER_BLOCK(sb);
1695 ++ if (ext4_has_feature_meta_bg(sb)) {
1696 ++ if (le32_to_cpu(es->s_first_meta_bg) >= db_count) {
1697 ++ ext4_msg(sb, KERN_WARNING,
1698 ++ "first meta block group too large: %u "
1699 ++ "(group descriptor block count %u)",
1700 ++ le32_to_cpu(es->s_first_meta_bg), db_count);
1701 ++ goto failed_mount;
1702 ++ }
1703 ++ }
1704 + sbi->s_group_desc = ext4_kvmalloc(db_count *
1705 + sizeof(struct buffer_head *),
1706 + GFP_KERNEL);
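The ext4 hunk validates s_first_meta_bg before use: with META_BG enabled, a corrupted or crafted superblock claiming a first meta block group at or beyond the group-descriptor block count would make the mount code read past the descriptor array, so the value must stay below the db_count computed just above.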
1707 +diff --git a/fs/iomap.c b/fs/iomap.c
1708 +index a8ee8c33ca78..814ae8f9587d 100644
1709 +--- a/fs/iomap.c
1710 ++++ b/fs/iomap.c
1711 +@@ -113,6 +113,9 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
1712 +
1713 + BUG_ON(pos + len > iomap->offset + iomap->length);
1714 +
1715 ++ if (fatal_signal_pending(current))
1716 ++ return -EINTR;
1717 ++
1718 + page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
1719 + if (!page)
1720 + return -ENOMEM;
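The dax and iomap hunks (and the mm/filemap one further down) add the same early-out: long read/write loops now poll fatal_signal_pending() so a SIGKILLed task returns -EINTR promptly instead of completing the whole I/O first. A userspace analogy of the pattern, names illustrative:

        #include <errno.h>
        #include <signal.h>

        static volatile sig_atomic_t fatal_pending; /* set from a signal handler */

        static int copy_loop(int nchunks)
        {
                for (int i = 0; i < nchunks; i++) {
                        if (fatal_pending)
                                return -EINTR;  /* stop promptly, as the hunks do */
                        /* ... copy one chunk ... */
                }
                return 0;
        }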
1721 +diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
1722 +index 42aace4fc4c8..64813697f4c4 100644
1723 +--- a/fs/nfsd/nfs4layouts.c
1724 ++++ b/fs/nfsd/nfs4layouts.c
1725 +@@ -223,10 +223,11 @@ nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
1726 + struct nfs4_layout_stateid *ls;
1727 + struct nfs4_stid *stp;
1728 +
1729 +- stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache);
1730 ++ stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache,
1731 ++ nfsd4_free_layout_stateid);
1732 + if (!stp)
1733 + return NULL;
1734 +- stp->sc_free = nfsd4_free_layout_stateid;
1735 ++
1736 + get_nfs4_file(fp);
1737 + stp->sc_file = fp;
1738 +
1739 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
1740 +index 4b4beaaa4eaa..a0dee8ae9f97 100644
1741 +--- a/fs/nfsd/nfs4state.c
1742 ++++ b/fs/nfsd/nfs4state.c
1743 +@@ -633,8 +633,8 @@ find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
1744 + return co;
1745 + }
1746 +
1747 +-struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
1748 +- struct kmem_cache *slab)
1749 ++struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
1750 ++ void (*sc_free)(struct nfs4_stid *))
1751 + {
1752 + struct nfs4_stid *stid;
1753 + int new_id;
1754 +@@ -650,6 +650,8 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
1755 + idr_preload_end();
1756 + if (new_id < 0)
1757 + goto out_free;
1758 ++
1759 ++ stid->sc_free = sc_free;
1760 + stid->sc_client = cl;
1761 + stid->sc_stateid.si_opaque.so_id = new_id;
1762 + stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
1763 +@@ -675,15 +677,12 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
1764 + static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
1765 + {
1766 + struct nfs4_stid *stid;
1767 +- struct nfs4_ol_stateid *stp;
1768 +
1769 +- stid = nfs4_alloc_stid(clp, stateid_slab);
1770 ++ stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
1771 + if (!stid)
1772 + return NULL;
1773 +
1774 +- stp = openlockstateid(stid);
1775 +- stp->st_stid.sc_free = nfs4_free_ol_stateid;
1776 +- return stp;
1777 ++ return openlockstateid(stid);
1778 + }
1779 +
1780 + static void nfs4_free_deleg(struct nfs4_stid *stid)
1781 +@@ -781,11 +780,10 @@ alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh,
1782 + goto out_dec;
1783 + if (delegation_blocked(&current_fh->fh_handle))
1784 + goto out_dec;
1785 +- dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
1786 ++ dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
1787 + if (dp == NULL)
1788 + goto out_dec;
1789 +
1790 +- dp->dl_stid.sc_free = nfs4_free_deleg;
1791 + /*
1792 + * delegation seqid's are never incremented. The 4.1 special
1793 + * meaning of seqid 0 isn't meaningful, really, but let's avoid
1794 +@@ -5580,7 +5578,6 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
1795 + stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
1796 + get_nfs4_file(fp);
1797 + stp->st_stid.sc_file = fp;
1798 +- stp->st_stid.sc_free = nfs4_free_lock_stateid;
1799 + stp->st_access_bmap = 0;
1800 + stp->st_deny_bmap = open_stp->st_deny_bmap;
1801 + stp->st_openstp = open_stp;
1802 +@@ -5623,7 +5620,7 @@ find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
1803 + lst = find_lock_stateid(lo, fi);
1804 + if (lst == NULL) {
1805 + spin_unlock(&clp->cl_lock);
1806 +- ns = nfs4_alloc_stid(clp, stateid_slab);
1807 ++ ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
1808 + if (ns == NULL)
1809 + return NULL;
1810 +
1811 +diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
1812 +index c9399366f9df..4516e8b7d776 100644
1813 +--- a/fs/nfsd/state.h
1814 ++++ b/fs/nfsd/state.h
1815 +@@ -603,8 +603,8 @@ extern __be32 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
1816 + __be32 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
1817 + stateid_t *stateid, unsigned char typemask,
1818 + struct nfs4_stid **s, struct nfsd_net *nn);
1819 +-struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
1820 +- struct kmem_cache *slab);
1821 ++struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
1822 ++ void (*sc_free)(struct nfs4_stid *));
1823 + void nfs4_unhash_stid(struct nfs4_stid *s);
1824 + void nfs4_put_stid(struct nfs4_stid *s);
1825 + void nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid);
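The nfsd hunks move the sc_free callback into nfs4_alloc_stid() itself: previously the stid was inserted into the idr (and therefore findable) before the caller assigned sc_free, so a racing lookup-and-put could invoke a NULL destructor. Passing the destructor at allocation time guarantees it is set before the object is published. A minimal sketch of the constructor-owns-destructor pattern, names illustrative:

        #include <stdlib.h>

        struct obj {
                void (*free_fn)(struct obj *);
                /* ... payload ... */
        };

        static struct obj *alloc_obj(void (*free_fn)(struct obj *))
        {
                struct obj *o = calloc(1, sizeof(*o));

                if (!o)
                        return NULL;
                o->free_fn = free_fn;   /* set before the object becomes reachable */
                /* publish(o); */
                return o;
        }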
1826 +diff --git a/include/linux/irq.h b/include/linux/irq.h
1827 +index e79875574b39..39e3254e5769 100644
1828 +--- a/include/linux/irq.h
1829 ++++ b/include/linux/irq.h
1830 +@@ -184,6 +184,7 @@ struct irq_data {
1831 + *
1832 + * IRQD_TRIGGER_MASK - Mask for the trigger type bits
1833 + * IRQD_SETAFFINITY_PENDING - Affinity setting is pending
1834 ++ * IRQD_ACTIVATED - Interrupt has already been activated
1835 + * IRQD_NO_BALANCING - Balancing disabled for this IRQ
1836 + * IRQD_PER_CPU - Interrupt is per cpu
1837 + * IRQD_AFFINITY_SET - Interrupt affinity was set
1838 +@@ -202,6 +203,7 @@ struct irq_data {
1839 + enum {
1840 + IRQD_TRIGGER_MASK = 0xf,
1841 + IRQD_SETAFFINITY_PENDING = (1 << 8),
1842 ++ IRQD_ACTIVATED = (1 << 9),
1843 + IRQD_NO_BALANCING = (1 << 10),
1844 + IRQD_PER_CPU = (1 << 11),
1845 + IRQD_AFFINITY_SET = (1 << 12),
1846 +@@ -312,6 +314,21 @@ static inline bool irqd_affinity_is_managed(struct irq_data *d)
1847 + return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED;
1848 + }
1849 +
1850 ++static inline bool irqd_is_activated(struct irq_data *d)
1851 ++{
1852 ++ return __irqd_to_state(d) & IRQD_ACTIVATED;
1853 ++}
1854 ++
1855 ++static inline void irqd_set_activated(struct irq_data *d)
1856 ++{
1857 ++ __irqd_to_state(d) |= IRQD_ACTIVATED;
1858 ++}
1859 ++
1860 ++static inline void irqd_clr_activated(struct irq_data *d)
1861 ++{
1862 ++ __irqd_to_state(d) &= ~IRQD_ACTIVATED;
1863 ++}
1864 ++
1865 + #undef __irqd_to_state
1866 +
1867 + static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
1868 +diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
1869 +index c1784c0b4f35..134a2f69c21a 100644
1870 +--- a/include/linux/memory_hotplug.h
1871 ++++ b/include/linux/memory_hotplug.h
1872 +@@ -85,7 +85,8 @@ extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
1873 + extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
1874 + /* VM interface that may be used by firmware interface */
1875 + extern int online_pages(unsigned long, unsigned long, int);
1876 +-extern int test_pages_in_a_zone(unsigned long, unsigned long);
1877 ++extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
1878 ++ unsigned long *valid_start, unsigned long *valid_end);
1879 + extern void __offline_isolated_pages(unsigned long, unsigned long);
1880 +
1881 + typedef void (*online_page_callback_t)(struct page *page);
1882 +diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
1883 +index 1c7eec09e5eb..3a481a49546e 100644
1884 +--- a/include/linux/percpu-refcount.h
1885 ++++ b/include/linux/percpu-refcount.h
1886 +@@ -204,7 +204,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
1887 + static inline bool percpu_ref_tryget(struct percpu_ref *ref)
1888 + {
1889 + unsigned long __percpu *percpu_count;
1890 +- int ret;
1891 ++ bool ret;
1892 +
1893 + rcu_read_lock_sched();
1894 +
1895 +@@ -238,7 +238,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
1896 + static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
1897 + {
1898 + unsigned long __percpu *percpu_count;
1899 +- int ret = false;
1900 ++ bool ret = false;
1901 +
1902 + rcu_read_lock_sched();
1903 +
1904 +diff --git a/kernel/cgroup.c b/kernel/cgroup.c
1905 +index 85bc9beb046d..4e2f3de0e40b 100644
1906 +--- a/kernel/cgroup.c
1907 ++++ b/kernel/cgroup.c
1908 +@@ -5219,6 +5219,11 @@ static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
1909 + return ERR_PTR(err);
1910 + }
1911 +
1912 ++/*
1913 ++ * The returned cgroup is fully initialized including its control mask, but
1914 ++ * it isn't associated with its kernfs_node and doesn't have the control
1915 ++ * mask applied.
1916 ++ */
1917 + static struct cgroup *cgroup_create(struct cgroup *parent)
1918 + {
1919 + struct cgroup_root *root = parent->root;
1920 +@@ -5283,11 +5288,6 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
1921 +
1922 + cgroup_propagate_control(cgrp);
1923 +
1924 +- /* @cgrp doesn't have dir yet so the following will only create csses */
1925 +- ret = cgroup_apply_control_enable(cgrp);
1926 +- if (ret)
1927 +- goto out_destroy;
1928 +-
1929 + return cgrp;
1930 +
1931 + out_cancel_ref:
1932 +@@ -5295,9 +5295,6 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
1933 + out_free_cgrp:
1934 + kfree(cgrp);
1935 + return ERR_PTR(ret);
1936 +-out_destroy:
1937 +- cgroup_destroy_locked(cgrp);
1938 +- return ERR_PTR(ret);
1939 + }
1940 +
1941 + static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
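The cgroup hunk defers css creation: cgroup_create() no longer calls cgroup_apply_control_enable(), and the new comment spells out the contract, so the returned cgroup is fully initialized but its csses are instantiated later, once the caller has associated it with its kernfs_node. The now-unreachable out_destroy error path is dropped accordingly.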
1942 +diff --git a/kernel/events/core.c b/kernel/events/core.c
1943 +index e5a8839e7076..b1cfd7416db0 100644
1944 +--- a/kernel/events/core.c
1945 ++++ b/kernel/events/core.c
1946 +@@ -1469,7 +1469,6 @@ ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
1947 + static void
1948 + list_add_event(struct perf_event *event, struct perf_event_context *ctx)
1949 + {
1950 +-
1951 + lockdep_assert_held(&ctx->lock);
1952 +
1953 + WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
1954 +@@ -1624,6 +1623,8 @@ static void perf_group_attach(struct perf_event *event)
1955 + {
1956 + struct perf_event *group_leader = event->group_leader, *pos;
1957 +
1958 ++ lockdep_assert_held(&event->ctx->lock);
1959 ++
1960 + /*
1961 + * We can have double attach due to group movement in perf_event_open.
1962 + */
1963 +@@ -1697,6 +1698,8 @@ static void perf_group_detach(struct perf_event *event)
1964 + struct perf_event *sibling, *tmp;
1965 + struct list_head *list = NULL;
1966 +
1967 ++ lockdep_assert_held(&event->ctx->lock);
1968 ++
1969 + /*
1970 + * We can have double detach due to exit/hot-unplug + close.
1971 + */
1972 +@@ -1895,9 +1898,29 @@ __perf_remove_from_context(struct perf_event *event,
1973 + */
1974 + static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
1975 + {
1976 +- lockdep_assert_held(&event->ctx->mutex);
1977 ++ struct perf_event_context *ctx = event->ctx;
1978 ++
1979 ++ lockdep_assert_held(&ctx->mutex);
1980 +
1981 + event_function_call(event, __perf_remove_from_context, (void *)flags);
1982 ++
1983 ++ /*
1984 ++ * The above event_function_call() can NO-OP when it hits
1985 ++ * TASK_TOMBSTONE. In that case we must already have been detached
1986 ++ * from the context (by perf_event_exit_event()) but the grouping
1987 ++ * might still be in-tact.
1988 ++ */
1989 ++ WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
1990 ++ if ((flags & DETACH_GROUP) &&
1991 ++ (event->attach_state & PERF_ATTACH_GROUP)) {
1992 ++ /*
1993 ++ * Since in that case we cannot possibly be scheduled, simply
1994 ++ * detach now.
1995 ++ */
1996 ++ raw_spin_lock_irq(&ctx->lock);
1997 ++ perf_group_detach(event);
1998 ++ raw_spin_unlock_irq(&ctx->lock);
1999 ++ }
2000 + }
2001 +
2002 + /*
2003 +@@ -6583,6 +6606,27 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
2004 + char *buf = NULL;
2005 + char *name;
2006 +
2007 ++ if (vma->vm_flags & VM_READ)
2008 ++ prot |= PROT_READ;
2009 ++ if (vma->vm_flags & VM_WRITE)
2010 ++ prot |= PROT_WRITE;
2011 ++ if (vma->vm_flags & VM_EXEC)
2012 ++ prot |= PROT_EXEC;
2013 ++
2014 ++ if (vma->vm_flags & VM_MAYSHARE)
2015 ++ flags = MAP_SHARED;
2016 ++ else
2017 ++ flags = MAP_PRIVATE;
2018 ++
2019 ++ if (vma->vm_flags & VM_DENYWRITE)
2020 ++ flags |= MAP_DENYWRITE;
2021 ++ if (vma->vm_flags & VM_MAYEXEC)
2022 ++ flags |= MAP_EXECUTABLE;
2023 ++ if (vma->vm_flags & VM_LOCKED)
2024 ++ flags |= MAP_LOCKED;
2025 ++ if (vma->vm_flags & VM_HUGETLB)
2026 ++ flags |= MAP_HUGETLB;
2027 ++
2028 + if (file) {
2029 + struct inode *inode;
2030 + dev_t dev;
2031 +@@ -6609,27 +6653,6 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
2032 + maj = MAJOR(dev);
2033 + min = MINOR(dev);
2034 +
2035 +- if (vma->vm_flags & VM_READ)
2036 +- prot |= PROT_READ;
2037 +- if (vma->vm_flags & VM_WRITE)
2038 +- prot |= PROT_WRITE;
2039 +- if (vma->vm_flags & VM_EXEC)
2040 +- prot |= PROT_EXEC;
2041 +-
2042 +- if (vma->vm_flags & VM_MAYSHARE)
2043 +- flags = MAP_SHARED;
2044 +- else
2045 +- flags = MAP_PRIVATE;
2046 +-
2047 +- if (vma->vm_flags & VM_DENYWRITE)
2048 +- flags |= MAP_DENYWRITE;
2049 +- if (vma->vm_flags & VM_MAYEXEC)
2050 +- flags |= MAP_EXECUTABLE;
2051 +- if (vma->vm_flags & VM_LOCKED)
2052 +- flags |= MAP_LOCKED;
2053 +- if (vma->vm_flags & VM_HUGETLB)
2054 +- flags |= MAP_HUGETLB;
2055 +-
2056 + goto got_name;
2057 + } else {
2058 + if (vma->vm_ops && vma->vm_ops->name) {
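Two perf fixes above: perf_remove_from_context() learns that event_function_call() can no-op when the task is already TASK_TOMBSTONE, so in that case it detaches the group itself under ctx->lock (with lockdep assertions added to the attach/detach helpers); and perf_event_mmap_event() now computes prot and flags before the file/anonymous branch, so anonymous and special mappings report them too instead of always emitting zero.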
2059 +diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
2060 +index 8c0a0ae43521..b59e6768c5e9 100644
2061 +--- a/kernel/irq/irqdomain.c
2062 ++++ b/kernel/irq/irqdomain.c
2063 +@@ -1346,6 +1346,30 @@ void irq_domain_free_irqs_parent(struct irq_domain *domain,
2064 + }
2065 + EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
2066 +
2067 ++static void __irq_domain_activate_irq(struct irq_data *irq_data)
2068 ++{
2069 ++ if (irq_data && irq_data->domain) {
2070 ++ struct irq_domain *domain = irq_data->domain;
2071 ++
2072 ++ if (irq_data->parent_data)
2073 ++ __irq_domain_activate_irq(irq_data->parent_data);
2074 ++ if (domain->ops->activate)
2075 ++ domain->ops->activate(domain, irq_data);
2076 ++ }
2077 ++}
2078 ++
2079 ++static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
2080 ++{
2081 ++ if (irq_data && irq_data->domain) {
2082 ++ struct irq_domain *domain = irq_data->domain;
2083 ++
2084 ++ if (domain->ops->deactivate)
2085 ++ domain->ops->deactivate(domain, irq_data);
2086 ++ if (irq_data->parent_data)
2087 ++ __irq_domain_deactivate_irq(irq_data->parent_data);
2088 ++ }
2089 ++}
2090 ++
2091 + /**
2092 + * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
2093 + * interrupt
2094 +@@ -1356,13 +1380,9 @@ EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
2095 + */
2096 + void irq_domain_activate_irq(struct irq_data *irq_data)
2097 + {
2098 +- if (irq_data && irq_data->domain) {
2099 +- struct irq_domain *domain = irq_data->domain;
2100 +-
2101 +- if (irq_data->parent_data)
2102 +- irq_domain_activate_irq(irq_data->parent_data);
2103 +- if (domain->ops->activate)
2104 +- domain->ops->activate(domain, irq_data);
2105 ++ if (!irqd_is_activated(irq_data)) {
2106 ++ __irq_domain_activate_irq(irq_data);
2107 ++ irqd_set_activated(irq_data);
2108 + }
2109 + }
2110 +
2111 +@@ -1376,13 +1396,9 @@ void irq_domain_activate_irq(struct irq_data *irq_data)
2112 + */
2113 + void irq_domain_deactivate_irq(struct irq_data *irq_data)
2114 + {
2115 +- if (irq_data && irq_data->domain) {
2116 +- struct irq_domain *domain = irq_data->domain;
2117 +-
2118 +- if (domain->ops->deactivate)
2119 +- domain->ops->deactivate(domain, irq_data);
2120 +- if (irq_data->parent_data)
2121 +- irq_domain_deactivate_irq(irq_data->parent_data);
2122 ++ if (irqd_is_activated(irq_data)) {
2123 ++ __irq_domain_deactivate_irq(irq_data);
2124 ++ irqd_clr_activated(irq_data);
2125 + }
2126 + }
2127 +
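The irq hunks make domain activation idempotent: an IRQD_ACTIVATED state bit is introduced, the recursive parent walks move into __irq_domain_activate_irq()/__irq_domain_deactivate_irq() helpers, and the public entry points only act on an actual state change, preventing double activation or deactivation of the same interrupt. A compact sketch of the guard, names illustrative:

        #include <stdbool.h>
        #include <stddef.h>

        struct node {
                struct node *parent;
                bool activated;
        };

        static void do_activate(struct node *n)
        {
                if (!n)
                        return;
                do_activate(n->parent);         /* parents first, as in the hunk */
                /* hardware activate hook would run here */
        }

        static void activate(struct node *n)
        {
                if (n && !n->activated) {       /* only act on a state change */
                        do_activate(n);
                        n->activated = true;
                }
        }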
2128 +diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
2129 +index b97286c48735..f00b0131c8f9 100644
2130 +--- a/kernel/trace/trace_hwlat.c
2131 ++++ b/kernel/trace/trace_hwlat.c
2132 +@@ -266,7 +266,7 @@ static int get_sample(void)
2133 + static struct cpumask save_cpumask;
2134 + static bool disable_migrate;
2135 +
2136 +-static void move_to_next_cpu(void)
2137 ++static void move_to_next_cpu(bool initmask)
2138 + {
2139 + static struct cpumask *current_mask;
2140 + int next_cpu;
2141 +@@ -275,7 +275,7 @@ static void move_to_next_cpu(void)
2142 + return;
2143 +
2144 + /* Just pick the first CPU on first iteration */
2145 +- if (!current_mask) {
2146 ++ if (initmask) {
2147 + current_mask = &save_cpumask;
2148 + get_online_cpus();
2149 + cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
2150 +@@ -330,10 +330,12 @@ static void move_to_next_cpu(void)
2151 + static int kthread_fn(void *data)
2152 + {
2153 + u64 interval;
2154 ++ bool initmask = true;
2155 +
2156 + while (!kthread_should_stop()) {
2157 +
2158 +- move_to_next_cpu();
2159 ++ move_to_next_cpu(initmask);
2160 ++ initmask = false;
2161 +
2162 + local_irq_disable();
2163 + get_sample();
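The hwlat hunk fixes CPU migration after a tracer restart: move_to_next_cpu() detected "first iteration" via a static pointer that stayed set across instances, so a restarted kthread never reinitialized its cpumask. The kthread now passes an explicit initmask flag that is true only on its own first pass.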
2164 +diff --git a/mm/filemap.c b/mm/filemap.c
2165 +index 779801092ef1..d8d7df82c69a 100644
2166 +--- a/mm/filemap.c
2167 ++++ b/mm/filemap.c
2168 +@@ -1703,6 +1703,11 @@ static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
2169 +
2170 + cond_resched();
2171 + find_page:
2172 ++ if (fatal_signal_pending(current)) {
2173 ++ error = -EINTR;
2174 ++ goto out;
2175 ++ }
2176 ++
2177 + page = find_get_page(mapping, index);
2178 + if (!page) {
2179 + page_cache_sync_readahead(mapping,
2180 +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
2181 +index c3a8141ac788..ede137345a99 100644
2182 +--- a/mm/memory_hotplug.c
2183 ++++ b/mm/memory_hotplug.c
2184 +@@ -1483,17 +1483,20 @@ bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
2185 + }
2186 +
2187 + /*
2188 +- * Confirm all pages in a range [start, end) is belongs to the same zone.
2189 ++ * Confirm all pages in a range [start, end) belong to the same zone.
2190 ++ * When true, return its valid [start, end).
2191 + */
2192 +-int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
2193 ++int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
2194 ++ unsigned long *valid_start, unsigned long *valid_end)
2195 + {
2196 + unsigned long pfn, sec_end_pfn;
2197 ++ unsigned long start, end;
2198 + struct zone *zone = NULL;
2199 + struct page *page;
2200 + int i;
2201 +- for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn);
2202 ++ for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
2203 + pfn < end_pfn;
2204 +- pfn = sec_end_pfn + 1, sec_end_pfn += PAGES_PER_SECTION) {
2205 ++ pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
2206 + /* Make sure the memory section is present first */
2207 + if (!present_section_nr(pfn_to_section_nr(pfn)))
2208 + continue;
2209 +@@ -1509,10 +1512,20 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
2210 + page = pfn_to_page(pfn + i);
2211 + if (zone && page_zone(page) != zone)
2212 + return 0;
2213 ++ if (!zone)
2214 ++ start = pfn + i;
2215 + zone = page_zone(page);
2216 ++ end = pfn + MAX_ORDER_NR_PAGES;
2217 + }
2218 + }
2219 +- return 1;
2220 ++
2221 ++ if (zone) {
2222 ++ *valid_start = start;
2223 ++ *valid_end = end;
2224 ++ return 1;
2225 ++ } else {
2226 ++ return 0;
2227 ++ }
2228 + }
2229 +
2230 + /*
2231 +@@ -1859,6 +1872,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
2232 + long offlined_pages;
2233 + int ret, drain, retry_max, node;
2234 + unsigned long flags;
2235 ++ unsigned long valid_start, valid_end;
2236 + struct zone *zone;
2237 + struct memory_notify arg;
2238 +
2239 +@@ -1869,10 +1883,10 @@ static int __ref __offline_pages(unsigned long start_pfn,
2240 + return -EINVAL;
2241 + /* This makes hotplug much easier...and readable.
2242 + we assume this for now. .*/
2243 +- if (!test_pages_in_a_zone(start_pfn, end_pfn))
2244 ++ if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
2245 + return -EINVAL;
2246 +
2247 +- zone = page_zone(pfn_to_page(start_pfn));
2248 ++ zone = page_zone(pfn_to_page(valid_start));
2249 + node = zone_to_nid(zone);
2250 + nr_pages = end_pfn - start_pfn;
2251 +
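The memory-hotplug hunks tighten offlining: test_pages_in_a_zone() now also returns the valid [start, end) sub-range it actually verified (sections can contain holes, so start_pfn itself may not have a usable struct page in the zone), and __offline_pages() derives the zone from valid_start rather than from a possibly-invalid start_pfn. The section-stepping loop is also corrected, aligning up from start_pfn + 1 and advancing to sec_end_pfn exactly, so a section-aligned start_pfn no longer yields an empty or skipped first section.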
2252 +diff --git a/mm/zswap.c b/mm/zswap.c
2253 +index 275b22cc8df4..dbef27822a98 100644
2254 +--- a/mm/zswap.c
2255 ++++ b/mm/zswap.c
2256 +@@ -78,7 +78,13 @@ static u64 zswap_duplicate_entry;
2257 +
2258 + /* Enable/disable zswap (disabled by default) */
2259 + static bool zswap_enabled;
2260 +-module_param_named(enabled, zswap_enabled, bool, 0644);
2261 ++static int zswap_enabled_param_set(const char *,
2262 ++ const struct kernel_param *);
2263 ++static struct kernel_param_ops zswap_enabled_param_ops = {
2264 ++ .set = zswap_enabled_param_set,
2265 ++ .get = param_get_bool,
2266 ++};
2267 ++module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
2268 +
2269 + /* Crypto compressor to use */
2270 + #define ZSWAP_COMPRESSOR_DEFAULT "lzo"
2271 +@@ -176,6 +182,9 @@ static atomic_t zswap_pools_count = ATOMIC_INIT(0);
2272 + /* used by param callback function */
2273 + static bool zswap_init_started;
2274 +
2275 ++/* fatal error during init */
2276 ++static bool zswap_init_failed;
2277 ++
2278 + /*********************************
2279 + * helpers and fwd declarations
2280 + **********************************/
2281 +@@ -706,6 +715,11 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
2282 + char *s = strstrip((char *)val);
2283 + int ret;
2284 +
2285 ++ if (zswap_init_failed) {
2286 ++ pr_err("can't set param, initialization failed\n");
2287 ++ return -ENODEV;
2288 ++ }
2289 ++
2290 + /* no change required */
2291 + if (!strcmp(s, *(char **)kp->arg))
2292 + return 0;
2293 +@@ -785,6 +799,17 @@ static int zswap_zpool_param_set(const char *val,
2294 + return __zswap_param_set(val, kp, NULL, zswap_compressor);
2295 + }
2296 +
2297 ++static int zswap_enabled_param_set(const char *val,
2298 ++ const struct kernel_param *kp)
2299 ++{
2300 ++ if (zswap_init_failed) {
2301 ++ pr_err("can't enable, initialization failed\n");
2302 ++ return -ENODEV;
2303 ++ }
2304 ++
2305 ++ return param_set_bool(val, kp);
2306 ++}
2307 ++
2308 + /*********************************
2309 + * writeback code
2310 + **********************************/
2311 +@@ -1271,6 +1296,9 @@ static int __init init_zswap(void)
2312 + dstmem_fail:
2313 + zswap_entry_cache_destroy();
2314 + cache_fail:
2315 ++ /* if built-in, we aren't unloaded on failure; don't allow use */
2316 ++ zswap_init_failed = true;
2317 ++ zswap_enabled = false;
2318 + return -ENOMEM;
2319 + }
2320 + /* must be late so crypto has time to come up */
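The zswap hunks keep a failed built-in init from being enabled later: init_zswap() cannot unload on failure when built in, so it now records zswap_init_failed and clears zswap_enabled, and the enabled parameter switches from a plain module_param to module_param_cb() with a setter that rejects writes once init has failed, matching the check added to the pool and compressor setters.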
2321 +diff --git a/net/can/bcm.c b/net/can/bcm.c
2322 +index 436a7537e6a9..5e9ed5ec2860 100644
2323 +--- a/net/can/bcm.c
2324 ++++ b/net/can/bcm.c
2325 +@@ -734,14 +734,23 @@ static struct bcm_op *bcm_find_op(struct list_head *ops,
2326 +
2327 + static void bcm_remove_op(struct bcm_op *op)
2328 + {
2329 +- hrtimer_cancel(&op->timer);
2330 +- hrtimer_cancel(&op->thrtimer);
2331 +-
2332 +- if (op->tsklet.func)
2333 +- tasklet_kill(&op->tsklet);
2334 ++ if (op->tsklet.func) {
2335 ++ while (test_bit(TASKLET_STATE_SCHED, &op->tsklet.state) ||
2336 ++ test_bit(TASKLET_STATE_RUN, &op->tsklet.state) ||
2337 ++ hrtimer_active(&op->timer)) {
2338 ++ hrtimer_cancel(&op->timer);
2339 ++ tasklet_kill(&op->tsklet);
2340 ++ }
2341 ++ }
2342 +
2343 +- if (op->thrtsklet.func)
2344 +- tasklet_kill(&op->thrtsklet);
2345 ++ if (op->thrtsklet.func) {
2346 ++ while (test_bit(TASKLET_STATE_SCHED, &op->thrtsklet.state) ||
2347 ++ test_bit(TASKLET_STATE_RUN, &op->thrtsklet.state) ||
2348 ++ hrtimer_active(&op->thrtimer)) {
2349 ++ hrtimer_cancel(&op->thrtimer);
2350 ++ tasklet_kill(&op->thrtsklet);
2351 ++ }
2352 ++ }
2353 +
2354 + if ((op->frames) && (op->frames != &op->sframe))
2355 + kfree(op->frames);
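The can/bcm hunk closes a teardown race: the hrtimer callback can schedule the tasklet and the tasklet can re-arm the hrtimer, so a single hrtimer_cancel() followed by tasklet_kill() could leave one of the pair pending. bcm_remove_op() now loops, cancelling both, until neither the tasklet (SCHED or RUN state) nor its hrtimer is still active, and does the same for the throttle pair.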
2356 +diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
2357 +index dc6fb79a361f..25d9a9cf7b66 100644
2358 +--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
2359 ++++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
2360 +@@ -260,7 +260,7 @@ static int gssx_dec_option_array(struct xdr_stream *xdr,
2361 + if (!oa->data)
2362 + return -ENOMEM;
2363 +
2364 +- creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL);
2365 ++ creds = kzalloc(sizeof(struct svc_cred), GFP_KERNEL);
2366 + if (!creds) {
2367 + kfree(oa->data);
2368 + return -ENOMEM;
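Finally, gssx_dec_option_array() switches the svc_cred allocation from kmalloc() to kzalloc(): the structure is only partially filled in before use, and later teardown paths walk its pointer members, so uninitialized fields could be dereferenced or freed. Zeroing the allocation leaves the untouched members safely NULL.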