
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Wed, 09 Sep 2020 17:58:40
Message-Id: 1599674301.5daaf1bd8061d1dcbeca1d1a1cd16880585a89f8.mpagano@gentoo
1 commit: 5daaf1bd8061d1dcbeca1d1a1cd16880585a89f8
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Sep 9 17:58:21 2020 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Sep 9 17:58:21 2020 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5daaf1bd
7
8 Linux patch 4.14.197
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1196_linux-4.14.197.patch | 2688 +++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 2692 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 923cca1..7b69642 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -827,6 +827,10 @@ Patch: 1195_linux-4.14.196.patch
21 From: https://www.kernel.org
22 Desc: Linux 4.14.196
23
24 +Patch: 1196_linux-4.14.197.patch
25 +From: https://www.kernel.org
26 +Desc: Linux 4.14.197
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1196_linux-4.14.197.patch b/1196_linux-4.14.197.patch
33 new file mode 100644
34 index 0000000..c2a0343
35 --- /dev/null
36 +++ b/1196_linux-4.14.197.patch
37 @@ -0,0 +1,2688 @@
38 +diff --git a/Documentation/filesystems/affs.txt b/Documentation/filesystems/affs.txt
39 +index 71b63c2b98410..a8f1a58e36922 100644
40 +--- a/Documentation/filesystems/affs.txt
41 ++++ b/Documentation/filesystems/affs.txt
42 +@@ -93,13 +93,15 @@ The Amiga protection flags RWEDRWEDHSPARWED are handled as follows:
43 +
44 + - R maps to r for user, group and others. On directories, R implies x.
45 +
46 +- - If both W and D are allowed, w will be set.
47 ++ - W maps to w.
48 +
49 + - E maps to x.
50 +
51 +- - H and P are always retained and ignored under Linux.
52 ++ - D is ignored.
53 +
54 +- - A is always reset when a file is written to.
55 ++ - H, S and P are always retained and ignored under Linux.
56 ++
57 ++ - A is cleared when a file is written to.
58 +
59 + User id and group id will be used unless set[gu]id are given as mount
60 + options. Since most of the Amiga file systems are single user systems
61 +@@ -111,11 +113,13 @@ Linux -> Amiga:
62 +
63 + The Linux rwxrwxrwx file mode is handled as follows:
64 +
65 +- - r permission will set R for user, group and others.
66 ++ - r permission will allow R for user, group and others.
67 ++
68 ++ - w permission will allow W for user, group and others.
69 +
70 +- - w permission will set W and D for user, group and others.
71 ++ - x permission of the user will allow E for plain files.
72 +
73 +- - x permission of the user will set E for plain files.
74 ++ - D will be allowed for user, group and others.
75 +
76 + - All other flags (suid, sgid, ...) are ignored and will
77 + not be retained.
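
The Linux -> Amiga mapping described above boils down to a few bit tests: the classic single-user R/W/E bits are inverted ("no-read", "no-write", "no-execute"), and D is always allowed. A minimal user-space sketch of just the owner bits, illustrative only and using simplified placeholder flags rather than the kernel's FIBF_* constants:

    #include <stdio.h>
    #include <sys/stat.h>

    /* Placeholder stand-ins for the inverted AmigaOS protection bits. */
    #define P_NOREAD    (1u << 0)
    #define P_NOWRITE   (1u << 1)
    #define P_NOEXECUTE (1u << 2)

    static unsigned mode_to_amiga(mode_t mode)
    {
        unsigned prot = 0;

        if (!(mode & S_IRUSR))
            prot |= P_NOREAD;
        if (!(mode & S_IWUSR))
            prot |= P_NOWRITE;
        if (!(mode & S_IXUSR))
            prot |= P_NOEXECUTE;
        /* Per the updated text, D is always allowed, so no delete-inhibit here. */
        return prot;
    }

    int main(void)
    {
        printf("0644 -> %#x\n", mode_to_amiga(0644)); /* only no-execute is set */
        return 0;
    }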
78 +diff --git a/Makefile b/Makefile
79 +index 5c8b785f72919..3712b4deafbed 100644
80 +--- a/Makefile
81 ++++ b/Makefile
82 +@@ -1,7 +1,7 @@
83 + # SPDX-License-Identifier: GPL-2.0
84 + VERSION = 4
85 + PATCHLEVEL = 14
86 +-SUBLEVEL = 196
87 ++SUBLEVEL = 197
88 + EXTRAVERSION =
89 + NAME = Petit Gorille
90 +
91 +diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
92 +index 1d6d980f80ac0..f88611e241f0e 100644
93 +--- a/arch/arm64/include/asm/kvm_arm.h
94 ++++ b/arch/arm64/include/asm/kvm_arm.h
95 +@@ -78,10 +78,11 @@
96 + * IMO: Override CPSR.I and enable signaling with VI
97 + * FMO: Override CPSR.F and enable signaling with VF
98 + * SWIO: Turn set/way invalidates into set/way clean+invalidate
99 ++ * PTW: Take a stage2 fault if a stage1 walk steps in device memory
100 + */
101 + #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
102 + HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \
103 +- HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW)
104 ++ HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_PTW)
105 + #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
106 + #define HCR_INT_OVERRIDE (HCR_FMO | HCR_IMO)
107 + #define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
108 +diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
109 +index c59e81b651328..b0f0fb81f5f5e 100644
110 +--- a/arch/arm64/include/asm/kvm_asm.h
111 ++++ b/arch/arm64/include/asm/kvm_asm.h
112 +@@ -83,6 +83,34 @@ extern u32 __init_stage2_translation(void);
113 + *__hyp_this_cpu_ptr(sym); \
114 + })
115 +
116 ++#define __KVM_EXTABLE(from, to) \
117 ++ " .pushsection __kvm_ex_table, \"a\"\n" \
118 ++ " .align 3\n" \
119 ++ " .long (" #from " - .), (" #to " - .)\n" \
120 ++ " .popsection\n"
121 ++
122 ++
123 ++#define __kvm_at(at_op, addr) \
124 ++( { \
125 ++ int __kvm_at_err = 0; \
126 ++ u64 spsr, elr; \
127 ++ asm volatile( \
128 ++ " mrs %1, spsr_el2\n" \
129 ++ " mrs %2, elr_el2\n" \
130 ++ "1: at "at_op", %3\n" \
131 ++ " isb\n" \
132 ++ " b 9f\n" \
133 ++ "2: msr spsr_el2, %1\n" \
134 ++ " msr elr_el2, %2\n" \
135 ++ " mov %w0, %4\n" \
136 ++ "9:\n" \
137 ++ __KVM_EXTABLE(1b, 2b) \
138 ++ : "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr) \
139 ++ : "r" (addr), "i" (-EFAULT)); \
140 ++ __kvm_at_err; \
141 ++} )
142 ++
143 ++
144 + #else /* __ASSEMBLY__ */
145 +
146 + .macro hyp_adr_this_cpu reg, sym, tmp
147 +@@ -107,6 +135,21 @@ extern u32 __init_stage2_translation(void);
148 + kern_hyp_va \vcpu
149 + .endm
150 +
151 ++/*
152 ++ * KVM extable for unexpected exceptions.
153 ++ * In the same format _asm_extable, but output to a different section so that
154 ++ * it can be mapped to EL2. The KVM version is not sorted. The caller must
155 ++ * ensure:
156 ++ * x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented
157 ++ * code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup.
158 ++ */
159 ++.macro _kvm_extable, from, to
160 ++ .pushsection __kvm_ex_table, "a"
161 ++ .align 3
162 ++ .long (\from - .), (\to - .)
163 ++ .popsection
164 ++.endm
165 ++
166 + #endif
167 +
168 + #endif /* __ARM_KVM_ASM_H__ */
169 +diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
170 +index c4e55176f4b6d..4c11d3e64aef4 100644
171 +--- a/arch/arm64/kernel/vmlinux.lds.S
172 ++++ b/arch/arm64/kernel/vmlinux.lds.S
173 +@@ -24,6 +24,13 @@ ENTRY(_text)
174 +
175 + jiffies = jiffies_64;
176 +
177 ++
178 ++#define HYPERVISOR_EXTABLE \
179 ++ . = ALIGN(SZ_8); \
180 ++ VMLINUX_SYMBOL(__start___kvm_ex_table) = .; \
181 ++ *(__kvm_ex_table) \
182 ++ VMLINUX_SYMBOL(__stop___kvm_ex_table) = .;
183 ++
184 + #define HYPERVISOR_TEXT \
185 + /* \
186 + * Align to 4 KB so that \
187 +@@ -39,6 +46,7 @@ jiffies = jiffies_64;
188 + VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; \
189 + VMLINUX_SYMBOL(__hyp_text_start) = .; \
190 + *(.hyp.text) \
191 ++ HYPERVISOR_EXTABLE \
192 + VMLINUX_SYMBOL(__hyp_text_end) = .;
193 +
194 + #define IDMAP_TEXT \
195 +diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
196 +index a360ac6e89e9d..4e0eac361f87c 100644
197 +--- a/arch/arm64/kvm/hyp/entry.S
198 ++++ b/arch/arm64/kvm/hyp/entry.S
199 +@@ -17,6 +17,7 @@
200 +
201 + #include <linux/linkage.h>
202 +
203 ++#include <asm/alternative.h>
204 + #include <asm/asm-offsets.h>
205 + #include <asm/assembler.h>
206 + #include <asm/fpsimdmacros.h>
207 +@@ -62,6 +63,15 @@ ENTRY(__guest_enter)
208 + // Store the host regs
209 + save_callee_saved_regs x1
210 +
211 ++ // Now the host state is stored if we have a pending RAS SError it must
212 ++ // affect the host. If any asynchronous exception is pending we defer
213 ++ // the guest entry.
214 ++ mrs x1, isr_el1
215 ++ cbz x1, 1f
216 ++ mov x0, #ARM_EXCEPTION_IRQ
217 ++ ret
218 ++
219 ++1:
220 + add x18, x0, #VCPU_CONTEXT
221 +
222 + // Restore guest regs x0-x17
223 +@@ -135,18 +145,22 @@ ENTRY(__guest_exit)
224 + // This is our single instruction exception window. A pending
225 + // SError is guaranteed to occur at the earliest when we unmask
226 + // it, and at the latest just after the ISB.
227 +- .global abort_guest_exit_start
228 + abort_guest_exit_start:
229 +
230 + isb
231 +
232 +- .global abort_guest_exit_end
233 + abort_guest_exit_end:
234 ++ msr daifset, #4 // Mask aborts
235 ++ ret
236 ++
237 ++ _kvm_extable abort_guest_exit_start, 9997f
238 ++ _kvm_extable abort_guest_exit_end, 9997f
239 ++9997:
240 ++ msr daifset, #4 // Mask aborts
241 ++ mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
242 +
243 +- // If the exception took place, restore the EL1 exception
244 +- // context so that we can report some information.
245 +- // Merge the exception code with the SError pending bit.
246 +- tbz x0, #ARM_EXIT_WITH_SERROR_BIT, 1f
247 ++ // restore the EL1 exception context so that we can report some
248 ++ // information. Merge the exception code with the SError pending bit.
249 + msr elr_el2, x2
250 + msr esr_el2, x3
251 + msr spsr_el2, x4
252 +diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
253 +index 3c283fd8c8f5a..5e041eabdd03e 100644
254 +--- a/arch/arm64/kvm/hyp/hyp-entry.S
255 ++++ b/arch/arm64/kvm/hyp/hyp-entry.S
256 +@@ -25,6 +25,30 @@
257 + #include <asm/kvm_asm.h>
258 + #include <asm/kvm_mmu.h>
259 +
260 ++.macro save_caller_saved_regs_vect
261 ++ stp x0, x1, [sp, #-16]!
262 ++ stp x2, x3, [sp, #-16]!
263 ++ stp x4, x5, [sp, #-16]!
264 ++ stp x6, x7, [sp, #-16]!
265 ++ stp x8, x9, [sp, #-16]!
266 ++ stp x10, x11, [sp, #-16]!
267 ++ stp x12, x13, [sp, #-16]!
268 ++ stp x14, x15, [sp, #-16]!
269 ++ stp x16, x17, [sp, #-16]!
270 ++.endm
271 ++
272 ++.macro restore_caller_saved_regs_vect
273 ++ ldp x16, x17, [sp], #16
274 ++ ldp x14, x15, [sp], #16
275 ++ ldp x12, x13, [sp], #16
276 ++ ldp x10, x11, [sp], #16
277 ++ ldp x8, x9, [sp], #16
278 ++ ldp x6, x7, [sp], #16
279 ++ ldp x4, x5, [sp], #16
280 ++ ldp x2, x3, [sp], #16
281 ++ ldp x0, x1, [sp], #16
282 ++.endm
283 ++
284 + .text
285 + .pushsection .hyp.text, "ax"
286 +
287 +@@ -183,26 +207,24 @@ el1_error:
288 + mov x0, #ARM_EXCEPTION_EL1_SERROR
289 + b __guest_exit
290 +
291 ++el2_sync:
292 ++ save_caller_saved_regs_vect
293 ++ stp x29, x30, [sp, #-16]!
294 ++ bl kvm_unexpected_el2_exception
295 ++ ldp x29, x30, [sp], #16
296 ++ restore_caller_saved_regs_vect
297 ++
298 ++ eret
299 ++
300 + el2_error:
301 +- /*
302 +- * Only two possibilities:
303 +- * 1) Either we come from the exit path, having just unmasked
304 +- * PSTATE.A: change the return code to an EL2 fault, and
305 +- * carry on, as we're already in a sane state to handle it.
306 +- * 2) Or we come from anywhere else, and that's a bug: we panic.
307 +- *
308 +- * For (1), x0 contains the original return code and x1 doesn't
309 +- * contain anything meaningful at that stage. We can reuse them
310 +- * as temp registers.
311 +- * For (2), who cares?
312 +- */
313 +- mrs x0, elr_el2
314 +- adr x1, abort_guest_exit_start
315 +- cmp x0, x1
316 +- adr x1, abort_guest_exit_end
317 +- ccmp x0, x1, #4, ne
318 +- b.ne __hyp_panic
319 +- mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
320 ++ save_caller_saved_regs_vect
321 ++ stp x29, x30, [sp, #-16]!
322 ++
323 ++ bl kvm_unexpected_el2_exception
324 ++
325 ++ ldp x29, x30, [sp], #16
326 ++ restore_caller_saved_regs_vect
327 ++
328 + eret
329 +
330 + ENTRY(__hyp_do_panic)
331 +@@ -231,7 +253,6 @@ ENDPROC(\label)
332 + invalid_vector el2t_irq_invalid
333 + invalid_vector el2t_fiq_invalid
334 + invalid_vector el2t_error_invalid
335 +- invalid_vector el2h_sync_invalid
336 + invalid_vector el2h_irq_invalid
337 + invalid_vector el2h_fiq_invalid
338 + invalid_vector el1_sync_invalid
339 +@@ -248,7 +269,7 @@ ENTRY(__kvm_hyp_vector)
340 + ventry el2t_fiq_invalid // FIQ EL2t
341 + ventry el2t_error_invalid // Error EL2t
342 +
343 +- ventry el2h_sync_invalid // Synchronous EL2h
344 ++ ventry el2_sync // Synchronous EL2h
345 + ventry el2h_irq_invalid // IRQ EL2h
346 + ventry el2h_fiq_invalid // FIQ EL2h
347 + ventry el2_error // Error EL2h
348 +diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
349 +index 0ad952e074457..99ae75a43985c 100644
350 +--- a/arch/arm64/kvm/hyp/switch.c
351 ++++ b/arch/arm64/kvm/hyp/switch.c
352 +@@ -22,11 +22,15 @@
353 +
354 + #include <kvm/arm_psci.h>
355 +
356 ++#include <asm/extable.h>
357 + #include <asm/kvm_asm.h>
358 + #include <asm/kvm_emulate.h>
359 + #include <asm/kvm_hyp.h>
360 + #include <asm/fpsimd.h>
361 +
362 ++extern struct exception_table_entry __start___kvm_ex_table;
363 ++extern struct exception_table_entry __stop___kvm_ex_table;
364 ++
365 + static bool __hyp_text __fpsimd_enabled_nvhe(void)
366 + {
367 + return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
368 +@@ -216,10 +220,10 @@ static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
369 + * saved the guest context yet, and we may return early...
370 + */
371 + par = read_sysreg(par_el1);
372 +- asm volatile("at s1e1r, %0" : : "r" (far));
373 +- isb();
374 +-
375 +- tmp = read_sysreg(par_el1);
376 ++ if (!__kvm_at("s1e1r", far))
377 ++ tmp = read_sysreg(par_el1);
378 ++ else
379 ++ tmp = 1; /* back to the guest */
380 + write_sysreg(par, par_el1);
381 +
382 + if (unlikely(tmp & 1))
383 +@@ -486,3 +490,30 @@ void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
384 +
385 + unreachable();
386 + }
387 ++
388 ++asmlinkage void __hyp_text kvm_unexpected_el2_exception(void)
389 ++{
390 ++ unsigned long addr, fixup;
391 ++ struct kvm_cpu_context *host_ctxt;
392 ++ struct exception_table_entry *entry, *end;
393 ++ unsigned long elr_el2 = read_sysreg(elr_el2);
394 ++
395 ++ entry = hyp_symbol_addr(__start___kvm_ex_table);
396 ++ end = hyp_symbol_addr(__stop___kvm_ex_table);
397 ++ host_ctxt = __hyp_this_cpu_ptr(kvm_host_cpu_state);
398 ++
399 ++ while (entry < end) {
400 ++ addr = (unsigned long)&entry->insn + entry->insn;
401 ++ fixup = (unsigned long)&entry->fixup + entry->fixup;
402 ++
403 ++ if (addr != elr_el2) {
404 ++ entry++;
405 ++ continue;
406 ++ }
407 ++
408 ++ write_sysreg(fixup, elr_el2);
409 ++ return;
410 ++ }
411 ++
412 ++ hyp_panic(host_ctxt);
413 ++}
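
The lookup above decodes the same position-relative 32-bit offsets that __KVM_EXTABLE and _kvm_extable emit earlier in this patch: each entry stores "target minus address of the field", so adding the field's own address recovers the absolute address. A standalone sketch of that decode, assuming a layout like arm64's struct exception_table_entry (two int offsets); everything is kept in static storage so the 32-bit offsets cannot overflow in this demo:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the relative-offset idea of struct exception_table_entry. */
    struct ex_entry {
        int insn;   /* offset from &entry->insn  to the faulting instruction */
        int fixup;  /* offset from &entry->fixup to the fixup handler        */
    };

    static uintptr_t ex_insn_addr(const struct ex_entry *e)
    {
        return (uintptr_t)&e->insn + e->insn;
    }

    static uintptr_t ex_fixup_addr(const struct ex_entry *e)
    {
        return (uintptr_t)&e->fixup + e->fixup;
    }

    int main(void)
    {
        static const char insn_site[4], fixup_site[4]; /* stand-ins for code addresses */
        static struct ex_entry e;

        e.insn  = (int)((uintptr_t)insn_site  - (uintptr_t)&e.insn);
        e.fixup = (int)((uintptr_t)fixup_site - (uintptr_t)&e.fixup);

        printf("insn resolves:  %s\n", ex_insn_addr(&e)  == (uintptr_t)insn_site  ? "yes" : "no");
        printf("fixup resolves: %s\n", ex_fixup_addr(&e) == (uintptr_t)fixup_site ? "yes" : "no");
        return 0;
    }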
414 +diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
415 +index 45fbcbbf2504e..3018582794efc 100644
416 +--- a/arch/mips/kernel/smp-bmips.c
417 ++++ b/arch/mips/kernel/smp-bmips.c
418 +@@ -240,6 +240,8 @@ static int bmips_boot_secondary(int cpu, struct task_struct *idle)
419 + */
420 + static void bmips_init_secondary(void)
421 + {
422 ++ bmips_cpu_setup();
423 ++
424 + switch (current_cpu_type()) {
425 + case CPU_BMIPS4350:
426 + case CPU_BMIPS4380:
427 +diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
428 +index bacd67f5d71df..e4de107bf7fd8 100644
429 +--- a/arch/mips/mm/c-r4k.c
430 ++++ b/arch/mips/mm/c-r4k.c
431 +@@ -1781,7 +1781,11 @@ static void setup_scache(void)
432 + printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
433 + scache_size >> 10,
434 + way_string[c->scache.ways], c->scache.linesz);
435 ++
436 ++ if (current_cpu_type() == CPU_BMIPS5000)
437 ++ c->options |= MIPS_CPU_INCLUSIVE_CACHES;
438 + }
439 ++
440 + #else
441 + if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
442 + panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
443 +diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
444 +index 0095ddb58ff69..50f6661ba5664 100644
445 +--- a/arch/s390/include/asm/percpu.h
446 ++++ b/arch/s390/include/asm/percpu.h
447 +@@ -29,7 +29,7 @@
448 + typedef typeof(pcp) pcp_op_T__; \
449 + pcp_op_T__ old__, new__, prev__; \
450 + pcp_op_T__ *ptr__; \
451 +- preempt_disable(); \
452 ++ preempt_disable_notrace(); \
453 + ptr__ = raw_cpu_ptr(&(pcp)); \
454 + prev__ = *ptr__; \
455 + do { \
456 +@@ -37,7 +37,7 @@
457 + new__ = old__ op (val); \
458 + prev__ = cmpxchg(ptr__, old__, new__); \
459 + } while (prev__ != old__); \
460 +- preempt_enable(); \
461 ++ preempt_enable_notrace(); \
462 + new__; \
463 + })
464 +
465 +@@ -68,7 +68,7 @@
466 + typedef typeof(pcp) pcp_op_T__; \
467 + pcp_op_T__ val__ = (val); \
468 + pcp_op_T__ old__, *ptr__; \
469 +- preempt_disable(); \
470 ++ preempt_disable_notrace(); \
471 + ptr__ = raw_cpu_ptr(&(pcp)); \
472 + if (__builtin_constant_p(val__) && \
473 + ((szcast)val__ > -129) && ((szcast)val__ < 128)) { \
474 +@@ -84,7 +84,7 @@
475 + : [val__] "d" (val__) \
476 + : "cc"); \
477 + } \
478 +- preempt_enable(); \
479 ++ preempt_enable_notrace(); \
480 + }
481 +
482 + #define this_cpu_add_4(pcp, val) arch_this_cpu_add(pcp, val, "laa", "asi", int)
483 +@@ -95,14 +95,14 @@
484 + typedef typeof(pcp) pcp_op_T__; \
485 + pcp_op_T__ val__ = (val); \
486 + pcp_op_T__ old__, *ptr__; \
487 +- preempt_disable(); \
488 ++ preempt_disable_notrace(); \
489 + ptr__ = raw_cpu_ptr(&(pcp)); \
490 + asm volatile( \
491 + op " %[old__],%[val__],%[ptr__]\n" \
492 + : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
493 + : [val__] "d" (val__) \
494 + : "cc"); \
495 +- preempt_enable(); \
496 ++ preempt_enable_notrace(); \
497 + old__ + val__; \
498 + })
499 +
500 +@@ -114,14 +114,14 @@
501 + typedef typeof(pcp) pcp_op_T__; \
502 + pcp_op_T__ val__ = (val); \
503 + pcp_op_T__ old__, *ptr__; \
504 +- preempt_disable(); \
505 ++ preempt_disable_notrace(); \
506 + ptr__ = raw_cpu_ptr(&(pcp)); \
507 + asm volatile( \
508 + op " %[old__],%[val__],%[ptr__]\n" \
509 + : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
510 + : [val__] "d" (val__) \
511 + : "cc"); \
512 +- preempt_enable(); \
513 ++ preempt_enable_notrace(); \
514 + }
515 +
516 + #define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lan")
517 +@@ -136,10 +136,10 @@
518 + typedef typeof(pcp) pcp_op_T__; \
519 + pcp_op_T__ ret__; \
520 + pcp_op_T__ *ptr__; \
521 +- preempt_disable(); \
522 ++ preempt_disable_notrace(); \
523 + ptr__ = raw_cpu_ptr(&(pcp)); \
524 + ret__ = cmpxchg(ptr__, oval, nval); \
525 +- preempt_enable(); \
526 ++ preempt_enable_notrace(); \
527 + ret__; \
528 + })
529 +
530 +@@ -152,10 +152,10 @@
531 + ({ \
532 + typeof(pcp) *ptr__; \
533 + typeof(pcp) ret__; \
534 +- preempt_disable(); \
535 ++ preempt_disable_notrace(); \
536 + ptr__ = raw_cpu_ptr(&(pcp)); \
537 + ret__ = xchg(ptr__, nval); \
538 +- preempt_enable(); \
539 ++ preempt_enable_notrace(); \
540 + ret__; \
541 + })
542 +
543 +@@ -171,11 +171,11 @@
544 + typeof(pcp1) *p1__; \
545 + typeof(pcp2) *p2__; \
546 + int ret__; \
547 +- preempt_disable(); \
548 ++ preempt_disable_notrace(); \
549 + p1__ = raw_cpu_ptr(&(pcp1)); \
550 + p2__ = raw_cpu_ptr(&(pcp2)); \
551 + ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__); \
552 +- preempt_enable(); \
553 ++ preempt_enable_notrace(); \
554 + ret__; \
555 + })
556 +
557 +diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
558 +index c45b90bb93393..c75e75932807e 100644
559 +--- a/arch/xtensa/platforms/iss/simdisk.c
560 ++++ b/arch/xtensa/platforms/iss/simdisk.c
561 +@@ -21,7 +21,6 @@
562 + #include <platform/simcall.h>
563 +
564 + #define SIMDISK_MAJOR 240
565 +-#define SECTOR_SHIFT 9
566 + #define SIMDISK_MINORS 1
567 + #define MAX_SIMDISK_COUNT 10
568 +
569 +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
570 +index a3a65f5490c02..f90a20cad3fef 100644
571 +--- a/drivers/ata/libata-core.c
572 ++++ b/drivers/ata/libata-core.c
573 +@@ -4488,9 +4488,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
574 + /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
575 + { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
576 +
577 +- /* Some Sandisk SSDs lock up hard with NCQ enabled. Reported on
578 +- SD7SN6S256G and SD8SN8U256G */
579 +- { "SanDisk SD[78]SN*G", NULL, ATA_HORKAGE_NONCQ, },
580 ++ /* Sandisk SD7/8/9s lock up hard on large trims */
581 ++ { "SanDisk SD[789]*", NULL, ATA_HORKAGE_MAX_TRIM_128M, },
582 +
583 + /* devices which puke on READ_NATIVE_MAX */
584 + { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
585 +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
586 +index bc2c27f0493fc..4b299efbd8047 100644
587 +--- a/drivers/ata/libata-scsi.c
588 ++++ b/drivers/ata/libata-scsi.c
589 +@@ -2392,6 +2392,7 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
590 +
591 + static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
592 + {
593 ++ struct ata_device *dev = args->dev;
594 + u16 min_io_sectors;
595 +
596 + rbuf[1] = 0xb0;
597 +@@ -2417,7 +2418,12 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
598 + * with the unmap bit set.
599 + */
600 + if (ata_id_has_trim(args->id)) {
601 +- put_unaligned_be64(65535 * ATA_MAX_TRIM_RNUM, &rbuf[36]);
602 ++ u64 max_blocks = 65535 * ATA_MAX_TRIM_RNUM;
603 ++
604 ++ if (dev->horkage & ATA_HORKAGE_MAX_TRIM_128M)
605 ++ max_blocks = 128 << (20 - SECTOR_SHIFT);
606 ++
607 ++ put_unaligned_be64(max_blocks, &rbuf[36]);
608 + put_unaligned_be32(1, &rbuf[28]);
609 + }
610 +
611 +diff --git a/drivers/block/brd.c b/drivers/block/brd.c
612 +index 0129b1921cb36..78287f029cf07 100644
613 +--- a/drivers/block/brd.c
614 ++++ b/drivers/block/brd.c
615 +@@ -28,7 +28,6 @@
616 +
617 + #include <linux/uaccess.h>
618 +
619 +-#define SECTOR_SHIFT 9
620 + #define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
621 + #define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT)
622 +
623 +diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
624 +index ec670a1b7e02f..b499e72b2847e 100644
625 +--- a/drivers/block/null_blk.c
626 ++++ b/drivers/block/null_blk.c
627 +@@ -16,10 +16,8 @@
628 + #include <linux/configfs.h>
629 + #include <linux/badblocks.h>
630 +
631 +-#define SECTOR_SHIFT 9
632 + #define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
633 + #define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT)
634 +-#define SECTOR_SIZE (1 << SECTOR_SHIFT)
635 + #define SECTOR_MASK (PAGE_SECTORS - 1)
636 +
637 + #define FREE_BATCH 16
638 +diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
639 +index 557cf52f674b5..b9b20e1fa8c88 100644
640 +--- a/drivers/block/rbd.c
641 ++++ b/drivers/block/rbd.c
642 +@@ -50,15 +50,6 @@
643 +
644 + #define RBD_DEBUG /* Activate rbd_assert() calls */
645 +
646 +-/*
647 +- * The basic unit of block I/O is a sector. It is interpreted in a
648 +- * number of contexts in Linux (blk, bio, genhd), but the default is
649 +- * universally 512 bytes. These symbols are just slightly more
650 +- * meaningful than the bare numbers they represent.
651 +- */
652 +-#define SECTOR_SHIFT 9
653 +-#define SECTOR_SIZE (1ULL << SECTOR_SHIFT)
654 +-
655 + /*
656 + * Increment the given counter and return its updated value.
657 + * If the counter is already 0 it will not be incremented.
658 +diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
659 +index 31762db861e38..1e9bf65c0bfba 100644
660 +--- a/drivers/block/zram/zram_drv.h
661 ++++ b/drivers/block/zram/zram_drv.h
662 +@@ -37,7 +37,6 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
663 +
664 + /*-- End of configurable params */
665 +
666 +-#define SECTOR_SHIFT 9
667 + #define SECTORS_PER_PAGE_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
668 + #define SECTORS_PER_PAGE (1 << SECTORS_PER_PAGE_SHIFT)
669 + #define ZRAM_LOGICAL_BLOCK_SHIFT 12
670 +diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
671 +index ed4df58a855e1..da9eb38d79d9c 100644
672 +--- a/drivers/cpuidle/cpuidle.c
673 ++++ b/drivers/cpuidle/cpuidle.c
674 +@@ -144,7 +144,8 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv,
675 + */
676 + stop_critical_timings();
677 + drv->states[index].enter_s2idle(dev, drv, index);
678 +- WARN_ON(!irqs_disabled());
679 ++ if (WARN_ON_ONCE(!irqs_disabled()))
680 ++ local_irq_disable();
681 + /*
682 + * timekeeping_resume() that will be called by tick_unfreeze() for the
683 + * first CPU executing it calls functions containing RCU read-side
684 +diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
685 +index 21ed0e20c5d91..cf3225a229890 100644
686 +--- a/drivers/dma/at_hdmac.c
687 ++++ b/drivers/dma/at_hdmac.c
688 +@@ -1677,6 +1677,8 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
689 + return NULL;
690 +
691 + dmac_pdev = of_find_device_by_node(dma_spec->np);
692 ++ if (!dmac_pdev)
693 ++ return NULL;
694 +
695 + dma_cap_zero(mask);
696 + dma_cap_set(DMA_SLAVE, mask);
697 +diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
698 +index 91fd395c90c4c..8344a60c2131b 100644
699 +--- a/drivers/dma/of-dma.c
700 ++++ b/drivers/dma/of-dma.c
701 +@@ -72,12 +72,12 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
702 + return NULL;
703 +
704 + chan = ofdma_target->of_dma_xlate(&dma_spec_target, ofdma_target);
705 +- if (chan) {
706 +- chan->router = ofdma->dma_router;
707 +- chan->route_data = route_data;
708 +- } else {
709 ++ if (IS_ERR_OR_NULL(chan)) {
710 + ofdma->dma_router->route_free(ofdma->dma_router->dev,
711 + route_data);
712 ++ } else {
713 ++ chan->router = ofdma->dma_router;
714 ++ chan->route_data = route_data;
715 + }
716 +
717 + /*
718 +diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
719 +index b4fa555a243f9..ff8b7042d28f4 100644
720 +--- a/drivers/dma/pl330.c
721 ++++ b/drivers/dma/pl330.c
722 +@@ -2661,6 +2661,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
723 + while (burst != (1 << desc->rqcfg.brst_size))
724 + desc->rqcfg.brst_size++;
725 +
726 ++ desc->rqcfg.brst_len = get_burst_len(desc, len);
727 + /*
728 + * If burst size is smaller than bus width then make sure we only
729 + * transfer one at a time to avoid a burst stradling an MFIFO entry.
730 +@@ -2668,7 +2669,6 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
731 + if (desc->rqcfg.brst_size * 8 < pl330->pcfg.data_bus_width)
732 + desc->rqcfg.brst_len = 1;
733 +
734 +- desc->rqcfg.brst_len = get_burst_len(desc, len);
735 + desc->bytes_requested = len;
736 +
737 + desc->txd.flags = flags;
738 +diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
739 +index d9c0687435a05..c59240b566d83 100644
740 +--- a/drivers/gpu/drm/msm/msm_drv.c
741 ++++ b/drivers/gpu/drm/msm/msm_drv.c
742 +@@ -1134,6 +1134,13 @@ static int msm_pdev_remove(struct platform_device *pdev)
743 + return 0;
744 + }
745 +
746 ++static void msm_pdev_shutdown(struct platform_device *pdev)
747 ++{
748 ++ struct drm_device *drm = platform_get_drvdata(pdev);
749 ++
750 ++ drm_atomic_helper_shutdown(drm);
751 ++}
752 ++
753 + static const struct of_device_id dt_match[] = {
754 + { .compatible = "qcom,mdp4", .data = (void *)4 }, /* MDP4 */
755 + { .compatible = "qcom,mdss", .data = (void *)5 }, /* MDP5 MDSS */
756 +@@ -1144,6 +1151,7 @@ MODULE_DEVICE_TABLE(of, dt_match);
757 + static struct platform_driver msm_platform_driver = {
758 + .probe = msm_pdev_probe,
759 + .remove = msm_pdev_remove,
760 ++ .shutdown = msm_pdev_shutdown,
761 + .driver = {
762 + .name = "msm",
763 + .of_match_table = dt_match,
764 +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
765 +index 75b0a337114df..ff6e327cbd027 100644
766 +--- a/drivers/hid/hid-core.c
767 ++++ b/drivers/hid/hid-core.c
768 +@@ -1426,6 +1426,17 @@ static void hid_output_field(const struct hid_device *hid,
769 + }
770 + }
771 +
772 ++/*
773 ++ * Compute the size of a report.
774 ++ */
775 ++static size_t hid_compute_report_size(struct hid_report *report)
776 ++{
777 ++ if (report->size)
778 ++ return ((report->size - 1) >> 3) + 1;
779 ++
780 ++ return 0;
781 ++}
782 ++
783 + /*
784 + * Create a report. 'data' has to be allocated using
785 + * hid_alloc_report_buf() so that it has proper size.
786 +@@ -1438,7 +1449,7 @@ void hid_output_report(struct hid_report *report, __u8 *data)
787 + if (report->id > 0)
788 + *data++ = report->id;
789 +
790 +- memset(data, 0, ((report->size - 1) >> 3) + 1);
791 ++ memset(data, 0, hid_compute_report_size(report));
792 + for (n = 0; n < report->maxfield; n++)
793 + hid_output_field(report->device, report->field[n], data);
794 + }
795 +@@ -1565,7 +1576,7 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
796 + csize--;
797 + }
798 +
799 +- rsize = ((report->size - 1) >> 3) + 1;
800 ++ rsize = hid_compute_report_size(report);
801 +
802 + if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE)
803 + rsize = HID_MAX_BUFFER_SIZE - 1;
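
hid_compute_report_size() is a guarded bits-to-bytes round-up: ((size - 1) >> 3) + 1 equals ceil(size / 8) for size > 0, and the new helper adds the size == 0 guard that the open-coded expression lacked (with an unsigned size of 0 it wrapped to a huge length). A quick check outside the kernel, reproducing the same formula for illustration:

    #include <assert.h>
    #include <stddef.h>

    /* Same formula as the patched helper. */
    static size_t report_bytes(size_t report_size_bits)
    {
        if (report_size_bits)
            return ((report_size_bits - 1) >> 3) + 1;
        return 0;
    }

    int main(void)
    {
        assert(report_bytes(0)  == 0);  /* the unguarded expression would wrap here */
        assert(report_bytes(1)  == 1);
        assert(report_bytes(8)  == 1);
        assert(report_bytes(9)  == 2);
        assert(report_bytes(64) == 8);
        return 0;
    }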
804 +diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
805 +index 204ccf6745333..3624d6e3384ff 100644
806 +--- a/drivers/hid/hid-input.c
807 ++++ b/drivers/hid/hid-input.c
808 +@@ -1116,6 +1116,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
809 + }
810 +
811 + mapped:
812 ++ /* Mapping failed, bail out */
813 ++ if (!bit)
814 ++ return;
815 ++
816 + if (device->driver->input_mapped &&
817 + device->driver->input_mapped(device, hidinput, field, usage,
818 + &bit, &max) < 0) {
819 +diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
820 +index db29bf539a4b2..ac31998f93a88 100644
821 +--- a/drivers/hid/hid-multitouch.c
822 ++++ b/drivers/hid/hid-multitouch.c
823 +@@ -616,6 +616,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
824 + (usage->hid & HID_USAGE) > 1)
825 + code--;
826 + hid_map_usage(hi, usage, bit, max, EV_KEY, code);
827 ++ if (!*bit)
828 ++ return -1;
829 + input_set_capability(hi->input, EV_KEY, code);
830 + return 1;
831 +
832 +diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
833 +index 5c677ba440143..b201129a9beae 100644
834 +--- a/drivers/hwmon/applesmc.c
835 ++++ b/drivers/hwmon/applesmc.c
836 +@@ -760,15 +760,18 @@ static ssize_t applesmc_light_show(struct device *dev,
837 + }
838 +
839 + ret = applesmc_read_key(LIGHT_SENSOR_LEFT_KEY, buffer, data_length);
840 ++ if (ret)
841 ++ goto out;
842 + /* newer macbooks report a single 10-bit bigendian value */
843 + if (data_length == 10) {
844 + left = be16_to_cpu(*(__be16 *)(buffer + 6)) >> 2;
845 + goto out;
846 + }
847 + left = buffer[2];
848 ++
849 ++ ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length);
850 + if (ret)
851 + goto out;
852 +- ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length);
853 + right = buffer[2];
854 +
855 + out:
856 +@@ -817,12 +820,11 @@ static ssize_t applesmc_show_fan_speed(struct device *dev,
857 + to_index(attr));
858 +
859 + ret = applesmc_read_key(newkey, buffer, 2);
860 +- speed = ((buffer[0] << 8 | buffer[1]) >> 2);
861 +-
862 + if (ret)
863 + return ret;
864 +- else
865 +- return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", speed);
866 ++
867 ++ speed = ((buffer[0] << 8 | buffer[1]) >> 2);
868 ++ return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", speed);
869 + }
870 +
871 + static ssize_t applesmc_store_fan_speed(struct device *dev,
872 +@@ -858,12 +860,11 @@ static ssize_t applesmc_show_fan_manual(struct device *dev,
873 + u8 buffer[2];
874 +
875 + ret = applesmc_read_key(FANS_MANUAL, buffer, 2);
876 +- manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01;
877 +-
878 + if (ret)
879 + return ret;
880 +- else
881 +- return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", manual);
882 ++
883 ++ manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01;
884 ++ return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", manual);
885 + }
886 +
887 + static ssize_t applesmc_store_fan_manual(struct device *dev,
888 +@@ -879,10 +880,11 @@ static ssize_t applesmc_store_fan_manual(struct device *dev,
889 + return -EINVAL;
890 +
891 + ret = applesmc_read_key(FANS_MANUAL, buffer, 2);
892 +- val = (buffer[0] << 8 | buffer[1]);
893 + if (ret)
894 + goto out;
895 +
896 ++ val = (buffer[0] << 8 | buffer[1]);
897 ++
898 + if (input)
899 + val = val | (0x01 << to_index(attr));
900 + else
901 +@@ -958,13 +960,12 @@ static ssize_t applesmc_key_count_show(struct device *dev,
902 + u32 count;
903 +
904 + ret = applesmc_read_key(KEY_COUNT_KEY, buffer, 4);
905 +- count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) +
906 +- ((u32)buffer[2]<<8) + buffer[3];
907 +-
908 + if (ret)
909 + return ret;
910 +- else
911 +- return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", count);
912 ++
913 ++ count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) +
914 ++ ((u32)buffer[2]<<8) + buffer[3];
915 ++ return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", count);
916 + }
917 +
918 + static ssize_t applesmc_key_at_index_read_show(struct device *dev,
919 +diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
920 +index 4de45db76756c..5aed1de845d0d 100644
921 +--- a/drivers/ide/ide-cd.c
922 ++++ b/drivers/ide/ide-cd.c
923 +@@ -712,7 +712,7 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
924 + struct request_queue *q = drive->queue;
925 + int write = rq_data_dir(rq) == WRITE;
926 + unsigned short sectors_per_frame =
927 +- queue_logical_block_size(q) >> SECTOR_BITS;
928 ++ queue_logical_block_size(q) >> SECTOR_SHIFT;
929 +
930 + ide_debug_log(IDE_DBG_RQ, "rq->cmd[0]: 0x%x, rq->cmd_flags: 0x%x, "
931 + "secs_per_frame: %u",
932 +@@ -919,7 +919,7 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
933 + * end up being bogus.
934 + */
935 + blocklen = be32_to_cpu(capbuf.blocklen);
936 +- blocklen = (blocklen >> SECTOR_BITS) << SECTOR_BITS;
937 ++ blocklen = (blocklen >> SECTOR_SHIFT) << SECTOR_SHIFT;
938 + switch (blocklen) {
939 + case 512:
940 + case 1024:
941 +@@ -935,7 +935,7 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
942 + }
943 +
944 + *capacity = 1 + be32_to_cpu(capbuf.lba);
945 +- *sectors_per_frame = blocklen >> SECTOR_BITS;
946 ++ *sectors_per_frame = blocklen >> SECTOR_SHIFT;
947 +
948 + ide_debug_log(IDE_DBG_PROBE, "cap: %lu, sectors_per_frame: %lu",
949 + *capacity, *sectors_per_frame);
950 +@@ -1012,7 +1012,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
951 + drive->probed_capacity = toc->capacity * sectors_per_frame;
952 +
953 + blk_queue_logical_block_size(drive->queue,
954 +- sectors_per_frame << SECTOR_BITS);
955 ++ sectors_per_frame << SECTOR_SHIFT);
956 +
957 + /* first read just the header, so we know how long the TOC is */
958 + stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr,
959 +diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h
960 +index 264e822eba58a..04f0f310a8566 100644
961 +--- a/drivers/ide/ide-cd.h
962 ++++ b/drivers/ide/ide-cd.h
963 +@@ -21,11 +21,7 @@
964 +
965 + /************************************************************************/
966 +
967 +-#define SECTOR_BITS 9
968 +-#ifndef SECTOR_SIZE
969 +-#define SECTOR_SIZE (1 << SECTOR_BITS)
970 +-#endif
971 +-#define SECTORS_PER_FRAME (CD_FRAMESIZE >> SECTOR_BITS)
972 ++#define SECTORS_PER_FRAME (CD_FRAMESIZE >> SECTOR_SHIFT)
973 + #define SECTOR_BUFFER_SIZE (CD_FRAMESIZE * 32)
974 +
975 + /* Capabilities Page size including 8 bytes of Mode Page Header */
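
Several hunks in this patch (xtensa simdisk, brd, null_blk, rbd, zram, nvdimm, gdth, ide-cd) drop private SECTOR_SHIFT/SECTOR_SIZE/SECTOR_BITS definitions and rely on a single block-layer definition instead. For reference, that shared definition amounts to the following (values assumed; the real ones live in the block layer headers, not in this hunk):

    /* Illustrative only. */
    #define SECTOR_SHIFT 9
    #define SECTOR_SIZE  (1 << SECTOR_SHIFT)   /* 512 bytes */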
976 +diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
977 +index 7cc5b04e30b7a..09c6b17aaf80e 100644
978 +--- a/drivers/iommu/intel_irq_remapping.c
979 ++++ b/drivers/iommu/intel_irq_remapping.c
980 +@@ -479,12 +479,18 @@ static void iommu_enable_irq_remapping(struct intel_iommu *iommu)
981 +
982 + /* Enable interrupt-remapping */
983 + iommu->gcmd |= DMA_GCMD_IRE;
984 +- iommu->gcmd &= ~DMA_GCMD_CFI; /* Block compatibility-format MSIs */
985 + writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
986 +-
987 + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
988 + readl, (sts & DMA_GSTS_IRES), sts);
989 +
990 ++ /* Block compatibility-format MSIs */
991 ++ if (sts & DMA_GSTS_CFIS) {
992 ++ iommu->gcmd &= ~DMA_GCMD_CFI;
993 ++ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
994 ++ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
995 ++ readl, !(sts & DMA_GSTS_CFIS), sts);
996 ++ }
997 ++
998 + /*
999 + * With CFI clear in the Global Command register, we should be
1000 + * protected from dangerous (i.e. compatibility) interrupts
1001 +diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
1002 +index 394e53afc2593..0acd10d3b7bff 100644
1003 +--- a/drivers/md/dm-cache-metadata.c
1004 ++++ b/drivers/md/dm-cache-metadata.c
1005 +@@ -536,12 +536,16 @@ static int __create_persistent_data_objects(struct dm_cache_metadata *cmd,
1006 + CACHE_MAX_CONCURRENT_LOCKS);
1007 + if (IS_ERR(cmd->bm)) {
1008 + DMERR("could not create block manager");
1009 +- return PTR_ERR(cmd->bm);
1010 ++ r = PTR_ERR(cmd->bm);
1011 ++ cmd->bm = NULL;
1012 ++ return r;
1013 + }
1014 +
1015 + r = __open_or_format_metadata(cmd, may_format_device);
1016 +- if (r)
1017 ++ if (r) {
1018 + dm_block_manager_destroy(cmd->bm);
1019 ++ cmd->bm = NULL;
1020 ++ }
1021 +
1022 + return r;
1023 + }
1024 +diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
1025 +index b85a66f42814e..29f8a632b42b2 100644
1026 +--- a/drivers/md/dm-thin-metadata.c
1027 ++++ b/drivers/md/dm-thin-metadata.c
1028 +@@ -698,12 +698,16 @@ static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool f
1029 + THIN_MAX_CONCURRENT_LOCKS);
1030 + if (IS_ERR(pmd->bm)) {
1031 + DMERR("could not create block manager");
1032 +- return PTR_ERR(pmd->bm);
1033 ++ r = PTR_ERR(pmd->bm);
1034 ++ pmd->bm = NULL;
1035 ++ return r;
1036 + }
1037 +
1038 + r = __open_or_format_metadata(pmd, format_device);
1039 +- if (r)
1040 ++ if (r) {
1041 + dm_block_manager_destroy(pmd->bm);
1042 ++ pmd->bm = NULL;
1043 ++ }
1044 +
1045 + return r;
1046 + }
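
Both dm metadata fixes above follow the same pattern: after destroying the block manager on an error path, the pointer is reset to NULL so later teardown does not act on a stale pointer. A generic user-space sketch of that shape (not kernel code, names are made up):

    #include <stdlib.h>

    struct ctx { void *bm; };

    static void bm_destroy(void *bm) { free(bm); }

    static int open_metadata(struct ctx *c)
    {
        int r = -1;              /* pretend the open/format step failed */

        c->bm = malloc(64);
        if (!c->bm)
            return -1;

        if (r) {
            bm_destroy(c->bm);
            c->bm = NULL;        /* key point: no dangling pointer for later cleanup */
            return r;
        }
        return 0;
    }

    int main(void)
    {
        struct ctx c = { 0 };

        open_metadata(&c);
        /* Safe even after the failure above: c.bm is NULL, and free(NULL) is a no-op. */
        bm_destroy(c.bm);
        return 0;
    }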
1047 +diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c
1048 +index 0187dbf3b87df..54cdafdd067db 100644
1049 +--- a/drivers/net/ethernet/arc/emac_mdio.c
1050 ++++ b/drivers/net/ethernet/arc/emac_mdio.c
1051 +@@ -153,6 +153,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
1052 + if (IS_ERR(data->reset_gpio)) {
1053 + error = PTR_ERR(data->reset_gpio);
1054 + dev_err(priv->dev, "Failed to request gpio: %d\n", error);
1055 ++ mdiobus_free(bus);
1056 + return error;
1057 + }
1058 +
1059 +diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
1060 +index 123ee5c11bc0c..11eb393497a2a 100644
1061 +--- a/drivers/net/ethernet/broadcom/bcmsysport.c
1062 ++++ b/drivers/net/ethernet/broadcom/bcmsysport.c
1063 +@@ -2087,8 +2087,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
1064 + priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
1065 + sizeof(struct bcm_sysport_tx_ring),
1066 + GFP_KERNEL);
1067 +- if (!priv->tx_rings)
1068 +- return -ENOMEM;
1069 ++ if (!priv->tx_rings) {
1070 ++ ret = -ENOMEM;
1071 ++ goto err_free_netdev;
1072 ++ }
1073 +
1074 + priv->is_lite = params->is_lite;
1075 + priv->num_rx_desc_words = params->num_rx_desc_words;
1076 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1077 +index a189061d8f97e..7de38ae5c18f2 100644
1078 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1079 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1080 +@@ -8251,6 +8251,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1081 +
1082 + bnxt_parse_log_pcie_link(bp);
1083 +
1084 ++ pci_save_state(pdev);
1085 + return 0;
1086 +
1087 + init_err_cleanup_tc:
1088 +@@ -8412,6 +8413,8 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
1089 + "Cannot re-enable PCI device after reset.\n");
1090 + } else {
1091 + pci_set_master(pdev);
1092 ++ pci_restore_state(pdev);
1093 ++ pci_save_state(pdev);
1094 +
1095 + err = bnxt_hwrm_func_reset(bp);
1096 + if (!err && netif_running(netdev))
1097 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
1098 +index 6edbbfc1709a2..a38433cb9015d 100644
1099 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
1100 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
1101 +@@ -1761,6 +1761,9 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
1102 + if (rc != 0)
1103 + return rc;
1104 +
1105 ++ if (!dir_entries || !entry_length)
1106 ++ return -EIO;
1107 ++
1108 + /* Insert 2 bytes of directory info (count and size of entries) */
1109 + if (len < 2)
1110 + return -EINVAL;
1111 +diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
1112 +index e40d31b405253..480179ddc45b6 100644
1113 +--- a/drivers/net/ethernet/broadcom/tg3.c
1114 ++++ b/drivers/net/ethernet/broadcom/tg3.c
1115 +@@ -7204,8 +7204,8 @@ static inline void tg3_reset_task_schedule(struct tg3 *tp)
1116 +
1117 + static inline void tg3_reset_task_cancel(struct tg3 *tp)
1118 + {
1119 +- cancel_work_sync(&tp->reset_task);
1120 +- tg3_flag_clear(tp, RESET_TASK_PENDING);
1121 ++ if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
1122 ++ cancel_work_sync(&tp->reset_task);
1123 + tg3_flag_clear(tp, TX_RECOVERY_PENDING);
1124 + }
1125 +
1126 +@@ -11182,18 +11182,27 @@ static void tg3_reset_task(struct work_struct *work)
1127 +
1128 + tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
1129 + err = tg3_init_hw(tp, true);
1130 +- if (err)
1131 ++ if (err) {
1132 ++ tg3_full_unlock(tp);
1133 ++ tp->irq_sync = 0;
1134 ++ tg3_napi_enable(tp);
1135 ++ /* Clear this flag so that tg3_reset_task_cancel() will not
1136 ++ * call cancel_work_sync() and wait forever.
1137 ++ */
1138 ++ tg3_flag_clear(tp, RESET_TASK_PENDING);
1139 ++ dev_close(tp->dev);
1140 + goto out;
1141 ++ }
1142 +
1143 + tg3_netif_start(tp);
1144 +
1145 +-out:
1146 + tg3_full_unlock(tp);
1147 +
1148 + if (!err)
1149 + tg3_phy_start(tp);
1150 +
1151 + tg3_flag_clear(tp, RESET_TASK_PENDING);
1152 ++out:
1153 + rtnl_unlock();
1154 + }
1155 +
1156 +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
1157 +index 0733745f4be6c..af832929ae287 100644
1158 +--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
1159 ++++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
1160 +@@ -2433,8 +2433,10 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
1161 + priv->enet_ver = AE_VERSION_1;
1162 + else if (acpi_dev_found(hns_enet_acpi_match[1].id))
1163 + priv->enet_ver = AE_VERSION_2;
1164 +- else
1165 +- return -ENXIO;
1166 ++ else {
1167 ++ ret = -ENXIO;
1168 ++ goto out_read_prop_fail;
1169 ++ }
1170 +
1171 + /* try to find port-idx-in-ae first */
1172 + ret = acpi_node_get_property_reference(dev->fwnode,
1173 +@@ -2446,7 +2448,8 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
1174 + priv->fwnode = acpi_fwnode_handle(args.adev);
1175 + } else {
1176 + dev_err(dev, "cannot read cfg data from OF or acpi\n");
1177 +- return -ENXIO;
1178 ++ ret = -ENXIO;
1179 ++ goto out_read_prop_fail;
1180 + }
1181 +
1182 + ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id);
1183 +diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
1184 +index 20043f82c1d82..7c212d6618640 100644
1185 +--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
1186 ++++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
1187 +@@ -114,7 +114,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
1188 + goto err_out;
1189 +
1190 + for (i = 0; i <= buddy->max_order; ++i) {
1191 +- s = BITS_TO_LONGS(1 << (buddy->max_order - i));
1192 ++ s = BITS_TO_LONGS(1UL << (buddy->max_order - i));
1193 + buddy->bits[i] = kvmalloc_array(s, sizeof(long), GFP_KERNEL | __GFP_ZERO);
1194 + if (!buddy->bits[i])
1195 + goto err_out_free;
1196 +diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
1197 +index 9f4d93a16b7e5..19fb3dbb80f58 100644
1198 +--- a/drivers/net/ethernet/renesas/ravb_main.c
1199 ++++ b/drivers/net/ethernet/renesas/ravb_main.c
1200 +@@ -1374,6 +1374,51 @@ static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler,
1201 + return error;
1202 + }
1203 +
1204 ++/* MDIO bus init function */
1205 ++static int ravb_mdio_init(struct ravb_private *priv)
1206 ++{
1207 ++ struct platform_device *pdev = priv->pdev;
1208 ++ struct device *dev = &pdev->dev;
1209 ++ int error;
1210 ++
1211 ++ /* Bitbang init */
1212 ++ priv->mdiobb.ops = &bb_ops;
1213 ++
1214 ++ /* MII controller setting */
1215 ++ priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
1216 ++ if (!priv->mii_bus)
1217 ++ return -ENOMEM;
1218 ++
1219 ++ /* Hook up MII support for ethtool */
1220 ++ priv->mii_bus->name = "ravb_mii";
1221 ++ priv->mii_bus->parent = dev;
1222 ++ snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
1223 ++ pdev->name, pdev->id);
1224 ++
1225 ++ /* Register MDIO bus */
1226 ++ error = of_mdiobus_register(priv->mii_bus, dev->of_node);
1227 ++ if (error)
1228 ++ goto out_free_bus;
1229 ++
1230 ++ return 0;
1231 ++
1232 ++out_free_bus:
1233 ++ free_mdio_bitbang(priv->mii_bus);
1234 ++ return error;
1235 ++}
1236 ++
1237 ++/* MDIO bus release function */
1238 ++static int ravb_mdio_release(struct ravb_private *priv)
1239 ++{
1240 ++ /* Unregister mdio bus */
1241 ++ mdiobus_unregister(priv->mii_bus);
1242 ++
1243 ++ /* Free bitbang info */
1244 ++ free_mdio_bitbang(priv->mii_bus);
1245 ++
1246 ++ return 0;
1247 ++}
1248 ++
1249 + /* Network device open function for Ethernet AVB */
1250 + static int ravb_open(struct net_device *ndev)
1251 + {
1252 +@@ -1382,6 +1427,13 @@ static int ravb_open(struct net_device *ndev)
1253 + struct device *dev = &pdev->dev;
1254 + int error;
1255 +
1256 ++ /* MDIO bus init */
1257 ++ error = ravb_mdio_init(priv);
1258 ++ if (error) {
1259 ++ netdev_err(ndev, "failed to initialize MDIO\n");
1260 ++ return error;
1261 ++ }
1262 ++
1263 + napi_enable(&priv->napi[RAVB_BE]);
1264 + napi_enable(&priv->napi[RAVB_NC]);
1265 +
1266 +@@ -1459,6 +1511,7 @@ out_free_irq:
1267 + out_napi_off:
1268 + napi_disable(&priv->napi[RAVB_NC]);
1269 + napi_disable(&priv->napi[RAVB_BE]);
1270 ++ ravb_mdio_release(priv);
1271 + return error;
1272 + }
1273 +
1274 +@@ -1757,6 +1810,8 @@ static int ravb_close(struct net_device *ndev)
1275 + ravb_ring_free(ndev, RAVB_BE);
1276 + ravb_ring_free(ndev, RAVB_NC);
1277 +
1278 ++ ravb_mdio_release(priv);
1279 ++
1280 + return 0;
1281 + }
1282 +
1283 +@@ -1858,51 +1913,6 @@ static const struct net_device_ops ravb_netdev_ops = {
1284 + .ndo_set_mac_address = eth_mac_addr,
1285 + };
1286 +
1287 +-/* MDIO bus init function */
1288 +-static int ravb_mdio_init(struct ravb_private *priv)
1289 +-{
1290 +- struct platform_device *pdev = priv->pdev;
1291 +- struct device *dev = &pdev->dev;
1292 +- int error;
1293 +-
1294 +- /* Bitbang init */
1295 +- priv->mdiobb.ops = &bb_ops;
1296 +-
1297 +- /* MII controller setting */
1298 +- priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
1299 +- if (!priv->mii_bus)
1300 +- return -ENOMEM;
1301 +-
1302 +- /* Hook up MII support for ethtool */
1303 +- priv->mii_bus->name = "ravb_mii";
1304 +- priv->mii_bus->parent = dev;
1305 +- snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
1306 +- pdev->name, pdev->id);
1307 +-
1308 +- /* Register MDIO bus */
1309 +- error = of_mdiobus_register(priv->mii_bus, dev->of_node);
1310 +- if (error)
1311 +- goto out_free_bus;
1312 +-
1313 +- return 0;
1314 +-
1315 +-out_free_bus:
1316 +- free_mdio_bitbang(priv->mii_bus);
1317 +- return error;
1318 +-}
1319 +-
1320 +-/* MDIO bus release function */
1321 +-static int ravb_mdio_release(struct ravb_private *priv)
1322 +-{
1323 +- /* Unregister mdio bus */
1324 +- mdiobus_unregister(priv->mii_bus);
1325 +-
1326 +- /* Free bitbang info */
1327 +- free_mdio_bitbang(priv->mii_bus);
1328 +-
1329 +- return 0;
1330 +-}
1331 +-
1332 + static const struct of_device_id ravb_match_table[] = {
1333 + { .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 },
1334 + { .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 },
1335 +@@ -2132,13 +2142,6 @@ static int ravb_probe(struct platform_device *pdev)
1336 + eth_hw_addr_random(ndev);
1337 + }
1338 +
1339 +- /* MDIO bus init */
1340 +- error = ravb_mdio_init(priv);
1341 +- if (error) {
1342 +- dev_err(&pdev->dev, "failed to initialize MDIO\n");
1343 +- goto out_dma_free;
1344 +- }
1345 +-
1346 + netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
1347 + netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);
1348 +
1349 +@@ -2161,8 +2164,6 @@ static int ravb_probe(struct platform_device *pdev)
1350 + out_napi_del:
1351 + netif_napi_del(&priv->napi[RAVB_NC]);
1352 + netif_napi_del(&priv->napi[RAVB_BE]);
1353 +- ravb_mdio_release(priv);
1354 +-out_dma_free:
1355 + dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
1356 + priv->desc_bat_dma);
1357 +
1358 +@@ -2195,7 +2196,6 @@ static int ravb_remove(struct platform_device *pdev)
1359 + unregister_netdev(ndev);
1360 + netif_napi_del(&priv->napi[RAVB_NC]);
1361 + netif_napi_del(&priv->napi[RAVB_BE]);
1362 +- ravb_mdio_release(priv);
1363 + pm_runtime_disable(&pdev->dev);
1364 + free_netdev(ndev);
1365 + platform_set_drvdata(pdev, NULL);
1366 +diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
1367 +index 090607e725a24..d3ccd6929579a 100644
1368 +--- a/drivers/net/gtp.c
1369 ++++ b/drivers/net/gtp.c
1370 +@@ -1187,6 +1187,7 @@ static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
1371 + goto nlmsg_failure;
1372 +
1373 + if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
1374 ++ nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex) ||
1375 + nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) ||
1376 + nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr))
1377 + goto nla_put_failure;
1378 +diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
1379 +index e9fcf6ef716a8..9bf18127e63a1 100644
1380 +--- a/drivers/net/usb/asix_common.c
1381 ++++ b/drivers/net/usb/asix_common.c
1382 +@@ -309,7 +309,7 @@ int asix_read_phy_addr(struct usbnet *dev, int internal)
1383 +
1384 + netdev_dbg(dev->net, "asix_get_phy_addr()\n");
1385 +
1386 +- if (ret < 0) {
1387 ++ if (ret < 2) {
1388 + netdev_err(dev->net, "Error reading PHYID register: %02x\n", ret);
1389 + goto out;
1390 + }
1391 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
1392 +index 88b8ba0ad2cda..3e3dca59b7a69 100644
1393 +--- a/drivers/net/usb/qmi_wwan.c
1394 ++++ b/drivers/net/usb/qmi_wwan.c
1395 +@@ -1216,6 +1216,7 @@ static const struct usb_device_id products[] = {
1396 + {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
1397 + {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
1398 + {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
1399 ++ {QMI_FIXED_INTF(0x2001, 0x7e3d, 4)}, /* D-Link DWM-222 A2 */
1400 + {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
1401 + {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
1402 + {QMI_FIXED_INTF(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */
1403 +@@ -1251,6 +1252,7 @@ static const struct usb_device_id products[] = {
1404 + {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
1405 + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */
1406 + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
1407 ++ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
1408 + {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
1409 + {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
1410 + {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
1411 +diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
1412 +index e3f060f0b83ea..dd3cb53846290 100644
1413 +--- a/drivers/nvdimm/nd.h
1414 ++++ b/drivers/nvdimm/nd.h
1415 +@@ -29,7 +29,6 @@ enum {
1416 + * BTT instance
1417 + */
1418 + ND_MAX_LANES = 256,
1419 +- SECTOR_SHIFT = 9,
1420 + INT_LBASIZE_ALIGNMENT = 64,
1421 + NVDIMM_IO_ATOMIC = 1,
1422 + };
1423 +diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
1424 +index 09a39f4aaf821..d0be85d0c289a 100644
1425 +--- a/drivers/nvme/target/core.c
1426 ++++ b/drivers/nvme/target/core.c
1427 +@@ -208,6 +208,9 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
1428 +
1429 + static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
1430 + {
1431 ++ if (unlikely(ctrl->kato == 0))
1432 ++ return;
1433 ++
1434 + pr_debug("ctrl %d start keep-alive timer for %d secs\n",
1435 + ctrl->cntlid, ctrl->kato);
1436 +
1437 +@@ -217,6 +220,9 @@ static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
1438 +
1439 + static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
1440 + {
1441 ++ if (unlikely(ctrl->kato == 0))
1442 ++ return;
1443 ++
1444 + pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);
1445 +
1446 + cancel_delayed_work_sync(&ctrl->ka_work);
1447 +diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
1448 +index b7a5d1065378d..df1c6dee255bf 100644
1449 +--- a/drivers/nvme/target/fc.c
1450 ++++ b/drivers/nvme/target/fc.c
1451 +@@ -1994,9 +1994,9 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
1452 + return;
1453 + if (fcpreq->fcp_error ||
1454 + fcpreq->transferred_length != fcpreq->transfer_length) {
1455 +- spin_lock(&fod->flock);
1456 ++ spin_lock_irqsave(&fod->flock, flags);
1457 + fod->abort = true;
1458 +- spin_unlock(&fod->flock);
1459 ++ spin_unlock_irqrestore(&fod->flock, flags);
1460 +
1461 + nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
1462 + return;
1463 +diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
1464 +index 95fc720c1b304..e6e5ccb1e0f38 100644
1465 +--- a/drivers/scsi/gdth.h
1466 ++++ b/drivers/scsi/gdth.h
1467 +@@ -178,9 +178,6 @@
1468 + #define MSG_SIZE 34 /* size of message structure */
1469 + #define MSG_REQUEST 0 /* async. event: message */
1470 +
1471 +-/* cacheservice defines */
1472 +-#define SECTOR_SIZE 0x200 /* always 512 bytes per sec. */
1473 +-
1474 + /* DPMEM constants */
1475 + #define DPMEM_MAGIC 0xC0FFEE11
1476 + #define IC_HEADER_BYTES 48
1477 +diff --git a/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c b/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c
1478 +index fd11133606038..0c3141746edfe 100644
1479 +--- a/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c
1480 ++++ b/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c
1481 +@@ -49,20 +49,21 @@ static struct temp_sensor_data omap4430_mpu_temp_sensor_data = {
1482 +
1483 + /*
1484 + * Temperature values in milli degree celsius
1485 +- * ADC code values from 530 to 923
1486 ++ * ADC code values from 13 to 107, see TRM
1487 ++ * "18.4.10.2.3 ADC Codes Versus Temperature".
1488 + */
1489 + static const int
1490 + omap4430_adc_to_temp[OMAP4430_ADC_END_VALUE - OMAP4430_ADC_START_VALUE + 1] = {
1491 +- -38000, -35000, -34000, -32000, -30000, -28000, -26000, -24000, -22000,
1492 +- -20000, -18000, -17000, -15000, -13000, -12000, -10000, -8000, -6000,
1493 +- -5000, -3000, -1000, 0, 2000, 3000, 5000, 6000, 8000, 10000, 12000,
1494 +- 13000, 15000, 17000, 19000, 21000, 23000, 25000, 27000, 28000, 30000,
1495 +- 32000, 33000, 35000, 37000, 38000, 40000, 42000, 43000, 45000, 47000,
1496 +- 48000, 50000, 52000, 53000, 55000, 57000, 58000, 60000, 62000, 64000,
1497 +- 66000, 68000, 70000, 71000, 73000, 75000, 77000, 78000, 80000, 82000,
1498 +- 83000, 85000, 87000, 88000, 90000, 92000, 93000, 95000, 97000, 98000,
1499 +- 100000, 102000, 103000, 105000, 107000, 109000, 111000, 113000, 115000,
1500 +- 117000, 118000, 120000, 122000, 123000,
1501 ++ -40000, -38000, -35000, -34000, -32000, -30000, -28000, -26000, -24000,
1502 ++ -22000, -20000, -18500, -17000, -15000, -13500, -12000, -10000, -8000,
1503 ++ -6500, -5000, -3500, -1500, 0, 2000, 3500, 5000, 6500, 8500, 10000,
1504 ++ 12000, 13500, 15000, 17000, 19000, 21000, 23000, 25000, 27000, 28500,
1505 ++ 30000, 32000, 33500, 35000, 37000, 38500, 40000, 42000, 43500, 45000,
1506 ++ 47000, 48500, 50000, 52000, 53500, 55000, 57000, 58500, 60000, 62000,
1507 ++ 64000, 66000, 68000, 70000, 71500, 73500, 75000, 77000, 78500, 80000,
1508 ++ 82000, 83500, 85000, 87000, 88500, 90000, 92000, 93500, 95000, 97000,
1509 ++ 98500, 100000, 102000, 103500, 105000, 107000, 109000, 111000, 113000,
1510 ++ 115000, 117000, 118500, 120000, 122000, 123500, 125000,
1511 + };
1512 +
1513 + /* OMAP4430 data */
1514 +diff --git a/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h b/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h
1515 +index 6f2de3a3356d4..86850082b24b9 100644
1516 +--- a/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h
1517 ++++ b/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h
1518 +@@ -67,9 +67,13 @@
1519 + * and thresholds for OMAP4430.
1520 + */
1521 +
1522 +-/* ADC conversion table limits */
1523 +-#define OMAP4430_ADC_START_VALUE 0
1524 +-#define OMAP4430_ADC_END_VALUE 127
1525 ++/*
1526 ++ * ADC conversion table limits. Ignore values outside the TRM listed
1527 ++ * range to avoid bogus thermal shutdowns. See omap4430 TRM chapter
1528 ++ * "18.4.10.2.3 ADC Codes Versus Temperature".
1529 ++ */
1530 ++#define OMAP4430_ADC_START_VALUE 13
1531 ++#define OMAP4430_ADC_END_VALUE 107
1532 + /* bandgap clock limits (no control on 4430) */
1533 + #define OMAP4430_MAX_FREQ 32768
1534 + #define OMAP4430_MIN_FREQ 32768
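
With the narrowed ADC window, a raw bandgap reading indexes the corrected table as temp = table[adc_code - OMAP4430_ADC_START_VALUE], and only codes 13..107 are considered valid. A standalone sketch of that bounds-checked lookup (table truncated to the first few entries of the hunk above for brevity):

    #include <stdio.h>

    #define ADC_START 13
    #define ADC_END   107

    /* First entries of the corrected table, in milli-degrees Celsius. */
    static const int adc_to_mdegc[] = {
        -40000, -38000, -35000, -34000, -32000, -30000,
    };

    static int adc_to_temp(int code, int *mdegc)
    {
        if (code < ADC_START || code > ADC_END)
            return -1;          /* outside the TRM-listed range */
        if (code - ADC_START >= (int)(sizeof(adc_to_mdegc) / sizeof(adc_to_mdegc[0])))
            return -1;          /* beyond this truncated demo table */
        *mdegc = adc_to_mdegc[code - ADC_START];
        return 0;
    }

    int main(void)
    {
        int t;

        if (!adc_to_temp(15, &t))
            printf("ADC code 15 -> %d mC\n", t);   /* -35000 */
        return 0;
    }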
1535 +diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
1536 +index e94a61eaeceb0..f7b553faadb10 100644
1537 +--- a/drivers/xen/xenbus/xenbus_client.c
1538 ++++ b/drivers/xen/xenbus/xenbus_client.c
1539 +@@ -365,8 +365,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
1540 + int i, j;
1541 +
1542 + for (i = 0; i < nr_pages; i++) {
1543 +- err = gnttab_grant_foreign_access(dev->otherend_id,
1544 +- virt_to_gfn(vaddr), 0);
1545 ++ unsigned long gfn;
1546 ++
1547 ++ if (is_vmalloc_addr(vaddr))
1548 ++ gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr));
1549 ++ else
1550 ++ gfn = virt_to_gfn(vaddr);
1551 ++
1552 ++ err = gnttab_grant_foreign_access(dev->otherend_id, gfn, 0);
1553 + if (err < 0) {
1554 + xenbus_dev_fatal(dev, err,
1555 + "granting access to ring page");
1556 +diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
1557 +index 185d5ab7e986a..a80ce75b5bad1 100644
1558 +--- a/fs/affs/amigaffs.c
1559 ++++ b/fs/affs/amigaffs.c
1560 +@@ -419,24 +419,51 @@ affs_mode_to_prot(struct inode *inode)
1561 + u32 prot = AFFS_I(inode)->i_protect;
1562 + umode_t mode = inode->i_mode;
1563 +
1564 ++ /*
1565 ++ * First, clear all RWED bits for owner, group, other.
1566 ++ * Then, recalculate them afresh.
1567 ++ *
1568 ++ * We'll always clear the delete-inhibit bit for the owner, as that is
1569 ++ * the classic single-user mode AmigaOS protection bit and we need to
1570 ++ * stay compatible with all scenarios.
1571 ++ *
1572 ++ * Since multi-user AmigaOS is an extension, we'll only set the
1573 ++ * delete-allow bit if any of the other bits in the same user class
1574 ++ * (group/other) are used.
1575 ++ */
1576 ++ prot &= ~(FIBF_NOEXECUTE | FIBF_NOREAD
1577 ++ | FIBF_NOWRITE | FIBF_NODELETE
1578 ++ | FIBF_GRP_EXECUTE | FIBF_GRP_READ
1579 ++ | FIBF_GRP_WRITE | FIBF_GRP_DELETE
1580 ++ | FIBF_OTR_EXECUTE | FIBF_OTR_READ
1581 ++ | FIBF_OTR_WRITE | FIBF_OTR_DELETE);
1582 ++
1583 ++ /* Classic single-user AmigaOS flags. These are inverted. */
1584 + if (!(mode & 0100))
1585 + prot |= FIBF_NOEXECUTE;
1586 + if (!(mode & 0400))
1587 + prot |= FIBF_NOREAD;
1588 + if (!(mode & 0200))
1589 + prot |= FIBF_NOWRITE;
1590 ++
1591 ++ /* Multi-user extended flags. Not inverted. */
1592 + if (mode & 0010)
1593 + prot |= FIBF_GRP_EXECUTE;
1594 + if (mode & 0040)
1595 + prot |= FIBF_GRP_READ;
1596 + if (mode & 0020)
1597 + prot |= FIBF_GRP_WRITE;
1598 ++ if (mode & 0070)
1599 ++ prot |= FIBF_GRP_DELETE;
1600 ++
1601 + if (mode & 0001)
1602 + prot |= FIBF_OTR_EXECUTE;
1603 + if (mode & 0004)
1604 + prot |= FIBF_OTR_READ;
1605 + if (mode & 0002)
1606 + prot |= FIBF_OTR_WRITE;
1607 ++ if (mode & 0007)
1608 ++ prot |= FIBF_OTR_DELETE;
1609 +
1610 + AFFS_I(inode)->i_protect = prot;
1611 + }
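As an illustration (not part of the patch) of the mapping rule the comment above describes -- owner RWED bits are stored inverted while the group/other multi-user bits are not -- here is a small standalone sketch; NOWRITE and GRP_WRITE are stand-in values, not the real FIBF_* constants:

  #include <stdio.h>

  #define NOWRITE   0x04    /* stand-in for FIBF_NOWRITE: owner bits are inverted */
  #define GRP_WRITE 0x200   /* stand-in for FIBF_GRP_WRITE: group bits are not */

  static unsigned int mode_to_prot(unsigned int mode, unsigned int prot)
  {
          prot &= ~(NOWRITE | GRP_WRITE);   /* recompute the bits from scratch */
          if (!(mode & 0200))               /* no owner 'w' -> set "write not allowed" */
                  prot |= NOWRITE;
          if (mode & 0020)                  /* group 'w' -> set "write allowed" */
                  prot |= GRP_WRITE;
          return prot;
  }

  int main(void)
  {
          printf("0644 -> %#x\n", mode_to_prot(0644, 0));   /* 0: owner may write */
          printf("0464 -> %#x\n", mode_to_prot(0464, 0));   /* 0x204 */
          return 0;
  }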
1612 +diff --git a/fs/affs/file.c b/fs/affs/file.c
1613 +index a85817f54483f..ba084b0b214b9 100644
1614 +--- a/fs/affs/file.c
1615 ++++ b/fs/affs/file.c
1616 +@@ -428,6 +428,24 @@ static int affs_write_begin(struct file *file, struct address_space *mapping,
1617 + return ret;
1618 + }
1619 +
1620 ++static int affs_write_end(struct file *file, struct address_space *mapping,
1621 ++ loff_t pos, unsigned int len, unsigned int copied,
1622 ++ struct page *page, void *fsdata)
1623 ++{
1624 ++ struct inode *inode = mapping->host;
1625 ++ int ret;
1626 ++
1627 ++ ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
1628 ++
1629 ++ /* Clear Archived bit on file writes, as AmigaOS would do */
1630 ++ if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) {
1631 ++ AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED;
1632 ++ mark_inode_dirty(inode);
1633 ++ }
1634 ++
1635 ++ return ret;
1636 ++}
1637 ++
1638 + static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
1639 + {
1640 + return generic_block_bmap(mapping,block,affs_get_block);
1641 +@@ -437,7 +455,7 @@ const struct address_space_operations affs_aops = {
1642 + .readpage = affs_readpage,
1643 + .writepage = affs_writepage,
1644 + .write_begin = affs_write_begin,
1645 +- .write_end = generic_write_end,
1646 ++ .write_end = affs_write_end,
1647 + .direct_IO = affs_direct_IO,
1648 + .bmap = _affs_bmap
1649 + };
1650 +@@ -794,6 +812,12 @@ done:
1651 + if (tmp > inode->i_size)
1652 + inode->i_size = AFFS_I(inode)->mmu_private = tmp;
1653 +
1654 ++ /* Clear Archived bit on file writes, as AmigaOS would do */
1655 ++ if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) {
1656 ++ AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED;
1657 ++ mark_inode_dirty(inode);
1658 ++ }
1659 ++
1660 + err_first_bh:
1661 + unlock_page(page);
1662 + put_page(page);
1663 +diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
1664 +index f5a8c0d26cf36..cf1e8ba50f6bf 100644
1665 +--- a/fs/btrfs/ctree.c
1666 ++++ b/fs/btrfs/ctree.c
1667 +@@ -1367,7 +1367,8 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
1668 + btrfs_tree_read_unlock_blocking(eb);
1669 + free_extent_buffer(eb);
1670 +
1671 +- extent_buffer_get(eb_rewin);
1672 ++ btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb_rewin),
1673 ++ eb_rewin, btrfs_header_level(eb_rewin));
1674 + btrfs_tree_read_lock(eb_rewin);
1675 + __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
1676 + WARN_ON(btrfs_header_nritems(eb_rewin) >
1677 +@@ -1438,8 +1439,6 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
1678 +
1679 + if (!eb)
1680 + return NULL;
1681 +- extent_buffer_get(eb);
1682 +- btrfs_tree_read_lock(eb);
1683 + if (old_root) {
1684 + btrfs_set_header_bytenr(eb, eb->start);
1685 + btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
1686 +@@ -1447,6 +1446,9 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
1687 + btrfs_set_header_level(eb, old_root->level);
1688 + btrfs_set_header_generation(eb, old_generation);
1689 + }
1690 ++ btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), eb,
1691 ++ btrfs_header_level(eb));
1692 ++ btrfs_tree_read_lock(eb);
1693 + if (tm)
1694 + __tree_mod_log_rewind(fs_info, eb, time_seq, tm);
1695 + else
1696 +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
1697 +index ef1fd6a09d8e5..0ba338cffa937 100644
1698 +--- a/fs/btrfs/extent_io.c
1699 ++++ b/fs/btrfs/extent_io.c
1700 +@@ -5448,9 +5448,9 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
1701 + }
1702 + }
1703 +
1704 +-int read_extent_buffer_to_user(const struct extent_buffer *eb,
1705 +- void __user *dstv,
1706 +- unsigned long start, unsigned long len)
1707 ++int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
1708 ++ void __user *dstv,
1709 ++ unsigned long start, unsigned long len)
1710 + {
1711 + size_t cur;
1712 + size_t offset;
1713 +@@ -5471,7 +5471,7 @@ int read_extent_buffer_to_user(const struct extent_buffer *eb,
1714 +
1715 + cur = min(len, (PAGE_SIZE - offset));
1716 + kaddr = page_address(page);
1717 +- if (copy_to_user(dst, kaddr + offset, cur)) {
1718 ++ if (probe_user_write(dst, kaddr + offset, cur)) {
1719 + ret = -EFAULT;
1720 + break;
1721 + }
1722 +diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
1723 +index e5535bbe69537..90c5095ae97ee 100644
1724 +--- a/fs/btrfs/extent_io.h
1725 ++++ b/fs/btrfs/extent_io.h
1726 +@@ -455,9 +455,9 @@ int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
1727 + void read_extent_buffer(const struct extent_buffer *eb, void *dst,
1728 + unsigned long start,
1729 + unsigned long len);
1730 +-int read_extent_buffer_to_user(const struct extent_buffer *eb,
1731 +- void __user *dst, unsigned long start,
1732 +- unsigned long len);
1733 ++int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
1734 ++ void __user *dst, unsigned long start,
1735 ++ unsigned long len);
1736 + void write_extent_buffer_fsid(struct extent_buffer *eb, const void *src);
1737 + void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
1738 + const void *src);
1739 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
1740 +index e82b4f3f490c1..7b3e41987d072 100644
1741 +--- a/fs/btrfs/ioctl.c
1742 ++++ b/fs/btrfs/ioctl.c
1743 +@@ -2021,9 +2021,14 @@ static noinline int copy_to_sk(struct btrfs_path *path,
1744 + sh.len = item_len;
1745 + sh.transid = found_transid;
1746 +
1747 +- /* copy search result header */
1748 +- if (copy_to_user(ubuf + *sk_offset, &sh, sizeof(sh))) {
1749 +- ret = -EFAULT;
1750 ++ /*
1751 ++ * Copy search result header. If we fault then loop again so we
1752 ++ * can fault in the pages and -EFAULT there if there's a
1753 ++ * problem. Otherwise we'll fault and then copy the buffer in
1754 ++ * properly this next time through
1755 ++ */
1756 ++ if (probe_user_write(ubuf + *sk_offset, &sh, sizeof(sh))) {
1757 ++ ret = 0;
1758 + goto out;
1759 + }
1760 +
1761 +@@ -2031,10 +2036,14 @@ static noinline int copy_to_sk(struct btrfs_path *path,
1762 +
1763 + if (item_len) {
1764 + char __user *up = ubuf + *sk_offset;
1765 +- /* copy the item */
1766 +- if (read_extent_buffer_to_user(leaf, up,
1767 +- item_off, item_len)) {
1768 +- ret = -EFAULT;
1769 ++ /*
1770 ++ * Copy the item, same behavior as above, but reset the
1771 + * sk_offset so we copy the full thing again.
1772 ++ */
1773 ++ if (read_extent_buffer_to_user_nofault(leaf, up,
1774 ++ item_off, item_len)) {
1775 ++ ret = 0;
1776 ++ *sk_offset -= sizeof(sh);
1777 + goto out;
1778 + }
1779 +
1780 +@@ -2122,6 +2131,10 @@ static noinline int search_ioctl(struct inode *inode,
1781 + key.offset = sk->min_offset;
1782 +
1783 + while (1) {
1784 ++ ret = fault_in_pages_writeable(ubuf, *buf_size - sk_offset);
1785 ++ if (ret)
1786 ++ break;
1787 ++
1788 + ret = btrfs_search_forward(root, &key, path, sk->min_transid);
1789 + if (ret != 0) {
1790 + if (ret > 0)
1791 +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
1792 +index 4ff96e0aa26a9..a57f847303fcd 100644
1793 +--- a/fs/btrfs/volumes.c
1794 ++++ b/fs/btrfs/volumes.c
1795 +@@ -4174,6 +4174,7 @@ static int btrfs_uuid_scan_kthread(void *data)
1796 + goto skip;
1797 + }
1798 + update_tree:
1799 ++ btrfs_release_path(path);
1800 + if (!btrfs_is_empty_uuid(root_item.uuid)) {
1801 + ret = btrfs_uuid_tree_add(trans, fs_info,
1802 + root_item.uuid,
1803 +@@ -4199,6 +4200,7 @@ update_tree:
1804 + }
1805 +
1806 + skip:
1807 ++ btrfs_release_path(path);
1808 + if (trans) {
1809 + ret = btrfs_end_transaction(trans);
1810 + trans = NULL;
1811 +@@ -4206,7 +4208,6 @@ skip:
1812 + break;
1813 + }
1814 +
1815 +- btrfs_release_path(path);
1816 + if (key.offset < (u64)-1) {
1817 + key.offset++;
1818 + } else if (key.type < BTRFS_ROOT_ITEM_KEY) {
1819 +diff --git a/fs/ceph/file.c b/fs/ceph/file.c
1820 +index 6d653235e323b..1f873034f4691 100644
1821 +--- a/fs/ceph/file.c
1822 ++++ b/fs/ceph/file.c
1823 +@@ -1728,6 +1728,7 @@ const struct file_operations ceph_file_fops = {
1824 + .mmap = ceph_mmap,
1825 + .fsync = ceph_fsync,
1826 + .lock = ceph_lock,
1827 ++ .setlease = simple_nosetlease,
1828 + .flock = ceph_flock,
1829 + .splice_read = generic_file_splice_read,
1830 + .splice_write = iter_file_splice_write,
1831 +diff --git a/fs/eventpoll.c b/fs/eventpoll.c
1832 +index 00f0902e27e88..af9dfa494b1fa 100644
1833 +--- a/fs/eventpoll.c
1834 ++++ b/fs/eventpoll.c
1835 +@@ -1901,9 +1901,9 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
1836 + * during ep_insert().
1837 + */
1838 + if (list_empty(&epi->ffd.file->f_tfile_llink)) {
1839 +- get_file(epi->ffd.file);
1840 +- list_add(&epi->ffd.file->f_tfile_llink,
1841 +- &tfile_check_list);
1842 ++ if (get_file_rcu(epi->ffd.file))
1843 ++ list_add(&epi->ffd.file->f_tfile_llink,
1844 ++ &tfile_check_list);
1845 + }
1846 + }
1847 + }
1848 +diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
1849 +index 5999d80316759..1c3d774d3c839 100644
1850 +--- a/include/linux/blkdev.h
1851 ++++ b/include/linux/blkdev.h
1852 +@@ -1015,6 +1015,19 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
1853 + return bdev->bd_disk->queue; /* this is never NULL */
1854 + }
1855 +
1856 ++/*
1857 ++ * The basic unit of block I/O is a sector. It is used in a number of contexts
1858 ++ * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
1859 ++ * bytes. Variables of type sector_t represent an offset or size that is a
1860 ++ * multiple of 512 bytes. Hence these two constants.
1861 ++ */
1862 ++#ifndef SECTOR_SHIFT
1863 ++#define SECTOR_SHIFT 9
1864 ++#endif
1865 ++#ifndef SECTOR_SIZE
1866 ++#define SECTOR_SIZE (1 << SECTOR_SHIFT)
1867 ++#endif
1868 ++
1869 + /*
1870 + * blk_rq_pos() : the current sector
1871 + * blk_rq_bytes() : bytes left in the entire request
1872 +@@ -1042,12 +1055,12 @@ extern unsigned int blk_rq_err_bytes(const struct request *rq);
1873 +
1874 + static inline unsigned int blk_rq_sectors(const struct request *rq)
1875 + {
1876 +- return blk_rq_bytes(rq) >> 9;
1877 ++ return blk_rq_bytes(rq) >> SECTOR_SHIFT;
1878 + }
1879 +
1880 + static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
1881 + {
1882 +- return blk_rq_cur_bytes(rq) >> 9;
1883 ++ return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
1884 + }
1885 +
1886 + /*
1887 +@@ -1067,7 +1080,8 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
1888 + int op)
1889 + {
1890 + if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
1891 +- return min(q->limits.max_discard_sectors, UINT_MAX >> 9);
1892 ++ return min(q->limits.max_discard_sectors,
1893 ++ UINT_MAX >> SECTOR_SHIFT);
1894 +
1895 + if (unlikely(op == REQ_OP_WRITE_SAME))
1896 + return q->limits.max_write_same_sectors;
1897 +@@ -1376,16 +1390,21 @@ extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
1898 + static inline int sb_issue_discard(struct super_block *sb, sector_t block,
1899 + sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
1900 + {
1901 +- return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
1902 +- nr_blocks << (sb->s_blocksize_bits - 9),
1903 ++ return blkdev_issue_discard(sb->s_bdev,
1904 ++ block << (sb->s_blocksize_bits -
1905 ++ SECTOR_SHIFT),
1906 ++ nr_blocks << (sb->s_blocksize_bits -
1907 ++ SECTOR_SHIFT),
1908 + gfp_mask, flags);
1909 + }
1910 + static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
1911 + sector_t nr_blocks, gfp_t gfp_mask)
1912 + {
1913 + return blkdev_issue_zeroout(sb->s_bdev,
1914 +- block << (sb->s_blocksize_bits - 9),
1915 +- nr_blocks << (sb->s_blocksize_bits - 9),
1916 ++ block << (sb->s_blocksize_bits -
1917 ++ SECTOR_SHIFT),
1918 ++ nr_blocks << (sb->s_blocksize_bits -
1919 ++ SECTOR_SHIFT),
1920 + gfp_mask, 0);
1921 + }
1922 +
1923 +@@ -1492,7 +1511,8 @@ static inline int queue_alignment_offset(struct request_queue *q)
1924 + static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
1925 + {
1926 + unsigned int granularity = max(lim->physical_block_size, lim->io_min);
1927 +- unsigned int alignment = sector_div(sector, granularity >> 9) << 9;
1928 ++ unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
1929 ++ << SECTOR_SHIFT;
1930 +
1931 + return (granularity + lim->alignment_offset - alignment) % granularity;
1932 + }
1933 +@@ -1526,8 +1546,8 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
1934 + return 0;
1935 +
1936 + /* Why are these in bytes, not sectors? */
1937 +- alignment = lim->discard_alignment >> 9;
1938 +- granularity = lim->discard_granularity >> 9;
1939 ++ alignment = lim->discard_alignment >> SECTOR_SHIFT;
1940 ++ granularity = lim->discard_granularity >> SECTOR_SHIFT;
1941 + if (!granularity)
1942 + return 0;
1943 +
1944 +@@ -1538,7 +1558,7 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
1945 + offset = (granularity + alignment - offset) % granularity;
1946 +
1947 + /* Turn it back into bytes, gaah */
1948 +- return offset << 9;
1949 ++ return offset << SECTOR_SHIFT;
1950 + }
1951 +
1952 + static inline int bdev_discard_alignment(struct block_device *bdev)
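A standalone sketch (not part of the patch) of the byte/sector arithmetic that the new SECTOR_SHIFT/SECTOR_SIZE definitions above describe and that the hunks substitute for the hard-coded ">> 9" shifts; the guards mirror the header so it builds on its own:

  #include <stdio.h>

  #ifndef SECTOR_SHIFT
  #define SECTOR_SHIFT 9
  #endif
  #ifndef SECTOR_SIZE
  #define SECTOR_SIZE (1 << SECTOR_SHIFT)
  #endif

  int main(void)
  {
          unsigned long long bytes = 1048576;                  /* 1 MiB */
          unsigned long long sectors = bytes >> SECTOR_SHIFT;  /* bytes / 512 */

          printf("%llu bytes = %llu sectors of %d bytes\n",
                 bytes, sectors, SECTOR_SIZE);                 /* 2048 sectors */
          return 0;
  }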
1953 +diff --git a/include/linux/bvec.h b/include/linux/bvec.h
1954 +index ec8a4d7af6bda..f7dc68cd0a392 100644
1955 +--- a/include/linux/bvec.h
1956 ++++ b/include/linux/bvec.h
1957 +@@ -119,10 +119,17 @@ static inline bool bvec_iter_rewind(const struct bio_vec *bv,
1958 + return true;
1959 + }
1960 +
1961 ++static inline void bvec_iter_skip_zero_bvec(struct bvec_iter *iter)
1962 ++{
1963 ++ iter->bi_bvec_done = 0;
1964 ++ iter->bi_idx++;
1965 ++}
1966 ++
1967 + #define for_each_bvec(bvl, bio_vec, iter, start) \
1968 + for (iter = (start); \
1969 + (iter).bi_size && \
1970 + ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \
1971 +- bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
1972 ++ (bvl).bv_len ? (void)bvec_iter_advance((bio_vec), &(iter), \
1973 ++ (bvl).bv_len) : bvec_iter_skip_zero_bvec(&(iter)))
1974 +
1975 + #endif /* __LINUX_BVEC_ITER_H */
1976 +diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
1977 +index 91a063a1f3b37..5d067136fc27d 100644
1978 +--- a/include/linux/device-mapper.h
1979 ++++ b/include/linux/device-mapper.h
1980 +@@ -577,8 +577,6 @@ do { \
1981 + #define DMEMIT(x...) sz += ((sz >= maxlen) ? \
1982 + 0 : scnprintf(result + sz, maxlen - sz, x))
1983 +
1984 +-#define SECTOR_SHIFT 9
1985 +-
1986 + /*
1987 + * Definitions of return values from target end_io function.
1988 + */
1989 +diff --git a/include/linux/hid.h b/include/linux/hid.h
1990 +index ba1f675598314..40409453ef3e5 100644
1991 +--- a/include/linux/hid.h
1992 ++++ b/include/linux/hid.h
1993 +@@ -919,34 +919,49 @@ static inline void hid_device_io_stop(struct hid_device *hid) {
1994 + * @max: maximal valid usage->code to consider later (out parameter)
1995 + * @type: input event type (EV_KEY, EV_REL, ...)
1996 + * @c: code which corresponds to this usage and type
1997 ++ *
1998 ++ * The value pointed to by @bit will be set to NULL if either @type is
1999 ++ * an unhandled event type, or if @c is out of range for @type. This
2000 ++ * can be used as an error condition.
2001 + */
2002 + static inline void hid_map_usage(struct hid_input *hidinput,
2003 + struct hid_usage *usage, unsigned long **bit, int *max,
2004 +- __u8 type, __u16 c)
2005 ++ __u8 type, unsigned int c)
2006 + {
2007 + struct input_dev *input = hidinput->input;
2008 +-
2009 +- usage->type = type;
2010 +- usage->code = c;
2011 ++ unsigned long *bmap = NULL;
2012 ++ unsigned int limit = 0;
2013 +
2014 + switch (type) {
2015 + case EV_ABS:
2016 +- *bit = input->absbit;
2017 +- *max = ABS_MAX;
2018 ++ bmap = input->absbit;
2019 ++ limit = ABS_MAX;
2020 + break;
2021 + case EV_REL:
2022 +- *bit = input->relbit;
2023 +- *max = REL_MAX;
2024 ++ bmap = input->relbit;
2025 ++ limit = REL_MAX;
2026 + break;
2027 + case EV_KEY:
2028 +- *bit = input->keybit;
2029 +- *max = KEY_MAX;
2030 ++ bmap = input->keybit;
2031 ++ limit = KEY_MAX;
2032 + break;
2033 + case EV_LED:
2034 +- *bit = input->ledbit;
2035 +- *max = LED_MAX;
2036 ++ bmap = input->ledbit;
2037 ++ limit = LED_MAX;
2038 + break;
2039 + }
2040 ++
2041 ++ if (unlikely(c > limit || !bmap)) {
2042 ++ pr_warn_ratelimited("%s: Invalid code %d type %d\n",
2043 ++ input->name, c, type);
2044 ++ *bit = NULL;
2045 ++ return;
2046 ++ }
2047 ++
2048 ++ usage->type = type;
2049 ++ usage->code = c;
2050 ++ *max = limit;
2051 ++ *bit = bmap;
2052 + }
2053 +
2054 + /**
2055 +@@ -960,7 +975,8 @@ static inline void hid_map_usage_clear(struct hid_input *hidinput,
2056 + __u8 type, __u16 c)
2057 + {
2058 + hid_map_usage(hidinput, usage, bit, max, type, c);
2059 +- clear_bit(c, *bit);
2060 ++ if (*bit)
2061 ++ clear_bit(usage->code, *bit);
2062 + }
2063 +
2064 + /**
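For reference, a minimal userspace sketch (not part of the patch) of the validate-then-commit pattern hid_map_usage now follows: the output pointer stays NULL for an unknown type or an out-of-range code, which the caller treats as the error condition. The types, limits and bitmaps below are stand-ins:

  #include <stdio.h>

  enum { EV_KEY_DEMO, EV_REL_DEMO, EV_TYPES };     /* stand-in event types */
  static unsigned long key_bits[4], rel_bits[4];   /* stand-in capability bitmaps */
  static const unsigned int limits[EV_TYPES] = { 255, 15 };

  /* Validate first; only fill the out-parameters for a known type and an
   * in-range code, so a NULL *bit tells the caller the mapping failed. */
  static void map_usage(int type, unsigned int code,
                        unsigned long **bit, unsigned int *max)
  {
          *bit = NULL;
          if (type < 0 || type >= EV_TYPES || code > limits[type])
                  return;
          *bit = (type == EV_KEY_DEMO) ? key_bits : rel_bits;
          *max = limits[type];
  }

  int main(void)
  {
          unsigned long *bit;
          unsigned int max = 0;

          map_usage(EV_KEY_DEMO, 300, &bit, &max);
          printf("code 300: %s\n", bit ? "mapped" : "rejected");   /* rejected */
          map_usage(EV_REL_DEMO, 7, &bit, &max);
          printf("code 7:   %s, max %u\n", bit ? "mapped" : "rejected", max);
          return 0;
  }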
2065 +diff --git a/include/linux/ide.h b/include/linux/ide.h
2066 +index 70db3af045417..9885080c21c5c 100644
2067 +--- a/include/linux/ide.h
2068 ++++ b/include/linux/ide.h
2069 +@@ -165,7 +165,6 @@ struct ide_io_ports {
2070 + */
2071 + #define PARTN_BITS 6 /* number of minor dev bits for partitions */
2072 + #define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */
2073 +-#define SECTOR_SIZE 512
2074 +
2075 + /*
2076 + * Timeouts for various operations:
2077 +diff --git a/include/linux/libata.h b/include/linux/libata.h
2078 +index 5c9a44e3a0278..f772c55ed901d 100644
2079 +--- a/include/linux/libata.h
2080 ++++ b/include/linux/libata.h
2081 +@@ -440,6 +440,7 @@ enum {
2082 + ATA_HORKAGE_NO_DMA_LOG = (1 << 23), /* don't use DMA for log read */
2083 + ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */
2084 + ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */
2085 ++ ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */
2086 +
2087 + /* DMA mask for user DMA control: User visible values; DO NOT
2088 + renumber */
2089 +diff --git a/include/linux/log2.h b/include/linux/log2.h
2090 +index c373295f359fa..cca606609e1bc 100644
2091 +--- a/include/linux/log2.h
2092 ++++ b/include/linux/log2.h
2093 +@@ -159,7 +159,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
2094 + #define roundup_pow_of_two(n) \
2095 + ( \
2096 + __builtin_constant_p(n) ? ( \
2097 +- (n == 1) ? 1 : \
2098 ++ ((n) == 1) ? 1 : \
2099 + (1UL << (ilog2((n) - 1) + 1)) \
2100 + ) : \
2101 + __roundup_pow_of_two(n) \
2102 +diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
2103 +index ec4f0053d6d8e..d49f193a44a29 100644
2104 +--- a/include/linux/uaccess.h
2105 ++++ b/include/linux/uaccess.h
2106 +@@ -242,6 +242,17 @@ static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
2107 + extern long probe_kernel_read(void *dst, const void *src, size_t size);
2108 + extern long __probe_kernel_read(void *dst, const void *src, size_t size);
2109 +
2110 ++/*
2111 ++ * probe_user_read(): safely attempt to read from a location in user space
2112 ++ * @dst: pointer to the buffer that shall take the data
2113 ++ * @src: address to read from
2114 ++ * @size: size of the data chunk
2115 ++ *
2116 ++ * Safely read from address @src to the buffer at @dst. If a kernel fault
2117 ++ * happens, handle that and return -EFAULT.
2118 ++ */
2119 ++extern long probe_user_read(void *dst, const void __user *src, size_t size);
2120 ++
2121 + /*
2122 + * probe_kernel_write(): safely attempt to write to a location
2123 + * @dst: address to write to
2124 +@@ -254,7 +265,22 @@ extern long __probe_kernel_read(void *dst, const void *src, size_t size);
2125 + extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
2126 + extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
2127 +
2128 ++/*
2129 ++ * probe_user_write(): safely attempt to write to a location in user space
2130 ++ * @dst: address to write to
2131 ++ * @src: pointer to the data that shall be written
2132 ++ * @size: size of the data chunk
2133 ++ *
2134 ++ * Safely write to address @dst from the buffer at @src. If a kernel fault
2135 ++ * happens, handle that and return -EFAULT.
2136 ++ */
2137 ++extern long notrace probe_user_write(void __user *dst, const void *src, size_t size);
2138 ++extern long notrace __probe_user_write(void __user *dst, const void *src, size_t size);
2139 ++
2140 + extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
2141 ++extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr,
2142 ++ long count);
2143 ++extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count);
2144 +
2145 + /**
2146 + * probe_kernel_address(): safely attempt to read from a location
2147 +diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
2148 +index a9704c57430db..3107895115c25 100644
2149 +--- a/include/net/netfilter/nf_tables.h
2150 ++++ b/include/net/netfilter/nf_tables.h
2151 +@@ -136,6 +136,8 @@ static inline u8 nft_reg_load8(u32 *sreg)
2152 + static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
2153 + unsigned int len)
2154 + {
2155 ++ if (len % NFT_REG32_SIZE)
2156 ++ dst[len / NFT_REG32_SIZE] = 0;
2157 + memcpy(dst, src, len);
2158 + }
2159 +
2160 +diff --git a/include/uapi/linux/msdos_fs.h b/include/uapi/linux/msdos_fs.h
2161 +index a45d0754102e0..fde753735abae 100644
2162 +--- a/include/uapi/linux/msdos_fs.h
2163 ++++ b/include/uapi/linux/msdos_fs.h
2164 +@@ -10,7 +10,9 @@
2165 + * The MS-DOS filesystem constants/structures
2166 + */
2167 +
2168 ++#ifndef SECTOR_SIZE
2169 + #define SECTOR_SIZE 512 /* sector size (bytes) */
2170 ++#endif
2171 + #define SECTOR_BITS 9 /* log2(SECTOR_SIZE) */
2172 + #define MSDOS_DPB (MSDOS_DPS) /* dir entries per block */
2173 + #define MSDOS_DPB_BITS 4 /* log2(MSDOS_DPB) */
2174 +diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
2175 +index a3ee277b17a17..49b6997c32550 100644
2176 +--- a/include/uapi/linux/netfilter/nf_tables.h
2177 ++++ b/include/uapi/linux/netfilter/nf_tables.h
2178 +@@ -125,7 +125,7 @@ enum nf_tables_msg_types {
2179 + * @NFTA_LIST_ELEM: list element (NLA_NESTED)
2180 + */
2181 + enum nft_list_attributes {
2182 +- NFTA_LIST_UNPEC,
2183 ++ NFTA_LIST_UNSPEC,
2184 + NFTA_LIST_ELEM,
2185 + __NFTA_LIST_MAX
2186 + };
2187 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
2188 +index 194125cf2d2b9..3d919635004e9 100644
2189 +--- a/mm/hugetlb.c
2190 ++++ b/mm/hugetlb.c
2191 +@@ -2911,6 +2911,22 @@ static unsigned int cpuset_mems_nr(unsigned int *array)
2192 + }
2193 +
2194 + #ifdef CONFIG_SYSCTL
2195 ++static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
2196 ++ void *buffer, size_t *length,
2197 ++ loff_t *ppos, unsigned long *out)
2198 ++{
2199 ++ struct ctl_table dup_table;
2200 ++
2201 ++ /*
2202 ++ * In order to avoid races with __do_proc_doulongvec_minmax(), we
2203 ++ * can duplicate the @table and alter the duplicate of it.
2204 ++ */
2205 ++ dup_table = *table;
2206 ++ dup_table.data = out;
2207 ++
2208 ++ return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
2209 ++}
2210 ++
2211 + static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2212 + struct ctl_table *table, int write,
2213 + void __user *buffer, size_t *length, loff_t *ppos)
2214 +@@ -2922,9 +2938,8 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2215 + if (!hugepages_supported())
2216 + return -EOPNOTSUPP;
2217 +
2218 +- table->data = &tmp;
2219 +- table->maxlen = sizeof(unsigned long);
2220 +- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2221 ++ ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
2222 ++ &tmp);
2223 + if (ret)
2224 + goto out;
2225 +
2226 +@@ -2968,9 +2983,8 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2227 + if (write && hstate_is_gigantic(h))
2228 + return -EINVAL;
2229 +
2230 +- table->data = &tmp;
2231 +- table->maxlen = sizeof(unsigned long);
2232 +- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2233 ++ ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
2234 ++ &tmp);
2235 + if (ret)
2236 + goto out;
2237 +
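A small sketch (not part of the patch) of the duplicate-and-modify pattern the hugetlb comment above describes: work on a private copy of the shared table so concurrent readers never observe a temporarily rewritten ->data pointer. The struct below is a stand-in, not the kernel's ctl_table:

  #include <stdio.h>

  struct demo_table {                 /* stand-in for the kernel's ctl_table */
          const char *procname;
          void *data;
          int maxlen;
  };

  static int handler(const struct demo_table *shared, unsigned long *out)
  {
          /* Work on a private copy so users of 'shared' never see the
           * temporarily redirected ->data / ->maxlen. */
          struct demo_table dup = *shared;

          dup.data = out;
          dup.maxlen = sizeof(*out);
          printf("%s parsed into private buffer %p\n", dup.procname, dup.data);
          return 0;
  }

  int main(void)
  {
          static struct demo_table table = { "nr_hugepages", NULL, 0 };
          unsigned long tmp = 0;

          return handler(&table, &tmp);
  }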
2238 +diff --git a/mm/maccess.c b/mm/maccess.c
2239 +index 78f9274dd49d0..03ea550f5a743 100644
2240 +--- a/mm/maccess.c
2241 ++++ b/mm/maccess.c
2242 +@@ -5,8 +5,32 @@
2243 + #include <linux/mm.h>
2244 + #include <linux/uaccess.h>
2245 +
2246 ++static __always_inline long
2247 ++probe_read_common(void *dst, const void __user *src, size_t size)
2248 ++{
2249 ++ long ret;
2250 ++
2251 ++ pagefault_disable();
2252 ++ ret = __copy_from_user_inatomic(dst, src, size);
2253 ++ pagefault_enable();
2254 ++
2255 ++ return ret ? -EFAULT : 0;
2256 ++}
2257 ++
2258 ++static __always_inline long
2259 ++probe_write_common(void __user *dst, const void *src, size_t size)
2260 ++{
2261 ++ long ret;
2262 ++
2263 ++ pagefault_disable();
2264 ++ ret = __copy_to_user_inatomic(dst, src, size);
2265 ++ pagefault_enable();
2266 ++
2267 ++ return ret ? -EFAULT : 0;
2268 ++}
2269 ++
2270 + /**
2271 +- * probe_kernel_read(): safely attempt to read from a location
2272 ++ * probe_kernel_read(): safely attempt to read from a kernel-space location
2273 + * @dst: pointer to the buffer that shall take the data
2274 + * @src: address to read from
2275 + * @size: size of the data chunk
2276 +@@ -29,16 +53,40 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
2277 + mm_segment_t old_fs = get_fs();
2278 +
2279 + set_fs(KERNEL_DS);
2280 +- pagefault_disable();
2281 +- ret = __copy_from_user_inatomic(dst,
2282 +- (__force const void __user *)src, size);
2283 +- pagefault_enable();
2284 ++ ret = probe_read_common(dst, (__force const void __user *)src, size);
2285 + set_fs(old_fs);
2286 +
2287 +- return ret ? -EFAULT : 0;
2288 ++ return ret;
2289 + }
2290 + EXPORT_SYMBOL_GPL(probe_kernel_read);
2291 +
2292 ++/**
2293 ++ * probe_user_read(): safely attempt to read from a user-space location
2294 ++ * @dst: pointer to the buffer that shall take the data
2295 ++ * @src: address to read from. This must be a user address.
2296 ++ * @size: size of the data chunk
2297 ++ *
2298 ++ * Safely read from user address @src to the buffer at @dst. If a kernel fault
2299 ++ * happens, handle that and return -EFAULT.
2300 ++ */
2301 ++
2302 ++long __weak probe_user_read(void *dst, const void __user *src, size_t size)
2303 ++ __attribute__((alias("__probe_user_read")));
2304 ++
2305 ++long __probe_user_read(void *dst, const void __user *src, size_t size)
2306 ++{
2307 ++ long ret = -EFAULT;
2308 ++ mm_segment_t old_fs = get_fs();
2309 ++
2310 ++ set_fs(USER_DS);
2311 ++ if (access_ok(VERIFY_READ, src, size))
2312 ++ ret = probe_read_common(dst, src, size);
2313 ++ set_fs(old_fs);
2314 ++
2315 ++ return ret;
2316 ++}
2317 ++EXPORT_SYMBOL_GPL(probe_user_read);
2318 ++
2319 + /**
2320 + * probe_kernel_write(): safely attempt to write to a location
2321 + * @dst: address to write to
2322 +@@ -48,6 +96,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
2323 + * Safely write to address @dst from the buffer at @src. If a kernel fault
2324 + * happens, handle that and return -EFAULT.
2325 + */
2326 ++
2327 + long __weak probe_kernel_write(void *dst, const void *src, size_t size)
2328 + __attribute__((alias("__probe_kernel_write")));
2329 +
2330 +@@ -57,15 +106,40 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
2331 + mm_segment_t old_fs = get_fs();
2332 +
2333 + set_fs(KERNEL_DS);
2334 +- pagefault_disable();
2335 +- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
2336 +- pagefault_enable();
2337 ++ ret = probe_write_common((__force void __user *)dst, src, size);
2338 + set_fs(old_fs);
2339 +
2340 +- return ret ? -EFAULT : 0;
2341 ++ return ret;
2342 + }
2343 + EXPORT_SYMBOL_GPL(probe_kernel_write);
2344 +
2345 ++/**
2346 ++ * probe_user_write(): safely attempt to write to a user-space location
2347 ++ * @dst: address to write to
2348 ++ * @src: pointer to the data that shall be written
2349 ++ * @size: size of the data chunk
2350 ++ *
2351 ++ * Safely write to address @dst from the buffer at @src. If a kernel fault
2352 ++ * happens, handle that and return -EFAULT.
2353 ++ */
2354 ++
2355 ++long __weak probe_user_write(void __user *dst, const void *src, size_t size)
2356 ++ __attribute__((alias("__probe_user_write")));
2357 ++
2358 ++long __probe_user_write(void __user *dst, const void *src, size_t size)
2359 ++{
2360 ++ long ret = -EFAULT;
2361 ++ mm_segment_t old_fs = get_fs();
2362 ++
2363 ++ set_fs(USER_DS);
2364 ++ if (access_ok(VERIFY_WRITE, dst, size))
2365 ++ ret = probe_write_common(dst, src, size);
2366 ++ set_fs(old_fs);
2367 ++
2368 ++ return ret;
2369 ++}
2370 ++EXPORT_SYMBOL_GPL(probe_user_write);
2371 ++
2372 + /**
2373 + * strncpy_from_unsafe: - Copy a NUL terminated string from unsafe address.
2374 + * @dst: Destination address, in kernel space. This buffer must be at
2375 +@@ -105,3 +179,76 @@ long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count)
2376 +
2377 + return ret ? -EFAULT : src - unsafe_addr;
2378 + }
2379 ++
2380 ++/**
2381 ++ * strncpy_from_unsafe_user: - Copy a NUL terminated string from unsafe user
2382 ++ * address.
2383 ++ * @dst: Destination address, in kernel space. This buffer must be at
2384 ++ * least @count bytes long.
2385 ++ * @unsafe_addr: Unsafe user address.
2386 ++ * @count: Maximum number of bytes to copy, including the trailing NUL.
2387 ++ *
2388 ++ * Copies a NUL-terminated string from unsafe user address to kernel buffer.
2389 ++ *
2390 ++ * On success, returns the length of the string INCLUDING the trailing NUL.
2391 ++ *
2392 ++ * If access fails, returns -EFAULT (some data may have been copied
2393 ++ * and the trailing NUL added).
2394 ++ *
2395 ++ * If @count is smaller than the length of the string, copies @count-1 bytes,
2396 ++ * sets the last byte of @dst buffer to NUL and returns @count.
2397 ++ */
2398 ++long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr,
2399 ++ long count)
2400 ++{
2401 ++ mm_segment_t old_fs = get_fs();
2402 ++ long ret;
2403 ++
2404 ++ if (unlikely(count <= 0))
2405 ++ return 0;
2406 ++
2407 ++ set_fs(USER_DS);
2408 ++ pagefault_disable();
2409 ++ ret = strncpy_from_user(dst, unsafe_addr, count);
2410 ++ pagefault_enable();
2411 ++ set_fs(old_fs);
2412 ++
2413 ++ if (ret >= count) {
2414 ++ ret = count;
2415 ++ dst[ret - 1] = '\0';
2416 ++ } else if (ret > 0) {
2417 ++ ret++;
2418 ++ }
2419 ++
2420 ++ return ret;
2421 ++}
2422 ++
2423 ++/**
2424 ++ * strnlen_unsafe_user: - Get the size of a user string INCLUDING final NUL.
2425 ++ * @unsafe_addr: The string to measure.
2426 ++ * @count: Maximum count (including NUL)
2427 ++ *
2428 ++ * Get the size of a NUL-terminated string in user space without pagefault.
2429 ++ *
2430 ++ * Returns the size of the string INCLUDING the terminating NUL.
2431 ++ *
2432 ++ * If the string is too long, returns a number larger than @count. User
2433 ++ * has to check the return value against "> count".
2434 ++ * On exception (or invalid count), returns 0.
2435 ++ *
2436 ++ * Unlike strnlen_user, this can be used from IRQ handler etc. because
2437 ++ * it disables pagefaults.
2438 ++ */
2439 ++long strnlen_unsafe_user(const void __user *unsafe_addr, long count)
2440 ++{
2441 ++ mm_segment_t old_fs = get_fs();
2442 ++ int ret;
2443 ++
2444 ++ set_fs(USER_DS);
2445 ++ pagefault_disable();
2446 ++ ret = strnlen_user(unsafe_addr, count);
2447 ++ pagefault_enable();
2448 ++ set_fs(old_fs);
2449 ++
2450 ++ return ret;
2451 ++}
2452 +diff --git a/mm/slub.c b/mm/slub.c
2453 +index 09d4cc4391bb2..db2639832037d 100644
2454 +--- a/mm/slub.c
2455 ++++ b/mm/slub.c
2456 +@@ -659,12 +659,12 @@ static void slab_fix(struct kmem_cache *s, char *fmt, ...)
2457 + }
2458 +
2459 + static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
2460 +- void *freelist, void *nextfree)
2461 ++ void **freelist, void *nextfree)
2462 + {
2463 + if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
2464 +- !check_valid_pointer(s, page, nextfree)) {
2465 +- object_err(s, page, freelist, "Freechain corrupt");
2466 +- freelist = NULL;
2467 ++ !check_valid_pointer(s, page, nextfree) && freelist) {
2468 ++ object_err(s, page, *freelist, "Freechain corrupt");
2469 ++ *freelist = NULL;
2470 + slab_fix(s, "Isolate corrupted freechain");
2471 + return true;
2472 + }
2473 +@@ -1354,7 +1354,7 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
2474 + int objects) {}
2475 +
2476 + static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
2477 +- void *freelist, void *nextfree)
2478 ++ void **freelist, void *nextfree)
2479 + {
2480 + return false;
2481 + }
2482 +@@ -2053,7 +2053,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
2483 + * 'freelist' is already corrupted. So isolate all objects
2484 + * starting at 'freelist'.
2485 + */
2486 +- if (freelist_corrupted(s, page, freelist, nextfree))
2487 ++ if (freelist_corrupted(s, page, &freelist, nextfree))
2488 + break;
2489 +
2490 + do {
2491 +diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
2492 +index f0abbbdafe07f..c49c48866a3fc 100644
2493 +--- a/net/batman-adv/bat_v_ogm.c
2494 ++++ b/net/batman-adv/bat_v_ogm.c
2495 +@@ -715,6 +715,12 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset,
2496 + ntohl(ogm_packet->seqno), ogm_throughput, ogm_packet->ttl,
2497 + ogm_packet->version, ntohs(ogm_packet->tvlv_len));
2498 +
2499 ++ if (batadv_is_my_mac(bat_priv, ogm_packet->orig)) {
2500 ++ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
2501 ++ "Drop packet: originator packet from ourself\n");
2502 ++ return;
2503 ++ }
2504 ++
2505 + /* If the throughput metric is 0, immediately drop the packet. No need
2506 + * to create orig_node / neigh_node for an unusable route.
2507 + */
2508 +@@ -842,11 +848,6 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
2509 + if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
2510 + goto free_skb;
2511 +
2512 +- ogm_packet = (struct batadv_ogm2_packet *)skb->data;
2513 +-
2514 +- if (batadv_is_my_mac(bat_priv, ogm_packet->orig))
2515 +- goto free_skb;
2516 +-
2517 + batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX);
2518 + batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES,
2519 + skb->len + ETH_HLEN);
2520 +diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
2521 +index c761c0c233e4b..ae647fa69ce85 100644
2522 +--- a/net/batman-adv/bridge_loop_avoidance.c
2523 ++++ b/net/batman-adv/bridge_loop_avoidance.c
2524 +@@ -450,7 +450,10 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
2525 + batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
2526 + skb->len + ETH_HLEN);
2527 +
2528 +- netif_rx(skb);
2529 ++ if (in_interrupt())
2530 ++ netif_rx(skb);
2531 ++ else
2532 ++ netif_rx_ni(skb);
2533 + out:
2534 + if (primary_if)
2535 + batadv_hardif_put(primary_if);
2536 +diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
2537 +index c6a7341f05270..056af2eec4a2a 100644
2538 +--- a/net/batman-adv/gateway_client.c
2539 ++++ b/net/batman-adv/gateway_client.c
2540 +@@ -674,8 +674,10 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
2541 +
2542 + chaddr_offset = *header_len + BATADV_DHCP_CHADDR_OFFSET;
2543 + /* store the client address if the message is going to a client */
2544 +- if (ret == BATADV_DHCP_TO_CLIENT &&
2545 +- pskb_may_pull(skb, chaddr_offset + ETH_ALEN)) {
2546 ++ if (ret == BATADV_DHCP_TO_CLIENT) {
2547 ++ if (!pskb_may_pull(skb, chaddr_offset + ETH_ALEN))
2548 ++ return BATADV_DHCP_NO;
2549 ++
2550 + /* check if the DHCP packet carries an Ethernet DHCP */
2551 + p = skb->data + *header_len + BATADV_DHCP_HTYPE_OFFSET;
2552 + if (*p != BATADV_DHCP_HTYPE_ETHERNET)
2553 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
2554 +index 5b8d5bfeb7ac5..7c95314f0b7de 100644
2555 +--- a/net/netfilter/nf_tables_api.c
2556 ++++ b/net/netfilter/nf_tables_api.c
2557 +@@ -2882,7 +2882,8 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
2558 + goto nla_put_failure;
2559 + }
2560 +
2561 +- if (nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata))
2562 ++ if (set->udata &&
2563 ++ nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata))
2564 + goto nla_put_failure;
2565 +
2566 + desc = nla_nest_start(skb, NFTA_SET_DESC);
2567 +diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
2568 +index 19446a89a2a81..b1a9f330a51fe 100644
2569 +--- a/net/netfilter/nft_payload.c
2570 ++++ b/net/netfilter/nft_payload.c
2571 +@@ -79,7 +79,9 @@ static void nft_payload_eval(const struct nft_expr *expr,
2572 + u32 *dest = &regs->data[priv->dreg];
2573 + int offset;
2574 +
2575 +- dest[priv->len / NFT_REG32_SIZE] = 0;
2576 ++ if (priv->len % NFT_REG32_SIZE)
2577 ++ dest[priv->len / NFT_REG32_SIZE] = 0;
2578 ++
2579 + switch (priv->base) {
2580 + case NFT_PAYLOAD_LL_HEADER:
2581 + if (!skb_mac_header_was_set(skb))
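A minimal standalone sketch (not part of the patch) of why both netfilter hunks zero the last 32-bit register word before a copy whose length is not a multiple of 4: without it, stale bytes in the tail of the destination word would survive the memcpy:

  #include <stdio.h>
  #include <string.h>
  #include <stdint.h>

  #define REG32_SIZE 4

  /* Zero the last 32-bit register word before a partial copy so the bytes
   * past 'len' are deterministic rather than whatever was there before. */
  static void reg_copy(uint32_t *dst, const void *src, unsigned int len)
  {
          if (len % REG32_SIZE)
                  dst[len / REG32_SIZE] = 0;
          memcpy(dst, src, len);
  }

  int main(void)
  {
          uint32_t regs[2] = { 0xffffffff, 0xffffffff };
          uint8_t oui[3] = { 0x00, 0x11, 0x22 };

          reg_copy(regs, oui, sizeof(oui));
          printf("reg0 = 0x%08x\n", regs[0]);   /* stale 0xff in the 4th byte is gone */
          return 0;
  }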
2582 +diff --git a/net/wireless/reg.c b/net/wireless/reg.c
2583 +index b95d1c2bdef7e..9eb9d34cef7b1 100644
2584 +--- a/net/wireless/reg.c
2585 ++++ b/net/wireless/reg.c
2586 +@@ -2408,6 +2408,9 @@ int regulatory_hint_user(const char *alpha2,
2587 + if (WARN_ON(!alpha2))
2588 + return -EINVAL;
2589 +
2590 ++ if (!is_world_regdom(alpha2) && !is_an_alpha2(alpha2))
2591 ++ return -EINVAL;
2592 ++
2593 + request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL);
2594 + if (!request)
2595 + return -ENOMEM;
2596 +diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
2597 +index 8b80bac055e49..d702bdf19eb10 100755
2598 +--- a/scripts/checkpatch.pl
2599 ++++ b/scripts/checkpatch.pl
2600 +@@ -2428,8 +2428,8 @@ sub process {
2601 +
2602 + # Check if the commit log has what seems like a diff which can confuse patch
2603 + if ($in_commit_log && !$commit_log_has_diff &&
2604 +- (($line =~ m@^\s+diff\b.*a/[\w/]+@ &&
2605 +- $line =~ m@^\s+diff\b.*a/([\w/]+)\s+b/$1\b@) ||
2606 ++ (($line =~ m@^\s+diff\b.*a/([\w/]+)@ &&
2607 ++ $line =~ m@^\s+diff\b.*a/[\w/]+\s+b/$1\b@) ||
2608 + $line =~ m@^\s*(?:\-\-\-\s+a/|\+\+\+\s+b/)@ ||
2609 + $line =~ m/^\s*\@\@ \-\d+,\d+ \+\d+,\d+ \@\@/)) {
2610 + ERROR("DIFF_IN_COMMIT_MSG",
2611 +diff --git a/sound/core/oss/mulaw.c b/sound/core/oss/mulaw.c
2612 +index 3788906421a73..fe27034f28460 100644
2613 +--- a/sound/core/oss/mulaw.c
2614 ++++ b/sound/core/oss/mulaw.c
2615 +@@ -329,8 +329,8 @@ int snd_pcm_plugin_build_mulaw(struct snd_pcm_substream *plug,
2616 + snd_BUG();
2617 + return -EINVAL;
2618 + }
2619 +- if (snd_BUG_ON(!snd_pcm_format_linear(format->format)))
2620 +- return -ENXIO;
2621 ++ if (!snd_pcm_format_linear(format->format))
2622 ++ return -EINVAL;
2623 +
2624 + err = snd_pcm_plugin_build(plug, "Mu-Law<->linear conversion",
2625 + src_format, dst_format,
2626 +diff --git a/sound/firewire/digi00x/digi00x.c b/sound/firewire/digi00x/digi00x.c
2627 +index ef689997d6a5b..bf53e342788e2 100644
2628 +--- a/sound/firewire/digi00x/digi00x.c
2629 ++++ b/sound/firewire/digi00x/digi00x.c
2630 +@@ -15,6 +15,7 @@ MODULE_LICENSE("GPL v2");
2631 + #define VENDOR_DIGIDESIGN 0x00a07e
2632 + #define MODEL_CONSOLE 0x000001
2633 + #define MODEL_RACK 0x000002
2634 ++#define SPEC_VERSION 0x000001
2635 +
2636 + static int name_card(struct snd_dg00x *dg00x)
2637 + {
2638 +@@ -185,14 +186,18 @@ static const struct ieee1394_device_id snd_dg00x_id_table[] = {
2639 + /* Both of 002/003 use the same ID. */
2640 + {
2641 + .match_flags = IEEE1394_MATCH_VENDOR_ID |
2642 ++ IEEE1394_MATCH_VERSION |
2643 + IEEE1394_MATCH_MODEL_ID,
2644 + .vendor_id = VENDOR_DIGIDESIGN,
2645 ++ .version = SPEC_VERSION,
2646 + .model_id = MODEL_CONSOLE,
2647 + },
2648 + {
2649 + .match_flags = IEEE1394_MATCH_VENDOR_ID |
2650 ++ IEEE1394_MATCH_VERSION |
2651 + IEEE1394_MATCH_MODEL_ID,
2652 + .vendor_id = VENDOR_DIGIDESIGN,
2653 ++ .version = SPEC_VERSION,
2654 + .model_id = MODEL_RACK,
2655 + },
2656 + {}
2657 +diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c
2658 +index cd27b55366544..675b812e96d63 100644
2659 +--- a/sound/pci/ca0106/ca0106_main.c
2660 ++++ b/sound/pci/ca0106/ca0106_main.c
2661 +@@ -551,7 +551,8 @@ static int snd_ca0106_pcm_power_dac(struct snd_ca0106 *chip, int channel_id,
2662 + else
2663 + /* Power down */
2664 + chip->spi_dac_reg[reg] |= bit;
2665 +- return snd_ca0106_spi_write(chip, chip->spi_dac_reg[reg]);
2666 ++ if (snd_ca0106_spi_write(chip, chip->spi_dac_reg[reg]) != 0)
2667 ++ return -ENXIO;
2668 + }
2669 + return 0;
2670 + }
2671 +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
2672 +index 9e8cfc409b4b3..cb7047bf844df 100644
2673 +--- a/sound/pci/hda/patch_hdmi.c
2674 ++++ b/sound/pci/hda/patch_hdmi.c
2675 +@@ -2546,6 +2546,7 @@ static void i915_pin_cvt_fixup(struct hda_codec *codec,
2676 + hda_nid_t cvt_nid)
2677 + {
2678 + if (per_pin) {
2679 ++ haswell_verify_D0(codec, per_pin->cvt_nid, per_pin->pin_nid);
2680 + snd_hda_set_dev_select(codec, per_pin->pin_nid,
2681 + per_pin->dev_id);
2682 + intel_verify_pin_cvt_connect(codec, per_pin);
2683 +diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
2684 +index 362493a2f950b..3b733511bb2b3 100644
2685 +--- a/tools/include/uapi/linux/perf_event.h
2686 ++++ b/tools/include/uapi/linux/perf_event.h
2687 +@@ -1033,7 +1033,7 @@ union perf_mem_data_src {
2688 +
2689 + #define PERF_MEM_SNOOPX_FWD 0x01 /* forward */
2690 + /* 1 free */
2691 +-#define PERF_MEM_SNOOPX_SHIFT 37
2692 ++#define PERF_MEM_SNOOPX_SHIFT 38
2693 +
2694 + /* locked instruction */
2695 + #define PERF_MEM_LOCK_NA 0x01 /* not available */
2696 +diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
2697 +index 63526f4416ea4..7b36bc6e32bb0 100644
2698 +--- a/tools/perf/Documentation/perf-record.txt
2699 ++++ b/tools/perf/Documentation/perf-record.txt
2700 +@@ -33,6 +33,10 @@ OPTIONS
2701 + - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a
2702 + hexadecimal event descriptor.
2703 +
2704 ++ - a symbolic or raw PMU event followed by an optional colon
2705 ++ and a list of event modifiers, e.g., cpu-cycles:p. See the
2706 ++ linkperf:perf-list[1] man page for details on event modifiers.
2707 ++
2708 + - a symbolically formed PMU event like 'pmu/param1=0x3,param2/' where
2709 + 'param1', 'param2', etc are defined as formats for the PMU in
2710 + /sys/bus/event_source/devices/<pmu>/format/*.
2711 +diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
2712 +index c37d61682dfb1..670aff81518b6 100644
2713 +--- a/tools/perf/Documentation/perf-stat.txt
2714 ++++ b/tools/perf/Documentation/perf-stat.txt
2715 +@@ -39,6 +39,10 @@ report::
2716 + - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a
2717 + hexadecimal event descriptor.
2718 +
2719 ++ - a symbolic or raw PMU event followed by an optional colon
2720 ++ and a list of event modifiers, e.g., cpu-cycles:p. See the
2721 ++ linkperf:perf-list[1] man page for details on event modifiers.
2722 ++
2723 + - a symbolically formed event like 'pmu/param1=0x3,param2/' where
2724 + param1 and param2 are defined as formats for the PMU in
2725 + /sys/bus/event_source/devices/<pmu>/format/*