From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Thu, 01 Oct 2020 12:45:40
Message-Id: 1601556324.310f5c1a8c792bf9601dccaa67621ff87d95d8a8.mpagano@gentoo
commit: 310f5c1a8c792bf9601dccaa67621ff87d95d8a8
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Oct 1 12:45:24 2020 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Oct 1 12:45:24 2020 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=310f5c1a

Linux patch 4.19.149

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1148_linux-4.19.149.patch | 9972 +++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 9976 insertions(+)

diff --git a/0000_README b/0000_README
index 9707ae7..e7a8587 100644
--- a/0000_README
+++ b/0000_README
@@ -631,6 +631,10 @@ Patch: 1147_linux-4.19.148.patch
From: https://www.kernel.org
Desc: Linux 4.19.148

+Patch: 1148_linux-4.19.149.patch
+From: https://www.kernel.org
+Desc: Linux 4.19.149
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1148_linux-4.19.149.patch b/1148_linux-4.19.149.patch
new file mode 100644
index 0000000..75c6340
--- /dev/null
+++ b/1148_linux-4.19.149.patch
@@ -0,0 +1,9972 @@
+diff --git a/Documentation/devicetree/bindings/sound/wm8994.txt b/Documentation/devicetree/bindings/sound/wm8994.txt
+index 68cccc4653ba3..367b58ce1bb92 100644
+--- a/Documentation/devicetree/bindings/sound/wm8994.txt
++++ b/Documentation/devicetree/bindings/sound/wm8994.txt
+@@ -14,9 +14,15 @@ Required properties:
+ - #gpio-cells : Must be 2. The first cell is the pin number and the
+ second cell is used to specify optional parameters (currently unused).
+
+- - AVDD2-supply, DBVDD1-supply, DBVDD2-supply, DBVDD3-supply, CPVDD-supply,
+- SPKVDD1-supply, SPKVDD2-supply : power supplies for the device, as covered
+- in Documentation/devicetree/bindings/regulator/regulator.txt
++ - power supplies for the device, as covered in
++ Documentation/devicetree/bindings/regulator/regulator.txt, depending
++ on compatible:
++ - for wlf,wm1811 and wlf,wm8958:
++ AVDD1-supply, AVDD2-supply, DBVDD1-supply, DBVDD2-supply, DBVDD3-supply,
++ DCVDD-supply, CPVDD-supply, SPKVDD1-supply, SPKVDD2-supply
++ - for wlf,wm8994:
++ AVDD1-supply, AVDD2-supply, DBVDD-supply, DCVDD-supply, CPVDD-supply,
++ SPKVDD1-supply, SPKVDD2-supply
+
+ Optional properties:
+
+@@ -73,11 +79,11 @@ wm8994: codec@1a {
+
+ lineout1-se;
+
++ AVDD1-supply = <&regulator>;
+ AVDD2-supply = <&regulator>;
+ CPVDD-supply = <&regulator>;
+- DBVDD1-supply = <&regulator>;
+- DBVDD2-supply = <&regulator>;
+- DBVDD3-supply = <&regulator>;
++ DBVDD-supply = <&regulator>;
++ DCVDD-supply = <&regulator>;
+ SPKVDD1-supply = <&regulator>;
+ SPKVDD2-supply = <&regulator>;
+ };
+diff --git a/Documentation/driver-api/libata.rst b/Documentation/driver-api/libata.rst
+index 70e180e6b93dc..9f3e5dc311840 100644
+--- a/Documentation/driver-api/libata.rst
++++ b/Documentation/driver-api/libata.rst
+@@ -250,7 +250,7 @@ High-level taskfile hooks
+
+ ::
+
+- void (*qc_prep) (struct ata_queued_cmd *qc);
++ enum ata_completion_errors (*qc_prep) (struct ata_queued_cmd *qc);
+ int (*qc_issue) (struct ata_queued_cmd *qc);
+
+
+diff --git a/Makefile b/Makefile
+index 3ffd5b03e6ddf..3ff5cf33ef55c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 148
++SUBLEVEL = 149
+ EXTRAVERSION =
+ NAME = "People's Front"
+
+diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
+index 7d2ca035d6c8f..11d4ff9f3e4df 100644
+--- a/arch/arm/include/asm/kvm_emulate.h
++++ b/arch/arm/include/asm/kvm_emulate.h
+@@ -216,7 +216,7 @@ static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
+ return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
+ }
+
+-static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
++static inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
+ {
+ return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
+ }
+@@ -248,16 +248,21 @@ static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
+ return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
+ }
+
+-static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
++static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
+ {
+ return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
+ }
+
+-static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
++static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
+ {
+ return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
+ }
+
++static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
++{
++ return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
++}
++
+ static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
+ {
+ return kvm_vcpu_get_hsr(vcpu) & HSR_FSC;
+diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
+index a4d4a28fe07df..d23ab9ec130a3 100644
+--- a/arch/arm/kernel/stacktrace.c
++++ b/arch/arm/kernel/stacktrace.c
+@@ -115,6 +115,8 @@ static int save_trace(struct stackframe *frame, void *d)
+ return 0;
+
+ regs = (struct pt_regs *)frame->sp;
++ if ((unsigned long)&regs[1] > ALIGN(frame->sp, THREAD_SIZE))
++ return 0;
+
+ trace->entries[trace->nr_entries++] = regs->ARM_pc;
+
+diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
+index badf02ca36938..aec533168f046 100644
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -67,14 +67,16 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
+
+ void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
+ {
++ unsigned long end = frame + 4 + sizeof(struct pt_regs);
++
+ #ifdef CONFIG_KALLSYMS
+ printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
+ #else
+ printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
+ #endif
+
+- if (in_entry_text(from))
+- dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
++ if (in_entry_text(from) && end <= ALIGN(frame, THREAD_SIZE))
++ dump_mem("", "Exception stack", frame + 4, end);
+ }
+
+ void dump_backtrace_stm(u32 *stack, u32 instruction)
+diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
+index 778cb4f868d9b..669c960dd069c 100644
+--- a/arch/arm64/include/asm/kvm_emulate.h
++++ b/arch/arm64/include/asm/kvm_emulate.h
+@@ -303,7 +303,7 @@ static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
+ return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
+ }
+
+-static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
++static inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
+ {
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
+ }
+@@ -311,7 +311,7 @@ static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
+ static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
+ {
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
+- kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
++ kvm_vcpu_abt_iss1tw(vcpu); /* AF/DBM update */
+ }
+
+ static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
+@@ -340,6 +340,11 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
+ return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
+ }
+
++static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
++{
++ return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
++}
++
+ static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
+ {
+ return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index ac3126aba0368..de6fa9b4abfa0 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -155,11 +155,10 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
+ S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
+ S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
+- /* Linux doesn't care about the EL3 */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
+- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
+- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
+- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
+ ARM64_FTR_END,
+ };
+
+@@ -301,7 +300,7 @@ static const struct arm64_ftr_bits ftr_id_pfr0[] = {
+ };
+
+ static const struct arm64_ftr_bits ftr_id_dfr0[] = {
+- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
++ /* [31:28] TraceFilt */
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf), /* PerfMon */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
+@@ -671,9 +670,6 @@ void update_cpu_features(int cpu,
+ taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
+ info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);
+
+- /*
+- * EL3 is not our concern.
+- */
+ taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
+ info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
+ taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
+diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
+index f146bff53edf9..15312e429b7d1 100644
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -430,7 +430,7 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
+ kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
+ kvm_vcpu_dabt_isvalid(vcpu) &&
+ !kvm_vcpu_dabt_isextabt(vcpu) &&
+- !kvm_vcpu_dabt_iss1tw(vcpu);
++ !kvm_vcpu_abt_iss1tw(vcpu);
+
+ if (valid) {
+ int ret = __vgic_v2_perform_cpuif_access(vcpu);
+diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
+index 96810d91da2bd..4a25ce6a1823d 100644
+--- a/arch/m68k/q40/config.c
++++ b/arch/m68k/q40/config.c
+@@ -273,6 +273,7 @@ static int q40_get_rtc_pll(struct rtc_pll_info *pll)
+ {
+ int tmp = Q40_RTC_CTRL;
+
++ pll->pll_ctrl = 0;
+ pll->pll_value = tmp & Q40_RTC_PLL_MASK;
+ if (tmp & Q40_RTC_PLL_SIGN)
+ pll->pll_value = -pll->pll_value;
+diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
+index a45af3de075d9..d43e4ab20b238 100644
+--- a/arch/mips/include/asm/cpu-type.h
++++ b/arch/mips/include/asm/cpu-type.h
+@@ -47,6 +47,7 @@ static inline int __pure __get_cpu_type(const int cpu_type)
+ case CPU_34K:
+ case CPU_1004K:
+ case CPU_74K:
++ case CPU_1074K:
+ case CPU_M14KC:
+ case CPU_M14KEC:
+ case CPU_INTERAPTIV:
+diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
+index a790d5cf6ea37..684e8ae00d160 100644
+--- a/arch/powerpc/include/asm/kvm_asm.h
++++ b/arch/powerpc/include/asm/kvm_asm.h
+@@ -163,4 +163,7 @@
+
+ #define KVM_INST_FETCH_FAILED -1
+
++/* Extract PO and XOP opcode fields */
++#define PO_XOP_OPCODE_MASK 0xfc0007fe
++
+ #endif /* __POWERPC_KVM_ASM_H__ */
+diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
+index fe3c6f3bd3b62..d123cba0992d0 100644
+--- a/arch/powerpc/kernel/eeh.c
++++ b/arch/powerpc/kernel/eeh.c
+@@ -502,7 +502,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
+ rc = 1;
+ if (pe->state & EEH_PE_ISOLATED) {
+ pe->check_count++;
+- if (pe->check_count % EEH_MAX_FAILS == 0) {
++ if (pe->check_count == EEH_MAX_FAILS) {
+ dn = pci_device_to_OF_node(dev);
+ if (dn)
+ location = of_get_property(dn, "ibm,loc-code",
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index d5f351f02c153..7781f0168ce8c 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -430,11 +430,11 @@ out:
+ #ifdef CONFIG_PPC_BOOK3S_64
+ BUG_ON(get_paca()->in_nmi == 0);
+ if (get_paca()->in_nmi > 1)
+- nmi_panic(regs, "Unrecoverable nested System Reset");
++ die("Unrecoverable nested System Reset", regs, SIGABRT);
+ #endif
+ /* Must die if the interrupt is not recoverable */
+ if (!(regs->msr & MSR_RI))
+- nmi_panic(regs, "Unrecoverable System Reset");
++ die("Unrecoverable System Reset", regs, SIGABRT);
+
+ if (!nested)
+ nmi_exit();
+@@ -775,7 +775,7 @@ void machine_check_exception(struct pt_regs *regs)
+
+ /* Must die if the interrupt is not recoverable */
+ if (!(regs->msr & MSR_RI))
+- nmi_panic(regs, "Unrecoverable Machine check");
++ die("Unrecoverable Machine check", regs, SIGBUS);
+
+ return;
+
+diff --git a/arch/powerpc/kvm/book3s_hv_tm.c b/arch/powerpc/kvm/book3s_hv_tm.c
+index 31cd0f327c8a2..e7fd60cf97804 100644
+--- a/arch/powerpc/kvm/book3s_hv_tm.c
++++ b/arch/powerpc/kvm/book3s_hv_tm.c
+@@ -6,6 +6,8 @@
+ * published by the Free Software Foundation.
+ */
+
++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
++
+ #include <linux/kvm_host.h>
+
+ #include <asm/kvm_ppc.h>
+@@ -47,7 +49,18 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
+ u64 newmsr, bescr;
+ int ra, rs;
+
+- switch (instr & 0xfc0007ff) {
++ /*
++ * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit
++ * in these instructions, so masking bit 31 out doesn't change these
++ * instructions. For treclaim., tsr., and trechkpt. instructions if bit
++ * 31 = 0 then they are per ISA invalid forms, however P9 UM, in section
++ * 4.6.10 Book II Invalid Forms, informs specifically that ignoring bit
++ * 31 is an acceptable way to handle these invalid forms that have
++ * bit 31 = 0. Moreover, for emulation purposes both forms (w/ and wo/
++ * bit 31 set) can generate a softpatch interrupt. Hence both forms
++ * are handled below for these instructions so they behave the same way.
++ */
++ switch (instr & PO_XOP_OPCODE_MASK) {
+ case PPC_INST_RFID:
+ /* XXX do we need to check for PR=0 here? */
+ newmsr = vcpu->arch.shregs.srr1;
+@@ -108,7 +121,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
+ vcpu->arch.shregs.msr = newmsr;
+ return RESUME_GUEST;
+
+- case PPC_INST_TSR:
++ /* ignore bit 31, see comment above */
++ case (PPC_INST_TSR & PO_XOP_OPCODE_MASK):
+ /* check for PR=1 and arch 2.06 bit set in PCR */
+ if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
+ /* generate an illegal instruction interrupt */
+@@ -143,7 +157,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
+ vcpu->arch.shregs.msr = msr;
+ return RESUME_GUEST;
+
+- case PPC_INST_TRECLAIM:
++ /* ignore bit 31, see comment above */
++ case (PPC_INST_TRECLAIM & PO_XOP_OPCODE_MASK):
+ /* check for TM disabled in the HFSCR or MSR */
+ if (!(vcpu->arch.hfscr & HFSCR_TM)) {
+ /* generate an illegal instruction interrupt */
+@@ -179,7 +194,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
+ vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
+ return RESUME_GUEST;
+
+- case PPC_INST_TRECHKPT:
++ /* ignore bit 31, see comment above */
++ case (PPC_INST_TRECHKPT & PO_XOP_OPCODE_MASK):
+ /* XXX do we need to check for PR=0 here? */
+ /* check for TM disabled in the HFSCR or MSR */
+ if (!(vcpu->arch.hfscr & HFSCR_TM)) {
+@@ -211,6 +227,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
+ }
+
+ /* What should we do here? We didn't recognize the instruction */
+- WARN_ON_ONCE(1);
++ kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
++ pr_warn_ratelimited("Unrecognized TM-related instruction %#x for emulation", instr);
++
+ return RESUME_GUEST;
+ }
+diff --git a/arch/powerpc/kvm/book3s_hv_tm_builtin.c b/arch/powerpc/kvm/book3s_hv_tm_builtin.c
+index 3cf5863bc06e8..3c7ca2fa19597 100644
+--- a/arch/powerpc/kvm/book3s_hv_tm_builtin.c
++++ b/arch/powerpc/kvm/book3s_hv_tm_builtin.c
+@@ -26,7 +26,18 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
+ u64 newmsr, msr, bescr;
+ int rs;
+
+- switch (instr & 0xfc0007ff) {
++ /*
++ * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit
++ * in these instructions, so masking bit 31 out doesn't change these
++ * instructions. For the tsr. instruction if bit 31 = 0 then it is per
++ * ISA an invalid form, however P9 UM, in section 4.6.10 Book II Invalid
++ * Forms, informs specifically that ignoring bit 31 is an acceptable way
++ * to handle TM-related invalid forms that have bit 31 = 0. Moreover,
++ * for emulation purposes both forms (w/ and wo/ bit 31 set) can
++ * generate a softpatch interrupt. Hence both forms are handled below
++ * for tsr. to make them behave the same way.
++ */
++ switch (instr & PO_XOP_OPCODE_MASK) {
+ case PPC_INST_RFID:
+ /* XXX do we need to check for PR=0 here? */
+ newmsr = vcpu->arch.shregs.srr1;
+@@ -76,7 +87,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
+ vcpu->arch.shregs.msr = newmsr;
+ return 1;
+
+- case PPC_INST_TSR:
++ /* ignore bit 31, see comment above */
++ case (PPC_INST_TSR & PO_XOP_OPCODE_MASK):
+ /* we know the MSR has the TS field = S (0b01) here */
+ msr = vcpu->arch.shregs.msr;
+ /* check for PR=1 and arch 2.06 bit set in PCR */
+diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
+index c6dcc5291f972..02fbc175142e2 100644
+--- a/arch/riscv/include/asm/ftrace.h
++++ b/arch/riscv/include/asm/ftrace.h
+@@ -63,4 +63,11 @@ do { \
+ * Let auipc+jalr be the basic *mcount unit*, so we make it 8 bytes here.
+ */
+ #define MCOUNT_INSN_SIZE 8
++
++#ifndef __ASSEMBLY__
++struct dyn_ftrace;
++int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
++#define ftrace_init_nop ftrace_init_nop
++#endif
++
+ #endif
+diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
+index 6d39f64e4dce4..fa8530f05ed4f 100644
+--- a/arch/riscv/kernel/ftrace.c
++++ b/arch/riscv/kernel/ftrace.c
+@@ -88,6 +88,25 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
+ return __ftrace_modify_call(rec->ip, addr, false);
+ }
+
++
++/*
++ * This is called early on, and isn't wrapped by
++ * ftrace_arch_code_modify_{prepare,post_process}() and therefor doesn't hold
++ * text_mutex, which triggers a lockdep failure. SMP isn't running so we could
++ * just directly poke the text, but it's simpler to just take the lock
++ * ourselves.
++ */
++int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
++{
++ int out;
++
++ ftrace_arch_code_modify_prepare();
++ out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
++ ftrace_arch_code_modify_post_process();
++
++ return out;
++}
++
+ int ftrace_update_ftrace_func(ftrace_func_t func)
+ {
+ int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
+diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
+index 74a296cea21cc..0e6d01225a670 100644
+--- a/arch/s390/kernel/perf_cpum_sf.c
++++ b/arch/s390/kernel/perf_cpum_sf.c
+@@ -1377,8 +1377,8 @@ static int aux_output_begin(struct perf_output_handle *handle,
+ idx = aux->empty_mark + 1;
+ for (i = 0; i < range_scan; i++, idx++) {
+ te = aux_sdb_trailer(aux, idx);
+- te->flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK;
+- te->flags = te->flags & ~SDB_TE_ALERT_REQ_MASK;
++ te->flags &= ~(SDB_TE_BUFFER_FULL_MASK |
++ SDB_TE_ALERT_REQ_MASK);
+ te->overflow = 0;
+ }
+ /* Save the position of empty SDBs */
+@@ -1425,8 +1425,7 @@ static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
+ te = aux_sdb_trailer(aux, alert_index);
+ do {
+ orig_flags = te->flags;
+- orig_overflow = te->overflow;
+- *overflow = orig_overflow;
++ *overflow = orig_overflow = te->overflow;
+ if (orig_flags & SDB_TE_BUFFER_FULL_MASK) {
+ /*
+ * SDB is already set by hardware.
+@@ -1660,7 +1659,7 @@ static void *aux_buffer_setup(struct perf_event *event, void **pages,
+ }
+
+ /* Allocate aux_buffer struct for the event */
+- aux = kmalloc(sizeof(struct aux_buffer), GFP_KERNEL);
++ aux = kzalloc(sizeof(struct aux_buffer), GFP_KERNEL);
+ if (!aux)
+ goto no_aux;
+ sfb = &aux->sfb;
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index 5f85e0dfa66d1..4bda9055daefa 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -537,7 +537,7 @@ static struct notifier_block kdump_mem_nb = {
+ /*
+ * Make sure that the area behind memory_end is protected
+ */
+-static void reserve_memory_end(void)
++static void __init reserve_memory_end(void)
+ {
+ #ifdef CONFIG_CRASH_DUMP
+ if (ipl_info.type == IPL_TYPE_FCP_DUMP &&
+@@ -555,7 +555,7 @@ static void reserve_memory_end(void)
+ /*
+ * Make sure that oldmem, where the dump is stored, is protected
+ */
+-static void reserve_oldmem(void)
++static void __init reserve_oldmem(void)
+ {
+ #ifdef CONFIG_CRASH_DUMP
+ if (OLDMEM_BASE)
+@@ -567,7 +567,7 @@ static void reserve_oldmem(void)
+ /*
+ * Make sure that oldmem, where the dump is stored, is protected
+ */
+-static void remove_oldmem(void)
++static void __init remove_oldmem(void)
+ {
+ #ifdef CONFIG_CRASH_DUMP
+ if (OLDMEM_BASE)
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index e3f70c60e8ccd..62f9903544b59 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -330,7 +330,7 @@ DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
+ * combination with microcode which triggers a CPU buffer flush when the
+ * instruction is executed.
+ */
+-static inline void mds_clear_cpu_buffers(void)
++static __always_inline void mds_clear_cpu_buffers(void)
+ {
+ static const u16 ds = __KERNEL_DS;
+
+@@ -351,7 +351,7 @@ static inline void mds_clear_cpu_buffers(void)
+ *
+ * Clear CPU buffers if the corresponding static key is enabled
+ */
+-static inline void mds_user_clear_cpu_buffers(void)
++static __always_inline void mds_user_clear_cpu_buffers(void)
+ {
+ if (static_branch_likely(&mds_user_clear))
+ mds_clear_cpu_buffers();
+diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h
+index 19b137f1b3beb..2ff9b98812b76 100644
+--- a/arch/x86/include/asm/pkeys.h
++++ b/arch/x86/include/asm/pkeys.h
+@@ -4,6 +4,11 @@
+
+ #define ARCH_DEFAULT_PKEY 0
+
++/*
++ * If more than 16 keys are ever supported, a thorough audit
++ * will be necessary to ensure that the types that store key
++ * numbers and masks have sufficient capacity.
++ */
+ #define arch_max_pkey() (boot_cpu_has(X86_FEATURE_OSPKE) ? 16 : 1)
+
+ extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index 95e21c4380124..15234885e60bc 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -2250,6 +2250,7 @@ static inline void __init check_timer(void)
+ legacy_pic->init(0);
+ legacy_pic->make_irq(0);
+ apic_write(APIC_LVT0, APIC_DM_EXTINT);
++ legacy_pic->unmask(0);
+
+ unlock_ExtINT_logic();
+
+diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
+index 4b900035f2202..601a5da1d196a 100644
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -907,8 +907,6 @@ const void *get_xsave_field_ptr(int xsave_state)
+
+ #ifdef CONFIG_ARCH_HAS_PKEYS
+
+-#define NR_VALID_PKRU_BITS (CONFIG_NR_PROTECTION_KEYS * 2)
+-#define PKRU_VALID_MASK (NR_VALID_PKRU_BITS - 1)
+ /*
+ * This will go out and modify PKRU register to set the access
+ * rights for @pkey to @init_val.
+@@ -927,6 +925,13 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
+ if (!boot_cpu_has(X86_FEATURE_OSPKE))
+ return -EINVAL;
+
++ /*
++ * This code should only be called with valid 'pkey'
++ * values originating from in-kernel users. Complain
++ * if a bad value is observed.
++ */
++ WARN_ON_ONCE(pkey >= arch_max_pkey());
++
+ /* Set the bits we need in PKRU: */
+ if (init_val & PKEY_DISABLE_ACCESS)
+ new_pkru_bits |= PKRU_AD_BIT;
+diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
+index cb41b036eb264..7e0dc8c7da2c0 100644
+--- a/arch/x86/kvm/mmutrace.h
++++ b/arch/x86/kvm/mmutrace.h
+@@ -339,7 +339,7 @@ TRACE_EVENT(
+ /* These depend on page entry type, so compute them now. */
+ __field(bool, r)
+ __field(bool, x)
+- __field(u8, u)
++ __field(signed char, u)
+ ),
+
+ TP_fast_assign(
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 2aafb6c791345..cb09a0ec87500 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -3942,6 +3942,12 @@ static int iret_interception(struct vcpu_svm *svm)
+ return 1;
+ }
+
++static int invd_interception(struct vcpu_svm *svm)
++{
++ /* Treat an INVD instruction as a NOP and just skip it. */
++ return kvm_skip_emulated_instruction(&svm->vcpu);
++}
++
+ static int invlpg_interception(struct vcpu_svm *svm)
+ {
+ if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
+@@ -4831,7 +4837,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
+ [SVM_EXIT_RDPMC] = rdpmc_interception,
+ [SVM_EXIT_CPUID] = cpuid_interception,
+ [SVM_EXIT_IRET] = iret_interception,
+- [SVM_EXIT_INVD] = emulate_on_interception,
++ [SVM_EXIT_INVD] = invd_interception,
+ [SVM_EXIT_PAUSE] = pause_interception,
+ [SVM_EXIT_HLT] = halt_interception,
+ [SVM_EXIT_INVLPG] = invlpg_interception,
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 430a4bc66f604..dd182228be714 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -858,6 +858,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+ unsigned long old_cr4 = kvm_read_cr4(vcpu);
+ unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
+ X86_CR4_SMEP;
++ unsigned long mmu_role_bits = pdptr_bits | X86_CR4_SMAP | X86_CR4_PKE;
+
+ if (kvm_valid_cr4(vcpu, cr4))
+ return 1;
+@@ -885,7 +886,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+ if (kvm_x86_ops->set_cr4(vcpu, cr4))
+ return 1;
+
+- if (((cr4 ^ old_cr4) & pdptr_bits) ||
++ if (((cr4 ^ old_cr4) & mmu_role_bits) ||
+ (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
+ kvm_mmu_reset_context(vcpu);
+
+@@ -4668,10 +4669,13 @@ set_identity_unlock:
+ r = -EFAULT;
+ if (copy_from_user(&u.ps, argp, sizeof u.ps))
+ goto out;
++ mutex_lock(&kvm->lock);
+ r = -ENXIO;
+ if (!kvm->arch.vpit)
+- goto out;
++ goto set_pit_out;
+ r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
++set_pit_out:
++ mutex_unlock(&kvm->lock);
+ break;
+ }
+ case KVM_GET_PIT2: {
+@@ -4691,10 +4695,13 @@ set_identity_unlock:
+ r = -EFAULT;
+ if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
+ goto out;
++ mutex_lock(&kvm->lock);
+ r = -ENXIO;
+ if (!kvm->arch.vpit)
+- goto out;
++ goto set_pit2_out;
+ r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
++set_pit2_out:
++ mutex_unlock(&kvm->lock);
+ break;
+ }
+ case KVM_REINJECT_CONTROL: {
+diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
+index 7077b3e282414..40dbbd8f1fe41 100644
+--- a/arch/x86/lib/usercopy_64.c
++++ b/arch/x86/lib/usercopy_64.c
+@@ -139,7 +139,7 @@ long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
+ */
+ if (size < 8) {
+ if (!IS_ALIGNED(dest, 4) || size != 4)
+- clean_cache_range(dst, 1);
++ clean_cache_range(dst, size);
+ } else {
+ if (!IS_ALIGNED(dest, 8)) {
+ dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 49e16f0090957..9415a0041aaf7 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -1080,29 +1080,21 @@ void acpi_ec_dispatch_gpe(void)
+ /* --------------------------------------------------------------------------
+ Event Management
+ -------------------------------------------------------------------------- */
+-static struct acpi_ec_query_handler *
+-acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler)
+-{
+- if (handler)
+- kref_get(&handler->kref);
+- return handler;
+-}
+-
+ static struct acpi_ec_query_handler *
+ acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value)
+ {
+ struct acpi_ec_query_handler *handler;
+- bool found = false;
+
+ mutex_lock(&ec->mutex);
+ list_for_each_entry(handler, &ec->list, node) {
+ if (value == handler->query_bit) {
+- found = true;
+- break;
++ kref_get(&handler->kref);
++ mutex_unlock(&ec->mutex);
++ return handler;
+ }
+ }
+ mutex_unlock(&ec->mutex);
+- return found ? acpi_ec_get_query_handler(handler) : NULL;
++ return NULL;
+ }
+
+ static void acpi_ec_query_handler_release(struct kref *kref)
+diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
+index 583e366be7e23..505f8c3168188 100644
+--- a/drivers/ata/acard-ahci.c
++++ b/drivers/ata/acard-ahci.c
+@@ -72,7 +72,7 @@ struct acard_sg {
+ __le32 size; /* bit 31 (EOT) max==0x10000 (64k) */
+ };
+
+-static void acard_ahci_qc_prep(struct ata_queued_cmd *qc);
++static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc);
+ static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
+ static int acard_ahci_port_start(struct ata_port *ap);
+ static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+@@ -257,7 +257,7 @@ static unsigned int acard_ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
+ return si;
+ }
+
+-static void acard_ahci_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc)
+ {
+ struct ata_port *ap = qc->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+@@ -295,6 +295,8 @@ static void acard_ahci_qc_prep(struct ata_queued_cmd *qc)
+ opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
+
+ ahci_fill_cmd_slot(pp, qc->hw_tag, opts);
++
++ return AC_ERR_OK;
+ }
+
+ static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
+index 2bdb250a2142c..f1153e7ba3b3a 100644
+--- a/drivers/ata/libahci.c
++++ b/drivers/ata/libahci.c
+@@ -73,7 +73,7 @@ static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
+ static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
+ static int ahci_port_start(struct ata_port *ap);
+ static void ahci_port_stop(struct ata_port *ap);
+-static void ahci_qc_prep(struct ata_queued_cmd *qc);
++static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc);
+ static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
+ static void ahci_freeze(struct ata_port *ap);
+ static void ahci_thaw(struct ata_port *ap);
+@@ -1640,7 +1640,7 @@ static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
+ return sata_pmp_qc_defer_cmd_switch(qc);
+ }
+
+-static void ahci_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc)
+ {
+ struct ata_port *ap = qc->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+@@ -1676,6 +1676,8 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
+ opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
+
+ ahci_fill_cmd_slot(pp, qc->hw_tag, opts);
++
++ return AC_ERR_OK;
+ }
+
+ static void ahci_fbs_dec_intr(struct ata_port *ap)
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index fead7243930c0..db1d86af21b4d 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4996,7 +4996,10 @@ int ata_std_qc_defer(struct ata_queued_cmd *qc)
+ return ATA_DEFER_LINK;
+ }
+
+-void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
++enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc)
++{
++ return AC_ERR_OK;
++}
+
+ /**
+ * ata_sg_init - Associate command with scatter-gather table.
+@@ -5483,7 +5486,9 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
+ return;
+ }
+
+- ap->ops->qc_prep(qc);
++ qc->err_mask |= ap->ops->qc_prep(qc);
++ if (unlikely(qc->err_mask))
++ goto err;
+ trace_ata_qc_issue(qc);
+ qc->err_mask |= ap->ops->qc_issue(qc);
+ if (unlikely(qc->err_mask))
+diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
+index 873cc09060551..7484ffdabd543 100644
+--- a/drivers/ata/libata-sff.c
++++ b/drivers/ata/libata-sff.c
+@@ -2695,12 +2695,14 @@ static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+-void ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
++enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
+ {
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+- return;
++ return AC_ERR_OK;
+
+ ata_bmdma_fill_sg(qc);
++
++ return AC_ERR_OK;
+ }
+ EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
+
+@@ -2713,12 +2715,14 @@ EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+-void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
++enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
+ {
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+- return;
++ return AC_ERR_OK;
+
+ ata_bmdma_fill_sg_dumb(qc);
++
++ return AC_ERR_OK;
+ }
+ EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
+
+diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
+index 9588e685d994c..765b99319d3cd 100644
+--- a/drivers/ata/pata_macio.c
++++ b/drivers/ata/pata_macio.c
+@@ -507,7 +507,7 @@ static int pata_macio_cable_detect(struct ata_port *ap)
+ return ATA_CBL_PATA40;
+ }
+
+-static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
+ {
+ unsigned int write = (qc->tf.flags & ATA_TFLAG_WRITE);
+ struct ata_port *ap = qc->ap;
+@@ -520,7 +520,7 @@ static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
+ __func__, qc, qc->flags, write, qc->dev->devno);
+
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+- return;
++ return AC_ERR_OK;
+
+ table = (struct dbdma_cmd *) priv->dma_table_cpu;
+
+@@ -565,6 +565,8 @@ static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
+ table->command = cpu_to_le16(DBDMA_STOP);
+
+ dev_dbgdma(priv->dev, "%s: %d DMA list entries\n", __func__, pi);
++
++ return AC_ERR_OK;
+ }
+
+
+diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
+index e8b6a2e464c98..5b1458ca986b6 100644
+--- a/drivers/ata/pata_pxa.c
++++ b/drivers/ata/pata_pxa.c
+@@ -58,25 +58,27 @@ static void pxa_ata_dma_irq(void *d)
+ /*
+ * Prepare taskfile for submission.
+ */
+-static void pxa_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors pxa_qc_prep(struct ata_queued_cmd *qc)
+ {
+ struct pata_pxa_data *pd = qc->ap->private_data;
+ struct dma_async_tx_descriptor *tx;
+ enum dma_transfer_direction dir;
+
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+- return;
++ return AC_ERR_OK;
+
+ dir = (qc->dma_dir == DMA_TO_DEVICE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
+ tx = dmaengine_prep_slave_sg(pd->dma_chan, qc->sg, qc->n_elem, dir,
+ DMA_PREP_INTERRUPT);
+ if (!tx) {
+ ata_dev_err(qc->dev, "prep_slave_sg() failed\n");
+- return;
++ return AC_ERR_OK;
+ }
+ tx->callback = pxa_ata_dma_irq;
+ tx->callback_param = pd;
+ pd->dma_cookie = dmaengine_submit(tx);
++
++ return AC_ERR_OK;
+ }
+
+ /*
+diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
+index f1e873a37465e..096b4771b19da 100644
+--- a/drivers/ata/pdc_adma.c
++++ b/drivers/ata/pdc_adma.c
+@@ -132,7 +132,7 @@ static int adma_ata_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+ static int adma_port_start(struct ata_port *ap);
+ static void adma_port_stop(struct ata_port *ap);
+-static void adma_qc_prep(struct ata_queued_cmd *qc);
++static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc);
+ static unsigned int adma_qc_issue(struct ata_queued_cmd *qc);
+ static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
+ static void adma_freeze(struct ata_port *ap);
+@@ -311,7 +311,7 @@ static int adma_fill_sg(struct ata_queued_cmd *qc)
+ return i;
+ }
+
+-static void adma_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc)
+ {
+ struct adma_port_priv *pp = qc->ap->private_data;
+ u8 *buf = pp->pkt;
+@@ -322,7 +322,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc)
+
+ adma_enter_reg_mode(qc->ap);
+ if (qc->tf.protocol != ATA_PROT_DMA)
+- return;
++ return AC_ERR_OK;
+
+ buf[i++] = 0; /* Response flags */
+ buf[i++] = 0; /* reserved */
+@@ -387,6 +387,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc)
+ printk("%s\n", obuf);
+ }
+ #endif
++ return AC_ERR_OK;
+ }
+
+ static inline void adma_packet_start(struct ata_queued_cmd *qc)
+diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
+index ae52a45fab5f7..8b3be0ff91cb4 100644
+--- a/drivers/ata/sata_fsl.c
++++ b/drivers/ata/sata_fsl.c
+@@ -507,7 +507,7 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
+ return num_prde;
+ }
+
+-static void sata_fsl_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors sata_fsl_qc_prep(struct ata_queued_cmd *qc)
+ {
+ struct ata_port *ap = qc->ap;
+ struct sata_fsl_port_priv *pp = ap->private_data;
+@@ -553,6 +553,8 @@ static void sata_fsl_qc_prep(struct ata_queued_cmd *qc)
+
+ VPRINTK("SATA FSL : xx_qc_prep, di = 0x%x, ttl = %d, num_prde = %d\n",
+ desc_info, ttl_dwords, num_prde);
++
++ return AC_ERR_OK;
+ }
+
+ static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc)
+diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
+index 9b6d7930d1c79..6c7ddc037fce9 100644
+--- a/drivers/ata/sata_inic162x.c
++++ b/drivers/ata/sata_inic162x.c
+@@ -472,7 +472,7 @@ static void inic_fill_sg(struct inic_prd *prd, struct ata_queued_cmd *qc)
+ prd[-1].flags |= PRD_END;
+ }
+
+-static void inic_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors inic_qc_prep(struct ata_queued_cmd *qc)
+ {
+ struct inic_port_priv *pp = qc->ap->private_data;
+ struct inic_pkt *pkt = pp->pkt;
+@@ -532,6 +532,8 @@ static void inic_qc_prep(struct ata_queued_cmd *qc)
+ inic_fill_sg(prd, qc);
+
+ pp->cpb_tbl[0] = pp->pkt_dma;
++
++ return AC_ERR_OK;
+ }
+
+ static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
+diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
+index ab2e9f62ddc1a..2910b22fac117 100644
+--- a/drivers/ata/sata_mv.c
++++ b/drivers/ata/sata_mv.c
+@@ -605,8 +605,8 @@ static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
+ static int mv_port_start(struct ata_port *ap);
+ static void mv_port_stop(struct ata_port *ap);
+ static int mv_qc_defer(struct ata_queued_cmd *qc);
+-static void mv_qc_prep(struct ata_queued_cmd *qc);
+-static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
++static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc);
++static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc);
+ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
+ static int mv_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
+@@ -2044,7 +2044,7 @@ static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
+ * LOCKING:
+ * Inherited from caller.
+ */
+-static void mv_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc)
+ {
+ struct ata_port *ap = qc->ap;
+ struct mv_port_priv *pp = ap->private_data;
+@@ -2056,15 +2056,15 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
+ switch (tf->protocol) {
+ case ATA_PROT_DMA:
+ if (tf->command == ATA_CMD_DSM)
+- return;
++ return AC_ERR_OK;
+ /* fall-thru */
+ case ATA_PROT_NCQ:
+ break; /* continue below */
+ case ATA_PROT_PIO:
+ mv_rw_multi_errata_sata24(qc);
+- return;
++ return AC_ERR_OK;
+ default:
+- return;
++ return AC_ERR_OK;
+ }
+
+ /* Fill in command request block
+@@ -2111,12 +2111,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
+ * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
+ * of which are defined/used by Linux. If we get here, this
+ * driver needs work.
+-
+- * FIXME: modify libata to give qc_prep a return value and
+- * return error here.
+ */
+- BUG_ON(tf->command);
+- break;
++ ata_port_err(ap, "%s: unsupported command: %.2x\n", __func__,
++ tf->command);
++ return AC_ERR_INVALID;
+ }
+ mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
+ mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
+@@ -2129,8 +2127,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
+ mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
+
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+- return;
++ return AC_ERR_OK;
+ mv_fill_sg(qc);
++
++ return AC_ERR_OK;
+ }
+
+ /**
+@@ -2145,7 +2145,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
+ * LOCKING:
+ * Inherited from caller.
+ */
+-static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
++static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc)
+ {
+ struct ata_port *ap = qc->ap;
+ struct mv_port_priv *pp = ap->private_data;
+@@ -2156,9 +2156,9 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
+
+ if ((tf->protocol != ATA_PROT_DMA) &&
+ (tf->protocol != ATA_PROT_NCQ))
+- return;
++ return AC_ERR_OK;
+ if (tf->command == ATA_CMD_DSM)
+- return; /* use bmdma for this */
++ return AC_ERR_OK; /* use bmdma for this */
+
+ /* Fill in Gen IIE command request block */
+ if (!(tf->flags & ATA_TFLAG_WRITE))
+@@ -2199,8 +2199,10 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
+ );
+
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+- return;
++ return AC_ERR_OK;
+ mv_fill_sg(qc);
++
++ return AC_ERR_OK;
+ }
+
+ /**
+diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
+index 761577d57ff37..798d549435cc1 100644
+--- a/drivers/ata/sata_nv.c
++++ b/drivers/ata/sata_nv.c
+@@ -313,7 +313,7 @@ static void nv_ck804_freeze(struct ata_port *ap);
+ static void nv_ck804_thaw(struct ata_port *ap);
+ static int nv_adma_slave_config(struct scsi_device *sdev);
+ static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
+-static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
++static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
+ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
+ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
+ static void nv_adma_irq_clear(struct ata_port *ap);
+@@ -335,7 +335,7 @@ static void nv_mcp55_freeze(struct ata_port *ap);
+ static void nv_swncq_error_handler(struct ata_port *ap);
+ static int nv_swncq_slave_config(struct scsi_device *sdev);
+ static int nv_swncq_port_start(struct ata_port *ap);
+-static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
++static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
+ static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
+ static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
+ static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
+@@ -1365,7 +1365,7 @@ static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
+ return 1;
+ }
+
+-static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc)
+ {
+ struct nv_adma_port_priv *pp = qc->ap->private_data;
+ struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag];
+@@ -1377,7 +1377,7 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
+ (qc->flags & ATA_QCFLAG_DMAMAP));
+ nv_adma_register_mode(qc->ap);
+ ata_bmdma_qc_prep(qc);
+- return;
++ return AC_ERR_OK;
+ }
+
+ cpb->resp_flags = NV_CPB_RESP_DONE;
+@@ -1409,6 +1409,8 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
+ cpb->ctl_flags = ctl_flags;
+ wmb();
+ cpb->resp_flags = 0;
++
++ return AC_ERR_OK;
+ }
+
+ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
+@@ -1972,17 +1974,19 @@ static int nv_swncq_port_start(struct ata_port *ap)
+ return 0;
+ }
+
+-static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc)
+ {
+ if (qc->tf.protocol != ATA_PROT_NCQ) {
+ ata_bmdma_qc_prep(qc);
+- return;
++ return AC_ERR_OK;
+ }
+
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+- return;
++ return AC_ERR_OK;
+
+ nv_swncq_fill_sg(qc);
++
++ return AC_ERR_OK;
+ }
+
+ static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
+diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
+index d032bf657f709..29d2bb465f60d 100644
+--- a/drivers/ata/sata_promise.c
++++ b/drivers/ata/sata_promise.c
+@@ -155,7 +155,7 @@ static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 va
+ static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+ static int pdc_common_port_start(struct ata_port *ap);
+ static int pdc_sata_port_start(struct ata_port *ap);
+-static void pdc_qc_prep(struct ata_queued_cmd *qc);
++static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc);
+ static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
+ static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
+ static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
+@@ -649,7 +649,7 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
+ prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+ }
+
+-static void pdc_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc)
+ {
+ struct pdc_port_priv *pp = qc->ap->private_data;
+ unsigned int i;
+@@ -681,6 +681,8 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
+ default:
+ break;
+ }
++
++ return AC_ERR_OK;
+ }
+
+ static int pdc_is_sataii_tx4(unsigned long flags)
+diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
+index 1fe941688e95d..a66d10628c183 100644
+--- a/drivers/ata/sata_qstor.c
++++ b/drivers/ata/sata_qstor.c
+@@ -116,7 +116,7 @@ static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
+ static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+ static int qs_port_start(struct ata_port *ap);
+ static void qs_host_stop(struct ata_host *host);
+-static void qs_qc_prep(struct ata_queued_cmd *qc);
++static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc);
+ static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
+ static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
+ static void qs_freeze(struct ata_port *ap);
+@@ -276,7 +276,7 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
+ return si;
+ }
+
+-static void qs_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc)
+ {
+ struct qs_port_priv *pp = qc->ap->private_data;
+ u8 dflags = QS_DF_PORD, *buf = pp->pkt;
+@@ -288,7 +288,7 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
+
+ qs_enter_reg_mode(qc->ap);
+ if (qc->tf.protocol != ATA_PROT_DMA)
+- return;
++ return AC_ERR_OK;
+
+ nelem = qs_fill_sg(qc);
+
+@@ -311,6 +311,8 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
+
+ /* frame information structure (FIS) */
+ ata_tf_to_fis(&qc->tf, 0, 1, &buf[32]);
++
++ return AC_ERR_OK;
+ }
+
+ static inline void qs_packet_start(struct ata_queued_cmd *qc)
+diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
+index 50ebd779d975f..8323f88d17a53 100644
+--- a/drivers/ata/sata_rcar.c
++++ b/drivers/ata/sata_rcar.c
+@@ -554,12 +554,14 @@ static void sata_rcar_bmdma_fill_sg(struct ata_queued_cmd *qc)
+ prd[si - 1].addr |= cpu_to_le32(SATA_RCAR_DTEND);
+ }
+
+-static void sata_rcar_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors sata_rcar_qc_prep(struct ata_queued_cmd *qc)
+ {
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+- return;
++ return AC_ERR_OK;
+
+ sata_rcar_bmdma_fill_sg(qc);
++
++ return AC_ERR_OK;
+ }
+
+ static void sata_rcar_bmdma_setup(struct ata_queued_cmd *qc)
+diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
+index ed76f070d21e4..82adaf02887fb 100644
+--- a/drivers/ata/sata_sil.c
++++ b/drivers/ata/sata_sil.c
+@@ -119,7 +119,7 @@ static void sil_dev_config(struct ata_device *dev);
+ static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+ static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
+ static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed);
+-static void sil_qc_prep(struct ata_queued_cmd *qc);
++static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc);
+ static void sil_bmdma_setup(struct ata_queued_cmd *qc);
+ static void sil_bmdma_start(struct ata_queued_cmd *qc);
+ static void sil_bmdma_stop(struct ata_queued_cmd *qc);
+@@ -333,12 +333,14 @@ static void sil_fill_sg(struct ata_queued_cmd *qc)
+ last_prd->flags_len |= cpu_to_le32(ATA_PRD_EOT);
+ }
+
+-static void sil_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc)
+ {
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+- return;
++ return AC_ERR_OK;
+
+ sil_fill_sg(qc);
++
++ return AC_ERR_OK;
+ }
+
+ static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
+diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
+index 319f517137cd5..7a8ca81e52bfc 100644
+--- a/drivers/ata/sata_sil24.c
++++ b/drivers/ata/sata_sil24.c
+@@ -336,7 +336,7 @@ static void sil24_dev_config(struct ata_device *dev);
+ static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val);
+ static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val);
+ static int sil24_qc_defer(struct ata_queued_cmd *qc);
+-static void sil24_qc_prep(struct ata_queued_cmd *qc);
++static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc);
+ static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
+ static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc);
+ static void sil24_pmp_attach(struct ata_port *ap);
+@@ -840,7 +840,7 @@ static int sil24_qc_defer(struct ata_queued_cmd *qc)
+ return ata_std_qc_defer(qc);
+ }
+
+-static void sil24_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc)
+ {
+ struct ata_port *ap = qc->ap;
+ struct sil24_port_priv *pp = ap->private_data;
+@@ -884,6 +884,8 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
+
+ if (qc->flags & ATA_QCFLAG_DMAMAP)
+ sil24_fill_sg(qc, sge);
++
++ return AC_ERR_OK;
+ }
+
+ static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
1377 +diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
1378 +index 405e606a234d1..0d742457925ec 100644
1379 +--- a/drivers/ata/sata_sx4.c
1380 ++++ b/drivers/ata/sata_sx4.c
1381 +@@ -218,7 +218,7 @@ static void pdc_error_handler(struct ata_port *ap);
1382 + static void pdc_freeze(struct ata_port *ap);
1383 + static void pdc_thaw(struct ata_port *ap);
1384 + static int pdc_port_start(struct ata_port *ap);
1385 +-static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
1386 ++static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc);
1387 + static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
1388 + static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
1389 + static unsigned int pdc20621_dimm_init(struct ata_host *host);
1390 +@@ -546,7 +546,7 @@ static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
1391 + VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
1392 + }
1393 +
1394 +-static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
1395 ++static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc)
1396 + {
1397 + switch (qc->tf.protocol) {
1398 + case ATA_PROT_DMA:
1399 +@@ -558,6 +558,8 @@ static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
1400 + default:
1401 + break;
1402 + }
1403 ++
1404 ++ return AC_ERR_OK;
1405 + }
1406 +
1407 + static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
1408 +diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
1409 +index 7323e9210f4b1..38fec976e62d4 100644
1410 +--- a/drivers/atm/eni.c
1411 ++++ b/drivers/atm/eni.c
1412 +@@ -2243,7 +2243,7 @@ static int eni_init_one(struct pci_dev *pci_dev,
1413 +
1414 + rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
1415 + if (rc < 0)
1416 +- goto out;
1417 ++ goto err_disable;
1418 +
1419 + rc = -ENOMEM;
1420 + eni_dev = kmalloc(sizeof(struct eni_dev), GFP_KERNEL);
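
The one-line eni fix above restores the usual probe-time unwind discipline: once pci_enable_device() has succeeded, every later failure must branch to the label that disables the device again. A minimal sketch of the idiom, with hypothetical names:

  static int foo_probe(struct pci_dev *pdev)
  {
          int rc;

          rc = pci_enable_device(pdev);
          if (rc < 0)
                  return rc;              /* nothing to undo yet */

          rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
          if (rc < 0)
                  goto err_disable;       /* must undo the enable */

          return 0;

  err_disable:
          pci_disable_device(pdev);
          return rc;
  }
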
1421 +diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
1422 +index d26b485ccc7d0..e8b3353c18eb8 100644
1423 +--- a/drivers/base/regmap/regmap.c
1424 ++++ b/drivers/base/regmap/regmap.c
1425 +@@ -2367,7 +2367,7 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg,
1426 + EXPORT_SYMBOL_GPL(regmap_raw_write_async);
1427 +
1428 + static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
1429 +- unsigned int val_len)
1430 ++ unsigned int val_len, bool noinc)
1431 + {
1432 + struct regmap_range_node *range;
1433 + int ret;
1434 +@@ -2380,7 +2380,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
1435 + range = _regmap_range_lookup(map, reg);
1436 + if (range) {
1437 + ret = _regmap_select_page(map, &reg, range,
1438 +- val_len / map->format.val_bytes);
1439 ++ noinc ? 1 : val_len / map->format.val_bytes);
1440 + if (ret != 0)
1441 + return ret;
1442 + }
1443 +@@ -2418,7 +2418,7 @@ static int _regmap_bus_read(void *context, unsigned int reg,
1444 + if (!map->format.parse_val)
1445 + return -EINVAL;
1446 +
1447 +- ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes);
1448 ++ ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false);
1449 + if (ret == 0)
1450 + *val = map->format.parse_val(work_val);
1451 +
1452 +@@ -2536,7 +2536,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
1453 +
1454 + /* Read bytes that fit into whole chunks */
1455 + for (i = 0; i < chunk_count; i++) {
1456 +- ret = _regmap_raw_read(map, reg, val, chunk_bytes);
1457 ++ ret = _regmap_raw_read(map, reg, val, chunk_bytes, false);
1458 + if (ret != 0)
1459 + goto out;
1460 +
1461 +@@ -2547,7 +2547,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
1462 +
1463 + /* Read remaining bytes */
1464 + if (val_len) {
1465 +- ret = _regmap_raw_read(map, reg, val, val_len);
1466 ++ ret = _regmap_raw_read(map, reg, val, val_len, false);
1467 + if (ret != 0)
1468 + goto out;
1469 + }
1470 +@@ -2622,7 +2622,7 @@ int regmap_noinc_read(struct regmap *map, unsigned int reg,
1471 + read_len = map->max_raw_read;
1472 + else
1473 + read_len = val_len;
1474 +- ret = _regmap_raw_read(map, reg, val, read_len);
1475 ++ ret = _regmap_raw_read(map, reg, val, read_len, true);
1476 + if (ret)
1477 + goto out_unlock;
1478 + val = ((u8 *)val) + read_len;
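
The new noinc parameter above matters only for paged maps: a non-incrementing read polls one register repeatedly, so page selection must be computed for a single register rather than for val_len / val_bytes consecutive ones. A sketch of a regmap_noinc_read() caller, with a hypothetical FIFO register address:

  #define FOO_FIFO_REG 0x24       /* illustrative, not from the patch */

  static int foo_drain_fifo(struct regmap *map, u8 *buf, size_t len)
  {
          /* len bytes, all read from the same non-incrementing register */
          return regmap_noinc_read(map, FOO_FIFO_REG, buf, len);
  }
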
1479 +diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
1480 +index 8d1cd2479e36f..cc51395d8b0e5 100644
1481 +--- a/drivers/bluetooth/btrtl.c
1482 ++++ b/drivers/bluetooth/btrtl.c
1483 +@@ -343,11 +343,11 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
1484 + * the end.
1485 + */
1486 + len = patch_length;
1487 +- buf = kmemdup(btrtl_dev->fw_data + patch_offset, patch_length,
1488 +- GFP_KERNEL);
1489 ++ buf = kvmalloc(patch_length, GFP_KERNEL);
1490 + if (!buf)
1491 + return -ENOMEM;
1492 +
1493 ++ memcpy(buf, btrtl_dev->fw_data + patch_offset, patch_length - 4);
1494 + memcpy(buf + patch_length - 4, &epatch_info->fw_version, 4);
1495 +
1496 + *_buf = buf;
1497 +@@ -415,8 +415,10 @@ static int rtl_load_file(struct hci_dev *hdev, const char *name, u8 **buff)
1498 + if (ret < 0)
1499 + return ret;
1500 + ret = fw->size;
1501 +- *buff = kmemdup(fw->data, ret, GFP_KERNEL);
1502 +- if (!*buff)
1503 ++ *buff = kvmalloc(fw->size, GFP_KERNEL);
1504 ++ if (*buff)
1505 ++ memcpy(*buff, fw->data, ret);
1506 ++ else
1507 + ret = -ENOMEM;
1508 +
1509 + release_firmware(fw);
1510 +@@ -454,14 +456,14 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev,
1511 + goto out;
1512 +
1513 + if (btrtl_dev->cfg_len > 0) {
1514 +- tbuff = kzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL);
1515 ++ tbuff = kvzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL);
1516 + if (!tbuff) {
1517 + ret = -ENOMEM;
1518 + goto out;
1519 + }
1520 +
1521 + memcpy(tbuff, fw_data, ret);
1522 +- kfree(fw_data);
1523 ++ kvfree(fw_data);
1524 +
1525 + memcpy(tbuff + ret, btrtl_dev->cfg_data, btrtl_dev->cfg_len);
1526 + ret += btrtl_dev->cfg_len;
1527 +@@ -474,7 +476,7 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev,
1528 + ret = rtl_download_firmware(hdev, fw_data, ret);
1529 +
1530 + out:
1531 +- kfree(fw_data);
1532 ++ kvfree(fw_data);
1533 + return ret;
1534 + }
1535 +
1536 +@@ -501,8 +503,8 @@ static struct sk_buff *btrtl_read_local_version(struct hci_dev *hdev)
1537 +
1538 + void btrtl_free(struct btrtl_device_info *btrtl_dev)
1539 + {
1540 +- kfree(btrtl_dev->fw_data);
1541 +- kfree(btrtl_dev->cfg_data);
1542 ++ kvfree(btrtl_dev->fw_data);
1543 ++ kvfree(btrtl_dev->cfg_data);
1544 + kfree(btrtl_dev);
1545 + }
1546 + EXPORT_SYMBOL_GPL(btrtl_free);
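
The btrtl conversion above swaps kmemdup()/kzalloc() for kvmalloc() because firmware patch blobs can be large enough that a physically contiguous allocation may fail; kvmalloc() falls back to vmalloc() in that case, and such buffers must be released with kvfree(), never kfree(). A minimal sketch of the pairing:

  static void *dup_fw_blob(const void *src, size_t len)
  {
          void *buf = kvmalloc(len, GFP_KERNEL); /* kmalloc, else vmalloc */

          if (buf)
                  memcpy(buf, src, len);
          return buf;             /* caller must release with kvfree() */
  }
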
1547 +diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c
1548 +index e31c02dc77709..cbd970fb02f18 100644
1549 +--- a/drivers/bus/hisi_lpc.c
1550 ++++ b/drivers/bus/hisi_lpc.c
1551 +@@ -358,6 +358,26 @@ static int hisi_lpc_acpi_xlat_io_res(struct acpi_device *adev,
1552 + return 0;
1553 + }
1554 +
1555 ++/*
1556 ++ * Released firmware describes the IO port max address as 0x3fff, which is
1557 ++ * the max host bus address. Fixup to a proper range. This will probably
1558 ++ * never be fixed in firmware.
1559 ++ */
1560 ++static void hisi_lpc_acpi_fixup_child_resource(struct device *hostdev,
1561 ++ struct resource *r)
1562 ++{
1563 ++ if (r->end != 0x3fff)
1564 ++ return;
1565 ++
1566 ++ if (r->start == 0xe4)
1567 ++ r->end = 0xe4 + 0x04 - 1;
1568 ++ else if (r->start == 0x2f8)
1569 ++ r->end = 0x2f8 + 0x08 - 1;
1570 ++ else
1571 ++ dev_warn(hostdev, "unrecognised resource %pR to fixup, ignoring\n",
1572 ++ r);
1573 ++}
1574 ++
1575 + /*
1576 + * hisi_lpc_acpi_set_io_res - set the resources for a child
1577 + * @child: the device node to be updated the I/O resource
1578 +@@ -419,8 +439,11 @@ static int hisi_lpc_acpi_set_io_res(struct device *child,
1579 + return -ENOMEM;
1580 + }
1581 + count = 0;
1582 +- list_for_each_entry(rentry, &resource_list, node)
1583 +- resources[count++] = *rentry->res;
1584 ++ list_for_each_entry(rentry, &resource_list, node) {
1585 ++ resources[count] = *rentry->res;
1586 ++ hisi_lpc_acpi_fixup_child_resource(hostdev, &resources[count]);
1587 ++ count++;
1588 ++ }
1589 +
1590 + acpi_dev_free_resource_list(&resource_list);
1591 +
1592 +diff --git a/drivers/char/random.c b/drivers/char/random.c
1593 +index 6a5d4dfafc474..80dedecfe15c5 100644
1594 +--- a/drivers/char/random.c
1595 ++++ b/drivers/char/random.c
1596 +@@ -1150,14 +1150,14 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
1597 + * We take into account the first, second and third-order deltas
1598 + * in order to make our estimate.
1599 + */
1600 +- delta = sample.jiffies - state->last_time;
1601 +- state->last_time = sample.jiffies;
1602 ++ delta = sample.jiffies - READ_ONCE(state->last_time);
1603 ++ WRITE_ONCE(state->last_time, sample.jiffies);
1604 +
1605 +- delta2 = delta - state->last_delta;
1606 +- state->last_delta = delta;
1607 ++ delta2 = delta - READ_ONCE(state->last_delta);
1608 ++ WRITE_ONCE(state->last_delta, delta);
1609 +
1610 +- delta3 = delta2 - state->last_delta2;
1611 +- state->last_delta2 = delta2;
1612 ++ delta3 = delta2 - READ_ONCE(state->last_delta2);
1613 ++ WRITE_ONCE(state->last_delta2, delta2);
1614 +
1615 + if (delta < 0)
1616 + delta = -delta;
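
add_timer_randomness() updates these fields locklessly from multiple contexts, so the patch marks the accesses: READ_ONCE()/WRITE_ONCE() force one untorn load and one untorn store and keep the compiler from refetching or splitting them. A minimal sketch of the pattern on an illustrative struct:

  struct foo_state {
          long last;              /* written locklessly from many contexts */
  };

  static long foo_delta(struct foo_state *s, long sample)
  {
          long delta = sample - READ_ONCE(s->last);

          WRITE_ONCE(s->last, sample);    /* single, untorn store */
          return delta;
  }
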
1617 +diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
1618 +index 8eeb4190207d1..dce22b7fc5449 100644
1619 +--- a/drivers/char/tlclk.c
1620 ++++ b/drivers/char/tlclk.c
1621 +@@ -776,17 +776,21 @@ static int __init tlclk_init(void)
1622 + {
1623 + int ret;
1624 +
1625 ++ telclk_interrupt = (inb(TLCLK_REG7) & 0x0f);
1626 ++
1627 ++ alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL);
1628 ++ if (!alarm_events) {
1629 ++ ret = -ENOMEM;
1630 ++ goto out1;
1631 ++ }
1632 ++
1633 + ret = register_chrdev(tlclk_major, "telco_clock", &tlclk_fops);
1634 + if (ret < 0) {
1635 + printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major);
1636 ++ kfree(alarm_events);
1637 + return ret;
1638 + }
1639 + tlclk_major = ret;
1640 +- alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL);
1641 +- if (!alarm_events) {
1642 +- ret = -ENOMEM;
1643 +- goto out1;
1644 +- }
1645 +
1646 + /* Read telecom clock IRQ number (Set by BIOS) */
1647 + if (!request_region(TLCLK_BASE, 8, "telco_clock")) {
1648 +@@ -795,7 +799,6 @@ static int __init tlclk_init(void)
1649 + ret = -EBUSY;
1650 + goto out2;
1651 + }
1652 +- telclk_interrupt = (inb(TLCLK_REG7) & 0x0f);
1653 +
1654 + if (0x0F == telclk_interrupt ) { /* not MCPBL0010 ? */
1655 + printk(KERN_ERR "telclk_interrupt = 0x%x non-mcpbl0010 hw.\n",
1656 +@@ -836,8 +839,8 @@ out3:
1657 + release_region(TLCLK_BASE, 8);
1658 + out2:
1659 + kfree(alarm_events);
1660 +-out1:
1661 + unregister_chrdev(tlclk_major, "telco_clock");
1662 ++out1:
1663 + return ret;
1664 + }
1665 +
1666 +diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
1667 +index 763fc7e6c0058..20f27100708bd 100644
1668 +--- a/drivers/char/tpm/tpm_crb.c
1669 ++++ b/drivers/char/tpm/tpm_crb.c
1670 +@@ -26,6 +26,7 @@
1671 + #include "tpm.h"
1672 +
1673 + #define ACPI_SIG_TPM2 "TPM2"
1674 ++#define TPM_CRB_MAX_RESOURCES 3
1675 +
1676 + static const guid_t crb_acpi_start_guid =
1677 + GUID_INIT(0x6BBF6CAB, 0x5463, 0x4714,
1678 +@@ -95,7 +96,6 @@ enum crb_status {
1679 + struct crb_priv {
1680 + u32 sm;
1681 + const char *hid;
1682 +- void __iomem *iobase;
1683 + struct crb_regs_head __iomem *regs_h;
1684 + struct crb_regs_tail __iomem *regs_t;
1685 + u8 __iomem *cmd;
1686 +@@ -438,21 +438,27 @@ static const struct tpm_class_ops tpm_crb = {
1687 +
1688 + static int crb_check_resource(struct acpi_resource *ares, void *data)
1689 + {
1690 +- struct resource *io_res = data;
1691 ++ struct resource *iores_array = data;
1692 + struct resource_win win;
1693 + struct resource *res = &(win.res);
1694 ++ int i;
1695 +
1696 + if (acpi_dev_resource_memory(ares, res) ||
1697 + acpi_dev_resource_address_space(ares, &win)) {
1698 +- *io_res = *res;
1699 +- io_res->name = NULL;
1700 ++ for (i = 0; i < TPM_CRB_MAX_RESOURCES + 1; ++i) {
1701 ++ if (resource_type(iores_array + i) != IORESOURCE_MEM) {
1702 ++ iores_array[i] = *res;
1703 ++ iores_array[i].name = NULL;
1704 ++ break;
1705 ++ }
1706 ++ }
1707 + }
1708 +
1709 + return 1;
1710 + }
1711 +
1712 +-static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv,
1713 +- struct resource *io_res, u64 start, u32 size)
1714 ++static void __iomem *crb_map_res(struct device *dev, struct resource *iores,
1715 ++ void __iomem **iobase_ptr, u64 start, u32 size)
1716 + {
1717 + struct resource new_res = {
1718 + .start = start,
1719 +@@ -464,10 +470,16 @@ static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv,
1720 + if (start != new_res.start)
1721 + return (void __iomem *) ERR_PTR(-EINVAL);
1722 +
1723 +- if (!resource_contains(io_res, &new_res))
1724 ++ if (!iores)
1725 + return devm_ioremap_resource(dev, &new_res);
1726 +
1727 +- return priv->iobase + (new_res.start - io_res->start);
1728 ++ if (!*iobase_ptr) {
1729 ++ *iobase_ptr = devm_ioremap_resource(dev, iores);
1730 ++ if (IS_ERR(*iobase_ptr))
1731 ++ return *iobase_ptr;
1732 ++ }
1733 ++
1734 ++ return *iobase_ptr + (new_res.start - iores->start);
1735 + }
1736 +
1737 + /*
1738 +@@ -494,9 +506,13 @@ static u64 crb_fixup_cmd_size(struct device *dev, struct resource *io_res,
1739 + static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
1740 + struct acpi_table_tpm2 *buf)
1741 + {
1742 +- struct list_head resources;
1743 +- struct resource io_res;
1744 ++ struct list_head acpi_resource_list;
1745 ++ struct resource iores_array[TPM_CRB_MAX_RESOURCES + 1] = { {0} };
1746 ++ void __iomem *iobase_array[TPM_CRB_MAX_RESOURCES] = {NULL};
1747 + struct device *dev = &device->dev;
1748 ++ struct resource *iores;
1749 ++ void __iomem **iobase_ptr;
1750 ++ int i;
1751 + u32 pa_high, pa_low;
1752 + u64 cmd_pa;
1753 + u32 cmd_size;
1754 +@@ -505,21 +521,41 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
1755 + u32 rsp_size;
1756 + int ret;
1757 +
1758 +- INIT_LIST_HEAD(&resources);
1759 +- ret = acpi_dev_get_resources(device, &resources, crb_check_resource,
1760 +- &io_res);
1761 ++ INIT_LIST_HEAD(&acpi_resource_list);
1762 ++ ret = acpi_dev_get_resources(device, &acpi_resource_list,
1763 ++ crb_check_resource, iores_array);
1764 + if (ret < 0)
1765 + return ret;
1766 +- acpi_dev_free_resource_list(&resources);
1767 ++ acpi_dev_free_resource_list(&acpi_resource_list);
1768 +
1769 +- if (resource_type(&io_res) != IORESOURCE_MEM) {
1770 ++ if (resource_type(iores_array) != IORESOURCE_MEM) {
1771 + dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n");
1772 + return -EINVAL;
1773 ++ } else if (resource_type(iores_array + TPM_CRB_MAX_RESOURCES) ==
1774 ++ IORESOURCE_MEM) {
1775 ++ dev_warn(dev, "TPM2 ACPI table defines too many memory resources\n");
1776 ++ memset(iores_array + TPM_CRB_MAX_RESOURCES,
1777 ++ 0, sizeof(*iores_array));
1778 ++ iores_array[TPM_CRB_MAX_RESOURCES].flags = 0;
1779 + }
1780 +
1781 +- priv->iobase = devm_ioremap_resource(dev, &io_res);
1782 +- if (IS_ERR(priv->iobase))
1783 +- return PTR_ERR(priv->iobase);
1784 ++ iores = NULL;
1785 ++ iobase_ptr = NULL;
1786 ++ for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) {
1787 ++ if (buf->control_address >= iores_array[i].start &&
1788 ++ buf->control_address + sizeof(struct crb_regs_tail) - 1 <=
1789 ++ iores_array[i].end) {
1790 ++ iores = iores_array + i;
1791 ++ iobase_ptr = iobase_array + i;
1792 ++ break;
1793 ++ }
1794 ++ }
1795 ++
1796 ++ priv->regs_t = crb_map_res(dev, iores, iobase_ptr, buf->control_address,
1797 ++ sizeof(struct crb_regs_tail));
1798 ++
1799 ++ if (IS_ERR(priv->regs_t))
1800 ++ return PTR_ERR(priv->regs_t);
1801 +
1802 + /* The ACPI IO region starts at the head area and continues to include
1803 + * the control area, as one nice sane region except for some older
1804 +@@ -527,9 +563,10 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
1805 + */
1806 + if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
1807 + (priv->sm == ACPI_TPM2_MEMORY_MAPPED)) {
1808 +- if (buf->control_address == io_res.start +
1809 ++ if (iores &&
1810 ++ buf->control_address == iores->start +
1811 + sizeof(*priv->regs_h))
1812 +- priv->regs_h = priv->iobase;
1813 ++ priv->regs_h = *iobase_ptr;
1814 + else
1815 + dev_warn(dev, FW_BUG "Bad ACPI memory layout");
1816 + }
1817 +@@ -538,13 +575,6 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
1818 + if (ret)
1819 + return ret;
1820 +
1821 +- priv->regs_t = crb_map_res(dev, priv, &io_res, buf->control_address,
1822 +- sizeof(struct crb_regs_tail));
1823 +- if (IS_ERR(priv->regs_t)) {
1824 +- ret = PTR_ERR(priv->regs_t);
1825 +- goto out_relinquish_locality;
1826 +- }
1827 +-
1828 + /*
1829 + * PTT HW bug w/a: wake up the device to access
1830 + * possibly not retained registers.
1831 +@@ -556,13 +586,26 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
1832 + pa_high = ioread32(&priv->regs_t->ctrl_cmd_pa_high);
1833 + pa_low = ioread32(&priv->regs_t->ctrl_cmd_pa_low);
1834 + cmd_pa = ((u64)pa_high << 32) | pa_low;
1835 +- cmd_size = crb_fixup_cmd_size(dev, &io_res, cmd_pa,
1836 +- ioread32(&priv->regs_t->ctrl_cmd_size));
1837 ++ cmd_size = ioread32(&priv->regs_t->ctrl_cmd_size);
1838 ++
1839 ++ iores = NULL;
1840 ++ iobase_ptr = NULL;
1841 ++ for (i = 0; iores_array[i].end; ++i) {
1842 ++ if (cmd_pa >= iores_array[i].start &&
1843 ++ cmd_pa <= iores_array[i].end) {
1844 ++ iores = iores_array + i;
1845 ++ iobase_ptr = iobase_array + i;
1846 ++ break;
1847 ++ }
1848 ++ }
1849 ++
1850 ++ if (iores)
1851 ++ cmd_size = crb_fixup_cmd_size(dev, iores, cmd_pa, cmd_size);
1852 +
1853 + dev_dbg(dev, "cmd_hi = %X cmd_low = %X cmd_size %X\n",
1854 + pa_high, pa_low, cmd_size);
1855 +
1856 +- priv->cmd = crb_map_res(dev, priv, &io_res, cmd_pa, cmd_size);
1857 ++ priv->cmd = crb_map_res(dev, iores, iobase_ptr, cmd_pa, cmd_size);
1858 + if (IS_ERR(priv->cmd)) {
1859 + ret = PTR_ERR(priv->cmd);
1860 + goto out;
1861 +@@ -570,11 +613,25 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
1862 +
1863 + memcpy_fromio(&__rsp_pa, &priv->regs_t->ctrl_rsp_pa, 8);
1864 + rsp_pa = le64_to_cpu(__rsp_pa);
1865 +- rsp_size = crb_fixup_cmd_size(dev, &io_res, rsp_pa,
1866 +- ioread32(&priv->regs_t->ctrl_rsp_size));
1867 ++ rsp_size = ioread32(&priv->regs_t->ctrl_rsp_size);
1868 ++
1869 ++ iores = NULL;
1870 ++ iobase_ptr = NULL;
1871 ++ for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) {
1872 ++ if (rsp_pa >= iores_array[i].start &&
1873 ++ rsp_pa <= iores_array[i].end) {
1874 ++ iores = iores_array + i;
1875 ++ iobase_ptr = iobase_array + i;
1876 ++ break;
1877 ++ }
1878 ++ }
1879 ++
1880 ++ if (iores)
1881 ++ rsp_size = crb_fixup_cmd_size(dev, iores, rsp_pa, rsp_size);
1882 +
1883 + if (cmd_pa != rsp_pa) {
1884 +- priv->rsp = crb_map_res(dev, priv, &io_res, rsp_pa, rsp_size);
1885 ++ priv->rsp = crb_map_res(dev, iores, iobase_ptr,
1886 ++ rsp_pa, rsp_size);
1887 + ret = PTR_ERR_OR_ZERO(priv->rsp);
1888 + goto out;
1889 + }
1890 +diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
1891 +index 569e93e1f06cc..3ba67bc6baba0 100644
1892 +--- a/drivers/char/tpm/tpm_ibmvtpm.c
1893 ++++ b/drivers/char/tpm/tpm_ibmvtpm.c
1894 +@@ -588,6 +588,7 @@ static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
1895 + */
1896 + while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
1897 + ibmvtpm_crq_process(crq, ibmvtpm);
1898 ++ wake_up_interruptible(&ibmvtpm->crq_queue.wq);
1899 + crq->valid = 0;
1900 + smp_wmb();
1901 + }
1902 +@@ -635,6 +636,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
1903 + }
1904 +
1905 + crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
1906 ++ init_waitqueue_head(&crq_q->wq);
1907 + ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
1908 + CRQ_RES_BUF_SIZE,
1909 + DMA_BIDIRECTIONAL);
1910 +@@ -687,6 +689,13 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
1911 + if (rc)
1912 + goto init_irq_cleanup;
1913 +
1914 ++ if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
1915 ++ ibmvtpm->rtce_buf != NULL,
1916 ++ HZ)) {
1917 ++ dev_err(dev, "CRQ response timed out\n");
1918 ++ goto init_irq_cleanup;
1919 ++ }
1920 ++
1921 + return tpm_chip_register(chip);
1922 + init_irq_cleanup:
1923 + do {
1924 +diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h
1925 +index 91dfe766d0800..4f6a124601db4 100644
1926 +--- a/drivers/char/tpm/tpm_ibmvtpm.h
1927 ++++ b/drivers/char/tpm/tpm_ibmvtpm.h
1928 +@@ -31,6 +31,7 @@ struct ibmvtpm_crq_queue {
1929 + struct ibmvtpm_crq *crq_addr;
1930 + u32 index;
1931 + u32 num_entry;
1932 ++ wait_queue_head_t wq;
1933 + };
1934 +
1935 + struct ibmvtpm_dev {
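
The ibmvtpm change above makes probe wait until the CRQ response has populated rtce_buf before registering the chip, with the interrupt handler waking the new queue. A rough sketch of the wait/wake pairing, using plain wake_up() for a wait_event_timeout() sleeper and hypothetical names; the CRQ handshake details are omitted:

  struct foo_dev {
          wait_queue_head_t wq;
          void *ready_buf;        /* published by the completion path */
  };

  static int foo_wait_ready(struct foo_dev *d)
  {
          /* init_waitqueue_head(&d->wq) must run before the IRQ can fire */
          if (!wait_event_timeout(d->wq, d->ready_buf != NULL, HZ))
                  return -ETIMEDOUT;      /* no response within 1s */
          return 0;
  }

  /* completion path, after setting d->ready_buf: wake_up(&d->wq); */
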
1936 +diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
1937 +index c4d0b6f6abf2e..fc2e2839fe570 100644
1938 +--- a/drivers/clk/socfpga/clk-pll-s10.c
1939 ++++ b/drivers/clk/socfpga/clk-pll-s10.c
1940 +@@ -38,7 +38,9 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
1941 + /* read VCO1 reg for numerator and denominator */
1942 + reg = readl(socfpgaclk->hw.reg);
1943 + refdiv = (reg & SOCFPGA_PLL_REFDIV_MASK) >> SOCFPGA_PLL_REFDIV_SHIFT;
1944 +- vco_freq = (unsigned long long)parent_rate / refdiv;
1945 ++
1946 ++ vco_freq = parent_rate;
1947 ++ do_div(vco_freq, refdiv);
1948 +
1949 + /* Read mdiv and fdiv from the fdbck register */
1950 + reg = readl(socfpgaclk->hw.reg + 0x4);
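
The clk-pll-s10 hunk replaces a plain 64-by-32 division because, on 32-bit kernels, the compiler would emit a call to a libgcc helper the kernel does not provide; do_div() from <asm/div64.h> divides a u64 in place and returns the remainder. A minimal sketch:

  #include <asm/div64.h>

  static u64 scale_rate(u64 rate, u32 div)
  {
          do_div(rate, div);      /* rate now holds the quotient */
          return rate;
  }
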
1951 +diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c
1952 +index 688e403333b91..14926e07d09ae 100644
1953 +--- a/drivers/clk/ti/adpll.c
1954 ++++ b/drivers/clk/ti/adpll.c
1955 +@@ -193,15 +193,8 @@ static const char *ti_adpll_clk_get_name(struct ti_adpll_data *d,
1956 + if (err)
1957 + return NULL;
1958 + } else {
1959 +- const char *base_name = "adpll";
1960 +- char *buf;
1961 +-
1962 +- buf = devm_kzalloc(d->dev, 8 + 1 + strlen(base_name) + 1 +
1963 +- strlen(postfix), GFP_KERNEL);
1964 +- if (!buf)
1965 +- return NULL;
1966 +- sprintf(buf, "%08lx.%s.%s", d->pa, base_name, postfix);
1967 +- name = buf;
1968 ++ name = devm_kasprintf(d->dev, GFP_KERNEL, "%08lx.adpll.%s",
1969 ++ d->pa, postfix);
1970 + }
1971 +
1972 + return name;
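
The adpll cleanup above collapses a hand-computed devm_kzalloc() size plus sprintf() into devm_kasprintf(), which measures, allocates (device-managed, so no explicit free) and formats in one step. A minimal sketch:

  static const char *make_clk_name(struct device *dev, unsigned long pa,
                                   const char *postfix)
  {
          /* returns NULL on allocation failure, like the open-coded path */
          return devm_kasprintf(dev, GFP_KERNEL, "%08lx.adpll.%s",
                                pa, postfix);
  }
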
1973 +diff --git a/drivers/clocksource/h8300_timer8.c b/drivers/clocksource/h8300_timer8.c
1974 +index 1d740a8c42ab3..47114c2a7cb54 100644
1975 +--- a/drivers/clocksource/h8300_timer8.c
1976 ++++ b/drivers/clocksource/h8300_timer8.c
1977 +@@ -169,7 +169,7 @@ static int __init h8300_8timer_init(struct device_node *node)
1978 + return PTR_ERR(clk);
1979 + }
1980 +
1981 +- ret = ENXIO;
1982 ++ ret = -ENXIO;
1983 + base = of_iomap(node, 0);
1984 + if (!base) {
1985 + pr_err("failed to map registers for clockevent\n");
1986 +diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
1987 +index 687c92ef76440..79942f7057576 100644
1988 +--- a/drivers/cpufreq/powernv-cpufreq.c
1989 ++++ b/drivers/cpufreq/powernv-cpufreq.c
1990 +@@ -903,6 +903,7 @@ static struct notifier_block powernv_cpufreq_reboot_nb = {
1991 + void powernv_cpufreq_work_fn(struct work_struct *work)
1992 + {
1993 + struct chip *chip = container_of(work, struct chip, throttle);
1994 ++ struct cpufreq_policy *policy;
1995 + unsigned int cpu;
1996 + cpumask_t mask;
1997 +
1998 +@@ -917,12 +918,14 @@ void powernv_cpufreq_work_fn(struct work_struct *work)
1999 + chip->restore = false;
2000 + for_each_cpu(cpu, &mask) {
2001 + int index;
2002 +- struct cpufreq_policy policy;
2003 +
2004 +- cpufreq_get_policy(&policy, cpu);
2005 +- index = cpufreq_table_find_index_c(&policy, policy.cur);
2006 +- powernv_cpufreq_target_index(&policy, index);
2007 +- cpumask_andnot(&mask, &mask, policy.cpus);
2008 ++ policy = cpufreq_cpu_get(cpu);
2009 ++ if (!policy)
2010 ++ continue;
2011 ++ index = cpufreq_table_find_index_c(policy, policy->cur);
2012 ++ powernv_cpufreq_target_index(policy, index);
2013 ++ cpumask_andnot(&mask, &mask, policy->cpus);
2014 ++ cpufreq_cpu_put(policy);
2015 + }
2016 + out:
2017 + put_online_cpus();
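
The powernv fix above moves from cpufreq_get_policy(), which copies into a caller-owned struct, to cpufreq_cpu_get(), which returns the live policy and takes a reference that must be dropped with cpufreq_cpu_put(). A minimal sketch of the discipline:

  static unsigned int foo_cur_freq(unsigned int cpu)
  {
          struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
          unsigned int cur = 0;

          if (policy) {           /* NULL if the CPU has no policy */
                  cur = policy->cur;
                  cpufreq_cpu_put(policy);   /* drop the reference */
          }
          return cur;
  }
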
2018 +diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
2019 +index 9b3c259f081d3..ee508bbbb7504 100644
2020 +--- a/drivers/crypto/chelsio/chcr_algo.c
2021 ++++ b/drivers/crypto/chelsio/chcr_algo.c
2022 +@@ -2418,8 +2418,9 @@ int chcr_aead_dma_map(struct device *dev,
2023 + else
2024 + reqctx->b0_dma = 0;
2025 + if (req->src == req->dst) {
2026 +- error = dma_map_sg(dev, req->src, sg_nents(req->src),
2027 +- DMA_BIDIRECTIONAL);
2028 ++ error = dma_map_sg(dev, req->src,
2029 ++ sg_nents_for_len(req->src, dst_size),
2030 ++ DMA_BIDIRECTIONAL);
2031 + if (!error)
2032 + goto err;
2033 + } else {
2034 +diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
2035 +index 1e0cc96306dd7..2c1f3ddb0cc79 100644
2036 +--- a/drivers/crypto/chelsio/chtls/chtls_io.c
2037 ++++ b/drivers/crypto/chelsio/chtls/chtls_io.c
2038 +@@ -1449,7 +1449,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
2039 + csk->wr_max_credits))
2040 + sk->sk_write_space(sk);
2041 +
2042 +- if (copied >= target && !sk->sk_backlog.tail)
2043 ++ if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
2044 + break;
2045 +
2046 + if (copied) {
2047 +@@ -1482,7 +1482,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
2048 + break;
2049 + }
2050 + }
2051 +- if (sk->sk_backlog.tail) {
2052 ++ if (READ_ONCE(sk->sk_backlog.tail)) {
2053 + release_sock(sk);
2054 + lock_sock(sk);
2055 + chtls_cleanup_rbuf(sk, copied);
2056 +@@ -1627,7 +1627,7 @@ static int peekmsg(struct sock *sk, struct msghdr *msg,
2057 + break;
2058 + }
2059 +
2060 +- if (sk->sk_backlog.tail) {
2061 ++ if (READ_ONCE(sk->sk_backlog.tail)) {
2062 + /* Do not sleep, just process backlog. */
2063 + release_sock(sk);
2064 + lock_sock(sk);
2065 +@@ -1759,7 +1759,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
2066 + csk->wr_max_credits))
2067 + sk->sk_write_space(sk);
2068 +
2069 +- if (copied >= target && !sk->sk_backlog.tail)
2070 ++ if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
2071 + break;
2072 +
2073 + if (copied) {
2074 +@@ -1790,7 +1790,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
2075 + }
2076 + }
2077 +
2078 +- if (sk->sk_backlog.tail) {
2079 ++ if (READ_ONCE(sk->sk_backlog.tail)) {
2080 + release_sock(sk);
2081 + lock_sock(sk);
2082 + chtls_cleanup_rbuf(sk, copied);
2083 +diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
2084 +index 06768074d2d82..479d9575e1245 100644
2085 +--- a/drivers/devfreq/tegra-devfreq.c
2086 ++++ b/drivers/devfreq/tegra-devfreq.c
2087 +@@ -80,6 +80,8 @@
2088 +
2089 + #define KHZ 1000
2090 +
2091 ++#define KHZ_MAX (ULONG_MAX / KHZ)
2092 ++
2093 + /* Assume that the bus is saturated if the utilization is 25% */
2094 + #define BUS_SATURATION_RATIO 25
2095 +
2096 +@@ -180,7 +182,7 @@ struct tegra_actmon_emc_ratio {
2097 + };
2098 +
2099 + static struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
2100 +- { 1400000, ULONG_MAX },
2101 ++ { 1400000, KHZ_MAX },
2102 + { 1200000, 750000 },
2103 + { 1100000, 600000 },
2104 + { 1000000, 500000 },
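
The tegra-devfreq table holds kHz values that are later multiplied by KHZ to get Hz, so a ULONG_MAX entry wraps around; capping it at ULONG_MAX / KHZ keeps the product representable. A short illustration of the arithmetic, assuming the driver's KHZ definition:

  #define KHZ     1000UL
  #define KHZ_MAX (ULONG_MAX / KHZ)

  static unsigned long khz_to_hz(unsigned long khz)
  {
          if (khz > KHZ_MAX)
                  khz = KHZ_MAX;  /* keep khz * KHZ from overflowing */
          return khz * KHZ;
  }
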
2105 +diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
2106 +index 1551ca7df3941..8586cc05def17 100644
2107 +--- a/drivers/dma-buf/dma-fence.c
2108 ++++ b/drivers/dma-buf/dma-fence.c
2109 +@@ -244,6 +244,30 @@ void dma_fence_free(struct dma_fence *fence)
2110 + }
2111 + EXPORT_SYMBOL(dma_fence_free);
2112 +
2113 ++static bool __dma_fence_enable_signaling(struct dma_fence *fence)
2114 ++{
2115 ++ bool was_set;
2116 ++
2117 ++ lockdep_assert_held(fence->lock);
2118 ++
2119 ++ was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
2120 ++ &fence->flags);
2121 ++
2122 ++ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
2123 ++ return false;
2124 ++
2125 ++ if (!was_set && fence->ops->enable_signaling) {
2126 ++ trace_dma_fence_enable_signal(fence);
2127 ++
2128 ++ if (!fence->ops->enable_signaling(fence)) {
2129 ++ dma_fence_signal_locked(fence);
2130 ++ return false;
2131 ++ }
2132 ++ }
2133 ++
2134 ++ return true;
2135 ++}
2136 ++
2137 + /**
2138 + * dma_fence_enable_sw_signaling - enable signaling on fence
2139 + * @fence: the fence to enable
2140 +@@ -256,19 +280,12 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence)
2141 + {
2142 + unsigned long flags;
2143 +
2144 +- if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
2145 +- &fence->flags) &&
2146 +- !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
2147 +- fence->ops->enable_signaling) {
2148 +- trace_dma_fence_enable_signal(fence);
2149 +-
2150 +- spin_lock_irqsave(fence->lock, flags);
2151 +-
2152 +- if (!fence->ops->enable_signaling(fence))
2153 +- dma_fence_signal_locked(fence);
2154 ++ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
2155 ++ return;
2156 +
2157 +- spin_unlock_irqrestore(fence->lock, flags);
2158 +- }
2159 ++ spin_lock_irqsave(fence->lock, flags);
2160 ++ __dma_fence_enable_signaling(fence);
2161 ++ spin_unlock_irqrestore(fence->lock, flags);
2162 + }
2163 + EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
2164 +
2165 +@@ -302,7 +319,6 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
2166 + {
2167 + unsigned long flags;
2168 + int ret = 0;
2169 +- bool was_set;
2170 +
2171 + if (WARN_ON(!fence || !func))
2172 + return -EINVAL;
2173 +@@ -314,25 +330,14 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
2174 +
2175 + spin_lock_irqsave(fence->lock, flags);
2176 +
2177 +- was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
2178 +- &fence->flags);
2179 +-
2180 +- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
2181 +- ret = -ENOENT;
2182 +- else if (!was_set && fence->ops->enable_signaling) {
2183 +- trace_dma_fence_enable_signal(fence);
2184 +-
2185 +- if (!fence->ops->enable_signaling(fence)) {
2186 +- dma_fence_signal_locked(fence);
2187 +- ret = -ENOENT;
2188 +- }
2189 +- }
2190 +-
2191 +- if (!ret) {
2192 ++ if (__dma_fence_enable_signaling(fence)) {
2193 + cb->func = func;
2194 + list_add_tail(&cb->node, &fence->cb_list);
2195 +- } else
2196 ++ } else {
2197 + INIT_LIST_HEAD(&cb->node);
2198 ++ ret = -ENOENT;
2199 ++ }
2200 ++
2201 + spin_unlock_irqrestore(fence->lock, flags);
2202 +
2203 + return ret;
2204 +@@ -432,7 +437,6 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
2205 + struct default_wait_cb cb;
2206 + unsigned long flags;
2207 + signed long ret = timeout ? timeout : 1;
2208 +- bool was_set;
2209 +
2210 + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
2211 + return ret;
2212 +@@ -444,21 +448,9 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
2213 + goto out;
2214 + }
2215 +
2216 +- was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
2217 +- &fence->flags);
2218 +-
2219 +- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
2220 ++ if (!__dma_fence_enable_signaling(fence))
2221 + goto out;
2222 +
2223 +- if (!was_set && fence->ops->enable_signaling) {
2224 +- trace_dma_fence_enable_signal(fence);
2225 +-
2226 +- if (!fence->ops->enable_signaling(fence)) {
2227 +- dma_fence_signal_locked(fence);
2228 +- goto out;
2229 +- }
2230 +- }
2231 +-
2232 + if (!timeout) {
2233 + ret = 0;
2234 + goto out;
2235 +diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
2236 +index b7ec56ae02a6e..fca232b1d4a64 100644
2237 +--- a/drivers/dma/mediatek/mtk-hsdma.c
2238 ++++ b/drivers/dma/mediatek/mtk-hsdma.c
2239 +@@ -997,7 +997,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
2240 + if (err) {
2241 + dev_err(&pdev->dev,
2242 + "request_irq failed with err %d\n", err);
2243 +- goto err_unregister;
2244 ++ goto err_free;
2245 + }
2246 +
2247 + platform_set_drvdata(pdev, hsdma);
2248 +@@ -1006,6 +1006,8 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
2249 +
2250 + return 0;
2251 +
2252 ++err_free:
2253 ++ of_dma_controller_free(pdev->dev.of_node);
2254 + err_unregister:
2255 + dma_async_device_unregister(dd);
2256 +
2257 +diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
2258 +index 4903a408fc146..ac7af440f8658 100644
2259 +--- a/drivers/dma/stm32-dma.c
2260 ++++ b/drivers/dma/stm32-dma.c
2261 +@@ -494,8 +494,10 @@ static int stm32_dma_terminate_all(struct dma_chan *c)
2262 +
2263 + spin_lock_irqsave(&chan->vchan.lock, flags);
2264 +
2265 +- if (chan->busy) {
2266 +- stm32_dma_stop(chan);
2267 ++ if (chan->desc) {
2268 ++ vchan_terminate_vdesc(&chan->desc->vdesc);
2269 ++ if (chan->busy)
2270 ++ stm32_dma_stop(chan);
2271 + chan->desc = NULL;
2272 + }
2273 +
2274 +@@ -551,6 +553,8 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
2275 + if (!vdesc)
2276 + return;
2277 +
2278 ++ list_del(&vdesc->node);
2279 ++
2280 + chan->desc = to_stm32_dma_desc(vdesc);
2281 + chan->next_sg = 0;
2282 + }
2283 +@@ -628,7 +632,6 @@ static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan)
2284 + } else {
2285 + chan->busy = false;
2286 + if (chan->next_sg == chan->desc->num_sgs) {
2287 +- list_del(&chan->desc->vdesc.node);
2288 + vchan_cookie_complete(&chan->desc->vdesc);
2289 + chan->desc = NULL;
2290 + }
2291 +diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
2292 +index 8c3c3e5b812a8..9c6867916e890 100644
2293 +--- a/drivers/dma/stm32-mdma.c
2294 ++++ b/drivers/dma/stm32-mdma.c
2295 +@@ -1137,6 +1137,8 @@ static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan)
2296 + return;
2297 + }
2298 +
2299 ++ list_del(&vdesc->node);
2300 ++
2301 + chan->desc = to_stm32_mdma_desc(vdesc);
2302 + hwdesc = chan->desc->node[0].hwdesc;
2303 + chan->curr_hwdesc = 0;
2304 +@@ -1252,8 +1254,10 @@ static int stm32_mdma_terminate_all(struct dma_chan *c)
2305 + LIST_HEAD(head);
2306 +
2307 + spin_lock_irqsave(&chan->vchan.lock, flags);
2308 +- if (chan->busy) {
2309 +- stm32_mdma_stop(chan);
2310 ++ if (chan->desc) {
2311 ++ vchan_terminate_vdesc(&chan->desc->vdesc);
2312 ++ if (chan->busy)
2313 ++ stm32_mdma_stop(chan);
2314 + chan->desc = NULL;
2315 + }
2316 + vchan_get_all_descriptors(&chan->vchan, &head);
2317 +@@ -1341,7 +1345,6 @@ static enum dma_status stm32_mdma_tx_status(struct dma_chan *c,
2318 +
2319 + static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan)
2320 + {
2321 +- list_del(&chan->desc->vdesc.node);
2322 + vchan_cookie_complete(&chan->desc->vdesc);
2323 + chan->desc = NULL;
2324 + chan->busy = false;
2325 +diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
2326 +index 15481aeaeecd1..5ccd24a46e381 100644
2327 +--- a/drivers/dma/tegra20-apb-dma.c
2328 ++++ b/drivers/dma/tegra20-apb-dma.c
2329 +@@ -1225,8 +1225,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc)
2330 +
2331 + dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);
2332 +
2333 +- if (tdc->busy)
2334 +- tegra_dma_terminate_all(dc);
2335 ++ tegra_dma_terminate_all(dc);
2336 +
2337 + spin_lock_irqsave(&tdc->lock, flags);
2338 + list_splice_init(&tdc->pending_sg_req, &sg_req_list);
2339 +diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
2340 +index 73de6a6179fcd..e002ff8413e2a 100644
2341 +--- a/drivers/dma/xilinx/zynqmp_dma.c
2342 ++++ b/drivers/dma/xilinx/zynqmp_dma.c
2343 +@@ -127,10 +127,12 @@
2344 + /* Max transfer size per descriptor */
2345 + #define ZYNQMP_DMA_MAX_TRANS_LEN 0x40000000
2346 +
2347 ++/* Max burst lengths */
2348 ++#define ZYNQMP_DMA_MAX_DST_BURST_LEN 32768U
2349 ++#define ZYNQMP_DMA_MAX_SRC_BURST_LEN 32768U
2350 ++
2351 + /* Reset values for data attributes */
2352 + #define ZYNQMP_DMA_AXCACHE_VAL 0xF
2353 +-#define ZYNQMP_DMA_ARLEN_RST_VAL 0xF
2354 +-#define ZYNQMP_DMA_AWLEN_RST_VAL 0xF
2355 +
2356 + #define ZYNQMP_DMA_SRC_ISSUE_RST_VAL 0x1F
2357 +
2358 +@@ -536,17 +538,19 @@ static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status)
2359 +
2360 + static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
2361 + {
2362 +- u32 val;
2363 ++ u32 val, burst_val;
2364 +
2365 + val = readl(chan->regs + ZYNQMP_DMA_CTRL0);
2366 + val |= ZYNQMP_DMA_POINT_TYPE_SG;
2367 + writel(val, chan->regs + ZYNQMP_DMA_CTRL0);
2368 +
2369 + val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR);
2370 ++ burst_val = __ilog2_u32(chan->src_burst_len);
2371 + val = (val & ~ZYNQMP_DMA_ARLEN) |
2372 +- (chan->src_burst_len << ZYNQMP_DMA_ARLEN_OFST);
2373 ++ ((burst_val << ZYNQMP_DMA_ARLEN_OFST) & ZYNQMP_DMA_ARLEN);
2374 ++ burst_val = __ilog2_u32(chan->dst_burst_len);
2375 + val = (val & ~ZYNQMP_DMA_AWLEN) |
2376 +- (chan->dst_burst_len << ZYNQMP_DMA_AWLEN_OFST);
2377 ++ ((burst_val << ZYNQMP_DMA_AWLEN_OFST) & ZYNQMP_DMA_AWLEN);
2378 + writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);
2379 + }
2380 +
2381 +@@ -562,8 +566,10 @@ static int zynqmp_dma_device_config(struct dma_chan *dchan,
2382 + {
2383 + struct zynqmp_dma_chan *chan = to_chan(dchan);
2384 +
2385 +- chan->src_burst_len = config->src_maxburst;
2386 +- chan->dst_burst_len = config->dst_maxburst;
2387 ++ chan->src_burst_len = clamp(config->src_maxburst, 1U,
2388 ++ ZYNQMP_DMA_MAX_SRC_BURST_LEN);
2389 ++ chan->dst_burst_len = clamp(config->dst_maxburst, 1U,
2390 ++ ZYNQMP_DMA_MAX_DST_BURST_LEN);
2391 +
2392 + return 0;
2393 + }
2394 +@@ -884,8 +890,8 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
2395 + return PTR_ERR(chan->regs);
2396 +
2397 + chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64;
2398 +- chan->dst_burst_len = ZYNQMP_DMA_AWLEN_RST_VAL;
2399 +- chan->src_burst_len = ZYNQMP_DMA_ARLEN_RST_VAL;
2400 ++ chan->dst_burst_len = ZYNQMP_DMA_MAX_DST_BURST_LEN;
2401 ++ chan->src_burst_len = ZYNQMP_DMA_MAX_SRC_BURST_LEN;
2402 + err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width);
2403 + if (err < 0) {
2404 + dev_err(&pdev->dev, "missing xlnx,bus-width property\n");
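
The zynqmp hunks above reflect that the ARLEN/AWLEN register fields are log2-encoded: the requested maxburst is first clamped to the hardware range and then programmed as its base-2 logarithm. A minimal sketch of the encode step (register masks and shifts omitted):

  static u32 encode_burst(u32 maxburst)
  {
          u32 burst = clamp(maxburst, 1U, 32768U);  /* hardware limits */

          return __ilog2_u32(burst);      /* field wants log2(burst) */
  }
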
2405 +diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
2406 +index 05b528c7ed8fd..e809f4d9a9e93 100644
2407 +--- a/drivers/firmware/arm_sdei.c
2408 ++++ b/drivers/firmware/arm_sdei.c
2409 +@@ -410,14 +410,19 @@ int sdei_event_enable(u32 event_num)
2410 + return -ENOENT;
2411 + }
2412 +
2413 +- spin_lock(&sdei_list_lock);
2414 +- event->reenable = true;
2415 +- spin_unlock(&sdei_list_lock);
2416 +
2417 ++ cpus_read_lock();
2418 + if (event->type == SDEI_EVENT_TYPE_SHARED)
2419 + err = sdei_api_event_enable(event->event_num);
2420 + else
2421 + err = sdei_do_cross_call(_local_event_enable, event);
2422 ++
2423 ++ if (!err) {
2424 ++ spin_lock(&sdei_list_lock);
2425 ++ event->reenable = true;
2426 ++ spin_unlock(&sdei_list_lock);
2427 ++ }
2428 ++ cpus_read_unlock();
2429 + mutex_unlock(&sdei_events_lock);
2430 +
2431 + return err;
2432 +@@ -619,21 +624,18 @@ int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
2433 + break;
2434 + }
2435 +
2436 +- spin_lock(&sdei_list_lock);
2437 +- event->reregister = true;
2438 +- spin_unlock(&sdei_list_lock);
2439 +-
2440 ++ cpus_read_lock();
2441 + err = _sdei_event_register(event);
2442 + if (err) {
2443 +- spin_lock(&sdei_list_lock);
2444 +- event->reregister = false;
2445 +- event->reenable = false;
2446 +- spin_unlock(&sdei_list_lock);
2447 +-
2448 + sdei_event_destroy(event);
2449 + pr_warn("Failed to register event %u: %d\n", event_num,
2450 + err);
2451 ++ } else {
2452 ++ spin_lock(&sdei_list_lock);
2453 ++ event->reregister = true;
2454 ++ spin_unlock(&sdei_list_lock);
2455 + }
2456 ++ cpus_read_unlock();
2457 + } while (0);
2458 + mutex_unlock(&sdei_events_lock);
2459 +
2460 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
2461 +index a5df80d50d447..6cf3dd5edffda 100644
2462 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
2463 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
2464 +@@ -191,30 +191,35 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
2465 +
2466 + static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
2467 + {
2468 +- uint8_t __iomem *bios;
2469 +- size_t size;
2470 ++ phys_addr_t rom = adev->pdev->rom;
2471 ++ size_t romlen = adev->pdev->romlen;
2472 ++ void __iomem *bios;
2473 +
2474 + adev->bios = NULL;
2475 +
2476 +- bios = pci_platform_rom(adev->pdev, &size);
2477 +- if (!bios) {
2478 ++ if (!rom || romlen == 0)
2479 + return false;
2480 +- }
2481 +
2482 +- adev->bios = kzalloc(size, GFP_KERNEL);
2483 +- if (adev->bios == NULL)
2484 ++ adev->bios = kzalloc(romlen, GFP_KERNEL);
2485 ++ if (!adev->bios)
2486 + return false;
2487 +
2488 +- memcpy_fromio(adev->bios, bios, size);
2489 ++ bios = ioremap(rom, romlen);
2490 ++ if (!bios)
2491 ++ goto free_bios;
2492 +
2493 +- if (!check_atom_bios(adev->bios, size)) {
2494 +- kfree(adev->bios);
2495 +- return false;
2496 +- }
2497 ++ memcpy_fromio(adev->bios, bios, romlen);
2498 ++ iounmap(bios);
2499 +
2500 +- adev->bios_size = size;
2501 ++ if (!check_atom_bios(adev->bios, romlen))
2502 ++ goto free_bios;
2503 ++
2504 ++ adev->bios_size = romlen;
2505 +
2506 + return true;
2507 ++free_bios:
2508 ++ kfree(adev->bios);
2509 ++ return false;
2510 + }
2511 +
2512 + #ifdef CONFIG_ACPI
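
The amdgpu change above moves away from the pci_platform_rom() helper and reads the platform ROM directly: the window at pdev->rom is MMIO, so it is mapped with ioremap(), copied out with memcpy_fromio(), and unmapped again, leaving a plain kernel-memory copy behind. A condensed sketch of that sequence:

  static void *copy_platform_rom(phys_addr_t rom, size_t romlen)
  {
          void *copy = kzalloc(romlen, GFP_KERNEL);
          void __iomem *bios;

          if (!copy)
                  return NULL;

          bios = ioremap(rom, romlen);    /* map the MMIO ROM window */
          if (!bios) {
                  kfree(copy);
                  return NULL;
          }
          memcpy_fromio(copy, bios, romlen);  /* MMIO-safe byte copy */
          iounmap(bios);
          return copy;
  }
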
2513 +diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
2514 +index e9934de1b9cf8..0222bb7ea49b4 100644
2515 +--- a/drivers/gpu/drm/amd/amdgpu/atom.c
2516 ++++ b/drivers/gpu/drm/amd/amdgpu/atom.c
2517 +@@ -742,8 +742,8 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
2518 + cjiffies = jiffies;
2519 + if (time_after(cjiffies, ctx->last_jump_jiffies)) {
2520 + cjiffies -= ctx->last_jump_jiffies;
2521 +- if ((jiffies_to_msecs(cjiffies) > 5000)) {
2522 +- DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n");
2523 ++ if ((jiffies_to_msecs(cjiffies) > 10000)) {
2524 ++ DRM_ERROR("atombios stuck in loop for more than 10secs aborting\n");
2525 + ctx->abort = true;
2526 + }
2527 + } else {
2528 +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2529 +index 189212cb35475..bff39f561264e 100644
2530 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2531 ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2532 +@@ -1101,6 +1101,8 @@ static int stop_cpsch(struct device_queue_manager *dqm)
2533 + unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
2534 + dqm_unlock(dqm);
2535 +
2536 ++ pm_release_ib(&dqm->packets);
2537 ++
2538 + kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
2539 + pm_uninit(&dqm->packets);
2540 +
2541 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
2542 +index 3abc0294c05f5..2fb2c683ad54b 100644
2543 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
2544 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
2545 +@@ -1576,8 +1576,7 @@ static void write_i2c_retimer_setting(
2546 + buffer, sizeof(buffer));
2547 +
2548 + if (!i2c_success)
2549 +- /* Write failure */
2550 +- ASSERT(i2c_success);
2551 ++ goto i2c_write_fail;
2552 +
2553 + /* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
2554 + * needs to be set to 1 on every 0xA-0xC write.
2555 +@@ -1595,8 +1594,7 @@ static void write_i2c_retimer_setting(
2556 + pipe_ctx->stream->sink->link->ddc,
2557 + slave_address, &offset, 1, &value, 1);
2558 + if (!i2c_success)
2559 +- /* Write failure */
2560 +- ASSERT(i2c_success);
2561 ++ goto i2c_write_fail;
2562 + }
2563 +
2564 + buffer[0] = offset;
2565 +@@ -1605,8 +1603,7 @@ static void write_i2c_retimer_setting(
2566 + i2c_success = i2c_write(pipe_ctx, slave_address,
2567 + buffer, sizeof(buffer));
2568 + if (!i2c_success)
2569 +- /* Write failure */
2570 +- ASSERT(i2c_success);
2571 ++ goto i2c_write_fail;
2572 + }
2573 + }
2574 + }
2575 +@@ -1623,8 +1620,7 @@ static void write_i2c_retimer_setting(
2576 + buffer, sizeof(buffer));
2577 +
2578 + if (!i2c_success)
2579 +- /* Write failure */
2580 +- ASSERT(i2c_success);
2581 ++ goto i2c_write_fail;
2582 +
2583 + /* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
2584 + * needs to be set to 1 on every 0xA-0xC write.
2585 +@@ -1642,8 +1638,7 @@ static void write_i2c_retimer_setting(
2586 + pipe_ctx->stream->sink->link->ddc,
2587 + slave_address, &offset, 1, &value, 1);
2588 + if (!i2c_success)
2589 +- /* Write failure */
2590 +- ASSERT(i2c_success);
2591 ++ goto i2c_write_fail;
2592 + }
2593 +
2594 + buffer[0] = offset;
2595 +@@ -1652,8 +1647,7 @@ static void write_i2c_retimer_setting(
2596 + i2c_success = i2c_write(pipe_ctx, slave_address,
2597 + buffer, sizeof(buffer));
2598 + if (!i2c_success)
2599 +- /* Write failure */
2600 +- ASSERT(i2c_success);
2601 ++ goto i2c_write_fail;
2602 + }
2603 + }
2604 + }
2605 +@@ -1668,8 +1662,7 @@ static void write_i2c_retimer_setting(
2606 + i2c_success = i2c_write(pipe_ctx, slave_address,
2607 + buffer, sizeof(buffer));
2608 + if (!i2c_success)
2609 +- /* Write failure */
2610 +- ASSERT(i2c_success);
2611 ++ goto i2c_write_fail;
2612 +
2613 + /* Write offset 0x00 to 0x23 */
2614 + buffer[0] = 0x00;
2615 +@@ -1677,8 +1670,7 @@ static void write_i2c_retimer_setting(
2616 + i2c_success = i2c_write(pipe_ctx, slave_address,
2617 + buffer, sizeof(buffer));
2618 + if (!i2c_success)
2619 +- /* Write failure */
2620 +- ASSERT(i2c_success);
2621 ++ goto i2c_write_fail;
2622 +
2623 + /* Write offset 0xff to 0x00 */
2624 + buffer[0] = 0xff;
2625 +@@ -1686,10 +1678,14 @@ static void write_i2c_retimer_setting(
2626 + i2c_success = i2c_write(pipe_ctx, slave_address,
2627 + buffer, sizeof(buffer));
2628 + if (!i2c_success)
2629 +- /* Write failure */
2630 +- ASSERT(i2c_success);
2631 ++ goto i2c_write_fail;
2632 +
2633 + }
2634 ++
2635 ++ return;
2636 ++
2637 ++i2c_write_fail:
2638 ++ DC_LOG_DEBUG("Set retimer failed");
2639 + }
2640 +
2641 + static void write_i2c_default_retimer_setting(
2642 +@@ -1710,8 +1706,7 @@ static void write_i2c_default_retimer_setting(
2643 + i2c_success = i2c_write(pipe_ctx, slave_address,
2644 + buffer, sizeof(buffer));
2645 + if (!i2c_success)
2646 +- /* Write failure */
2647 +- ASSERT(i2c_success);
2648 ++ goto i2c_write_fail;
2649 +
2650 + /* Write offset 0x0A to 0x17 */
2651 + buffer[0] = 0x0A;
2652 +@@ -1719,8 +1714,7 @@ static void write_i2c_default_retimer_setting(
2653 + i2c_success = i2c_write(pipe_ctx, slave_address,
2654 + buffer, sizeof(buffer));
2655 + if (!i2c_success)
2656 +- /* Write failure */
2657 +- ASSERT(i2c_success);
2658 ++ goto i2c_write_fail;
2659 +
2660 + /* Write offset 0x0B to 0xDA or 0xD8 */
2661 + buffer[0] = 0x0B;
2662 +@@ -1728,8 +1722,7 @@ static void write_i2c_default_retimer_setting(
2663 + i2c_success = i2c_write(pipe_ctx, slave_address,
2664 + buffer, sizeof(buffer));
2665 + if (!i2c_success)
2666 +- /* Write failure */
2667 +- ASSERT(i2c_success);
2668 ++ goto i2c_write_fail;
2669 +
2670 + /* Write offset 0x0A to 0x17 */
2671 + buffer[0] = 0x0A;
2672 +@@ -1737,8 +1730,7 @@ static void write_i2c_default_retimer_setting(
2673 + i2c_success = i2c_write(pipe_ctx, slave_address,
2674 + buffer, sizeof(buffer));
2675 + if (!i2c_success)
2676 +- /* Write failure */
2677 +- ASSERT(i2c_success);
2678 ++ goto i2c_write_fail;
2679 +
2680 + /* Write offset 0x0C to 0x1D or 0x91 */
2681 + buffer[0] = 0x0C;
2682 +@@ -1746,8 +1738,7 @@ static void write_i2c_default_retimer_setting(
2683 + i2c_success = i2c_write(pipe_ctx, slave_address,
2684 + buffer, sizeof(buffer));
2685 + if (!i2c_success)
2686 +- /* Write failure */
2687 +- ASSERT(i2c_success);
2688 ++ goto i2c_write_fail;
2689 +
2690 + /* Write offset 0x0A to 0x17 */
2691 + buffer[0] = 0x0A;
2692 +@@ -1755,8 +1746,7 @@ static void write_i2c_default_retimer_setting(
2693 + i2c_success = i2c_write(pipe_ctx, slave_address,
2694 + buffer, sizeof(buffer));
2695 + if (!i2c_success)
2696 +- /* Write failure */
2697 +- ASSERT(i2c_success);
2698 ++ goto i2c_write_fail;
2699 +
2700 +
2701 + if (is_vga_mode) {
2702 +@@ -1768,8 +1758,7 @@ static void write_i2c_default_retimer_setting(
2703 + i2c_success = i2c_write(pipe_ctx, slave_address,
2704 + buffer, sizeof(buffer));
2705 + if (!i2c_success)
2706 +- /* Write failure */
2707 +- ASSERT(i2c_success);
2708 ++ goto i2c_write_fail;
2709 +
2710 + /* Write offset 0x00 to 0x23 */
2711 + buffer[0] = 0x00;
2712 +@@ -1777,8 +1766,7 @@ static void write_i2c_default_retimer_setting(
2713 + i2c_success = i2c_write(pipe_ctx, slave_address,
2714 + buffer, sizeof(buffer));
2715 + if (!i2c_success)
2716 +- /* Write failure */
2717 +- ASSERT(i2c_success);
2718 ++ goto i2c_write_fail;
2719 +
2720 + /* Write offset 0xff to 0x00 */
2721 + buffer[0] = 0xff;
2722 +@@ -1786,9 +1774,13 @@ static void write_i2c_default_retimer_setting(
2723 + i2c_success = i2c_write(pipe_ctx, slave_address,
2724 + buffer, sizeof(buffer));
2725 + if (!i2c_success)
2726 +- /* Write failure */
2727 +- ASSERT(i2c_success);
2728 ++ goto i2c_write_fail;
2729 + }
2730 ++
2731 ++ return;
2732 ++
2733 ++i2c_write_fail:
2734 ++ DC_LOG_DEBUG("Set default retimer failed");
2735 + }
2736 +
2737 + static void write_i2c_redriver_setting(
2738 +@@ -1811,8 +1803,7 @@ static void write_i2c_redriver_setting(
2739 + buffer, sizeof(buffer));
2740 +
2741 + if (!i2c_success)
2742 +- /* Write failure */
2743 +- ASSERT(i2c_success);
2744 ++ DC_LOG_DEBUG("Set redriver failed");
2745 + }
2746 +
2747 + static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
2748 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
2749 +index 46c9cb47a96e5..145af3bb2dfcb 100644
2750 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
2751 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
2752 +@@ -127,22 +127,16 @@ struct aux_payloads {
2753 + struct vector payloads;
2754 + };
2755 +
2756 +-static struct i2c_payloads *dal_ddc_i2c_payloads_create(struct dc_context *ctx, uint32_t count)
2757 ++static bool dal_ddc_i2c_payloads_create(
2758 ++ struct dc_context *ctx,
2759 ++ struct i2c_payloads *payloads,
2760 ++ uint32_t count)
2761 + {
2762 +- struct i2c_payloads *payloads;
2763 +-
2764 +- payloads = kzalloc(sizeof(struct i2c_payloads), GFP_KERNEL);
2765 +-
2766 +- if (!payloads)
2767 +- return NULL;
2768 +-
2769 + if (dal_vector_construct(
2770 + &payloads->payloads, ctx, count, sizeof(struct i2c_payload)))
2771 +- return payloads;
2772 +-
2773 +- kfree(payloads);
2774 +- return NULL;
2775 ++ return true;
2776 +
2777 ++ return false;
2778 + }
2779 +
2780 + static struct i2c_payload *dal_ddc_i2c_payloads_get(struct i2c_payloads *p)
2781 +@@ -155,14 +149,12 @@ static uint32_t dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p)
2782 + return p->payloads.count;
2783 + }
2784 +
2785 +-static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads **p)
2786 ++static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads *p)
2787 + {
2788 +- if (!p || !*p)
2789 ++ if (!p)
2790 + return;
2791 +- dal_vector_destruct(&(*p)->payloads);
2792 +- kfree(*p);
2793 +- *p = NULL;
2794 +
2795 ++ dal_vector_destruct(&p->payloads);
2796 + }
2797 +
2798 + static struct aux_payloads *dal_ddc_aux_payloads_create(struct dc_context *ctx, uint32_t count)
2799 +@@ -580,9 +572,13 @@ bool dal_ddc_service_query_ddc_data(
2800 +
2801 + uint32_t payloads_num = write_payloads + read_payloads;
2802 +
2803 ++
2804 + if (write_size > EDID_SEGMENT_SIZE || read_size > EDID_SEGMENT_SIZE)
2805 + return false;
2806 +
2807 ++ if (!payloads_num)
2808 ++ return false;
2809 ++
2810 + /*TODO: len of payload data for i2c and aux is uint8!!!!,
2811 + * but we want to read 256 over i2c!!!!*/
2812 + if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
2813 +@@ -613,23 +609,25 @@ bool dal_ddc_service_query_ddc_data(
2814 + dal_ddc_aux_payloads_destroy(&payloads);
2815 +
2816 + } else {
2817 +- struct i2c_payloads *payloads =
2818 +- dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num);
2819 ++ struct i2c_command command = {0};
2820 ++ struct i2c_payloads payloads;
2821 +
2822 +- struct i2c_command command = {
2823 +- .payloads = dal_ddc_i2c_payloads_get(payloads),
2824 +- .number_of_payloads = 0,
2825 +- .engine = DDC_I2C_COMMAND_ENGINE,
2826 +- .speed = ddc->ctx->dc->caps.i2c_speed_in_khz };
2827 ++ if (!dal_ddc_i2c_payloads_create(ddc->ctx, &payloads, payloads_num))
2828 ++ return false;
2829 ++
2830 ++ command.payloads = dal_ddc_i2c_payloads_get(&payloads);
2831 ++ command.number_of_payloads = 0;
2832 ++ command.engine = DDC_I2C_COMMAND_ENGINE;
2833 ++ command.speed = ddc->ctx->dc->caps.i2c_speed_in_khz;
2834 +
2835 + dal_ddc_i2c_payloads_add(
2836 +- payloads, address, write_size, write_buf, true);
2837 ++ &payloads, address, write_size, write_buf, true);
2838 +
2839 + dal_ddc_i2c_payloads_add(
2840 +- payloads, address, read_size, read_buf, false);
2841 ++ &payloads, address, read_size, read_buf, false);
2842 +
2843 + command.number_of_payloads =
2844 +- dal_ddc_i2c_payloads_get_count(payloads);
2845 ++ dal_ddc_i2c_payloads_get_count(&payloads);
2846 +
2847 + ret = dm_helpers_submit_i2c(
2848 + ddc->ctx,
2849 +diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
2850 +index 72c0a2ae2dd4f..058898b321b8a 100644
2851 +--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
2852 ++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
2853 +@@ -3970,6 +3970,13 @@ static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
2854 + "Failed to populate and upload SCLK MCLK DPM levels!",
2855 + result = tmp_result);
2856 +
2857 ++ /*
2858 ++ * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag.
2859 ++ * That effectively disables AVFS feature.
2860 ++ */
2861 ++ if (hwmgr->hardcode_pp_table != NULL)
2862 ++ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
2863 ++
2864 + tmp_result = smu7_update_avfs(hwmgr);
2865 + PP_ASSERT_WITH_CODE((0 == tmp_result),
2866 + "Failed to update avfs voltages!",
2867 +diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
2868 +index ce459ea4ec3ad..da9e6923fa659 100644
2869 +--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
2870 ++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
2871 +@@ -3591,6 +3591,13 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
2872 + PP_ASSERT_WITH_CODE(!result,
2873 + "Failed to upload PPtable!", return result);
2874 +
2875 ++ /*
2876 ++ * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag.
2877 ++ * That effectively disables AVFS feature.
2878 ++ */
2879 ++	if (hwmgr->hardcode_pp_table != NULL)
2880 ++ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
2881 ++
2882 + vega10_update_avfs(hwmgr);
2883 +
2884 + data->need_update_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;
2885 +diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
2886 +index 17db4b4749d5a..2e8479744ca4a 100644
2887 +--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
2888 ++++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
2889 +@@ -415,6 +415,8 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
2890 + struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
2891 + struct gma_clock_t clock;
2892 +
2893 ++ memset(&clock, 0, sizeof(clock));
2894 ++
2895 + switch (refclk) {
2896 + case 27000:
2897 + if (target < 200000) {
2898 +diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
2899 +index 1fc9a7fa37b45..d29a58bd2f7a3 100644
2900 +--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
2901 ++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
2902 +@@ -1474,18 +1474,31 @@ static const struct adreno_gpu_funcs funcs = {
2903 + static void check_speed_bin(struct device *dev)
2904 + {
2905 + struct nvmem_cell *cell;
2906 +- u32 bin, val;
2907 ++ u32 val;
2908 ++
2909 ++ /*
2910 ++ * If the OPP table specifies a opp-supported-hw property then we have
2911 ++ * to set something with dev_pm_opp_set_supported_hw() or the table
2912 ++ * doesn't get populated so pick an arbitrary value that should
2913 ++ * ensure the default frequencies are selected but not conflict with any
2914 ++ * actual bins
2915 ++ */
2916 ++ val = 0x80;
2917 +
2918 + cell = nvmem_cell_get(dev, "speed_bin");
2919 +
2920 +- /* If a nvmem cell isn't defined, nothing to do */
2921 +- if (IS_ERR(cell))
2922 +- return;
2923 ++ if (!IS_ERR(cell)) {
2924 ++ void *buf = nvmem_cell_read(cell, NULL);
2925 ++
2926 ++ if (!IS_ERR(buf)) {
2927 ++ u8 bin = *((u8 *) buf);
2928 +
2929 +- bin = *((u32 *) nvmem_cell_read(cell, NULL));
2930 +- nvmem_cell_put(cell);
2931 ++ val = (1 << bin);
2932 ++ kfree(buf);
2933 ++ }
2934 +
2935 +- val = (1 << bin);
2936 ++ nvmem_cell_put(cell);
2937 ++ }
2938 +
2939 + dev_pm_opp_set_supported_hw(dev, &val, 1);
2940 + }
2941 +diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
2942 +index 7f45486b6650b..3ba3ae9749bec 100644
2943 +--- a/drivers/gpu/drm/msm/msm_drv.c
2944 ++++ b/drivers/gpu/drm/msm/msm_drv.c
2945 +@@ -495,8 +495,10 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
2946 + if (!dev->dma_parms) {
2947 + dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
2948 + GFP_KERNEL);
2949 +- if (!dev->dma_parms)
2950 +- return -ENOMEM;
2951 ++ if (!dev->dma_parms) {
2952 ++ ret = -ENOMEM;
2953 ++ goto err_msm_uninit;
2954 ++ }
2955 + }
2956 + dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
2957 +
2958 +diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
2959 +index e06ea8c8184cb..1bb0a9f6fa730 100644
2960 +--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
2961 ++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
2962 +@@ -909,8 +909,10 @@ nv50_mstc_detect(struct drm_connector *connector, bool force)
2963 + return connector_status_disconnected;
2964 +
2965 + ret = pm_runtime_get_sync(connector->dev->dev);
2966 +- if (ret < 0 && ret != -EACCES)
2967 ++ if (ret < 0 && ret != -EACCES) {
2968 ++ pm_runtime_put_autosuspend(connector->dev->dev);
2969 + return connector_status_disconnected;
2970 ++ }
2971 +
2972 + conn_status = drm_dp_mst_detect_port(connector, mstc->port->mgr,
2973 + mstc->port);
2974 +diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
2975 +index 9635704a1d864..4561a786fab07 100644
2976 +--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
2977 ++++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
2978 +@@ -161,8 +161,11 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf,
2979 + }
2980 +
2981 + ret = pm_runtime_get_sync(drm->dev);
2982 +- if (ret < 0 && ret != -EACCES)
2983 ++ if (ret < 0 && ret != -EACCES) {
2984 ++ pm_runtime_put_autosuspend(drm->dev);
2985 + return ret;
2986 ++ }
2987 ++
2988 + ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args));
2989 + pm_runtime_put_autosuspend(drm->dev);
2990 + if (ret < 0)
2991 +diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
2992 +index 791f970714ed6..a98fccb0d32f9 100644
2993 +--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
2994 ++++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
2995 +@@ -82,8 +82,10 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
2996 + return ret;
2997 +
2998 + ret = pm_runtime_get_sync(dev);
2999 +- if (ret < 0 && ret != -EACCES)
3000 ++ if (ret < 0 && ret != -EACCES) {
3001 ++ pm_runtime_put_autosuspend(dev);
3002 + goto out;
3003 ++ }
3004 +
3005 + ret = nouveau_vma_new(nvbo, &cli->vmm, &vma);
3006 + pm_runtime_mark_last_busy(dev);
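
The three nouveau hunks above (dispnv50/disp.c, nouveau_debugfs.c, nouveau_gem.c) fix the same reference leak: pm_runtime_get_sync() raises the device usage counter even when it returns an error, so bailing out without a put leaves the device pinned in the resumed state forever. A minimal sketch of the corrected pattern for a generic struct device:

    #include <linux/pm_runtime.h>

    static int do_hw_work(struct device *dev)
    {
            int ret;

            ret = pm_runtime_get_sync(dev); /* counter bumped even on failure */
            if (ret < 0) {
                    pm_runtime_put_autosuspend(dev); /* rebalance before bailing */
                    return ret;
            }

            /* ... touch the now-resumed hardware ... */

            pm_runtime_mark_last_busy(dev);
            pm_runtime_put_autosuspend(dev);
            return 0;
    }

Later kernels add pm_runtime_resume_and_get(), which drops the reference on failure itself, precisely because this pattern is so easy to get wrong.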
3007 +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
3008 +index 9b91da09dc5f8..8d9812a51ef63 100644
3009 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
3010 ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
3011 +@@ -101,9 +101,13 @@ platform_init(struct nvkm_bios *bios, const char *name)
3012 + else
3013 + return ERR_PTR(-ENODEV);
3014 +
3015 ++ if (!pdev->rom || pdev->romlen == 0)
3016 ++ return ERR_PTR(-ENODEV);
3017 ++
3018 + if ((priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
3019 ++ priv->size = pdev->romlen;
3020 + if (ret = -ENODEV,
3021 +- (priv->rom = pci_platform_rom(pdev, &priv->size)))
3022 ++ (priv->rom = ioremap(pdev->rom, pdev->romlen)))
3023 + return priv;
3024 + kfree(priv);
3025 + }
3026 +@@ -111,11 +115,20 @@ platform_init(struct nvkm_bios *bios, const char *name)
3027 + return ERR_PTR(ret);
3028 + }
3029 +
3030 ++static void
3031 ++platform_fini(void *data)
3032 ++{
3033 ++ struct priv *priv = data;
3034 ++
3035 ++ iounmap(priv->rom);
3036 ++ kfree(priv);
3037 ++}
3038 ++
3039 + const struct nvbios_source
3040 + nvbios_platform = {
3041 + .name = "PLATFORM",
3042 + .init = platform_init,
3043 +- .fini = (void(*)(void *))kfree,
3044 ++ .fini = platform_fini,
3045 + .read = pcirom_read,
3046 + .rw = true,
3047 + };
3048 +diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
3049 +index 3bfb95d230e0e..d8fb686c1fda9 100644
3050 +--- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
3051 ++++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
3052 +@@ -193,7 +193,7 @@ static int __init omapdss_boot_init(void)
3053 + dss = of_find_matching_node(NULL, omapdss_of_match);
3054 +
3055 + if (dss == NULL || !of_device_is_available(dss))
3056 +- return 0;
3057 ++ goto put_node;
3058 +
3059 + omapdss_walk_device(dss, true);
3060 +
3061 +@@ -218,6 +218,8 @@ static int __init omapdss_boot_init(void)
3062 + kfree(n);
3063 + }
3064 +
3065 ++put_node:
3066 ++ of_node_put(dss);
3067 + return 0;
3068 + }
3069 +
3070 +diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
3071 +index 04c0ed41374f1..dd0528cf98183 100644
3072 +--- a/drivers/gpu/drm/radeon/radeon_bios.c
3073 ++++ b/drivers/gpu/drm/radeon/radeon_bios.c
3074 +@@ -104,25 +104,33 @@ static bool radeon_read_bios(struct radeon_device *rdev)
3075 +
3076 + static bool radeon_read_platform_bios(struct radeon_device *rdev)
3077 + {
3078 +- uint8_t __iomem *bios;
3079 +- size_t size;
3080 ++ phys_addr_t rom = rdev->pdev->rom;
3081 ++ size_t romlen = rdev->pdev->romlen;
3082 ++ void __iomem *bios;
3083 +
3084 + rdev->bios = NULL;
3085 +
3086 +- bios = pci_platform_rom(rdev->pdev, &size);
3087 +- if (!bios) {
3088 ++ if (!rom || romlen == 0)
3089 + return false;
3090 +- }
3091 +
3092 +- if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
3093 ++ rdev->bios = kzalloc(romlen, GFP_KERNEL);
3094 ++ if (!rdev->bios)
3095 + return false;
3096 +- }
3097 +- rdev->bios = kmemdup(bios, size, GFP_KERNEL);
3098 +- if (rdev->bios == NULL) {
3099 +- return false;
3100 +- }
3101 ++
3102 ++ bios = ioremap(rom, romlen);
3103 ++ if (!bios)
3104 ++ goto free_bios;
3105 ++
3106 ++ memcpy_fromio(rdev->bios, bios, romlen);
3107 ++ iounmap(bios);
3108 ++
3109 ++ if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa)
3110 ++ goto free_bios;
3111 +
3112 + return true;
3113 ++free_bios:
3114 ++ kfree(rdev->bios);
3115 ++ return false;
3116 + }
3117 +
3118 + #ifdef CONFIG_ACPI
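
Both the nouveau shadowpci hunk and this radeon hunk drop the old pci_platform_rom() helper in favour of mapping pdev->rom directly, and radeon keeps the check that the copied image starts with the mandatory 0x55 0xAA PCI expansion-ROM signature. A minimal sketch of that sequence, assuming the rom/romlen pair has already been validated as the patch does:

    #include <linux/io.h>
    #include <linux/slab.h>

    /* Sketch: copy a platform-provided option ROM into kernel memory
     * and verify the PCI expansion-ROM signature. */
    static u8 *copy_platform_rom(phys_addr_t rom, size_t romlen)
    {
            void __iomem *src;
            u8 *dst;

            dst = kzalloc(romlen, GFP_KERNEL);
            if (!dst)
                    return NULL;

            src = ioremap(rom, romlen);
            if (!src)
                    goto free_dst;

            memcpy_fromio(dst, src, romlen);        /* MMIO-safe copy */
            iounmap(src);

            if (dst[0] != 0x55 || dst[1] != 0xaa)   /* ROM signature */
                    goto free_dst;
            return dst;

    free_dst:
            kfree(dst);
            return NULL;
    }

The nouveau variant instead keeps the mapping alive for later reads and pairs it with the new platform_fini() that calls iounmap(); either way the mapping must eventually be released, which the old kfree-only .fini could not do.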
3119 +diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.h b/drivers/gpu/drm/sun4i/sun8i_csc.h
3120 +index 880e8fbb08556..242752b2d328c 100644
3121 +--- a/drivers/gpu/drm/sun4i/sun8i_csc.h
3122 ++++ b/drivers/gpu/drm/sun4i/sun8i_csc.h
3123 +@@ -14,7 +14,7 @@ struct sun8i_mixer;
3124 +
3125 + /* VI channel CSC units offsets */
3126 + #define CCSC00_OFFSET 0xAA050
3127 +-#define CCSC01_OFFSET 0xFA000
3128 ++#define CCSC01_OFFSET 0xFA050
3129 + #define CCSC10_OFFSET 0xA0000
3130 + #define CCSC11_OFFSET 0xF0000
3131 +
3132 +diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
3133 +index 86b98856756d9..1161662664577 100644
3134 +--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
3135 ++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
3136 +@@ -1134,6 +1134,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi)
3137 + card->num_links = 1;
3138 + card->name = "vc4-hdmi";
3139 + card->dev = dev;
3140 ++ card->owner = THIS_MODULE;
3141 +
3142 + /*
3143 + * Be careful, snd_soc_register_card() calls dev_set_drvdata() and
3144 +diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
3145 +index f225bef1e043c..41dd0a08a625c 100644
3146 +--- a/drivers/i2c/i2c-core-base.c
3147 ++++ b/drivers/i2c/i2c-core-base.c
3148 +@@ -1292,8 +1292,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
3149 +
3150 + /* create pre-declared device nodes */
3151 + of_i2c_register_devices(adap);
3152 +- i2c_acpi_register_devices(adap);
3153 + i2c_acpi_install_space_handler(adap);
3154 ++ i2c_acpi_register_devices(adap);
3155 +
3156 + if (adap->nr < __i2c_first_dynamic_bus_num)
3157 + i2c_scan_static_board_info(adap);
3158 +diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
3159 +index 64f206e11d497..4ebf63360a697 100644
3160 +--- a/drivers/infiniband/core/cm.c
3161 ++++ b/drivers/infiniband/core/cm.c
3162 +@@ -1100,14 +1100,22 @@ retest:
3163 + break;
3164 + }
3165 +
3166 +- spin_lock_irq(&cm.lock);
3167 ++ spin_lock_irq(&cm_id_priv->lock);
3168 ++ spin_lock(&cm.lock);
3169 ++ /* Required for cleanup paths related to cm_req_handler() */
3170 ++ if (cm_id_priv->timewait_info) {
3171 ++ cm_cleanup_timewait(cm_id_priv->timewait_info);
3172 ++ kfree(cm_id_priv->timewait_info);
3173 ++ cm_id_priv->timewait_info = NULL;
3174 ++ }
3175 + if (!list_empty(&cm_id_priv->altr_list) &&
3176 + (!cm_id_priv->altr_send_port_not_ready))
3177 + list_del(&cm_id_priv->altr_list);
3178 + if (!list_empty(&cm_id_priv->prim_list) &&
3179 + (!cm_id_priv->prim_send_port_not_ready))
3180 + list_del(&cm_id_priv->prim_list);
3181 +- spin_unlock_irq(&cm.lock);
3182 ++ spin_unlock(&cm.lock);
3183 ++ spin_unlock_irq(&cm_id_priv->lock);
3184 +
3185 + cm_free_id(cm_id->local_id);
3186 + cm_deref_id(cm_id_priv);
3187 +@@ -1424,7 +1432,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
3188 + /* Verify that we're not in timewait. */
3189 + cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3190 + spin_lock_irqsave(&cm_id_priv->lock, flags);
3191 +- if (cm_id->state != IB_CM_IDLE) {
3192 ++ if (cm_id->state != IB_CM_IDLE || WARN_ON(cm_id_priv->timewait_info)) {
3193 + spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3194 + ret = -EINVAL;
3195 + goto out;
3196 +@@ -1442,12 +1450,12 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
3197 + param->ppath_sgid_attr, &cm_id_priv->av,
3198 + cm_id_priv);
3199 + if (ret)
3200 +- goto error1;
3201 ++ goto out;
3202 + if (param->alternate_path) {
3203 + ret = cm_init_av_by_path(param->alternate_path, NULL,
3204 + &cm_id_priv->alt_av, cm_id_priv);
3205 + if (ret)
3206 +- goto error1;
3207 ++ goto out;
3208 + }
3209 + cm_id->service_id = param->service_id;
3210 + cm_id->service_mask = ~cpu_to_be64(0);
3211 +@@ -1465,7 +1473,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
3212 +
3213 + ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
3214 + if (ret)
3215 +- goto error1;
3216 ++ goto out;
3217 +
3218 + req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
3219 + cm_format_req(req_msg, cm_id_priv, param);
3220 +@@ -1488,7 +1496,6 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
3221 + return 0;
3222 +
3223 + error2: cm_free_msg(cm_id_priv->msg);
3224 +-error1: kfree(cm_id_priv->timewait_info);
3225 + out: return ret;
3226 + }
3227 + EXPORT_SYMBOL(ib_send_cm_req);
3228 +@@ -1973,7 +1980,7 @@ static int cm_req_handler(struct cm_work *work)
3229 + pr_debug("%s: local_id %d, no listen_cm_id_priv\n", __func__,
3230 + be32_to_cpu(cm_id->local_id));
3231 + ret = -EINVAL;
3232 +- goto free_timeinfo;
3233 ++ goto destroy;
3234 + }
3235 +
3236 + cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
3237 +@@ -2057,8 +2064,6 @@ static int cm_req_handler(struct cm_work *work)
3238 + rejected:
3239 + atomic_dec(&cm_id_priv->refcount);
3240 + cm_deref_id(listen_cm_id_priv);
3241 +-free_timeinfo:
3242 +- kfree(cm_id_priv->timewait_info);
3243 + destroy:
3244 + ib_destroy_cm_id(cm_id);
3245 + return ret;
3246 +diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
3247 +index 16145b0a14583..3fd3dfa3478b7 100644
3248 +--- a/drivers/infiniband/hw/cxgb4/cm.c
3249 ++++ b/drivers/infiniband/hw/cxgb4/cm.c
3250 +@@ -3293,7 +3293,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
3251 + if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) {
3252 + err = pick_local_ipaddrs(dev, cm_id);
3253 + if (err)
3254 +- goto fail2;
3255 ++ goto fail3;
3256 + }
3257 +
3258 + /* find a route */
3259 +@@ -3315,7 +3315,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
3260 + if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
3261 + err = pick_local_ip6addrs(dev, cm_id);
3262 + if (err)
3263 +- goto fail2;
3264 ++ goto fail3;
3265 + }
3266 +
3267 + /* find a route */
3268 +diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
3269 +index 4321b9e3dbb4b..0273d0404e740 100644
3270 +--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
3271 ++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
3272 +@@ -2071,9 +2071,9 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
3273 + dst = i40iw_get_dst_ipv6(&src_addr, &dst_addr);
3274 + if (!dst || dst->error) {
3275 + if (dst) {
3276 +- dst_release(dst);
3277 + i40iw_pr_err("ip6_route_output returned dst->error = %d\n",
3278 + dst->error);
3279 ++ dst_release(dst);
3280 + }
3281 + return rc;
3282 + }
3283 +diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
3284 +index 2566715773675..e908dfbaa1378 100644
3285 +--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
3286 ++++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
3287 +@@ -460,10 +460,10 @@ qedr_addr6_resolve(struct qedr_dev *dev,
3288 +
3289 + if ((!dst) || dst->error) {
3290 + if (dst) {
3291 +- dst_release(dst);
3292 + DP_ERR(dev,
3293 + "ip6_route_output returned dst->error = %d\n",
3294 + dst->error);
3295 ++ dst_release(dst);
3296 + }
3297 + return -EINVAL;
3298 + }
3299 +diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
3300 +index 94dedabe648c2..6589ff51eaf5c 100644
3301 +--- a/drivers/infiniband/sw/rxe/rxe.c
3302 ++++ b/drivers/infiniband/sw/rxe/rxe.c
3303 +@@ -121,6 +121,8 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
3304 + rxe->attr.max_fast_reg_page_list_len = RXE_MAX_FMR_PAGE_LIST_LEN;
3305 + rxe->attr.max_pkeys = RXE_MAX_PKEYS;
3306 + rxe->attr.local_ca_ack_delay = RXE_LOCAL_CA_ACK_DELAY;
3307 ++ addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid,
3308 ++ rxe->ndev->dev_addr);
3309 +
3310 + rxe->max_ucontext = RXE_MAX_UCONTEXT;
3311 + }
3312 +diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
3313 +index 230697fa31fe3..8a22ab8b29e9b 100644
3314 +--- a/drivers/infiniband/sw/rxe/rxe_qp.c
3315 ++++ b/drivers/infiniband/sw/rxe/rxe_qp.c
3316 +@@ -583,15 +583,16 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
3317 + int err;
3318 +
3319 + if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
3320 +- int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);
3321 ++ int max_rd_atomic = attr->max_rd_atomic ?
3322 ++ roundup_pow_of_two(attr->max_rd_atomic) : 0;
3323 +
3324 + qp->attr.max_rd_atomic = max_rd_atomic;
3325 + atomic_set(&qp->req.rd_atomic, max_rd_atomic);
3326 + }
3327 +
3328 + if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
3329 +- int max_dest_rd_atomic =
3330 +- __roundup_pow_of_two(attr->max_dest_rd_atomic);
3331 ++ int max_dest_rd_atomic = attr->max_dest_rd_atomic ?
3332 ++ roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;
3333 +
3334 + qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;
3335 +
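
The rxe hunk above replaces __roundup_pow_of_two() with roundup_pow_of_two() behind a zero check: the helper's result is explicitly undefined for an argument of 0 (the ilog2()-based implementation ends up shifting by a bogus count), so a zero request has to be passed through untouched rather than rounded. The guard in isolation:

    #include <linux/log2.h>

    static inline unsigned int safe_roundup_pow_of_two(unsigned int n)
    {
            /* roundup_pow_of_two(0) is undefined; keep 0 as 0 */
            return n ? roundup_pow_of_two(n) : 0;
    }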
3336 +diff --git a/drivers/leds/leds-mlxreg.c b/drivers/leds/leds-mlxreg.c
3337 +index 1ee48cb21df95..022e973dc7c31 100644
3338 +--- a/drivers/leds/leds-mlxreg.c
3339 ++++ b/drivers/leds/leds-mlxreg.c
3340 +@@ -209,8 +209,8 @@ static int mlxreg_led_config(struct mlxreg_led_priv_data *priv)
3341 + brightness = LED_OFF;
3342 + led_data->base_color = MLXREG_LED_GREEN_SOLID;
3343 + }
3344 +- sprintf(led_data->led_cdev_name, "%s:%s", "mlxreg",
3345 +- data->label);
3346 ++ snprintf(led_data->led_cdev_name, sizeof(led_data->led_cdev_name),
3347 ++ "mlxreg:%s", data->label);
3348 + led_cdev->name = led_data->led_cdev_name;
3349 + led_cdev->brightness = brightness;
3350 + led_cdev->max_brightness = LED_ON;
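
The leds-mlxreg hunk swaps sprintf() for snprintf() so an overlong label from platform data can no longer write past led_cdev_name. A minimal sketch of the bounded-formatting pattern, with a hypothetical fixed-size buffer:

    #include <linux/kernel.h>

    struct led_name {
            char buf[24];                   /* hypothetical capacity */
    };

    static void set_led_name(struct led_name *n, const char *label)
    {
            /* snprintf() always NUL-terminates and never writes more
             * than sizeof(n->buf) bytes; overlong labels are truncated. */
            snprintf(n->buf, sizeof(n->buf), "mlxreg:%s", label);
    }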
3351 +diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
3352 +index 1cc6ae3e058c6..6a380ed4919a0 100644
3353 +--- a/drivers/md/bcache/bcache.h
3354 ++++ b/drivers/md/bcache/bcache.h
3355 +@@ -585,6 +585,7 @@ struct cache_set {
3356 + */
3357 + wait_queue_head_t btree_cache_wait;
3358 + struct task_struct *btree_cache_alloc_lock;
3359 ++ spinlock_t btree_cannibalize_lock;
3360 +
3361 + /*
3362 + * When we free a btree node, we increment the gen of the bucket the
3363 +diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
3364 +index d320574b9a4c8..e388e7bb7b5db 100644
3365 +--- a/drivers/md/bcache/btree.c
3366 ++++ b/drivers/md/bcache/btree.c
3367 +@@ -876,15 +876,17 @@ out:
3368 +
3369 + static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
3370 + {
3371 +- struct task_struct *old;
3372 +-
3373 +- old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
3374 +- if (old && old != current) {
3375 ++ spin_lock(&c->btree_cannibalize_lock);
3376 ++ if (likely(c->btree_cache_alloc_lock == NULL)) {
3377 ++ c->btree_cache_alloc_lock = current;
3378 ++ } else if (c->btree_cache_alloc_lock != current) {
3379 + if (op)
3380 + prepare_to_wait(&c->btree_cache_wait, &op->wait,
3381 + TASK_UNINTERRUPTIBLE);
3382 ++ spin_unlock(&c->btree_cannibalize_lock);
3383 + return -EINTR;
3384 + }
3385 ++ spin_unlock(&c->btree_cannibalize_lock);
3386 +
3387 + return 0;
3388 + }
3389 +@@ -919,10 +921,12 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
3390 + */
3391 + static void bch_cannibalize_unlock(struct cache_set *c)
3392 + {
3393 ++ spin_lock(&c->btree_cannibalize_lock);
3394 + if (c->btree_cache_alloc_lock == current) {
3395 + c->btree_cache_alloc_lock = NULL;
3396 + wake_up(&c->btree_cache_wait);
3397 + }
3398 ++ spin_unlock(&c->btree_cannibalize_lock);
3399 + }
3400 +
3401 + static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
3402 +diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
3403 +index 825bfde10c694..7787ec42f81e1 100644
3404 +--- a/drivers/md/bcache/super.c
3405 ++++ b/drivers/md/bcache/super.c
3406 +@@ -1737,6 +1737,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
3407 + sema_init(&c->sb_write_mutex, 1);
3408 + mutex_init(&c->bucket_lock);
3409 + init_waitqueue_head(&c->btree_cache_wait);
3410 ++ spin_lock_init(&c->btree_cannibalize_lock);
3411 + init_waitqueue_head(&c->bucket_wait);
3412 + init_waitqueue_head(&c->gc_wait);
3413 + sema_init(&c->uuid_write_mutex, 1);
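
The three bcache hunks above replace a bare cmpxchg() on btree_cache_alloc_lock with a field guarded by the new btree_cannibalize_lock spinlock: the unlock side tests and clears the owner in two steps, so acquire and release must serialize on one lock or a waiter can race against a half-cleared owner. A minimal sketch of the owner/try-acquire pattern, with generic names:

    #include <linux/sched.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(owner_lock);
    static struct task_struct *owner;

    static bool try_acquire(void)
    {
            bool ok;

            spin_lock(&owner_lock);
            ok = (!owner || owner == current);      /* re-entrant for holder */
            if (ok)
                    owner = current;
            spin_unlock(&owner_lock);
            return ok;
    }

    static void release(void)
    {
            spin_lock(&owner_lock);
            if (owner == current)   /* check-and-clear must not race */
                    owner = NULL;
            spin_unlock(&owner_lock);
    }

In the real code the failed-acquire path additionally parks the caller on btree_cache_wait before returning -EINTR, and the release side wakes that queue.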
3414 +diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
3415 +index 097c42d3f8c26..df0c7243eafe4 100644
3416 +--- a/drivers/media/dvb-frontends/tda10071.c
3417 ++++ b/drivers/media/dvb-frontends/tda10071.c
3418 +@@ -483,10 +483,11 @@ static int tda10071_read_status(struct dvb_frontend *fe, enum fe_status *status)
3419 + goto error;
3420 +
3421 + if (dev->delivery_system == SYS_DVBS) {
3422 +- dev->dvbv3_ber = buf[0] << 24 | buf[1] << 16 |
3423 +- buf[2] << 8 | buf[3] << 0;
3424 +- dev->post_bit_error += buf[0] << 24 | buf[1] << 16 |
3425 +- buf[2] << 8 | buf[3] << 0;
3426 ++ u32 bit_error = buf[0] << 24 | buf[1] << 16 |
3427 ++ buf[2] << 8 | buf[3] << 0;
3428 ++
3429 ++ dev->dvbv3_ber = bit_error;
3430 ++ dev->post_bit_error += bit_error;
3431 + c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
3432 + c->post_bit_error.stat[0].uvalue = dev->post_bit_error;
3433 + dev->block_error += buf[4] << 8 | buf[5] << 0;
3434 +diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
3435 +index 4731e1c72f960..0a434bdce3b3b 100644
3436 +--- a/drivers/media/i2c/smiapp/smiapp-core.c
3437 ++++ b/drivers/media/i2c/smiapp/smiapp-core.c
3438 +@@ -2337,11 +2337,12 @@ smiapp_sysfs_nvm_read(struct device *dev, struct device_attribute *attr,
3439 + if (rval < 0) {
3440 + if (rval != -EBUSY && rval != -EAGAIN)
3441 + pm_runtime_set_active(&client->dev);
3442 +- pm_runtime_put(&client->dev);
3443 ++ pm_runtime_put_noidle(&client->dev);
3444 + return -ENODEV;
3445 + }
3446 +
3447 + if (smiapp_read_nvm(sensor, sensor->nvm)) {
3448 ++ pm_runtime_put(&client->dev);
3449 + dev_err(&client->dev, "nvm read failed\n");
3450 + return -ENODEV;
3451 + }
3452 +diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
3453 +index ed518b1f82e4a..d04ed438a45de 100644
3454 +--- a/drivers/media/media-device.c
3455 ++++ b/drivers/media/media-device.c
3456 +@@ -568,6 +568,38 @@ static void media_device_release(struct media_devnode *devnode)
3457 + dev_dbg(devnode->parent, "Media device released\n");
3458 + }
3459 +
3460 ++static void __media_device_unregister_entity(struct media_entity *entity)
3461 ++{
3462 ++ struct media_device *mdev = entity->graph_obj.mdev;
3463 ++ struct media_link *link, *tmp;
3464 ++ struct media_interface *intf;
3465 ++ unsigned int i;
3466 ++
3467 ++ ida_free(&mdev->entity_internal_idx, entity->internal_idx);
3468 ++
3469 ++ /* Remove all interface links pointing to this entity */
3470 ++ list_for_each_entry(intf, &mdev->interfaces, graph_obj.list) {
3471 ++ list_for_each_entry_safe(link, tmp, &intf->links, list) {
3472 ++ if (link->entity == entity)
3473 ++ __media_remove_intf_link(link);
3474 ++ }
3475 ++ }
3476 ++
3477 ++ /* Remove all data links that belong to this entity */
3478 ++ __media_entity_remove_links(entity);
3479 ++
3480 ++ /* Remove all pads that belong to this entity */
3481 ++ for (i = 0; i < entity->num_pads; i++)
3482 ++ media_gobj_destroy(&entity->pads[i].graph_obj);
3483 ++
3484 ++ /* Remove the entity */
3485 ++ media_gobj_destroy(&entity->graph_obj);
3486 ++
3487 ++ /* invoke entity_notify callbacks to handle entity removal?? */
3488 ++
3489 ++ entity->graph_obj.mdev = NULL;
3490 ++}
3491 ++
3492 + /**
3493 + * media_device_register_entity - Register an entity with a media device
3494 + * @mdev: The media device
3495 +@@ -625,6 +657,7 @@ int __must_check media_device_register_entity(struct media_device *mdev,
3496 + */
3497 + ret = media_graph_walk_init(&new, mdev);
3498 + if (ret) {
3499 ++ __media_device_unregister_entity(entity);
3500 + mutex_unlock(&mdev->graph_mutex);
3501 + return ret;
3502 + }
3503 +@@ -637,38 +670,6 @@ int __must_check media_device_register_entity(struct media_device *mdev,
3504 + }
3505 + EXPORT_SYMBOL_GPL(media_device_register_entity);
3506 +
3507 +-static void __media_device_unregister_entity(struct media_entity *entity)
3508 +-{
3509 +- struct media_device *mdev = entity->graph_obj.mdev;
3510 +- struct media_link *link, *tmp;
3511 +- struct media_interface *intf;
3512 +- unsigned int i;
3513 +-
3514 +- ida_free(&mdev->entity_internal_idx, entity->internal_idx);
3515 +-
3516 +- /* Remove all interface links pointing to this entity */
3517 +- list_for_each_entry(intf, &mdev->interfaces, graph_obj.list) {
3518 +- list_for_each_entry_safe(link, tmp, &intf->links, list) {
3519 +- if (link->entity == entity)
3520 +- __media_remove_intf_link(link);
3521 +- }
3522 +- }
3523 +-
3524 +- /* Remove all data links that belong to this entity */
3525 +- __media_entity_remove_links(entity);
3526 +-
3527 +- /* Remove all pads that belong to this entity */
3528 +- for (i = 0; i < entity->num_pads; i++)
3529 +- media_gobj_destroy(&entity->pads[i].graph_obj);
3530 +-
3531 +- /* Remove the entity */
3532 +- media_gobj_destroy(&entity->graph_obj);
3533 +-
3534 +- /* invoke entity_notify callbacks to handle entity removal?? */
3535 +-
3536 +- entity->graph_obj.mdev = NULL;
3537 +-}
3538 +-
3539 + void media_device_unregister_entity(struct media_entity *entity)
3540 + {
3541 + struct media_device *mdev = entity->graph_obj.mdev;
3542 +diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
3543 +index be3155275a6ba..d945323fc437d 100644
3544 +--- a/drivers/media/platform/ti-vpe/cal.c
3545 ++++ b/drivers/media/platform/ti-vpe/cal.c
3546 +@@ -684,12 +684,13 @@ static void pix_proc_config(struct cal_ctx *ctx)
3547 + }
3548 +
3549 + static void cal_wr_dma_config(struct cal_ctx *ctx,
3550 +- unsigned int width)
3551 ++ unsigned int width, unsigned int height)
3552 + {
3553 + u32 val;
3554 +
3555 + val = reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port));
3556 + set_field(&val, ctx->csi2_port, CAL_WR_DMA_CTRL_CPORT_MASK);
3557 ++ set_field(&val, height, CAL_WR_DMA_CTRL_YSIZE_MASK);
3558 + set_field(&val, CAL_WR_DMA_CTRL_DTAG_PIX_DAT,
3559 + CAL_WR_DMA_CTRL_DTAG_MASK);
3560 + set_field(&val, CAL_WR_DMA_CTRL_MODE_CONST,
3561 +@@ -1315,7 +1316,8 @@ static int cal_start_streaming(struct vb2_queue *vq, unsigned int count)
3562 + csi2_lane_config(ctx);
3563 + csi2_ctx_config(ctx);
3564 + pix_proc_config(ctx);
3565 +- cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline);
3566 ++ cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline,
3567 ++ ctx->v_fmt.fmt.pix.height);
3568 + cal_wr_dma_addr(ctx, addr);
3569 + csi2_ppi_enable(ctx);
3570 +
3571 +diff --git a/drivers/media/usb/go7007/go7007-usb.c b/drivers/media/usb/go7007/go7007-usb.c
3572 +index 19c6a0354ce00..b84a6f6548610 100644
3573 +--- a/drivers/media/usb/go7007/go7007-usb.c
3574 ++++ b/drivers/media/usb/go7007/go7007-usb.c
3575 +@@ -1052,6 +1052,7 @@ static int go7007_usb_probe(struct usb_interface *intf,
3576 + struct go7007_usb *usb;
3577 + const struct go7007_usb_board *board;
3578 + struct usb_device *usbdev = interface_to_usbdev(intf);
3579 ++ struct usb_host_endpoint *ep;
3580 + unsigned num_i2c_devs;
3581 + char *name;
3582 + int video_pipe, i, v_urb_len;
3583 +@@ -1148,7 +1149,8 @@ static int go7007_usb_probe(struct usb_interface *intf,
3584 + if (usb->intr_urb->transfer_buffer == NULL)
3585 + goto allocfail;
3586 +
3587 +- if (go->board_id == GO7007_BOARDID_SENSORAY_2250)
3588 ++ ep = usb->usbdev->ep_in[4];
3589 ++ if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK)
3590 + usb_fill_bulk_urb(usb->intr_urb, usb->usbdev,
3591 + usb_rcvbulkpipe(usb->usbdev, 4),
3592 + usb->intr_urb->transfer_buffer, 2*sizeof(u16),
3593 +diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
3594 +index 182973df1aed4..77c965c6a65f1 100644
3595 +--- a/drivers/mfd/mfd-core.c
3596 ++++ b/drivers/mfd/mfd-core.c
3597 +@@ -32,6 +32,11 @@ int mfd_cell_enable(struct platform_device *pdev)
3598 + const struct mfd_cell *cell = mfd_get_cell(pdev);
3599 + int err = 0;
3600 +
3601 ++ if (!cell->enable) {
3602 ++ dev_dbg(&pdev->dev, "No .enable() call-back registered\n");
3603 ++ return 0;
3604 ++ }
3605 ++
3606 + /* only call enable hook if the cell wasn't previously enabled */
3607 + if (atomic_inc_return(cell->usage_count) == 1)
3608 + err = cell->enable(pdev);
3609 +@@ -49,6 +54,11 @@ int mfd_cell_disable(struct platform_device *pdev)
3610 + const struct mfd_cell *cell = mfd_get_cell(pdev);
3611 + int err = 0;
3612 +
3613 ++ if (!cell->disable) {
3614 ++ dev_dbg(&pdev->dev, "No .disable() call-back registered\n");
3615 ++ return 0;
3616 ++ }
3617 ++
3618 + /* only disable if no other clients are using it */
3619 + if (atomic_dec_return(cell->usage_count) == 0)
3620 + err = cell->disable(pdev);
3621 +diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
3622 +index 5ca53e225382d..4b18034537f53 100644
3623 +--- a/drivers/mmc/core/mmc.c
3624 ++++ b/drivers/mmc/core/mmc.c
3625 +@@ -300,7 +300,7 @@ static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd)
3626 + }
3627 + }
3628 +
3629 +-static void mmc_part_add(struct mmc_card *card, unsigned int size,
3630 ++static void mmc_part_add(struct mmc_card *card, u64 size,
3631 + unsigned int part_cfg, char *name, int idx, bool ro,
3632 + int area_type)
3633 + {
3634 +@@ -316,7 +316,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
3635 + {
3636 + int idx;
3637 + u8 hc_erase_grp_sz, hc_wp_grp_sz;
3638 +- unsigned int part_size;
3639 ++ u64 part_size;
3640 +
3641 + /*
3642 + * General purpose partition feature support --
3643 +@@ -346,8 +346,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
3644 + (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
3645 + << 8) +
3646 + ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
3647 +- part_size *= (size_t)(hc_erase_grp_sz *
3648 +- hc_wp_grp_sz);
3649 ++ part_size *= (hc_erase_grp_sz * hc_wp_grp_sz);
3650 + mmc_part_add(card, part_size << 19,
3651 + EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
3652 + "gp%d", idx, false,
3653 +@@ -365,7 +364,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
3654 + static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
3655 + {
3656 + int err = 0, idx;
3657 +- unsigned int part_size;
3658 ++ u64 part_size;
3659 + struct device_node *np;
3660 + bool broken_hpi = false;
3661 +
3662 +diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
3663 +index 1dbc9554a0786..3ab75d3e2ce32 100644
3664 +--- a/drivers/mtd/chips/cfi_cmdset_0002.c
3665 ++++ b/drivers/mtd/chips/cfi_cmdset_0002.c
3666 +@@ -727,7 +727,6 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
3667 + kfree(mtd->eraseregions);
3668 + kfree(mtd);
3669 + kfree(cfi->cmdset_priv);
3670 +- kfree(cfi->cfiq);
3671 + return NULL;
3672 + }
3673 +
3674 +diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
3675 +index 3ea44cff9b759..c29205ee82e20 100644
3676 +--- a/drivers/mtd/cmdlinepart.c
3677 ++++ b/drivers/mtd/cmdlinepart.c
3678 +@@ -231,12 +231,29 @@ static int mtdpart_setup_real(char *s)
3679 + struct cmdline_mtd_partition *this_mtd;
3680 + struct mtd_partition *parts;
3681 + int mtd_id_len, num_parts;
3682 +- char *p, *mtd_id;
3683 ++ char *p, *mtd_id, *semicol;
3684 ++
3685 ++ /*
3686 ++ * Replace the first ';' with a NUL char so strrchr can work
3687 ++ * properly.
3688 ++ */
3689 ++ semicol = strchr(s, ';');
3690 ++ if (semicol)
3691 ++ *semicol = '\0';
3692 +
3693 + mtd_id = s;
3694 +
3695 +- /* fetch <mtd-id> */
3696 +- p = strchr(s, ':');
3697 ++ /*
3698 ++ * fetch <mtd-id>. We use strrchr to ignore all ':' that could
3699 ++ * be present in the MTD name; only the last one is interpreted
3700 ++ * as an <mtd-id>/<part-definition> separator.
3701 ++ */
3702 ++ p = strrchr(s, ':');
3703 ++
3704 ++ /* Restore the ';' now. */
3705 ++ if (semicol)
3706 ++ *semicol = ';';
3707 ++
3708 + if (!p) {
3709 + pr_err("no mtd-id\n");
3710 + return -EINVAL;
3711 +diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c
3712 +index a3f32f939cc17..6736777a41567 100644
3713 +--- a/drivers/mtd/nand/raw/omap_elm.c
3714 ++++ b/drivers/mtd/nand/raw/omap_elm.c
3715 +@@ -421,6 +421,7 @@ static int elm_probe(struct platform_device *pdev)
3716 + pm_runtime_enable(&pdev->dev);
3717 + if (pm_runtime_get_sync(&pdev->dev) < 0) {
3718 + ret = -EINVAL;
3719 ++ pm_runtime_put_sync(&pdev->dev);
3720 + pm_runtime_disable(&pdev->dev);
3721 + dev_err(&pdev->dev, "can't enable clock\n");
3722 + return ret;
3723 +diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
3724 +index 98f7d6be8d1fc..e08f6b4637dda 100644
3725 +--- a/drivers/mtd/ubi/fastmap-wl.c
3726 ++++ b/drivers/mtd/ubi/fastmap-wl.c
3727 +@@ -48,6 +48,13 @@ static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
3728 + return victim;
3729 + }
3730 +
3731 ++static inline void return_unused_peb(struct ubi_device *ubi,
3732 ++ struct ubi_wl_entry *e)
3733 ++{
3734 ++ wl_tree_add(e, &ubi->free);
3735 ++ ubi->free_count++;
3736 ++}
3737 ++
3738 + /**
3739 + * return_unused_pool_pebs - returns unused PEB to the free tree.
3740 + * @ubi: UBI device description object
3741 +@@ -61,23 +68,10 @@ static void return_unused_pool_pebs(struct ubi_device *ubi,
3742 +
3743 + for (i = pool->used; i < pool->size; i++) {
3744 + e = ubi->lookuptbl[pool->pebs[i]];
3745 +- wl_tree_add(e, &ubi->free);
3746 +- ubi->free_count++;
3747 ++ return_unused_peb(ubi, e);
3748 + }
3749 + }
3750 +
3751 +-static int anchor_pebs_available(struct rb_root *root)
3752 +-{
3753 +- struct rb_node *p;
3754 +- struct ubi_wl_entry *e;
3755 +-
3756 +- ubi_rb_for_each_entry(p, e, root, u.rb)
3757 +- if (e->pnum < UBI_FM_MAX_START)
3758 +- return 1;
3759 +-
3760 +- return 0;
3761 +-}
3762 +-
3763 + /**
3764 + * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
3765 + * @ubi: UBI device description object
3766 +@@ -286,8 +280,26 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
3767 + int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
3768 + {
3769 + struct ubi_work *wrk;
3770 ++ struct ubi_wl_entry *anchor;
3771 +
3772 + spin_lock(&ubi->wl_lock);
3773 ++
3774 ++ /* Do we already have an anchor? */
3775 ++ if (ubi->fm_anchor) {
3776 ++ spin_unlock(&ubi->wl_lock);
3777 ++ return 0;
3778 ++ }
3779 ++
3780 ++ /* See if we can find an anchor PEB on the list of free PEBs */
3781 ++ anchor = ubi_wl_get_fm_peb(ubi, 1);
3782 ++ if (anchor) {
3783 ++ ubi->fm_anchor = anchor;
3784 ++ spin_unlock(&ubi->wl_lock);
3785 ++ return 0;
3786 ++ }
3787 ++
3788 ++ /* No luck, trigger wear leveling to produce a new anchor PEB */
3789 ++ ubi->fm_do_produce_anchor = 1;
3790 + if (ubi->wl_scheduled) {
3791 + spin_unlock(&ubi->wl_lock);
3792 + return 0;
3793 +@@ -303,7 +315,6 @@ int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
3794 + return -ENOMEM;
3795 + }
3796 +
3797 +- wrk->anchor = 1;
3798 + wrk->func = &wear_leveling_worker;
3799 + __schedule_ubi_work(ubi, wrk);
3800 + return 0;
3801 +@@ -365,6 +376,11 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
3802 + return_unused_pool_pebs(ubi, &ubi->fm_pool);
3803 + return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
3804 +
3805 ++ if (ubi->fm_anchor) {
3806 ++ return_unused_peb(ubi, ubi->fm_anchor);
3807 ++ ubi->fm_anchor = NULL;
3808 ++ }
3809 ++
3810 + if (ubi->fm) {
3811 + for (i = 0; i < ubi->fm->used_blocks; i++)
3812 + kfree(ubi->fm->e[i]);
3813 +diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
3814 +index 8e292992f84c7..b88ef875236cc 100644
3815 +--- a/drivers/mtd/ubi/fastmap.c
3816 ++++ b/drivers/mtd/ubi/fastmap.c
3817 +@@ -1552,14 +1552,6 @@ int ubi_update_fastmap(struct ubi_device *ubi)
3818 + return 0;
3819 + }
3820 +
3821 +- ret = ubi_ensure_anchor_pebs(ubi);
3822 +- if (ret) {
3823 +- up_write(&ubi->fm_eba_sem);
3824 +- up_write(&ubi->work_sem);
3825 +- up_write(&ubi->fm_protect);
3826 +- return ret;
3827 +- }
3828 +-
3829 + new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
3830 + if (!new_fm) {
3831 + up_write(&ubi->fm_eba_sem);
3832 +@@ -1630,7 +1622,8 @@ int ubi_update_fastmap(struct ubi_device *ubi)
3833 + }
3834 +
3835 + spin_lock(&ubi->wl_lock);
3836 +- tmp_e = ubi_wl_get_fm_peb(ubi, 1);
3837 ++ tmp_e = ubi->fm_anchor;
3838 ++ ubi->fm_anchor = NULL;
3839 + spin_unlock(&ubi->wl_lock);
3840 +
3841 + if (old_fm) {
3842 +@@ -1682,6 +1675,9 @@ out_unlock:
3843 + up_write(&ubi->work_sem);
3844 + up_write(&ubi->fm_protect);
3845 + kfree(old_fm);
3846 ++
3847 ++ ubi_ensure_anchor_pebs(ubi);
3848 ++
3849 + return ret;
3850 +
3851 + err:
3852 +diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
3853 +index d47b9e436e673..d248ec371cc17 100644
3854 +--- a/drivers/mtd/ubi/ubi.h
3855 ++++ b/drivers/mtd/ubi/ubi.h
3856 +@@ -504,6 +504,8 @@ struct ubi_debug_info {
3857 + * @fm_work: fastmap work queue
3858 + * @fm_work_scheduled: non-zero if fastmap work was scheduled
3859 + * @fast_attach: non-zero if UBI was attached by fastmap
3860 ++ * @fm_anchor: The next anchor PEB to use for fastmap
3861 ++ * @fm_do_produce_anchor: If true, produce an anchor PEB in wl
3862 + *
3863 + * @used: RB-tree of used physical eraseblocks
3864 + * @erroneous: RB-tree of erroneous used physical eraseblocks
3865 +@@ -612,6 +614,8 @@ struct ubi_device {
3866 + struct work_struct fm_work;
3867 + int fm_work_scheduled;
3868 + int fast_attach;
3869 ++ struct ubi_wl_entry *fm_anchor;
3870 ++ int fm_do_produce_anchor;
3871 +
3872 + /* Wear-leveling sub-system's stuff */
3873 + struct rb_root used;
3874 +@@ -802,7 +806,6 @@ struct ubi_attach_info {
3875 + * @vol_id: the volume ID on which this erasure is being performed
3876 + * @lnum: the logical eraseblock number
3877 + * @torture: if the physical eraseblock has to be tortured
3878 +- * @anchor: produce a anchor PEB to by used by fastmap
3879 + *
3880 + * The @func pointer points to the worker function. If the @shutdown argument is
3881 + * not zero, the worker has to free the resources and exit immediately as the
3882 +@@ -818,7 +821,6 @@ struct ubi_work {
3883 + int vol_id;
3884 + int lnum;
3885 + int torture;
3886 +- int anchor;
3887 + };
3888 +
3889 + #include "debug.h"
3890 +diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
3891 +index 6f2ac865ff05e..80d64d7e7a8be 100644
3892 +--- a/drivers/mtd/ubi/wl.c
3893 ++++ b/drivers/mtd/ubi/wl.c
3894 +@@ -331,13 +331,6 @@ static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
3895 + }
3896 + }
3897 +
3898 +- /* If no fastmap has been written and this WL entry can be used
3899 +- * as anchor PEB, hold it back and return the second best WL entry
3900 +- * such that fastmap can use the anchor PEB later. */
3901 +- if (prev_e && !ubi->fm_disabled &&
3902 +- !ubi->fm && e->pnum < UBI_FM_MAX_START)
3903 +- return prev_e;
3904 +-
3905 + return e;
3906 + }
3907 +
3908 +@@ -648,9 +641,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
3909 + {
3910 + int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
3911 + int erase = 0, keep = 0, vol_id = -1, lnum = -1;
3912 +-#ifdef CONFIG_MTD_UBI_FASTMAP
3913 +- int anchor = wrk->anchor;
3914 +-#endif
3915 + struct ubi_wl_entry *e1, *e2;
3916 + struct ubi_vid_io_buf *vidb;
3917 + struct ubi_vid_hdr *vid_hdr;
3918 +@@ -690,11 +680,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
3919 + }
3920 +
3921 + #ifdef CONFIG_MTD_UBI_FASTMAP
3922 +- /* Check whether we need to produce an anchor PEB */
3923 +- if (!anchor)
3924 +- anchor = !anchor_pebs_available(&ubi->free);
3925 +-
3926 +- if (anchor) {
3927 ++ if (ubi->fm_do_produce_anchor) {
3928 + e1 = find_anchor_wl_entry(&ubi->used);
3929 + if (!e1)
3930 + goto out_cancel;
3931 +@@ -705,6 +691,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
3932 + self_check_in_wl_tree(ubi, e1, &ubi->used);
3933 + rb_erase(&e1->u.rb, &ubi->used);
3934 + dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
3935 ++ ubi->fm_do_produce_anchor = 0;
3936 + } else if (!ubi->scrub.rb_node) {
3937 + #else
3938 + if (!ubi->scrub.rb_node) {
3939 +@@ -1037,7 +1024,6 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
3940 + goto out_cancel;
3941 + }
3942 +
3943 +- wrk->anchor = 0;
3944 + wrk->func = &wear_leveling_worker;
3945 + if (nested)
3946 + __schedule_ubi_work(ubi, wrk);
3947 +@@ -1079,8 +1065,15 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
3948 + err = sync_erase(ubi, e, wl_wrk->torture);
3949 + if (!err) {
3950 + spin_lock(&ubi->wl_lock);
3951 +- wl_tree_add(e, &ubi->free);
3952 +- ubi->free_count++;
3953 ++
3954 ++ if (!ubi->fm_anchor && e->pnum < UBI_FM_MAX_START) {
3955 ++ ubi->fm_anchor = e;
3956 ++ ubi->fm_do_produce_anchor = 0;
3957 ++ } else {
3958 ++ wl_tree_add(e, &ubi->free);
3959 ++ ubi->free_count++;
3960 ++ }
3961 ++
3962 + spin_unlock(&ubi->wl_lock);
3963 +
3964 + /*
3965 +@@ -1724,6 +1717,9 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
3966 + if (err)
3967 + goto out_free;
3968 +
3969 ++#ifdef CONFIG_MTD_UBI_FASTMAP
3970 ++ ubi_ensure_anchor_pebs(ubi);
3971 ++#endif
3972 + return 0;
3973 +
3974 + out_free:
3975 +diff --git a/drivers/mtd/ubi/wl.h b/drivers/mtd/ubi/wl.h
3976 +index a9e2d669acd81..c93a532937863 100644
3977 +--- a/drivers/mtd/ubi/wl.h
3978 ++++ b/drivers/mtd/ubi/wl.h
3979 +@@ -2,7 +2,6 @@
3980 + #ifndef UBI_WL_H
3981 + #define UBI_WL_H
3982 + #ifdef CONFIG_MTD_UBI_FASTMAP
3983 +-static int anchor_pebs_available(struct rb_root *root);
3984 + static void update_fastmap_work_fn(struct work_struct *wrk);
3985 + static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
3986 + static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
3987 +diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
3988 +index 47b867c64b147..195108858f38f 100644
3989 +--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
3990 ++++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
3991 +@@ -542,8 +542,13 @@ void e1000_reinit_locked(struct e1000_adapter *adapter)
3992 + WARN_ON(in_interrupt());
3993 + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3994 + msleep(1);
3995 +- e1000_down(adapter);
3996 +- e1000_up(adapter);
3997 ++
3998 ++ /* only run the task if not already down */
3999 ++ if (!test_bit(__E1000_DOWN, &adapter->flags)) {
4000 ++ e1000_down(adapter);
4001 ++ e1000_up(adapter);
4002 ++ }
4003 ++
4004 + clear_bit(__E1000_RESETTING, &adapter->flags);
4005 + }
4006 +
4007 +@@ -1433,10 +1438,15 @@ int e1000_close(struct net_device *netdev)
4008 + struct e1000_hw *hw = &adapter->hw;
4009 + int count = E1000_CHECK_RESET_COUNT;
4010 +
4011 +- while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
4012 ++ while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--)
4013 + usleep_range(10000, 20000);
4014 +
4015 +- WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
4016 ++ WARN_ON(count < 0);
4017 ++
4018 ++ /* signal that we're down so that the reset task will no longer run */
4019 ++ set_bit(__E1000_DOWN, &adapter->flags);
4020 ++ clear_bit(__E1000_RESETTING, &adapter->flags);
4021 ++
4022 + e1000_down(adapter);
4023 + e1000_power_down_phy(adapter);
4024 + e1000_free_irq(adapter);
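
The e1000 hunks close a shutdown race: e1000_close() now acquires the __E1000_RESETTING bit itself (with a bounded test_and_set_bit() wait) and raises __E1000_DOWN before releasing it, while the reset path re-checks __E1000_DOWN under the same bit, so down/up can no longer run against an interface that is being closed. A minimal sketch of the handshake, with hypothetical flag names:

    #include <linux/bitops.h>
    #include <linux/delay.h>

    enum { F_RESETTING, F_DOWN };           /* hypothetical flag bits */
    static unsigned long state;

    static void reset_task(void)
    {
            while (test_and_set_bit(F_RESETTING, &state))
                    msleep(1);
            if (!test_bit(F_DOWN, &state)) {        /* skip if closing */
                    /* ... down + up ... */
            }
            clear_bit(F_RESETTING, &state);
    }

    static void close_path(void)
    {
            int tries = 50;                 /* bounded wait, as in the patch */

            while (test_and_set_bit(F_RESETTING, &state) && tries--)
                    usleep_range(10000, 20000);
            set_bit(F_DOWN, &state);        /* fence future reset tasks */
            clear_bit(F_RESETTING, &state);
            /* ... actual teardown ... */
    }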
4025 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
4026 +index 71a7af134dd8e..886c7aae662fa 100644
4027 +--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
4028 ++++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
4029 +@@ -96,6 +96,7 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
4030 + p_ramrod->personality = PERSONALITY_ETH;
4031 + break;
4032 + case QED_PCI_ETH_ROCE:
4033 ++ case QED_PCI_ETH_IWARP:
4034 + p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
4035 + break;
4036 + default:
4037 +diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
4038 +index 71be8524cca87..a686926bba71e 100644
4039 +--- a/drivers/net/ieee802154/adf7242.c
4040 ++++ b/drivers/net/ieee802154/adf7242.c
4041 +@@ -883,7 +883,9 @@ static int adf7242_rx(struct adf7242_local *lp)
4042 + int ret;
4043 + u8 lqi, len_u8, *data;
4044 +
4045 +- adf7242_read_reg(lp, 0, &len_u8);
4046 ++ ret = adf7242_read_reg(lp, 0, &len_u8);
4047 ++ if (ret)
4048 ++ return ret;
4049 +
4050 + len = len_u8;
4051 +
4052 +diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
4053 +index 38a41651e451c..deace0aadad24 100644
4054 +--- a/drivers/net/ieee802154/ca8210.c
4055 ++++ b/drivers/net/ieee802154/ca8210.c
4056 +@@ -2923,6 +2923,7 @@ static int ca8210_dev_com_init(struct ca8210_priv *priv)
4057 + );
4058 + if (!priv->irq_workqueue) {
4059 + dev_crit(&priv->spi->dev, "alloc of irq_workqueue failed!\n");
4060 ++ destroy_workqueue(priv->mlme_workqueue);
4061 + return -ENOMEM;
4062 + }
4063 +
4064 +diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
4065 +index da2d179430ca5..4c57e79e5779a 100644
4066 +--- a/drivers/net/wireless/ath/ar5523/ar5523.c
4067 ++++ b/drivers/net/wireless/ath/ar5523/ar5523.c
4068 +@@ -1770,6 +1770,8 @@ static const struct usb_device_id ar5523_id_table[] = {
4069 + AR5523_DEVICE_UX(0x0846, 0x4300), /* Netgear / WG111U */
4070 + AR5523_DEVICE_UG(0x0846, 0x4250), /* Netgear / WG111T */
4071 + AR5523_DEVICE_UG(0x0846, 0x5f00), /* Netgear / WPN111 */
4072 ++ AR5523_DEVICE_UG(0x083a, 0x4506), /* SMC / EZ Connect
4073 ++ SMCWUSBT-G2 */
4074 + AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / AR5523_1 */
4075 + AR5523_DEVICE_UX(0x157e, 0x3205), /* Umedia / AR5523_2 */
4076 + AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / TEW444UBEU */
4077 +diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
4078 +index 0baaad90b8d18..4e980e78ba95c 100644
4079 +--- a/drivers/net/wireless/ath/ath10k/debug.c
4080 ++++ b/drivers/net/wireless/ath/ath10k/debug.c
4081 +@@ -1521,7 +1521,7 @@ static void ath10k_tpc_stats_print(struct ath10k_tpc_stats *tpc_stats,
4082 + *len += scnprintf(buf + *len, buf_len - *len,
4083 + "No. Preamble Rate_code ");
4084 +
4085 +- for (i = 0; i < WMI_TPC_TX_N_CHAIN; i++)
4086 ++ for (i = 0; i < tpc_stats->num_tx_chain; i++)
4087 + *len += scnprintf(buf + *len, buf_len - *len,
4088 + "tpc_value%d ", i);
4089 +
4090 +@@ -2365,6 +2365,7 @@ void ath10k_debug_destroy(struct ath10k *ar)
4091 + ath10k_debug_fw_stats_reset(ar);
4092 +
4093 + kfree(ar->debug.tpc_stats);
4094 ++ kfree(ar->debug.tpc_stats_final);
4095 + }
4096 +
4097 + int ath10k_debug_register(struct ath10k *ar)
4098 +diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
4099 +index 0ecaba824fb28..0cdaecb0e28a9 100644
4100 +--- a/drivers/net/wireless/ath/ath10k/sdio.c
4101 ++++ b/drivers/net/wireless/ath/ath10k/sdio.c
4102 +@@ -1567,23 +1567,33 @@ static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
4103 + size_t buf_len)
4104 + {
4105 + int ret;
4106 ++ void *mem;
4107 ++
4108 ++ mem = kzalloc(buf_len, GFP_KERNEL);
4109 ++ if (!mem)
4110 ++ return -ENOMEM;
4111 +
4112 + /* set window register to start read cycle */
4113 + ret = ath10k_sdio_write32(ar, MBOX_WINDOW_READ_ADDR_ADDRESS, address);
4114 + if (ret) {
4115 + ath10k_warn(ar, "failed to set mbox window read address: %d", ret);
4116 +- return ret;
4117 ++ goto out;
4118 + }
4119 +
4120 + /* read the data */
4121 +- ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, buf, buf_len);
4122 ++ ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, mem, buf_len);
4123 + if (ret) {
4124 + ath10k_warn(ar, "failed to read from mbox window data address: %d\n",
4125 + ret);
4126 +- return ret;
4127 ++ goto out;
4128 + }
4129 +
4130 +- return 0;
4131 ++ memcpy(buf, mem, buf_len);
4132 ++
4133 ++out:
4134 ++ kfree(mem);
4135 ++
4136 ++ return ret;
4137 + }
4138 +
4139 + static int ath10k_sdio_hif_diag_read32(struct ath10k *ar, u32 address,
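
The ath10k SDIO hunk stops reading straight into the caller's buffer, which can live on the stack and therefore may not be DMA-safe, and bounces the transfer through freshly kzalloc'd memory instead. A minimal sketch of the bounce-buffer pattern, with bus_read() as a placeholder for the real DMA-capable transfer:

    #include <linux/slab.h>
    #include <linux/string.h>

    static int diag_read_bounced(void *dst, size_t len,
                                 int (*bus_read)(void *buf, size_t len))
    {
            void *bounce = kzalloc(len, GFP_KERNEL);        /* DMA-safe */
            int ret;

            if (!bounce)
                    return -ENOMEM;

            ret = bus_read(bounce, len);
            if (!ret)
                    memcpy(dst, bounce, len);       /* dst may be on stack */

            kfree(bounce);
            return ret;
    }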
4140 +diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
4141 +index 3372dfa0deccf..3f3fbee631c34 100644
4142 +--- a/drivers/net/wireless/ath/ath10k/wmi.c
4143 ++++ b/drivers/net/wireless/ath/ath10k/wmi.c
4144 +@@ -4550,16 +4550,13 @@ static void ath10k_tpc_config_disp_tables(struct ath10k *ar,
4145 + }
4146 +
4147 + pream_idx = 0;
4148 +- for (i = 0; i < __le32_to_cpu(ev->rate_max); i++) {
4149 ++ for (i = 0; i < tpc_stats->rate_max; i++) {
4150 + memset(tpc_value, 0, sizeof(tpc_value));
4151 + memset(buff, 0, sizeof(buff));
4152 + if (i == pream_table[pream_idx])
4153 + pream_idx++;
4154 +
4155 +- for (j = 0; j < WMI_TPC_TX_N_CHAIN; j++) {
4156 +- if (j >= __le32_to_cpu(ev->num_tx_chain))
4157 +- break;
4158 +-
4159 ++ for (j = 0; j < tpc_stats->num_tx_chain; j++) {
4160 + tpc[j] = ath10k_tpc_config_get_rate(ar, ev, i, j + 1,
4161 + rate_code[i],
4162 + type);
4163 +@@ -4672,7 +4669,7 @@ void ath10k_wmi_tpc_config_get_rate_code(u8 *rate_code, u16 *pream_table,
4164 +
4165 + void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
4166 + {
4167 +- u32 num_tx_chain;
4168 ++ u32 num_tx_chain, rate_max;
4169 + u8 rate_code[WMI_TPC_RATE_MAX];
4170 + u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
4171 + struct wmi_pdev_tpc_config_event *ev;
4172 +@@ -4688,6 +4685,13 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
4173 + return;
4174 + }
4175 +
4176 ++ rate_max = __le32_to_cpu(ev->rate_max);
4177 ++ if (rate_max > WMI_TPC_RATE_MAX) {
4178 ++ ath10k_warn(ar, "number of rate is %d greater than TPC configured rate %d\n",
4179 ++ rate_max, WMI_TPC_RATE_MAX);
4180 ++ rate_max = WMI_TPC_RATE_MAX;
4181 ++ }
4182 ++
4183 + tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
4184 + if (!tpc_stats)
4185 + return;
4186 +@@ -4704,8 +4708,8 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
4187 + __le32_to_cpu(ev->twice_antenna_reduction);
4188 + tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
4189 + tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
4190 +- tpc_stats->num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
4191 +- tpc_stats->rate_max = __le32_to_cpu(ev->rate_max);
4192 ++ tpc_stats->num_tx_chain = num_tx_chain;
4193 ++ tpc_stats->rate_max = rate_max;
4194 +
4195 + ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
4196 + rate_code, pream_table,
4197 +@@ -4900,16 +4904,13 @@ ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar,
4198 + }
4199 +
4200 + pream_idx = 0;
4201 +- for (i = 0; i < __le32_to_cpu(ev->rate_max); i++) {
4202 ++ for (i = 0; i < tpc_stats->rate_max; i++) {
4203 + memset(tpc_value, 0, sizeof(tpc_value));
4204 + memset(buff, 0, sizeof(buff));
4205 + if (i == pream_table[pream_idx])
4206 + pream_idx++;
4207 +
4208 +- for (j = 0; j < WMI_TPC_TX_N_CHAIN; j++) {
4209 +- if (j >= __le32_to_cpu(ev->num_tx_chain))
4210 +- break;
4211 +-
4212 ++ for (j = 0; j < tpc_stats->num_tx_chain; j++) {
4213 + tpc[j] = ath10k_wmi_tpc_final_get_rate(ar, ev, i, j + 1,
4214 + rate_code[i],
4215 + type, pream_idx);
4216 +@@ -4925,7 +4926,7 @@ ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar,
4217 +
4218 + void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
4219 + {
4220 +- u32 num_tx_chain;
4221 ++ u32 num_tx_chain, rate_max;
4222 + u8 rate_code[WMI_TPC_FINAL_RATE_MAX];
4223 + u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
4224 + struct wmi_pdev_tpc_final_table_event *ev;
4225 +@@ -4933,12 +4934,24 @@ void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
4226 +
4227 + ev = (struct wmi_pdev_tpc_final_table_event *)skb->data;
4228 +
4229 ++ num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
4230 ++ if (num_tx_chain > WMI_TPC_TX_N_CHAIN) {
4231 ++ ath10k_warn(ar, "number of tx chain is %d greater than TPC final configured tx chain %d\n",
4232 ++ num_tx_chain, WMI_TPC_TX_N_CHAIN);
4233 ++ return;
4234 ++ }
4235 ++
4236 ++ rate_max = __le32_to_cpu(ev->rate_max);
4237 ++ if (rate_max > WMI_TPC_FINAL_RATE_MAX) {
4238 ++ ath10k_warn(ar, "number of rate is %d greater than TPC final configured rate %d\n",
4239 ++ rate_max, WMI_TPC_FINAL_RATE_MAX);
4240 ++ rate_max = WMI_TPC_FINAL_RATE_MAX;
4241 ++ }
4242 ++
4243 + tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
4244 + if (!tpc_stats)
4245 + return;
4246 +
4247 +- num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
4248 +-
4249 + ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table,
4250 + num_tx_chain);
4251 +
4252 +@@ -4951,8 +4964,8 @@ void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
4253 + __le32_to_cpu(ev->twice_antenna_reduction);
4254 + tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
4255 + tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
4256 +- tpc_stats->num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
4257 +- tpc_stats->rate_max = __le32_to_cpu(ev->rate_max);
4258 ++ tpc_stats->num_tx_chain = num_tx_chain;
4259 ++ tpc_stats->rate_max = rate_max;
4260 +
4261 + ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats,
4262 + rate_code, pream_table,
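
The wmi.c hunks above stop trusting firmware-supplied table dimensions: num_tx_chain and rate_max are clamped against WMI_TPC_TX_N_CHAIN and WMI_TPC_RATE_MAX / WMI_TPC_FINAL_RATE_MAX before being stored, and the display loops then iterate over the stored, sanitized values instead of re-reading the event. A sketch of the clamp-then-trust idiom, with an illustrative capacity:

    #include <linux/kernel.h>

    #define MAX_RATES 160                   /* illustrative array capacity */

    static u32 sanitize_rate_max(u32 fw_rate_max)
    {
            if (fw_rate_max > MAX_RATES) {
                    pr_warn("fw rate_max %u capped to %u\n",
                            fw_rate_max, MAX_RATES);
                    fw_rate_max = MAX_RATES;
            }
            return fw_rate_max;             /* now safe as a loop bound */
    }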
4263 +diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
4264 +index 1fb76d2f5d3fd..8b9d0809daf62 100644
4265 +--- a/drivers/net/wireless/marvell/mwifiex/fw.h
4266 ++++ b/drivers/net/wireless/marvell/mwifiex/fw.h
4267 +@@ -953,7 +953,7 @@ struct mwifiex_tkip_param {
4268 + struct mwifiex_aes_param {
4269 + u8 pn[WPA_PN_SIZE];
4270 + __le16 key_len;
4271 +- u8 key[WLAN_KEY_LEN_CCMP];
4272 ++ u8 key[WLAN_KEY_LEN_CCMP_256];
4273 + } __packed;
4274 +
4275 + struct mwifiex_wapi_param {
4276 +diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
4277 +index 797c2e9783943..7003767eef423 100644
4278 +--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
4279 ++++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
4280 +@@ -620,7 +620,7 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv,
4281 + key_v2 = &resp->params.key_material_v2;
4282 +
4283 + len = le16_to_cpu(key_v2->key_param_set.key_params.aes.key_len);
4284 +- if (len > WLAN_KEY_LEN_CCMP)
4285 ++ if (len > sizeof(key_v2->key_param_set.key_params.aes.key))
4286 + return -EINVAL;
4287 +
4288 + if (le16_to_cpu(key_v2->action) == HostCmd_ACT_GEN_SET) {
4289 +@@ -636,7 +636,7 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv,
4290 + return 0;
4291 +
4292 + memset(priv->aes_key_v2.key_param_set.key_params.aes.key, 0,
4293 +- WLAN_KEY_LEN_CCMP);
4294 ++ sizeof(key_v2->key_param_set.key_params.aes.key));
4295 + priv->aes_key_v2.key_param_set.key_params.aes.key_len =
4296 + cpu_to_le16(len);
4297 + memcpy(priv->aes_key_v2.key_param_set.key_params.aes.key,
4298 +diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
4299 +index d44d57e6eb27a..97df6b3a472b1 100644
4300 +--- a/drivers/net/wireless/mediatek/mt76/agg-rx.c
4301 ++++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
4302 +@@ -278,6 +278,7 @@ static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
4303 + if (!skb)
4304 + continue;
4305 +
4306 ++ tid->reorder_buf[i] = NULL;
4307 + tid->nframes--;
4308 + dev_kfree_skb(skb);
4309 + }
4310 +diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
4311 +index 2ca5658bbc2ab..43c7b37dec0c9 100644
4312 +--- a/drivers/net/wireless/ti/wlcore/main.c
4313 ++++ b/drivers/net/wireless/ti/wlcore/main.c
4314 +@@ -3671,8 +3671,10 @@ void wlcore_regdomain_config(struct wl1271 *wl)
4315 + goto out;
4316 +
4317 + ret = pm_runtime_get_sync(wl->dev);
4318 +- if (ret < 0)
4319 ++ if (ret < 0) {
4320 ++ pm_runtime_put_autosuspend(wl->dev);
4321 + goto out;
4322 ++ }
4323 +
4324 + ret = wlcore_cmd_regdomain_config_locked(wl);
4325 + if (ret < 0) {
4326 +diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
4327 +index b6e19c2d66b0a..250bcbf4ea2f2 100644
4328 +--- a/drivers/net/wireless/ti/wlcore/tx.c
4329 ++++ b/drivers/net/wireless/ti/wlcore/tx.c
4330 +@@ -877,6 +877,7 @@ void wl1271_tx_work(struct work_struct *work)
4331 +
4332 + ret = wlcore_tx_work_locked(wl);
4333 + if (ret < 0) {
4334 ++ pm_runtime_put_noidle(wl->dev);
4335 + wl12xx_queue_recovery_work(wl);
4336 + goto out;
4337 + }
4338 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
4339 +index 0d60f2f8f3eec..33dad9774da01 100644
4340 +--- a/drivers/nvme/host/core.c
4341 ++++ b/drivers/nvme/host/core.c
4342 +@@ -255,11 +255,8 @@ void nvme_complete_rq(struct request *req)
4343 + trace_nvme_complete_rq(req);
4344 +
4345 + if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
4346 +- if ((req->cmd_flags & REQ_NVME_MPATH) &&
4347 +- blk_path_error(status)) {
4348 +- nvme_failover_req(req);
4349 ++ if ((req->cmd_flags & REQ_NVME_MPATH) && nvme_failover_req(req))
4350 + return;
4351 +- }
4352 +
4353 + if (!blk_queue_dying(req->q)) {
4354 + nvme_req(req)->retries++;
4355 +@@ -1602,7 +1599,7 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
4356 + if (ns->head->disk) {
4357 + nvme_update_disk_info(ns->head->disk, ns, id);
4358 + blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
4359 +- revalidate_disk(ns->head->disk);
4360 ++ nvme_mpath_update_disk_size(ns->head->disk);
4361 + }
4362 + #endif
4363 + }
4364 +@@ -2859,6 +2856,10 @@ static ssize_t nvme_sysfs_delete(struct device *dev,
4365 + {
4366 + struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
4367 +
4368 ++ /* Can't delete non-created controllers */
4369 ++ if (!ctrl->created)
4370 ++ return -EBUSY;
4371 ++
4372 + if (device_remove_file_self(dev, attr))
4373 + nvme_delete_ctrl_sync(ctrl);
4374 + return count;
4375 +@@ -3579,6 +3580,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
4376 + queue_work(nvme_wq, &ctrl->async_event_work);
4377 + nvme_start_queues(ctrl);
4378 + }
4379 ++ ctrl->created = true;
4380 + }
4381 + EXPORT_SYMBOL_GPL(nvme_start_ctrl);
4382 +
4383 +diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
4384 +index 2e63c1106030b..e71075338ff5c 100644
4385 +--- a/drivers/nvme/host/multipath.c
4386 ++++ b/drivers/nvme/host/multipath.c
4387 +@@ -73,17 +73,12 @@ void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
4388 + }
4389 + }
4390 +
4391 +-void nvme_failover_req(struct request *req)
4392 ++bool nvme_failover_req(struct request *req)
4393 + {
4394 + struct nvme_ns *ns = req->q->queuedata;
4395 + u16 status = nvme_req(req)->status;
4396 + unsigned long flags;
4397 +
4398 +- spin_lock_irqsave(&ns->head->requeue_lock, flags);
4399 +- blk_steal_bios(&ns->head->requeue_list, req);
4400 +- spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
4401 +- blk_mq_end_request(req, 0);
4402 +-
4403 + switch (status & 0x7ff) {
4404 + case NVME_SC_ANA_TRANSITION:
4405 + case NVME_SC_ANA_INACCESSIBLE:
4406 +@@ -111,15 +106,17 @@ void nvme_failover_req(struct request *req)
4407 + nvme_mpath_clear_current_path(ns);
4408 + break;
4409 + default:
4410 +- /*
4411 +- * Reset the controller for any non-ANA error as we don't know
4412 +- * what caused the error.
4413 +- */
4414 +- nvme_reset_ctrl(ns->ctrl);
4415 +- break;
4416 ++ /* This was a non-ANA error so follow the normal error path. */
4417 ++ return false;
4418 + }
4419 +
4420 ++ spin_lock_irqsave(&ns->head->requeue_lock, flags);
4421 ++ blk_steal_bios(&ns->head->requeue_list, req);
4422 ++ spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
4423 ++ blk_mq_end_request(req, 0);
4424 ++
4425 + kblockd_schedule_work(&ns->head->requeue_work);
4426 ++ return true;
4427 + }
4428 +
4429 + void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
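
The multipath hunks invert the failover flow: nvme_failover_req() now returns bool, claims the request (stealing its bios onto the head's requeue list and completing it) only for recognised ANA/path errors, and returns false otherwise so nvme_complete_rq() falls through to the ordinary retry path instead of resetting the controller. A stripped-down sketch of the caller-side dispatch, with all nvme specifics behind hypothetical helpers:

    struct request;                         /* opaque for this sketch */

    extern bool failover_req(struct request *req); /* placeholder */
    extern void retry_req(struct request *req);    /* placeholder */
    extern void end_req(struct request *req);      /* placeholder */

    static void complete_rq(struct request *req, bool is_mpath,
                            bool retryable)
    {
            if (retryable && is_mpath && failover_req(req))
                    return;         /* bios re-queued on another path */
            if (retryable)
                    retry_req(req);
            else
                    end_req(req);
    }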
4430 +diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
4431 +index cc4273f119894..9c2e7a151e400 100644
4432 +--- a/drivers/nvme/host/nvme.h
4433 ++++ b/drivers/nvme/host/nvme.h
4434 +@@ -206,6 +206,7 @@ struct nvme_ctrl {
4435 + struct nvme_command ka_cmd;
4436 + struct work_struct fw_act_work;
4437 + unsigned long events;
4438 ++ bool created;
4439 +
4440 + #ifdef CONFIG_NVME_MULTIPATH
4441 + /* asymmetric namespace access: */
4442 +@@ -477,7 +478,7 @@ void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
4443 + void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
4444 + void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
4445 + struct nvme_ctrl *ctrl, int *flags);
4446 +-void nvme_failover_req(struct request *req);
4447 ++bool nvme_failover_req(struct request *req);
4448 + void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
4449 + int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
4450 + void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
4451 +@@ -503,6 +504,16 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
4452 + kblockd_schedule_work(&head->requeue_work);
4453 + }
4454 +
4455 ++static inline void nvme_mpath_update_disk_size(struct gendisk *disk)
4456 ++{
4457 ++ struct block_device *bdev = bdget_disk(disk, 0);
4458 ++
4459 ++ if (bdev) {
4460 ++ bd_set_size(bdev, get_capacity(disk) << SECTOR_SHIFT);
4461 ++ bdput(bdev);
4462 ++ }
4463 ++}
4464 ++
4465 + extern struct device_attribute dev_attr_ana_grpid;
4466 + extern struct device_attribute dev_attr_ana_state;
4467 +
4468 +@@ -521,8 +532,9 @@ static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
4469 + sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
4470 + }
4471 +
4472 +-static inline void nvme_failover_req(struct request *req)
4473 ++static inline bool nvme_failover_req(struct request *req)
4474 + {
4475 ++ return false;
4476 + }
4477 + static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
4478 + {
4479 +@@ -568,6 +580,9 @@ static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
4480 + static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
4481 + {
4482 + }
4483 ++static inline void nvme_mpath_update_disk_size(struct gendisk *disk)
4484 ++{
4485 ++}
4486 + #endif /* CONFIG_NVME_MULTIPATH */
4487 +
4488 + #ifdef CONFIG_NVM
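
[Context note, not part of the patch] The three nvme hunks above work together: nvme_failover_req() now returns a bool, does the ANA status check first, and only steals the bios and ends the request when it actually queued a failover; any non-ANA error falls back to the normal retry/completion path instead of resetting the controller. A minimal standalone sketch of the resulting control flow; all types, status values, and helper names below are simplified stand-ins, not the real NVMe driver API:

    #include <stdbool.h>
    #include <stdio.h>

    struct request { unsigned cmd_flags; int status; };
    #define REQ_NVME_MPATH 0x1
    #define BLK_STS_OK     0

    /* Returns true only for ANA-class errors it can requeue; for any
     * other error it declines and the normal error path runs. */
    static bool nvme_failover_req_sketch(struct request *req)
    {
        switch (req->status & 0x7ff) {
        case 0x303:              /* ANA transition-style status (illustrative) */
            /* real code: steal bios, end request, kick requeue work */
            return true;
        default:
            return false;        /* non-ANA: follow the normal error path */
        }
    }

    static void nvme_complete_rq_sketch(struct request *req)
    {
        if (req->status != BLK_STS_OK) {
            if ((req->cmd_flags & REQ_NVME_MPATH) &&
                nvme_failover_req_sketch(req))
                return;          /* failover consumed the request */
            printf("non-ANA error: retry or fail on the original path\n");
        }
    }

    int main(void)
    {
        struct request r = { .cmd_flags = REQ_NVME_MPATH, .status = 0x303 };
        nvme_complete_rq_sketch(&r);  /* failover path */
        r.status = 0x6;               /* some non-ANA status */
        nvme_complete_rq_sketch(&r);  /* normal path */
        return 0;
    }
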
4489 +diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
4490 +index 08f997a390d5d..cfd26437aeaea 100644
4491 +--- a/drivers/nvme/target/rdma.c
4492 ++++ b/drivers/nvme/target/rdma.c
4493 +@@ -83,6 +83,7 @@ enum nvmet_rdma_queue_state {
4494 +
4495 + struct nvmet_rdma_queue {
4496 + struct rdma_cm_id *cm_id;
4497 ++ struct ib_qp *qp;
4498 + struct nvmet_port *port;
4499 + struct ib_cq *cq;
4500 + atomic_t sq_wr_avail;
4501 +@@ -471,7 +472,7 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
4502 + if (ndev->srq)
4503 + ret = ib_post_srq_recv(ndev->srq, &cmd->wr, NULL);
4504 + else
4505 +- ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, NULL);
4506 ++ ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL);
4507 +
4508 + if (unlikely(ret))
4509 + pr_err("post_recv cmd failed\n");
4510 +@@ -510,7 +511,7 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
4511 + atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
4512 +
4513 + if (rsp->n_rdma) {
4514 +- rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
4515 ++ rdma_rw_ctx_destroy(&rsp->rw, queue->qp,
4516 + queue->cm_id->port_num, rsp->req.sg,
4517 + rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
4518 + }
4519 +@@ -594,7 +595,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
4520 +
4521 + WARN_ON(rsp->n_rdma <= 0);
4522 + atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
4523 +- rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
4524 ++ rdma_rw_ctx_destroy(&rsp->rw, queue->qp,
4525 + queue->cm_id->port_num, rsp->req.sg,
4526 + rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
4527 + rsp->n_rdma = 0;
4528 +@@ -737,7 +738,7 @@ static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
4529 + }
4530 +
4531 + if (nvmet_rdma_need_data_in(rsp)) {
4532 +- if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp,
4533 ++ if (rdma_rw_ctx_post(&rsp->rw, queue->qp,
4534 + queue->cm_id->port_num, &rsp->read_cqe, NULL))
4535 + nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
4536 + } else {
4537 +@@ -1020,6 +1021,7 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
4538 + pr_err("failed to create_qp ret= %d\n", ret);
4539 + goto err_destroy_cq;
4540 + }
4541 ++ queue->qp = queue->cm_id->qp;
4542 +
4543 + atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);
4544 +
4545 +@@ -1048,11 +1050,10 @@ err_destroy_cq:
4546 +
4547 + static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
4548 + {
4549 +- struct ib_qp *qp = queue->cm_id->qp;
4550 +-
4551 +- ib_drain_qp(qp);
4552 +- rdma_destroy_id(queue->cm_id);
4553 +- ib_destroy_qp(qp);
4554 ++ ib_drain_qp(queue->qp);
4555 ++ if (queue->cm_id)
4556 ++ rdma_destroy_id(queue->cm_id);
4557 ++ ib_destroy_qp(queue->qp);
4558 + ib_free_cq(queue->cq);
4559 + }
4560 +
4561 +@@ -1286,9 +1287,12 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
4562 +
4563 + ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
4564 + if (ret) {
4565 +- schedule_work(&queue->release_work);
4566 +- /* Destroying rdma_cm id is not needed here */
4567 +- return 0;
4568 ++ /*
4569 ++ * Don't destroy the cm_id in free path, as we implicitly
4570 ++ * destroy the cm_id here with non-zero ret code.
4571 ++ */
4572 ++ queue->cm_id = NULL;
4573 ++ goto free_queue;
4574 + }
4575 +
4576 + mutex_lock(&nvmet_rdma_queue_mutex);
4577 +@@ -1297,6 +1301,8 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
4578 +
4579 + return 0;
4580 +
4581 ++free_queue:
4582 ++ nvmet_rdma_free_queue(queue);
4583 + put_device:
4584 + kref_put(&ndev->ref, nvmet_rdma_free_dev);
4585 +
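
[Context note] The nvmet rdma hunks above cache the qp pointer in the queue at create time so teardown no longer dereferences cm_id->qp, which may be gone once a failed accept has set queue->cm_id to NULL. A compile-and-run sketch of that teardown ordering, with stub types standing in for the RDMA core API:

    #include <stdio.h>

    struct ib_qp { int id; };
    struct rdma_cm_id { struct ib_qp *qp; };
    struct queue { struct rdma_cm_id *cm_id; struct ib_qp *qp; };

    static void ib_drain_qp_stub(struct ib_qp *qp)   { printf("drain qp %d\n", qp->id); }
    static void ib_destroy_qp_stub(struct ib_qp *qp) { printf("destroy qp %d\n", qp->id); }
    static void rdma_destroy_id_stub(struct rdma_cm_id *id) { (void)id; puts("destroy cm_id"); }

    static void destroy_queue_ib(struct queue *queue)
    {
        ib_drain_qp_stub(queue->qp);
        if (queue->cm_id)                 /* NULL after a failed accept */
            rdma_destroy_id_stub(queue->cm_id);
        ib_destroy_qp_stub(queue->qp);    /* qp cached independently of cm_id */
    }

    int main(void)
    {
        struct ib_qp qp = { 1 };
        struct queue q = { .cm_id = NULL, .qp = &qp };  /* accept failed */
        destroy_queue_ib(&q);
        return 0;
    }
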
4586 +diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
4587 +index 6f86583605a46..097c02197ec8f 100644
4588 +--- a/drivers/pci/controller/pci-tegra.c
4589 ++++ b/drivers/pci/controller/pci-tegra.c
4590 +@@ -2400,7 +2400,7 @@ static int tegra_pcie_probe(struct platform_device *pdev)
4591 + err = pm_runtime_get_sync(pcie->dev);
4592 + if (err < 0) {
4593 + dev_err(dev, "fail to enable pcie controller: %d\n", err);
4594 +- goto teardown_msi;
4595 ++ goto pm_runtime_put;
4596 + }
4597 +
4598 + err = tegra_pcie_request_resources(pcie);
4599 +@@ -2440,7 +2440,6 @@ free_resources:
4600 + pm_runtime_put:
4601 + pm_runtime_put_sync(pcie->dev);
4602 + pm_runtime_disable(pcie->dev);
4603 +-teardown_msi:
4604 + tegra_pcie_msi_teardown(pcie);
4605 + put_resources:
4606 + tegra_pcie_put_resources(pcie);
4607 +diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
4608 +index 07940d1d83b70..005817e40ad39 100644
4609 +--- a/drivers/pci/hotplug/pciehp_hpc.c
4610 ++++ b/drivers/pci/hotplug/pciehp_hpc.c
4611 +@@ -530,7 +530,7 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
4612 + struct controller *ctrl = (struct controller *)dev_id;
4613 + struct pci_dev *pdev = ctrl_dev(ctrl);
4614 + struct device *parent = pdev->dev.parent;
4615 +- u16 status, events;
4616 ++ u16 status, events = 0;
4617 +
4618 + /*
4619 + * Interrupts only occur in D3hot or shallower (PCIe r4.0, sec 6.7.3.4).
4620 +@@ -553,6 +553,7 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
4621 + }
4622 + }
4623 +
4624 ++read_status:
4625 + pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status);
4626 + if (status == (u16) ~0) {
4627 + ctrl_info(ctrl, "%s: no response from device\n", __func__);
4628 +@@ -565,24 +566,37 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
4629 + * Slot Status contains plain status bits as well as event
4630 + * notification bits; right now we only want the event bits.
4631 + */
4632 +- events = status & (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
4633 +- PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC |
4634 +- PCI_EXP_SLTSTA_DLLSC);
4635 ++ status &= PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
4636 ++ PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC |
4637 ++ PCI_EXP_SLTSTA_DLLSC;
4638 +
4639 + /*
4640 + * If we've already reported a power fault, don't report it again
4641 + * until we've done something to handle it.
4642 + */
4643 + if (ctrl->power_fault_detected)
4644 +- events &= ~PCI_EXP_SLTSTA_PFD;
4645 ++ status &= ~PCI_EXP_SLTSTA_PFD;
4646 +
4647 ++ events |= status;
4648 + if (!events) {
4649 + if (parent)
4650 + pm_runtime_put(parent);
4651 + return IRQ_NONE;
4652 + }
4653 +
4654 +- pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events);
4655 ++ if (status) {
4656 ++ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events);
4657 ++
4658 ++ /*
4659 ++ * In MSI mode, all event bits must be zero before the port
4660 ++ * will send a new interrupt (PCIe Base Spec r5.0 sec 6.7.3.4).
4661 ++ * So re-read the Slot Status register in case a bit was set
4662 ++ * between read and write.
4663 ++ */
4664 ++ if (pci_dev_msi_enabled(pdev) && !pciehp_poll_mode)
4665 ++ goto read_status;
4666 ++ }
4667 ++
4668 + ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", events);
4669 + if (parent)
4670 + pm_runtime_put(parent);
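
[Context note] The pciehp hunk above turns the single status read into a loop. A standalone model of why: in MSI mode the port only raises a new interrupt once all Slot Status event bits are clear (PCIe r5.0 sec 6.7.3.4), so after acknowledging we must re-read in case a bit latched between the read and the write-back. The register below and the 0x011b event mask are simulated stand-ins:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t hw_slot_status = 0x0008;      /* pretend PDC is pending */

    static uint16_t read_slot_status(void) { return hw_slot_status; }
    static void ack_slot_status(uint16_t bits) { hw_slot_status &= (uint16_t)~bits; }

    int main(void)
    {
        uint16_t events = 0, status;

    read_status:
        status = read_slot_status() & 0x011b;     /* ABP|PFD|PDC|CC|DLLSC */
        events |= status;
        if (status) {
            ack_slot_status(events);
            goto read_status;                     /* close the MSI race */
        }
        printf("pending interrupts %#06x\n", (unsigned)events);
        return 0;
    }

The loop terminates once a read returns no new event bits, exactly the condition under which the port is free to signal again.
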
4671 +diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
4672 +index 137bf0cee897c..8fc9a4e911e3a 100644
4673 +--- a/drivers/pci/rom.c
4674 ++++ b/drivers/pci/rom.c
4675 +@@ -195,20 +195,3 @@ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom)
4676 + pci_disable_rom(pdev);
4677 + }
4678 + EXPORT_SYMBOL(pci_unmap_rom);
4679 +-
4680 +-/**
4681 +- * pci_platform_rom - provides a pointer to any ROM image provided by the
4682 +- * platform
4683 +- * @pdev: pointer to pci device struct
4684 +- * @size: pointer to receive size of pci window over ROM
4685 +- */
4686 +-void __iomem *pci_platform_rom(struct pci_dev *pdev, size_t *size)
4687 +-{
4688 +- if (pdev->rom && pdev->romlen) {
4689 +- *size = pdev->romlen;
4690 +- return phys_to_virt((phys_addr_t)pdev->rom);
4691 +- }
4692 +-
4693 +- return NULL;
4694 +-}
4695 +-EXPORT_SYMBOL(pci_platform_rom);
4696 +diff --git a/drivers/phy/samsung/phy-s5pv210-usb2.c b/drivers/phy/samsung/phy-s5pv210-usb2.c
4697 +index f6f72339bbc32..bb7fdf491c1c2 100644
4698 +--- a/drivers/phy/samsung/phy-s5pv210-usb2.c
4699 ++++ b/drivers/phy/samsung/phy-s5pv210-usb2.c
4700 +@@ -142,6 +142,10 @@ static void s5pv210_phy_pwr(struct samsung_usb2_phy_instance *inst, bool on)
4701 + udelay(10);
4702 + rst &= ~rstbits;
4703 + writel(rst, drv->reg_phy + S5PV210_UPHYRST);
4704 ++ /* The following delay is necessary for the reset sequence to be
4705 ++ * completed
4706 ++ */
4707 ++ udelay(80);
4708 + } else {
4709 + pwr = readl(drv->reg_phy + S5PV210_UPHYPWR);
4710 + pwr |= phypwr;
4711 +diff --git a/drivers/power/supply/max17040_battery.c b/drivers/power/supply/max17040_battery.c
4712 +index 33c40f79d23d5..2c35c13ad546f 100644
4713 +--- a/drivers/power/supply/max17040_battery.c
4714 ++++ b/drivers/power/supply/max17040_battery.c
4715 +@@ -109,7 +109,7 @@ static void max17040_get_vcell(struct i2c_client *client)
4716 +
4717 + vcell = max17040_read_reg(client, MAX17040_VCELL);
4718 +
4719 +- chip->vcell = vcell;
4720 ++ chip->vcell = (vcell >> 4) * 1250;
4721 + }
4722 +
4723 + static void max17040_get_soc(struct i2c_client *client)
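
[Context note] The max17040 hunk above stops reporting the raw register word. Per the MAX17040 datasheet the VCELL reading is 12 bits left-justified in the 16-bit register, at 1.25 mV (1250 uV) per LSB, so the word must be shifted and scaled. A worked example:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t raw = 0xCCE0;                 /* sample register word */
        unsigned uv  = (raw >> 4) * 1250;      /* 0x0CCE = 3278; 3278 * 1250 */
        printf("vcell = %u uV (~%.4f V)\n", uv, uv / 1e6);  /* ~4.0975 V */
        return 0;
    }
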
4724 +diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
4725 +index 5940780648e0f..f36a8a5261a13 100644
4726 +--- a/drivers/rapidio/devices/rio_mport_cdev.c
4727 ++++ b/drivers/rapidio/devices/rio_mport_cdev.c
4728 +@@ -2385,13 +2385,6 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
4729 + cdev_init(&md->cdev, &mport_fops);
4730 + md->cdev.owner = THIS_MODULE;
4731 +
4732 +- ret = cdev_device_add(&md->cdev, &md->dev);
4733 +- if (ret) {
4734 +- rmcd_error("Failed to register mport %d (err=%d)",
4735 +- mport->id, ret);
4736 +- goto err_cdev;
4737 +- }
4738 +-
4739 + INIT_LIST_HEAD(&md->doorbells);
4740 + spin_lock_init(&md->db_lock);
4741 + INIT_LIST_HEAD(&md->portwrites);
4742 +@@ -2411,6 +2404,13 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
4743 + #else
4744 + md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER;
4745 + #endif
4746 ++
4747 ++ ret = cdev_device_add(&md->cdev, &md->dev);
4748 ++ if (ret) {
4749 ++ rmcd_error("Failed to register mport %d (err=%d)",
4750 ++ mport->id, ret);
4751 ++ goto err_cdev;
4752 ++ }
4753 + ret = rio_query_mport(mport, &attr);
4754 + if (!ret) {
4755 + md->properties.flags = attr.flags;
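
[Context note] The rio_mport_cdev hunk above is purely an ordering fix: cdev_device_add() makes the device node visible to userspace immediately, so every list, lock, and property the fops touch must be initialized before the call, which is why registration moves to the end of mport_cdev_add(). The same register-last rule shows up again in the two RTC hunks that follow.
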
4756 +diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
4757 +index 38a2e9e684df4..77a106e90124b 100644
4758 +--- a/drivers/rtc/rtc-ds1374.c
4759 ++++ b/drivers/rtc/rtc-ds1374.c
4760 +@@ -620,6 +620,10 @@ static int ds1374_probe(struct i2c_client *client,
4761 + if (!ds1374)
4762 + return -ENOMEM;
4763 +
4764 ++ ds1374->rtc = devm_rtc_allocate_device(&client->dev);
4765 ++ if (IS_ERR(ds1374->rtc))
4766 ++ return PTR_ERR(ds1374->rtc);
4767 ++
4768 + ds1374->client = client;
4769 + i2c_set_clientdata(client, ds1374);
4770 +
4771 +@@ -641,12 +645,11 @@ static int ds1374_probe(struct i2c_client *client,
4772 + device_set_wakeup_capable(&client->dev, 1);
4773 + }
4774 +
4775 +- ds1374->rtc = devm_rtc_device_register(&client->dev, client->name,
4776 +- &ds1374_rtc_ops, THIS_MODULE);
4777 +- if (IS_ERR(ds1374->rtc)) {
4778 +- dev_err(&client->dev, "unable to register the class device\n");
4779 +- return PTR_ERR(ds1374->rtc);
4780 +- }
4781 ++ ds1374->rtc->ops = &ds1374_rtc_ops;
4782 ++
4783 ++ ret = rtc_register_device(ds1374->rtc);
4784 ++ if (ret)
4785 ++ return ret;
4786 +
4787 + #ifdef CONFIG_RTC_DRV_DS1374_WDT
4788 + save_client = client;
4789 +diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
4790 +index 304d905cb23fd..56f625371735f 100644
4791 +--- a/drivers/rtc/rtc-sa1100.c
4792 ++++ b/drivers/rtc/rtc-sa1100.c
4793 +@@ -186,7 +186,6 @@ static const struct rtc_class_ops sa1100_rtc_ops = {
4794 +
4795 + int sa1100_rtc_init(struct platform_device *pdev, struct sa1100_rtc *info)
4796 + {
4797 +- struct rtc_device *rtc;
4798 + int ret;
4799 +
4800 + spin_lock_init(&info->lock);
4801 +@@ -215,15 +214,14 @@ int sa1100_rtc_init(struct platform_device *pdev, struct sa1100_rtc *info)
4802 + writel_relaxed(0, info->rcnr);
4803 + }
4804 +
4805 +- rtc = devm_rtc_device_register(&pdev->dev, pdev->name, &sa1100_rtc_ops,
4806 +- THIS_MODULE);
4807 +- if (IS_ERR(rtc)) {
4808 ++ info->rtc->ops = &sa1100_rtc_ops;
4809 ++ info->rtc->max_user_freq = RTC_FREQ;
4810 ++
4811 ++ ret = rtc_register_device(info->rtc);
4812 ++ if (ret) {
4813 + clk_disable_unprepare(info->clk);
4814 +- return PTR_ERR(rtc);
4815 ++ return ret;
4816 + }
4817 +- info->rtc = rtc;
4818 +-
4819 +- rtc->max_user_freq = RTC_FREQ;
4820 +
4821 + /* Fix for a nasty initialization problem in the SA11xx RTSR register.
4822 + * See also the comments in sa1100_rtc_interrupt().
4823 +@@ -272,6 +270,10 @@ static int sa1100_rtc_probe(struct platform_device *pdev)
4824 + info->irq_1hz = irq_1hz;
4825 + info->irq_alarm = irq_alarm;
4826 +
4827 ++ info->rtc = devm_rtc_allocate_device(&pdev->dev);
4828 ++ if (IS_ERR(info->rtc))
4829 ++ return PTR_ERR(info->rtc);
4830 ++
4831 + ret = devm_request_irq(&pdev->dev, irq_1hz, sa1100_rtc_interrupt, 0,
4832 + "rtc 1Hz", &pdev->dev);
4833 + if (ret) {
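
[Context note] Both RTC hunks above (rtc-ds1374 and rtc-sa1100) adopt the same two-step pattern from the patch: devm_rtc_allocate_device() early in probe, configure ops and limits, then rtc_register_device() as the final step, so early failures need no unregister and the device is never visible half-configured. A compile-and-run sketch with stubbed stand-ins, not the real RTC core:

    #include <stdio.h>

    struct rtc_device { const void *ops; int max_user_freq; };

    static struct rtc_device *alloc_rtc_stub(void)
    {
        static struct rtc_device rtc;     /* devm_: freed with the device */
        return &rtc;
    }

    static int register_rtc_stub(struct rtc_device *rtc)
    {
        return rtc->ops ? 0 : -22;        /* core rejects missing ops */
    }

    static int probe_stub(void)
    {
        struct rtc_device *rtc = alloc_rtc_stub();   /* step 1: allocate early */
        static const int dummy_ops = 0;

        /* step 2: request IRQs, set up clocks; failure needs no unregister */
        rtc->ops = &dummy_ops;
        rtc->max_user_freq = 1024;
        return register_rtc_stub(rtc);               /* step 3: register last */
    }

    int main(void)
    {
        printf("probe -> %d\n", probe_stub());
        return 0;
    }
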
4834 +diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
4835 +index 56007a3e7f110..fab09455ba944 100644
4836 +--- a/drivers/s390/block/dasd_fba.c
4837 ++++ b/drivers/s390/block/dasd_fba.c
4838 +@@ -40,6 +40,7 @@
4839 + MODULE_LICENSE("GPL");
4840 +
4841 + static struct dasd_discipline dasd_fba_discipline;
4842 ++static void *dasd_fba_zero_page;
4843 +
4844 + struct dasd_fba_private {
4845 + struct dasd_fba_characteristics rdc_data;
4846 +@@ -270,7 +271,7 @@ static void ccw_write_zero(struct ccw1 *ccw, int count)
4847 + ccw->cmd_code = DASD_FBA_CCW_WRITE;
4848 + ccw->flags |= CCW_FLAG_SLI;
4849 + ccw->count = count;
4850 +- ccw->cda = (__u32) (addr_t) page_to_phys(ZERO_PAGE(0));
4851 ++ ccw->cda = (__u32) (addr_t) dasd_fba_zero_page;
4852 + }
4853 +
4854 + /*
4855 +@@ -811,6 +812,11 @@ dasd_fba_init(void)
4856 + int ret;
4857 +
4858 + ASCEBC(dasd_fba_discipline.ebcname, 4);
4859 ++
4860 ++ dasd_fba_zero_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
4861 ++ if (!dasd_fba_zero_page)
4862 ++ return -ENOMEM;
4863 ++
4864 + ret = ccw_driver_register(&dasd_fba_driver);
4865 + if (!ret)
4866 + wait_for_device_probe();
4867 +@@ -822,6 +828,7 @@ static void __exit
4868 + dasd_fba_cleanup(void)
4869 + {
4870 + ccw_driver_unregister(&dasd_fba_driver);
4871 ++ free_page((unsigned long)dasd_fba_zero_page);
4872 + }
4873 +
4874 + module_init(dasd_fba_init);
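
[Context note] The dasd_fba hunk above stops pointing the CCW at the shared kernel zero page; since the cda field written in ccw_write_zero() is a 32-bit channel address, the buffer presumably must be low-memory DMA-capable, hence a dedicated GFP_DMA page. Its lifetime is paired with the module: allocated in init, freed in exit. A userspace stand-in of that pairing, not the dasd driver:

    #include <stdlib.h>

    static void *zero_page;

    static int fba_init_stub(void)
    {
        zero_page = calloc(1, 4096);      /* get_zeroed_page(GFP_KERNEL|GFP_DMA) */
        return zero_page ? 0 : -12;       /* -ENOMEM */
    }

    static void fba_exit_stub(void)
    {
        free(zero_page);                  /* free_page(...) in the real driver */
    }

    int main(void)
    {
        if (fba_init_stub())
            return 1;
        fba_exit_stub();
        return 0;
    }
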
4875 +diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
4876 +index 23c24a699cefe..b7cb897cd83e0 100644
4877 +--- a/drivers/s390/crypto/zcrypt_api.c
4878 ++++ b/drivers/s390/crypto/zcrypt_api.c
4879 +@@ -915,7 +915,8 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
4880 + if (!reqcnt)
4881 + return -ENOMEM;
4882 + zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
4883 +- if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
4884 ++ if (copy_to_user((int __user *) arg, reqcnt,
4885 ++ sizeof(u32) * AP_DEVICES))
4886 + rc = -EFAULT;
4887 + kfree(reqcnt);
4888 + return rc;
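
[Context note] The zcrypt hunk above fixes a classic C pitfall: reqcnt is a kcalloc'd pointer, so sizeof(reqcnt) is the size of a pointer (typically 8), not the sizeof(u32) * AP_DEVICES bytes that were meant to be copied to userspace. A standalone demo of the bug class:

    #include <stdio.h>
    #include <stdlib.h>

    #define AP_DEVICES 256

    static void show(unsigned int *reqcnt)
    {
        printf("sizeof(reqcnt) as a pointer: %zu\n", sizeof(reqcnt));
        printf("intended copy size:          %zu\n",
               sizeof(unsigned int) * AP_DEVICES);
    }

    int main(void)
    {
        unsigned int *reqcnt = calloc(AP_DEVICES, sizeof(*reqcnt));
        if (!reqcnt)
            return 1;
        show(reqcnt);   /* 8 vs 1024 on a typical 64-bit build */
        free(reqcnt);
        return 0;
    }
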
4889 +diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
4890 +index 6e356325d8d98..54717fb84a54c 100644
4891 +--- a/drivers/scsi/aacraid/aachba.c
4892 ++++ b/drivers/scsi/aacraid/aachba.c
4893 +@@ -2481,13 +2481,13 @@ static int aac_read(struct scsi_cmnd * scsicmd)
4894 + scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
4895 + SAM_STAT_CHECK_CONDITION;
4896 + set_sense(&dev->fsa_dev[cid].sense_data,
4897 +- HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
4898 ++ ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE,
4899 + ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
4900 + memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
4901 + min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
4902 + SCSI_SENSE_BUFFERSIZE));
4903 + scsicmd->scsi_done(scsicmd);
4904 +- return 1;
4905 ++ return 0;
4906 + }
4907 +
4908 + dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
4909 +@@ -2573,13 +2573,13 @@ static int aac_write(struct scsi_cmnd * scsicmd)
4910 + scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
4911 + SAM_STAT_CHECK_CONDITION;
4912 + set_sense(&dev->fsa_dev[cid].sense_data,
4913 +- HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
4914 ++ ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE,
4915 + ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
4916 + memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
4917 + min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
4918 + SCSI_SENSE_BUFFERSIZE));
4919 + scsicmd->scsi_done(scsicmd);
4920 +- return 1;
4921 ++ return 0;
4922 + }
4923 +
4924 + dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
4925 +diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
4926 +index b7588de4484e5..4cb6ee6e1212e 100644
4927 +--- a/drivers/scsi/aacraid/commsup.c
4928 ++++ b/drivers/scsi/aacraid/commsup.c
4929 +@@ -743,7 +743,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
4930 + hbacmd->request_id =
4931 + cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
4932 + fibptr->flags |= FIB_CONTEXT_FLAG_SCSI_CMD;
4933 +- } else if (command != HBA_IU_TYPE_SCSI_TM_REQ)
4934 ++ } else
4935 + return -EINVAL;
4936 +
4937 +
4938 +diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
4939 +index 1046947064a0b..eecffc03084c0 100644
4940 +--- a/drivers/scsi/aacraid/linit.c
4941 ++++ b/drivers/scsi/aacraid/linit.c
4942 +@@ -736,7 +736,11 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
4943 + status = aac_hba_send(HBA_IU_TYPE_SCSI_TM_REQ, fib,
4944 + (fib_callback) aac_hba_callback,
4945 + (void *) cmd);
4946 +-
4947 ++ if (status != -EINPROGRESS) {
4948 ++ aac_fib_complete(fib);
4949 ++ aac_fib_free(fib);
4950 ++ return ret;
4951 ++ }
4952 + /* Wait up to 15 secs for completion */
4953 + for (count = 0; count < 15; ++count) {
4954 + if (cmd->SCp.sent_command) {
4955 +@@ -915,11 +919,11 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd)
4956 +
4957 + info = &aac->hba_map[bus][cid];
4958 +
4959 +- if (info->devtype != AAC_DEVTYPE_NATIVE_RAW &&
4960 +- info->reset_state > 0)
4961 ++ if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW &&
4962 ++ !(info->reset_state > 0)))
4963 + return FAILED;
4964 +
4965 +- pr_err("%s: Host adapter reset request. SCSI hang ?\n",
4966 ++ pr_err("%s: Host device reset request. SCSI hang ?\n",
4967 + AAC_DRIVERNAME);
4968 +
4969 + fib = aac_fib_alloc(aac);
4970 +@@ -934,7 +938,12 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd)
4971 + status = aac_hba_send(command, fib,
4972 + (fib_callback) aac_tmf_callback,
4973 + (void *) info);
4974 +-
4975 ++ if (status != -EINPROGRESS) {
4976 ++ info->reset_state = 0;
4977 ++ aac_fib_complete(fib);
4978 ++ aac_fib_free(fib);
4979 ++ return ret;
4980 ++ }
4981 + /* Wait up to 15 seconds for completion */
4982 + for (count = 0; count < 15; ++count) {
4983 + if (info->reset_state == 0) {
4984 +@@ -973,11 +982,11 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd)
4985 +
4986 + info = &aac->hba_map[bus][cid];
4987 +
4988 +- if (info->devtype != AAC_DEVTYPE_NATIVE_RAW &&
4989 +- info->reset_state > 0)
4990 ++ if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW &&
4991 ++ !(info->reset_state > 0)))
4992 + return FAILED;
4993 +
4994 +- pr_err("%s: Host adapter reset request. SCSI hang ?\n",
4995 ++ pr_err("%s: Host target reset request. SCSI hang ?\n",
4996 + AAC_DRIVERNAME);
4997 +
4998 + fib = aac_fib_alloc(aac);
4999 +@@ -994,6 +1003,13 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd)
5000 + (fib_callback) aac_tmf_callback,
5001 + (void *) info);
5002 +
5003 ++ if (status != -EINPROGRESS) {
5004 ++ info->reset_state = 0;
5005 ++ aac_fib_complete(fib);
5006 ++ aac_fib_free(fib);
5007 ++ return ret;
5008 ++ }
5009 ++
5010 + /* Wait up to 15 seconds for completion */
5011 + for (count = 0; count < 15; ++count) {
5012 + if (info->reset_state <= 0) {
5013 +@@ -1046,7 +1062,7 @@ static int aac_eh_bus_reset(struct scsi_cmnd* cmd)
5014 + }
5015 + }
5016 +
5017 +- pr_err("%s: Host adapter reset request. SCSI hang ?\n", AAC_DRIVERNAME);
5018 ++ pr_err("%s: Host bus reset request. SCSI hang ?\n", AAC_DRIVERNAME);
5019 +
5020 + /*
5021 + * Check the health of the controller
5022 +@@ -1604,7 +1620,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
5023 + struct Scsi_Host *shost;
5024 + struct aac_dev *aac;
5025 + struct list_head *insert = &aac_devices;
5026 +- int error = -ENODEV;
5027 ++ int error;
5028 + int unique_id = 0;
5029 + u64 dmamask;
5030 + int mask_bits = 0;
5031 +@@ -1629,7 +1645,6 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
5032 + error = pci_enable_device(pdev);
5033 + if (error)
5034 + goto out;
5035 +- error = -ENODEV;
5036 +
5037 + if (!(aac_drivers[index].quirks & AAC_QUIRK_SRC)) {
5038 + error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5039 +@@ -1661,8 +1676,10 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
5040 + pci_set_master(pdev);
5041 +
5042 + shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev));
5043 +- if (!shost)
5044 ++ if (!shost) {
5045 ++ error = -ENOMEM;
5046 + goto out_disable_pdev;
5047 ++ }
5048 +
5049 + shost->irq = pdev->irq;
5050 + shost->unique_id = unique_id;
5051 +@@ -1687,8 +1704,11 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
5052 + aac->fibs = kcalloc(shost->can_queue + AAC_NUM_MGT_FIB,
5053 + sizeof(struct fib),
5054 + GFP_KERNEL);
5055 +- if (!aac->fibs)
5056 ++ if (!aac->fibs) {
5057 ++ error = -ENOMEM;
5058 + goto out_free_host;
5059 ++ }
5060 ++
5061 + spin_lock_init(&aac->fib_lock);
5062 +
5063 + mutex_init(&aac->ioctl_mutex);
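
[Context note] The aacraid eh hunks above all add the same guard: aac_hba_send() returns -EINPROGRESS when the command was actually queued; for any other return no completion callback will ever fire, so waiting 15 seconds is pointless and the fib must be completed and freed on the spot. A stand-in sketch of that pattern (types, stubs, and the 0x2002/0x2003 SCSI EH result codes are illustrative, not the aacraid API):

    #include <stdio.h>

    struct fib { int in_use; };

    static void fib_complete_stub(struct fib *f) { f->in_use = 0; }
    static void fib_free_stub(struct fib *f)     { (void)f; }
    static int  hba_send_stub(struct fib *f)     { (void)f; return -22; }

    static int eh_abort_sketch(void)
    {
        struct fib f = { .in_use = 1 };
        int status = hba_send_stub(&f);

        if (status != -115) {            /* -EINPROGRESS: not queued */
            fib_complete_stub(&f);       /* no callback will ever fire */
            fib_free_stub(&f);
            return 0x2003;               /* FAILED */
        }
        /* otherwise poll up to 15 s for the completion callback */
        return 0x2002;                   /* SUCCESS */
    }

    int main(void)
    {
        printf("eh result %#x\n", eh_abort_sketch());
        return 0;
    }
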
5064 +diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
5065 +index f987c40c47a13..443813feaef47 100644
5066 +--- a/drivers/scsi/cxlflash/main.c
5067 ++++ b/drivers/scsi/cxlflash/main.c
5068 +@@ -3749,6 +3749,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
5069 + cfg->afu_cookie = cfg->ops->create_afu(pdev);
5070 + if (unlikely(!cfg->afu_cookie)) {
5071 + dev_err(dev, "%s: create_afu failed\n", __func__);
5072 ++ rc = -ENOMEM;
5073 + goto out_remove;
5074 + }
5075 +
5076 +diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
5077 +index 73ffc16ec0225..b521fc7650cb9 100644
5078 +--- a/drivers/scsi/fnic/fnic_scsi.c
5079 ++++ b/drivers/scsi/fnic/fnic_scsi.c
5080 +@@ -1034,7 +1034,8 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
5081 + atomic64_inc(&fnic_stats->io_stats.io_completions);
5082 +
5083 +
5084 +- io_duration_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time);
5085 ++ io_duration_time = jiffies_to_msecs(jiffies) -
5086 ++ jiffies_to_msecs(start_time);
5087 +
5088 + if(io_duration_time <= 10)
5089 + atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec);
5090 +diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
5091 +index f570b8c5d857c..11de2198bb87d 100644
5092 +--- a/drivers/scsi/hpsa.c
5093 ++++ b/drivers/scsi/hpsa.c
5094 +@@ -507,6 +507,12 @@ static ssize_t host_store_rescan(struct device *dev,
5095 + return count;
5096 + }
5097 +
5098 ++static void hpsa_turn_off_ioaccel_for_device(struct hpsa_scsi_dev_t *device)
5099 ++{
5100 ++ device->offload_enabled = 0;
5101 ++ device->offload_to_be_enabled = 0;
5102 ++}
5103 ++
5104 + static ssize_t host_show_firmware_revision(struct device *dev,
5105 + struct device_attribute *attr, char *buf)
5106 + {
5107 +@@ -1743,8 +1749,7 @@ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
5108 + __func__,
5109 + h->scsi_host->host_no, logical_drive->bus,
5110 + logical_drive->target, logical_drive->lun);
5111 +- logical_drive->offload_enabled = 0;
5112 +- logical_drive->offload_to_be_enabled = 0;
5113 ++ hpsa_turn_off_ioaccel_for_device(logical_drive);
5114 + logical_drive->queue_depth = 8;
5115 + }
5116 + }
5117 +@@ -2496,8 +2501,7 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
5118 + IOACCEL2_SERV_RESPONSE_FAILURE) {
5119 + if (c2->error_data.status ==
5120 + IOACCEL2_STATUS_SR_IOACCEL_DISABLED) {
5121 +- dev->offload_enabled = 0;
5122 +- dev->offload_to_be_enabled = 0;
5123 ++ hpsa_turn_off_ioaccel_for_device(dev);
5124 + }
5125 +
5126 + return hpsa_retry_cmd(h, c);
5127 +@@ -3676,10 +3680,17 @@ static void hpsa_get_ioaccel_status(struct ctlr_info *h,
5128 + this_device->offload_config =
5129 + !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
5130 + if (this_device->offload_config) {
5131 +- this_device->offload_to_be_enabled =
5132 ++ bool offload_enabled =
5133 + !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
5134 +- if (hpsa_get_raid_map(h, scsi3addr, this_device))
5135 +- this_device->offload_to_be_enabled = 0;
5136 ++ /*
5137 ++ * Check to see if offload can be enabled.
5138 ++ */
5139 ++ if (offload_enabled) {
5140 ++ rc = hpsa_get_raid_map(h, scsi3addr, this_device);
5141 ++ if (rc) /* could not load raid_map */
5142 ++ goto out;
5143 ++ this_device->offload_to_be_enabled = 1;
5144 ++ }
5145 + }
5146 +
5147 + out:
5148 +@@ -3998,8 +4009,7 @@ static int hpsa_update_device_info(struct ctlr_info *h,
5149 + } else {
5150 + this_device->raid_level = RAID_UNKNOWN;
5151 + this_device->offload_config = 0;
5152 +- this_device->offload_enabled = 0;
5153 +- this_device->offload_to_be_enabled = 0;
5154 ++ hpsa_turn_off_ioaccel_for_device(this_device);
5155 + this_device->hba_ioaccel_enabled = 0;
5156 + this_device->volume_offline = 0;
5157 + this_device->queue_depth = h->nr_cmds;
5158 +@@ -5213,8 +5223,12 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
5159 + /* Handles load balance across RAID 1 members.
5160 + * (2-drive R1 and R10 with even # of drives.)
5161 + * Appropriate for SSDs, not optimal for HDDs
5162 ++ * Ensure we have the correct raid_map.
5163 + */
5164 +- BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
5165 ++ if (le16_to_cpu(map->layout_map_count) != 2) {
5166 ++ hpsa_turn_off_ioaccel_for_device(dev);
5167 ++ return IO_ACCEL_INELIGIBLE;
5168 ++ }
5169 + if (dev->offload_to_mirror)
5170 + map_index += le16_to_cpu(map->data_disks_per_row);
5171 + dev->offload_to_mirror = !dev->offload_to_mirror;
5172 +@@ -5222,8 +5236,12 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
5173 + case HPSA_RAID_ADM:
5174 + /* Handles N-way mirrors (R1-ADM)
5175 + * and R10 with # of drives divisible by 3.)
5176 ++ * Ensure we have the correct raid_map.
5177 + */
5178 +- BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
5179 ++ if (le16_to_cpu(map->layout_map_count) != 3) {
5180 ++ hpsa_turn_off_ioaccel_for_device(dev);
5181 ++ return IO_ACCEL_INELIGIBLE;
5182 ++ }
5183 +
5184 + offload_to_mirror = dev->offload_to_mirror;
5185 + raid_map_helper(map, offload_to_mirror,
5186 +@@ -5248,7 +5266,10 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
5187 + r5or6_blocks_per_row =
5188 + le16_to_cpu(map->strip_size) *
5189 + le16_to_cpu(map->data_disks_per_row);
5190 +- BUG_ON(r5or6_blocks_per_row == 0);
5191 ++ if (r5or6_blocks_per_row == 0) {
5192 ++ hpsa_turn_off_ioaccel_for_device(dev);
5193 ++ return IO_ACCEL_INELIGIBLE;
5194 ++ }
5195 + stripesize = r5or6_blocks_per_row *
5196 + le16_to_cpu(map->layout_map_count);
5197 + #if BITS_PER_LONG == 32
5198 +@@ -8218,7 +8239,7 @@ static int detect_controller_lockup(struct ctlr_info *h)
5199 + *
5200 + * Called from monitor controller worker (hpsa_event_monitor_worker)
5201 + *
5202 +- * A Volume (or Volumes that comprise an Array set may be undergoing a
5203 ++ * A Volume (or Volumes that comprise an Array set) may be undergoing a
5204 + * transformation, so we will be turning off ioaccel for all volumes that
5205 + * make up the Array.
5206 + */
5207 +@@ -8241,6 +8262,9 @@ static void hpsa_set_ioaccel_status(struct ctlr_info *h)
5208 + * Run through current device list used during I/O requests.
5209 + */
5210 + for (i = 0; i < h->ndevices; i++) {
5211 ++ int offload_to_be_enabled = 0;
5212 ++ int offload_config = 0;
5213 ++
5214 + device = h->dev[i];
5215 +
5216 + if (!device)
5217 +@@ -8258,25 +8282,35 @@ static void hpsa_set_ioaccel_status(struct ctlr_info *h)
5218 + continue;
5219 +
5220 + ioaccel_status = buf[IOACCEL_STATUS_BYTE];
5221 +- device->offload_config =
5222 ++
5223 ++ /*
5224 ++ * Check if offload is still configured on
5225 ++ */
5226 ++ offload_config =
5227 + !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
5228 +- if (device->offload_config)
5229 +- device->offload_to_be_enabled =
5230 ++ /*
5231 ++ * If offload is configured on, check to see if ioaccel
5232 ++ * needs to be enabled.
5233 ++ */
5234 ++ if (offload_config)
5235 ++ offload_to_be_enabled =
5236 + !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
5237 +
5238 ++ /*
5239 ++ * If ioaccel is to be re-enabled, re-enable later during the
5240 ++ * scan operation so the driver can get a fresh raidmap
5241 ++ * before turning ioaccel back on.
5242 ++ */
5243 ++ if (offload_to_be_enabled)
5244 ++ continue;
5245 ++
5246 + /*
5247 + * Immediately turn off ioaccel for any volume the
5248 + * controller tells us to. Some of the reasons could be:
5249 + * transformation - change to the LVs of an Array.
5250 + * degraded volume - component failure
5251 +- *
5252 +- * If ioaccel is to be re-enabled, re-enable later during the
5253 +- * scan operation so the driver can get a fresh raidmap
5254 +- * before turning ioaccel back on.
5255 +- *
5256 + */
5257 +- if (!device->offload_to_be_enabled)
5258 +- device->offload_enabled = 0;
5259 ++ hpsa_turn_off_ioaccel_for_device(device);
5260 + }
5261 +
5262 + kfree(buf);
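
[Context note] The hpsa hunks above replace three BUG_ON() checks on the raid map with graceful degradation: a map that does not match the RAID level now disables ioaccel for that volume and returns IO_ACCEL_INELIGIBLE, sending the I/O down the normal RAID path instead of crashing the kernel. A simplified stand-in of the conversion, not the hpsa API:

    #include <stdio.h>

    #define IO_ACCEL_INELIGIBLE 1
    struct dev { int offload_enabled, offload_to_be_enabled; };

    static void turn_off_ioaccel(struct dev *d)
    {
        d->offload_enabled = 0;          /* one helper, used everywhere */
        d->offload_to_be_enabled = 0;
    }

    static int raid1_map(struct dev *d, int layout_map_count)
    {
        if (layout_map_count != 2) {     /* was BUG_ON(...) */
            turn_off_ioaccel(d);
            return IO_ACCEL_INELIGIBLE;  /* fall back to the normal path */
        }
        return 0;
    }

    int main(void)
    {
        struct dev d = { 1, 1 };
        printf("map result %d, offload now %d\n",
               raid1_map(&d, 3), d.offload_enabled);
        return 0;
    }

The new hpsa_turn_off_ioaccel_for_device() helper also replaces the duplicated pair of assignments in the other hunks, so the disable policy lives in one place.
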
5263 +diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
5264 +index 90a748551ede5..2b3239765c249 100644
5265 +--- a/drivers/scsi/libfc/fc_rport.c
5266 ++++ b/drivers/scsi/libfc/fc_rport.c
5267 +@@ -145,8 +145,10 @@ struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
5268 + lockdep_assert_held(&lport->disc.disc_mutex);
5269 +
5270 + rdata = fc_rport_lookup(lport, port_id);
5271 +- if (rdata)
5272 ++ if (rdata) {
5273 ++ kref_put(&rdata->kref, fc_rport_destroy);
5274 + return rdata;
5275 ++ }
5276 +
5277 + if (lport->rport_priv_size > 0)
5278 + rport_priv_size = lport->rport_priv_size;
5279 +@@ -493,10 +495,11 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
5280 +
5281 + fc_rport_state_enter(rdata, RPORT_ST_DELETE);
5282 +
5283 +- kref_get(&rdata->kref);
5284 +- if (rdata->event == RPORT_EV_NONE &&
5285 +- !queue_work(rport_event_queue, &rdata->event_work))
5286 +- kref_put(&rdata->kref, fc_rport_destroy);
5287 ++ if (rdata->event == RPORT_EV_NONE) {
5288 ++ kref_get(&rdata->kref);
5289 ++ if (!queue_work(rport_event_queue, &rdata->event_work))
5290 ++ kref_put(&rdata->kref, fc_rport_destroy);
5291 ++ }
5292 +
5293 + rdata->event = event;
5294 + }
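
[Context note] The fc_rport hunk above balances a reference count: fc_rport_lookup() returns its result with an extra kref held, so when fc_rport_create() hands back an already-existing rdata it must drop that reference, otherwise every duplicate create leaks one. A tiny stand-in model (not the libfc API):

    #include <stdio.h>

    struct rdata { int refs; };

    static struct rdata *lookup_stub(struct rdata *existing)
    {
        if (existing)
            existing->refs++;            /* lookup takes a reference */
        return existing;
    }

    static struct rdata *create_stub(struct rdata *existing)
    {
        struct rdata *r = lookup_stub(existing);
        if (r) {
            r->refs--;                   /* the fix: return lookup's ref */
            return r;
        }
        /* ... otherwise allocate a fresh rdata with refs = 1 ... */
        return NULL;
    }

    int main(void)
    {
        struct rdata r = { .refs = 1 };
        create_stub(&r);
        printf("refs = %d\n", r.refs);   /* stays 1: no leak */
        return 0;
    }
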
5295 +diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
5296 +index fe084d47ed9e5..3447d19d4147a 100644
5297 +--- a/drivers/scsi/lpfc/lpfc_attr.c
5298 ++++ b/drivers/scsi/lpfc/lpfc_attr.c
5299 +@@ -332,7 +332,6 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
5300 + if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE)
5301 + goto buffer_done;
5302 +
5303 +- rcu_read_lock();
5304 + scnprintf(tmp, sizeof(tmp),
5305 + "XRI Dist lpfc%d Total %d NVME %d SCSI %d ELS %d\n",
5306 + phba->brd_no,
5307 +@@ -341,7 +340,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
5308 + phba->sli4_hba.scsi_xri_max,
5309 + lpfc_sli4_get_els_iocb_cnt(phba));
5310 + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
5311 +- goto rcu_unlock_buf_done;
5312 ++ goto buffer_done;
5313 +
5314 + /* Port state is only one of two values for now. */
5315 + if (localport->port_id)
5316 +@@ -357,7 +356,9 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
5317 + wwn_to_u64(vport->fc_nodename.u.wwn),
5318 + localport->port_id, statep);
5319 + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
5320 +- goto rcu_unlock_buf_done;
5321 ++ goto buffer_done;
5322 ++
5323 ++ spin_lock_irq(shost->host_lock);
5324 +
5325 + list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5326 + nrport = NULL;
5327 +@@ -384,39 +385,39 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
5328 +
5329 + /* Tab in to show lport ownership. */
5330 + if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE)
5331 +- goto rcu_unlock_buf_done;
5332 ++ goto unlock_buf_done;
5333 + if (phba->brd_no >= 10) {
5334 + if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
5335 +- goto rcu_unlock_buf_done;
5336 ++ goto unlock_buf_done;
5337 + }
5338 +
5339 + scnprintf(tmp, sizeof(tmp), "WWPN x%llx ",
5340 + nrport->port_name);
5341 + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
5342 +- goto rcu_unlock_buf_done;
5343 ++ goto unlock_buf_done;
5344 +
5345 + scnprintf(tmp, sizeof(tmp), "WWNN x%llx ",
5346 + nrport->node_name);
5347 + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
5348 +- goto rcu_unlock_buf_done;
5349 ++ goto unlock_buf_done;
5350 +
5351 + scnprintf(tmp, sizeof(tmp), "DID x%06x ",
5352 + nrport->port_id);
5353 + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
5354 +- goto rcu_unlock_buf_done;
5355 ++ goto unlock_buf_done;
5356 +
5357 + /* An NVME rport can have multiple roles. */
5358 + if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) {
5359 + if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE)
5360 +- goto rcu_unlock_buf_done;
5361 ++ goto unlock_buf_done;
5362 + }
5363 + if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) {
5364 + if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE)
5365 +- goto rcu_unlock_buf_done;
5366 ++ goto unlock_buf_done;
5367 + }
5368 + if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) {
5369 + if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE)
5370 +- goto rcu_unlock_buf_done;
5371 ++ goto unlock_buf_done;
5372 + }
5373 + if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
5374 + FC_PORT_ROLE_NVME_TARGET |
5375 +@@ -424,14 +425,14 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
5376 + scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x",
5377 + nrport->port_role);
5378 + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
5379 +- goto rcu_unlock_buf_done;
5380 ++ goto unlock_buf_done;
5381 + }
5382 +
5383 + scnprintf(tmp, sizeof(tmp), "%s\n", statep);
5384 + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
5385 +- goto rcu_unlock_buf_done;
5386 ++ goto unlock_buf_done;
5387 + }
5388 +- rcu_read_unlock();
5389 ++ spin_unlock_irq(shost->host_lock);
5390 +
5391 + if (!lport)
5392 + goto buffer_done;
5393 +@@ -491,11 +492,11 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
5394 + atomic_read(&lport->cmpl_fcp_err));
5395 + strlcat(buf, tmp, PAGE_SIZE);
5396 +
5397 +- /* RCU is already unlocked. */
5398 ++ /* host_lock is already unlocked. */
5399 + goto buffer_done;
5400 +
5401 +- rcu_unlock_buf_done:
5402 +- rcu_read_unlock();
5403 ++ unlock_buf_done:
5404 ++ spin_unlock_irq(shost->host_lock);
5405 +
5406 + buffer_done:
5407 + len = strnlen(buf, PAGE_SIZE);
5408 +diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
5409 +index 384f5cd7c3c81..99b4ff78f9dce 100644
5410 +--- a/drivers/scsi/lpfc/lpfc_ct.c
5411 ++++ b/drivers/scsi/lpfc/lpfc_ct.c
5412 +@@ -1737,8 +1737,8 @@ lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
5413 + struct lpfc_fdmi_attr_entry *ae;
5414 + uint32_t size;
5415 +
5416 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5417 +- memset(ae, 0, sizeof(struct lpfc_name));
5418 ++ ae = &ad->AttrValue;
5419 ++ memset(ae, 0, sizeof(*ae));
5420 +
5421 + memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName,
5422 + sizeof(struct lpfc_name));
5423 +@@ -1754,8 +1754,8 @@ lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport,
5424 + struct lpfc_fdmi_attr_entry *ae;
5425 + uint32_t len, size;
5426 +
5427 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5428 +- memset(ae, 0, 256);
5429 ++ ae = &ad->AttrValue;
5430 ++ memset(ae, 0, sizeof(*ae));
5431 +
5432 + /* This string MUST be consistent with other FC platforms
5433 + * supported by Broadcom.
5434 +@@ -1779,8 +1779,8 @@ lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
5435 + struct lpfc_fdmi_attr_entry *ae;
5436 + uint32_t len, size;
5437 +
5438 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5439 +- memset(ae, 0, 256);
5440 ++ ae = &ad->AttrValue;
5441 ++ memset(ae, 0, sizeof(*ae));
5442 +
5443 + strncpy(ae->un.AttrString, phba->SerialNumber,
5444 + sizeof(ae->un.AttrString));
5445 +@@ -1801,8 +1801,8 @@ lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport,
5446 + struct lpfc_fdmi_attr_entry *ae;
5447 + uint32_t len, size;
5448 +
5449 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5450 +- memset(ae, 0, 256);
5451 ++ ae = &ad->AttrValue;
5452 ++ memset(ae, 0, sizeof(*ae));
5453 +
5454 + strncpy(ae->un.AttrString, phba->ModelName,
5455 + sizeof(ae->un.AttrString));
5456 +@@ -1822,8 +1822,8 @@ lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport,
5457 + struct lpfc_fdmi_attr_entry *ae;
5458 + uint32_t len, size;
5459 +
5460 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5461 +- memset(ae, 0, 256);
5462 ++ ae = &ad->AttrValue;
5463 ++ memset(ae, 0, sizeof(*ae));
5464 +
5465 + strncpy(ae->un.AttrString, phba->ModelDesc,
5466 + sizeof(ae->un.AttrString));
5467 +@@ -1845,8 +1845,8 @@ lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport,
5468 + struct lpfc_fdmi_attr_entry *ae;
5469 + uint32_t i, j, incr, size;
5470 +
5471 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5472 +- memset(ae, 0, 256);
5473 ++ ae = &ad->AttrValue;
5474 ++ memset(ae, 0, sizeof(*ae));
5475 +
5476 + /* Convert JEDEC ID to ascii for hardware version */
5477 + incr = vp->rev.biuRev;
5478 +@@ -1875,8 +1875,8 @@ lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport,
5479 + struct lpfc_fdmi_attr_entry *ae;
5480 + uint32_t len, size;
5481 +
5482 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5483 +- memset(ae, 0, 256);
5484 ++ ae = &ad->AttrValue;
5485 ++ memset(ae, 0, sizeof(*ae));
5486 +
5487 + strncpy(ae->un.AttrString, lpfc_release_version,
5488 + sizeof(ae->un.AttrString));
5489 +@@ -1897,8 +1897,8 @@ lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport,
5490 + struct lpfc_fdmi_attr_entry *ae;
5491 + uint32_t len, size;
5492 +
5493 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5494 +- memset(ae, 0, 256);
5495 ++ ae = &ad->AttrValue;
5496 ++ memset(ae, 0, sizeof(*ae));
5497 +
5498 + if (phba->sli_rev == LPFC_SLI_REV4)
5499 + lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
5500 +@@ -1922,8 +1922,8 @@ lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport,
5501 + struct lpfc_fdmi_attr_entry *ae;
5502 + uint32_t len, size;
5503 +
5504 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5505 +- memset(ae, 0, 256);
5506 ++ ae = &ad->AttrValue;
5507 ++ memset(ae, 0, sizeof(*ae));
5508 +
5509 + lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
5510 + len = strnlen(ae->un.AttrString,
5511 +@@ -1942,8 +1942,8 @@ lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport,
5512 + struct lpfc_fdmi_attr_entry *ae;
5513 + uint32_t len, size;
5514 +
5515 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5516 +- memset(ae, 0, 256);
5517 ++ ae = &ad->AttrValue;
5518 ++ memset(ae, 0, sizeof(*ae));
5519 +
5520 + snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s %s %s",
5521 + init_utsname()->sysname,
5522 +@@ -1965,7 +1965,7 @@ lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport,
5523 + struct lpfc_fdmi_attr_entry *ae;
5524 + uint32_t size;
5525 +
5526 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5527 ++ ae = &ad->AttrValue;
5528 +
5529 + ae->un.AttrInt = cpu_to_be32(LPFC_MAX_CT_SIZE);
5530 + size = FOURBYTES + sizeof(uint32_t);
5531 +@@ -1981,8 +1981,8 @@ lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport,
5532 + struct lpfc_fdmi_attr_entry *ae;
5533 + uint32_t len, size;
5534 +
5535 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5536 +- memset(ae, 0, 256);
5537 ++ ae = &ad->AttrValue;
5538 ++ memset(ae, 0, sizeof(*ae));
5539 +
5540 + len = lpfc_vport_symbolic_node_name(vport,
5541 + ae->un.AttrString, 256);
5542 +@@ -2000,7 +2000,7 @@ lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport,
5543 + struct lpfc_fdmi_attr_entry *ae;
5544 + uint32_t size;
5545 +
5546 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5547 ++ ae = &ad->AttrValue;
5548 +
5549 + /* Nothing is defined for this currently */
5550 + ae->un.AttrInt = cpu_to_be32(0);
5551 +@@ -2017,7 +2017,7 @@ lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport,
5552 + struct lpfc_fdmi_attr_entry *ae;
5553 + uint32_t size;
5554 +
5555 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5556 ++ ae = &ad->AttrValue;
5557 +
5558 + /* Each driver instance corresponds to a single port */
5559 + ae->un.AttrInt = cpu_to_be32(1);
5560 +@@ -2034,8 +2034,8 @@ lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport,
5561 + struct lpfc_fdmi_attr_entry *ae;
5562 + uint32_t size;
5563 +
5564 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5565 +- memset(ae, 0, sizeof(struct lpfc_name));
5566 ++ ae = &ad->AttrValue;
5567 ++ memset(ae, 0, sizeof(*ae));
5568 +
5569 + memcpy(&ae->un.AttrWWN, &vport->fabric_nodename,
5570 + sizeof(struct lpfc_name));
5571 +@@ -2053,8 +2053,8 @@ lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport,
5572 + struct lpfc_fdmi_attr_entry *ae;
5573 + uint32_t len, size;
5574 +
5575 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5576 +- memset(ae, 0, 256);
5577 ++ ae = &ad->AttrValue;
5578 ++ memset(ae, 0, sizeof(*ae));
5579 +
5580 + lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
5581 + len = strnlen(ae->un.AttrString,
5582 +@@ -2073,7 +2073,7 @@ lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport,
5583 + struct lpfc_fdmi_attr_entry *ae;
5584 + uint32_t size;
5585 +
5586 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5587 ++ ae = &ad->AttrValue;
5588 +
5589 + /* Driver doesn't have access to this information */
5590 + ae->un.AttrInt = cpu_to_be32(0);
5591 +@@ -2090,8 +2090,8 @@ lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport,
5592 + struct lpfc_fdmi_attr_entry *ae;
5593 + uint32_t len, size;
5594 +
5595 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5596 +- memset(ae, 0, 256);
5597 ++ ae = &ad->AttrValue;
5598 ++ memset(ae, 0, sizeof(*ae));
5599 +
5600 + strncpy(ae->un.AttrString, "EMULEX",
5601 + sizeof(ae->un.AttrString));
5602 +@@ -2112,8 +2112,8 @@ lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport,
5603 + struct lpfc_fdmi_attr_entry *ae;
5604 + uint32_t size;
5605 +
5606 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5607 +- memset(ae, 0, 32);
5608 ++ ae = &ad->AttrValue;
5609 ++ memset(ae, 0, sizeof(*ae));
5610 +
5611 + ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */
5612 + ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
5613 +@@ -2134,7 +2134,7 @@ lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport,
5614 + struct lpfc_fdmi_attr_entry *ae;
5615 + uint32_t size;
5616 +
5617 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5618 ++ ae = &ad->AttrValue;
5619 +
5620 + ae->un.AttrInt = 0;
5621 + if (!(phba->hba_flag & HBA_FCOE_MODE)) {
5622 +@@ -2186,7 +2186,7 @@ lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport,
5623 + struct lpfc_fdmi_attr_entry *ae;
5624 + uint32_t size;
5625 +
5626 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5627 ++ ae = &ad->AttrValue;
5628 +
5629 + if (!(phba->hba_flag & HBA_FCOE_MODE)) {
5630 + switch (phba->fc_linkspeed) {
5631 +@@ -2253,7 +2253,7 @@ lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport,
5632 + struct lpfc_fdmi_attr_entry *ae;
5633 + uint32_t size;
5634 +
5635 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5636 ++ ae = &ad->AttrValue;
5637 +
5638 + hsp = (struct serv_parm *)&vport->fc_sparam;
5639 + ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb) << 8) |
5640 +@@ -2273,8 +2273,8 @@ lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport,
5641 + struct lpfc_fdmi_attr_entry *ae;
5642 + uint32_t len, size;
5643 +
5644 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5645 +- memset(ae, 0, 256);
5646 ++ ae = &ad->AttrValue;
5647 ++ memset(ae, 0, sizeof(*ae));
5648 +
5649 + snprintf(ae->un.AttrString, sizeof(ae->un.AttrString),
5650 + "/sys/class/scsi_host/host%d", shost->host_no);
5651 +@@ -2294,8 +2294,8 @@ lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport,
5652 + struct lpfc_fdmi_attr_entry *ae;
5653 + uint32_t len, size;
5654 +
5655 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5656 +- memset(ae, 0, 256);
5657 ++ ae = &ad->AttrValue;
5658 ++ memset(ae, 0, sizeof(*ae));
5659 +
5660 + snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s",
5661 + init_utsname()->nodename);
5662 +@@ -2315,8 +2315,8 @@ lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport,
5663 + struct lpfc_fdmi_attr_entry *ae;
5664 + uint32_t size;
5665 +
5666 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5667 +- memset(ae, 0, sizeof(struct lpfc_name));
5668 ++ ae = &ad->AttrValue;
5669 ++ memset(ae, 0, sizeof(*ae));
5670 +
5671 + memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName,
5672 + sizeof(struct lpfc_name));
5673 +@@ -2333,8 +2333,8 @@ lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport,
5674 + struct lpfc_fdmi_attr_entry *ae;
5675 + uint32_t size;
5676 +
5677 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5678 +- memset(ae, 0, sizeof(struct lpfc_name));
5679 ++ ae = &ad->AttrValue;
5680 ++ memset(ae, 0, sizeof(*ae));
5681 +
5682 + memcpy(&ae->un.AttrWWN, &vport->fc_sparam.portName,
5683 + sizeof(struct lpfc_name));
5684 +@@ -2351,8 +2351,8 @@ lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport,
5685 + struct lpfc_fdmi_attr_entry *ae;
5686 + uint32_t len, size;
5687 +
5688 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5689 +- memset(ae, 0, 256);
5690 ++ ae = &ad->AttrValue;
5691 ++ memset(ae, 0, sizeof(*ae));
5692 +
5693 + len = lpfc_vport_symbolic_port_name(vport, ae->un.AttrString, 256);
5694 + len += (len & 3) ? (4 - (len & 3)) : 4;
5695 +@@ -2370,7 +2370,7 @@ lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport,
5696 + struct lpfc_fdmi_attr_entry *ae;
5697 + uint32_t size;
5698 +
5699 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5700 ++ ae = &ad->AttrValue;
5701 + if (phba->fc_topology == LPFC_TOPOLOGY_LOOP)
5702 + ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTTYPE_NLPORT);
5703 + else
5704 +@@ -2388,7 +2388,7 @@ lpfc_fdmi_port_attr_class(struct lpfc_vport *vport,
5705 + struct lpfc_fdmi_attr_entry *ae;
5706 + uint32_t size;
5707 +
5708 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5709 ++ ae = &ad->AttrValue;
5710 + ae->un.AttrInt = cpu_to_be32(FC_COS_CLASS2 | FC_COS_CLASS3);
5711 + size = FOURBYTES + sizeof(uint32_t);
5712 + ad->AttrLen = cpu_to_be16(size);
5713 +@@ -2403,8 +2403,8 @@ lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport,
5714 + struct lpfc_fdmi_attr_entry *ae;
5715 + uint32_t size;
5716 +
5717 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5718 +- memset(ae, 0, sizeof(struct lpfc_name));
5719 ++ ae = &ad->AttrValue;
5720 ++ memset(ae, 0, sizeof(*ae));
5721 +
5722 + memcpy(&ae->un.AttrWWN, &vport->fabric_portname,
5723 + sizeof(struct lpfc_name));
5724 +@@ -2421,8 +2421,8 @@ lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport,
5725 + struct lpfc_fdmi_attr_entry *ae;
5726 + uint32_t size;
5727 +
5728 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5729 +- memset(ae, 0, 32);
5730 ++ ae = &ad->AttrValue;
5731 ++ memset(ae, 0, sizeof(*ae));
5732 +
5733 + ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */
5734 + ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
5735 +@@ -2442,7 +2442,7 @@ lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport,
5736 + struct lpfc_fdmi_attr_entry *ae;
5737 + uint32_t size;
5738 +
5739 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5740 ++ ae = &ad->AttrValue;
5741 + /* Link Up - operational */
5742 + ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTSTATE_ONLINE);
5743 + size = FOURBYTES + sizeof(uint32_t);
5744 +@@ -2458,7 +2458,7 @@ lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport,
5745 + struct lpfc_fdmi_attr_entry *ae;
5746 + uint32_t size;
5747 +
5748 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5749 ++ ae = &ad->AttrValue;
5750 + vport->fdmi_num_disc = lpfc_find_map_node(vport);
5751 + ae->un.AttrInt = cpu_to_be32(vport->fdmi_num_disc);
5752 + size = FOURBYTES + sizeof(uint32_t);
5753 +@@ -2474,7 +2474,7 @@ lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport,
5754 + struct lpfc_fdmi_attr_entry *ae;
5755 + uint32_t size;
5756 +
5757 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5758 ++ ae = &ad->AttrValue;
5759 + ae->un.AttrInt = cpu_to_be32(vport->fc_myDID);
5760 + size = FOURBYTES + sizeof(uint32_t);
5761 + ad->AttrLen = cpu_to_be16(size);
5762 +@@ -2489,8 +2489,8 @@ lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport,
5763 + struct lpfc_fdmi_attr_entry *ae;
5764 + uint32_t len, size;
5765 +
5766 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5767 +- memset(ae, 0, 256);
5768 ++ ae = &ad->AttrValue;
5769 ++ memset(ae, 0, sizeof(*ae));
5770 +
5771 + strncpy(ae->un.AttrString, "Smart SAN Initiator",
5772 + sizeof(ae->un.AttrString));
5773 +@@ -2510,8 +2510,8 @@ lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport,
5774 + struct lpfc_fdmi_attr_entry *ae;
5775 + uint32_t size;
5776 +
5777 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5778 +- memset(ae, 0, 256);
5779 ++ ae = &ad->AttrValue;
5780 ++ memset(ae, 0, sizeof(*ae));
5781 +
5782 + memcpy(&ae->un.AttrString, &vport->fc_sparam.nodeName,
5783 + sizeof(struct lpfc_name));
5784 +@@ -2531,8 +2531,8 @@ lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport,
5785 + struct lpfc_fdmi_attr_entry *ae;
5786 + uint32_t len, size;
5787 +
5788 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5789 +- memset(ae, 0, 256);
5790 ++ ae = &ad->AttrValue;
5791 ++ memset(ae, 0, sizeof(*ae));
5792 +
5793 + strncpy(ae->un.AttrString, "Smart SAN Version 2.0",
5794 + sizeof(ae->un.AttrString));
5795 +@@ -2553,8 +2553,8 @@ lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport,
5796 + struct lpfc_fdmi_attr_entry *ae;
5797 + uint32_t len, size;
5798 +
5799 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5800 +- memset(ae, 0, 256);
5801 ++ ae = &ad->AttrValue;
5802 ++ memset(ae, 0, sizeof(*ae));
5803 +
5804 + strncpy(ae->un.AttrString, phba->ModelName,
5805 + sizeof(ae->un.AttrString));
5806 +@@ -2573,7 +2573,7 @@ lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport,
5807 + struct lpfc_fdmi_attr_entry *ae;
5808 + uint32_t size;
5809 +
5810 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5811 ++ ae = &ad->AttrValue;
5812 +
5813 + /* SRIOV (type 3) is not supported */
5814 + if (vport->vpi)
5815 +@@ -2593,7 +2593,7 @@ lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport,
5816 + struct lpfc_fdmi_attr_entry *ae;
5817 + uint32_t size;
5818 +
5819 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5820 ++ ae = &ad->AttrValue;
5821 + ae->un.AttrInt = cpu_to_be32(0);
5822 + size = FOURBYTES + sizeof(uint32_t);
5823 + ad->AttrLen = cpu_to_be16(size);
5824 +@@ -2608,7 +2608,7 @@ lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport,
5825 + struct lpfc_fdmi_attr_entry *ae;
5826 + uint32_t size;
5827 +
5828 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5829 ++ ae = &ad->AttrValue;
5830 + ae->un.AttrInt = cpu_to_be32(1);
5831 + size = FOURBYTES + sizeof(uint32_t);
5832 + ad->AttrLen = cpu_to_be16(size);
5833 +@@ -2756,7 +2756,8 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
5834 + /* Registered Port List */
5835 + /* One entry (port) per adapter */
5836 + rh->rpl.EntryCnt = cpu_to_be32(1);
5837 +- memcpy(&rh->rpl.pe, &phba->pport->fc_sparam.portName,
5838 ++ memcpy(&rh->rpl.pe.PortName,
5839 ++ &phba->pport->fc_sparam.portName,
5840 + sizeof(struct lpfc_name));
5841 +
5842 + /* point to the HBA attribute block */
5843 +diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
5844 +index 009aa0eee0408..48d4d576d588e 100644
5845 +--- a/drivers/scsi/lpfc/lpfc_hw.h
5846 ++++ b/drivers/scsi/lpfc/lpfc_hw.h
5847 +@@ -1333,25 +1333,8 @@ struct fc_rdp_res_frame {
5848 + /* lpfc_sli_ct_request defines the CT_IU preamble for FDMI commands */
5849 + #define SLI_CT_FDMI_Subtypes 0x10 /* Management Service Subtype */
5850 +
5851 +-/*
5852 +- * Registered Port List Format
5853 +- */
5854 +-struct lpfc_fdmi_reg_port_list {
5855 +- uint32_t EntryCnt;
5856 +- uint32_t pe; /* Variable-length array */
5857 +-};
5858 +-
5859 +-
5860 + /* Definitions for HBA / Port attribute entries */
5861 +
5862 +-struct lpfc_fdmi_attr_def { /* Defined in TLV format */
5863 +- /* Structure is in Big Endian format */
5864 +- uint32_t AttrType:16;
5865 +- uint32_t AttrLen:16;
5866 +- uint32_t AttrValue; /* Marks start of Value (ATTRIBUTE_ENTRY) */
5867 +-};
5868 +-
5869 +-
5870 + /* Attribute Entry */
5871 + struct lpfc_fdmi_attr_entry {
5872 + union {
5873 +@@ -1362,7 +1345,13 @@ struct lpfc_fdmi_attr_entry {
5874 + } un;
5875 + };
5876 +
5877 +-#define LPFC_FDMI_MAX_AE_SIZE sizeof(struct lpfc_fdmi_attr_entry)
5878 ++struct lpfc_fdmi_attr_def { /* Defined in TLV format */
5879 ++ /* Structure is in Big Endian format */
5880 ++ uint32_t AttrType:16;
5881 ++ uint32_t AttrLen:16;
5882 ++ /* Marks start of Value (ATTRIBUTE_ENTRY) */
5883 ++ struct lpfc_fdmi_attr_entry AttrValue;
5884 ++} __packed;
5885 +
5886 + /*
5887 + * HBA Attribute Block
5888 +@@ -1386,13 +1375,20 @@ struct lpfc_fdmi_hba_ident {
5889 + struct lpfc_name PortName;
5890 + };
5891 +
5892 ++/*
5893 ++ * Registered Port List Format
5894 ++ */
5895 ++struct lpfc_fdmi_reg_port_list {
5896 ++ uint32_t EntryCnt;
5897 ++ struct lpfc_fdmi_port_entry pe;
5898 ++} __packed;
5899 ++
5900 + /*
5901 + * Register HBA(RHBA)
5902 + */
5903 + struct lpfc_fdmi_reg_hba {
5904 + struct lpfc_fdmi_hba_ident hi;
5905 +- struct lpfc_fdmi_reg_port_list rpl; /* variable-length array */
5906 +-/* struct lpfc_fdmi_attr_block ab; */
5907 ++ struct lpfc_fdmi_reg_port_list rpl;
5908 + };
5909 +
5910 + /*
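
[Context note] The lpfc_hw.h hunk above is what makes the long run of lpfc_ct.c changes possible: AttrValue stops being a bare uint32_t marker and becomes an embedded struct lpfc_fdmi_attr_entry, so every attribute helper can drop its cast and clear the field with sizeof(*ae) instead of hard-coded 256 or 32 byte counts. A simplified stand-in layout showing the idiom (not the real lpfc structures, which use bitfields and __packed):

    #include <stdio.h>
    #include <string.h>

    struct attr_entry {
        union {
            unsigned int  AttrInt;
            unsigned char AttrTypes[32];
            char          AttrString[256];
        } un;
    };

    struct attr_def {
        unsigned short AttrType;
        unsigned short AttrLen;
        struct attr_entry AttrValue;   /* embedded, not a u32 marker */
    };

    int main(void)
    {
        struct attr_def ad;
        struct attr_entry *ae = &ad.AttrValue;

        memset(ae, 0, sizeof(*ae));    /* always the field's true size */
        printf("sizeof(*ae) = %zu\n", sizeof(*ae));
        return 0;
    }

With the old layout, a magic 256 memset starting at a 4-byte AttrValue wrote past the structure; sizeof(*ae) cannot drift out of sync with the type.
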
5911 +diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
5912 +index a56a939792ac1..2ab351260e815 100644
5913 +--- a/drivers/scsi/lpfc/lpfc_sli.c
5914 ++++ b/drivers/scsi/lpfc/lpfc_sli.c
5915 +@@ -17413,6 +17413,10 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
5916 + list_add_tail(&iocbq->list, &first_iocbq->list);
5917 + }
5918 + }
5919 ++ /* Free the sequence's header buffer */
5920 ++ if (!first_iocbq)
5921 ++ lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
5922 ++
5923 + return first_iocbq;
5924 + }
5925 +
5926 +diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
5927 +index ba79b37d8cf7e..5becdde3ea324 100644
5928 +--- a/drivers/scsi/pm8001/pm8001_sas.c
5929 ++++ b/drivers/scsi/pm8001/pm8001_sas.c
5930 +@@ -1184,8 +1184,8 @@ int pm8001_abort_task(struct sas_task *task)
5931 + pm8001_ha = pm8001_find_ha_by_dev(dev);
5932 + device_id = pm8001_dev->device_id;
5933 + phy_id = pm8001_dev->attached_phy;
5934 +- rc = pm8001_find_tag(task, &tag);
5935 +- if (rc == 0) {
5936 ++ ret = pm8001_find_tag(task, &tag);
5937 ++ if (ret == 0) {
5938 + pm8001_printk("no tag for task:%p\n", task);
5939 + return TMF_RESP_FUNC_FAILED;
5940 + }
5941 +@@ -1223,26 +1223,50 @@ int pm8001_abort_task(struct sas_task *task)
5942 +
5943 + /* 2. Send Phy Control Hard Reset */
5944 + reinit_completion(&completion);
5945 ++ phy->port_reset_status = PORT_RESET_TMO;
5946 + phy->reset_success = false;
5947 + phy->enable_completion = &completion;
5948 + phy->reset_completion = &completion_reset;
5949 + ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
5950 + PHY_HARD_RESET);
5951 +- if (ret)
5952 +- goto out;
5953 +- PM8001_MSG_DBG(pm8001_ha,
5954 +- pm8001_printk("Waiting for local phy ctl\n"));
5955 +- wait_for_completion(&completion);
5956 +- if (!phy->reset_success)
5957 ++ if (ret) {
5958 ++ phy->enable_completion = NULL;
5959 ++ phy->reset_completion = NULL;
5960 + goto out;
5961 ++ }
5962 +
5963 +- /* 3. Wait for Port Reset complete / Port reset TMO */
5964 ++ /* In the case of the reset timeout/fail we still
5965 ++ * abort the command at the firmware. The assumption
5966 ++ * here is that the drive is off doing something so
5967 ++ * that it's not processing requests, and we want to
5968 ++ * avoid getting a completion for this and either
5969 ++ * leaking the task in libsas or losing the race and
5970 ++ * getting a double free.
5971 ++ */
5972 + PM8001_MSG_DBG(pm8001_ha,
5973 ++ pm8001_printk("Waiting for local phy ctl\n"));
5974 ++ ret = wait_for_completion_timeout(&completion,
5975 ++ PM8001_TASK_TIMEOUT * HZ);
5976 ++ if (!ret || !phy->reset_success) {
5977 ++ phy->enable_completion = NULL;
5978 ++ phy->reset_completion = NULL;
5979 ++ } else {
5980 ++ /* 3. Wait for Port Reset complete or
5981 ++ * Port reset TMO
5982 ++ */
5983 ++ PM8001_MSG_DBG(pm8001_ha,
5984 + pm8001_printk("Waiting for Port reset\n"));
5985 +- wait_for_completion(&completion_reset);
5986 +- if (phy->port_reset_status) {
5987 +- pm8001_dev_gone_notify(dev);
5988 +- goto out;
5989 ++ ret = wait_for_completion_timeout(
5990 ++ &completion_reset,
5991 ++ PM8001_TASK_TIMEOUT * HZ);
5992 ++ if (!ret)
5993 ++ phy->reset_completion = NULL;
5994 ++ WARN_ON(phy->port_reset_status ==
5995 ++ PORT_RESET_TMO);
5996 ++ if (phy->port_reset_status == PORT_RESET_TMO) {
5997 ++ pm8001_dev_gone_notify(dev);
5998 ++ goto out;
5999 ++ }
6000 + }
6001 +
6002 + /*
6003 +diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
6004 +index 751941a3ed303..aa451c8b49e56 100644
6005 +--- a/drivers/scsi/qedi/qedi_iscsi.c
6006 ++++ b/drivers/scsi/qedi/qedi_iscsi.c
6007 +@@ -1065,6 +1065,9 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
6008 + break;
6009 + }
6010 +
6011 ++ if (!abrt_conn)
6012 ++ wait_delay += qedi->pf_params.iscsi_pf_params.two_msl_timer;
6013 ++
6014 + qedi_ep->state = EP_STATE_DISCONN_START;
6015 + ret = qedi_ops->destroy_conn(qedi->cdev, qedi_ep->handle, abrt_conn);
6016 + if (ret) {
6017 +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
6018 +index eb10a5cacd90c..b2cbdd01ab10b 100644
6019 +--- a/drivers/scsi/ufs/ufshcd.c
6020 ++++ b/drivers/scsi/ufs/ufshcd.c
6021 +@@ -353,27 +353,27 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba,
6022 + u8 opcode = 0;
6023 + u32 intr, doorbell;
6024 + struct ufshcd_lrb *lrbp = &hba->lrb[tag];
6025 ++ struct scsi_cmnd *cmd = lrbp->cmd;
6026 + int transfer_len = -1;
6027 +
6028 + if (!trace_ufshcd_command_enabled()) {
6029 + /* trace UPIU W/O tracing command */
6030 +- if (lrbp->cmd)
6031 ++ if (cmd)
6032 + ufshcd_add_cmd_upiu_trace(hba, tag, str);
6033 + return;
6034 + }
6035 +
6036 +- if (lrbp->cmd) { /* data phase exists */
6037 ++ if (cmd) { /* data phase exists */
6038 + /* trace UPIU also */
6039 + ufshcd_add_cmd_upiu_trace(hba, tag, str);
6040 +- opcode = (u8)(*lrbp->cmd->cmnd);
6041 ++ opcode = cmd->cmnd[0];
6042 + if ((opcode == READ_10) || (opcode == WRITE_10)) {
6043 + /*
6044 + * Currently we only fully trace read(10) and write(10)
6045 + * commands
6046 + */
6047 +- if (lrbp->cmd->request && lrbp->cmd->request->bio)
6048 +- lba =
6049 +- lrbp->cmd->request->bio->bi_iter.bi_sector;
6050 ++ if (cmd->request && cmd->request->bio)
6051 ++ lba = cmd->request->bio->bi_iter.bi_sector;
6052 + transfer_len = be32_to_cpu(
6053 + lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
6054 + }
6055 +@@ -1910,12 +1910,12 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
6056 + {
6057 + hba->lrb[task_tag].issue_time_stamp = ktime_get();
6058 + hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
6059 ++ ufshcd_add_command_trace(hba, task_tag, "send");
6060 + ufshcd_clk_scaling_start_busy(hba);
6061 + __set_bit(task_tag, &hba->outstanding_reqs);
6062 + ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6063 + /* Make sure that doorbell is committed immediately */
6064 + wmb();
6065 +- ufshcd_add_command_trace(hba, task_tag, "send");
6066 + }
6067 +
6068 + /**
6069 +diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
6070 +index 256039ce561e6..81a3370551dbc 100644
6071 +--- a/drivers/staging/media/imx/imx-media-capture.c
6072 ++++ b/drivers/staging/media/imx/imx-media-capture.c
6073 +@@ -678,7 +678,7 @@ int imx_media_capture_device_register(struct imx_media_video_dev *vdev)
6074 + /* setup default format */
6075 + fmt_src.pad = priv->src_sd_pad;
6076 + fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE;
6077 +- v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt_src);
6078 ++ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt_src);
6079 + if (ret) {
6080 + v4l2_err(sd, "failed to get src_sd format\n");
6081 + goto unreg;
6082 +diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
6083 +index 17b4b9257b495..0ddf41b5a734a 100644
6084 +--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
6085 ++++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
6086 +@@ -1535,21 +1535,14 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe)
6087 +
6088 + /* Allocate new skb for releasing to upper layer */
6089 + sub_skb = dev_alloc_skb(nSubframe_Length + 12);
6090 +- if (sub_skb) {
6091 +- skb_reserve(sub_skb, 12);
6092 +- skb_put_data(sub_skb, pdata, nSubframe_Length);
6093 +- } else {
6094 +- sub_skb = skb_clone(prframe->pkt, GFP_ATOMIC);
6095 +- if (sub_skb) {
6096 +- sub_skb->data = pdata;
6097 +- sub_skb->len = nSubframe_Length;
6098 +- skb_set_tail_pointer(sub_skb, nSubframe_Length);
6099 +- } else {
6100 +- DBG_88E("skb_clone() Fail!!! , nr_subframes=%d\n", nr_subframes);
6101 +- break;
6102 +- }
6103 ++ if (!sub_skb) {
6104 ++ DBG_88E("dev_alloc_skb() Fail!!! , nr_subframes=%d\n", nr_subframes);
6105 ++ break;
6106 + }
6107 +
6108 ++ skb_reserve(sub_skb, 12);
6109 ++ skb_put_data(sub_skb, pdata, nSubframe_Length);
6110 ++
6111 + subframes[nr_subframes++] = sub_skb;
6112 +
6113 + if (nr_subframes >= MAX_SUBFRAME_COUNT) {
6114 +diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
6115 +index 4dc30e7890f6c..140386d7c75a3 100644
6116 +--- a/drivers/thermal/rcar_thermal.c
6117 ++++ b/drivers/thermal/rcar_thermal.c
6118 +@@ -505,8 +505,10 @@ static int rcar_thermal_probe(struct platform_device *pdev)
6119 + res = platform_get_resource(pdev, IORESOURCE_MEM,
6120 + mres++);
6121 + common->base = devm_ioremap_resource(dev, res);
6122 +- if (IS_ERR(common->base))
6123 +- return PTR_ERR(common->base);
6124 ++ if (IS_ERR(common->base)) {
6125 ++ ret = PTR_ERR(common->base);
6126 ++ goto error_unregister;
6127 ++ }
6128 +
6129 + idle = 0; /* polling delay is not needed */
6130 + }
6131 +diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
6132 +index a019286f8bb65..cbd006fb7fbb9 100644
6133 +--- a/drivers/tty/serial/8250/8250_omap.c
6134 ++++ b/drivers/tty/serial/8250/8250_omap.c
6135 +@@ -781,7 +781,10 @@ static void __dma_rx_do_complete(struct uart_8250_port *p)
6136 + dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
6137 +
6138 + count = dma->rx_size - state.residue;
6139 +-
6140 ++ if (count < dma->rx_size)
6141 ++ dmaengine_terminate_async(dma->rxchan);
6142 ++ if (!count)
6143 ++ goto unlock;
6144 + ret = tty_insert_flip_string(tty_port, dma->rx_buf, count);
6145 +
6146 + p->port.icount.rx += ret;
6147 +@@ -843,7 +846,6 @@ static void omap_8250_rx_dma_flush(struct uart_8250_port *p)
6148 + spin_unlock_irqrestore(&priv->rx_dma_lock, flags);
6149 +
6150 + __dma_rx_do_complete(p);
6151 +- dmaengine_terminate_all(dma->rxchan);
6152 + }
6153 +
6154 + static int omap_8250_rx_dma(struct uart_8250_port *p)
6155 +@@ -1227,11 +1229,11 @@ static int omap8250_probe(struct platform_device *pdev)
6156 + spin_lock_init(&priv->rx_dma_lock);
6157 +
6158 + device_init_wakeup(&pdev->dev, true);
6159 ++ pm_runtime_enable(&pdev->dev);
6160 + pm_runtime_use_autosuspend(&pdev->dev);
6161 + pm_runtime_set_autosuspend_delay(&pdev->dev, -1);
6162 +
6163 + pm_runtime_irq_safe(&pdev->dev);
6164 +- pm_runtime_enable(&pdev->dev);
6165 +
6166 + pm_runtime_get_sync(&pdev->dev);
6167 +
6168 +diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
6169 +index 09f0dc3b967b1..60ca19eca1f63 100644
6170 +--- a/drivers/tty/serial/8250/8250_port.c
6171 ++++ b/drivers/tty/serial/8250/8250_port.c
6172 +@@ -1861,6 +1861,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
6173 + unsigned char status;
6174 + unsigned long flags;
6175 + struct uart_8250_port *up = up_to_u8250p(port);
6176 ++ bool skip_rx = false;
6177 +
6178 + if (iir & UART_IIR_NO_INT)
6179 + return 0;
6180 +@@ -1869,7 +1870,20 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
6181 +
6182 + status = serial_port_in(port, UART_LSR);
6183 +
6184 +- if (status & (UART_LSR_DR | UART_LSR_BI)) {
6185 ++ /*
6186 ++ * If port is stopped and there are no error conditions in the
6187 ++ * FIFO, then don't drain the FIFO, as this may lead to TTY buffer
6188 ++ * overflow. Not servicing, RX FIFO would trigger auto HW flow
6189 ++ * control when FIFO occupancy reaches preset threshold, thus
6190 ++ * halting RX. This only works when auto HW flow control is
6191 ++ * available.
6192 ++ */
6193 ++ if (!(status & (UART_LSR_FIFOE | UART_LSR_BRK_ERROR_BITS)) &&
6194 ++ (port->status & (UPSTAT_AUTOCTS | UPSTAT_AUTORTS)) &&
6195 ++ !(port->read_status_mask & UART_LSR_DR))
6196 ++ skip_rx = true;
6197 ++
6198 ++ if (status & (UART_LSR_DR | UART_LSR_BI) && !skip_rx) {
6199 + if (!up->dma || handle_rx_dma(up, iir))
6200 + status = serial8250_rx_chars(up, status);
6201 + }
6202 +diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
6203 +index fcb89bf2524d1..1528a7ba2bf4d 100644
6204 +--- a/drivers/tty/serial/samsung.c
6205 ++++ b/drivers/tty/serial/samsung.c
6206 +@@ -1187,14 +1187,14 @@ static unsigned int s3c24xx_serial_getclk(struct s3c24xx_uart_port *ourport,
6207 + struct s3c24xx_uart_info *info = ourport->info;
6208 + struct clk *clk;
6209 + unsigned long rate;
6210 +- unsigned int cnt, baud, quot, clk_sel, best_quot = 0;
6211 ++ unsigned int cnt, baud, quot, best_quot = 0;
6212 + char clkname[MAX_CLK_NAME_LENGTH];
6213 + int calc_deviation, deviation = (1 << 30) - 1;
6214 +
6215 +- clk_sel = (ourport->cfg->clk_sel) ? ourport->cfg->clk_sel :
6216 +- ourport->info->def_clk_sel;
6217 + for (cnt = 0; cnt < info->num_clks; cnt++) {
6218 +- if (!(clk_sel & (1 << cnt)))
6219 ++ /* Keep selected clock if provided */
6220 ++ if (ourport->cfg->clk_sel &&
6221 ++ !(ourport->cfg->clk_sel & (1 << cnt)))
6222 + continue;
6223 +
6224 + sprintf(clkname, "clk_uart_baud%d", cnt);
6225 +diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
6226 +index 31950a38f0fb7..23f9b0cdff086 100644
6227 +--- a/drivers/tty/serial/xilinx_uartps.c
6228 ++++ b/drivers/tty/serial/xilinx_uartps.c
6229 +@@ -1236,6 +1236,7 @@ static int cdns_uart_console_setup(struct console *co, char *options)
6230 + int bits = 8;
6231 + int parity = 'n';
6232 + int flow = 'n';
6233 ++ unsigned long time_out;
6234 +
6235 + if (!port->membase) {
6236 + pr_debug("console on " CDNS_UART_TTY_NAME "%i not present\n",
6237 +@@ -1246,6 +1247,13 @@ static int cdns_uart_console_setup(struct console *co, char *options)
6238 + if (options)
6239 + uart_parse_options(options, &baud, &parity, &bits, &flow);
6240 +
6241 ++ /* Wait for tx_empty before setting up the console */
6242 ++ time_out = jiffies + usecs_to_jiffies(TX_TIMEOUT);
6243 ++
6244 ++ while (time_before(jiffies, time_out) &&
6245 ++ cdns_uart_tx_empty(port) != TIOCSER_TEMT)
6246 ++ cpu_relax();
6247 ++
6248 + return uart_set_options(port, co, baud, parity, bits, flow);
6249 + }
6250 +
6251 +diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c
6252 +index 58b454c34560a..10a832a2135e2 100644
6253 +--- a/drivers/tty/vcc.c
6254 ++++ b/drivers/tty/vcc.c
6255 +@@ -604,6 +604,7 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
6256 + port->index = vcc_table_add(port);
6257 + if (port->index == -1) {
6258 + pr_err("VCC: no more TTY indices left for allocation\n");
6259 ++ rv = -ENOMEM;
6260 + goto free_ldc;
6261 + }
6262 +
6263 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
6264 +index 7bf2573dd459e..37cc3fd7c3cad 100644
6265 +--- a/drivers/usb/dwc3/gadget.c
6266 ++++ b/drivers/usb/dwc3/gadget.c
6267 +@@ -270,7 +270,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
6268 + {
6269 + const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
6270 + struct dwc3 *dwc = dep->dwc;
6271 +- u32 timeout = 1000;
6272 ++ u32 timeout = 5000;
6273 + u32 saved_config = 0;
6274 + u32 reg;
6275 +
6276 +diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
6277 +index de764459e05a6..9d93e7441bbca 100644
6278 +--- a/drivers/usb/host/ehci-mv.c
6279 ++++ b/drivers/usb/host/ehci-mv.c
6280 +@@ -192,12 +192,10 @@ static int mv_ehci_probe(struct platform_device *pdev)
6281 + hcd->rsrc_len = resource_size(r);
6282 + hcd->regs = ehci_mv->op_regs;
6283 +
6284 +- hcd->irq = platform_get_irq(pdev, 0);
6285 +- if (!hcd->irq) {
6286 +- dev_err(&pdev->dev, "Cannot get irq.");
6287 +- retval = -ENODEV;
6288 ++ retval = platform_get_irq(pdev, 0);
6289 ++ if (retval < 0)
6290 + goto err_disable_clk;
6291 +- }
6292 ++ hcd->irq = retval;
6293 +
6294 + ehci = hcd_to_ehci(hcd);
6295 + ehci->caps = (struct ehci_caps *) ehci_mv->cap_regs;
6296 +diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
6297 +index 9f72a6ee13b53..58e7336b2748b 100644
6298 +--- a/drivers/vfio/pci/vfio_pci.c
6299 ++++ b/drivers/vfio/pci/vfio_pci.c
6300 +@@ -409,6 +409,19 @@ static void vfio_pci_release(void *device_data)
6301 + if (!(--vdev->refcnt)) {
6302 + vfio_spapr_pci_eeh_release(vdev->pdev);
6303 + vfio_pci_disable(vdev);
6304 ++ mutex_lock(&vdev->igate);
6305 ++ if (vdev->err_trigger) {
6306 ++ eventfd_ctx_put(vdev->err_trigger);
6307 ++ vdev->err_trigger = NULL;
6308 ++ }
6309 ++ mutex_unlock(&vdev->igate);
6310 ++
6311 ++ mutex_lock(&vdev->igate);
6312 ++ if (vdev->req_trigger) {
6313 ++ eventfd_ctx_put(vdev->req_trigger);
6314 ++ vdev->req_trigger = NULL;
6315 ++ }
6316 ++ mutex_unlock(&vdev->igate);
6317 + }
6318 +
6319 + mutex_unlock(&driver_lock);
6320 +diff --git a/fs/block_dev.c b/fs/block_dev.c
6321 +index 8ac8f7469354b..9f3faac490259 100644
6322 +--- a/fs/block_dev.c
6323 ++++ b/fs/block_dev.c
6324 +@@ -1793,6 +1793,16 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
6325 + struct gendisk *disk = bdev->bd_disk;
6326 + struct block_device *victim = NULL;
6327 +
6328 ++ /*
6329 ++ * Sync early if it looks like we're the last one. If someone else
6330 ++ * opens the block device between now and the decrement of bd_openers
6331 ++ * then we did a sync that we didn't need to, but that's not the end
6332 ++ * of the world and we want to avoid long (could be several minute)
6333 ++ * syncs while holding the mutex.
6334 ++ */
6335 ++ if (bdev->bd_openers == 1)
6336 ++ sync_blockdev(bdev);
6337 ++
6338 + mutex_lock_nested(&bdev->bd_mutex, for_part);
6339 + if (for_part)
6340 + bdev->bd_part_count--;
6341 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
6342 +index 319a89d4d0735..ce5e0f6c6af4f 100644
6343 +--- a/fs/btrfs/extent-tree.c
6344 ++++ b/fs/btrfs/extent-tree.c
6345 +@@ -9098,8 +9098,6 @@ out:
6346 + */
6347 + if (!for_reloc && !root_dropped)
6348 + btrfs_add_dead_root(root);
6349 +- if (err && err != -EAGAIN)
6350 +- btrfs_handle_fs_error(fs_info, err, NULL);
6351 + return err;
6352 + }
6353 +
6354 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
6355 +index bdfe159a60da6..64d459ca76d06 100644
6356 +--- a/fs/btrfs/inode.c
6357 ++++ b/fs/btrfs/inode.c
6358 +@@ -8913,20 +8913,17 @@ again:
6359 + /*
6360 + * Qgroup reserved space handler
6361 + * Page here will be either
6362 +- * 1) Already written to disk
6363 +- * In this case, its reserved space is released from data rsv map
6364 +- * and will be freed by delayed_ref handler finally.
6365 +- * So even we call qgroup_free_data(), it won't decrease reserved
6366 +- * space.
6367 +- * 2) Not written to disk
6368 +- * This means the reserved space should be freed here. However,
6369 +- * if a truncate invalidates the page (by clearing PageDirty)
6370 +- * and the page is accounted for while allocating extent
6371 +- * in btrfs_check_data_free_space() we let delayed_ref to
6372 +- * free the entire extent.
6373 ++ * 1) Already written to disk or ordered extent already submitted
6374 ++ * Then its QGROUP_RESERVED bit in io_tree is already cleaned.
6375 ++ * Qgroup will be handled by its qgroup_record then.
6376 ++ * btrfs_qgroup_free_data() call will do nothing here.
6377 ++ *
6378 ++ * 2) Not written to disk yet
6379 ++ * Then btrfs_qgroup_free_data() call will clear the QGROUP_RESERVED
6380 ++ * bit of its io_tree, and free the qgroup reserved data space.
6381 ++ * Since the IO will never happen for this page.
6382 + */
6383 +- if (PageDirty(page))
6384 +- btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE);
6385 ++ btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE);
6386 + if (!inode_evicting) {
6387 + clear_extent_bit(tree, page_start, page_end,
6388 + EXTENT_LOCKED | EXTENT_DIRTY |
6389 +diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
6390 +index a2d4eed27f804..c0dbf8b7762b4 100644
6391 +--- a/fs/ceph/caps.c
6392 ++++ b/fs/ceph/caps.c
6393 +@@ -2015,12 +2015,24 @@ ack:
6394 + if (mutex_trylock(&session->s_mutex) == 0) {
6395 + dout("inverting session/ino locks on %p\n",
6396 + session);
6397 ++ session = ceph_get_mds_session(session);
6398 + spin_unlock(&ci->i_ceph_lock);
6399 + if (took_snap_rwsem) {
6400 + up_read(&mdsc->snap_rwsem);
6401 + took_snap_rwsem = 0;
6402 + }
6403 +- mutex_lock(&session->s_mutex);
6404 ++ if (session) {
6405 ++ mutex_lock(&session->s_mutex);
6406 ++ ceph_put_mds_session(session);
6407 ++ } else {
6408 ++ /*
6409 ++ * Because we take the reference while
6410 ++ * holding the i_ceph_lock, it should
6411 ++ * never be NULL. Throw a warning if it
6412 ++ * ever is.
6413 ++ */
6414 ++ WARN_ON_ONCE(true);
6415 ++ }
6416 + goto retry;
6417 + }
6418 + }
6419 +diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
6420 +index 1e438e0faf77e..3c24fb77ef325 100644
6421 +--- a/fs/ceph/inode.c
6422 ++++ b/fs/ceph/inode.c
6423 +@@ -764,8 +764,11 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
6424 + info_caps = le32_to_cpu(info->cap.caps);
6425 +
6426 + /* prealloc new cap struct */
6427 +- if (info_caps && ceph_snap(inode) == CEPH_NOSNAP)
6428 ++ if (info_caps && ceph_snap(inode) == CEPH_NOSNAP) {
6429 + new_cap = ceph_get_cap(mdsc, caps_reservation);
6430 ++ if (!new_cap)
6431 ++ return -ENOMEM;
6432 ++ }
6433 +
6434 + /*
6435 + * prealloc xattr data, if it looks like we'll need it. only
6436 +diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
6437 +index 71c2dd0c7f038..2c632793c88c5 100644
6438 +--- a/fs/cifs/cifsglob.h
6439 ++++ b/fs/cifs/cifsglob.h
6440 +@@ -259,8 +259,9 @@ struct smb_version_operations {
6441 + int (*check_message)(char *, unsigned int, struct TCP_Server_Info *);
6442 + bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
6443 + int (*handle_cancelled_mid)(char *, struct TCP_Server_Info *);
6444 +- void (*downgrade_oplock)(struct TCP_Server_Info *,
6445 +- struct cifsInodeInfo *, bool);
6446 ++ void (*downgrade_oplock)(struct TCP_Server_Info *server,
6447 ++ struct cifsInodeInfo *cinode, __u32 oplock,
6448 ++ unsigned int epoch, bool *purge_cache);
6449 + /* process transaction2 response */
6450 + bool (*check_trans2)(struct mid_q_entry *, struct TCP_Server_Info *,
6451 + char *, int);
6452 +@@ -1160,6 +1161,8 @@ struct cifsFileInfo {
6453 + unsigned int f_flags;
6454 + bool invalidHandle:1; /* file closed via session abend */
6455 + bool oplock_break_cancelled:1;
6456 ++ unsigned int oplock_epoch; /* epoch from the lease break */
6457 ++ __u32 oplock_level; /* oplock/lease level from the lease break */
6458 + int count;
6459 + spinlock_t file_info_lock; /* protects four flag/count fields above */
6460 + struct mutex fh_mutex; /* prevents reopen race after dead ses*/
6461 +@@ -1300,7 +1303,7 @@ struct cifsInodeInfo {
6462 + unsigned int epoch; /* used to track lease state changes */
6463 + #define CIFS_INODE_PENDING_OPLOCK_BREAK (0) /* oplock break in progress */
6464 + #define CIFS_INODE_PENDING_WRITERS (1) /* Writes in progress */
6465 +-#define CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2 (2) /* Downgrade oplock to L2 */
6466 ++#define CIFS_INODE_FLAG_UNUSED (2) /* Unused flag */
6467 + #define CIFS_INO_DELETE_PENDING (3) /* delete pending on server */
6468 + #define CIFS_INO_INVALID_MAPPING (4) /* pagecache is invalid */
6469 + #define CIFS_INO_LOCK (5) /* lock bit for synchronization */
6470 +diff --git a/fs/cifs/file.c b/fs/cifs/file.c
6471 +index 128cbd69911b4..5cb15649adb07 100644
6472 +--- a/fs/cifs/file.c
6473 ++++ b/fs/cifs/file.c
6474 +@@ -3804,7 +3804,8 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
6475 + break;
6476 +
6477 + __SetPageLocked(page);
6478 +- if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
6479 ++ rc = add_to_page_cache_locked(page, mapping, page->index, gfp);
6480 ++ if (rc) {
6481 + __ClearPageLocked(page);
6482 + break;
6483 + }
6484 +@@ -3820,6 +3821,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
6485 + struct list_head *page_list, unsigned num_pages)
6486 + {
6487 + int rc;
6488 ++ int err = 0;
6489 + struct list_head tmplist;
6490 + struct cifsFileInfo *open_file = file->private_data;
6491 + struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
6492 +@@ -3860,7 +3862,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
6493 + * the order of declining indexes. When we put the pages in
6494 + * the rdata->pages, then we want them in increasing order.
6495 + */
6496 +- while (!list_empty(page_list)) {
6497 ++ while (!list_empty(page_list) && !err) {
6498 + unsigned int i, nr_pages, bytes, rsize;
6499 + loff_t offset;
6500 + struct page *page, *tpage;
6501 +@@ -3883,9 +3885,10 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
6502 + return 0;
6503 + }
6504 +
6505 +- rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
6506 ++ nr_pages = 0;
6507 ++ err = readpages_get_pages(mapping, page_list, rsize, &tmplist,
6508 + &nr_pages, &offset, &bytes);
6509 +- if (rc) {
6510 ++ if (!nr_pages) {
6511 + add_credits_and_wake_if(server, credits, 0);
6512 + break;
6513 + }
6514 +@@ -4185,12 +4188,13 @@ void cifs_oplock_break(struct work_struct *work)
6515 + struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
6516 + struct TCP_Server_Info *server = tcon->ses->server;
6517 + int rc = 0;
6518 ++ bool purge_cache = false;
6519 +
6520 + wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
6521 + TASK_UNINTERRUPTIBLE);
6522 +
6523 +- server->ops->downgrade_oplock(server, cinode,
6524 +- test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));
6525 ++ server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
6526 ++ cfile->oplock_epoch, &purge_cache);
6527 +
6528 + if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
6529 + cifs_has_mand_locks(cinode)) {
6530 +@@ -4205,18 +4209,21 @@ void cifs_oplock_break(struct work_struct *work)
6531 + else
6532 + break_lease(inode, O_WRONLY);
6533 + rc = filemap_fdatawrite(inode->i_mapping);
6534 +- if (!CIFS_CACHE_READ(cinode)) {
6535 ++ if (!CIFS_CACHE_READ(cinode) || purge_cache) {
6536 + rc = filemap_fdatawait(inode->i_mapping);
6537 + mapping_set_error(inode->i_mapping, rc);
6538 + cifs_zap_mapping(inode);
6539 + }
6540 + cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
6541 ++ if (CIFS_CACHE_WRITE(cinode))
6542 ++ goto oplock_break_ack;
6543 + }
6544 +
6545 + rc = cifs_push_locks(cfile);
6546 + if (rc)
6547 + cifs_dbg(VFS, "Push locks rc = %d\n", rc);
6548 +
6549 ++oplock_break_ack:
6550 + /*
6551 + * releasing stale oplock after recent reconnect of smb session using
6552 + * a now incorrect file handle is not a data integrity issue but do
6553 +diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
6554 +index e45f8e321371c..dd67f56ea61e5 100644
6555 +--- a/fs/cifs/misc.c
6556 ++++ b/fs/cifs/misc.c
6557 +@@ -477,21 +477,10 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
6558 + set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
6559 + &pCifsInode->flags);
6560 +
6561 +- /*
6562 +- * Set flag if the server downgrades the oplock
6563 +- * to L2 else clear.
6564 +- */
6565 +- if (pSMB->OplockLevel)
6566 +- set_bit(
6567 +- CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
6568 +- &pCifsInode->flags);
6569 +- else
6570 +- clear_bit(
6571 +- CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
6572 +- &pCifsInode->flags);
6573 +-
6574 +- cifs_queue_oplock_break(netfile);
6575 ++ netfile->oplock_epoch = 0;
6576 ++ netfile->oplock_level = pSMB->OplockLevel;
6577 + netfile->oplock_break_cancelled = false;
6578 ++ cifs_queue_oplock_break(netfile);
6579 +
6580 + spin_unlock(&tcon->open_file_lock);
6581 + spin_unlock(&cifs_tcp_ses_lock);
6582 +diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
6583 +index c7f0c85664425..0b7f924512848 100644
6584 +--- a/fs/cifs/smb1ops.c
6585 ++++ b/fs/cifs/smb1ops.c
6586 +@@ -381,12 +381,10 @@ coalesce_t2(char *second_buf, struct smb_hdr *target_hdr)
6587 +
6588 + static void
6589 + cifs_downgrade_oplock(struct TCP_Server_Info *server,
6590 +- struct cifsInodeInfo *cinode, bool set_level2)
6591 ++ struct cifsInodeInfo *cinode, __u32 oplock,
6592 ++ unsigned int epoch, bool *purge_cache)
6593 + {
6594 +- if (set_level2)
6595 +- cifs_set_oplock_level(cinode, OPLOCK_READ);
6596 +- else
6597 +- cifs_set_oplock_level(cinode, 0);
6598 ++ cifs_set_oplock_level(cinode, oplock);
6599 + }
6600 +
6601 + static bool
6602 +diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
6603 +index 2fc96f7923ee5..7d875a47d0226 100644
6604 +--- a/fs/cifs/smb2misc.c
6605 ++++ b/fs/cifs/smb2misc.c
6606 +@@ -550,7 +550,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp)
6607 +
6608 + cifs_dbg(FYI, "found in the open list\n");
6609 + cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
6610 +- le32_to_cpu(rsp->NewLeaseState));
6611 ++ lease_state);
6612 +
6613 + if (ack_req)
6614 + cfile->oplock_break_cancelled = false;
6615 +@@ -559,17 +559,8 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp)
6616 +
6617 + set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
6618 +
6619 +- /*
6620 +- * Set or clear flags depending on the lease state being READ.
6621 +- * HANDLE caching flag should be added when the client starts
6622 +- * to defer closing remote file handles with HANDLE leases.
6623 +- */
6624 +- if (lease_state & SMB2_LEASE_READ_CACHING_HE)
6625 +- set_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
6626 +- &cinode->flags);
6627 +- else
6628 +- clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
6629 +- &cinode->flags);
6630 ++ cfile->oplock_epoch = le16_to_cpu(rsp->Epoch);
6631 ++ cfile->oplock_level = lease_state;
6632 +
6633 + cifs_queue_oplock_break(cfile);
6634 + return true;
6635 +@@ -599,7 +590,7 @@ smb2_tcon_find_pending_open_lease(struct cifs_tcon *tcon,
6636 +
6637 + cifs_dbg(FYI, "found in the pending open list\n");
6638 + cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
6639 +- le32_to_cpu(rsp->NewLeaseState));
6640 ++ lease_state);
6641 +
6642 + open->oplock = lease_state;
6643 + }
6644 +@@ -732,18 +723,9 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
6645 + set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
6646 + &cinode->flags);
6647 +
6648 +- /*
6649 +- * Set flag if the server downgrades the oplock
6650 +- * to L2 else clear.
6651 +- */
6652 +- if (rsp->OplockLevel)
6653 +- set_bit(
6654 +- CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
6655 +- &cinode->flags);
6656 +- else
6657 +- clear_bit(
6658 +- CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
6659 +- &cinode->flags);
6660 ++ cfile->oplock_epoch = 0;
6661 ++ cfile->oplock_level = rsp->OplockLevel;
6662 ++
6663 + spin_unlock(&cfile->file_info_lock);
6664 +
6665 + cifs_queue_oplock_break(cfile);
6666 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
6667 +index 2a523139a05fb..947a40069d246 100644
6668 +--- a/fs/cifs/smb2ops.c
6669 ++++ b/fs/cifs/smb2ops.c
6670 +@@ -2358,22 +2358,38 @@ static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
6671 +
6672 + static void
6673 + smb2_downgrade_oplock(struct TCP_Server_Info *server,
6674 +- struct cifsInodeInfo *cinode, bool set_level2)
6675 ++ struct cifsInodeInfo *cinode, __u32 oplock,
6676 ++ unsigned int epoch, bool *purge_cache)
6677 + {
6678 +- if (set_level2)
6679 +- server->ops->set_oplock_level(cinode, SMB2_OPLOCK_LEVEL_II,
6680 +- 0, NULL);
6681 +- else
6682 +- server->ops->set_oplock_level(cinode, 0, 0, NULL);
6683 ++ server->ops->set_oplock_level(cinode, oplock, 0, NULL);
6684 + }
6685 +
6686 + static void
6687 +-smb21_downgrade_oplock(struct TCP_Server_Info *server,
6688 +- struct cifsInodeInfo *cinode, bool set_level2)
6689 ++smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
6690 ++ unsigned int epoch, bool *purge_cache);
6691 ++
6692 ++static void
6693 ++smb3_downgrade_oplock(struct TCP_Server_Info *server,
6694 ++ struct cifsInodeInfo *cinode, __u32 oplock,
6695 ++ unsigned int epoch, bool *purge_cache)
6696 + {
6697 +- server->ops->set_oplock_level(cinode,
6698 +- set_level2 ? SMB2_LEASE_READ_CACHING_HE :
6699 +- 0, 0, NULL);
6700 ++ unsigned int old_state = cinode->oplock;
6701 ++ unsigned int old_epoch = cinode->epoch;
6702 ++ unsigned int new_state;
6703 ++
6704 ++ if (epoch > old_epoch) {
6705 ++ smb21_set_oplock_level(cinode, oplock, 0, NULL);
6706 ++ cinode->epoch = epoch;
6707 ++ }
6708 ++
6709 ++ new_state = cinode->oplock;
6710 ++ *purge_cache = false;
6711 ++
6712 ++ if ((old_state & CIFS_CACHE_READ_FLG) != 0 &&
6713 ++ (new_state & CIFS_CACHE_READ_FLG) == 0)
6714 ++ *purge_cache = true;
6715 ++ else if (old_state == new_state && (epoch - old_epoch > 1))
6716 ++ *purge_cache = true;
6717 + }
6718 +
6719 + static void
6720 +@@ -3449,7 +3465,7 @@ struct smb_version_operations smb21_operations = {
6721 + .print_stats = smb2_print_stats,
6722 + .is_oplock_break = smb2_is_valid_oplock_break,
6723 + .handle_cancelled_mid = smb2_handle_cancelled_mid,
6724 +- .downgrade_oplock = smb21_downgrade_oplock,
6725 ++ .downgrade_oplock = smb2_downgrade_oplock,
6726 + .need_neg = smb2_need_neg,
6727 + .negotiate = smb2_negotiate,
6728 + .negotiate_wsize = smb2_negotiate_wsize,
6729 +@@ -3546,7 +3562,7 @@ struct smb_version_operations smb30_operations = {
6730 + .dump_share_caps = smb2_dump_share_caps,
6731 + .is_oplock_break = smb2_is_valid_oplock_break,
6732 + .handle_cancelled_mid = smb2_handle_cancelled_mid,
6733 +- .downgrade_oplock = smb21_downgrade_oplock,
6734 ++ .downgrade_oplock = smb3_downgrade_oplock,
6735 + .need_neg = smb2_need_neg,
6736 + .negotiate = smb2_negotiate,
6737 + .negotiate_wsize = smb2_negotiate_wsize,
6738 +@@ -3651,7 +3667,7 @@ struct smb_version_operations smb311_operations = {
6739 + .dump_share_caps = smb2_dump_share_caps,
6740 + .is_oplock_break = smb2_is_valid_oplock_break,
6741 + .handle_cancelled_mid = smb2_handle_cancelled_mid,
6742 +- .downgrade_oplock = smb21_downgrade_oplock,
6743 ++ .downgrade_oplock = smb3_downgrade_oplock,
6744 + .need_neg = smb2_need_neg,
6745 + .negotiate = smb2_negotiate,
6746 + .negotiate_wsize = smb2_negotiate_wsize,
6747 +diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
6748 +index 308c682fa4d3b..44501f8cbd75e 100644
6749 +--- a/fs/cifs/smb2pdu.h
6750 ++++ b/fs/cifs/smb2pdu.h
6751 +@@ -1209,7 +1209,7 @@ struct smb2_oplock_break {
6752 + struct smb2_lease_break {
6753 + struct smb2_sync_hdr sync_hdr;
6754 + __le16 StructureSize; /* Must be 44 */
6755 +- __le16 Reserved;
6756 ++ __le16 Epoch;
6757 + __le32 Flags;
6758 + __u8 LeaseKey[16];
6759 + __le32 CurrentLeaseState;
6760 +diff --git a/fs/dcache.c b/fs/dcache.c
6761 +index 6e0022326afe3..20370a0997bf9 100644
6762 +--- a/fs/dcache.c
6763 ++++ b/fs/dcache.c
6764 +@@ -864,17 +864,19 @@ struct dentry *dget_parent(struct dentry *dentry)
6765 + {
6766 + int gotref;
6767 + struct dentry *ret;
6768 ++ unsigned seq;
6769 +
6770 + /*
6771 + * Do optimistic parent lookup without any
6772 + * locking.
6773 + */
6774 + rcu_read_lock();
6775 ++ seq = raw_seqcount_begin(&dentry->d_seq);
6776 + ret = READ_ONCE(dentry->d_parent);
6777 + gotref = lockref_get_not_zero(&ret->d_lockref);
6778 + rcu_read_unlock();
6779 + if (likely(gotref)) {
6780 +- if (likely(ret == READ_ONCE(dentry->d_parent)))
6781 ++ if (!read_seqcount_retry(&dentry->d_seq, seq))
6782 + return ret;
6783 + dput(ret);
6784 + }
6785 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
6786 +index cd833f4e64ef1..52be4c9650241 100644
6787 +--- a/fs/ext4/inode.c
6788 ++++ b/fs/ext4/inode.c
6789 +@@ -5315,7 +5315,7 @@ static int ext4_do_update_inode(handle_t *handle,
6790 + raw_inode->i_file_acl_high =
6791 + cpu_to_le16(ei->i_file_acl >> 32);
6792 + raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
6793 +- if (ei->i_disksize != ext4_isize(inode->i_sb, raw_inode)) {
6794 ++ if (READ_ONCE(ei->i_disksize) != ext4_isize(inode->i_sb, raw_inode)) {
6795 + ext4_isize_set(raw_inode, ei->i_disksize);
6796 + need_datasync = 1;
6797 + }
6798 +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
6799 +index 8dd54a8a03610..054cfdd007d69 100644
6800 +--- a/fs/ext4/mballoc.c
6801 ++++ b/fs/ext4/mballoc.c
6802 +@@ -1901,8 +1901,15 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
6803 + BUG_ON(buddy == NULL);
6804 +
6805 + k = mb_find_next_zero_bit(buddy, max, 0);
6806 +- BUG_ON(k >= max);
6807 +-
6808 ++ if (k >= max) {
6809 ++ ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0,
6810 ++ "%d free clusters of order %d. But found 0",
6811 ++ grp->bb_counters[i], i);
6812 ++ ext4_mark_group_bitmap_corrupted(ac->ac_sb,
6813 ++ e4b->bd_group,
6814 ++ EXT4_GROUP_INFO_BBITMAP_CORRUPT);
6815 ++ break;
6816 ++ }
6817 + ac->ac_found++;
6818 +
6819 + ac->ac_b_ex.fe_len = 1 << i;
6820 +diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
6821 +index 01e6ea11822bf..c51c9a6881e49 100644
6822 +--- a/fs/fuse/dev.c
6823 ++++ b/fs/fuse/dev.c
6824 +@@ -831,7 +831,6 @@ static int fuse_check_page(struct page *page)
6825 + {
6826 + if (page_mapcount(page) ||
6827 + page->mapping != NULL ||
6828 +- page_count(page) != 1 ||
6829 + (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
6830 + ~(1 << PG_locked |
6831 + 1 << PG_referenced |
6832 +diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
6833 +index d968b5c5df217..a52b8b0dceeb9 100644
6834 +--- a/fs/gfs2/inode.c
6835 ++++ b/fs/gfs2/inode.c
6836 +@@ -715,7 +715,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
6837 +
6838 + error = gfs2_trans_begin(sdp, blocks, 0);
6839 + if (error)
6840 +- goto fail_gunlock2;
6841 ++ goto fail_free_inode;
6842 +
6843 + if (blocks > 1) {
6844 + ip->i_eattr = ip->i_no_addr + 1;
6845 +@@ -726,7 +726,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
6846 +
6847 + error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
6848 + if (error)
6849 +- goto fail_gunlock2;
6850 ++ goto fail_free_inode;
6851 +
6852 + BUG_ON(test_and_set_bit(GLF_INODE_CREATING, &io_gl->gl_flags));
6853 +
6854 +@@ -735,7 +735,6 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
6855 + goto fail_gunlock2;
6856 +
6857 + glock_set_object(ip->i_iopen_gh.gh_gl, ip);
6858 +- gfs2_glock_put(io_gl);
6859 + gfs2_set_iop(inode);
6860 + insert_inode_hash(inode);
6861 +
6862 +@@ -768,6 +767,8 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
6863 +
6864 + mark_inode_dirty(inode);
6865 + d_instantiate(dentry, inode);
6866 ++ /* After instantiate, errors should result in evict which will destroy
6867 ++ * both inode and iopen glocks properly. */
6868 + if (file) {
6869 + file->f_mode |= FMODE_CREATED;
6870 + error = finish_open(file, dentry, gfs2_open_common);
6871 +@@ -775,15 +776,15 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
6872 + gfs2_glock_dq_uninit(ghs);
6873 + gfs2_glock_dq_uninit(ghs + 1);
6874 + clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
6875 ++ gfs2_glock_put(io_gl);
6876 + return error;
6877 +
6878 + fail_gunlock3:
6879 + glock_clear_object(io_gl, ip);
6880 + gfs2_glock_dq_uninit(&ip->i_iopen_gh);
6881 +- gfs2_glock_put(io_gl);
6882 + fail_gunlock2:
6883 +- if (io_gl)
6884 +- clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
6885 ++ clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
6886 ++ gfs2_glock_put(io_gl);
6887 + fail_free_inode:
6888 + if (ip->i_gl) {
6889 + glock_clear_object(ip->i_gl, ip);
6890 +diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
6891 +index 5dae7c85d9b6e..2c7d76b4c5e18 100644
6892 +--- a/fs/nfs/pagelist.c
6893 ++++ b/fs/nfs/pagelist.c
6894 +@@ -132,47 +132,70 @@ nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx)
6895 + EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);
6896 +
6897 + /*
6898 +- * nfs_page_group_lock - lock the head of the page group
6899 +- * @req - request in group that is to be locked
6900 ++ * nfs_page_set_headlock - set the request PG_HEADLOCK
6901 ++ * @req: request that is to be locked
6902 + *
6903 +- * this lock must be held when traversing or modifying the page
6904 +- * group list
6905 ++ * this lock must be held when modifying req->wb_head
6906 + *
6907 + * return 0 on success, < 0 on error
6908 + */
6909 + int
6910 +-nfs_page_group_lock(struct nfs_page *req)
6911 ++nfs_page_set_headlock(struct nfs_page *req)
6912 + {
6913 +- struct nfs_page *head = req->wb_head;
6914 +-
6915 +- WARN_ON_ONCE(head != head->wb_head);
6916 +-
6917 +- if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags))
6918 ++ if (!test_and_set_bit(PG_HEADLOCK, &req->wb_flags))
6919 + return 0;
6920 +
6921 +- set_bit(PG_CONTENDED1, &head->wb_flags);
6922 ++ set_bit(PG_CONTENDED1, &req->wb_flags);
6923 + smp_mb__after_atomic();
6924 +- return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
6925 ++ return wait_on_bit_lock(&req->wb_flags, PG_HEADLOCK,
6926 + TASK_UNINTERRUPTIBLE);
6927 + }
6928 +
6929 + /*
6930 +- * nfs_page_group_unlock - unlock the head of the page group
6931 +- * @req - request in group that is to be unlocked
6932 ++ * nfs_page_clear_headlock - clear the request PG_HEADLOCK
6933 ++ * @req: request that is to be locked
6934 + */
6935 + void
6936 +-nfs_page_group_unlock(struct nfs_page *req)
6937 ++nfs_page_clear_headlock(struct nfs_page *req)
6938 + {
6939 +- struct nfs_page *head = req->wb_head;
6940 +-
6941 +- WARN_ON_ONCE(head != head->wb_head);
6942 +-
6943 + smp_mb__before_atomic();
6944 +- clear_bit(PG_HEADLOCK, &head->wb_flags);
6945 ++ clear_bit(PG_HEADLOCK, &req->wb_flags);
6946 + smp_mb__after_atomic();
6947 +- if (!test_bit(PG_CONTENDED1, &head->wb_flags))
6948 ++ if (!test_bit(PG_CONTENDED1, &req->wb_flags))
6949 + return;
6950 +- wake_up_bit(&head->wb_flags, PG_HEADLOCK);
6951 ++ wake_up_bit(&req->wb_flags, PG_HEADLOCK);
6952 ++}
6953 ++
6954 ++/*
6955 ++ * nfs_page_group_lock - lock the head of the page group
6956 ++ * @req: request in group that is to be locked
6957 ++ *
6958 ++ * this lock must be held when traversing or modifying the page
6959 ++ * group list
6960 ++ *
6961 ++ * return 0 on success, < 0 on error
6962 ++ */
6963 ++int
6964 ++nfs_page_group_lock(struct nfs_page *req)
6965 ++{
6966 ++ int ret;
6967 ++
6968 ++ ret = nfs_page_set_headlock(req);
6969 ++ if (ret || req->wb_head == req)
6970 ++ return ret;
6971 ++ return nfs_page_set_headlock(req->wb_head);
6972 ++}
6973 ++
6974 ++/*
6975 ++ * nfs_page_group_unlock - unlock the head of the page group
6976 ++ * @req: request in group that is to be unlocked
6977 ++ */
6978 ++void
6979 ++nfs_page_group_unlock(struct nfs_page *req)
6980 ++{
6981 ++ if (req != req->wb_head)
6982 ++ nfs_page_clear_headlock(req->wb_head);
6983 ++ nfs_page_clear_headlock(req);
6984 + }
6985 +
6986 + /*
6987 +diff --git a/fs/nfs/write.c b/fs/nfs/write.c
6988 +index 63d20308a9bb7..d419d89b91f7c 100644
6989 +--- a/fs/nfs/write.c
6990 ++++ b/fs/nfs/write.c
6991 +@@ -416,22 +416,28 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
6992 + destroy_list = (subreq->wb_this_page == old_head) ?
6993 + NULL : subreq->wb_this_page;
6994 +
6995 ++ /* Note: lock subreq in order to change subreq->wb_head */
6996 ++ nfs_page_set_headlock(subreq);
6997 + WARN_ON_ONCE(old_head != subreq->wb_head);
6998 +
6999 + /* make sure old group is not used */
7000 + subreq->wb_this_page = subreq;
7001 ++ subreq->wb_head = subreq;
7002 +
7003 + clear_bit(PG_REMOVE, &subreq->wb_flags);
7004 +
7005 + /* Note: races with nfs_page_group_destroy() */
7006 + if (!kref_read(&subreq->wb_kref)) {
7007 + /* Check if we raced with nfs_page_group_destroy() */
7008 +- if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags))
7009 ++ if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags)) {
7010 ++ nfs_page_clear_headlock(subreq);
7011 + nfs_free_request(subreq);
7012 ++ } else
7013 ++ nfs_page_clear_headlock(subreq);
7014 + continue;
7015 + }
7016 ++ nfs_page_clear_headlock(subreq);
7017 +
7018 +- subreq->wb_head = subreq;
7019 + nfs_release_request(old_head);
7020 +
7021 + if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) {
7022 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
7023 +index c24306af9758f..655079ae1dd1f 100644
7024 +--- a/fs/nfsd/nfs4state.c
7025 ++++ b/fs/nfsd/nfs4state.c
7026 +@@ -471,6 +471,8 @@ find_any_file(struct nfs4_file *f)
7027 + {
7028 + struct file *ret;
7029 +
7030 ++ if (!f)
7031 ++ return NULL;
7032 + spin_lock(&f->fi_lock);
7033 + ret = __nfs4_get_fd(f, O_RDWR);
7034 + if (!ret) {
7035 +@@ -1207,6 +1209,12 @@ static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
7036 + nfs4_free_stateowner(sop);
7037 + }
7038 +
7039 ++static bool
7040 ++nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp)
7041 ++{
7042 ++ return list_empty(&stp->st_perfile);
7043 ++}
7044 ++
7045 + static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
7046 + {
7047 + struct nfs4_file *fp = stp->st_stid.sc_file;
7048 +@@ -1274,9 +1282,11 @@ static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
7049 + {
7050 + lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
7051 +
7052 ++ if (!unhash_ol_stateid(stp))
7053 ++ return false;
7054 + list_del_init(&stp->st_locks);
7055 + nfs4_unhash_stid(&stp->st_stid);
7056 +- return unhash_ol_stateid(stp);
7057 ++ return true;
7058 + }
7059 +
7060 + static void release_lock_stateid(struct nfs4_ol_stateid *stp)
7061 +@@ -1341,13 +1351,12 @@ static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
7062 + static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
7063 + struct list_head *reaplist)
7064 + {
7065 +- bool unhashed;
7066 +-
7067 + lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
7068 +
7069 +- unhashed = unhash_ol_stateid(stp);
7070 ++ if (!unhash_ol_stateid(stp))
7071 ++ return false;
7072 + release_open_stateid_locks(stp, reaplist);
7073 +- return unhashed;
7074 ++ return true;
7075 + }
7076 +
7077 + static void release_open_stateid(struct nfs4_ol_stateid *stp)
7078 +@@ -5774,21 +5783,21 @@ alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
7079 + }
7080 +
7081 + static struct nfs4_ol_stateid *
7082 +-find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
7083 ++find_lock_stateid(const struct nfs4_lockowner *lo,
7084 ++ const struct nfs4_ol_stateid *ost)
7085 + {
7086 + struct nfs4_ol_stateid *lst;
7087 +- struct nfs4_client *clp = lo->lo_owner.so_client;
7088 +
7089 +- lockdep_assert_held(&clp->cl_lock);
7090 ++ lockdep_assert_held(&ost->st_stid.sc_client->cl_lock);
7091 +
7092 +- list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
7093 +- if (lst->st_stid.sc_type != NFS4_LOCK_STID)
7094 +- continue;
7095 +- if (lst->st_stid.sc_file == fp) {
7096 +- refcount_inc(&lst->st_stid.sc_count);
7097 +- return lst;
7098 ++ /* If ost is not hashed, ost->st_locks will not be valid */
7099 ++ if (!nfs4_ol_stateid_unhashed(ost))
7100 ++ list_for_each_entry(lst, &ost->st_locks, st_locks) {
7101 ++ if (lst->st_stateowner == &lo->lo_owner) {
7102 ++ refcount_inc(&lst->st_stid.sc_count);
7103 ++ return lst;
7104 ++ }
7105 + }
7106 +- }
7107 + return NULL;
7108 + }
7109 +
7110 +@@ -5804,11 +5813,11 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
7111 + mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
7112 + retry:
7113 + spin_lock(&clp->cl_lock);
7114 +- spin_lock(&fp->fi_lock);
7115 +- retstp = find_lock_stateid(lo, fp);
7116 ++ if (nfs4_ol_stateid_unhashed(open_stp))
7117 ++ goto out_close;
7118 ++ retstp = find_lock_stateid(lo, open_stp);
7119 + if (retstp)
7120 +- goto out_unlock;
7121 +-
7122 ++ goto out_found;
7123 + refcount_inc(&stp->st_stid.sc_count);
7124 + stp->st_stid.sc_type = NFS4_LOCK_STID;
7125 + stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
7126 +@@ -5817,22 +5826,26 @@ retry:
7127 + stp->st_access_bmap = 0;
7128 + stp->st_deny_bmap = open_stp->st_deny_bmap;
7129 + stp->st_openstp = open_stp;
7130 ++ spin_lock(&fp->fi_lock);
7131 + list_add(&stp->st_locks, &open_stp->st_locks);
7132 + list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
7133 + list_add(&stp->st_perfile, &fp->fi_stateids);
7134 +-out_unlock:
7135 + spin_unlock(&fp->fi_lock);
7136 + spin_unlock(&clp->cl_lock);
7137 +- if (retstp) {
7138 +- if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
7139 +- nfs4_put_stid(&retstp->st_stid);
7140 +- goto retry;
7141 +- }
7142 +- /* To keep mutex tracking happy */
7143 +- mutex_unlock(&stp->st_mutex);
7144 +- stp = retstp;
7145 +- }
7146 + return stp;
7147 ++out_found:
7148 ++ spin_unlock(&clp->cl_lock);
7149 ++ if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
7150 ++ nfs4_put_stid(&retstp->st_stid);
7151 ++ goto retry;
7152 ++ }
7153 ++ /* To keep mutex tracking happy */
7154 ++ mutex_unlock(&stp->st_mutex);
7155 ++ return retstp;
7156 ++out_close:
7157 ++ spin_unlock(&clp->cl_lock);
7158 ++ mutex_unlock(&stp->st_mutex);
7159 ++ return NULL;
7160 + }
7161 +
7162 + static struct nfs4_ol_stateid *
7163 +@@ -5847,7 +5860,7 @@ find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
7164 +
7165 + *new = false;
7166 + spin_lock(&clp->cl_lock);
7167 +- lst = find_lock_stateid(lo, fi);
7168 ++ lst = find_lock_stateid(lo, ost);
7169 + spin_unlock(&clp->cl_lock);
7170 + if (lst != NULL) {
7171 + if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
7172 +diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
7173 +index 099bec94b8207..fab29f899f913 100644
7174 +--- a/fs/ubifs/io.c
7175 ++++ b/fs/ubifs/io.c
7176 +@@ -237,7 +237,7 @@ int ubifs_is_mapped(const struct ubifs_info *c, int lnum)
7177 + int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
7178 + int offs, int quiet, int must_chk_crc)
7179 + {
7180 +- int err = -EINVAL, type, node_len;
7181 ++ int err = -EINVAL, type, node_len, dump_node = 1;
7182 + uint32_t crc, node_crc, magic;
7183 + const struct ubifs_ch *ch = buf;
7184 +
7185 +@@ -290,10 +290,22 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
7186 + out_len:
7187 + if (!quiet)
7188 + ubifs_err(c, "bad node length %d", node_len);
7189 ++ if (type == UBIFS_DATA_NODE && node_len > UBIFS_DATA_NODE_SZ)
7190 ++ dump_node = 0;
7191 + out:
7192 + if (!quiet) {
7193 + ubifs_err(c, "bad node at LEB %d:%d", lnum, offs);
7194 +- ubifs_dump_node(c, buf);
7195 ++ if (dump_node) {
7196 ++ ubifs_dump_node(c, buf);
7197 ++ } else {
7198 ++ int safe_len = min3(node_len, c->leb_size - offs,
7199 ++ (int)UBIFS_MAX_DATA_NODE_SZ);
7200 ++ pr_err("\tprevent out-of-bounds memory access\n");
7201 ++ pr_err("\ttruncated data node length %d\n", safe_len);
7202 ++ pr_err("\tcorrupted data node:\n");
7203 ++ print_hex_dump(KERN_ERR, "\t", DUMP_PREFIX_OFFSET, 32, 1,
7204 ++ buf, safe_len, 0);
7205 ++ }
7206 + dump_stack();
7207 + }
7208 + return err;
7209 +diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
7210 +index bd37f4a292c3b..efb586ea508bf 100644
7211 +--- a/fs/xfs/libxfs/xfs_attr_leaf.c
7212 ++++ b/fs/xfs/libxfs/xfs_attr_leaf.c
7213 +@@ -1438,7 +1438,9 @@ xfs_attr3_leaf_add_work(
7214 + for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
7215 + if (ichdr->freemap[i].base == tmp) {
7216 + ichdr->freemap[i].base += sizeof(xfs_attr_leaf_entry_t);
7217 +- ichdr->freemap[i].size -= sizeof(xfs_attr_leaf_entry_t);
7218 ++ ichdr->freemap[i].size -=
7219 ++ min_t(uint16_t, ichdr->freemap[i].size,
7220 ++ sizeof(xfs_attr_leaf_entry_t));
7221 + }
7222 + }
7223 + ichdr->usedbytes += xfs_attr_leaf_entsize(leaf, args->index);
7224 +diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c
7225 +index f1bb3434f51c7..01e99806b941f 100644
7226 +--- a/fs/xfs/libxfs/xfs_dir2_node.c
7227 ++++ b/fs/xfs/libxfs/xfs_dir2_node.c
7228 +@@ -214,6 +214,7 @@ __xfs_dir3_free_read(
7229 + if (fa) {
7230 + xfs_verifier_error(*bpp, -EFSCORRUPTED, fa);
7231 + xfs_trans_brelse(tp, *bpp);
7232 ++ *bpp = NULL;
7233 + return -EFSCORRUPTED;
7234 + }
7235 +
7236 +diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c
7237 +index f99a7aefe4184..2b3cc5a8ced1b 100644
7238 +--- a/fs/xfs/libxfs/xfs_trans_resv.c
7239 ++++ b/fs/xfs/libxfs/xfs_trans_resv.c
7240 +@@ -197,6 +197,24 @@ xfs_calc_inode_chunk_res(
7241 + return res;
7242 + }
7243 +
7244 ++/*
7245 ++ * Per-extent log reservation for the btree changes involved in freeing or
7246 ++ * allocating a realtime extent. We have to be able to log as many rtbitmap
7247 ++ * blocks as needed to mark inuse MAXEXTLEN blocks' worth of realtime extents,
7248 ++ * as well as the realtime summary block.
7249 ++ */
7250 ++unsigned int
7251 ++xfs_rtalloc_log_count(
7252 ++ struct xfs_mount *mp,
7253 ++ unsigned int num_ops)
7254 ++{
7255 ++ unsigned int blksz = XFS_FSB_TO_B(mp, 1);
7256 ++ unsigned int rtbmp_bytes;
7257 ++
7258 ++ rtbmp_bytes = (MAXEXTLEN / mp->m_sb.sb_rextsize) / NBBY;
7259 ++ return (howmany(rtbmp_bytes, blksz) + 1) * num_ops;
7260 ++}
7261 ++
7262 + /*
7263 + * Various log reservation values.
7264 + *
7265 +@@ -219,13 +237,21 @@ xfs_calc_inode_chunk_res(
7266 +
7267 + /*
7268 + * In a write transaction we can allocate a maximum of 2
7269 +- * extents. This gives:
7270 ++ * extents. This gives (t1):
7271 + * the inode getting the new extents: inode size
7272 + * the inode's bmap btree: max depth * block size
7273 + * the agfs of the ags from which the extents are allocated: 2 * sector
7274 + * the superblock free block counter: sector size
7275 + * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
7276 +- * And the bmap_finish transaction can free bmap blocks in a join:
7277 ++ * Or, if we're writing to a realtime file (t2):
7278 ++ * the inode getting the new extents: inode size
7279 ++ * the inode's bmap btree: max depth * block size
7280 ++ * the agfs of the ags from which the extents are allocated: 2 * sector
7281 ++ * the superblock free block counter: sector size
7282 ++ * the realtime bitmap: ((MAXEXTLEN / rtextsize) / NBBY) bytes
7283 ++ * the realtime summary: 1 block
7284 ++ * the allocation btrees: 2 trees * (2 * max depth - 1) * block size
7285 ++ * And the bmap_finish transaction can free bmap blocks in a join (t3):
7286 + * the agfs of the ags containing the blocks: 2 * sector size
7287 + * the agfls of the ags containing the blocks: 2 * sector size
7288 + * the super block free block counter: sector size
7289 +@@ -235,40 +261,72 @@ STATIC uint
7290 + xfs_calc_write_reservation(
7291 + struct xfs_mount *mp)
7292 + {
7293 +- return XFS_DQUOT_LOGRES(mp) +
7294 +- max((xfs_calc_inode_res(mp, 1) +
7295 ++ unsigned int t1, t2, t3;
7296 ++ unsigned int blksz = XFS_FSB_TO_B(mp, 1);
7297 ++
7298 ++ t1 = xfs_calc_inode_res(mp, 1) +
7299 ++ xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK), blksz) +
7300 ++ xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
7301 ++ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz);
7302 ++
7303 ++ if (xfs_sb_version_hasrealtime(&mp->m_sb)) {
7304 ++ t2 = xfs_calc_inode_res(mp, 1) +
7305 + xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK),
7306 +- XFS_FSB_TO_B(mp, 1)) +
7307 ++ blksz) +
7308 + xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
7309 +- xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2),
7310 +- XFS_FSB_TO_B(mp, 1))),
7311 +- (xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
7312 +- xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2),
7313 +- XFS_FSB_TO_B(mp, 1))));
7314 ++ xfs_calc_buf_res(xfs_rtalloc_log_count(mp, 1), blksz) +
7315 ++ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1), blksz);
7316 ++ } else {
7317 ++ t2 = 0;
7318 ++ }
7319 ++
7320 ++ t3 = xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
7321 ++ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz);
7322 ++
7323 ++ return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3);
7324 + }
7325 +
7326 + /*
7327 +- * In truncating a file we free up to two extents at once. We can modify:
7328 ++ * In truncating a file we free up to two extents at once. We can modify (t1):
7329 + * the inode being truncated: inode size
7330 + * the inode's bmap btree: (max depth + 1) * block size
7331 +- * And the bmap_finish transaction can free the blocks and bmap blocks:
7332 ++ * And the bmap_finish transaction can free the blocks and bmap blocks (t2):
7333 + * the agf for each of the ags: 4 * sector size
7334 + * the agfl for each of the ags: 4 * sector size
7335 + * the super block to reflect the freed blocks: sector size
7336 + * worst case split in allocation btrees per extent assuming 4 extents:
7337 + * 4 exts * 2 trees * (2 * max depth - 1) * block size
7338 ++ * Or, if it's a realtime file (t3):
7339 ++ * the agf for each of the ags: 2 * sector size
7340 ++ * the agfl for each of the ags: 2 * sector size
7341 ++ * the super block to reflect the freed blocks: sector size
7342 ++ * the realtime bitmap: 2 exts * ((MAXEXTLEN / rtextsize) / NBBY) bytes
7343 ++ * the realtime summary: 2 exts * 1 block
7344 ++ * worst case split in allocation btrees per extent assuming 2 extents:
7345 ++ * 2 exts * 2 trees * (2 * max depth - 1) * block size
7346 + */
7347 + STATIC uint
7348 + xfs_calc_itruncate_reservation(
7349 + struct xfs_mount *mp)
7350 + {
7351 +- return XFS_DQUOT_LOGRES(mp) +
7352 +- max((xfs_calc_inode_res(mp, 1) +
7353 +- xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1,
7354 +- XFS_FSB_TO_B(mp, 1))),
7355 +- (xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
7356 +- xfs_calc_buf_res(xfs_allocfree_log_count(mp, 4),
7357 +- XFS_FSB_TO_B(mp, 1))));
7358 ++ unsigned int t1, t2, t3;
7359 ++ unsigned int blksz = XFS_FSB_TO_B(mp, 1);
7360 ++
7361 ++ t1 = xfs_calc_inode_res(mp, 1) +
7362 ++ xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1, blksz);
7363 ++
7364 ++ t2 = xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
7365 ++ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 4), blksz);
7366 ++
7367 ++ if (xfs_sb_version_hasrealtime(&mp->m_sb)) {
7368 ++ t3 = xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
7369 ++ xfs_calc_buf_res(xfs_rtalloc_log_count(mp, 2), blksz) +
7370 ++ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz);
7371 ++ } else {
7372 ++ t3 = 0;
7373 ++ }
7374 ++
7375 ++ return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3);
7376 + }
7377 +
7378 + /*
7379 +diff --git a/fs/xfs/scrub/dir.c b/fs/xfs/scrub/dir.c
7380 +index cd3e4d768a18c..33dfcba72c7a0 100644
7381 +--- a/fs/xfs/scrub/dir.c
7382 ++++ b/fs/xfs/scrub/dir.c
7383 +@@ -156,6 +156,9 @@ xchk_dir_actor(
7384 + xname.type = XFS_DIR3_FT_UNKNOWN;
7385 +
7386 + error = xfs_dir_lookup(sdc->sc->tp, ip, &xname, &lookup_ino, NULL);
7387 ++ /* ENOENT means the hash lookup failed and the dir is corrupt */
7388 ++ if (error == -ENOENT)
7389 ++ error = -EFSCORRUPTED;
7390 + if (!xchk_fblock_process_error(sdc->sc, XFS_DATA_FORK, offset,
7391 + &error))
7392 + goto out;
7393 +diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
7394 +index 3b0ba54cc4d5b..3bc1034c57e66 100644
7395 +--- a/include/linux/debugfs.h
7396 ++++ b/include/linux/debugfs.h
7397 +@@ -54,6 +54,8 @@ static const struct file_operations __fops = { \
7398 + .llseek = no_llseek, \
7399 + }
7400 +
7401 ++typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *);
7402 ++
7403 + #if defined(CONFIG_DEBUG_FS)
7404 +
7405 + struct dentry *debugfs_lookup(const char *name, struct dentry *parent);
7406 +@@ -75,7 +77,6 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent);
7407 + struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent,
7408 + const char *dest);
7409 +
7410 +-typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *);
7411 + struct dentry *debugfs_create_automount(const char *name,
7412 + struct dentry *parent,
7413 + debugfs_automount_t f,
7414 +@@ -204,7 +205,7 @@ static inline struct dentry *debugfs_create_symlink(const char *name,
7415 +
7416 + static inline struct dentry *debugfs_create_automount(const char *name,
7417 + struct dentry *parent,
7418 +- struct vfsmount *(*f)(void *),
7419 ++ debugfs_automount_t f,
7420 + void *data)
7421 + {
7422 + return ERR_PTR(-ENODEV);
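Hoisting the debugfs_automount_t typedef above the CONFIG_DEBUG_FS guard lets the !CONFIG_DEBUG_FS stub carry exactly the same prototype as the real declaration, so callers compile identically either way. A minimal sketch of the idiom with hypothetical names:

    typedef int (*frob_cb_t)(struct dentry *, void *);

    #if defined(CONFIG_FROB)
    struct dentry *frob_create(const char *name, frob_cb_t cb, void *data);
    #else
    static inline struct dentry *frob_create(const char *name, frob_cb_t cb,
                                             void *data)
    {
            return ERR_PTR(-ENODEV);        /* stub keeps the real signature */
    }
    #endif
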
7423 +diff --git a/include/linux/libata.h b/include/linux/libata.h
7424 +index afc1d72161ba5..3d076aca7ac2a 100644
7425 +--- a/include/linux/libata.h
7426 ++++ b/include/linux/libata.h
7427 +@@ -503,6 +503,7 @@ enum hsm_task_states {
7428 + };
7429 +
7430 + enum ata_completion_errors {
7431 ++ AC_ERR_OK = 0, /* no error */
7432 + AC_ERR_DEV = (1 << 0), /* device reported error */
7433 + AC_ERR_HSM = (1 << 1), /* host state machine violation */
7434 + AC_ERR_TIMEOUT = (1 << 2), /* timeout */
7435 +@@ -912,9 +913,9 @@ struct ata_port_operations {
7436 + /*
7437 + * Command execution
7438 + */
7439 +- int (*qc_defer)(struct ata_queued_cmd *qc);
7440 +- int (*check_atapi_dma)(struct ata_queued_cmd *qc);
7441 +- void (*qc_prep)(struct ata_queued_cmd *qc);
7442 ++ int (*qc_defer)(struct ata_queued_cmd *qc);
7443 ++ int (*check_atapi_dma)(struct ata_queued_cmd *qc);
7444 ++ enum ata_completion_errors (*qc_prep)(struct ata_queued_cmd *qc);
7445 + unsigned int (*qc_issue)(struct ata_queued_cmd *qc);
7446 + bool (*qc_fill_rtf)(struct ata_queued_cmd *qc);
7447 +
7448 +@@ -1181,7 +1182,7 @@ extern int ata_xfer_mode2shift(unsigned long xfer_mode);
7449 + extern const char *ata_mode_string(unsigned long xfer_mask);
7450 + extern unsigned long ata_id_xfermask(const u16 *id);
7451 + extern int ata_std_qc_defer(struct ata_queued_cmd *qc);
7452 +-extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
7453 ++extern enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc);
7454 + extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
7455 + unsigned int n_elem);
7456 + extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
7457 +@@ -1916,9 +1917,9 @@ extern const struct ata_port_operations ata_bmdma_port_ops;
7458 + .sg_tablesize = LIBATA_MAX_PRD, \
7459 + .dma_boundary = ATA_DMA_BOUNDARY
7460 +
7461 +-extern void ata_bmdma_qc_prep(struct ata_queued_cmd *qc);
7462 ++extern enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc);
7463 + extern unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc);
7464 +-extern void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc);
7465 ++extern enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc);
7466 + extern unsigned int ata_bmdma_port_intr(struct ata_port *ap,
7467 + struct ata_queued_cmd *qc);
7468 + extern irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance);
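With the prototype change, every ->qc_prep() hook reports success explicitly rather than returning void. A sketch of what a driver hook looks like under the new contract (the driver is hypothetical; AC_ERR_OK and ATA_QCFLAG_DMAMAP are real identifiers from the patch and libata):

    static enum ata_completion_errors my_qc_prep(struct ata_queued_cmd *qc)
    {
            if (!(qc->flags & ATA_QCFLAG_DMAMAP))
                    return AC_ERR_OK;       /* nothing to prepare */

            /* ... build DMA descriptors; a setup failure can now be
             * surfaced as an AC_ERR_* code instead of going unreported ... */
            return AC_ERR_OK;
    }
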
7469 +diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
7470 +index 8ef330027b134..3f8e84a80b4ad 100644
7471 +--- a/include/linux/mmc/card.h
7472 ++++ b/include/linux/mmc/card.h
7473 +@@ -227,7 +227,7 @@ struct mmc_queue_req;
7474 + * MMC Physical partitions
7475 + */
7476 + struct mmc_part {
7477 +- unsigned int size; /* partition size (in bytes) */
7478 ++ u64 size; /* partition size (in bytes) */
7479 + unsigned int part_cfg; /* partition type */
7480 + char name[MAX_MMC_PART_NAME_LEN];
7481 + bool force_ro; /* to make boot parts RO by default */
7482 +diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
7483 +index ad69430fd0eb5..5162fc1533c2f 100644
7484 +--- a/include/linux/nfs_page.h
7485 ++++ b/include/linux/nfs_page.h
7486 +@@ -142,6 +142,8 @@ extern void nfs_unlock_and_release_request(struct nfs_page *);
7487 + extern int nfs_page_group_lock(struct nfs_page *);
7488 + extern void nfs_page_group_unlock(struct nfs_page *);
7489 + extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
7490 ++extern int nfs_page_set_headlock(struct nfs_page *req);
7491 ++extern void nfs_page_clear_headlock(struct nfs_page *req);
7492 + extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *);
7493 +
7494 + /*
7495 +diff --git a/include/linux/pci.h b/include/linux/pci.h
7496 +index 2517492dd1855..2fda9893962d1 100644
7497 +--- a/include/linux/pci.h
7498 ++++ b/include/linux/pci.h
7499 +@@ -1144,7 +1144,6 @@ int pci_enable_rom(struct pci_dev *pdev);
7500 + void pci_disable_rom(struct pci_dev *pdev);
7501 + void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
7502 + void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
7503 +-void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size);
7504 +
7505 + /* Power management related routines */
7506 + int pci_save_state(struct pci_dev *dev);
7507 +diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
7508 +index bcf4cf26b8c89..a42a29952889c 100644
7509 +--- a/include/linux/seqlock.h
7510 ++++ b/include/linux/seqlock.h
7511 +@@ -243,6 +243,13 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
7512 + * usual consistency guarantee. It is one wmb cheaper, because we can
7513 + * collapse the two back-to-back wmb()s.
7514 + *
7515 ++ * Note that writes surrounding the barrier should be declared atomic (e.g.
7516 ++ * via WRITE_ONCE): a) to ensure the writes become visible to other threads
7517 ++ * atomically, avoiding compiler optimizations; b) to document which writes are
7518 ++ * meant to propagate to the reader critical section. This is necessary because
7519 ++ * neither the writes before nor those after the barrier are enclosed in a
7520 ++ * seq-writer critical section that would ensure readers are aware of ongoing writes.
7521 ++ *
7522 + * seqcount_t seq;
7523 + * bool X = true, Y = false;
7524 + *
7525 +@@ -262,11 +269,11 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
7526 + *
7527 + * void write(void)
7528 + * {
7529 +- * Y = true;
7530 ++ * WRITE_ONCE(Y, true);
7531 + *
7532 + * raw_write_seqcount_barrier(seq);
7533 + *
7534 +- * X = false;
7535 ++ * WRITE_ONCE(X, false);
7536 + * }
7537 + */
7538 + static inline void raw_write_seqcount_barrier(seqcount_t *s)
7539 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
7540 +index cbc0294f39899..703ce71caeacb 100644
7541 +--- a/include/linux/skbuff.h
7542 ++++ b/include/linux/skbuff.h
7543 +@@ -1688,6 +1688,18 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
7544 + return list_->qlen;
7545 + }
7546 +
7547 ++/**
7548 ++ * skb_queue_len_lockless - get queue length
7549 ++ * @list_: list to measure
7550 ++ *
7551 ++ * Return the length of an &sk_buff queue.
7552 ++ * This variant can be used in lockless contexts.
7553 ++ */
7554 ++static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_)
7555 ++{
7556 ++ return READ_ONCE(list_->qlen);
7557 ++}
7558 ++
7559 + /**
7560 + * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
7561 + * @list: queue to initialize
7562 +@@ -1895,7 +1907,7 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
7563 + {
7564 + struct sk_buff *next, *prev;
7565 +
7566 +- list->qlen--;
7567 ++ WRITE_ONCE(list->qlen, list->qlen - 1);
7568 + next = skb->next;
7569 + prev = skb->prev;
7570 + skb->next = skb->prev = NULL;
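skb_queue_len_lockless() pairs with the WRITE_ONCE() added to __skb_unlink(): the writer updates qlen with a single untorn store, so a reader outside list->lock sees a coherent, if possibly stale, value. Illustrative usage (the caller is hypothetical; staleness must be acceptable):

    /* Cheap, approximate backpressure check without taking list->lock. */
    static bool my_queue_is_crowded(const struct sk_buff_head *q, __u32 limit)
    {
            return skb_queue_len_lockless(q) >= limit;
    }
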
7571 +diff --git a/include/net/sock.h b/include/net/sock.h
7572 +index 77f36257cac97..bc752237dff3f 100644
7573 +--- a/include/net/sock.h
7574 ++++ b/include/net/sock.h
7575 +@@ -900,11 +900,11 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
7576 + skb_dst_force(skb);
7577 +
7578 + if (!sk->sk_backlog.tail)
7579 +- sk->sk_backlog.head = skb;
7580 ++ WRITE_ONCE(sk->sk_backlog.head, skb);
7581 + else
7582 + sk->sk_backlog.tail->next = skb;
7583 +
7584 +- sk->sk_backlog.tail = skb;
7585 ++ WRITE_ONCE(sk->sk_backlog.tail, skb);
7586 + skb->next = NULL;
7587 + }
7588 +
7589 +diff --git a/include/trace/events/sctp.h b/include/trace/events/sctp.h
7590 +index 7475c7be165aa..d4aac34365955 100644
7591 +--- a/include/trace/events/sctp.h
7592 ++++ b/include/trace/events/sctp.h
7593 +@@ -75,15 +75,6 @@ TRACE_EVENT(sctp_probe,
7594 + __entry->pathmtu = asoc->pathmtu;
7595 + __entry->rwnd = asoc->peer.rwnd;
7596 + __entry->unack_data = asoc->unack_data;
7597 +-
7598 +- if (trace_sctp_probe_path_enabled()) {
7599 +- struct sctp_transport *sp;
7600 +-
7601 +- list_for_each_entry(sp, &asoc->peer.transport_addr_list,
7602 +- transports) {
7603 +- trace_sctp_probe_path(sp, asoc);
7604 +- }
7605 +- }
7606 + ),
7607 +
7608 + TP_printk("asoc=%#llx mark=%#x bind_port=%d peer_port=%d pathmtu=%d "
7609 +diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
7610 +index 4f7262eba73d8..50952d6d81209 100644
7611 +--- a/kernel/audit_watch.c
7612 ++++ b/kernel/audit_watch.c
7613 +@@ -317,8 +317,6 @@ static void audit_update_watch(struct audit_parent *parent,
7614 + if (oentry->rule.exe)
7615 + audit_remove_mark(oentry->rule.exe);
7616 +
7617 +- audit_watch_log_rule_change(r, owatch, "updated_rules");
7618 +-
7619 + call_rcu(&oentry->rcu, audit_free_rule_rcu);
7620 + }
7621 +
7622 +diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
7623 +index 1b28fb006763a..3f3ed33bd2fdc 100644
7624 +--- a/kernel/bpf/hashtab.c
7625 ++++ b/kernel/bpf/hashtab.c
7626 +@@ -667,15 +667,7 @@ static void htab_elem_free_rcu(struct rcu_head *head)
7627 + struct htab_elem *l = container_of(head, struct htab_elem, rcu);
7628 + struct bpf_htab *htab = l->htab;
7629 +
7630 +- /* must increment bpf_prog_active to avoid kprobe+bpf triggering while
7631 +- * we're calling kfree, otherwise deadlock is possible if kprobes
7632 +- * are placed somewhere inside of slub
7633 +- */
7634 +- preempt_disable();
7635 +- __this_cpu_inc(bpf_prog_active);
7636 + htab_elem_free(htab, l);
7637 +- __this_cpu_dec(bpf_prog_active);
7638 +- preempt_enable();
7639 + }
7640 +
7641 + static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
7642 +diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
7643 +index c04815bb15cc1..11fade89c1f38 100644
7644 +--- a/kernel/bpf/inode.c
7645 ++++ b/kernel/bpf/inode.c
7646 +@@ -207,10 +207,12 @@ static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos)
7647 + else
7648 + prev_key = key;
7649 +
7650 ++ rcu_read_lock();
7651 + if (map->ops->map_get_next_key(map, prev_key, key)) {
7652 + map_iter(m)->done = true;
7653 +- return NULL;
7654 ++ key = NULL;
7655 + }
7656 ++ rcu_read_unlock();
7657 + return key;
7658 + }
7659 +
7660 +diff --git a/kernel/kprobes.c b/kernel/kprobes.c
7661 +index 230d9d599b5aa..2161f519d4812 100644
7662 +--- a/kernel/kprobes.c
7663 ++++ b/kernel/kprobes.c
7664 +@@ -1065,9 +1065,20 @@ static int disarm_kprobe_ftrace(struct kprobe *p)
7665 + return ret;
7666 + }
7667 + #else /* !CONFIG_KPROBES_ON_FTRACE */
7668 +-#define prepare_kprobe(p) arch_prepare_kprobe(p)
7669 +-#define arm_kprobe_ftrace(p) (-ENODEV)
7670 +-#define disarm_kprobe_ftrace(p) (-ENODEV)
7671 ++static inline int prepare_kprobe(struct kprobe *p)
7672 ++{
7673 ++ return arch_prepare_kprobe(p);
7674 ++}
7675 ++
7676 ++static inline int arm_kprobe_ftrace(struct kprobe *p)
7677 ++{
7678 ++ return -ENODEV;
7679 ++}
7680 ++
7681 ++static inline int disarm_kprobe_ftrace(struct kprobe *p)
7682 ++{
7683 ++ return -ENODEV;
7684 ++}
7685 + #endif
7686 +
7687 + /* Arm a kprobe with text_mutex */
7688 +@@ -2083,9 +2094,10 @@ static void kill_kprobe(struct kprobe *p)
7689 +
7690 + /*
7691 + * The module is going away. We should disarm the kprobe which
7692 +- * is using ftrace.
7693 ++ * is using ftrace, because the ftrace framework is still available at
7694 ++ * the MODULE_STATE_GOING notification.
7695 + */
7696 +- if (kprobe_ftrace(p))
7697 ++ if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed)
7698 + disarm_kprobe_ftrace(p);
7699 + }
7700 +
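Converting the fallback macros to static inline functions keeps the !CONFIG_KPROBES_ON_FTRACE build honest: arguments are type-checked and evaluated exactly once, and unused-variable warnings behave like the real implementations. The general idiom, with a hypothetical hook:

    /* Before: no type checking, 'p' silently discarded */
    #define arm_widget(p)   (-ENODEV)

    /* After: the compiler verifies 'p' is really a struct widget *,
     * while the generated code stays identical. */
    static inline int arm_widget(struct widget *p)
    {
            return -ENODEV;
    }
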
7701 +diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
7702 +index 3cb0e5b479ff3..cf272aba362be 100644
7703 +--- a/kernel/printk/printk.c
7704 ++++ b/kernel/printk/printk.c
7705 +@@ -2148,6 +2148,9 @@ static int __init console_setup(char *str)
7706 + char *s, *options, *brl_options = NULL;
7707 + int idx;
7708 +
7709 ++ if (str[0] == 0)
7710 ++ return 1;
7711 ++
7712 + if (_braille_console_setup(&str, &brl_options))
7713 + return 1;
7714 +
7715 +diff --git a/kernel/sys.c b/kernel/sys.c
7716 +index 096932a450466..baf60a3aa34b7 100644
7717 +--- a/kernel/sys.c
7718 ++++ b/kernel/sys.c
7719 +@@ -1275,11 +1275,13 @@ SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
7720 +
7721 + SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
7722 + {
7723 +- struct oldold_utsname tmp = {};
7724 ++ struct oldold_utsname tmp;
7725 +
7726 + if (!name)
7727 + return -EFAULT;
7728 +
7729 ++ memset(&tmp, 0, sizeof(tmp));
7730 ++
7731 + down_read(&uts_sem);
7732 + memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN);
7733 + memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
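The switch from an empty-brace initializer to memset() matters because `= {}` only guarantees the named members are zeroed; padding bytes between members may remain uninitialized and would leak to userspace through copy_to_user(). A minimal illustration with a hypothetical struct:

    struct reply {
            __u8    a;      /* followed by 7 padding bytes ... */
            __u64   b;      /* ... before this 8-byte-aligned member */
    };

    static long fill_reply(struct reply __user *dst)
    {
            struct reply r;

            memset(&r, 0, sizeof(r));       /* zeroes members *and* padding */
            r.a = 1;
            r.b = 2;
            return copy_to_user(dst, &r, sizeof(r)) ? -EFAULT : 0;
    }
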
7734 +diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
7735 +index 81ee5b83c9200..c66fd11d94bc4 100644
7736 +--- a/kernel/time/timekeeping.c
7737 ++++ b/kernel/time/timekeeping.c
7738 +@@ -1004,9 +1004,8 @@ static int scale64_check_overflow(u64 mult, u64 div, u64 *base)
7739 + ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem)))
7740 + return -EOVERFLOW;
7741 + tmp *= mult;
7742 +- rem *= mult;
7743 +
7744 +- do_div(rem, div);
7745 ++ rem = div64_u64(rem * mult, div);
7746 + *base = tmp + rem;
7747 + return 0;
7748 + }
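The move to div64_u64() matters because do_div() only accepts a 32-bit divisor, so a `div` wider than 32 bits would be silently truncated. Roughly, the two helpers differ in shape:

    u64 n = 10000000000ULL;
    u32 small = 7;
    u64 big = 5000000000ULL;

    u32 r = do_div(n, small);       /* macro: n is replaced by the quotient,
                                     * the remainder is returned, divisor is u32 */
    u64 q = div64_u64(n, big);      /* function: full u64 divisor, quotient
                                     * returned, operands left untouched */
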
7749 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
7750 +index 4966410bb0f4d..6bf617ff03694 100644
7751 +--- a/kernel/trace/trace.c
7752 ++++ b/kernel/trace/trace.c
7753 +@@ -3037,6 +3037,9 @@ int trace_array_printk(struct trace_array *tr,
7754 + if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
7755 + return 0;
7756 +
7757 ++ if (!tr)
7758 ++ return -ENOENT;
7759 ++
7760 + va_start(ap, fmt);
7761 + ret = trace_array_vprintk(tr, ip, fmt, ap);
7762 + va_end(ap);
7763 +@@ -8526,7 +8529,7 @@ __init static int tracer_alloc_buffers(void)
7764 + goto out_free_buffer_mask;
7765 +
7766 + /* Only allocate trace_printk buffers if a trace_printk exists */
7767 +- if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
7768 ++ if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
7769 + /* Must be called before global_trace.buffer is allocated */
7770 + trace_printk_init_buffers();
7771 +
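This hunk, like the kmemleak hunk further down, compares the addresses of linker-provided symbols instead of the symbols themselves. Without the explicit `&`, the compiler may assume two distinct extern objects can never share an address and constant-fold the test away. The idiom, with hypothetical section names:

    /* Bounds emitted by the linker script, not real C objects. */
    extern char __start_mysec[], __stop_mysec[];

    static bool mysec_is_empty(void)
    {
            return &__stop_mysec[0] == &__start_mysec[0];
    }
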
7772 +diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
7773 +index 06bb2fd9a56c5..a97aad105d367 100644
7774 +--- a/kernel/trace/trace_entries.h
7775 ++++ b/kernel/trace/trace_entries.h
7776 +@@ -179,7 +179,7 @@ FTRACE_ENTRY(kernel_stack, stack_entry,
7777 +
7778 + F_STRUCT(
7779 + __field( int, size )
7780 +- __dynamic_array(unsigned long, caller )
7781 ++ __array( unsigned long, caller, FTRACE_STACK_ENTRIES )
7782 + ),
7783 +
7784 + F_printk("\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
7785 +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
7786 +index 27726121d332c..0fc06a7da87fb 100644
7787 +--- a/kernel/trace/trace_events.c
7788 ++++ b/kernel/trace/trace_events.c
7789 +@@ -800,6 +800,8 @@ static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
7790 + char *event = NULL, *sub = NULL, *match;
7791 + int ret;
7792 +
7793 ++ if (!tr)
7794 ++ return -ENOENT;
7795 + /*
7796 + * The buf format can be <subsystem>:<event-name>
7797 + * *:<event-name> means any event by that name.
7798 +diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
7799 +index dbd3c97d1501a..3ed2d7f7e5712 100644
7800 +--- a/kernel/trace/trace_events_hist.c
7801 ++++ b/kernel/trace/trace_events_hist.c
7802 +@@ -4225,7 +4225,6 @@ static int parse_var_defs(struct hist_trigger_data *hist_data)
7803 +
7804 + s = kstrdup(field_str, GFP_KERNEL);
7805 + if (!s) {
7806 +- kfree(hist_data->attrs->var_defs.name[n_vars]);
7807 + ret = -ENOMEM;
7808 + goto free;
7809 + }
7810 +diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c
7811 +index 71f553cceb3c1..0e373cb0106bb 100644
7812 +--- a/kernel/trace/trace_preemptirq.c
7813 ++++ b/kernel/trace/trace_preemptirq.c
7814 +@@ -59,14 +59,14 @@ EXPORT_SYMBOL(trace_hardirqs_on_caller);
7815 +
7816 + __visible void trace_hardirqs_off_caller(unsigned long caller_addr)
7817 + {
7818 ++ lockdep_hardirqs_off(CALLER_ADDR0);
7819 ++
7820 + if (!this_cpu_read(tracing_irq_cpu)) {
7821 + this_cpu_write(tracing_irq_cpu, 1);
7822 + tracer_hardirqs_off(CALLER_ADDR0, caller_addr);
7823 + if (!in_nmi())
7824 + trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
7825 + }
7826 +-
7827 +- lockdep_hardirqs_off(CALLER_ADDR0);
7828 + }
7829 + EXPORT_SYMBOL(trace_hardirqs_off_caller);
7830 + #endif /* CONFIG_TRACE_IRQFLAGS */
7831 +diff --git a/lib/string.c b/lib/string.c
7832 +index 72125fd5b4a64..edf4907ec946f 100644
7833 +--- a/lib/string.c
7834 ++++ b/lib/string.c
7835 +@@ -236,6 +236,30 @@ ssize_t strscpy(char *dest, const char *src, size_t count)
7836 + EXPORT_SYMBOL(strscpy);
7837 + #endif
7838 +
7839 ++/**
7840 ++ * stpcpy - copy a string from src to dest returning a pointer to the new end
7841 ++ * of dest, including src's %NUL-terminator. May overrun dest.
7842 ++ * @dest: pointer to the destination buffer. Must be large enough to
7843 ++ * receive the copy.
7844 ++ * @src: pointer to the beginning of string being copied from. Must not overlap
7845 ++ * dest.
7846 ++ *
7847 ++ * stpcpy differs from strcpy in a key way: the return value is a pointer
7848 ++ * to the new %NUL-terminating character in @dest. (For strcpy, the return
7849 ++ * value is a pointer to the start of @dest). This interface is considered
7850 ++ * unsafe as it doesn't perform bounds checking of the inputs. As such it's
7851 ++ * not recommended for usage. Instead, its definition is provided in case
7852 ++ * the compiler lowers other libcalls to stpcpy.
7853 ++ */
7854 ++char *stpcpy(char *__restrict__ dest, const char *__restrict__ src);
7855 ++char *stpcpy(char *__restrict__ dest, const char *__restrict__ src)
7856 ++{
7857 ++ while ((*dest++ = *src++) != '\0')
7858 ++ /* nothing */;
7859 ++ return --dest;
7860 ++}
7861 ++EXPORT_SYMBOL(stpcpy);
7862 ++
7863 + #ifndef __HAVE_ARCH_STRCAT
7864 + /**
7865 + * strcat - Append one %NUL-terminated string to another
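The return value is what distinguishes stpcpy(): chained concatenation stays linear because each call hands back the new end of the string instead of forcing a rescan from the start. Illustrative usage (the caller alone must guarantee the buffer is large enough; stpcpy() does no bounds checking):

    char buf[32];
    char *p = buf;

    p = stpcpy(p, "foo");
    p = stpcpy(p, "/");
    p = stpcpy(p, "bar");   /* buf holds "foo/bar", p points at its NUL */
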
7866 +diff --git a/mm/filemap.c b/mm/filemap.c
7867 +index 45f1c6d73b5b0..f2e777003b901 100644
7868 +--- a/mm/filemap.c
7869 ++++ b/mm/filemap.c
7870 +@@ -2889,6 +2889,14 @@ filler:
7871 + unlock_page(page);
7872 + goto out;
7873 + }
7874 ++
7875 ++ /*
7876 ++ * A previous I/O error may have been due to temporary
7877 ++ * failures.
7878 ++ * Clear the page error before the actual read; PG_error will
7879 ++ * be set again if the read fails.
7880 ++ */
7881 ++ ClearPageError(page);
7882 + goto filler;
7883 +
7884 + out:
7885 +diff --git a/mm/kmemleak.c b/mm/kmemleak.c
7886 +index 5eeabece0c178..f54734abf9466 100644
7887 +--- a/mm/kmemleak.c
7888 ++++ b/mm/kmemleak.c
7889 +@@ -2039,7 +2039,7 @@ void __init kmemleak_init(void)
7890 + create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
7891 + KMEMLEAK_GREY, GFP_ATOMIC);
7892 + /* only register .data..ro_after_init if not within .data */
7893 +- if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata)
7894 ++ if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
7895 + create_object((unsigned long)__start_ro_after_init,
7896 + __end_ro_after_init - __start_ro_after_init,
7897 + KMEMLEAK_GREY, GFP_ATOMIC);
7898 +diff --git a/mm/memory.c b/mm/memory.c
7899 +index bbf0cc4066c84..eeae63bd95027 100644
7900 +--- a/mm/memory.c
7901 ++++ b/mm/memory.c
7902 +@@ -116,6 +116,18 @@ int randomize_va_space __read_mostly =
7903 + 2;
7904 + #endif
7905 +
7906 ++#ifndef arch_faults_on_old_pte
7907 ++static inline bool arch_faults_on_old_pte(void)
7908 ++{
7909 ++ /*
7910 ++ * Architectures without a hardware "accessed" flag need to provide
7911 ++ * their own helper. The default of "true" means a page fault will
7912 ++ * be taken on an old pte.
7913 ++ */
7914 ++ return true;
7915 ++}
7916 ++#endif
7917 ++
7918 + static int __init disable_randmaps(char *s)
7919 + {
7920 + randomize_va_space = 0;
7921 +@@ -2335,32 +2347,101 @@ static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
7922 + return same;
7923 + }
7924 +
7925 +-static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
7926 ++static inline bool cow_user_page(struct page *dst, struct page *src,
7927 ++ struct vm_fault *vmf)
7928 + {
7929 ++ bool ret;
7930 ++ void *kaddr;
7931 ++ void __user *uaddr;
7932 ++ bool locked = false;
7933 ++ struct vm_area_struct *vma = vmf->vma;
7934 ++ struct mm_struct *mm = vma->vm_mm;
7935 ++ unsigned long addr = vmf->address;
7936 ++
7937 + debug_dma_assert_idle(src);
7938 +
7939 ++ if (likely(src)) {
7940 ++ copy_user_highpage(dst, src, addr, vma);
7941 ++ return true;
7942 ++ }
7943 ++
7944 + /*
7945 + * If the source page was a PFN mapping, we don't have
7946 + * a "struct page" for it. We do a best-effort copy by
7947 + * just copying from the original user address. If that
7948 + * fails, we just zero-fill it. Live with it.
7949 + */
7950 +- if (unlikely(!src)) {
7951 +- void *kaddr = kmap_atomic(dst);
7952 +- void __user *uaddr = (void __user *)(va & PAGE_MASK);
7953 ++ kaddr = kmap_atomic(dst);
7954 ++ uaddr = (void __user *)(addr & PAGE_MASK);
7955 ++
7956 ++ /*
7957 ++ * On architectures with software "accessed" bits, we would
7958 ++ * take a double page fault, so mark it accessed here.
7959 ++ */
7960 ++ if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {
7961 ++ pte_t entry;
7962 ++
7963 ++ vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
7964 ++ locked = true;
7965 ++ if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
7966 ++ /*
7967 ++ * Another thread has already handled the fault
7968 ++ * and we don't need to do anything. If it's
7969 ++ * not the case, the fault will be triggered
7970 ++ * again on the same address.
7971 ++ */
7972 ++ ret = false;
7973 ++ goto pte_unlock;
7974 ++ }
7975 ++
7976 ++ entry = pte_mkyoung(vmf->orig_pte);
7977 ++ if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
7978 ++ update_mmu_cache(vma, addr, vmf->pte);
7979 ++ }
7980 ++
7981 ++ /*
7982 ++ * This really shouldn't fail, because the page is there
7983 ++ * in the page tables. But it might just be unreadable,
7984 ++ * in which case we just give up and fill the result with
7985 ++ * zeroes.
7986 ++ */
7987 ++ if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
7988 ++ if (locked)
7989 ++ goto warn;
7990 ++
7991 ++ /* Re-validate under PTL if the page is still mapped */
7992 ++ vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
7993 ++ locked = true;
7994 ++ if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
7995 ++ /* The PTE changed under us. Retry page fault. */
7996 ++ ret = false;
7997 ++ goto pte_unlock;
7998 ++ }
7999 +
8000 + /*
8001 +- * This really shouldn't fail, because the page is there
8002 +- * in the page tables. But it might just be unreadable,
8003 +- * in which case we just give up and fill the result with
8004 +- * zeroes.
8005 ++ * The same page can be mapped back since the last copy attempt.
8006 ++ * Try to copy again under the PTL.
8007 + */
8008 +- if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
8009 ++ if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
8010 ++ /*
8011 ++ * Warn in case there is some obscure
8012 ++ * use-case.
8013 ++ */
8014 ++warn:
8015 ++ WARN_ON_ONCE(1);
8016 + clear_page(kaddr);
8017 +- kunmap_atomic(kaddr);
8018 +- flush_dcache_page(dst);
8019 +- } else
8020 +- copy_user_highpage(dst, src, va, vma);
8021 ++ }
8022 ++ }
8023 ++
8024 ++ ret = true;
8025 ++
8026 ++pte_unlock:
8027 ++ if (locked)
8028 ++ pte_unmap_unlock(vmf->pte, vmf->ptl);
8029 ++ kunmap_atomic(kaddr);
8030 ++ flush_dcache_page(dst);
8031 ++
8032 ++ return ret;
8033 + }
8034 +
8035 + static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
8036 +@@ -2514,7 +2595,19 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
8037 + vmf->address);
8038 + if (!new_page)
8039 + goto oom;
8040 +- cow_user_page(new_page, old_page, vmf->address, vma);
8041 ++
8042 ++ if (!cow_user_page(new_page, old_page, vmf)) {
8043 ++ /*
8044 ++ * COW failed, if the fault was solved by other,
8045 ++ * it's fine. If not, userspace would re-fault on
8046 ++ * the same address and we will handle the fault
8047 ++ * from the second attempt.
8048 ++ */
8049 ++ put_page(new_page);
8050 ++ if (old_page)
8051 ++ put_page(old_page);
8052 ++ return 0;
8053 ++ }
8054 + }
8055 +
8056 + if (mem_cgroup_try_charge_delay(new_page, mm, GFP_KERNEL, &memcg, false))
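The arch_faults_on_old_pte() fallback added above uses the kernel's usual overridable-hook idiom: the generic definition compiles only when an architecture has not already supplied its own and defined the matching preprocessor symbol. A sketch with a hypothetical hook:

    /* In an arch header, when the hardware has the feature: */
    #define arch_has_fast_bit arch_has_fast_bit
    static inline bool arch_has_fast_bit(void)
    {
            return true;
    }

    /* In generic code -- compiled only without an arch override: */
    #ifndef arch_has_fast_bit
    static inline bool arch_has_fast_bit(void)
    {
            return false;   /* conservative default */
    }
    #endif
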
8057 +diff --git a/mm/mmap.c b/mm/mmap.c
8058 +index e84fd3347a518..f875386e7acd4 100644
8059 +--- a/mm/mmap.c
8060 ++++ b/mm/mmap.c
8061 +@@ -2077,6 +2077,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
8062 + info.low_limit = mm->mmap_base;
8063 + info.high_limit = TASK_SIZE;
8064 + info.align_mask = 0;
8065 ++ info.align_offset = 0;
8066 + return vm_unmapped_area(&info);
8067 + }
8068 + #endif
8069 +@@ -2118,6 +2119,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8070 + info.low_limit = max(PAGE_SIZE, mmap_min_addr);
8071 + info.high_limit = mm->mmap_base;
8072 + info.align_mask = 0;
8073 ++ info.align_offset = 0;
8074 + addr = vm_unmapped_area(&info);
8075 +
8076 + /*
8077 +diff --git a/mm/pagewalk.c b/mm/pagewalk.c
8078 +index c3084ff2569d2..3c0930d94a295 100644
8079 +--- a/mm/pagewalk.c
8080 ++++ b/mm/pagewalk.c
8081 +@@ -15,9 +15,9 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
8082 + err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
8083 + if (err)
8084 + break;
8085 +- addr += PAGE_SIZE;
8086 +- if (addr == end)
8087 ++ if (addr >= end - PAGE_SIZE)
8088 + break;
8089 ++ addr += PAGE_SIZE;
8090 + pte++;
8091 + }
8092 +
8093 +diff --git a/mm/swap_state.c b/mm/swap_state.c
8094 +index 09731f4174c7e..3febffe0fca4a 100644
8095 +--- a/mm/swap_state.c
8096 ++++ b/mm/swap_state.c
8097 +@@ -537,10 +537,11 @@ static unsigned long swapin_nr_pages(unsigned long offset)
8098 + return 1;
8099 +
8100 + hits = atomic_xchg(&swapin_readahead_hits, 0);
8101 +- pages = __swapin_nr_pages(prev_offset, offset, hits, max_pages,
8102 ++ pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
8103 ++ max_pages,
8104 + atomic_read(&last_readahead_pages));
8105 + if (!hits)
8106 +- prev_offset = offset;
8107 ++ WRITE_ONCE(prev_offset, offset);
8108 + atomic_set(&last_readahead_pages, pages);
8109 +
8110 + return pages;
8111 +diff --git a/mm/swapfile.c b/mm/swapfile.c
8112 +index 0047dcaf93697..adeb49fcad23e 100644
8113 +--- a/mm/swapfile.c
8114 ++++ b/mm/swapfile.c
8115 +@@ -998,7 +998,7 @@ start_over:
8116 + goto nextsi;
8117 + }
8118 + if (size == SWAPFILE_CLUSTER) {
8119 +- if (!(si->flags & SWP_FILE))
8120 ++ if (si->flags & SWP_BLKDEV)
8121 + n_ret = swap_alloc_cluster(si, swp_entries);
8122 + } else
8123 + n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
8124 +@@ -2738,10 +2738,10 @@ static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
8125 + else
8126 + type = si->type + 1;
8127 +
8128 ++ ++(*pos);
8129 + for (; (si = swap_type_to_swap_info(type)); type++) {
8130 + if (!(si->flags & SWP_USED) || !si->swap_map)
8131 + continue;
8132 +- ++*pos;
8133 + return si;
8134 + }
8135 +
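The swap_next() change, and the matching fixes in the net/ hunks below, all enforce the same seq_file rule: ->next() must advance *pos on every call, including the final one that returns NULL, or seq_read() may invoke it again at the same position and loop or emit duplicate records. The contract in miniature (the iterator and lookup helper are hypothetical):

    static void *my_seq_next(struct seq_file *s, void *v, loff_t *pos)
    {
            ++(*pos);                       /* advance unconditionally */
            return my_lookup_next(v);       /* hypothetical; NULL ends iteration */
    }
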
8136 +diff --git a/mm/vmscan.c b/mm/vmscan.c
8137 +index b93dc8fc6007f..b7d7f6d65bd5b 100644
8138 +--- a/mm/vmscan.c
8139 ++++ b/mm/vmscan.c
8140 +@@ -3109,8 +3109,9 @@ static bool allow_direct_reclaim(pg_data_t *pgdat)
8141 +
8142 + /* kswapd must be awake if processes are being throttled */
8143 + if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
8144 +- pgdat->kswapd_classzone_idx = min(pgdat->kswapd_classzone_idx,
8145 +- (enum zone_type)ZONE_NORMAL);
8146 ++ if (READ_ONCE(pgdat->kswapd_classzone_idx) > ZONE_NORMAL)
8147 ++ WRITE_ONCE(pgdat->kswapd_classzone_idx, ZONE_NORMAL);
8148 ++
8149 + wake_up_interruptible(&pgdat->kswapd_wait);
8150 + }
8151 +
8152 +@@ -3626,9 +3627,9 @@ out:
8153 + static enum zone_type kswapd_classzone_idx(pg_data_t *pgdat,
8154 + enum zone_type prev_classzone_idx)
8155 + {
8156 +- if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
8157 +- return prev_classzone_idx;
8158 +- return pgdat->kswapd_classzone_idx;
8159 ++ enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_classzone_idx);
8160 ++
8161 ++ return curr_idx == MAX_NR_ZONES ? prev_classzone_idx : curr_idx;
8162 + }
8163 +
8164 + static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
8165 +@@ -3672,8 +3673,11 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_o
8166 + * the previous request that slept prematurely.
8167 + */
8168 + if (remaining) {
8169 +- pgdat->kswapd_classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
8170 +- pgdat->kswapd_order = max(pgdat->kswapd_order, reclaim_order);
8171 ++ WRITE_ONCE(pgdat->kswapd_classzone_idx,
8172 ++ kswapd_classzone_idx(pgdat, classzone_idx));
8173 ++
8174 ++ if (READ_ONCE(pgdat->kswapd_order) < reclaim_order)
8175 ++ WRITE_ONCE(pgdat->kswapd_order, reclaim_order);
8176 + }
8177 +
8178 + finish_wait(&pgdat->kswapd_wait, &wait);
8179 +@@ -3755,12 +3759,12 @@ static int kswapd(void *p)
8180 + tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
8181 + set_freezable();
8182 +
8183 +- pgdat->kswapd_order = 0;
8184 +- pgdat->kswapd_classzone_idx = MAX_NR_ZONES;
8185 ++ WRITE_ONCE(pgdat->kswapd_order, 0);
8186 ++ WRITE_ONCE(pgdat->kswapd_classzone_idx, MAX_NR_ZONES);
8187 + for ( ; ; ) {
8188 + bool ret;
8189 +
8190 +- alloc_order = reclaim_order = pgdat->kswapd_order;
8191 ++ alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);
8192 + classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
8193 +
8194 + kswapd_try_sleep:
8195 +@@ -3768,10 +3772,10 @@ kswapd_try_sleep:
8196 + classzone_idx);
8197 +
8198 + /* Read the new order and classzone_idx */
8199 +- alloc_order = reclaim_order = pgdat->kswapd_order;
8200 ++ alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);
8201 + classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
8202 +- pgdat->kswapd_order = 0;
8203 +- pgdat->kswapd_classzone_idx = MAX_NR_ZONES;
8204 ++ WRITE_ONCE(pgdat->kswapd_order, 0);
8205 ++ WRITE_ONCE(pgdat->kswapd_classzone_idx, MAX_NR_ZONES);
8206 +
8207 + ret = try_to_freeze();
8208 + if (kthread_should_stop())
8209 +@@ -3816,20 +3820,23 @@ void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
8210 + enum zone_type classzone_idx)
8211 + {
8212 + pg_data_t *pgdat;
8213 ++ enum zone_type curr_idx;
8214 +
8215 + if (!managed_zone(zone))
8216 + return;
8217 +
8218 + if (!cpuset_zone_allowed(zone, gfp_flags))
8219 + return;
8220 ++
8221 + pgdat = zone->zone_pgdat;
8222 ++ curr_idx = READ_ONCE(pgdat->kswapd_classzone_idx);
8223 ++
8224 ++ if (curr_idx == MAX_NR_ZONES || curr_idx < classzone_idx)
8225 ++ WRITE_ONCE(pgdat->kswapd_classzone_idx, classzone_idx);
8226 ++
8227 ++ if (READ_ONCE(pgdat->kswapd_order) < order)
8228 ++ WRITE_ONCE(pgdat->kswapd_order, order);
8229 +
8230 +- if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
8231 +- pgdat->kswapd_classzone_idx = classzone_idx;
8232 +- else
8233 +- pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx,
8234 +- classzone_idx);
8235 +- pgdat->kswapd_order = max(pgdat->kswapd_order, order);
8236 + if (!waitqueue_active(&pgdat->kswapd_wait))
8237 + return;
8238 +
8239 +diff --git a/net/atm/lec.c b/net/atm/lec.c
8240 +index ad4f829193f05..5a6186b809874 100644
8241 +--- a/net/atm/lec.c
8242 ++++ b/net/atm/lec.c
8243 +@@ -1270,6 +1270,12 @@ static void lec_arp_clear_vccs(struct lec_arp_table *entry)
8244 + entry->vcc = NULL;
8245 + }
8246 + if (entry->recv_vcc) {
8247 ++ struct atm_vcc *vcc = entry->recv_vcc;
8248 ++ struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
8249 ++
8250 ++ kfree(vpriv);
8251 ++ vcc->user_back = NULL;
8252 ++
8253 + entry->recv_vcc->push = entry->old_recv_push;
8254 + vcc_release_async(entry->recv_vcc, -EPIPE);
8255 + entry->recv_vcc = NULL;
8256 +diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
8257 +index 9b8bf06ccb613..1401031f4bb4a 100644
8258 +--- a/net/batman-adv/bridge_loop_avoidance.c
8259 ++++ b/net/batman-adv/bridge_loop_avoidance.c
8260 +@@ -37,6 +37,7 @@
8261 + #include <linux/lockdep.h>
8262 + #include <linux/netdevice.h>
8263 + #include <linux/netlink.h>
8264 ++#include <linux/preempt.h>
8265 + #include <linux/rculist.h>
8266 + #include <linux/rcupdate.h>
8267 + #include <linux/seq_file.h>
8268 +@@ -96,11 +97,12 @@ static inline u32 batadv_choose_claim(const void *data, u32 size)
8269 + */
8270 + static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
8271 + {
8272 +- const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
8273 ++ const struct batadv_bla_backbone_gw *gw;
8274 + u32 hash = 0;
8275 +
8276 +- hash = jhash(&claim->addr, sizeof(claim->addr), hash);
8277 +- hash = jhash(&claim->vid, sizeof(claim->vid), hash);
8278 ++ gw = (struct batadv_bla_backbone_gw *)data;
8279 ++ hash = jhash(&gw->orig, sizeof(gw->orig), hash);
8280 ++ hash = jhash(&gw->vid, sizeof(gw->vid), hash);
8281 +
8282 + return hash % size;
8283 + }
8284 +@@ -1592,13 +1594,16 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
8285 + }
8286 +
8287 + /**
8288 +- * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup.
8289 ++ * batadv_bla_check_duplist() - Check if a frame is in the broadcast dup.
8290 + * @bat_priv: the bat priv with all the soft interface information
8291 +- * @skb: contains the bcast_packet to be checked
8292 ++ * @skb: contains the multicast packet to be checked
8293 ++ * @payload_ptr: pointer to position inside the head buffer of the skb
8294 ++ * marking the start of the data to be CRC'ed
8295 ++ * @orig: originator mac address, NULL if unknown
8296 + *
8297 +- * check if it is on our broadcast list. Another gateway might
8298 +- * have sent the same packet because it is connected to the same backbone,
8299 +- * so we have to remove this duplicate.
8300 ++ * Check if it is on our broadcast list. Another gateway might have sent the
8301 ++ * same packet because it is connected to the same backbone, so we have to
8302 ++ * remove this duplicate.
8303 + *
8304 + * This is performed by checking the CRC, which will tell us
8305 + * with a good chance that it is the same packet. If it is furthermore
8306 +@@ -1607,19 +1612,17 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
8307 + *
8308 + * Return: true if a packet is in the duplicate list, false otherwise.
8309 + */
8310 +-bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
8311 +- struct sk_buff *skb)
8312 ++static bool batadv_bla_check_duplist(struct batadv_priv *bat_priv,
8313 ++ struct sk_buff *skb, u8 *payload_ptr,
8314 ++ const u8 *orig)
8315 + {
8316 +- int i, curr;
8317 +- __be32 crc;
8318 +- struct batadv_bcast_packet *bcast_packet;
8319 + struct batadv_bcast_duplist_entry *entry;
8320 + bool ret = false;
8321 +-
8322 +- bcast_packet = (struct batadv_bcast_packet *)skb->data;
8323 ++ int i, curr;
8324 ++ __be32 crc;
8325 +
8326 + /* calculate the crc ... */
8327 +- crc = batadv_skb_crc32(skb, (u8 *)(bcast_packet + 1));
8328 ++ crc = batadv_skb_crc32(skb, payload_ptr);
8329 +
8330 + spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);
8331 +
8332 +@@ -1638,8 +1641,21 @@ bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
8333 + if (entry->crc != crc)
8334 + continue;
8335 +
8336 +- if (batadv_compare_eth(entry->orig, bcast_packet->orig))
8337 +- continue;
8338 ++ /* are the originators both known and not anonymous? */
8339 ++ if (orig && !is_zero_ether_addr(orig) &&
8340 ++ !is_zero_ether_addr(entry->orig)) {
8341 ++ /* If known, check if the new frame came from
8342 ++ * the same originator:
8343 ++ * We are safe to take identical frames from the
8344 ++ * same orig, if known, as multiplications in
8345 ++ * the mesh are detected via the (orig, seqno) pair.
8346 ++ * So we can be a bit more liberal here and allow
8347 ++ * identical frames from the same orig which the source
8348 ++ * host might have sent multiple times on purpose.
8349 ++ */
8350 ++ if (batadv_compare_eth(entry->orig, orig))
8351 ++ continue;
8352 ++ }
8353 +
8354 + /* this entry seems to match: same crc, not too old,
8355 + * and from another gw. therefore return true to forbid it.
8356 +@@ -1655,7 +1671,14 @@ bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
8357 + entry = &bat_priv->bla.bcast_duplist[curr];
8358 + entry->crc = crc;
8359 + entry->entrytime = jiffies;
8360 +- ether_addr_copy(entry->orig, bcast_packet->orig);
8361 ++
8362 ++ /* known originator */
8363 ++ if (orig)
8364 ++ ether_addr_copy(entry->orig, orig);
8365 ++ /* anonymous originator */
8366 ++ else
8367 ++ eth_zero_addr(entry->orig);
8368 ++
8369 + bat_priv->bla.bcast_duplist_curr = curr;
8370 +
8371 + out:
8372 +@@ -1664,6 +1687,48 @@ out:
8373 + return ret;
8374 + }
8375 +
8376 ++/**
8377 ++ * batadv_bla_check_ucast_duplist() - Check if a frame is in the broadcast dup.
8378 ++ * @bat_priv: the bat priv with all the soft interface information
8379 ++ * @skb: contains the multicast packet to be checked, decapsulated from a
8380 ++ * unicast_packet
8381 ++ *
8382 ++ * Check if it is on our broadcast list. Another gateway might have sent the
8383 ++ * same packet because it is connected to the same backbone, so we have to
8384 ++ * remove this duplicate.
8385 ++ *
8386 ++ * Return: true if a packet is in the duplicate list, false otherwise.
8387 ++ */
8388 ++static bool batadv_bla_check_ucast_duplist(struct batadv_priv *bat_priv,
8389 ++ struct sk_buff *skb)
8390 ++{
8391 ++ return batadv_bla_check_duplist(bat_priv, skb, (u8 *)skb->data, NULL);
8392 ++}
8393 ++
8394 ++/**
8395 ++ * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup.
8396 ++ * @bat_priv: the bat priv with all the soft interface information
8397 ++ * @skb: contains the bcast_packet to be checked
8398 ++ *
8399 ++ * Check if it is on our broadcast list. Another gateway might have sent the
8400 ++ * same packet because it is connected to the same backbone, so we have to
8401 ++ * remove this duplicate.
8402 ++ *
8403 ++ * Return: true if a packet is in the duplicate list, false otherwise.
8404 ++ */
8405 ++bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
8406 ++ struct sk_buff *skb)
8407 ++{
8408 ++ struct batadv_bcast_packet *bcast_packet;
8409 ++ u8 *payload_ptr;
8410 ++
8411 ++ bcast_packet = (struct batadv_bcast_packet *)skb->data;
8412 ++ payload_ptr = (u8 *)(bcast_packet + 1);
8413 ++
8414 ++ return batadv_bla_check_duplist(bat_priv, skb, payload_ptr,
8415 ++ bcast_packet->orig);
8416 ++}
8417 ++
8418 + /**
8419 + * batadv_bla_is_backbone_gw_orig() - Check if the originator is a gateway for
8420 + * the VLAN identified by vid.
8421 +@@ -1825,7 +1890,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
8422 + * @bat_priv: the bat priv with all the soft interface information
8423 + * @skb: the frame to be checked
8424 + * @vid: the VLAN ID of the frame
8425 +- * @is_bcast: the packet came in a broadcast packet type.
8426 ++ * @packet_type: the batman packet type this frame came in
8427 + *
8428 + * batadv_bla_rx avoidance checks if:
8429 + * * we have to race for a claim
8430 +@@ -1837,7 +1902,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
8431 + * further process the skb.
8432 + */
8433 + bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
8434 +- unsigned short vid, bool is_bcast)
8435 ++ unsigned short vid, int packet_type)
8436 + {
8437 + struct batadv_bla_backbone_gw *backbone_gw;
8438 + struct ethhdr *ethhdr;
8439 +@@ -1859,9 +1924,32 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
8440 + goto handled;
8441 +
8442 + if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
8443 +- /* don't allow broadcasts while requests are in flight */
8444 +- if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
8445 +- goto handled;
8446 ++ /* don't allow multicast packets while requests are in flight */
8447 ++ if (is_multicast_ether_addr(ethhdr->h_dest))
8448 ++ /* Both broadcast flooding or multicast-via-unicasts
8449 ++ * delivery might send to multiple backbone gateways
8450 ++ * sharing the same LAN and therefore need to coordinate
8451 ++ * which backbone gateway forwards into the LAN,
8452 ++ * by claiming the payload source address.
8453 ++ *
8454 ++ * Broadcast flooding and multicast-via-unicasts
8455 ++ * delivery use the following two batman packet types.
8456 ++ * Note: explicitly exclude BATADV_UNICAST_4ADDR,
8457 ++ * as the DHCP gateway feature will send explicitly
8458 ++ * to only one BLA gateway, so the claiming process
8459 ++ * should be avoided there.
8460 ++ */
8461 ++ if (packet_type == BATADV_BCAST ||
8462 ++ packet_type == BATADV_UNICAST)
8463 ++ goto handled;
8464 ++
8465 ++ /* potential duplicates from foreign BLA backbone gateways via
8466 ++ * multicast-in-unicast packets
8467 ++ */
8468 ++ if (is_multicast_ether_addr(ethhdr->h_dest) &&
8469 ++ packet_type == BATADV_UNICAST &&
8470 ++ batadv_bla_check_ucast_duplist(bat_priv, skb))
8471 ++ goto handled;
8472 +
8473 + ether_addr_copy(search_claim.addr, ethhdr->h_source);
8474 + search_claim.vid = vid;
8475 +@@ -1896,13 +1984,14 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
8476 + goto allow;
8477 + }
8478 +
8479 +- /* if it is a broadcast ... */
8480 +- if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
8481 ++ /* if it is a multicast ... */
8482 ++ if (is_multicast_ether_addr(ethhdr->h_dest) &&
8483 ++ (packet_type == BATADV_BCAST || packet_type == BATADV_UNICAST)) {
8484 + /* ... drop it. the responsible gateway is in charge.
8485 + *
8486 +- * We need to check is_bcast because with the gateway
8487 ++ * We need to check the packet type because with the gateway
8488 + * feature, broadcasts (like DHCP requests) may be sent
8489 +- * using a unicast packet type.
8490 ++ * using a unicast 4 address packet type. See comment above.
8491 + */
8492 + goto handled;
8493 + } else {
8494 +diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
8495 +index 71f95a3e4d3f3..af28fdb01467c 100644
8496 +--- a/net/batman-adv/bridge_loop_avoidance.h
8497 ++++ b/net/batman-adv/bridge_loop_avoidance.h
8498 +@@ -48,7 +48,7 @@ static inline bool batadv_bla_is_loopdetect_mac(const uint8_t *mac)
8499 +
8500 + #ifdef CONFIG_BATMAN_ADV_BLA
8501 + bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
8502 +- unsigned short vid, bool is_bcast);
8503 ++ unsigned short vid, int packet_type);
8504 + bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
8505 + unsigned short vid);
8506 + bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
8507 +@@ -79,7 +79,7 @@ bool batadv_bla_check_claim(struct batadv_priv *bat_priv, u8 *addr,
8508 +
8509 + static inline bool batadv_bla_rx(struct batadv_priv *bat_priv,
8510 + struct sk_buff *skb, unsigned short vid,
8511 +- bool is_bcast)
8512 ++ int packet_type)
8513 + {
8514 + return false;
8515 + }
8516 +diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
8517 +index cc3ed93a6d513..98af41e3810dc 100644
8518 +--- a/net/batman-adv/routing.c
8519 ++++ b/net/batman-adv/routing.c
8520 +@@ -838,6 +838,10 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
8521 + vid = batadv_get_vid(skb, hdr_len);
8522 + ethhdr = (struct ethhdr *)(skb->data + hdr_len);
8523 +
8524 ++ /* do not reroute multicast frames in a unicast header */
8525 ++ if (is_multicast_ether_addr(ethhdr->h_dest))
8526 ++ return true;
8527 ++
8528 + /* check if the destination client was served by this node and it is now
8529 + * roaming. In this case, it means that the node has got a ROAM_ADV
8530 + * message and that it knows the new destination in the mesh to re-route
8531 +diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
8532 +index a2976adeeedce..6ff78080ec7fb 100644
8533 +--- a/net/batman-adv/soft-interface.c
8534 ++++ b/net/batman-adv/soft-interface.c
8535 +@@ -426,10 +426,10 @@ void batadv_interface_rx(struct net_device *soft_iface,
8536 + struct vlan_ethhdr *vhdr;
8537 + struct ethhdr *ethhdr;
8538 + unsigned short vid;
8539 +- bool is_bcast;
8540 ++ int packet_type;
8541 +
8542 + batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data;
8543 +- is_bcast = (batadv_bcast_packet->packet_type == BATADV_BCAST);
8544 ++ packet_type = batadv_bcast_packet->packet_type;
8545 +
8546 + skb_pull_rcsum(skb, hdr_size);
8547 + skb_reset_mac_header(skb);
8548 +@@ -472,7 +472,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
8549 + /* Let the bridge loop avoidance check the packet. If it will
8550 + * not handle it, we can safely push it up.
8551 + */
8552 +- if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
8553 ++ if (batadv_bla_rx(bat_priv, skb, vid, packet_type))
8554 + goto out;
8555 +
8556 + if (orig_node)
8557 +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
8558 +index 2b4a7cf03041b..310622086f74b 100644
8559 +--- a/net/bluetooth/hci_event.c
8560 ++++ b/net/bluetooth/hci_event.c
8561 +@@ -41,12 +41,27 @@
8562 +
8563 + /* Handle HCI Event packets */
8564 +
8565 +-static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
8566 ++static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
8567 ++ u8 *new_status)
8568 + {
8569 + __u8 status = *((__u8 *) skb->data);
8570 +
8571 + BT_DBG("%s status 0x%2.2x", hdev->name, status);
8572 +
8573 ++ /* It is possible that we receive an Inquiry Complete event right
8574 ++ * before we receive the Inquiry Cancel Command Complete event, in
8575 ++ * which case the latter event should have a status of Command
8576 ++ * Disallowed (0x0c). This should not be treated as an error, since
8577 ++ * we actually achieve what Inquiry Cancel wants to achieve,
8578 ++ * which is to end the last Inquiry session.
8579 ++ */
8580 ++ if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
8581 ++ bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
8582 ++ status = 0x00;
8583 ++ }
8584 ++
8585 ++ *new_status = status;
8586 ++
8587 + if (status)
8588 + return;
8589 +
8590 +@@ -3039,7 +3054,7 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
8591 +
8592 + switch (*opcode) {
8593 + case HCI_OP_INQUIRY_CANCEL:
8594 +- hci_cc_inquiry_cancel(hdev, skb);
8595 ++ hci_cc_inquiry_cancel(hdev, skb, status);
8596 + break;
8597 +
8598 + case HCI_OP_PERIODIC_INQ:
8599 +@@ -5738,6 +5753,11 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
8600 + u8 status = 0, event = hdr->evt, req_evt = 0;
8601 + u16 opcode = HCI_OP_NOP;
8602 +
8603 ++ if (!event) {
8604 ++ bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
8605 ++ goto done;
8606 ++ }
8607 ++
8608 + if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
8609 + struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
8610 + opcode = __le16_to_cpu(cmd_hdr->opcode);
8611 +@@ -5949,6 +5969,7 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
8612 + req_complete_skb(hdev, status, opcode, orig_skb);
8613 + }
8614 +
8615 ++done:
8616 + kfree_skb(orig_skb);
8617 + kfree_skb(skb);
8618 + hdev->stat.evt_rx++;
8619 +diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
8620 +index 0d84d1f820d4c..c04107d446016 100644
8621 +--- a/net/bluetooth/l2cap_core.c
8622 ++++ b/net/bluetooth/l2cap_core.c
8623 +@@ -414,6 +414,9 @@ static void l2cap_chan_timeout(struct work_struct *work)
8624 + BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
8625 +
8626 + mutex_lock(&conn->chan_lock);
8627 ++ /* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
8628 ++ * this work. No need to call l2cap_chan_hold(chan) here again.
8629 ++ */
8630 + l2cap_chan_lock(chan);
8631 +
8632 + if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
8633 +@@ -426,12 +429,12 @@ static void l2cap_chan_timeout(struct work_struct *work)
8634 +
8635 + l2cap_chan_close(chan, reason);
8636 +
8637 +- l2cap_chan_unlock(chan);
8638 +-
8639 + chan->ops->close(chan);
8640 +- mutex_unlock(&conn->chan_lock);
8641 +
8642 ++ l2cap_chan_unlock(chan);
8643 + l2cap_chan_put(chan);
8644 ++
8645 ++ mutex_unlock(&conn->chan_lock);
8646 + }
8647 +
8648 + struct l2cap_chan *l2cap_chan_create(void)
8649 +@@ -1725,9 +1728,9 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
8650 +
8651 + l2cap_chan_del(chan, err);
8652 +
8653 +- l2cap_chan_unlock(chan);
8654 +-
8655 + chan->ops->close(chan);
8656 ++
8657 ++ l2cap_chan_unlock(chan);
8658 + l2cap_chan_put(chan);
8659 + }
8660 +
8661 +@@ -4114,7 +4117,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
8662 + return 0;
8663 + }
8664 +
8665 +- if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
8666 ++ if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
8667 ++ chan->state != BT_CONNECTED) {
8668 + cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
8669 + chan->dcid);
8670 + goto unlock;
8671 +@@ -4337,6 +4341,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
8672 + return 0;
8673 + }
8674 +
8675 ++ l2cap_chan_hold(chan);
8676 + l2cap_chan_lock(chan);
8677 +
8678 + rsp.dcid = cpu_to_le16(chan->scid);
8679 +@@ -4345,12 +4350,11 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
8680 +
8681 + chan->ops->set_shutdown(chan);
8682 +
8683 +- l2cap_chan_hold(chan);
8684 + l2cap_chan_del(chan, ECONNRESET);
8685 +
8686 +- l2cap_chan_unlock(chan);
8687 +-
8688 + chan->ops->close(chan);
8689 ++
8690 ++ l2cap_chan_unlock(chan);
8691 + l2cap_chan_put(chan);
8692 +
8693 + mutex_unlock(&conn->chan_lock);
8694 +@@ -4382,20 +4386,21 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
8695 + return 0;
8696 + }
8697 +
8698 ++ l2cap_chan_hold(chan);
8699 + l2cap_chan_lock(chan);
8700 +
8701 + if (chan->state != BT_DISCONN) {
8702 + l2cap_chan_unlock(chan);
8703 ++ l2cap_chan_put(chan);
8704 + mutex_unlock(&conn->chan_lock);
8705 + return 0;
8706 + }
8707 +
8708 +- l2cap_chan_hold(chan);
8709 + l2cap_chan_del(chan, 0);
8710 +
8711 +- l2cap_chan_unlock(chan);
8712 +-
8713 + chan->ops->close(chan);
8714 ++
8715 ++ l2cap_chan_unlock(chan);
8716 + l2cap_chan_put(chan);
8717 +
8718 + mutex_unlock(&conn->chan_lock);
8719 +diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
8720 +index a3a2cd55e23a9..5572042f04531 100644
8721 +--- a/net/bluetooth/l2cap_sock.c
8722 ++++ b/net/bluetooth/l2cap_sock.c
8723 +@@ -1039,7 +1039,7 @@ done:
8724 + }
8725 +
8726 + /* Kill socket (only if zapped and orphan)
8727 +- * Must be called on unlocked socket.
8728 ++ * Must be called on an unlocked socket, with the l2cap channel lock held.
8729 + */
8730 + static void l2cap_sock_kill(struct sock *sk)
8731 + {
8732 +@@ -1190,6 +1190,7 @@ static int l2cap_sock_release(struct socket *sock)
8733 + {
8734 + struct sock *sk = sock->sk;
8735 + int err;
8736 ++ struct l2cap_chan *chan;
8737 +
8738 + BT_DBG("sock %p, sk %p", sock, sk);
8739 +
8740 +@@ -1199,9 +1200,17 @@ static int l2cap_sock_release(struct socket *sock)
8741 + bt_sock_unlink(&l2cap_sk_list, sk);
8742 +
8743 + err = l2cap_sock_shutdown(sock, 2);
8744 ++ chan = l2cap_pi(sk)->chan;
8745 ++
8746 ++ l2cap_chan_hold(chan);
8747 ++ l2cap_chan_lock(chan);
8748 +
8749 + sock_orphan(sk);
8750 + l2cap_sock_kill(sk);
8751 ++
8752 ++ l2cap_chan_unlock(chan);
8753 ++ l2cap_chan_put(chan);
8754 ++
8755 + return err;
8756 + }
8757 +
8758 +@@ -1219,12 +1228,15 @@ static void l2cap_sock_cleanup_listen(struct sock *parent)
8759 + BT_DBG("child chan %p state %s", chan,
8760 + state_to_string(chan->state));
8761 +
8762 ++ l2cap_chan_hold(chan);
8763 + l2cap_chan_lock(chan);
8764 ++
8765 + __clear_chan_timer(chan);
8766 + l2cap_chan_close(chan, ECONNRESET);
8767 +- l2cap_chan_unlock(chan);
8768 +-
8769 + l2cap_sock_kill(sk);
8770 ++
8771 ++ l2cap_chan_unlock(chan);
8772 ++ l2cap_chan_put(chan);
8773 + }
8774 + }
8775 +
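Every hunk in the two L2CAP files applies the same ordering discipline: take a channel reference before locking it, run teardown (including chan->ops->close()) under the lock, and drop the reference only after unlocking, so the final put can never free a channel that is still locked. The pattern, extracted from the hunks above:

    l2cap_chan_hold(chan);          /* ref first: keeps chan alive */
    l2cap_chan_lock(chan);

    /* ... tear the channel down, call chan->ops->close(chan) ... */

    l2cap_chan_unlock(chan);
    l2cap_chan_put(chan);           /* may free chan -- only after unlock */
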
8776 +diff --git a/net/core/filter.c b/net/core/filter.c
8777 +index 25a2c3186e14a..557bd5cc8f94c 100644
8778 +--- a/net/core/filter.c
8779 ++++ b/net/core/filter.c
8780 +@@ -5418,8 +5418,6 @@ static int bpf_gen_ld_abs(const struct bpf_insn *orig,
8781 + bool indirect = BPF_MODE(orig->code) == BPF_IND;
8782 + struct bpf_insn *insn = insn_buf;
8783 +
8784 +- /* We're guaranteed here that CTX is in R6. */
8785 +- *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX);
8786 + if (!indirect) {
8787 + *insn++ = BPF_MOV64_IMM(BPF_REG_2, orig->imm);
8788 + } else {
8789 +@@ -5427,6 +5425,8 @@ static int bpf_gen_ld_abs(const struct bpf_insn *orig,
8790 + if (orig->imm)
8791 + *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, orig->imm);
8792 + }
8793 ++ /* We're guaranteed here that CTX is in R6. */
8794 ++ *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX);
8795 +
8796 + switch (BPF_SIZE(orig->code)) {
8797 + case BPF_B:
8798 +diff --git a/net/core/neighbour.c b/net/core/neighbour.c
8799 +index bf738ec68cb53..6e890f51b7d86 100644
8800 +--- a/net/core/neighbour.c
8801 ++++ b/net/core/neighbour.c
8802 +@@ -2844,6 +2844,7 @@ static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
8803 + *pos = cpu+1;
8804 + return per_cpu_ptr(tbl->stats, cpu);
8805 + }
8806 ++ (*pos)++;
8807 + return NULL;
8808 + }
8809 +
8810 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
8811 +index 84de87b7eedcd..3db428242b22d 100644
8812 +--- a/net/ipv4/route.c
8813 ++++ b/net/ipv4/route.c
8814 +@@ -274,6 +274,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
8815 + *pos = cpu+1;
8816 + return &per_cpu(rt_cache_stat, cpu);
8817 + }
8818 ++ (*pos)++;
8819 + return NULL;
8820 +
8821 + }
8822 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
8823 +index 616ff2970f4fc..4ce3397e6fcf7 100644
8824 +--- a/net/ipv4/tcp.c
8825 ++++ b/net/ipv4/tcp.c
8826 +@@ -2038,7 +2038,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
8827 +
8828 + /* Well, if we have backlog, try to process it now yet. */
8829 +
8830 +- if (copied >= target && !sk->sk_backlog.tail)
8831 ++ if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
8832 + break;
8833 +
8834 + if (copied) {
8835 +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
8836 +index 05a206202e23d..b924941b96a31 100644
8837 +--- a/net/ipv6/ip6_fib.c
8838 ++++ b/net/ipv6/ip6_fib.c
8839 +@@ -2377,14 +2377,13 @@ static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
8840 + struct net *net = seq_file_net(seq);
8841 + struct ipv6_route_iter *iter = seq->private;
8842 +
8843 ++ ++(*pos);
8844 + if (!v)
8845 + goto iter_table;
8846 +
8847 + n = rcu_dereference_bh(((struct fib6_info *)v)->fib6_next);
8848 +- if (n) {
8849 +- ++*pos;
8850 ++ if (n)
8851 + return n;
8852 +- }
8853 +
8854 + iter_table:
8855 + ipv6_route_check_sernum(iter);
8856 +@@ -2392,8 +2391,6 @@ iter_table:
8857 + r = fib6_walk_continue(&iter->w);
8858 + spin_unlock_bh(&iter->tbl->tb6_lock);
8859 + if (r > 0) {
8860 +- if (v)
8861 +- ++*pos;
8862 + return iter->w.leaf;
8863 + } else if (r < 0) {
8864 + fib6_walker_unlink(net, &iter->w);
8865 +diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
8866 +index 6ead3c39f3566..bcba579e292ff 100644
8867 +--- a/net/llc/af_llc.c
8868 ++++ b/net/llc/af_llc.c
8869 +@@ -785,7 +785,7 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
8870 + }
8871 + /* Well, if we have backlog, try to process it now yet. */
8872 +
8873 +- if (copied >= target && !sk->sk_backlog.tail)
8874 ++ if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
8875 + break;
8876 +
8877 + if (copied) {
8878 +diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
8879 +index bcd1a5e6ebf42..2f873a0dc5836 100644
8880 +--- a/net/mac802154/tx.c
8881 ++++ b/net/mac802154/tx.c
8882 +@@ -42,11 +42,11 @@ void ieee802154_xmit_worker(struct work_struct *work)
8883 + if (res)
8884 + goto err_tx;
8885 +
8886 +- ieee802154_xmit_complete(&local->hw, skb, false);
8887 +-
8888 + dev->stats.tx_packets++;
8889 + dev->stats.tx_bytes += skb->len;
8890 +
8891 ++ ieee802154_xmit_complete(&local->hw, skb, false);
8892 ++
8893 + return;
8894 +
8895 + err_tx:
8896 +@@ -86,6 +86,8 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
8897 +
8898 + /* async is priority, otherwise sync is fallback */
8899 + if (local->ops->xmit_async) {
8900 ++ unsigned int len = skb->len;
8901 ++
8902 + ret = drv_xmit_async(local, skb);
8903 + if (ret) {
8904 + ieee802154_wake_queue(&local->hw);
8905 +@@ -93,7 +95,7 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
8906 + }
8907 +
8908 + dev->stats.tx_packets++;
8909 +- dev->stats.tx_bytes += skb->len;
8910 ++ dev->stats.tx_bytes += len;
8911 + } else {
8912 + local->tx_skb = skb;
8913 + queue_work(local->workqueue, &local->tx_work);
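
Both mac802154 hunks fix the same pattern: once the skb is handed to the completion or async-xmit path it may already be freed, so skb->len has to be copied out before the handoff. A hypothetical buffer-handoff sketch (submit() is illustrative and frees immediately to make the bug obvious):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct buf { size_t len; char data[64]; };

    static int submit(struct buf *b)  /* takes ownership, may free at once */
    {
            free(b);
            return 0;
    }

    int main(void)
    {
            struct buf *b = malloc(sizeof(*b));
            if (!b)
                    return 1;
            b->len = strlen(strcpy(b->data, "hello"));

            size_t len = b->len;            /* copy before ownership transfer */
            if (submit(b) == 0)
                    printf("tx_bytes += %zu\n", len);  /* b must not be touched */
            return 0;
    }
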
8914 +diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c
8915 +index c038e021a5916..5ea2471ffc03f 100644
8916 +--- a/net/openvswitch/meter.c
8917 ++++ b/net/openvswitch/meter.c
8918 +@@ -255,8 +255,8 @@ static struct dp_meter *dp_meter_create(struct nlattr **a)
8919 + *
8920 + * Start with a full bucket.
8921 + */
8922 +- band->bucket = (band->burst_size + band->rate) * 1000;
8923 +- band_max_delta_t = band->bucket / band->rate;
8924 ++ band->bucket = (band->burst_size + band->rate) * 1000ULL;
8925 ++ band_max_delta_t = div_u64(band->bucket, band->rate);
8926 + if (band_max_delta_t > meter->max_delta_t)
8927 + meter->max_delta_t = band_max_delta_t;
8928 + band++;
8929 +diff --git a/net/openvswitch/meter.h b/net/openvswitch/meter.h
8930 +index 964ace2650f89..970557ed5b5b6 100644
8931 +--- a/net/openvswitch/meter.h
8932 ++++ b/net/openvswitch/meter.h
8933 +@@ -26,7 +26,7 @@ struct dp_meter_band {
8934 + u32 type;
8935 + u32 rate;
8936 + u32 burst_size;
8937 +- u32 bucket; /* 1/1000 packets, or in bits */
8938 ++ u64 bucket; /* 1/1000 packets, or in bits */
8939 + struct ovs_flow_stats stats;
8940 + };
8941 +
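
The two meter hunks widen the bucket to u64 because (burst_size + rate) * 1000 is computed in 32-bit arithmetic and wraps once the operands get large; the ULL suffix forces a 64-bit multiply, and div_u64() is used because plain 64-bit division is not available on all 32-bit kernels. A userspace demonstration of the wrap:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t rate = 5000000, burst = 5000000;

            uint32_t narrow = (burst + rate) * 1000;      /* wraps at 2^32 */
            uint64_t wide   = (burst + rate) * 1000ULL;   /* 64-bit multiply */

            printf("32-bit: %u\n", (unsigned)narrow);     /* 10^10 mod 2^32 */
            printf("64-bit: %llu\n", (unsigned long long)wide);
            return 0;
    }
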
8942 +diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
8943 +index 7bb8e5603298d..d6e83a37a1adf 100644
8944 +--- a/net/sctp/outqueue.c
8945 ++++ b/net/sctp/outqueue.c
8946 +@@ -51,6 +51,7 @@
8947 + #include <net/sctp/sctp.h>
8948 + #include <net/sctp/sm.h>
8949 + #include <net/sctp/stream_sched.h>
8950 ++#include <trace/events/sctp.h>
8951 +
8952 + /* Declare internal functions here. */
8953 + static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
8954 +@@ -1257,6 +1258,11 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
8955 + /* Grab the association's destination address list. */
8956 + transport_list = &asoc->peer.transport_addr_list;
8957 +
8958 ++ /* SCTP path tracepoint for congestion control debugging. */
8959 ++ list_for_each_entry(transport, transport_list, transports) {
8960 ++ trace_sctp_probe_path(transport, asoc);
8961 ++ }
8962 ++
8963 + sack_ctsn = ntohl(sack->cum_tsn_ack);
8964 + gap_ack_blocks = ntohs(sack->num_gap_ack_blocks);
8965 + asoc->stats.gapcnt += gap_ack_blocks;
8966 +diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
8967 +index c8ee8e801edb8..709c082dc9059 100644
8968 +--- a/net/sunrpc/svc_xprt.c
8969 ++++ b/net/sunrpc/svc_xprt.c
8970 +@@ -103,8 +103,17 @@ void svc_unreg_xprt_class(struct svc_xprt_class *xcl)
8971 + }
8972 + EXPORT_SYMBOL_GPL(svc_unreg_xprt_class);
8973 +
8974 +-/*
8975 +- * Format the transport list for printing
8976 ++/**
8977 ++ * svc_print_xprts - Format the transport list for printing
8978 ++ * @buf: target buffer for formatted address
8979 ++ * @maxlen: length of target buffer
8980 ++ *
8981 ++ * Fills in @buf with a string containing a list of transport names, each name
8982 ++ * terminated with '\n'. If the buffer is too small, some entries may be
8983 ++ * missing, but it is guaranteed that all lines in the output buffer are
8984 ++ * complete.
8985 ++ *
8986 ++ * Returns positive length of the filled-in string.
8987 + */
8988 + int svc_print_xprts(char *buf, int maxlen)
8989 + {
8990 +@@ -117,9 +126,9 @@ int svc_print_xprts(char *buf, int maxlen)
8991 + list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
8992 + int slen;
8993 +
8994 +- sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload);
8995 +- slen = strlen(tmpstr);
8996 +- if (len + slen > maxlen)
8997 ++ slen = snprintf(tmpstr, sizeof(tmpstr), "%s %d\n",
8998 ++ xcl->xcl_name, xcl->xcl_max_payload);
8999 ++ if (slen >= sizeof(tmpstr) || len + slen >= maxlen)
9000 + break;
9001 + len += slen;
9002 + strcat(buf, tmpstr);
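
The svc_print_xprts() rewrite above swaps sprintf()/strlen() for snprintf() and checks both the scratch-buffer and the output-buffer limit before appending, so only whole lines are ever emitted. A compact sketch of the same pattern with hypothetical names:

    #include <stdio.h>
    #include <string.h>

    static int print_names(char *buf, int maxlen, const char **names, int n)
    {
            char tmp[64];
            int len = 0, i, slen;

            buf[0] = '\0';
            for (i = 0; i < n; i++) {
                    slen = snprintf(tmp, sizeof(tmp), "%s\n", names[i]);
                    if (slen >= (int)sizeof(tmp) || len + slen >= maxlen)
                            break;          /* would truncate: stop on a line boundary */
                    len += slen;
                    strcat(buf, tmp);
            }
            return len;
    }

    int main(void)
    {
            const char *names[] = { "tcp", "udp", "rdma" };
            char out[16];

            printf("%d bytes:\n%s", print_names(out, sizeof(out), names, 3), out);
            return 0;
    }
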
9003 +diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
9004 +index b9827665ff355..d183d4aee822c 100644
9005 +--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
9006 ++++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
9007 +@@ -256,6 +256,7 @@ xprt_rdma_bc_put(struct rpc_xprt *xprt)
9008 + {
9009 + dprintk("svcrdma: %s: xprt %p\n", __func__, xprt);
9010 +
9011 ++ xprt_rdma_free_addresses(xprt);
9012 + xprt_free(xprt);
9013 + module_put(THIS_MODULE);
9014 + }
9015 +diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
9016 +index 41f4464ac6cc5..ec9a7137d2677 100644
9017 +--- a/net/tipc/topsrv.c
9018 ++++ b/net/tipc/topsrv.c
9019 +@@ -407,7 +407,9 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con)
9020 + return -EWOULDBLOCK;
9021 + if (ret == sizeof(s)) {
9022 + read_lock_bh(&sk->sk_callback_lock);
9023 +- ret = tipc_conn_rcv_sub(srv, con, &s);
9024 ++ /* RACE: the connection can be closed in the meantime */
9025 ++ if (likely(connected(con)))
9026 ++ ret = tipc_conn_rcv_sub(srv, con, &s);
9027 + read_unlock_bh(&sk->sk_callback_lock);
9028 + if (!ret)
9029 + return 0;
9030 +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
9031 +index 2318e2e2748f4..2020306468af4 100644
9032 +--- a/net/unix/af_unix.c
9033 ++++ b/net/unix/af_unix.c
9034 +@@ -192,11 +192,17 @@ static inline int unix_may_send(struct sock *sk, struct sock *osk)
9035 + return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
9036 + }
9037 +
9038 +-static inline int unix_recvq_full(struct sock const *sk)
9039 ++static inline int unix_recvq_full(const struct sock *sk)
9040 + {
9041 + return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
9042 + }
9043 +
9044 ++static inline int unix_recvq_full_lockless(const struct sock *sk)
9045 ++{
9046 ++ return skb_queue_len_lockless(&sk->sk_receive_queue) >
9047 ++ READ_ONCE(sk->sk_max_ack_backlog);
9048 ++}
9049 ++
9050 + struct sock *unix_peer_get(struct sock *s)
9051 + {
9052 + struct sock *peer;
9053 +@@ -1788,7 +1794,8 @@ restart_locked:
9054 + * - unix_peer(sk) == sk by time of get but disconnected before lock
9055 + */
9056 + if (other != sk &&
9057 +- unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
9058 ++ unlikely(unix_peer(other) != sk &&
9059 ++ unix_recvq_full_lockless(other))) {
9060 + if (timeo) {
9061 + timeo = unix_wait_for_peer(other, timeo);
9062 +
9063 +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
9064 +index 452254fd89f87..250b725f5754c 100644
9065 +--- a/security/selinux/hooks.c
9066 ++++ b/security/selinux/hooks.c
9067 +@@ -3304,6 +3304,9 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
9068 + return dentry_has_perm(current_cred(), dentry, FILE__SETATTR);
9069 + }
9070 +
9071 ++ if (!selinux_state.initialized)
9072 ++ return (inode_owner_or_capable(inode) ? 0 : -EPERM);
9073 ++
9074 + sbsec = inode->i_sb->s_security;
9075 + if (!(sbsec->flags & SBLABEL_MNT))
9076 + return -EOPNOTSUPP;
9077 +@@ -3387,6 +3390,15 @@ static void selinux_inode_post_setxattr(struct dentry *dentry, const char *name,
9078 + return;
9079 + }
9080 +
9081 ++ if (!selinux_state.initialized) {
9082 ++ /* If we haven't even been initialized, then we can't validate
9083 ++ * against a policy, so leave the label as invalid. It may
9084 ++ * resolve to a valid label on the next revalidation try if
9085 ++ * we've since initialized.
9086 ++ */
9087 ++ return;
9088 ++ }
9089 ++
9090 + rc = security_context_to_sid_force(&selinux_state, value, size,
9091 + &newsid);
9092 + if (rc) {
9093 +diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
9094 +index f3a5a138a096d..60b3f16bb5c7b 100644
9095 +--- a/security/selinux/selinuxfs.c
9096 ++++ b/security/selinux/selinuxfs.c
9097 +@@ -1509,6 +1509,7 @@ static struct avc_cache_stats *sel_avc_get_stat_idx(loff_t *idx)
9098 + *idx = cpu + 1;
9099 + return &per_cpu(avc_cache_stats, cpu);
9100 + }
9101 ++ (*idx)++;
9102 + return NULL;
9103 + }
9104 +
9105 +diff --git a/sound/hda/hdac_bus.c b/sound/hda/hdac_bus.c
9106 +index 714a51721a313..ab9236e4c157e 100644
9107 +--- a/sound/hda/hdac_bus.c
9108 ++++ b/sound/hda/hdac_bus.c
9109 +@@ -155,6 +155,7 @@ static void process_unsol_events(struct work_struct *work)
9110 + struct hdac_driver *drv;
9111 + unsigned int rp, caddr, res;
9112 +
9113 ++ spin_lock_irq(&bus->reg_lock);
9114 + while (bus->unsol_rp != bus->unsol_wp) {
9115 + rp = (bus->unsol_rp + 1) % HDA_UNSOL_QUEUE_SIZE;
9116 + bus->unsol_rp = rp;
9117 +@@ -166,10 +167,13 @@ static void process_unsol_events(struct work_struct *work)
9118 + codec = bus->caddr_tbl[caddr & 0x0f];
9119 + if (!codec || !codec->dev.driver)
9120 + continue;
9121 ++ spin_unlock_irq(&bus->reg_lock);
9122 + drv = drv_to_hdac_driver(codec->dev.driver);
9123 + if (drv->unsol_event)
9124 + drv->unsol_event(codec, res);
9125 ++ spin_lock_irq(&bus->reg_lock);
9126 + }
9127 ++ spin_unlock_irq(&bus->reg_lock);
9128 + }
9129 +
9130 + /**
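
The hdac_bus change takes bus->reg_lock around the ring-buffer indices but deliberately drops it around the driver's unsol_event callback, which must not run under the spinlock. A pthread sketch of that lock-drop-around-callback shape (drain() and handler() are illustrative names):

    #include <pthread.h>
    #include <stdio.h>

    struct queue {
            pthread_mutex_t lock;
            int rp, wp;
            int slots[16];
    };

    static void drain(struct queue *q, void (*cb)(int))
    {
            pthread_mutex_lock(&q->lock);
            while (q->rp != q->wp) {
                    q->rp = (q->rp + 1) % 16;
                    int res = q->slots[q->rp];

                    pthread_mutex_unlock(&q->lock); /* callback runs unlocked */
                    cb(res);
                    pthread_mutex_lock(&q->lock);   /* re-take before re-checking */
            }
            pthread_mutex_unlock(&q->lock);
    }

    static void handler(int res) { printf("event %d handled unlocked\n", res); }

    int main(void)
    {
            struct queue q = { .lock = PTHREAD_MUTEX_INITIALIZER, .rp = 0,
                               .wp = 2, .slots = { 0, 11, 22 } };
            drain(&q, handler);
            return 0;
    }
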
9131 +diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c
9132 +index 7d049569012c1..3f06986fbecf8 100644
9133 +--- a/sound/pci/asihpi/hpioctl.c
9134 ++++ b/sound/pci/asihpi/hpioctl.c
9135 +@@ -350,7 +350,7 @@ int asihpi_adapter_probe(struct pci_dev *pci_dev,
9136 + struct hpi_message hm;
9137 + struct hpi_response hr;
9138 + struct hpi_adapter adapter;
9139 +- struct hpi_pci pci;
9140 ++ struct hpi_pci pci = { 0 };
9141 +
9142 + memset(&adapter, 0, sizeof(adapter));
9143 +
9144 +@@ -506,7 +506,7 @@ int asihpi_adapter_probe(struct pci_dev *pci_dev,
9145 + return 0;
9146 +
9147 + err:
9148 +- for (idx = 0; idx < HPI_MAX_ADAPTER_MEM_SPACES; idx++) {
9149 ++ while (--idx >= 0) {
9150 + if (pci.ap_mem_base[idx]) {
9151 + iounmap(pci.ap_mem_base[idx]);
9152 + pci.ap_mem_base[idx] = NULL;
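
Two fixes in this hpioctl.c pair: the on-stack struct hpi_pci is now zero-initialized, and the error path unwinds with while (--idx >= 0) so it only iounmaps the regions that were actually mapped before the failure. A generic sketch of that partial-initialization unwind (malloc/free standing in for ioremap/iounmap):

    #include <stdlib.h>

    #define NRES 4

    static int setup_all(void *res[NRES])
    {
            int idx;

            for (idx = 0; idx < NRES; idx++) {
                    res[idx] = malloc(32);          /* stand-in for ioremap() */
                    if (!res[idx])
                            goto err;
            }
            return 0;
    err:
            while (--idx >= 0) {                    /* undo only what succeeded */
                    free(res[idx]);
                    res[idx] = NULL;
            }
            return -1;
    }

    int main(void)
    {
            void *res[NRES] = { NULL };
            int ret = setup_all(res);

            for (int i = 0; ret == 0 && i < NRES; i++)
                    free(res[i]);                   /* normal teardown on success */
            return ret ? 1 : 0;
    }
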
9153 +diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
9154 +index fa261b27d8588..8198d2e53b7df 100644
9155 +--- a/sound/pci/hda/hda_controller.c
9156 ++++ b/sound/pci/hda/hda_controller.c
9157 +@@ -1169,16 +1169,23 @@ irqreturn_t azx_interrupt(int irq, void *dev_id)
9158 + if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update))
9159 + active = true;
9160 +
9161 +- /* clear rirb int */
9162 + status = azx_readb(chip, RIRBSTS);
9163 + if (status & RIRB_INT_MASK) {
9164 ++ /*
9165 ++ * Clearing the interrupt status here ensures that no
9166 ++ * interrupt gets masked after the RIRB wp is read in
9167 ++ * snd_hdac_bus_update_rirb. This avoids a possible
9168 ++ * race condition where a codec response in RIRB may
9169 ++ * remain unserviced by IRQ, eventually falling back
9170 ++ * to polling mode in azx_rirb_get_response.
9171 ++ */
9172 ++ azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
9173 + active = true;
9174 + if (status & RIRB_INT_RESPONSE) {
9175 + if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
9176 + udelay(80);
9177 + snd_hdac_bus_update_rirb(bus);
9178 + }
9179 +- azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
9180 + }
9181 + } while (active && ++repeat < 10);
9182 +
9183 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
9184 +index 9c5b3d19bfa73..24bc9e4460473 100644
9185 +--- a/sound/pci/hda/patch_realtek.c
9186 ++++ b/sound/pci/hda/patch_realtek.c
9187 +@@ -3290,7 +3290,11 @@ static void alc256_shutup(struct hda_codec *codec)
9188 +
9189 + /* 3k pull low control for Headset jack. */
9190 + /* NOTE: call this before clearing the pin, otherwise codec stalls */
9191 +- alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
9199 9192 ++ /* Disabling the 3k pulldown control breaks mic detection on the alc257
9200 9193 ++ * when booting with a headset plugged in, so skip setting it for that codec
9194 ++ */
9195 ++ if (codec->core.vendor_id != 0x10ec0257)
9196 ++ alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
9197 +
9198 + if (!spec->no_shutup_pins)
9199 + snd_hda_codec_write(codec, hp_pin, 0,
9200 +@@ -5612,6 +5616,7 @@ static void alc_fixup_thinkpad_acpi(struct hda_codec *codec,
9201 + #include "hp_x360_helper.c"
9202 +
9203 + enum {
9204 ++ ALC269_FIXUP_GPIO2,
9205 + ALC269_FIXUP_SONY_VAIO,
9206 + ALC275_FIXUP_SONY_VAIO_GPIO2,
9207 + ALC269_FIXUP_DELL_M101Z,
9208 +@@ -5764,6 +5769,10 @@ enum {
9209 + };
9210 +
9211 + static const struct hda_fixup alc269_fixups[] = {
9212 ++ [ALC269_FIXUP_GPIO2] = {
9213 ++ .type = HDA_FIXUP_FUNC,
9214 ++ .v.func = alc_fixup_gpio2,
9215 ++ },
9216 + [ALC269_FIXUP_SONY_VAIO] = {
9217 + .type = HDA_FIXUP_PINCTLS,
9218 + .v.pins = (const struct hda_pintbl[]) {
9219 +@@ -6559,6 +6568,8 @@ static const struct hda_fixup alc269_fixups[] = {
9220 + [ALC233_FIXUP_LENOVO_MULTI_CODECS] = {
9221 + .type = HDA_FIXUP_FUNC,
9222 + .v.func = alc233_alc662_fixup_lenovo_dual_codecs,
9223 ++ .chained = true,
9224 ++ .chain_id = ALC269_FIXUP_GPIO2
9225 + },
9226 + [ALC233_FIXUP_ACER_HEADSET_MIC] = {
9227 + .type = HDA_FIXUP_VERBS,
9228 +diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
9229 +index 89b6e187ac235..a5b0c40ee545f 100644
9230 +--- a/sound/soc/codecs/max98090.c
9231 ++++ b/sound/soc/codecs/max98090.c
9232 +@@ -2130,10 +2130,16 @@ static void max98090_pll_work(struct max98090_priv *max98090)
9233 +
9234 + dev_info_ratelimited(component->dev, "PLL unlocked\n");
9235 +
9236 ++ /*
9237 ++ * As the datasheet suggests, the maximum PLL lock time should be
9238 ++ * 7 msec. The workaround resets the codec softly by toggling SHDN
9239 ++ * off and on if the PLL fails to lock within 10 msec. Notably, there is
9240 ++ * no suggested hold time for SHDN off.
9241 ++ */
9242 ++
9243 + /* Toggle shutdown OFF then ON */
9244 + snd_soc_component_update_bits(component, M98090_REG_DEVICE_SHUTDOWN,
9245 + M98090_SHDNN_MASK, 0);
9246 +- msleep(10);
9247 + snd_soc_component_update_bits(component, M98090_REG_DEVICE_SHUTDOWN,
9248 + M98090_SHDNN_MASK, M98090_SHDNN_MASK);
9249 +
9250 +diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
9251 +index 01acb8da2f48e..e3e069277a3ff 100644
9252 +--- a/sound/soc/codecs/wm8994.c
9253 ++++ b/sound/soc/codecs/wm8994.c
9254 +@@ -3376,6 +3376,8 @@ int wm8994_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *
9255 + return -EINVAL;
9256 + }
9257 +
9258 ++ pm_runtime_get_sync(component->dev);
9259 ++
9260 + switch (micbias) {
9261 + case 1:
9262 + micdet = &wm8994->micdet[0];
9263 +@@ -3423,6 +3425,8 @@ int wm8994_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *
9264 +
9265 + snd_soc_dapm_sync(dapm);
9266 +
9267 ++ pm_runtime_put(component->dev);
9268 ++
9269 + return 0;
9270 + }
9271 + EXPORT_SYMBOL_GPL(wm8994_mic_detect);
9272 +@@ -3790,6 +3794,8 @@ int wm8958_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *
9273 + return -EINVAL;
9274 + }
9275 +
9276 ++ pm_runtime_get_sync(component->dev);
9277 ++
9278 + if (jack) {
9279 + snd_soc_dapm_force_enable_pin(dapm, "CLK_SYS");
9280 + snd_soc_dapm_sync(dapm);
9281 +@@ -3858,6 +3864,8 @@ int wm8958_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *
9282 + snd_soc_dapm_sync(dapm);
9283 + }
9284 +
9285 ++ pm_runtime_put(component->dev);
9286 ++
9287 + return 0;
9288 + }
9289 + EXPORT_SYMBOL_GPL(wm8958_mic_detect);
9290 +@@ -4051,11 +4059,13 @@ static int wm8994_component_probe(struct snd_soc_component *component)
9291 + wm8994->hubs.dcs_readback_mode = 2;
9292 + break;
9293 + }
9294 ++ wm8994->hubs.micd_scthr = true;
9295 + break;
9296 +
9297 + case WM8958:
9298 + wm8994->hubs.dcs_readback_mode = 1;
9299 + wm8994->hubs.hp_startup_mode = 1;
9300 ++ wm8994->hubs.micd_scthr = true;
9301 +
9302 + switch (control->revision) {
9303 + case 0:
9304 +diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
9305 +index fed6ea9b019f7..da7fa6f5459e6 100644
9306 +--- a/sound/soc/codecs/wm_hubs.c
9307 ++++ b/sound/soc/codecs/wm_hubs.c
9308 +@@ -1227,6 +1227,9 @@ int wm_hubs_handle_analogue_pdata(struct snd_soc_component *component,
9309 + snd_soc_component_update_bits(component, WM8993_ADDITIONAL_CONTROL,
9310 + WM8993_LINEOUT2_FB, WM8993_LINEOUT2_FB);
9311 +
9312 ++ if (!hubs->micd_scthr)
9313 ++ return 0;
9314 ++
9315 + snd_soc_component_update_bits(component, WM8993_MICBIAS,
9316 + WM8993_JD_SCTHR_MASK | WM8993_JD_THR_MASK |
9317 + WM8993_MICB1_LVL | WM8993_MICB2_LVL,
9318 +diff --git a/sound/soc/codecs/wm_hubs.h b/sound/soc/codecs/wm_hubs.h
9319 +index ee339ad8514d1..1433d73e09bf8 100644
9320 +--- a/sound/soc/codecs/wm_hubs.h
9321 ++++ b/sound/soc/codecs/wm_hubs.h
9322 +@@ -31,6 +31,7 @@ struct wm_hubs_data {
9323 + int hp_startup_mode;
9324 + int series_startup;
9325 + int no_series_update;
9326 ++ bool micd_scthr;
9327 +
9328 + bool no_cache_dac_hp_direct;
9329 + struct list_head dcs_cache;
9330 +diff --git a/sound/soc/img/img-i2s-out.c b/sound/soc/img/img-i2s-out.c
9331 +index fc2d1dac63339..798ab579564cb 100644
9332 +--- a/sound/soc/img/img-i2s-out.c
9333 ++++ b/sound/soc/img/img-i2s-out.c
9334 +@@ -350,8 +350,10 @@ static int img_i2s_out_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
9335 + chan_control_mask = IMG_I2S_OUT_CHAN_CTL_CLKT_MASK;
9336 +
9337 + ret = pm_runtime_get_sync(i2s->dev);
9338 +- if (ret < 0)
9339 ++ if (ret < 0) {
9340 ++ pm_runtime_put_noidle(i2s->dev);
9341 + return ret;
9342 ++ }
9343 +
9344 + img_i2s_out_disable(i2s);
9345 +
9346 +@@ -491,8 +493,10 @@ static int img_i2s_out_probe(struct platform_device *pdev)
9347 + goto err_pm_disable;
9348 + }
9349 + ret = pm_runtime_get_sync(&pdev->dev);
9350 +- if (ret < 0)
9351 ++ if (ret < 0) {
9352 ++ pm_runtime_put_noidle(&pdev->dev);
9353 + goto err_suspend;
9354 ++ }
9355 +
9356 + reg = IMG_I2S_OUT_CTL_FRM_SIZE_MASK;
9357 + img_i2s_out_writel(i2s, reg, IMG_I2S_OUT_CTL);
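
Both img-i2s-out hunks handle a pm_runtime quirk: pm_runtime_get_sync() bumps the device usage count even when it fails, so the error path must call pm_runtime_put_noidle() to rebalance it. A userspace analogue of that counter behaviour (get_sync()/put_noidle() here are toy stand-ins, not the kernel functions):

    #include <stdio.h>

    static int usage;               /* stands in for the device usage counter */

    static int get_sync(int fail)
    {
            usage++;                /* counted even on failure */
            return fail ? -5 : 0;   /* -EIO-style error code */
    }

    static void put_noidle(void) { usage--; }

    int main(void)
    {
            int ret = get_sync(1);

            if (ret < 0)
                    put_noidle();   /* rebalance, or the device never suspends */
            printf("usage=%d ret=%d\n", usage, ret);
            return 0;
    }
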
9358 +diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
9359 +index 0dcd249877c55..ec630127ef2f3 100644
9360 +--- a/sound/soc/intel/boards/bytcr_rt5640.c
9361 ++++ b/sound/soc/intel/boards/bytcr_rt5640.c
9362 +@@ -588,6 +588,16 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
9363 + BYT_RT5640_SSP0_AIF1 |
9364 + BYT_RT5640_MCLK_EN),
9365 + },
9366 ++ { /* MPMAN Converter 9, similar hw as the I.T.Works TW891 2-in-1 */
9367 ++ .matches = {
9368 ++ DMI_MATCH(DMI_SYS_VENDOR, "MPMAN"),
9369 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Converter9"),
9370 ++ },
9371 ++ .driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
9372 ++ BYT_RT5640_MONO_SPEAKER |
9373 ++ BYT_RT5640_SSP0_AIF1 |
9374 ++ BYT_RT5640_MCLK_EN),
9375 ++ },
9376 + {
9377 + /* MPMAN MPWIN895CL */
9378 + .matches = {
9379 +diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c
9380 +index c6a58520d377a..255cc45905b81 100644
9381 +--- a/sound/soc/kirkwood/kirkwood-dma.c
9382 ++++ b/sound/soc/kirkwood/kirkwood-dma.c
9383 +@@ -136,7 +136,7 @@ static int kirkwood_dma_open(struct snd_pcm_substream *substream)
9384 + err = request_irq(priv->irq, kirkwood_dma_irq, IRQF_SHARED,
9385 + "kirkwood-i2s", priv);
9386 + if (err)
9387 +- return -EBUSY;
9388 ++ return err;
9389 +
9390 + /*
9391 + * Enable Error interrupts. We're only ack'ing them but
9392 +diff --git a/sound/usb/midi.c b/sound/usb/midi.c
9393 +index 28a3ad8b1d74b..137e1e8718d6f 100644
9394 +--- a/sound/usb/midi.c
9395 ++++ b/sound/usb/midi.c
9396 +@@ -1828,6 +1828,28 @@ static int snd_usbmidi_create_endpoints(struct snd_usb_midi *umidi,
9397 + return 0;
9398 + }
9399 +
9400 ++static struct usb_ms_endpoint_descriptor *find_usb_ms_endpoint_descriptor(
9401 ++ struct usb_host_endpoint *hostep)
9402 ++{
9403 ++ unsigned char *extra = hostep->extra;
9404 ++ int extralen = hostep->extralen;
9405 ++
9406 ++ while (extralen > 3) {
9407 ++ struct usb_ms_endpoint_descriptor *ms_ep =
9408 ++ (struct usb_ms_endpoint_descriptor *)extra;
9409 ++
9410 ++ if (ms_ep->bLength > 3 &&
9411 ++ ms_ep->bDescriptorType == USB_DT_CS_ENDPOINT &&
9412 ++ ms_ep->bDescriptorSubtype == UAC_MS_GENERAL)
9413 ++ return ms_ep;
9414 ++ if (!extra[0])
9415 ++ break;
9416 ++ extralen -= extra[0];
9417 ++ extra += extra[0];
9418 ++ }
9419 ++ return NULL;
9420 ++}
9421 ++
9422 + /*
9423 + * Returns MIDIStreaming device capabilities.
9424 + */
9425 +@@ -1865,11 +1887,8 @@ static int snd_usbmidi_get_ms_info(struct snd_usb_midi *umidi,
9426 + ep = get_ep_desc(hostep);
9427 + if (!usb_endpoint_xfer_bulk(ep) && !usb_endpoint_xfer_int(ep))
9428 + continue;
9429 +- ms_ep = (struct usb_ms_endpoint_descriptor *)hostep->extra;
9430 +- if (hostep->extralen < 4 ||
9431 +- ms_ep->bLength < 4 ||
9432 +- ms_ep->bDescriptorType != USB_DT_CS_ENDPOINT ||
9433 +- ms_ep->bDescriptorSubtype != UAC_MS_GENERAL)
9434 ++ ms_ep = find_usb_ms_endpoint_descriptor(hostep);
9435 ++ if (!ms_ep)
9436 + continue;
9437 + if (usb_endpoint_dir_out(ep)) {
9438 + if (endpoints[epidx].out_ep) {
9439 +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
9440 +index 45bd3d54be54b..451b8ea383c61 100644
9441 +--- a/sound/usb/mixer.c
9442 ++++ b/sound/usb/mixer.c
9443 +@@ -1699,6 +1699,16 @@ static void __build_feature_ctl(struct usb_mixer_interface *mixer,
9444 + /* get min/max values */
9445 + get_min_max_with_quirks(cval, 0, kctl);
9446 +
9447 ++ /* skip a bogus volume range */
9448 ++ if (cval->max <= cval->min) {
9449 ++ usb_audio_dbg(mixer->chip,
9450 ++ "[%d] FU [%s] skipped due to invalid volume\n",
9451 ++ cval->head.id, kctl->id.name);
9452 ++ snd_ctl_free_one(kctl);
9453 ++ return;
9454 ++ }
9455 ++
9456 ++
9457 + if (control == UAC_FU_VOLUME) {
9458 + check_mapped_dB(map, cval);
9459 + if (cval->dBmin < cval->dBmax || !cval->initialized) {
9460 +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
9461 +index 8d9117312e30c..e6dea1c7112be 100644
9462 +--- a/sound/usb/quirks.c
9463 ++++ b/sound/usb/quirks.c
9464 +@@ -1338,12 +1338,13 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
9465 + && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
9466 + msleep(20);
9467 +
9468 +- /* Zoom R16/24, Logitech H650e, Jabra 550a, Kingston HyperX needs a tiny
9469 +- * delay here, otherwise requests like get/set frequency return as
9470 +- * failed despite actually succeeding.
9471 ++ /* Zoom R16/24, Logitech H650e/H570e, Jabra 550a, Kingston HyperX
9472 ++ * need a tiny delay here, otherwise requests like get/set
9473 ++ * frequency return as failed despite actually succeeding.
9474 + */
9475 + if ((chip->usb_id == USB_ID(0x1686, 0x00dd) ||
9476 + chip->usb_id == USB_ID(0x046d, 0x0a46) ||
9477 ++ chip->usb_id == USB_ID(0x046d, 0x0a56) ||
9478 + chip->usb_id == USB_ID(0x0b0e, 0x0349) ||
9479 + chip->usb_id == USB_ID(0x0951, 0x16ad)) &&
9480 + (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
9481 +diff --git a/tools/gpio/gpio-hammer.c b/tools/gpio/gpio-hammer.c
9482 +index 4bcb234c0fcab..3da5462a0c7d3 100644
9483 +--- a/tools/gpio/gpio-hammer.c
9484 ++++ b/tools/gpio/gpio-hammer.c
9485 +@@ -138,7 +138,14 @@ int main(int argc, char **argv)
9486 + device_name = optarg;
9487 + break;
9488 + case 'o':
9489 +- lines[i] = strtoul(optarg, NULL, 10);
9490 ++ /*
9491 ++ * Avoid overflow. Do not error out immediately; we want to
9492 ++ * be able to report accurately on the number of times
9493 ++ * '-o' was given, so the error message is accurate
9494 ++ */
9495 ++ if (i < GPIOHANDLES_MAX)
9496 ++ lines[i] = strtoul(optarg, NULL, 10);
9497 ++
9498 + i++;
9499 + break;
9500 + case '?':
9501 +@@ -146,6 +153,14 @@ int main(int argc, char **argv)
9502 + return -1;
9503 + }
9504 + }
9505 ++
9506 ++ if (i >= GPIOHANDLES_MAX) {
9507 ++ fprintf(stderr,
9508 ++ "Only %d occurrences of '-o' are allowed, %d were found\n",
9509 ++ GPIOHANDLES_MAX, i + 1);
9510 ++ return -1;
9511 ++ }
9512 ++
9513 + nlines = i;
9514 +
9515 + if (!device_name || !nlines) {
9516 +diff --git a/tools/objtool/check.c b/tools/objtool/check.c
9517 +index fd3071d83deae..c0ab27368a345 100644
9518 +--- a/tools/objtool/check.c
9519 ++++ b/tools/objtool/check.c
9520 +@@ -503,7 +503,7 @@ static int add_jump_destinations(struct objtool_file *file)
9521 + insn->type != INSN_JUMP_UNCONDITIONAL)
9522 + continue;
9523 +
9524 +- if (insn->ignore || insn->offset == FAKE_JUMP_OFFSET)
9525 ++ if (insn->offset == FAKE_JUMP_OFFSET)
9526 + continue;
9527 +
9528 + rela = find_rela_by_dest_range(insn->sec, insn->offset,
9529 +diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
9530 +index 6aae10ff954c7..adabe9d4dc866 100644
9531 +--- a/tools/perf/builtin-stat.c
9532 ++++ b/tools/perf/builtin-stat.c
9533 +@@ -422,7 +422,7 @@ static void process_interval(void)
9534 + }
9535 +
9536 + init_stats(&walltime_nsecs_stats);
9537 +- update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000);
9538 ++ update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000ULL);
9539 + print_counters(&rs, 0, NULL);
9540 + }
9541 +
9542 +diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
9543 +index c17e594041712..6631970f96832 100644
9544 +--- a/tools/perf/pmu-events/jevents.c
9545 ++++ b/tools/perf/pmu-events/jevents.c
9546 +@@ -1064,10 +1064,9 @@ static int process_one_file(const char *fpath, const struct stat *sb,
9547 + */
9548 + int main(int argc, char *argv[])
9549 + {
9550 +- int rc;
9551 ++ int rc, ret = 0;
9552 + int maxfds;
9553 + char ldirname[PATH_MAX];
9554 +-
9555 + const char *arch;
9556 + const char *output_file;
9557 + const char *start_dirname;
9558 +@@ -1138,7 +1137,8 @@ int main(int argc, char *argv[])
9559 + /* Make build fail */
9560 + fclose(eventsfp);
9561 + free_arch_std_events();
9562 +- return 1;
9563 ++ ret = 1;
9564 ++ goto out_free_mapfile;
9565 + } else if (rc) {
9566 + goto empty_map;
9567 + }
9568 +@@ -1156,14 +1156,17 @@ int main(int argc, char *argv[])
9569 + /* Make build fail */
9570 + fclose(eventsfp);
9571 + free_arch_std_events();
9572 +- return 1;
9573 ++ ret = 1;
9574 + }
9575 +
9576 +- return 0;
9577 ++
9578 ++ goto out_free_mapfile;
9579 +
9580 + empty_map:
9581 + fclose(eventsfp);
9582 + create_empty_mapping(output_file);
9583 + free_arch_std_events();
9584 +- return 0;
9585 ++out_free_mapfile:
9586 ++ free(mapfile);
9587 ++ return ret;
9588 + }
9589 +diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
9590 +index 7cb99b433888b..c2cc42daf9242 100644
9591 +--- a/tools/perf/tests/shell/lib/probe_vfs_getname.sh
9592 ++++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
9593 +@@ -14,7 +14,7 @@ add_probe_vfs_getname() {
9594 + if [ $had_vfs_getname -eq 1 ] ; then
9595 + line=$(perf probe -L getname_flags 2>&1 | egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/')
9596 + perf probe -q "vfs_getname=getname_flags:${line} pathname=result->name:string" || \
9597 +- perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:string"
9598 ++ perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:ustring"
9599 + fi
9600 + }
9601 +
9602 +diff --git a/tools/perf/trace/beauty/arch_errno_names.sh b/tools/perf/trace/beauty/arch_errno_names.sh
9603 +index 22c9fc900c847..f8c44a85650be 100755
9604 +--- a/tools/perf/trace/beauty/arch_errno_names.sh
9605 ++++ b/tools/perf/trace/beauty/arch_errno_names.sh
9606 +@@ -91,7 +91,7 @@ EoHEADER
9607 + # in tools/perf/arch
9608 + archlist=""
9609 + for arch in $(find $toolsdir/arch -maxdepth 1 -mindepth 1 -type d -printf "%f\n" | grep -v x86 | sort); do
9610 +- test -d arch/$arch && archlist="$archlist $arch"
9611 ++ test -d $toolsdir/perf/arch/$arch && archlist="$archlist $arch"
9612 + done
9613 +
9614 + for arch in x86 $archlist generic; do
9615 +diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
9616 +index f93846edc1e0d..827d844f4efb1 100644
9617 +--- a/tools/perf/util/cpumap.c
9618 ++++ b/tools/perf/util/cpumap.c
9619 +@@ -462,7 +462,7 @@ static void set_max_cpu_num(void)
9620 +
9621 + /* get the highest possible cpu number for a sparse allocation */
9622 + ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt);
9623 +- if (ret == PATH_MAX) {
9624 ++ if (ret >= PATH_MAX) {
9625 + pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
9626 + goto out;
9627 + }
9628 +@@ -473,7 +473,7 @@ static void set_max_cpu_num(void)
9629 +
9630 + /* get the highest present cpu number for a sparse allocation */
9631 + ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/present", mnt);
9632 +- if (ret == PATH_MAX) {
9633 ++ if (ret >= PATH_MAX) {
9634 + pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
9635 + goto out;
9636 + }
9637 +@@ -501,7 +501,7 @@ static void set_max_node_num(void)
9638 +
9639 + /* get the highest possible cpu number for a sparse allocation */
9640 + ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt);
9641 +- if (ret == PATH_MAX) {
9642 ++ if (ret >= PATH_MAX) {
9643 + pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
9644 + goto out;
9645 + }
9646 +@@ -586,7 +586,7 @@ int cpu__setup_cpunode_map(void)
9647 + return 0;
9648 +
9649 + n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt);
9650 +- if (n == PATH_MAX) {
9651 ++ if (n >= PATH_MAX) {
9652 + pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
9653 + return -1;
9654 + }
9655 +@@ -601,7 +601,7 @@ int cpu__setup_cpunode_map(void)
9656 + continue;
9657 +
9658 + n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name);
9659 +- if (n == PATH_MAX) {
9660 ++ if (n >= PATH_MAX) {
9661 + pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
9662 + continue;
9663 + }
9664 +diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
9665 +index 4fad92213609f..11a2aa80802d5 100644
9666 +--- a/tools/perf/util/evsel.c
9667 ++++ b/tools/perf/util/evsel.c
9668 +@@ -1290,6 +1290,9 @@ void perf_evsel__exit(struct perf_evsel *evsel)
9669 + thread_map__put(evsel->threads);
9670 + zfree(&evsel->group_name);
9671 + zfree(&evsel->name);
9672 ++ zfree(&evsel->pmu_name);
9673 ++ zfree(&evsel->per_pkg_mask);
9674 ++ zfree(&evsel->metric_events);
9675 + perf_evsel__object.fini(evsel);
9676 + }
9677 +
9678 +diff --git a/tools/perf/util/mem2node.c b/tools/perf/util/mem2node.c
9679 +index c6fd81c025863..81c5a2e438b7d 100644
9680 +--- a/tools/perf/util/mem2node.c
9681 ++++ b/tools/perf/util/mem2node.c
9682 +@@ -1,5 +1,6 @@
9683 + #include <errno.h>
9684 + #include <inttypes.h>
9685 ++#include <asm/bug.h>
9686 + #include <linux/bitmap.h>
9687 + #include "mem2node.h"
9688 + #include "util.h"
9689 +@@ -92,7 +93,7 @@ int mem2node__init(struct mem2node *map, struct perf_env *env)
9690 +
9691 + /* Cut unused entries, due to merging. */
9692 + tmp_entries = realloc(entries, sizeof(*entries) * j);
9693 +- if (tmp_entries)
9694 ++ if (tmp_entries || WARN_ON_ONCE(j == 0))
9695 + entries = tmp_entries;
9696 +
9697 + for (i = 0; i < j; i++) {
9698 +diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
9699 +index 8b3dafe3fac3a..6dcc6e1182a54 100644
9700 +--- a/tools/perf/util/metricgroup.c
9701 ++++ b/tools/perf/util/metricgroup.c
9702 +@@ -171,6 +171,7 @@ static int metricgroup__setup_events(struct list_head *groups,
9703 + if (!evsel) {
9704 + pr_debug("Cannot resolve %s: %s\n",
9705 + eg->metric_name, eg->metric_expr);
9706 ++ free(metric_events);
9707 + continue;
9708 + }
9709 + for (i = 0; i < eg->idnum; i++)
9710 +@@ -178,11 +179,13 @@ static int metricgroup__setup_events(struct list_head *groups,
9711 + me = metricgroup__lookup(metric_events_list, evsel, true);
9712 + if (!me) {
9713 + ret = -ENOMEM;
9714 ++ free(metric_events);
9715 + break;
9716 + }
9717 + expr = malloc(sizeof(struct metric_expr));
9718 + if (!expr) {
9719 + ret = -ENOMEM;
9720 ++ free(metric_events);
9721 + break;
9722 + }
9723 + expr->metric_expr = eg->metric_expr;
9724 +diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
9725 +index 95043cae57740..0eff0c3ba9eeb 100644
9726 +--- a/tools/perf/util/parse-events.c
9727 ++++ b/tools/perf/util/parse-events.c
9728 +@@ -1261,7 +1261,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
9729 + attr.type = pmu->type;
9730 + evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu, NULL, auto_merge_stats);
9731 + if (evsel) {
9732 +- evsel->pmu_name = name;
9733 ++ evsel->pmu_name = name ? strdup(name) : NULL;
9734 + evsel->use_uncore_alias = use_uncore_alias;
9735 + return 0;
9736 + } else {
9737 +@@ -1302,7 +1302,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
9738 + evsel->snapshot = info.snapshot;
9739 + evsel->metric_expr = info.metric_expr;
9740 + evsel->metric_name = info.metric_name;
9741 +- evsel->pmu_name = name;
9742 ++ evsel->pmu_name = name ? strdup(name) : NULL;
9743 + evsel->use_uncore_alias = use_uncore_alias;
9744 + }
9745 +
9746 +@@ -1421,12 +1421,11 @@ parse_events__set_leader_for_uncore_aliase(char *name, struct list_head *list,
9747 + * event. That can be used to distinguish the leader from
9748 + * other members, even they have the same event name.
9749 + */
9750 +- if ((leader != evsel) && (leader->pmu_name == evsel->pmu_name)) {
9751 ++ if ((leader != evsel) &&
9752 ++ !strcmp(leader->pmu_name, evsel->pmu_name)) {
9753 + is_leader = false;
9754 + continue;
9755 + }
9756 +- /* The name is always alias name */
9757 +- WARN_ON(strcmp(leader->name, evsel->name));
9758 +
9759 + /* Store the leader event for each PMU */
9760 + leaders[nr_pmu++] = (uintptr_t) evsel;
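
The parse-events changes above duplicate the PMU name with strdup() so each evsel owns its own copy (the old code aliased a buffer freed elsewhere), which in turn means the leader check must compare contents with strcmp() rather than pointer equality, since two owned copies of the same string are never pointer-equal:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
            const char *src = "uncore_imc";    /* hypothetical PMU name */
            char *a = strdup(src);
            char *b = strdup(src);

            printf("pointer-equal: %d\n", a == b);            /* 0 */
            printf("content-equal: %d\n", strcmp(a, b) == 0); /* 1 */

            free(a);
            free(b);
            return 0;
    }
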
9761 +diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
9762 +index 46daa22b86e3b..85ff4f68adc00 100644
9763 +--- a/tools/perf/util/sort.c
9764 ++++ b/tools/perf/util/sort.c
9765 +@@ -2690,7 +2690,7 @@ static char *prefix_if_not_in(const char *pre, char *str)
9766 + return str;
9767 +
9768 + if (asprintf(&n, "%s,%s", pre, str) < 0)
9769 +- return NULL;
9770 ++ n = NULL;
9771 +
9772 + free(str);
9773 + return n;
9774 +diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
9775 +index a701a8a48f005..166c621e02235 100644
9776 +--- a/tools/perf/util/symbol-elf.c
9777 ++++ b/tools/perf/util/symbol-elf.c
9778 +@@ -1421,6 +1421,7 @@ struct kcore_copy_info {
9779 + u64 first_symbol;
9780 + u64 last_symbol;
9781 + u64 first_module;
9782 ++ u64 first_module_symbol;
9783 + u64 last_module_symbol;
9784 + size_t phnum;
9785 + struct list_head phdrs;
9786 +@@ -1497,6 +1498,8 @@ static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
9787 + return 0;
9788 +
9789 + if (strchr(name, '[')) {
9790 ++ if (!kci->first_module_symbol || start < kci->first_module_symbol)
9791 ++ kci->first_module_symbol = start;
9792 + if (start > kci->last_module_symbol)
9793 + kci->last_module_symbol = start;
9794 + return 0;
9795 +@@ -1694,6 +1697,10 @@ static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
9796 + kci->etext += page_size;
9797 + }
9798 +
9799 ++ if (kci->first_module_symbol &&
9800 ++ (!kci->first_module || kci->first_module_symbol < kci->first_module))
9801 ++ kci->first_module = kci->first_module_symbol;
9802 ++
9803 + kci->first_module = round_down(kci->first_module, page_size);
9804 +
9805 + if (kci->last_module_symbol) {
9806 +diff --git a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
9807 +index 2fa3c5757bcb5..dbed3d213bf17 100755
9808 +--- a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
9809 ++++ b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
9810 +@@ -10,11 +10,11 @@ then this utility enables and collects trace data for a user specified interval
9811 + and generates performance plots.
9812 +
9813 + Prerequisites:
9814 +- Python version 2.7.x
9815 ++ Python version 2.7.x or higher
9816 + gnuplot 5.0 or higher
9817 +- gnuplot-py 1.8
9818 ++ gnuplot-py 1.8 or higher
9819 + (Most of the distributions have these required packages. They may be called
9820 +- gnuplot-py, phython-gnuplot. )
9821 ++ gnuplot-py, python-gnuplot or python3-gnuplot, gnuplot-nox, ... )
9822 +
9823 + HWP (Hardware P-States are disabled)
9824 + Kernel config for Linux trace is enabled
9825 +@@ -180,7 +180,7 @@ def plot_pstate_cpu_with_sample():
9826 + g_plot('set xlabel "Samples"')
9827 + g_plot('set ylabel "P-State"')
9828 + g_plot('set title "{} : cpu pstate vs. sample : {:%F %H:%M}"'.format(testname, datetime.now()))
9829 +- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
9830 ++ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
9831 + plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_SAMPLE, C_TO)
9832 + g_plot('title_list = "{}"'.format(title_list))
9833 + g_plot(plot_str)
9834 +@@ -197,7 +197,7 @@ def plot_pstate_cpu():
9835 + # the following command is really cool, but doesn't work with the CPU masking option because it aborts on the first missing file.
9836 + # plot_str = 'plot for [i=0:*] file=sprintf("cpu%03d.csv",i) title_s=sprintf("cpu%03d",i) file using 16:7 pt 7 ps 1 title title_s'
9837 + #
9838 +- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
9839 ++ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
9840 + plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_TO)
9841 + g_plot('title_list = "{}"'.format(title_list))
9842 + g_plot(plot_str)
9843 +@@ -211,7 +211,7 @@ def plot_load_cpu():
9844 + g_plot('set ylabel "CPU load (percent)"')
9845 + g_plot('set title "{} : cpu loads : {:%F %H:%M}"'.format(testname, datetime.now()))
9846 +
9847 +- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
9848 ++ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
9849 + plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_LOAD)
9850 + g_plot('title_list = "{}"'.format(title_list))
9851 + g_plot(plot_str)
9852 +@@ -225,7 +225,7 @@ def plot_frequency_cpu():
9853 + g_plot('set ylabel "CPU Frequency (GHz)"')
9854 + g_plot('set title "{} : cpu frequencies : {:%F %H:%M}"'.format(testname, datetime.now()))
9855 +
9856 +- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
9857 ++ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
9858 + plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_FREQ)
9859 + g_plot('title_list = "{}"'.format(title_list))
9860 + g_plot(plot_str)
9861 +@@ -240,7 +240,7 @@ def plot_duration_cpu():
9862 + g_plot('set ylabel "Timer Duration (MilliSeconds)"')
9863 + g_plot('set title "{} : cpu durations : {:%F %H:%M}"'.format(testname, datetime.now()))
9864 +
9865 +- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
9866 ++ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
9867 + plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_DURATION)
9868 + g_plot('title_list = "{}"'.format(title_list))
9869 + g_plot(plot_str)
9870 +@@ -254,7 +254,7 @@ def plot_scaled_cpu():
9871 + g_plot('set ylabel "Scaled Busy (Unitless)"')
9872 + g_plot('set title "{} : cpu scaled busy : {:%F %H:%M}"'.format(testname, datetime.now()))
9873 +
9874 +- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
9875 ++ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
9876 + plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_SCALED)
9877 + g_plot('title_list = "{}"'.format(title_list))
9878 + g_plot(plot_str)
9879 +@@ -268,7 +268,7 @@ def plot_boost_cpu():
9880 + g_plot('set ylabel "CPU IO Boost (percent)"')
9881 + g_plot('set title "{} : cpu io boost : {:%F %H:%M}"'.format(testname, datetime.now()))
9882 +
9883 +- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
9884 ++ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
9885 + plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_BOOST)
9886 + g_plot('title_list = "{}"'.format(title_list))
9887 + g_plot(plot_str)
9888 +@@ -282,7 +282,7 @@ def plot_ghz_cpu():
9889 + g_plot('set ylabel "TSC Frequency (GHz)"')
9890 + g_plot('set title "{} : cpu TSC Frequencies (Sanity check calculation) : {:%F %H:%M}"'.format(testname, datetime.now()))
9891 +
9892 +- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
9893 ++ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
9894 + plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_GHZ)
9895 + g_plot('title_list = "{}"'.format(title_list))
9896 + g_plot(plot_str)
9897 +diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
9898 +index 27a54a17da65d..f4e92afab14b2 100644
9899 +--- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
9900 ++++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
9901 +@@ -30,7 +30,7 @@ ftrace_filter_check '*schedule*' '^.*schedule.*$'
9902 + ftrace_filter_check 'schedule*' '^schedule.*$'
9903 +
9904 + # filter by *mid*end
9905 +-ftrace_filter_check '*aw*lock' '.*aw.*lock$'
9906 ++ftrace_filter_check '*pin*lock' '.*pin.*lock$'
9907 +
9908 + # filter by start*mid*
9909 + ftrace_filter_check 'mutex*try*' '^mutex.*try.*'
9910 +diff --git a/tools/testing/selftests/x86/syscall_nt.c b/tools/testing/selftests/x86/syscall_nt.c
9911 +index 43fcab367fb0a..74e6b3fc2d09e 100644
9912 +--- a/tools/testing/selftests/x86/syscall_nt.c
9913 ++++ b/tools/testing/selftests/x86/syscall_nt.c
9914 +@@ -67,6 +67,7 @@ static void do_it(unsigned long extraflags)
9915 + set_eflags(get_eflags() | extraflags);
9916 + syscall(SYS_getpid);
9917 + flags = get_eflags();
9918 ++ set_eflags(X86_EFLAGS_IF | X86_EFLAGS_FIXED);
9919 + if ((flags & extraflags) == extraflags) {
9920 + printf("[OK]\tThe syscall worked and flags are still set\n");
9921 + } else {
9922 +diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c
9923 +index 878e0edb2e1b7..ff0a1c6083718 100644
9924 +--- a/virt/kvm/arm/mmio.c
9925 ++++ b/virt/kvm/arm/mmio.c
9926 +@@ -142,7 +142,7 @@ static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
9927 + bool sign_extend;
9928 + bool sixty_four;
9929 +
9930 +- if (kvm_vcpu_dabt_iss1tw(vcpu)) {
9931 ++ if (kvm_vcpu_abt_iss1tw(vcpu)) {
9932 + /* page table accesses IO mem: tell guest to fix its TTBR */
9933 + kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
9934 + return 1;
9935 +diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
9936 +index 41d6285c3da99..787f7329d1b7f 100644
9937 +--- a/virt/kvm/arm/mmu.c
9938 ++++ b/virt/kvm/arm/mmu.c
9939 +@@ -1282,6 +1282,9 @@ static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
9940 +
9941 + static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
9942 + {
9943 ++ if (kvm_vcpu_abt_iss1tw(vcpu))
9944 ++ return true;
9945 ++
9946 + if (kvm_vcpu_trap_is_iabt(vcpu))
9947 + return false;
9948 +
9949 +@@ -1496,7 +1499,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
9950 + unsigned long flags = 0;
9951 +
9952 + write_fault = kvm_is_write_fault(vcpu);
9953 +- exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
9954 ++ exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
9955 + VM_BUG_ON(write_fault && exec_fault);
9956 +
9957 + if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
9958 +diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
9959 +index cd75df25fe140..2fc1777da50d2 100644
9960 +--- a/virt/kvm/arm/vgic/vgic-init.c
9961 ++++ b/virt/kvm/arm/vgic/vgic-init.c
9962 +@@ -187,6 +187,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
9963 + break;
9964 + default:
9965 + kfree(dist->spis);
9966 ++ dist->spis = NULL;
9967 + return -EINVAL;
9968 + }
9969 + }
9970 +diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
9971 +index 9295addea7ecf..f139b1c62ca38 100644
9972 +--- a/virt/kvm/arm/vgic/vgic-its.c
9973 ++++ b/virt/kvm/arm/vgic/vgic-its.c
9974 +@@ -107,14 +107,21 @@ out_unlock:
9975 + * We "cache" the configuration table entries in our struct vgic_irq's.
9976 + * However we only have those structs for mapped IRQs, so we read in
9977 + * the respective config data from memory here upon mapping the LPI.
9978 ++ *
9979 ++ * Should any of these fail, behave as if we couldn't create the LPI
9980 ++ * by dropping the refcount and returning the error.
9981 + */
9982 + ret = update_lpi_config(kvm, irq, NULL, false);
9983 +- if (ret)
9984 ++ if (ret) {
9985 ++ vgic_put_irq(kvm, irq);
9986 + return ERR_PTR(ret);
9987 ++ }
9988 +
9989 + ret = vgic_v3_lpi_sync_pending_status(kvm, irq);
9990 +- if (ret)
9991 ++ if (ret) {
9992 ++ vgic_put_irq(kvm, irq);
9993 + return ERR_PTR(ret);
9994 ++ }
9995 +
9996 + return irq;
9997 + }
9998 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
9999 +index 6bd01d12df2ec..9312c7e750ed3 100644
10000 +--- a/virt/kvm/kvm_main.c
10001 ++++ b/virt/kvm/kvm_main.c
10002 +@@ -169,6 +169,7 @@ bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
10003 + */
10004 + if (pfn_valid(pfn))
10005 + return PageReserved(pfn_to_page(pfn)) &&
10006 ++ !is_zero_pfn(pfn) &&
10007 + !kvm_is_zone_device_pfn(pfn);
10008 +
10009 + return true;