
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Thu, 01 Oct 2020 12:34:58
Message-Id: 1601555678.09e81ef9c8bba2841049e6eb601bbcf5db64ce6b.mpagano@gentoo
commit: 09e81ef9c8bba2841049e6eb601bbcf5db64ce6b
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Oct 1 12:34:38 2020 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Oct 1 12:34:38 2020 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=09e81ef9

Linux patch 4.14.200

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1199_linux-4.14.200.patch | 6075 +++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 6079 insertions(+)

diff --git a/0000_README b/0000_README
index 474e30c..952497e 100644
--- a/0000_README
+++ b/0000_README
@@ -839,6 +839,10 @@ Patch: 1198_linux-4.14.199.patch
From: https://www.kernel.org
Desc: Linux 4.14.199

+Patch: 1199_linux-4.14.200.patch
+From: https://www.kernel.org
+Desc: Linux 4.14.200
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1199_linux-4.14.200.patch b/1199_linux-4.14.200.patch
new file mode 100644
index 0000000..435900c
--- /dev/null
+++ b/1199_linux-4.14.200.patch
@@ -0,0 +1,6075 @@
+diff --git a/Documentation/devicetree/bindings/sound/wm8994.txt b/Documentation/devicetree/bindings/sound/wm8994.txt
+index 68c4e8d96bed6..b309de00cd836 100644
+--- a/Documentation/devicetree/bindings/sound/wm8994.txt
++++ b/Documentation/devicetree/bindings/sound/wm8994.txt
+@@ -14,9 +14,15 @@ Required properties:
+ - #gpio-cells : Must be 2. The first cell is the pin number and the
+ second cell is used to specify optional parameters (currently unused).
+
+- - AVDD2-supply, DBVDD1-supply, DBVDD2-supply, DBVDD3-supply, CPVDD-supply,
+- SPKVDD1-supply, SPKVDD2-supply : power supplies for the device, as covered
+- in Documentation/devicetree/bindings/regulator/regulator.txt
++ - power supplies for the device, as covered in
++ Documentation/devicetree/bindings/regulator/regulator.txt, depending
++ on compatible:
++ - for wlf,wm1811 and wlf,wm8958:
++ AVDD1-supply, AVDD2-supply, DBVDD1-supply, DBVDD2-supply, DBVDD3-supply,
++ DCVDD-supply, CPVDD-supply, SPKVDD1-supply, SPKVDD2-supply
++ - for wlf,wm8994:
++ AVDD1-supply, AVDD2-supply, DBVDD-supply, DCVDD-supply, CPVDD-supply,
++ SPKVDD1-supply, SPKVDD2-supply
+
+ Optional properties:
+
+@@ -68,11 +74,11 @@ codec: wm8994@1a {
+
+ lineout1-se;
+
++ AVDD1-supply = <&regulator>;
+ AVDD2-supply = <&regulator>;
+ CPVDD-supply = <&regulator>;
+- DBVDD1-supply = <&regulator>;
+- DBVDD2-supply = <&regulator>;
+- DBVDD3-supply = <&regulator>;
++ DBVDD-supply = <&regulator>;
++ DCVDD-supply = <&regulator>;
+ SPKVDD1-supply = <&regulator>;
+ SPKVDD2-supply = <&regulator>;
+ };
+diff --git a/Documentation/driver-api/libata.rst b/Documentation/driver-api/libata.rst
+index 4adc056f76356..01ae89efa2bda 100644
+--- a/Documentation/driver-api/libata.rst
++++ b/Documentation/driver-api/libata.rst
+@@ -251,7 +251,7 @@ High-level taskfile hooks
+
+ ::
+
+- void (*qc_prep) (struct ata_queued_cmd *qc);
++ enum ata_completion_errors (*qc_prep) (struct ata_queued_cmd *qc);
+ int (*qc_issue) (struct ata_queued_cmd *qc);
+
+
+diff --git a/Makefile b/Makefile
+index aaba1a2b45833..0fde7a0de1dd0 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 199
++SUBLEVEL = 200
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 6b3bb67596ae8..174aa12fb8b1f 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -136,11 +136,10 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
+ S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
+ S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
+- /* Linux doesn't care about the EL3 */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
+- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
+- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
+- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
+ ARM64_FTR_END,
+ };
+
+@@ -273,7 +272,7 @@ static const struct arm64_ftr_bits ftr_id_pfr0[] = {
+ };
+
+ static const struct arm64_ftr_bits ftr_id_dfr0[] = {
+- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
++ /* [31:28] TraceFilt */
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf), /* PerfMon */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
+@@ -627,9 +626,6 @@ void update_cpu_features(int cpu,
+ taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
+ info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);
+
+- /*
+- * EL3 is not our concern.
+- */
+ taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
+ info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
+ taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
+diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
+index 71c0867ecf20f..7fdf4e7799bcd 100644
+--- a/arch/m68k/q40/config.c
++++ b/arch/m68k/q40/config.c
+@@ -303,6 +303,7 @@ static int q40_get_rtc_pll(struct rtc_pll_info *pll)
+ {
+ int tmp = Q40_RTC_CTRL;
+
++ pll->pll_ctrl = 0;
+ pll->pll_value = tmp & Q40_RTC_PLL_MASK;
+ if (tmp & Q40_RTC_PLL_SIGN)
+ pll->pll_value = -pll->pll_value;
+diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
+index a45af3de075d9..d43e4ab20b238 100644
+--- a/arch/mips/include/asm/cpu-type.h
++++ b/arch/mips/include/asm/cpu-type.h
+@@ -47,6 +47,7 @@ static inline int __pure __get_cpu_type(const int cpu_type)
+ case CPU_34K:
+ case CPU_1004K:
+ case CPU_74K:
++ case CPU_1074K:
+ case CPU_M14KC:
+ case CPU_M14KEC:
+ case CPU_INTERAPTIV:
+diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
+index d2ba7936d0d33..7b46576962bfd 100644
+--- a/arch/powerpc/kernel/eeh.c
++++ b/arch/powerpc/kernel/eeh.c
+@@ -506,7 +506,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
+ rc = 1;
+ if (pe->state & EEH_PE_ISOLATED) {
+ pe->check_count++;
+- if (pe->check_count % EEH_MAX_FAILS == 0) {
++ if (pe->check_count == EEH_MAX_FAILS) {
+ dn = pci_device_to_OF_node(dev);
+ if (dn)
+ location = of_get_property(dn, "ibm,loc-code",
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index 3c9457420aee8..0f1a888c04a84 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -357,11 +357,11 @@ out:
+ #ifdef CONFIG_PPC_BOOK3S_64
+ BUG_ON(get_paca()->in_nmi == 0);
+ if (get_paca()->in_nmi > 1)
+- nmi_panic(regs, "Unrecoverable nested System Reset");
++ die("Unrecoverable nested System Reset", regs, SIGABRT);
+ #endif
+ /* Must die if the interrupt is not recoverable */
+ if (!(regs->msr & MSR_RI))
+- nmi_panic(regs, "Unrecoverable System Reset");
++ die("Unrecoverable System Reset", regs, SIGABRT);
+
+ if (!nested)
+ nmi_exit();
+@@ -701,7 +701,7 @@ void machine_check_exception(struct pt_regs *regs)
+
+ /* Must die if the interrupt is not recoverable */
+ if (!(regs->msr & MSR_RI))
+- nmi_panic(regs, "Unrecoverable Machine check");
++ die("Unrecoverable Machine check", regs, SIGBUS);
+
+ return;
+
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index 5c2558cc6977a..42025e33a4e07 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -540,7 +540,7 @@ static struct notifier_block kdump_mem_nb = {
+ /*
+ * Make sure that the area behind memory_end is protected
+ */
+-static void reserve_memory_end(void)
++static void __init reserve_memory_end(void)
+ {
+ #ifdef CONFIG_CRASH_DUMP
+ if (ipl_info.type == IPL_TYPE_FCP_DUMP &&
+@@ -558,7 +558,7 @@ static void reserve_memory_end(void)
+ /*
+ * Make sure that oldmem, where the dump is stored, is protected
+ */
+-static void reserve_oldmem(void)
++static void __init reserve_oldmem(void)
+ {
+ #ifdef CONFIG_CRASH_DUMP
+ if (OLDMEM_BASE)
+@@ -570,7 +570,7 @@ static void reserve_oldmem(void)
+ /*
+ * Make sure that oldmem, where the dump is stored, is protected
+ */
+-static void remove_oldmem(void)
++static void __init remove_oldmem(void)
+ {
+ #ifdef CONFIG_CRASH_DUMP
+ if (OLDMEM_BASE)
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index 041d2a04be1d8..270448b178a7a 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -330,7 +330,7 @@ DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
+ * combination with microcode which triggers a CPU buffer flush when the
+ * instruction is executed.
+ */
+-static inline void mds_clear_cpu_buffers(void)
++static __always_inline void mds_clear_cpu_buffers(void)
+ {
+ static const u16 ds = __KERNEL_DS;
+
+@@ -351,7 +351,7 @@ static inline void mds_clear_cpu_buffers(void)
+ *
+ * Clear CPU buffers if the corresponding static key is enabled
+ */
+-static inline void mds_user_clear_cpu_buffers(void)
++static __always_inline void mds_user_clear_cpu_buffers(void)
+ {
+ if (static_branch_likely(&mds_user_clear))
+ mds_clear_cpu_buffers();
+diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h
+index 851c04b7a0922..1572a436bc087 100644
+--- a/arch/x86/include/asm/pkeys.h
++++ b/arch/x86/include/asm/pkeys.h
+@@ -4,6 +4,11 @@
+
+ #define ARCH_DEFAULT_PKEY 0
+
++/*
++ * If more than 16 keys are ever supported, a thorough audit
++ * will be necessary to ensure that the types that store key
++ * numbers and masks have sufficient capacity.
++ */
+ #define arch_max_pkey() (boot_cpu_has(X86_FEATURE_OSPKE) ? 16 : 1)
+
+ extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index a1c4a13782da8..be226cdd08d37 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -2160,6 +2160,7 @@ static inline void __init check_timer(void)
+ legacy_pic->init(0);
+ legacy_pic->make_irq(0);
+ apic_write(APIC_LVT0, APIC_DM_EXTINT);
++ legacy_pic->unmask(0);
+
+ unlock_ExtINT_logic();
+
+diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
+index 4b900035f2202..601a5da1d196a 100644
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -907,8 +907,6 @@ const void *get_xsave_field_ptr(int xsave_state)
+
+ #ifdef CONFIG_ARCH_HAS_PKEYS
+
+-#define NR_VALID_PKRU_BITS (CONFIG_NR_PROTECTION_KEYS * 2)
+-#define PKRU_VALID_MASK (NR_VALID_PKRU_BITS - 1)
+ /*
+ * This will go out and modify PKRU register to set the access
+ * rights for @pkey to @init_val.
+@@ -927,6 +925,13 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
+ if (!boot_cpu_has(X86_FEATURE_OSPKE))
+ return -EINVAL;
+
++ /*
++ * This code should only be called with valid 'pkey'
++ * values originating from in-kernel users. Complain
++ * if a bad value is observed.
++ */
++ WARN_ON_ONCE(pkey >= arch_max_pkey());
++
+ /* Set the bits we need in PKRU: */
+ if (init_val & PKEY_DISABLE_ACCESS)
+ new_pkru_bits |= PKRU_AD_BIT;
+diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
+index 918b0d5bf2724..1c1c2649829ba 100644
+--- a/arch/x86/kvm/mmutrace.h
++++ b/arch/x86/kvm/mmutrace.h
+@@ -339,7 +339,7 @@ TRACE_EVENT(
+ /* These depend on page entry type, so compute them now. */
+ __field(bool, r)
+ __field(bool, x)
+- __field(u8, u)
++ __field(signed char, u)
+ ),
+
+ TP_fast_assign(
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 3aed03942d7d4..79fa55de635cc 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -4370,10 +4370,13 @@ long kvm_arch_vm_ioctl(struct file *filp,
+ r = -EFAULT;
+ if (copy_from_user(&u.ps, argp, sizeof u.ps))
+ goto out;
++ mutex_lock(&kvm->lock);
+ r = -ENXIO;
+ if (!kvm->arch.vpit)
+- goto out;
++ goto set_pit_out;
+ r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
++set_pit_out:
++ mutex_unlock(&kvm->lock);
+ break;
+ }
+ case KVM_GET_PIT2: {
+@@ -4393,10 +4396,13 @@ long kvm_arch_vm_ioctl(struct file *filp,
+ r = -EFAULT;
+ if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
+ goto out;
++ mutex_lock(&kvm->lock);
+ r = -ENXIO;
+ if (!kvm->arch.vpit)
+- goto set_pit2_out;
++ goto set_pit2_out;
+ r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
++set_pit2_out:
++ mutex_unlock(&kvm->lock);
+ break;
+ }
+ case KVM_REINJECT_CONTROL: {
+diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
+index 75d3776123cc0..2c3b4bcbe8f21 100644
+--- a/arch/x86/lib/usercopy_64.c
++++ b/arch/x86/lib/usercopy_64.c
+@@ -118,7 +118,7 @@ long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
+ */
+ if (size < 8) {
+ if (!IS_ALIGNED(dest, 4) || size != 4)
+- clean_cache_range(dst, 1);
++ clean_cache_range(dst, size);
+ } else {
+ if (!IS_ALIGNED(dest, 8)) {
+ dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index ebfc06f29f7b2..37aacb39e6922 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -1062,29 +1062,21 @@ void acpi_ec_unblock_transactions(void)
+ /* --------------------------------------------------------------------------
+ Event Management
+ -------------------------------------------------------------------------- */
+-static struct acpi_ec_query_handler *
+-acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler)
+-{
+- if (handler)
+- kref_get(&handler->kref);
+- return handler;
+-}
+-
+ static struct acpi_ec_query_handler *
+ acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value)
+ {
+ struct acpi_ec_query_handler *handler;
+- bool found = false;
+
+ mutex_lock(&ec->mutex);
+ list_for_each_entry(handler, &ec->list, node) {
+ if (value == handler->query_bit) {
+- found = true;
+- break;
++ kref_get(&handler->kref);
++ mutex_unlock(&ec->mutex);
++ return handler;
+ }
+ }
+ mutex_unlock(&ec->mutex);
+- return found ? acpi_ec_get_query_handler(handler) : NULL;
++ return NULL;
+ }
+
+ static void acpi_ec_query_handler_release(struct kref *kref)
+diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
+index 940ddbc59aa71..572848d933636 100644
+--- a/drivers/ata/acard-ahci.c
++++ b/drivers/ata/acard-ahci.c
+@@ -72,7 +72,7 @@ struct acard_sg {
+ __le32 size; /* bit 31 (EOT) max==0x10000 (64k) */
+ };
+
+-static void acard_ahci_qc_prep(struct ata_queued_cmd *qc);
++static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc);
+ static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
+ static int acard_ahci_port_start(struct ata_port *ap);
+ static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+@@ -257,7 +257,7 @@ static unsigned int acard_ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
+ return si;
+ }
+
+-static void acard_ahci_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc)
+ {
+ struct ata_port *ap = qc->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+@@ -295,6 +295,8 @@ static void acard_ahci_qc_prep(struct ata_queued_cmd *qc)
+ opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
+
+ ahci_fill_cmd_slot(pp, qc->tag, opts);
++
++ return AC_ERR_OK;
+ }
+
+ static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
+index 7473ff46de66c..1fe18a4983f0d 100644
+--- a/drivers/ata/libahci.c
++++ b/drivers/ata/libahci.c
+@@ -73,7 +73,7 @@ static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
+ static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
+ static int ahci_port_start(struct ata_port *ap);
+ static void ahci_port_stop(struct ata_port *ap);
+-static void ahci_qc_prep(struct ata_queued_cmd *qc);
++static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc);
+ static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
+ static void ahci_freeze(struct ata_port *ap);
+ static void ahci_thaw(struct ata_port *ap);
+@@ -1626,7 +1626,7 @@ static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
+ return sata_pmp_qc_defer_cmd_switch(qc);
+ }
+
+-static void ahci_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc)
+ {
+ struct ata_port *ap = qc->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+@@ -1662,6 +1662,8 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
+ opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
+
+ ahci_fill_cmd_slot(pp, qc->tag, opts);
++
++ return AC_ERR_OK;
+ }
+
+ static void ahci_fbs_dec_intr(struct ata_port *ap)
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index f90a20cad3fef..c28b0ca249075 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4986,7 +4986,10 @@ int ata_std_qc_defer(struct ata_queued_cmd *qc)
+ return ATA_DEFER_LINK;
+ }
+
+-void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
++enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc)
++{
++ return AC_ERR_OK;
++}
+
+ /**
+ * ata_sg_init - Associate command with scatter-gather table.
+@@ -5439,7 +5442,9 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
+ return;
+ }
+
+- ap->ops->qc_prep(qc);
++ qc->err_mask |= ap->ops->qc_prep(qc);
++ if (unlikely(qc->err_mask))
++ goto err;
+ trace_ata_qc_issue(qc);
+ qc->err_mask |= ap->ops->qc_issue(qc);
+ if (unlikely(qc->err_mask))
+diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
+index 8c36ff0c2dd49..7057630ccf520 100644
+--- a/drivers/ata/libata-sff.c
++++ b/drivers/ata/libata-sff.c
+@@ -2725,12 +2725,14 @@ static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+-void ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
++enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
+ {
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+- return;
++ return AC_ERR_OK;
+
+ ata_bmdma_fill_sg(qc);
++
++ return AC_ERR_OK;
+ }
+ EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
+
+@@ -2743,12 +2745,14 @@ EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+-void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
++enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
+ {
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+- return;
++ return AC_ERR_OK;
+
+ ata_bmdma_fill_sg_dumb(qc);
++
++ return AC_ERR_OK;
+ }
+ EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
+
+diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
+index 0adcb40d2794e..054a88ecd5ba3 100644
+--- a/drivers/ata/pata_macio.c
++++ b/drivers/ata/pata_macio.c
+@@ -507,7 +507,7 @@ static int pata_macio_cable_detect(struct ata_port *ap)
+ return ATA_CBL_PATA40;
+ }
+
+-static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
+ {
+ unsigned int write = (qc->tf.flags & ATA_TFLAG_WRITE);
+ struct ata_port *ap = qc->ap;
+@@ -520,7 +520,7 @@ static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
+ __func__, qc, qc->flags, write, qc->dev->devno);
+
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+- return;
++ return AC_ERR_OK;
+
+ table = (struct dbdma_cmd *) priv->dma_table_cpu;
+
+@@ -565,6 +565,8 @@ static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
+ table->command = cpu_to_le16(DBDMA_STOP);
+
+ dev_dbgdma(priv->dev, "%s: %d DMA list entries\n", __func__, pi);
++
++ return AC_ERR_OK;
+ }
+
+
+diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
+index f6c46e9a4dc0f..d7186a503e358 100644
+--- a/drivers/ata/pata_pxa.c
++++ b/drivers/ata/pata_pxa.c
+@@ -59,25 +59,27 @@ static void pxa_ata_dma_irq(void *d)
+ /*
+ * Prepare taskfile for submission.
+ */
+-static void pxa_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors pxa_qc_prep(struct ata_queued_cmd *qc)
+ {
+ struct pata_pxa_data *pd = qc->ap->private_data;
+ struct dma_async_tx_descriptor *tx;
+ enum dma_transfer_direction dir;
+
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+- return;
++ return AC_ERR_OK;
+
+ dir = (qc->dma_dir == DMA_TO_DEVICE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
+ tx = dmaengine_prep_slave_sg(pd->dma_chan, qc->sg, qc->n_elem, dir,
+ DMA_PREP_INTERRUPT);
+ if (!tx) {
+ ata_dev_err(qc->dev, "prep_slave_sg() failed\n");
+- return;
++ return AC_ERR_OK;
+ }
+ tx->callback = pxa_ata_dma_irq;
+ tx->callback_param = pd;
+ pd->dma_cookie = dmaengine_submit(tx);
++
++ return AC_ERR_OK;
+ }
+
+ /*
+diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
+index f1e873a37465e..096b4771b19da 100644
+--- a/drivers/ata/pdc_adma.c
++++ b/drivers/ata/pdc_adma.c
+@@ -132,7 +132,7 @@ static int adma_ata_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+ static int adma_port_start(struct ata_port *ap);
+ static void adma_port_stop(struct ata_port *ap);
+-static void adma_qc_prep(struct ata_queued_cmd *qc);
++static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc);
+ static unsigned int adma_qc_issue(struct ata_queued_cmd *qc);
+ static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
+ static void adma_freeze(struct ata_port *ap);
+@@ -311,7 +311,7 @@ static int adma_fill_sg(struct ata_queued_cmd *qc)
+ return i;
+ }
+
+-static void adma_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc)
+ {
+ struct adma_port_priv *pp = qc->ap->private_data;
+ u8 *buf = pp->pkt;
+@@ -322,7 +322,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc)
+
+ adma_enter_reg_mode(qc->ap);
+ if (qc->tf.protocol != ATA_PROT_DMA)
+- return;
++ return AC_ERR_OK;
+
+ buf[i++] = 0; /* Response flags */
+ buf[i++] = 0; /* reserved */
+@@ -387,6 +387,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc)
+ printk("%s\n", obuf);
+ }
+ #endif
++ return AC_ERR_OK;
+ }
+
+ static inline void adma_packet_start(struct ata_queued_cmd *qc)
+diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
+index 95bf3abda6f65..5a94659064b40 100644
+--- a/drivers/ata/sata_fsl.c
++++ b/drivers/ata/sata_fsl.c
+@@ -513,7 +513,7 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
+ return num_prde;
+ }
+
+-static void sata_fsl_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors sata_fsl_qc_prep(struct ata_queued_cmd *qc)
+ {
+ struct ata_port *ap = qc->ap;
+ struct sata_fsl_port_priv *pp = ap->private_data;
+@@ -559,6 +559,8 @@ static void sata_fsl_qc_prep(struct ata_queued_cmd *qc)
+
+ VPRINTK("SATA FSL : xx_qc_prep, di = 0x%x, ttl = %d, num_prde = %d\n",
+ desc_info, ttl_dwords, num_prde);
++
++ return AC_ERR_OK;
+ }
+
+ static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc)
+diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
+index 9b6d7930d1c79..6c7ddc037fce9 100644
+--- a/drivers/ata/sata_inic162x.c
++++ b/drivers/ata/sata_inic162x.c
+@@ -472,7 +472,7 @@ static void inic_fill_sg(struct inic_prd *prd, struct ata_queued_cmd *qc)
+ prd[-1].flags |= PRD_END;
+ }
+
+-static void inic_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors inic_qc_prep(struct ata_queued_cmd *qc)
+ {
+ struct inic_port_priv *pp = qc->ap->private_data;
+ struct inic_pkt *pkt = pp->pkt;
+@@ -532,6 +532,8 @@ static void inic_qc_prep(struct ata_queued_cmd *qc)
+ inic_fill_sg(prd, qc);
+
+ pp->cpb_tbl[0] = pp->pkt_dma;
++
++ return AC_ERR_OK;
+ }
+
+ static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
+diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
+index 3b2246dded74f..d85965bab2e27 100644
+--- a/drivers/ata/sata_mv.c
++++ b/drivers/ata/sata_mv.c
+@@ -605,8 +605,8 @@ static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
+ static int mv_port_start(struct ata_port *ap);
+ static void mv_port_stop(struct ata_port *ap);
+ static int mv_qc_defer(struct ata_queued_cmd *qc);
+-static void mv_qc_prep(struct ata_queued_cmd *qc);
+-static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
++static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc);
++static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc);
+ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
+ static int mv_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
+@@ -2044,7 +2044,7 @@ static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
+ * LOCKING:
+ * Inherited from caller.
+ */
+-static void mv_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc)
+ {
+ struct ata_port *ap = qc->ap;
+ struct mv_port_priv *pp = ap->private_data;
+@@ -2056,15 +2056,15 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
+ switch (tf->protocol) {
+ case ATA_PROT_DMA:
+ if (tf->command == ATA_CMD_DSM)
+- return;
++ return AC_ERR_OK;
+ /* fall-thru */
+ case ATA_PROT_NCQ:
+ break; /* continue below */
+ case ATA_PROT_PIO:
+ mv_rw_multi_errata_sata24(qc);
+- return;
++ return AC_ERR_OK;
+ default:
+- return;
++ return AC_ERR_OK;
+ }
+
+ /* Fill in command request block
+@@ -2111,12 +2111,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
+ * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
+ * of which are defined/used by Linux. If we get here, this
+ * driver needs work.
+- *
+- * FIXME: modify libata to give qc_prep a return value and
+- * return error here.
+ */
+- BUG_ON(tf->command);
+- break;
++ ata_port_err(ap, "%s: unsupported command: %.2x\n", __func__,
++ tf->command);
++ return AC_ERR_INVALID;
+ }
+ mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
+ mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
+@@ -2129,8 +2127,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
+ mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
+
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+- return;
++ return AC_ERR_OK;
+ mv_fill_sg(qc);
++
++ return AC_ERR_OK;
+ }
+
+ /**
+@@ -2145,7 +2145,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
+ * LOCKING:
+ * Inherited from caller.
+ */
+-static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
++static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc)
+ {
+ struct ata_port *ap = qc->ap;
+ struct mv_port_priv *pp = ap->private_data;
+@@ -2156,9 +2156,9 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
+
+ if ((tf->protocol != ATA_PROT_DMA) &&
+ (tf->protocol != ATA_PROT_NCQ))
+- return;
++ return AC_ERR_OK;
+ if (tf->command == ATA_CMD_DSM)
+- return; /* use bmdma for this */
++ return AC_ERR_OK; /* use bmdma for this */
+
+ /* Fill in Gen IIE command request block */
+ if (!(tf->flags & ATA_TFLAG_WRITE))
+@@ -2199,8 +2199,10 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
+ );
+
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+- return;
++ return AC_ERR_OK;
+ mv_fill_sg(qc);
++
++ return AC_ERR_OK;
+ }
+
+ /**
+diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
+index 8c683ddd0f580..efd5a0592855b 100644
+--- a/drivers/ata/sata_nv.c
++++ b/drivers/ata/sata_nv.c
+@@ -313,7 +313,7 @@ static void nv_ck804_freeze(struct ata_port *ap);
+ static void nv_ck804_thaw(struct ata_port *ap);
+ static int nv_adma_slave_config(struct scsi_device *sdev);
+ static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
+-static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
++static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
+ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
+ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
+ static void nv_adma_irq_clear(struct ata_port *ap);
+@@ -335,7 +335,7 @@ static void nv_mcp55_freeze(struct ata_port *ap);
+ static void nv_swncq_error_handler(struct ata_port *ap);
+ static int nv_swncq_slave_config(struct scsi_device *sdev);
+ static int nv_swncq_port_start(struct ata_port *ap);
+-static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
++static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
+ static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
+ static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
+ static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
+@@ -1382,7 +1382,7 @@ static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
+ return 1;
+ }
+
+-static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc)
+ {
+ struct nv_adma_port_priv *pp = qc->ap->private_data;
+ struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
+@@ -1394,7 +1394,7 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
+ (qc->flags & ATA_QCFLAG_DMAMAP));
+ nv_adma_register_mode(qc->ap);
+ ata_bmdma_qc_prep(qc);
+- return;
++ return AC_ERR_OK;
+ }
+
+ cpb->resp_flags = NV_CPB_RESP_DONE;
+@@ -1426,6 +1426,8 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
+ cpb->ctl_flags = ctl_flags;
+ wmb();
+ cpb->resp_flags = 0;
++
++ return AC_ERR_OK;
+ }
+
+ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
+@@ -1989,17 +1991,19 @@ static int nv_swncq_port_start(struct ata_port *ap)
+ return 0;
+ }
+
+-static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc)
+ {
+ if (qc->tf.protocol != ATA_PROT_NCQ) {
+ ata_bmdma_qc_prep(qc);
+- return;
++ return AC_ERR_OK;
+ }
+
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+- return;
++ return AC_ERR_OK;
+
+ nv_swncq_fill_sg(qc);
++
++ return AC_ERR_OK;
+ }
+
+ static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
+diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
+index d032bf657f709..29d2bb465f60d 100644
+--- a/drivers/ata/sata_promise.c
++++ b/drivers/ata/sata_promise.c
+@@ -155,7 +155,7 @@ static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 va
+ static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+ static int pdc_common_port_start(struct ata_port *ap);
+ static int pdc_sata_port_start(struct ata_port *ap);
+-static void pdc_qc_prep(struct ata_queued_cmd *qc);
++static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc);
+ static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
+ static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
+ static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
+@@ -649,7 +649,7 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
+ prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+ }
+
+-static void pdc_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc)
+ {
+ struct pdc_port_priv *pp = qc->ap->private_data;
+ unsigned int i;
+@@ -681,6 +681,8 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
+ default:
+ break;
+ }
++
++ return AC_ERR_OK;
+ }
+
+ static int pdc_is_sataii_tx4(unsigned long flags)
+diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
+index 1fe941688e95d..a66d10628c183 100644
+--- a/drivers/ata/sata_qstor.c
++++ b/drivers/ata/sata_qstor.c
+@@ -116,7 +116,7 @@ static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
+ static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+ static int qs_port_start(struct ata_port *ap);
+ static void qs_host_stop(struct ata_host *host);
+-static void qs_qc_prep(struct ata_queued_cmd *qc);
++static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc);
+ static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
+ static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
+ static void qs_freeze(struct ata_port *ap);
+@@ -276,7 +276,7 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
+ return si;
+ }
+
+-static void qs_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc)
+ {
+ struct qs_port_priv *pp = qc->ap->private_data;
+ u8 dflags = QS_DF_PORD, *buf = pp->pkt;
+@@ -288,7 +288,7 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
+
+ qs_enter_reg_mode(qc->ap);
+ if (qc->tf.protocol != ATA_PROT_DMA)
+- return;
++ return AC_ERR_OK;
+
+ nelem = qs_fill_sg(qc);
+
+@@ -311,6 +311,8 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
+
+ /* frame information structure (FIS) */
+ ata_tf_to_fis(&qc->tf, 0, 1, &buf[32]);
++
++ return AC_ERR_OK;
+ }
+
+ static inline void qs_packet_start(struct ata_queued_cmd *qc)
+diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
+index 3e82a4ac239e7..a443c69434732 100644
+--- a/drivers/ata/sata_rcar.c
++++ b/drivers/ata/sata_rcar.c
+@@ -551,12 +551,14 @@ static void sata_rcar_bmdma_fill_sg(struct ata_queued_cmd *qc)
+ prd[si - 1].addr |= cpu_to_le32(SATA_RCAR_DTEND);
+ }
+
+-static void sata_rcar_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors sata_rcar_qc_prep(struct ata_queued_cmd *qc)
+ {
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+- return;
++ return AC_ERR_OK;
+
+ sata_rcar_bmdma_fill_sg(qc);
++
++ return AC_ERR_OK;
+ }
+
+ static void sata_rcar_bmdma_setup(struct ata_queued_cmd *qc)
+diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
+index ed76f070d21e4..82adaf02887fb 100644
+--- a/drivers/ata/sata_sil.c
++++ b/drivers/ata/sata_sil.c
+@@ -119,7 +119,7 @@ static void sil_dev_config(struct ata_device *dev);
+ static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+ static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
+ static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed);
+-static void sil_qc_prep(struct ata_queued_cmd *qc);
++static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc);
+ static void sil_bmdma_setup(struct ata_queued_cmd *qc);
+ static void sil_bmdma_start(struct ata_queued_cmd *qc);
+ static void sil_bmdma_stop(struct ata_queued_cmd *qc);
+@@ -333,12 +333,14 @@ static void sil_fill_sg(struct ata_queued_cmd *qc)
+ last_prd->flags_len |= cpu_to_le32(ATA_PRD_EOT);
+ }
+
+-static void sil_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc)
+ {
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+- return;
++ return AC_ERR_OK;
+
+ sil_fill_sg(qc);
++
++ return AC_ERR_OK;
+ }
+
+ static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
+diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
+index 4b1995e2d044b..ffa3bf724054d 100644
+--- a/drivers/ata/sata_sil24.c
++++ b/drivers/ata/sata_sil24.c
+@@ -336,7 +336,7 @@ static void sil24_dev_config(struct ata_device *dev);
+ static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val);
+ static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val);
+ static int sil24_qc_defer(struct ata_queued_cmd *qc);
+-static void sil24_qc_prep(struct ata_queued_cmd *qc);
++static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc);
+ static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
+ static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc);
+ static void sil24_pmp_attach(struct ata_port *ap);
+@@ -840,7 +840,7 @@ static int sil24_qc_defer(struct ata_queued_cmd *qc)
+ return ata_std_qc_defer(qc);
+ }
+
+-static void sil24_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc)
+ {
+ struct ata_port *ap = qc->ap;
+ struct sil24_port_priv *pp = ap->private_data;
+@@ -884,6 +884,8 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
+
+ if (qc->flags & ATA_QCFLAG_DMAMAP)
+ sil24_fill_sg(qc, sge);
++
++ return AC_ERR_OK;
+ }
+
+ static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
+diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
+index 405e606a234d1..0d742457925ec 100644
+--- a/drivers/ata/sata_sx4.c
++++ b/drivers/ata/sata_sx4.c
+@@ -218,7 +218,7 @@ static void pdc_error_handler(struct ata_port *ap);
+ static void pdc_freeze(struct ata_port *ap);
+ static void pdc_thaw(struct ata_port *ap);
+ static int pdc_port_start(struct ata_port *ap);
+-static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
++static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc);
+ static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
+ static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
+ static unsigned int pdc20621_dimm_init(struct ata_host *host);
+@@ -546,7 +546,7 @@ static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
+ VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
+ }
+
+-static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc)
+ {
+ switch (qc->tf.protocol) {
+ case ATA_PROT_DMA:
+@@ -558,6 +558,8 @@ static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
+ default:
+ break;
+ }
++
++ return AC_ERR_OK;
+ }
+
+ static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
+diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
+index a106d15f6def0..ba549d9454799 100644
+--- a/drivers/atm/eni.c
++++ b/drivers/atm/eni.c
+@@ -2243,7 +2243,7 @@ static int eni_init_one(struct pci_dev *pci_dev,
+
+ rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
+ if (rc < 0)
+- goto out;
++ goto err_disable;
+
+ rc = -ENOMEM;
+ eni_dev = kmalloc(sizeof(struct eni_dev), GFP_KERNEL);
+diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
+index 6210bff46341e..52fadc46e7a97 100644
+--- a/drivers/char/tlclk.c
++++ b/drivers/char/tlclk.c
+@@ -777,17 +777,21 @@ static int __init tlclk_init(void)
+ {
+ int ret;
+
++ telclk_interrupt = (inb(TLCLK_REG7) & 0x0f);
++
++ alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL);
++ if (!alarm_events) {
++ ret = -ENOMEM;
++ goto out1;
++ }
++
+ ret = register_chrdev(tlclk_major, "telco_clock", &tlclk_fops);
+ if (ret < 0) {
+ printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major);
++ kfree(alarm_events);
+ return ret;
+ }
+ tlclk_major = ret;
+- alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL);
+- if (!alarm_events) {
+- ret = -ENOMEM;
+- goto out1;
+- }
+
+ /* Read telecom clock IRQ number (Set by BIOS) */
+ if (!request_region(TLCLK_BASE, 8, "telco_clock")) {
+@@ -796,7 +800,6 @@ static int __init tlclk_init(void)
+ ret = -EBUSY;
+ goto out2;
+ }
+- telclk_interrupt = (inb(TLCLK_REG7) & 0x0f);
+
+ if (0x0F == telclk_interrupt ) { /* not MCPBL0010 ? */
+ printk(KERN_ERR "telclk_interrupt = 0x%x non-mcpbl0010 hw.\n",
+@@ -837,8 +840,8 @@ out3:
+ release_region(TLCLK_BASE, 8);
+ out2:
+ kfree(alarm_events);
+-out1:
+ unregister_chrdev(tlclk_major, "telco_clock");
++out1:
+ return ret;
+ }
+
+diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
+index 569e93e1f06cc..3ba67bc6baba0 100644
+--- a/drivers/char/tpm/tpm_ibmvtpm.c
++++ b/drivers/char/tpm/tpm_ibmvtpm.c
+@@ -588,6 +588,7 @@ static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
+ */
+ while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
+ ibmvtpm_crq_process(crq, ibmvtpm);
++ wake_up_interruptible(&ibmvtpm->crq_queue.wq);
+ crq->valid = 0;
+ smp_wmb();
+ }
+@@ -635,6 +636,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
+ }
+
+ crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
++ init_waitqueue_head(&crq_q->wq);
+ ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
+ CRQ_RES_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
+@@ -687,6 +689,13 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
+ if (rc)
+ goto init_irq_cleanup;
+
++ if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
++ ibmvtpm->rtce_buf != NULL,
++ HZ)) {
++ dev_err(dev, "CRQ response timed out\n");
++ goto init_irq_cleanup;
++ }
++
+ return tpm_chip_register(chip);
+ init_irq_cleanup:
+ do {
+diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h
+index 91dfe766d0800..4f6a124601db4 100644
+--- a/drivers/char/tpm/tpm_ibmvtpm.h
++++ b/drivers/char/tpm/tpm_ibmvtpm.h
+@@ -31,6 +31,7 @@ struct ibmvtpm_crq_queue {
+ struct ibmvtpm_crq *crq_addr;
+ u32 index;
+ u32 num_entry;
++ wait_queue_head_t wq;
+ };
+
+ struct ibmvtpm_dev {
+diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c
+index d6036c788fab8..2738ecb1511b1 100644
+--- a/drivers/clk/ti/adpll.c
++++ b/drivers/clk/ti/adpll.c
+@@ -193,15 +193,8 @@ static const char *ti_adpll_clk_get_name(struct ti_adpll_data *d,
+ if (err)
+ return NULL;
+ } else {
+- const char *base_name = "adpll";
+- char *buf;
+-
+- buf = devm_kzalloc(d->dev, 8 + 1 + strlen(base_name) + 1 +
+- strlen(postfix), GFP_KERNEL);
+- if (!buf)
+- return NULL;
+- sprintf(buf, "%08lx.%s.%s", d->pa, base_name, postfix);
+- name = buf;
++ name = devm_kasprintf(d->dev, GFP_KERNEL, "%08lx.adpll.%s",
++ d->pa, postfix);
+ }
+
+ return name;
+diff --git a/drivers/clocksource/h8300_timer8.c b/drivers/clocksource/h8300_timer8.c
+index 1d740a8c42ab3..47114c2a7cb54 100644
+--- a/drivers/clocksource/h8300_timer8.c
++++ b/drivers/clocksource/h8300_timer8.c
+@@ -169,7 +169,7 @@ static int __init h8300_8timer_init(struct device_node *node)
+ return PTR_ERR(clk);
+ }
+
+- ret = ENXIO;
++ ret = -ENXIO;
+ base = of_iomap(node, 0);
+ if (!base) {
+ pr_err("failed to map registers for clockevent\n");
+diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
+index 25c9a6cdd8614..dc81fc2bf8015 100644
+--- a/drivers/cpufreq/powernv-cpufreq.c
++++ b/drivers/cpufreq/powernv-cpufreq.c
+@@ -864,6 +864,7 @@ static struct notifier_block powernv_cpufreq_reboot_nb = {
+ void powernv_cpufreq_work_fn(struct work_struct *work)
+ {
+ struct chip *chip = container_of(work, struct chip, throttle);
++ struct cpufreq_policy *policy;
+ unsigned int cpu;
+ cpumask_t mask;
+
+@@ -878,12 +879,14 @@ void powernv_cpufreq_work_fn(struct work_struct *work)
+ chip->restore = false;
+ for_each_cpu(cpu, &mask) {
+ int index;
+- struct cpufreq_policy policy;
+
+- cpufreq_get_policy(&policy, cpu);
+- index = cpufreq_table_find_index_c(&policy, policy.cur);
+- powernv_cpufreq_target_index(&policy, index);
+- cpumask_andnot(&mask, &mask, policy.cpus);
++ policy = cpufreq_cpu_get(cpu);
++ if (!policy)
++ continue;
++ index = cpufreq_table_find_index_c(policy, policy->cur);
++ powernv_cpufreq_target_index(policy, index);
++ cpumask_andnot(&mask, &mask, policy->cpus);
++ cpufreq_cpu_put(policy);
+ }
+ out:
+ put_online_cpus();
+diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
+index 6627a7dce95c4..f6a2dd6b188fa 100644
+--- a/drivers/devfreq/tegra-devfreq.c
++++ b/drivers/devfreq/tegra-devfreq.c
+@@ -79,6 +79,8 @@
+
+ #define KHZ 1000
+
++#define KHZ_MAX (ULONG_MAX / KHZ)
++
+ /* Assume that the bus is saturated if the utilization is 25% */
+ #define BUS_SATURATION_RATIO 25
+
+@@ -179,7 +181,7 @@ struct tegra_actmon_emc_ratio {
+ };
+
+ static struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
+- { 1400000, ULONG_MAX },
++ { 1400000, KHZ_MAX },
+ { 1200000, 750000 },
+ { 1100000, 600000 },
+ { 1000000, 500000 },
+diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
+index 3402494cadf99..78e098b4bd89e 100644
+--- a/drivers/dma/tegra20-apb-dma.c
++++ b/drivers/dma/tegra20-apb-dma.c
+@@ -1208,8 +1208,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc)
+
+ dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);
+
+- if (tdc->busy)
+- tegra_dma_terminate_all(dc);
++ tegra_dma_terminate_all(dc);
+
+ spin_lock_irqsave(&tdc->lock, flags);
+ list_splice_init(&tdc->pending_sg_req, &sg_req_list);
+diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
+index 6d86d05e53aa1..66d6640766ed8 100644
+--- a/drivers/dma/xilinx/zynqmp_dma.c
++++ b/drivers/dma/xilinx/zynqmp_dma.c
+@@ -125,10 +125,12 @@
+ /* Max transfer size per descriptor */
+ #define ZYNQMP_DMA_MAX_TRANS_LEN 0x40000000
+
++/* Max burst lengths */
++#define ZYNQMP_DMA_MAX_DST_BURST_LEN 32768U
++#define ZYNQMP_DMA_MAX_SRC_BURST_LEN 32768U
++
+ /* Reset values for data attributes */
+ #define ZYNQMP_DMA_AXCACHE_VAL 0xF
+-#define ZYNQMP_DMA_ARLEN_RST_VAL 0xF
+-#define ZYNQMP_DMA_AWLEN_RST_VAL 0xF
+
+ #define ZYNQMP_DMA_SRC_ISSUE_RST_VAL 0x1F
+
+@@ -527,17 +529,19 @@ static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status)
+
+ static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
+ {
+- u32 val;
++ u32 val, burst_val;
+
+ val = readl(chan->regs + ZYNQMP_DMA_CTRL0);
+ val |= ZYNQMP_DMA_POINT_TYPE_SG;
+ writel(val, chan->regs + ZYNQMP_DMA_CTRL0);
+
+ val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR);
++ burst_val = __ilog2_u32(chan->src_burst_len);
+ val = (val & ~ZYNQMP_DMA_ARLEN) |
+- (chan->src_burst_len << ZYNQMP_DMA_ARLEN_OFST);
++ ((burst_val << ZYNQMP_DMA_ARLEN_OFST) & ZYNQMP_DMA_ARLEN);
++ burst_val = __ilog2_u32(chan->dst_burst_len);
+ val = (val & ~ZYNQMP_DMA_AWLEN) |
+- (chan->dst_burst_len << ZYNQMP_DMA_AWLEN_OFST);
++ ((burst_val << ZYNQMP_DMA_AWLEN_OFST) & ZYNQMP_DMA_AWLEN);
+ writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);
+ }
+
+@@ -551,8 +555,10 @@ static int zynqmp_dma_device_config(struct dma_chan *dchan,
+ {
+ struct zynqmp_dma_chan *chan = to_chan(dchan);
+
+- chan->src_burst_len = config->src_maxburst;
+- chan->dst_burst_len = config->dst_maxburst;
++ chan->src_burst_len = clamp(config->src_maxburst, 1U,
++ ZYNQMP_DMA_MAX_SRC_BURST_LEN);
++ chan->dst_burst_len = clamp(config->dst_maxburst, 1U,
++ ZYNQMP_DMA_MAX_DST_BURST_LEN);
+
+ return 0;
+ }
+@@ -873,8 +879,8 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
+ return PTR_ERR(chan->regs);
+
+ chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64;
+- chan->dst_burst_len = ZYNQMP_DMA_AWLEN_RST_VAL;
+- chan->src_burst_len = ZYNQMP_DMA_ARLEN_RST_VAL;
++ chan->dst_burst_len = ZYNQMP_DMA_MAX_DST_BURST_LEN;
++ chan->src_burst_len = ZYNQMP_DMA_MAX_SRC_BURST_LEN;
+ err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width);
+ if (err < 0) {
+ dev_err(&pdev->dev, "missing xlnx,bus-width property\n");
+diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
+index d69aa2e179bbe..e4874cc209ef4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/atom.c
++++ b/drivers/gpu/drm/amd/amdgpu/atom.c
+@@ -742,8 +742,8 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
+ cjiffies = jiffies;
+ if (time_after(cjiffies, ctx->last_jump_jiffies)) {
+ cjiffies -= ctx->last_jump_jiffies;
+- if ((jiffies_to_msecs(cjiffies) > 5000)) {
+- DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n");
++ if ((jiffies_to_msecs(cjiffies) > 10000)) {
++ DRM_ERROR("atombios stuck in loop for more than 10secs aborting\n");
+ ctx->abort = true;
+ }
+ } else {
+diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
+index 17db4b4749d5a..2e8479744ca4a 100644
+--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
++++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
+@@ -415,6 +415,8 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ struct gma_clock_t clock;
+
++ memset(&clock, 0, sizeof(clock));
++
+ switch (refclk) {
+ case 27000:
+ if (target < 200000) {
+diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+index 9635704a1d864..4561a786fab07 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
++++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+@@ -161,8 +161,11 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf,
+ }
+
+ ret = pm_runtime_get_sync(drm->dev);
+- if (ret < 0 && ret != -EACCES)
++ if (ret < 0 && ret != -EACCES) {
++ pm_runtime_put_autosuspend(drm->dev);
+ return ret;
++ }
++
+ ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args));
+ pm_runtime_put_autosuspend(drm->dev);
+ if (ret < 0)
+diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
+index bf626acae2712..cd8e9b799b9a5 100644
+--- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
++++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
+@@ -193,7 +193,7 @@ static int __init omapdss_boot_init(void)
+ dss = of_find_matching_node(NULL, omapdss_of_match);
+
+ if (dss == NULL || !of_device_is_available(dss))
+- return 0;
++ goto put_node;
+
+ omapdss_walk_device(dss, true);
+
+@@ -218,6 +218,8 @@ static int __init omapdss_boot_init(void)
+ kfree(n);
+ }
+
++put_node:
++ of_node_put(dss);
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
+index 8f71157a2b063..3020ae7a8f6b1 100644
+--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
+@@ -1115,6 +1115,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi)
+ card->num_links = 1;
+ card->name = "vc4-hdmi";
+ card->dev = dev;
++ card->owner = THIS_MODULE;
+
+ /*
+ * Be careful, snd_soc_register_card() calls dev_set_drvdata() and
1402 +diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
1403 +index 7b961c9c62eff..9f2aa45560e62 100644
1404 +--- a/drivers/i2c/i2c-core-base.c
1405 ++++ b/drivers/i2c/i2c-core-base.c
1406 +@@ -1280,8 +1280,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
1407 +
1408 + /* create pre-declared device nodes */
1409 + of_i2c_register_devices(adap);
1410 +- i2c_acpi_register_devices(adap);
1411 + i2c_acpi_install_space_handler(adap);
1412 ++ i2c_acpi_register_devices(adap);
1413 +
1414 + if (adap->nr < __i2c_first_dynamic_bus_num)
1415 + i2c_scan_static_board_info(adap);
1416 +diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
1417 +index c3e5f921da12e..4002a8ddf6d0a 100644
1418 +--- a/drivers/infiniband/core/ucma.c
1419 ++++ b/drivers/infiniband/core/ucma.c
1420 +@@ -1315,13 +1315,13 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
1421 + if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1422 + return -EFAULT;
1423 +
1424 ++ if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
1425 ++ return -EINVAL;
1426 ++
1427 + ctx = ucma_get_ctx(file, cmd.id);
1428 + if (IS_ERR(ctx))
1429 + return PTR_ERR(ctx);
1430 +
1431 +- if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
1432 +- return -EINVAL;
1433 +-
1434 + optval = memdup_user((void __user *) (unsigned long) cmd.optval,
1435 + cmd.optlen);
1436 + if (IS_ERR(optval)) {
1437 +diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
1438 +index 7eb1cc1b1aa04..5aa545f9a4232 100644
1439 +--- a/drivers/infiniband/hw/cxgb4/cm.c
1440 ++++ b/drivers/infiniband/hw/cxgb4/cm.c
1441 +@@ -3265,7 +3265,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1442 + if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) {
1443 + err = pick_local_ipaddrs(dev, cm_id);
1444 + if (err)
1445 +- goto fail2;
1446 ++ goto fail3;
1447 + }
1448 +
1449 + /* find a route */
1450 +@@ -3287,7 +3287,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1451 + if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
1452 + err = pick_local_ip6addrs(dev, cm_id);
1453 + if (err)
1454 +- goto fail2;
1455 ++ goto fail3;
1456 + }
1457 +
1458 + /* find a route */
1459 +diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
1460 +index 880c63579ba88..adec03412506d 100644
1461 +--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
1462 ++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
1463 +@@ -2052,9 +2052,9 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
1464 + dst = i40iw_get_dst_ipv6(&src_addr, &dst_addr);
1465 + if (!dst || dst->error) {
1466 + if (dst) {
1467 +- dst_release(dst);
1468 + i40iw_pr_err("ip6_route_output returned dst->error = %d\n",
1469 + dst->error);
1470 ++ dst_release(dst);
1471 + }
1472 + return rc;
1473 + }
1474 +diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
1475 +index 25267a620e0b5..e6770e5c1432c 100644
1476 +--- a/drivers/infiniband/sw/rxe/rxe.c
1477 ++++ b/drivers/infiniband/sw/rxe/rxe.c
1478 +@@ -126,6 +126,8 @@ static int rxe_init_device_param(struct rxe_dev *rxe)
1479 + rxe->attr.max_fast_reg_page_list_len = RXE_MAX_FMR_PAGE_LIST_LEN;
1480 + rxe->attr.max_pkeys = RXE_MAX_PKEYS;
1481 + rxe->attr.local_ca_ack_delay = RXE_LOCAL_CA_ACK_DELAY;
1482 ++ addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid,
1483 ++ rxe->ndev->dev_addr);
1484 +
1485 + rxe->max_ucontext = RXE_MAX_UCONTEXT;
1486 +
1487 +diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
1488 +index 25055a68a2c07..ef7fd5dfad468 100644
1489 +--- a/drivers/infiniband/sw/rxe/rxe_qp.c
1490 ++++ b/drivers/infiniband/sw/rxe/rxe_qp.c
1491 +@@ -593,15 +593,16 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
1492 + struct ib_gid_attr sgid_attr;
1493 +
1494 + if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
1495 +- int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);
1496 ++ int max_rd_atomic = attr->max_rd_atomic ?
1497 ++ roundup_pow_of_two(attr->max_rd_atomic) : 0;
1498 +
1499 + qp->attr.max_rd_atomic = max_rd_atomic;
1500 + atomic_set(&qp->req.rd_atomic, max_rd_atomic);
1501 + }
1502 +
1503 + if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1504 +- int max_dest_rd_atomic =
1505 +- __roundup_pow_of_two(attr->max_dest_rd_atomic);
1506 ++ int max_dest_rd_atomic = attr->max_dest_rd_atomic ?
1507 ++ roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;
1508 +
1509 + qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;
1510 +
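
[The rxe hunk above guards the rounding because the kernel's roundup_pow_of_two() is undefined for a zero argument. A minimal sketch of the same guard; roundup_p2() here is a plain loop stand-in, not the kernel helper.

    #include <stdio.h>

    static unsigned int roundup_p2(unsigned int x)
    {
        unsigned int r = 1;

        while (r < x)
            r <<= 1;
        return r;
    }

    static unsigned int max_rd_atomic(unsigned int requested)
    {
        /* map 0 to 0 explicitly instead of feeding it to the rounding */
        return requested ? roundup_p2(requested) : 0;
    }

    int main(void)
    {
        printf("%u %u %u\n", max_rd_atomic(0), max_rd_atomic(3),
               max_rd_atomic(8));      /* 0 4 8 */
        return 0;
    }
]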
1511 +diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
1512 +index e4a3f692057b8..a3763d664a67a 100644
1513 +--- a/drivers/md/bcache/bcache.h
1514 ++++ b/drivers/md/bcache/bcache.h
1515 +@@ -548,6 +548,7 @@ struct cache_set {
1516 + */
1517 + wait_queue_head_t btree_cache_wait;
1518 + struct task_struct *btree_cache_alloc_lock;
1519 ++ spinlock_t btree_cannibalize_lock;
1520 +
1521 + /*
1522 + * When we free a btree node, we increment the gen of the bucket the
1523 +diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
1524 +index 9fca837d0b41e..fba0fff8040d6 100644
1525 +--- a/drivers/md/bcache/btree.c
1526 ++++ b/drivers/md/bcache/btree.c
1527 +@@ -840,15 +840,17 @@ out:
1528 +
1529 + static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
1530 + {
1531 +- struct task_struct *old;
1532 +-
1533 +- old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
1534 +- if (old && old != current) {
1535 ++ spin_lock(&c->btree_cannibalize_lock);
1536 ++ if (likely(c->btree_cache_alloc_lock == NULL)) {
1537 ++ c->btree_cache_alloc_lock = current;
1538 ++ } else if (c->btree_cache_alloc_lock != current) {
1539 + if (op)
1540 + prepare_to_wait(&c->btree_cache_wait, &op->wait,
1541 + TASK_UNINTERRUPTIBLE);
1542 ++ spin_unlock(&c->btree_cannibalize_lock);
1543 + return -EINTR;
1544 + }
1545 ++ spin_unlock(&c->btree_cannibalize_lock);
1546 +
1547 + return 0;
1548 + }
1549 +@@ -883,10 +885,12 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
1550 + */
1551 + static void bch_cannibalize_unlock(struct cache_set *c)
1552 + {
1553 ++ spin_lock(&c->btree_cannibalize_lock);
1554 + if (c->btree_cache_alloc_lock == current) {
1555 + c->btree_cache_alloc_lock = NULL;
1556 + wake_up(&c->btree_cache_wait);
1557 + }
1558 ++ spin_unlock(&c->btree_cannibalize_lock);
1559 + }
1560 +
1561 + static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
1562 +diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
1563 +index 7fcc1ba12bc01..6bf1559a1f0db 100644
1564 +--- a/drivers/md/bcache/super.c
1565 ++++ b/drivers/md/bcache/super.c
1566 +@@ -1510,6 +1510,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
1567 + sema_init(&c->sb_write_mutex, 1);
1568 + mutex_init(&c->bucket_lock);
1569 + init_waitqueue_head(&c->btree_cache_wait);
1570 ++ spin_lock_init(&c->btree_cannibalize_lock);
1571 + init_waitqueue_head(&c->bucket_wait);
1572 + init_waitqueue_head(&c->gc_wait);
1573 + sema_init(&c->uuid_write_mutex, 1);
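
[The bcache hunks above replace a bare cmpxchg with a spinlock so the owner check and the decision to sleep happen atomically with respect to unlock-and-wake, closing a lost-wakeup window. A simplified pthread sketch of that single-owner lock; the real code parks on a waitqueue rather than returning an error.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;
    static pthread_t owner;
    static int owned;

    static int try_lock_cannibalize(void)
    {
        int ret = 0;

        pthread_mutex_lock(&lk);
        if (!owned) {
            owned = 1;
            owner = pthread_self();
        } else if (!pthread_equal(owner, pthread_self())) {
            ret = -1;               /* caller would register on a waitqueue */
        }
        pthread_mutex_unlock(&lk);
        return ret;
    }

    static void unlock_cannibalize(void)
    {
        pthread_mutex_lock(&lk);
        if (owned && pthread_equal(owner, pthread_self()))
            owned = 0;              /* wake_up() would go here, still locked */
        pthread_mutex_unlock(&lk);
    }

    int main(void)
    {
        printf("%d\n", try_lock_cannibalize());  /* 0: acquired */
        printf("%d\n", try_lock_cannibalize());  /* 0: re-entry by owner */
        unlock_cannibalize();
        return 0;
    }
]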
1574 +diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
1575 +index a59f4fd09df60..27466b0d0be86 100644
1576 +--- a/drivers/media/dvb-frontends/tda10071.c
1577 ++++ b/drivers/media/dvb-frontends/tda10071.c
1578 +@@ -483,10 +483,11 @@ static int tda10071_read_status(struct dvb_frontend *fe, enum fe_status *status)
1579 + goto error;
1580 +
1581 + if (dev->delivery_system == SYS_DVBS) {
1582 +- dev->dvbv3_ber = buf[0] << 24 | buf[1] << 16 |
1583 +- buf[2] << 8 | buf[3] << 0;
1584 +- dev->post_bit_error += buf[0] << 24 | buf[1] << 16 |
1585 +- buf[2] << 8 | buf[3] << 0;
1586 ++ u32 bit_error = buf[0] << 24 | buf[1] << 16 |
1587 ++ buf[2] << 8 | buf[3] << 0;
1588 ++
1589 ++ dev->dvbv3_ber = bit_error;
1590 ++ dev->post_bit_error += bit_error;
1591 + c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
1592 + c->post_bit_error.stat[0].uvalue = dev->post_bit_error;
1593 + dev->block_error += buf[4] << 8 | buf[5] << 0;
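
[The tda10071 hunk above assembles the 32-bit error counter once into a local and uses it for both fields, instead of repeating the byte arithmetic and risking the two copies drifting apart. A standalone sketch of the read-once pattern, with an illustrative stats struct.

    #include <stdint.h>
    #include <stdio.h>

    struct stats { uint32_t last; uint64_t total; };

    static void update(struct stats *s, const uint8_t buf[4])
    {
        uint32_t bit_error = (uint32_t)buf[0] << 24 | buf[1] << 16 |
                             buf[2] << 8 | buf[3];

        s->last = bit_error;        /* both consumers see the same value */
        s->total += bit_error;
    }

    int main(void)
    {
        struct stats s = { 0, 0 };
        const uint8_t buf[4] = { 0x00, 0x00, 0x01, 0x02 };

        update(&s, buf);
        printf("%u %llu\n", s.last, (unsigned long long)s.total); /* 258 258 */
        return 0;
    }
]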
1594 +diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
1595 +index e4d7f2febf00c..05b3974bd9202 100644
1596 +--- a/drivers/media/i2c/smiapp/smiapp-core.c
1597 ++++ b/drivers/media/i2c/smiapp/smiapp-core.c
1598 +@@ -2338,11 +2338,12 @@ smiapp_sysfs_nvm_read(struct device *dev, struct device_attribute *attr,
1599 + if (rval < 0) {
1600 + if (rval != -EBUSY && rval != -EAGAIN)
1601 + pm_runtime_set_active(&client->dev);
1602 +- pm_runtime_put(&client->dev);
1603 ++ pm_runtime_put_noidle(&client->dev);
1604 + return -ENODEV;
1605 + }
1606 +
1607 + if (smiapp_read_nvm(sensor, sensor->nvm)) {
1608 ++ pm_runtime_put(&client->dev);
1609 + dev_err(&client->dev, "nvm read failed\n");
1610 + return -ENODEV;
1611 + }
1612 +diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
1613 +index b6dcae1ecc1be..ad344e642ddb7 100644
1614 +--- a/drivers/media/platform/ti-vpe/cal.c
1615 ++++ b/drivers/media/platform/ti-vpe/cal.c
1616 +@@ -687,12 +687,13 @@ static void pix_proc_config(struct cal_ctx *ctx)
1617 + }
1618 +
1619 + static void cal_wr_dma_config(struct cal_ctx *ctx,
1620 +- unsigned int width)
1621 ++ unsigned int width, unsigned int height)
1622 + {
1623 + u32 val;
1624 +
1625 + val = reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port));
1626 + set_field(&val, ctx->csi2_port, CAL_WR_DMA_CTRL_CPORT_MASK);
1627 ++ set_field(&val, height, CAL_WR_DMA_CTRL_YSIZE_MASK);
1628 + set_field(&val, CAL_WR_DMA_CTRL_DTAG_PIX_DAT,
1629 + CAL_WR_DMA_CTRL_DTAG_MASK);
1630 + set_field(&val, CAL_WR_DMA_CTRL_MODE_CONST,
1631 +@@ -1318,7 +1319,8 @@ static int cal_start_streaming(struct vb2_queue *vq, unsigned int count)
1632 + csi2_lane_config(ctx);
1633 + csi2_ctx_config(ctx);
1634 + pix_proc_config(ctx);
1635 +- cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline);
1636 ++ cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline,
1637 ++ ctx->v_fmt.fmt.pix.height);
1638 + cal_wr_dma_addr(ctx, addr);
1639 + csi2_ppi_enable(ctx);
1640 +
1641 +diff --git a/drivers/media/usb/go7007/go7007-usb.c b/drivers/media/usb/go7007/go7007-usb.c
1642 +index ed9bcaf08d5ec..ddfaabd4c0813 100644
1643 +--- a/drivers/media/usb/go7007/go7007-usb.c
1644 ++++ b/drivers/media/usb/go7007/go7007-usb.c
1645 +@@ -1052,6 +1052,7 @@ static int go7007_usb_probe(struct usb_interface *intf,
1646 + struct go7007_usb *usb;
1647 + const struct go7007_usb_board *board;
1648 + struct usb_device *usbdev = interface_to_usbdev(intf);
1649 ++ struct usb_host_endpoint *ep;
1650 + unsigned num_i2c_devs;
1651 + char *name;
1652 + int video_pipe, i, v_urb_len;
1653 +@@ -1147,7 +1148,8 @@ static int go7007_usb_probe(struct usb_interface *intf,
1654 + if (usb->intr_urb->transfer_buffer == NULL)
1655 + goto allocfail;
1656 +
1657 +- if (go->board_id == GO7007_BOARDID_SENSORAY_2250)
1658 ++ ep = usb->usbdev->ep_in[4];
1659 ++ if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK)
1660 + usb_fill_bulk_urb(usb->intr_urb, usb->usbdev,
1661 + usb_rcvbulkpipe(usb->usbdev, 4),
1662 + usb->intr_urb->transfer_buffer, 2*sizeof(u16),
1663 +diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
1664 +index 5c8ed2150c8bf..fb687368ac98c 100644
1665 +--- a/drivers/mfd/mfd-core.c
1666 ++++ b/drivers/mfd/mfd-core.c
1667 +@@ -32,6 +32,11 @@ int mfd_cell_enable(struct platform_device *pdev)
1668 + const struct mfd_cell *cell = mfd_get_cell(pdev);
1669 + int err = 0;
1670 +
1671 ++ if (!cell->enable) {
1672 ++ dev_dbg(&pdev->dev, "No .enable() call-back registered\n");
1673 ++ return 0;
1674 ++ }
1675 ++
1676 + /* only call enable hook if the cell wasn't previously enabled */
1677 + if (atomic_inc_return(cell->usage_count) == 1)
1678 + err = cell->enable(pdev);
1679 +@@ -49,6 +54,11 @@ int mfd_cell_disable(struct platform_device *pdev)
1680 + const struct mfd_cell *cell = mfd_get_cell(pdev);
1681 + int err = 0;
1682 +
1683 ++ if (!cell->disable) {
1684 ++ dev_dbg(&pdev->dev, "No .disable() call-back registered\n");
1685 ++ return 0;
1686 ++ }
1687 ++
1688 + /* only disable if no other clients are using it */
1689 + if (atomic_dec_return(cell->usage_count) == 0)
1690 + err = cell->disable(pdev);
1691 +diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
1692 +index 814a04e8fdd77..2be2313f5950a 100644
1693 +--- a/drivers/mmc/core/mmc.c
1694 ++++ b/drivers/mmc/core/mmc.c
1695 +@@ -300,7 +300,7 @@ static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd)
1696 + }
1697 + }
1698 +
1699 +-static void mmc_part_add(struct mmc_card *card, unsigned int size,
1700 ++static void mmc_part_add(struct mmc_card *card, u64 size,
1701 + unsigned int part_cfg, char *name, int idx, bool ro,
1702 + int area_type)
1703 + {
1704 +@@ -316,7 +316,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
1705 + {
1706 + int idx;
1707 + u8 hc_erase_grp_sz, hc_wp_grp_sz;
1708 +- unsigned int part_size;
1709 ++ u64 part_size;
1710 +
1711 + /*
1712 + * General purpose partition feature support --
1713 +@@ -346,8 +346,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
1714 + (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
1715 + << 8) +
1716 + ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
1717 +- part_size *= (size_t)(hc_erase_grp_sz *
1718 +- hc_wp_grp_sz);
1719 ++ part_size *= (hc_erase_grp_sz * hc_wp_grp_sz);
1720 + mmc_part_add(card, part_size << 19,
1721 + EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
1722 + "gp%d", idx, false,
1723 +@@ -365,7 +364,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
1724 + static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
1725 + {
1726 + int err = 0, idx;
1727 +- unsigned int part_size;
1728 ++ u64 part_size;
1729 + struct device_node *np;
1730 + bool broken_hpi = false;
1731 +
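
[The mmc hunks above widen part_size to u64 because the byte size (part_size << 19) overflows a 32-bit type once a partition reaches 4 GiB. A two-line demonstration of the overflow the type change avoids.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t mult = 32768 * 4;          /* 16 GiB in 512 KiB units */
        uint32_t narrow = mult << 19;       /* u32 shift wraps to 0 */
        uint64_t wide = (uint64_t)mult << 19;

        printf("u32: %u\n", narrow);
        printf("u64: %llu\n", (unsigned long long)wide); /* 68719476736 */
        return 0;
    }
]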
1732 +diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
1733 +index 1f0d83086cb09..870d1f1331b18 100644
1734 +--- a/drivers/mtd/chips/cfi_cmdset_0002.c
1735 ++++ b/drivers/mtd/chips/cfi_cmdset_0002.c
1736 +@@ -726,7 +726,6 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
1737 + kfree(mtd->eraseregions);
1738 + kfree(mtd);
1739 + kfree(cfi->cmdset_priv);
1740 +- kfree(cfi->cfiq);
1741 + return NULL;
1742 + }
1743 +
1744 +diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
1745 +index fbd5affc0acfe..04fd845de05fb 100644
1746 +--- a/drivers/mtd/cmdlinepart.c
1747 ++++ b/drivers/mtd/cmdlinepart.c
1748 +@@ -228,12 +228,29 @@ static int mtdpart_setup_real(char *s)
1749 + struct cmdline_mtd_partition *this_mtd;
1750 + struct mtd_partition *parts;
1751 + int mtd_id_len, num_parts;
1752 +- char *p, *mtd_id;
1753 ++ char *p, *mtd_id, *semicol;
1754 ++
1755 ++ /*
1756 ++ * Replace the first ';' by a NULL char so strrchr can work
1757 ++ * properly.
1758 ++ */
1759 ++ semicol = strchr(s, ';');
1760 ++ if (semicol)
1761 ++ *semicol = '\0';
1762 +
1763 + mtd_id = s;
1764 +
1765 +- /* fetch <mtd-id> */
1766 +- p = strchr(s, ':');
1767 ++ /*
1768 ++ * fetch <mtd-id>. We use strrchr to ignore all ':' that could
1769 ++ * be present in the MTD name, only the last one is interpreted
1770 ++ * as an <mtd-id>/<part-definition> separator.
1771 ++ */
1772 ++ p = strrchr(s, ':');
1773 ++
1774 ++ /* Restore the ';' now. */
1775 ++ if (semicol)
1776 ++ *semicol = ';';
1777 ++
1778 + if (!p) {
1779 + pr_err("no mtd-id\n");
1780 + return -EINVAL;
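
[The cmdlinepart hunk above lets an <mtd-id> contain colons: it NUL-terminates at the first ';' so strrchr() only sees the current definition, takes the last ':' as the id/parts separator, then restores the ';'. A standalone demo of that parsing trick on a made-up cmdline string.

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char s[] = "spi0.0:flash:1M(boot),-(rootfs);nand0:2M(a)";
        char *semicol = strchr(s, ';');
        char *p;

        if (semicol)
            *semicol = '\0';        /* hide the next definition */

        p = strrchr(s, ':');        /* last ':' splits id from parts */
        if (p)
            printf("mtd-id: %.*s\nparts:  %s\n", (int)(p - s), s, p + 1);

        if (semicol)
            *semicol = ';';         /* restore for the next definition */
        return 0;
    }

Output: the id parses as "spi0.0:flash" (colon and all) and the parts as "1M(boot),-(rootfs)".]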
1781 +diff --git a/drivers/mtd/nand/omap_elm.c b/drivers/mtd/nand/omap_elm.c
1782 +index a3f32f939cc17..6736777a41567 100644
1783 +--- a/drivers/mtd/nand/omap_elm.c
1784 ++++ b/drivers/mtd/nand/omap_elm.c
1785 +@@ -421,6 +421,7 @@ static int elm_probe(struct platform_device *pdev)
1786 + pm_runtime_enable(&pdev->dev);
1787 + if (pm_runtime_get_sync(&pdev->dev) < 0) {
1788 + ret = -EINVAL;
1789 ++ pm_runtime_put_sync(&pdev->dev);
1790 + pm_runtime_disable(&pdev->dev);
1791 + dev_err(&pdev->dev, "can't enable clock\n");
1792 + return ret;
1793 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
1794 +index a38433cb9015d..cc2ecbbfd4bde 100644
1795 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
1796 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
1797 +@@ -1264,9 +1264,12 @@ static int bnxt_set_pauseparam(struct net_device *dev,
1798 + if (!BNXT_SINGLE_PF(bp))
1799 + return -EOPNOTSUPP;
1800 +
1801 ++ mutex_lock(&bp->link_lock);
1802 + if (epause->autoneg) {
1803 +- if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
1804 +- return -EINVAL;
1805 ++ if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
1806 ++ rc = -EINVAL;
1807 ++ goto pause_exit;
1808 ++ }
1809 +
1810 + link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
1811 + if (bp->hwrm_spec_code >= 0x10201)
1812 +@@ -1287,11 +1290,11 @@ static int bnxt_set_pauseparam(struct net_device *dev,
1813 + if (epause->tx_pause)
1814 + link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
1815 +
1816 +- if (netif_running(dev)) {
1817 +- mutex_lock(&bp->link_lock);
1818 ++ if (netif_running(dev))
1819 + rc = bnxt_hwrm_set_pause(bp);
1820 +- mutex_unlock(&bp->link_lock);
1821 +- }
1822 ++
1823 ++pause_exit:
1824 ++ mutex_unlock(&bp->link_lock);
1825 + return rc;
1826 + }
1827 +
1828 +@@ -1977,8 +1980,7 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
1829 + struct bnxt *bp = netdev_priv(dev);
1830 + struct ethtool_eee *eee = &bp->eee;
1831 + struct bnxt_link_info *link_info = &bp->link_info;
1832 +- u32 advertising =
1833 +- _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
1834 ++ u32 advertising;
1835 + int rc = 0;
1836 +
1837 + if (!BNXT_SINGLE_PF(bp))
1838 +@@ -1987,19 +1989,23 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
1839 + if (!(bp->flags & BNXT_FLAG_EEE_CAP))
1840 + return -EOPNOTSUPP;
1841 +
1842 ++ mutex_lock(&bp->link_lock);
1843 ++ advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
1844 + if (!edata->eee_enabled)
1845 + goto eee_ok;
1846 +
1847 + if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
1848 + netdev_warn(dev, "EEE requires autoneg\n");
1849 +- return -EINVAL;
1850 ++ rc = -EINVAL;
1851 ++ goto eee_exit;
1852 + }
1853 + if (edata->tx_lpi_enabled) {
1854 + if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
1855 + edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
1856 + netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n",
1857 + bp->lpi_tmr_lo, bp->lpi_tmr_hi);
1858 +- return -EINVAL;
1859 ++ rc = -EINVAL;
1860 ++ goto eee_exit;
1861 + } else if (!bp->lpi_tmr_hi) {
1862 + edata->tx_lpi_timer = eee->tx_lpi_timer;
1863 + }
1864 +@@ -2009,7 +2015,8 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
1865 + } else if (edata->advertised & ~advertising) {
1866 + netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
1867 + edata->advertised, advertising);
1868 +- return -EINVAL;
1869 ++ rc = -EINVAL;
1870 ++ goto eee_exit;
1871 + }
1872 +
1873 + eee->advertised = edata->advertised;
1874 +@@ -2021,6 +2028,8 @@ eee_ok:
1875 + if (netif_running(dev))
1876 + rc = bnxt_hwrm_set_link_setting(bp, false, true);
1877 +
1878 ++eee_exit:
1879 ++ mutex_unlock(&bp->link_lock);
1880 + return rc;
1881 + }
1882 +
1883 +diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
1884 +index 175681aa52607..8cc0e48738152 100644
1885 +--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
1886 ++++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
1887 +@@ -567,8 +567,13 @@ void e1000_reinit_locked(struct e1000_adapter *adapter)
1888 + WARN_ON(in_interrupt());
1889 + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
1890 + msleep(1);
1891 +- e1000_down(adapter);
1892 +- e1000_up(adapter);
1893 ++
1894 ++ /* only run the task if not already down */
1895 ++ if (!test_bit(__E1000_DOWN, &adapter->flags)) {
1896 ++ e1000_down(adapter);
1897 ++ e1000_up(adapter);
1898 ++ }
1899 ++
1900 + clear_bit(__E1000_RESETTING, &adapter->flags);
1901 + }
1902 +
1903 +@@ -1458,10 +1463,15 @@ int e1000_close(struct net_device *netdev)
1904 + struct e1000_hw *hw = &adapter->hw;
1905 + int count = E1000_CHECK_RESET_COUNT;
1906 +
1907 +- while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
1908 ++ while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--)
1909 + usleep_range(10000, 20000);
1910 +
1911 +- WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
1912 ++ WARN_ON(count < 0);
1913 ++
1914 ++ /* signal that we're down so that the reset task will no longer run */
1915 ++ set_bit(__E1000_DOWN, &adapter->flags);
1916 ++ clear_bit(__E1000_RESETTING, &adapter->flags);
1917 ++
1918 + e1000_down(adapter);
1919 + e1000_power_down_phy(adapter);
1920 + e1000_free_irq(adapter);
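
[The e1000 hunks above make close() claim the RESETTING bit itself (test_and_set_bit) and mark the adapter DOWN before releasing it, so the reset task can no longer race the teardown. A C11 atomics sketch of that claim-then-mark ordering; the real code sleeps instead of spinning.

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_flag resetting = ATOMIC_FLAG_INIT;
    static atomic_bool down;

    static void close_dev(void)
    {
        while (atomic_flag_test_and_set(&resetting))
            ;                           /* the real code sleeps here */
        atomic_store(&down, 1);         /* visible before the release */
        atomic_flag_clear(&resetting);
        /* ... tear the device down ... */
    }

    static void reset_task(void)
    {
        if (atomic_load(&down))
            return;                     /* adapter going away: back off */
        /* ... down/up cycle ... */
    }

    int main(void)
    {
        close_dev();
        reset_task();
        printf("down=%d\n", atomic_load(&down));
        return 0;
    }
]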
1921 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
1922 +index 65a53d409e773..bc9eec1bcbf18 100644
1923 +--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
1924 ++++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
1925 +@@ -96,6 +96,7 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
1926 + p_ramrod->personality = PERSONALITY_ETH;
1927 + break;
1928 + case QED_PCI_ETH_ROCE:
1929 ++ case QED_PCI_ETH_IWARP:
1930 + p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
1931 + break;
1932 + default:
1933 +diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
1934 +index 3c9f8770f7e78..f48006c22a8a6 100644
1935 +--- a/drivers/net/geneve.c
1936 ++++ b/drivers/net/geneve.c
1937 +@@ -716,7 +716,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
1938 + struct net_device *dev,
1939 + struct geneve_sock *gs4,
1940 + struct flowi4 *fl4,
1941 +- const struct ip_tunnel_info *info)
1942 ++ const struct ip_tunnel_info *info,
1943 ++ __be16 dport, __be16 sport)
1944 + {
1945 + bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
1946 + struct geneve_dev *geneve = netdev_priv(dev);
1947 +@@ -732,6 +733,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
1948 + fl4->flowi4_proto = IPPROTO_UDP;
1949 + fl4->daddr = info->key.u.ipv4.dst;
1950 + fl4->saddr = info->key.u.ipv4.src;
1951 ++ fl4->fl4_dport = dport;
1952 ++ fl4->fl4_sport = sport;
1953 +
1954 + tos = info->key.tos;
1955 + if ((tos == 1) && !geneve->collect_md) {
1956 +@@ -766,7 +769,8 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
1957 + struct net_device *dev,
1958 + struct geneve_sock *gs6,
1959 + struct flowi6 *fl6,
1960 +- const struct ip_tunnel_info *info)
1961 ++ const struct ip_tunnel_info *info,
1962 ++ __be16 dport, __be16 sport)
1963 + {
1964 + bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
1965 + struct geneve_dev *geneve = netdev_priv(dev);
1966 +@@ -782,6 +786,9 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
1967 + fl6->flowi6_proto = IPPROTO_UDP;
1968 + fl6->daddr = info->key.u.ipv6.dst;
1969 + fl6->saddr = info->key.u.ipv6.src;
1970 ++ fl6->fl6_dport = dport;
1971 ++ fl6->fl6_sport = sport;
1972 ++
1973 + prio = info->key.tos;
1974 + if ((prio == 1) && !geneve->collect_md) {
1975 + prio = ip_tunnel_get_dsfield(ip_hdr(skb), skb);
1976 +@@ -828,7 +835,9 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
1977 + __be16 df;
1978 + int err;
1979 +
1980 +- rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info);
1981 ++ sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
1982 ++ rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
1983 ++ geneve->info.key.tp_dst, sport);
1984 + if (IS_ERR(rt))
1985 + return PTR_ERR(rt);
1986 +
1987 +@@ -839,7 +848,6 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
1988 + skb_dst_update_pmtu(skb, mtu);
1989 + }
1990 +
1991 +- sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
1992 + if (geneve->collect_md) {
1993 + tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
1994 + ttl = key->ttl;
1995 +@@ -874,7 +882,9 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
1996 + __be16 sport;
1997 + int err;
1998 +
1999 +- dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info);
2000 ++ sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
2001 ++ dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info,
2002 ++ geneve->info.key.tp_dst, sport);
2003 + if (IS_ERR(dst))
2004 + return PTR_ERR(dst);
2005 +
2006 +@@ -885,7 +895,6 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
2007 + skb_dst_update_pmtu(skb, mtu);
2008 + }
2009 +
2010 +- sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
2011 + if (geneve->collect_md) {
2012 + prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
2013 + ttl = key->ttl;
2014 +@@ -963,13 +972,18 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
2015 + {
2016 + struct ip_tunnel_info *info = skb_tunnel_info(skb);
2017 + struct geneve_dev *geneve = netdev_priv(dev);
2018 ++ __be16 sport;
2019 +
2020 + if (ip_tunnel_info_af(info) == AF_INET) {
2021 + struct rtable *rt;
2022 + struct flowi4 fl4;
2023 ++
2024 + struct geneve_sock *gs4 = rcu_dereference(geneve->sock4);
2025 ++ sport = udp_flow_src_port(geneve->net, skb,
2026 ++ 1, USHRT_MAX, true);
2027 +
2028 +- rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info);
2029 ++ rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
2030 ++ geneve->info.key.tp_dst, sport);
2031 + if (IS_ERR(rt))
2032 + return PTR_ERR(rt);
2033 +
2034 +@@ -979,9 +993,13 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
2035 + } else if (ip_tunnel_info_af(info) == AF_INET6) {
2036 + struct dst_entry *dst;
2037 + struct flowi6 fl6;
2038 ++
2039 + struct geneve_sock *gs6 = rcu_dereference(geneve->sock6);
2040 ++ sport = udp_flow_src_port(geneve->net, skb,
2041 ++ 1, USHRT_MAX, true);
2042 +
2043 +- dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info);
2044 ++ dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info,
2045 ++ geneve->info.key.tp_dst, sport);
2046 + if (IS_ERR(dst))
2047 + return PTR_ERR(dst);
2048 +
2049 +@@ -992,8 +1010,7 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
2050 + return -EINVAL;
2051 + }
2052 +
2053 +- info->key.tp_src = udp_flow_src_port(geneve->net, skb,
2054 +- 1, USHRT_MAX, true);
2055 ++ info->key.tp_src = sport;
2056 + info->key.tp_dst = geneve->info.key.tp_dst;
2057 + return 0;
2058 + }
2059 +diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
2060 +index 46b42de13d76f..3510eb26ccd55 100644
2061 +--- a/drivers/net/ieee802154/adf7242.c
2062 ++++ b/drivers/net/ieee802154/adf7242.c
2063 +@@ -834,7 +834,9 @@ static int adf7242_rx(struct adf7242_local *lp)
2064 + int ret;
2065 + u8 lqi, len_u8, *data;
2066 +
2067 +- adf7242_read_reg(lp, 0, &len_u8);
2068 ++ ret = adf7242_read_reg(lp, 0, &len_u8);
2069 ++ if (ret)
2070 ++ return ret;
2071 +
2072 + len = len_u8;
2073 +
2074 +diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
2075 +index 3a58962babd41..368369469e321 100644
2076 +--- a/drivers/net/ieee802154/ca8210.c
2077 ++++ b/drivers/net/ieee802154/ca8210.c
2078 +@@ -2924,6 +2924,7 @@ static int ca8210_dev_com_init(struct ca8210_priv *priv)
2079 + );
2080 + if (!priv->irq_workqueue) {
2081 + dev_crit(&priv->spi->dev, "alloc of irq_workqueue failed!\n");
2082 ++ destroy_workqueue(priv->mlme_workqueue);
2083 + return -ENOMEM;
2084 + }
2085 +
2086 +diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
2087 +index 27f1f0b5b8f67..b51bca051c475 100644
2088 +--- a/drivers/net/phy/phy_device.c
2089 ++++ b/drivers/net/phy/phy_device.c
2090 +@@ -1121,7 +1121,8 @@ void phy_detach(struct phy_device *phydev)
2091 +
2092 + phy_led_triggers_unregister(phydev);
2093 +
2094 +- module_put(phydev->mdio.dev.driver->owner);
2095 ++ if (phydev->mdio.dev.driver)
2096 ++ module_put(phydev->mdio.dev.driver->owner);
2097 +
2098 + /* If the device had no specific driver before (i.e. - it
2099 + * was using the generic driver), we unbind the device
2100 +diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
2101 +index f3c1d52459788..d42b861cc8965 100644
2102 +--- a/drivers/net/wan/hdlc_ppp.c
2103 ++++ b/drivers/net/wan/hdlc_ppp.c
2104 +@@ -386,11 +386,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
2105 + }
2106 +
2107 + for (opt = data; len; len -= opt[1], opt += opt[1]) {
2108 +- if (len < 2 || len < opt[1]) {
2109 +- dev->stats.rx_errors++;
2110 +- kfree(out);
2111 +- return; /* bad packet, drop silently */
2112 +- }
2113 ++ if (len < 2 || opt[1] < 2 || len < opt[1])
2114 ++ goto err_out;
2115 +
2116 + if (pid == PID_LCP)
2117 + switch (opt[0]) {
2118 +@@ -398,6 +395,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
2119 + continue; /* MRU always OK and > 1500 bytes? */
2120 +
2121 + case LCP_OPTION_ACCM: /* async control character map */
2122 ++ if (opt[1] < sizeof(valid_accm))
2123 ++ goto err_out;
2124 + if (!memcmp(opt, valid_accm,
2125 + sizeof(valid_accm)))
2126 + continue;
2127 +@@ -409,6 +408,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
2128 + }
2129 + break;
2130 + case LCP_OPTION_MAGIC:
2131 ++ if (len < 6)
2132 ++ goto err_out;
2133 + if (opt[1] != 6 || (!opt[2] && !opt[3] &&
2134 + !opt[4] && !opt[5]))
2135 + break; /* reject invalid magic number */
2136 +@@ -427,6 +428,11 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
2137 + ppp_cp_event(dev, pid, RCR_GOOD, CP_CONF_ACK, id, req_len, data);
2138 +
2139 + kfree(out);
2140 ++ return;
2141 ++
2142 ++err_out:
2143 ++ dev->stats.rx_errors++;
2144 ++ kfree(out);
2145 + }
2146 +
2147 + static int ppp_rx(struct sk_buff *skb)
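
[The hdlc_ppp hunks above harden the LCP option walk: each option is (type, len, data...) with len covering the whole option, so len < 2 or len > remaining bytes means a malformed packet, and a zero len would previously spin the loop forever. A standalone sketch of the checked TLV loop on made-up input.

    #include <stdio.h>

    static int parse_opts(const unsigned char *opt, int len)
    {
        while (len) {
            if (len < 2 || opt[1] < 2 || opt[1] > len)
                return -1;              /* bad packet, drop */
            printf("opt %u, len %u\n", opt[0], opt[1]);
            len -= opt[1];
            opt += opt[1];
        }
        return 0;
    }

    int main(void)
    {
        const unsigned char good[] = { 1, 4, 0x05, 0xd4, 5, 2 };
        const unsigned char evil[] = { 1, 0 };  /* len 0: endless loop before */

        printf("good: %d\n", parse_opts(good, sizeof(good)));
        printf("evil: %d\n", parse_opts(evil, sizeof(evil)));
        return 0;
    }
]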
2148 +diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
2149 +index ad4a1efc57c97..e1a1d27427cc9 100644
2150 +--- a/drivers/net/wireless/ath/ar5523/ar5523.c
2151 ++++ b/drivers/net/wireless/ath/ar5523/ar5523.c
2152 +@@ -1771,6 +1771,8 @@ static const struct usb_device_id ar5523_id_table[] = {
2153 + AR5523_DEVICE_UX(0x0846, 0x4300), /* Netgear / WG111U */
2154 + AR5523_DEVICE_UG(0x0846, 0x4250), /* Netgear / WG111T */
2155 + AR5523_DEVICE_UG(0x0846, 0x5f00), /* Netgear / WPN111 */
2156 ++ AR5523_DEVICE_UG(0x083a, 0x4506), /* SMC / EZ Connect
2157 ++ SMCWUSBT-G2 */
2158 + AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / AR5523_1 */
2159 + AR5523_DEVICE_UX(0x157e, 0x3205), /* Umedia / AR5523_2 */
2160 + AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / TEW444UBEU */
2161 +diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
2162 +index f49b21b137c13..fef313099e08a 100644
2163 +--- a/drivers/net/wireless/ath/ath10k/sdio.c
2164 ++++ b/drivers/net/wireless/ath/ath10k/sdio.c
2165 +@@ -1564,23 +1564,33 @@ static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
2166 + size_t buf_len)
2167 + {
2168 + int ret;
2169 ++ void *mem;
2170 ++
2171 ++ mem = kzalloc(buf_len, GFP_KERNEL);
2172 ++ if (!mem)
2173 ++ return -ENOMEM;
2174 +
2175 + /* set window register to start read cycle */
2176 + ret = ath10k_sdio_write32(ar, MBOX_WINDOW_READ_ADDR_ADDRESS, address);
2177 + if (ret) {
2178 + ath10k_warn(ar, "failed to set mbox window read address: %d", ret);
2179 +- return ret;
2180 ++ goto out;
2181 + }
2182 +
2183 + /* read the data */
2184 +- ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, buf, buf_len);
2185 ++ ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, mem, buf_len);
2186 + if (ret) {
2187 + ath10k_warn(ar, "failed to read from mbox window data address: %d\n",
2188 + ret);
2189 +- return ret;
2190 ++ goto out;
2191 + }
2192 +
2193 +- return 0;
2194 ++ memcpy(buf, mem, buf_len);
2195 ++
2196 ++out:
2197 ++ kfree(mem);
2198 ++
2199 ++ return ret;
2200 + }
2201 +
2202 + static int ath10k_sdio_hif_diag_read32(struct ath10k *ar, u32 address,
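
[The ath10k hunk above stops handing the caller's buffer (possibly on-stack, so not DMA-safe) to the SDIO layer, reading into a freshly allocated bounce buffer and copying out on success. A userspace sketch of the bounce-buffer shape; backend_read() is a stand-in for the SDIO read.

    #include <stdlib.h>
    #include <string.h>

    static int backend_read(void *buf, size_t len)
    {
        memset(buf, 0xab, len);     /* pretend the device filled it */
        return 0;
    }

    static int diag_read(void *buf, size_t buf_len)
    {
        void *mem = calloc(1, buf_len);
        int ret;

        if (!mem)
            return -1;

        ret = backend_read(mem, buf_len);
        if (ret)
            goto out;

        memcpy(buf, mem, buf_len);  /* copy out only on success */
    out:
        free(mem);
        return ret;
    }

    int main(void)
    {
        char stack_buf[16];         /* fine here; not DMA-safe in the kernel */

        return diag_read(stack_buf, sizeof(stack_buf));
    }
]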
2203 +diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
2204 +index 342555ebafd79..1d86d29b64ccc 100644
2205 +--- a/drivers/net/wireless/marvell/mwifiex/fw.h
2206 ++++ b/drivers/net/wireless/marvell/mwifiex/fw.h
2207 +@@ -938,7 +938,7 @@ struct mwifiex_tkip_param {
2208 + struct mwifiex_aes_param {
2209 + u8 pn[WPA_PN_SIZE];
2210 + __le16 key_len;
2211 +- u8 key[WLAN_KEY_LEN_CCMP];
2212 ++ u8 key[WLAN_KEY_LEN_CCMP_256];
2213 + } __packed;
2214 +
2215 + struct mwifiex_wapi_param {
2216 +diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
2217 +index 19ce279df24d9..1aeb8cf6dff97 100644
2218 +--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
2219 ++++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
2220 +@@ -624,7 +624,7 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv,
2221 + key_v2 = &resp->params.key_material_v2;
2222 +
2223 + len = le16_to_cpu(key_v2->key_param_set.key_params.aes.key_len);
2224 +- if (len > WLAN_KEY_LEN_CCMP)
2225 ++ if (len > sizeof(key_v2->key_param_set.key_params.aes.key))
2226 + return -EINVAL;
2227 +
2228 + if (le16_to_cpu(key_v2->action) == HostCmd_ACT_GEN_SET) {
2229 +@@ -640,7 +640,7 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv,
2230 + return 0;
2231 +
2232 + memset(priv->aes_key_v2.key_param_set.key_params.aes.key, 0,
2233 +- WLAN_KEY_LEN_CCMP);
2234 ++ sizeof(key_v2->key_param_set.key_params.aes.key));
2235 + priv->aes_key_v2.key_param_set.key_params.aes.key_len =
2236 + cpu_to_le16(len);
2237 + memcpy(priv->aes_key_v2.key_param_set.key_params.aes.key,
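
[The mwifiex hunks above bound the firmware-reported key length by sizeof() of the actual destination field rather than a named constant; when the field grew from CCMP to CCMP-256 size, a stale constant would have silently under-checked. A small sketch of that sizeof() bound with an illustrative struct.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct aes_param {
        uint16_t key_len;
        uint8_t key[32];            /* grew from 16 to 32 at some point */
    };

    static int store_key(struct aes_param *p, const uint8_t *src, size_t len)
    {
        if (len > sizeof(p->key))   /* tracks the field automatically */
            return -1;
        memset(p->key, 0, sizeof(p->key));
        memcpy(p->key, src, len);
        p->key_len = (uint16_t)len;
        return 0;
    }

    int main(void)
    {
        struct aes_param p;
        uint8_t key[32] = { 0 };

        printf("%d\n", store_key(&p, key, sizeof(key)));   /* 0: fits */
        printf("%d\n", store_key(&p, key, 64));            /* -1: rejected */
        return 0;
    }
]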
2238 +diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
2239 +index 2526971f99299..3eeaf57e6d939 100644
2240 +--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
2241 ++++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
2242 +@@ -102,6 +102,8 @@
2243 + #define QSERDES_COM_CORECLK_DIV_MODE1 0x1bc
2244 +
2245 + /* QMP PHY TX registers */
2246 ++#define QSERDES_TX_EMP_POST1_LVL 0x018
2247 ++#define QSERDES_TX_SLEW_CNTL 0x040
2248 + #define QSERDES_TX_RES_CODE_LANE_OFFSET 0x054
2249 + #define QSERDES_TX_DEBUG_BUS_SEL 0x064
2250 + #define QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN 0x068
2251 +@@ -394,8 +396,8 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = {
2252 + QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0xf),
2253 + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x1),
2254 + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x0),
2255 +- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0x1f),
2256 +- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f),
2257 ++ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
2258 ++ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x1f),
2259 + QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x6),
2260 + QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0xf),
2261 + QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x0),
2262 +@@ -421,7 +423,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = {
2263 + QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x0),
2264 + QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
2265 + QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CTRL_BY_PSM, 0x1),
2266 +- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_CTRL, 0xa),
2267 + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x1),
2268 + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
2269 + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x1),
2270 +@@ -430,7 +431,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = {
2271 + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0x2f),
2272 + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x19),
2273 + QMP_PHY_INIT_CFG(QSERDES_COM_CLK_EP_DIV, 0x19),
2274 +- QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x7),
2275 + };
2276 +
2277 + static const struct qmp_phy_init_tbl ipq8074_pcie_tx_tbl[] = {
2278 +@@ -438,6 +438,8 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_tx_tbl[] = {
2279 + QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x6),
2280 + QMP_PHY_INIT_CFG(QSERDES_TX_RES_CODE_LANE_OFFSET, 0x2),
2281 + QMP_PHY_INIT_CFG(QSERDES_TX_RCV_DETECT_LVL_2, 0x12),
2282 ++ QMP_PHY_INIT_CFG(QSERDES_TX_EMP_POST1_LVL, 0x36),
2283 ++ QMP_PHY_INIT_CFG(QSERDES_TX_SLEW_CNTL, 0x0a),
2284 + };
2285 +
2286 + static const struct qmp_phy_init_tbl ipq8074_pcie_rx_tbl[] = {
2287 +@@ -448,7 +450,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_rx_tbl[] = {
2288 + QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xdb),
2289 + QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
2290 + QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x4),
2291 +- QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN_HALF, 0x4),
2292 + };
2293 +
2294 + static const struct qmp_phy_init_tbl ipq8074_pcie_pcs_tbl[] = {
2295 +@@ -665,6 +666,9 @@ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
2296 + .mask_pcs_ready = PHYSTATUS,
2297 + };
2298 +
2299 ++static const char * const ipq8074_pciephy_clk_l[] = {
2300 ++ "aux", "cfg_ahb",
2301 ++};
2302 + /* list of resets */
2303 + static const char * const ipq8074_pciephy_reset_l[] = {
2304 + "phy", "common",
2305 +@@ -682,8 +686,8 @@ static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
2306 + .rx_tbl_num = ARRAY_SIZE(ipq8074_pcie_rx_tbl),
2307 + .pcs_tbl = ipq8074_pcie_pcs_tbl,
2308 + .pcs_tbl_num = ARRAY_SIZE(ipq8074_pcie_pcs_tbl),
2309 +- .clk_list = NULL,
2310 +- .num_clks = 0,
2311 ++ .clk_list = ipq8074_pciephy_clk_l,
2312 ++ .num_clks = ARRAY_SIZE(ipq8074_pciephy_clk_l),
2313 + .reset_list = ipq8074_pciephy_reset_l,
2314 + .num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
2315 + .vreg_list = NULL,
2316 +diff --git a/drivers/phy/samsung/phy-s5pv210-usb2.c b/drivers/phy/samsung/phy-s5pv210-usb2.c
2317 +index f6f72339bbc32..bb7fdf491c1c2 100644
2318 +--- a/drivers/phy/samsung/phy-s5pv210-usb2.c
2319 ++++ b/drivers/phy/samsung/phy-s5pv210-usb2.c
2320 +@@ -142,6 +142,10 @@ static void s5pv210_phy_pwr(struct samsung_usb2_phy_instance *inst, bool on)
2321 + udelay(10);
2322 + rst &= ~rstbits;
2323 + writel(rst, drv->reg_phy + S5PV210_UPHYRST);
2324 ++ /* The following delay is necessary for the reset sequence to be
2325 ++ * completed
2326 ++ */
2327 ++ udelay(80);
2328 + } else {
2329 + pwr = readl(drv->reg_phy + S5PV210_UPHYPWR);
2330 + pwr |= phypwr;
2331 +diff --git a/drivers/power/supply/max17040_battery.c b/drivers/power/supply/max17040_battery.c
2332 +index 33c40f79d23d5..2c35c13ad546f 100644
2333 +--- a/drivers/power/supply/max17040_battery.c
2334 ++++ b/drivers/power/supply/max17040_battery.c
2335 +@@ -109,7 +109,7 @@ static void max17040_get_vcell(struct i2c_client *client)
2336 +
2337 + vcell = max17040_read_reg(client, MAX17040_VCELL);
2338 +
2339 +- chip->vcell = vcell;
2340 ++ chip->vcell = (vcell >> 4) * 1250;
2341 + }
2342 +
2343 + static void max17040_get_soc(struct i2c_client *client)
2344 +diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
2345 +index f15f6d1e1070a..f207f8725993c 100644
2346 +--- a/drivers/rapidio/devices/rio_mport_cdev.c
2347 ++++ b/drivers/rapidio/devices/rio_mport_cdev.c
2348 +@@ -2464,13 +2464,6 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
2349 + cdev_init(&md->cdev, &mport_fops);
2350 + md->cdev.owner = THIS_MODULE;
2351 +
2352 +- ret = cdev_device_add(&md->cdev, &md->dev);
2353 +- if (ret) {
2354 +- rmcd_error("Failed to register mport %d (err=%d)",
2355 +- mport->id, ret);
2356 +- goto err_cdev;
2357 +- }
2358 +-
2359 + INIT_LIST_HEAD(&md->doorbells);
2360 + spin_lock_init(&md->db_lock);
2361 + INIT_LIST_HEAD(&md->portwrites);
2362 +@@ -2490,6 +2483,13 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
2363 + #else
2364 + md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER;
2365 + #endif
2366 ++
2367 ++ ret = cdev_device_add(&md->cdev, &md->dev);
2368 ++ if (ret) {
2369 ++ rmcd_error("Failed to register mport %d (err=%d)",
2370 ++ mport->id, ret);
2371 ++ goto err_cdev;
2372 ++ }
2373 + ret = rio_query_mport(mport, &attr);
2374 + if (!ret) {
2375 + md->properties.flags = attr.flags;
2376 +diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
2377 +index 38a2e9e684df4..77a106e90124b 100644
2378 +--- a/drivers/rtc/rtc-ds1374.c
2379 ++++ b/drivers/rtc/rtc-ds1374.c
2380 +@@ -620,6 +620,10 @@ static int ds1374_probe(struct i2c_client *client,
2381 + if (!ds1374)
2382 + return -ENOMEM;
2383 +
2384 ++ ds1374->rtc = devm_rtc_allocate_device(&client->dev);
2385 ++ if (IS_ERR(ds1374->rtc))
2386 ++ return PTR_ERR(ds1374->rtc);
2387 ++
2388 + ds1374->client = client;
2389 + i2c_set_clientdata(client, ds1374);
2390 +
2391 +@@ -641,12 +645,11 @@ static int ds1374_probe(struct i2c_client *client,
2392 + device_set_wakeup_capable(&client->dev, 1);
2393 + }
2394 +
2395 +- ds1374->rtc = devm_rtc_device_register(&client->dev, client->name,
2396 +- &ds1374_rtc_ops, THIS_MODULE);
2397 +- if (IS_ERR(ds1374->rtc)) {
2398 +- dev_err(&client->dev, "unable to register the class device\n");
2399 +- return PTR_ERR(ds1374->rtc);
2400 +- }
2401 ++ ds1374->rtc->ops = &ds1374_rtc_ops;
2402 ++
2403 ++ ret = rtc_register_device(ds1374->rtc);
2404 ++ if (ret)
2405 ++ return ret;
2406 +
2407 + #ifdef CONFIG_RTC_DRV_DS1374_WDT
2408 + save_client = client;
2409 +diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
2410 +index 6168ccdb389c3..91a569c10dea0 100644
2411 +--- a/drivers/s390/block/dasd_fba.c
2412 ++++ b/drivers/s390/block/dasd_fba.c
2413 +@@ -39,6 +39,7 @@
2414 + MODULE_LICENSE("GPL");
2415 +
2416 + static struct dasd_discipline dasd_fba_discipline;
2417 ++static void *dasd_fba_zero_page;
2418 +
2419 + struct dasd_fba_private {
2420 + struct dasd_fba_characteristics rdc_data;
2421 +@@ -269,7 +270,7 @@ static void ccw_write_zero(struct ccw1 *ccw, int count)
2422 + ccw->cmd_code = DASD_FBA_CCW_WRITE;
2423 + ccw->flags |= CCW_FLAG_SLI;
2424 + ccw->count = count;
2425 +- ccw->cda = (__u32) (addr_t) page_to_phys(ZERO_PAGE(0));
2426 ++ ccw->cda = (__u32) (addr_t) dasd_fba_zero_page;
2427 + }
2428 +
2429 + /*
2430 +@@ -808,6 +809,11 @@ dasd_fba_init(void)
2431 + int ret;
2432 +
2433 + ASCEBC(dasd_fba_discipline.ebcname, 4);
2434 ++
2435 ++ dasd_fba_zero_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
2436 ++ if (!dasd_fba_zero_page)
2437 ++ return -ENOMEM;
2438 ++
2439 + ret = ccw_driver_register(&dasd_fba_driver);
2440 + if (!ret)
2441 + wait_for_device_probe();
2442 +@@ -819,6 +825,7 @@ static void __exit
2443 + dasd_fba_cleanup(void)
2444 + {
2445 + ccw_driver_unregister(&dasd_fba_driver);
2446 ++ free_page((unsigned long)dasd_fba_zero_page);
2447 + }
2448 +
2449 + module_init(dasd_fba_init);
2450 +diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
2451 +index 7173ae53c5260..ba11313568810 100644
2452 +--- a/drivers/scsi/aacraid/aachba.c
2453 ++++ b/drivers/scsi/aacraid/aachba.c
2454 +@@ -2322,13 +2322,13 @@ static int aac_read(struct scsi_cmnd * scsicmd)
2455 + scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2456 + SAM_STAT_CHECK_CONDITION;
2457 + set_sense(&dev->fsa_dev[cid].sense_data,
2458 +- HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
2459 ++ ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE,
2460 + ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
2461 + memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2462 + min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2463 + SCSI_SENSE_BUFFERSIZE));
2464 + scsicmd->scsi_done(scsicmd);
2465 +- return 1;
2466 ++ return 0;
2467 + }
2468 +
2469 + dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
2470 +@@ -2414,13 +2414,13 @@ static int aac_write(struct scsi_cmnd * scsicmd)
2471 + scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2472 + SAM_STAT_CHECK_CONDITION;
2473 + set_sense(&dev->fsa_dev[cid].sense_data,
2474 +- HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
2475 ++ ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE,
2476 + ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
2477 + memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2478 + min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2479 + SCSI_SENSE_BUFFERSIZE));
2480 + scsicmd->scsi_done(scsicmd);
2481 +- return 1;
2482 ++ return 0;
2483 + }
2484 +
2485 + dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
2486 +diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
2487 +index a284527999c55..65a5cd6a5f961 100644
2488 +--- a/drivers/scsi/aacraid/commsup.c
2489 ++++ b/drivers/scsi/aacraid/commsup.c
2490 +@@ -771,7 +771,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
2491 + hbacmd->request_id =
2492 + cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
2493 + fibptr->flags |= FIB_CONTEXT_FLAG_SCSI_CMD;
2494 +- } else if (command != HBA_IU_TYPE_SCSI_TM_REQ)
2495 ++ } else
2496 + return -EINVAL;
2497 +
2498 +
2499 +diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
2500 +index 053a31c5485f3..e0e728d0dd5e7 100644
2501 +--- a/drivers/scsi/aacraid/linit.c
2502 ++++ b/drivers/scsi/aacraid/linit.c
2503 +@@ -736,7 +736,11 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
2504 + status = aac_hba_send(HBA_IU_TYPE_SCSI_TM_REQ, fib,
2505 + (fib_callback) aac_hba_callback,
2506 + (void *) cmd);
2507 +-
2508 ++ if (status != -EINPROGRESS) {
2509 ++ aac_fib_complete(fib);
2510 ++ aac_fib_free(fib);
2511 ++ return ret;
2512 ++ }
2513 + /* Wait up to 15 secs for completion */
2514 + for (count = 0; count < 15; ++count) {
2515 + if (cmd->SCp.sent_command) {
2516 +@@ -915,11 +919,11 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd)
2517 +
2518 + info = &aac->hba_map[bus][cid];
2519 +
2520 +- if (info->devtype != AAC_DEVTYPE_NATIVE_RAW &&
2521 +- info->reset_state > 0)
2522 ++ if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW &&
2523 ++ !(info->reset_state > 0)))
2524 + return FAILED;
2525 +
2526 +- pr_err("%s: Host adapter reset request. SCSI hang ?\n",
2527 ++ pr_err("%s: Host device reset request. SCSI hang ?\n",
2528 + AAC_DRIVERNAME);
2529 +
2530 + fib = aac_fib_alloc(aac);
2531 +@@ -934,7 +938,12 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd)
2532 + status = aac_hba_send(command, fib,
2533 + (fib_callback) aac_tmf_callback,
2534 + (void *) info);
2535 +-
2536 ++ if (status != -EINPROGRESS) {
2537 ++ info->reset_state = 0;
2538 ++ aac_fib_complete(fib);
2539 ++ aac_fib_free(fib);
2540 ++ return ret;
2541 ++ }
2542 + /* Wait up to 15 seconds for completion */
2543 + for (count = 0; count < 15; ++count) {
2544 + if (info->reset_state == 0) {
2545 +@@ -973,11 +982,11 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd)
2546 +
2547 + info = &aac->hba_map[bus][cid];
2548 +
2549 +- if (info->devtype != AAC_DEVTYPE_NATIVE_RAW &&
2550 +- info->reset_state > 0)
2551 ++ if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW &&
2552 ++ !(info->reset_state > 0)))
2553 + return FAILED;
2554 +
2555 +- pr_err("%s: Host adapter reset request. SCSI hang ?\n",
2556 ++ pr_err("%s: Host target reset request. SCSI hang ?\n",
2557 + AAC_DRIVERNAME);
2558 +
2559 + fib = aac_fib_alloc(aac);
2560 +@@ -994,6 +1003,13 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd)
2561 + (fib_callback) aac_tmf_callback,
2562 + (void *) info);
2563 +
2564 ++ if (status != -EINPROGRESS) {
2565 ++ info->reset_state = 0;
2566 ++ aac_fib_complete(fib);
2567 ++ aac_fib_free(fib);
2568 ++ return ret;
2569 ++ }
2570 ++
2571 + /* Wait up to 15 seconds for completion */
2572 + for (count = 0; count < 15; ++count) {
2573 + if (info->reset_state <= 0) {
2574 +@@ -1046,7 +1062,7 @@ static int aac_eh_bus_reset(struct scsi_cmnd* cmd)
2575 + }
2576 + }
2577 +
2578 +- pr_err("%s: Host adapter reset request. SCSI hang ?\n", AAC_DRIVERNAME);
2579 ++ pr_err("%s: Host bus reset request. SCSI hang ?\n", AAC_DRIVERNAME);
2580 +
2581 + /*
2582 + * Check the health of the controller
2583 +diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
2584 +index d79ac0b24f5af..04c25ca2be45f 100644
2585 +--- a/drivers/scsi/fnic/fnic_scsi.c
2586 ++++ b/drivers/scsi/fnic/fnic_scsi.c
2587 +@@ -1034,7 +1034,8 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
2588 + atomic64_inc(&fnic_stats->io_stats.io_completions);
2589 +
2590 +
2591 +- io_duration_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time);
2592 ++ io_duration_time = jiffies_to_msecs(jiffies) -
2593 ++ jiffies_to_msecs(start_time);
2594 +
2595 + if(io_duration_time <= 10)
2596 + atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec);
2597 +diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
2598 +index 0e964ce75406b..669cf3553a77d 100644
2599 +--- a/drivers/scsi/libfc/fc_rport.c
2600 ++++ b/drivers/scsi/libfc/fc_rport.c
2601 +@@ -145,8 +145,10 @@ struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
2602 + size_t rport_priv_size = sizeof(*rdata);
2603 +
2604 + rdata = fc_rport_lookup(lport, port_id);
2605 +- if (rdata)
2606 ++ if (rdata) {
2607 ++ kref_put(&rdata->kref, fc_rport_destroy);
2608 + return rdata;
2609 ++ }
2610 +
2611 + if (lport->rport_priv_size > 0)
2612 + rport_priv_size = lport->rport_priv_size;
2613 +@@ -493,10 +495,11 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
2614 +
2615 + fc_rport_state_enter(rdata, RPORT_ST_DELETE);
2616 +
2617 +- kref_get(&rdata->kref);
2618 +- if (rdata->event == RPORT_EV_NONE &&
2619 +- !queue_work(rport_event_queue, &rdata->event_work))
2620 +- kref_put(&rdata->kref, fc_rport_destroy);
2621 ++ if (rdata->event == RPORT_EV_NONE) {
2622 ++ kref_get(&rdata->kref);
2623 ++ if (!queue_work(rport_event_queue, &rdata->event_work))
2624 ++ kref_put(&rdata->kref, fc_rport_destroy);
2625 ++ }
2626 +
2627 + rdata->event = event;
2628 + }
2629 +diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
2630 +index 601a4ee60de85..e193fa5963d82 100644
2631 +--- a/drivers/scsi/lpfc/lpfc_ct.c
2632 ++++ b/drivers/scsi/lpfc/lpfc_ct.c
2633 +@@ -1714,8 +1714,8 @@ lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
2634 + struct lpfc_fdmi_attr_entry *ae;
2635 + uint32_t size;
2636 +
2637 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2638 +- memset(ae, 0, sizeof(struct lpfc_name));
2639 ++ ae = &ad->AttrValue;
2640 ++ memset(ae, 0, sizeof(*ae));
2641 +
2642 + memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName,
2643 + sizeof(struct lpfc_name));
2644 +@@ -1731,8 +1731,8 @@ lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport,
2645 + struct lpfc_fdmi_attr_entry *ae;
2646 + uint32_t len, size;
2647 +
2648 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2649 +- memset(ae, 0, 256);
2650 ++ ae = &ad->AttrValue;
2651 ++ memset(ae, 0, sizeof(*ae));
2652 +
2653 + /* This string MUST be consistent with other FC platforms
2654 + * supported by Broadcom.
2655 +@@ -1756,8 +1756,8 @@ lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
2656 + struct lpfc_fdmi_attr_entry *ae;
2657 + uint32_t len, size;
2658 +
2659 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2660 +- memset(ae, 0, 256);
2661 ++ ae = &ad->AttrValue;
2662 ++ memset(ae, 0, sizeof(*ae));
2663 +
2664 + strncpy(ae->un.AttrString, phba->SerialNumber,
2665 + sizeof(ae->un.AttrString));
2666 +@@ -1778,8 +1778,8 @@ lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport,
2667 + struct lpfc_fdmi_attr_entry *ae;
2668 + uint32_t len, size;
2669 +
2670 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2671 +- memset(ae, 0, 256);
2672 ++ ae = &ad->AttrValue;
2673 ++ memset(ae, 0, sizeof(*ae));
2674 +
2675 + strncpy(ae->un.AttrString, phba->ModelName,
2676 + sizeof(ae->un.AttrString));
2677 +@@ -1799,8 +1799,8 @@ lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport,
2678 + struct lpfc_fdmi_attr_entry *ae;
2679 + uint32_t len, size;
2680 +
2681 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2682 +- memset(ae, 0, 256);
2683 ++ ae = &ad->AttrValue;
2684 ++ memset(ae, 0, sizeof(*ae));
2685 +
2686 + strncpy(ae->un.AttrString, phba->ModelDesc,
2687 + sizeof(ae->un.AttrString));
2688 +@@ -1822,8 +1822,8 @@ lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport,
2689 + struct lpfc_fdmi_attr_entry *ae;
2690 + uint32_t i, j, incr, size;
2691 +
2692 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2693 +- memset(ae, 0, 256);
2694 ++ ae = &ad->AttrValue;
2695 ++ memset(ae, 0, sizeof(*ae));
2696 +
2697 + /* Convert JEDEC ID to ascii for hardware version */
2698 + incr = vp->rev.biuRev;
2699 +@@ -1852,8 +1852,8 @@ lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport,
2700 + struct lpfc_fdmi_attr_entry *ae;
2701 + uint32_t len, size;
2702 +
2703 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2704 +- memset(ae, 0, 256);
2705 ++ ae = &ad->AttrValue;
2706 ++ memset(ae, 0, sizeof(*ae));
2707 +
2708 + strncpy(ae->un.AttrString, lpfc_release_version,
2709 + sizeof(ae->un.AttrString));
2710 +@@ -1874,8 +1874,8 @@ lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport,
2711 + struct lpfc_fdmi_attr_entry *ae;
2712 + uint32_t len, size;
2713 +
2714 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2715 +- memset(ae, 0, 256);
2716 ++ ae = &ad->AttrValue;
2717 ++ memset(ae, 0, sizeof(*ae));
2718 +
2719 + if (phba->sli_rev == LPFC_SLI_REV4)
2720 + lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
2721 +@@ -1899,8 +1899,8 @@ lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport,
2722 + struct lpfc_fdmi_attr_entry *ae;
2723 + uint32_t len, size;
2724 +
2725 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2726 +- memset(ae, 0, 256);
2727 ++ ae = &ad->AttrValue;
2728 ++ memset(ae, 0, sizeof(*ae));
2729 +
2730 + lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
2731 + len = strnlen(ae->un.AttrString,
2732 +@@ -1919,8 +1919,8 @@ lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport,
2733 + struct lpfc_fdmi_attr_entry *ae;
2734 + uint32_t len, size;
2735 +
2736 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2737 +- memset(ae, 0, 256);
2738 ++ ae = &ad->AttrValue;
2739 ++ memset(ae, 0, sizeof(*ae));
2740 +
2741 + snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s %s %s",
2742 + init_utsname()->sysname,
2743 +@@ -1942,7 +1942,7 @@ lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport,
2744 + struct lpfc_fdmi_attr_entry *ae;
2745 + uint32_t size;
2746 +
2747 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2748 ++ ae = &ad->AttrValue;
2749 +
2750 + ae->un.AttrInt = cpu_to_be32(LPFC_MAX_CT_SIZE);
2751 + size = FOURBYTES + sizeof(uint32_t);
2752 +@@ -1958,8 +1958,8 @@ lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport,
2753 + struct lpfc_fdmi_attr_entry *ae;
2754 + uint32_t len, size;
2755 +
2756 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2757 +- memset(ae, 0, 256);
2758 ++ ae = &ad->AttrValue;
2759 ++ memset(ae, 0, sizeof(*ae));
2760 +
2761 + len = lpfc_vport_symbolic_node_name(vport,
2762 + ae->un.AttrString, 256);
2763 +@@ -1977,7 +1977,7 @@ lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport,
2764 + struct lpfc_fdmi_attr_entry *ae;
2765 + uint32_t size;
2766 +
2767 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2768 ++ ae = &ad->AttrValue;
2769 +
2770 + /* Nothing is defined for this currently */
2771 + ae->un.AttrInt = cpu_to_be32(0);
2772 +@@ -1994,7 +1994,7 @@ lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport,
2773 + struct lpfc_fdmi_attr_entry *ae;
2774 + uint32_t size;
2775 +
2776 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2777 ++ ae = &ad->AttrValue;
2778 +
2779 + /* Each driver instance corresponds to a single port */
2780 + ae->un.AttrInt = cpu_to_be32(1);
2781 +@@ -2011,8 +2011,8 @@ lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport,
2782 + struct lpfc_fdmi_attr_entry *ae;
2783 + uint32_t size;
2784 +
2785 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2786 +- memset(ae, 0, sizeof(struct lpfc_name));
2787 ++ ae = &ad->AttrValue;
2788 ++ memset(ae, 0, sizeof(*ae));
2789 +
2790 + memcpy(&ae->un.AttrWWN, &vport->fabric_nodename,
2791 + sizeof(struct lpfc_name));
2792 +@@ -2030,8 +2030,8 @@ lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport,
2793 + struct lpfc_fdmi_attr_entry *ae;
2794 + uint32_t len, size;
2795 +
2796 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2797 +- memset(ae, 0, 256);
2798 ++ ae = &ad->AttrValue;
2799 ++ memset(ae, 0, sizeof(*ae));
2800 +
2801 + lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
2802 + len = strnlen(ae->un.AttrString,
2803 +@@ -2050,7 +2050,7 @@ lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport,
2804 + struct lpfc_fdmi_attr_entry *ae;
2805 + uint32_t size;
2806 +
2807 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2808 ++ ae = &ad->AttrValue;
2809 +
2810 + /* Driver doesn't have access to this information */
2811 + ae->un.AttrInt = cpu_to_be32(0);
2812 +@@ -2067,8 +2067,8 @@ lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport,
2813 + struct lpfc_fdmi_attr_entry *ae;
2814 + uint32_t len, size;
2815 +
2816 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2817 +- memset(ae, 0, 256);
2818 ++ ae = &ad->AttrValue;
2819 ++ memset(ae, 0, sizeof(*ae));
2820 +
2821 + strncpy(ae->un.AttrString, "EMULEX",
2822 + sizeof(ae->un.AttrString));
2823 +@@ -2089,8 +2089,8 @@ lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport,
2824 + struct lpfc_fdmi_attr_entry *ae;
2825 + uint32_t size;
2826 +
2827 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2828 +- memset(ae, 0, 32);
2829 ++ ae = &ad->AttrValue;
2830 ++ memset(ae, 0, sizeof(*ae));
2831 +
2832 + ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */
2833 + ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
2834 +@@ -2111,7 +2111,7 @@ lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport,
2835 + struct lpfc_fdmi_attr_entry *ae;
2836 + uint32_t size;
2837 +
2838 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2839 ++ ae = &ad->AttrValue;
2840 +
2841 + ae->un.AttrInt = 0;
2842 + if (!(phba->hba_flag & HBA_FCOE_MODE)) {
2843 +@@ -2161,7 +2161,7 @@ lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport,
2844 + struct lpfc_fdmi_attr_entry *ae;
2845 + uint32_t size;
2846 +
2847 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2848 ++ ae = &ad->AttrValue;
2849 +
2850 + if (!(phba->hba_flag & HBA_FCOE_MODE)) {
2851 + switch (phba->fc_linkspeed) {
2852 +@@ -2225,7 +2225,7 @@ lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport,
2853 + struct lpfc_fdmi_attr_entry *ae;
2854 + uint32_t size;
2855 +
2856 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2857 ++ ae = &ad->AttrValue;
2858 +
2859 + hsp = (struct serv_parm *)&vport->fc_sparam;
2860 + ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb) << 8) |
2861 +@@ -2245,8 +2245,8 @@ lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport,
2862 + struct lpfc_fdmi_attr_entry *ae;
2863 + uint32_t len, size;
2864 +
2865 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2866 +- memset(ae, 0, 256);
2867 ++ ae = &ad->AttrValue;
2868 ++ memset(ae, 0, sizeof(*ae));
2869 +
2870 + snprintf(ae->un.AttrString, sizeof(ae->un.AttrString),
2871 + "/sys/class/scsi_host/host%d", shost->host_no);
2872 +@@ -2266,8 +2266,8 @@ lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport,
2873 + struct lpfc_fdmi_attr_entry *ae;
2874 + uint32_t len, size;
2875 +
2876 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2877 +- memset(ae, 0, 256);
2878 ++ ae = &ad->AttrValue;
2879 ++ memset(ae, 0, sizeof(*ae));
2880 +
2881 + snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s",
2882 + init_utsname()->nodename);
2883 +@@ -2287,8 +2287,8 @@ lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport,
2884 + struct lpfc_fdmi_attr_entry *ae;
2885 + uint32_t size;
2886 +
2887 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2888 +- memset(ae, 0, sizeof(struct lpfc_name));
2889 ++ ae = &ad->AttrValue;
2890 ++ memset(ae, 0, sizeof(*ae));
2891 +
2892 + memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName,
2893 + sizeof(struct lpfc_name));
2894 +@@ -2305,8 +2305,8 @@ lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport,
2895 + struct lpfc_fdmi_attr_entry *ae;
2896 + uint32_t size;
2897 +
2898 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2899 +- memset(ae, 0, sizeof(struct lpfc_name));
2900 ++ ae = &ad->AttrValue;
2901 ++ memset(ae, 0, sizeof(*ae));
2902 +
2903 + memcpy(&ae->un.AttrWWN, &vport->fc_sparam.portName,
2904 + sizeof(struct lpfc_name));
2905 +@@ -2323,8 +2323,8 @@ lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport,
2906 + struct lpfc_fdmi_attr_entry *ae;
2907 + uint32_t len, size;
2908 +
2909 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2910 +- memset(ae, 0, 256);
2911 ++ ae = &ad->AttrValue;
2912 ++ memset(ae, 0, sizeof(*ae));
2913 +
2914 + len = lpfc_vport_symbolic_port_name(vport, ae->un.AttrString, 256);
2915 + len += (len & 3) ? (4 - (len & 3)) : 4;
2916 +@@ -2342,7 +2342,7 @@ lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport,
2917 + struct lpfc_fdmi_attr_entry *ae;
2918 + uint32_t size;
2919 +
2920 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2921 ++ ae = &ad->AttrValue;
2922 + if (phba->fc_topology == LPFC_TOPOLOGY_LOOP)
2923 + ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTTYPE_NLPORT);
2924 + else
2925 +@@ -2360,7 +2360,7 @@ lpfc_fdmi_port_attr_class(struct lpfc_vport *vport,
2926 + struct lpfc_fdmi_attr_entry *ae;
2927 + uint32_t size;
2928 +
2929 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2930 ++ ae = &ad->AttrValue;
2931 + ae->un.AttrInt = cpu_to_be32(FC_COS_CLASS2 | FC_COS_CLASS3);
2932 + size = FOURBYTES + sizeof(uint32_t);
2933 + ad->AttrLen = cpu_to_be16(size);
2934 +@@ -2375,8 +2375,8 @@ lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport,
2935 + struct lpfc_fdmi_attr_entry *ae;
2936 + uint32_t size;
2937 +
2938 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2939 +- memset(ae, 0, sizeof(struct lpfc_name));
2940 ++ ae = &ad->AttrValue;
2941 ++ memset(ae, 0, sizeof(*ae));
2942 +
2943 + memcpy(&ae->un.AttrWWN, &vport->fabric_portname,
2944 + sizeof(struct lpfc_name));
2945 +@@ -2393,8 +2393,8 @@ lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport,
2946 + struct lpfc_fdmi_attr_entry *ae;
2947 + uint32_t size;
2948 +
2949 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2950 +- memset(ae, 0, 32);
2951 ++ ae = &ad->AttrValue;
2952 ++ memset(ae, 0, sizeof(*ae));
2953 +
2954 + ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */
2955 + ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
2956 +@@ -2414,7 +2414,7 @@ lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport,
2957 + struct lpfc_fdmi_attr_entry *ae;
2958 + uint32_t size;
2959 +
2960 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2961 ++ ae = &ad->AttrValue;
2962 + /* Link Up - operational */
2963 + ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTSTATE_ONLINE);
2964 + size = FOURBYTES + sizeof(uint32_t);
2965 +@@ -2430,7 +2430,7 @@ lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport,
2966 + struct lpfc_fdmi_attr_entry *ae;
2967 + uint32_t size;
2968 +
2969 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2970 ++ ae = &ad->AttrValue;
2971 + vport->fdmi_num_disc = lpfc_find_map_node(vport);
2972 + ae->un.AttrInt = cpu_to_be32(vport->fdmi_num_disc);
2973 + size = FOURBYTES + sizeof(uint32_t);
2974 +@@ -2446,7 +2446,7 @@ lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport,
2975 + struct lpfc_fdmi_attr_entry *ae;
2976 + uint32_t size;
2977 +
2978 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2979 ++ ae = &ad->AttrValue;
2980 + ae->un.AttrInt = cpu_to_be32(vport->fc_myDID);
2981 + size = FOURBYTES + sizeof(uint32_t);
2982 + ad->AttrLen = cpu_to_be16(size);
2983 +@@ -2461,8 +2461,8 @@ lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport,
2984 + struct lpfc_fdmi_attr_entry *ae;
2985 + uint32_t len, size;
2986 +
2987 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2988 +- memset(ae, 0, 256);
2989 ++ ae = &ad->AttrValue;
2990 ++ memset(ae, 0, sizeof(*ae));
2991 +
2992 + strncpy(ae->un.AttrString, "Smart SAN Initiator",
2993 + sizeof(ae->un.AttrString));
2994 +@@ -2482,8 +2482,8 @@ lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport,
2995 + struct lpfc_fdmi_attr_entry *ae;
2996 + uint32_t size;
2997 +
2998 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2999 +- memset(ae, 0, 256);
3000 ++ ae = &ad->AttrValue;
3001 ++ memset(ae, 0, sizeof(*ae));
3002 +
3003 + memcpy(&ae->un.AttrString, &vport->fc_sparam.nodeName,
3004 + sizeof(struct lpfc_name));
3005 +@@ -2503,8 +2503,8 @@ lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport,
3006 + struct lpfc_fdmi_attr_entry *ae;
3007 + uint32_t len, size;
3008 +
3009 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
3010 +- memset(ae, 0, 256);
3011 ++ ae = &ad->AttrValue;
3012 ++ memset(ae, 0, sizeof(*ae));
3013 +
3014 + strncpy(ae->un.AttrString, "Smart SAN Version 2.0",
3015 + sizeof(ae->un.AttrString));
3016 +@@ -2525,8 +2525,8 @@ lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport,
3017 + struct lpfc_fdmi_attr_entry *ae;
3018 + uint32_t len, size;
3019 +
3020 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
3021 +- memset(ae, 0, 256);
3022 ++ ae = &ad->AttrValue;
3023 ++ memset(ae, 0, sizeof(*ae));
3024 +
3025 + strncpy(ae->un.AttrString, phba->ModelName,
3026 + sizeof(ae->un.AttrString));
3027 +@@ -2545,7 +2545,7 @@ lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport,
3028 + struct lpfc_fdmi_attr_entry *ae;
3029 + uint32_t size;
3030 +
3031 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
3032 ++ ae = &ad->AttrValue;
3033 +
3034 + /* SRIOV (type 3) is not supported */
3035 + if (vport->vpi)
3036 +@@ -2565,7 +2565,7 @@ lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport,
3037 + struct lpfc_fdmi_attr_entry *ae;
3038 + uint32_t size;
3039 +
3040 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
3041 ++ ae = &ad->AttrValue;
3042 + ae->un.AttrInt = cpu_to_be32(0);
3043 + size = FOURBYTES + sizeof(uint32_t);
3044 + ad->AttrLen = cpu_to_be16(size);
3045 +@@ -2580,7 +2580,7 @@ lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport,
3046 + struct lpfc_fdmi_attr_entry *ae;
3047 + uint32_t size;
3048 +
3049 +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
3050 ++ ae = &ad->AttrValue;
3051 + ae->un.AttrInt = cpu_to_be32(1);
3052 + size = FOURBYTES + sizeof(uint32_t);
3053 + ad->AttrLen = cpu_to_be16(size);
3054 +@@ -2728,7 +2728,8 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
3055 + /* Registered Port List */
3056 + /* One entry (port) per adapter */
3057 + rh->rpl.EntryCnt = cpu_to_be32(1);
3058 +- memcpy(&rh->rpl.pe, &phba->pport->fc_sparam.portName,
3059 ++ memcpy(&rh->rpl.pe.PortName,
3060 ++ &phba->pport->fc_sparam.portName,
3061 + sizeof(struct lpfc_name));
3062 +
3063 + /* point to the HBA attribute block */
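
The lpfc_ct.c hunks above drop the (struct lpfc_fdmi_attr_entry *) casts and replace the magic-number clears (256, 32, sizeof(struct lpfc_name)) with memset(ae, 0, sizeof(*ae)), so the zeroed region always matches the object's real size. A minimal userspace sketch of the idiom; the entry layout below is invented, not the lpfc one:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    /* Hypothetical attribute entry; stands in for the lpfc layout. */
    struct attr_entry {
        union {
            uint32_t val;
            uint8_t  str[64];
        } un;
    };

    int main(void)
    {
        struct attr_entry e;
        struct attr_entry *ae = &e;

        /*
         * sizeof(*ae) tracks the object's real size; a literal such
         * as 256 would write past this 64-byte union.
         */
        memset(ae, 0, sizeof(*ae));
        printf("cleared %zu bytes\n", sizeof(*ae));
        return 0;
    }
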
3064 +diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
3065 +index bdc1f184f67a3..12e033bbe607a 100644
3066 +--- a/drivers/scsi/lpfc/lpfc_hw.h
3067 ++++ b/drivers/scsi/lpfc/lpfc_hw.h
3068 +@@ -1326,25 +1326,8 @@ struct fc_rdp_res_frame {
3069 + /* lpfc_sli_ct_request defines the CT_IU preamble for FDMI commands */
3070 + #define SLI_CT_FDMI_Subtypes 0x10 /* Management Service Subtype */
3071 +
3072 +-/*
3073 +- * Registered Port List Format
3074 +- */
3075 +-struct lpfc_fdmi_reg_port_list {
3076 +- uint32_t EntryCnt;
3077 +- uint32_t pe; /* Variable-length array */
3078 +-};
3079 +-
3080 +-
3081 + /* Definitions for HBA / Port attribute entries */
3082 +
3083 +-struct lpfc_fdmi_attr_def { /* Defined in TLV format */
3084 +- /* Structure is in Big Endian format */
3085 +- uint32_t AttrType:16;
3086 +- uint32_t AttrLen:16;
3087 +- uint32_t AttrValue; /* Marks start of Value (ATTRIBUTE_ENTRY) */
3088 +-};
3089 +-
3090 +-
3091 + /* Attribute Entry */
3092 + struct lpfc_fdmi_attr_entry {
3093 + union {
3094 +@@ -1355,7 +1338,13 @@ struct lpfc_fdmi_attr_entry {
3095 + } un;
3096 + };
3097 +
3098 +-#define LPFC_FDMI_MAX_AE_SIZE sizeof(struct lpfc_fdmi_attr_entry)
3099 ++struct lpfc_fdmi_attr_def { /* Defined in TLV format */
3100 ++ /* Structure is in Big Endian format */
3101 ++ uint32_t AttrType:16;
3102 ++ uint32_t AttrLen:16;
3103 ++ /* Marks start of Value (ATTRIBUTE_ENTRY) */
3104 ++ struct lpfc_fdmi_attr_entry AttrValue;
3105 ++} __packed;
3106 +
3107 + /*
3108 + * HBA Attribute Block
3109 +@@ -1379,13 +1368,20 @@ struct lpfc_fdmi_hba_ident {
3110 + struct lpfc_name PortName;
3111 + };
3112 +
3113 ++/*
3114 ++ * Registered Port List Format
3115 ++ */
3116 ++struct lpfc_fdmi_reg_port_list {
3117 ++ uint32_t EntryCnt;
3118 ++ struct lpfc_fdmi_port_entry pe;
3119 ++} __packed;
3120 ++
3121 + /*
3122 + * Register HBA(RHBA)
3123 + */
3124 + struct lpfc_fdmi_reg_hba {
3125 + struct lpfc_fdmi_hba_ident hi;
3126 +- struct lpfc_fdmi_reg_port_list rpl; /* variable-length array */
3127 +-/* struct lpfc_fdmi_attr_block ab; */
3128 ++ struct lpfc_fdmi_reg_port_list rpl;
3129 + };
3130 +
3131 + /*
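
In lpfc_hw.h the uint32_t AttrValue placeholder, which every caller had to cast, becomes an embedded struct lpfc_fdmi_attr_entry, and the TLV containers are marked __packed so the on-wire layout stays fixed. A compilable sketch of the same pattern, with invented field sizes:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Invented sizes; stands in for struct lpfc_fdmi_attr_entry. */
    struct attr_entry {
        union {
            uint32_t AttrInt;
            uint8_t  AttrString[16];
        } un;
    };

    /* TLV: 16-bit type, 16-bit length, then the value, packed for the wire. */
    struct attr_def {
        uint16_t AttrType;
        uint16_t AttrLen;
        struct attr_entry AttrValue;    /* embedded value, no cast needed */
    } __attribute__((packed));

    int main(void)
    {
        struct attr_def ad = { 0 };

        ad.AttrValue.un.AttrInt = 1;    /* typed access replaces the old cast */
        printf("value at offset %zu, total %zu bytes\n",
               offsetof(struct attr_def, AttrValue),
               sizeof(struct attr_def));
        return 0;
    }
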
3132 +diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
3133 +index 480d2d467f7a6..45445dafc80cf 100644
3134 +--- a/drivers/scsi/lpfc/lpfc_sli.c
3135 ++++ b/drivers/scsi/lpfc/lpfc_sli.c
3136 +@@ -17038,6 +17038,10 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
3137 + list_add_tail(&iocbq->list, &first_iocbq->list);
3138 + }
3139 + }
3140 ++ /* Free the sequence's header buffer */
3141 ++ if (!first_iocbq)
3142 ++ lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
3143 ++
3144 + return first_iocbq;
3145 + }
3146 +
3147 +diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
3148 +index fb6439bc1d9a9..4d7971c3f339b 100644
3149 +--- a/drivers/scsi/qedi/qedi_iscsi.c
3150 ++++ b/drivers/scsi/qedi/qedi_iscsi.c
3151 +@@ -1072,6 +1072,9 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
3152 + break;
3153 + }
3154 +
3155 ++ if (!abrt_conn)
3156 ++ wait_delay += qedi->pf_params.iscsi_pf_params.two_msl_timer;
3157 ++
3158 + qedi_ep->state = EP_STATE_DISCONN_START;
3159 + ret = qedi_ops->destroy_conn(qedi->cdev, qedi_ep->handle, abrt_conn);
3160 + if (ret) {
3161 +diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
3162 +index ea145bafb880a..8ff8843df5141 100644
3163 +--- a/drivers/staging/media/imx/imx-media-capture.c
3164 ++++ b/drivers/staging/media/imx/imx-media-capture.c
3165 +@@ -685,7 +685,7 @@ int imx_media_capture_device_register(struct imx_media_video_dev *vdev)
3166 + /* setup default format */
3167 + fmt_src.pad = priv->src_sd_pad;
3168 + fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE;
3169 +- v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt_src);
3170 ++ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt_src);
3171 + if (ret) {
3172 + v4l2_err(sd, "failed to get src_sd format\n");
3173 + goto unreg;
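
The imx-media-capture fix is a classic unchecked-call bug: ret was tested but never assigned from the get_fmt call, so the error branch acted on a stale value. A tiny sketch of the before/after shape; get_fmt here is a stand-in, not the V4L2 API:

    #include <stdio.h>

    /* Stand-in for a subdev op that can fail. */
    static int get_fmt(int *fmt)
    {
        *fmt = 0;
        return -1;              /* pretend the query failed */
    }

    int main(void)
    {
        int fmt;
        int ret;

        /* Buggy shape: the call's status was thrown away:
         *     get_fmt(&fmt);
         *     if (ret) ...     <- tests a stale value
         */
        ret = get_fmt(&fmt);    /* fixed shape: capture the status */
        if (ret) {
            fprintf(stderr, "failed to get format: %d\n", ret);
            return 1;
        }
        printf("format = %d\n", fmt);
        return 0;
    }
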
3174 +diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
3175 +index afb9dadc1cfe9..77685bae21eda 100644
3176 +--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
3177 ++++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
3178 +@@ -1541,21 +1541,14 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe)
3179 +
3180 + /* Allocate new skb for releasing to upper layer */
3181 + sub_skb = dev_alloc_skb(nSubframe_Length + 12);
3182 +- if (sub_skb) {
3183 +- skb_reserve(sub_skb, 12);
3184 +- skb_put_data(sub_skb, pdata, nSubframe_Length);
3185 +- } else {
3186 +- sub_skb = skb_clone(prframe->pkt, GFP_ATOMIC);
3187 +- if (sub_skb) {
3188 +- sub_skb->data = pdata;
3189 +- sub_skb->len = nSubframe_Length;
3190 +- skb_set_tail_pointer(sub_skb, nSubframe_Length);
3191 +- } else {
3192 +- DBG_88E("skb_clone() Fail!!! , nr_subframes=%d\n", nr_subframes);
3193 +- break;
3194 +- }
3195 ++ if (!sub_skb) {
3196 ++ DBG_88E("dev_alloc_skb() Fail!!! , nr_subframes=%d\n", nr_subframes);
3197 ++ break;
3198 + }
3199 +
3200 ++ skb_reserve(sub_skb, 12);
3201 ++ skb_put_data(sub_skb, pdata, nSubframe_Length);
3202 ++
3203 + subframes[nr_subframes++] = sub_skb;
3204 +
3205 + if (nr_subframes >= MAX_SUBFRAME_COUNT) {
3206 +diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
3207 +index d6b790510c94e..8d46bd612888f 100644
3208 +--- a/drivers/tty/serial/8250/8250_core.c
3209 ++++ b/drivers/tty/serial/8250/8250_core.c
3210 +@@ -1065,8 +1065,10 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
3211 + serial8250_apply_quirks(uart);
3212 + ret = uart_add_one_port(&serial8250_reg,
3213 + &uart->port);
3214 +- if (ret == 0)
3215 +- ret = uart->port.line;
3216 ++ if (ret)
3217 ++ goto err;
3218 ++
3219 ++ ret = uart->port.line;
3220 + } else {
3221 + dev_info(uart->port.dev,
3222 + "skipping CIR port at 0x%lx / 0x%llx, IRQ %d\n",
3223 +@@ -1091,6 +1093,11 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
3224 + mutex_unlock(&serial_mutex);
3225 +
3226 + return ret;
3227 ++
3228 ++err:
3229 ++ uart->port.dev = NULL;
3230 ++ mutex_unlock(&serial_mutex);
3231 ++ return ret;
3232 + }
3233 + EXPORT_SYMBOL(serial8250_register_8250_port);
3234 +
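
The 8250 core change turns a half-handled uart_add_one_port() failure into a real error path that clears uart->port.dev and still drops the mutex. A userspace sketch of that single-exit cleanup shape using pthreads; all names are invented:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    struct port { void *dev; int line; };

    static int add_port(struct port *p, int fail)
    {
        int ret;

        pthread_mutex_lock(&lock);
        p->dev = &lock;              /* pretend we bound a device */

        ret = fail ? -1 : 0;         /* stand-in for the add call */
        if (ret)
            goto err;

        ret = p->line;               /* success: report the port number */
        pthread_mutex_unlock(&lock);
        return ret;

    err:
        p->dev = NULL;               /* undo the partial registration */
        pthread_mutex_unlock(&lock); /* never return with the lock held */
        return ret;
    }

    int main(void)
    {
        struct port p = { .dev = NULL, .line = 3 };

        printf("ok=%d fail=%d\n", add_port(&p, 0), add_port(&p, 1));
        return 0;
    }
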
3235 +diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
3236 +index da04ba1ecf68a..726852ebef855 100644
3237 +--- a/drivers/tty/serial/8250/8250_omap.c
3238 ++++ b/drivers/tty/serial/8250/8250_omap.c
3239 +@@ -773,7 +773,10 @@ static void __dma_rx_do_complete(struct uart_8250_port *p)
3240 + dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
3241 +
3242 + count = dma->rx_size - state.residue;
3243 +-
3244 ++ if (count < dma->rx_size)
3245 ++ dmaengine_terminate_async(dma->rxchan);
3246 ++ if (!count)
3247 ++ goto unlock;
3248 + ret = tty_insert_flip_string(tty_port, dma->rx_buf, count);
3249 +
3250 + p->port.icount.rx += ret;
3251 +@@ -833,7 +836,6 @@ static void omap_8250_rx_dma_flush(struct uart_8250_port *p)
3252 + spin_unlock_irqrestore(&priv->rx_dma_lock, flags);
3253 +
3254 + __dma_rx_do_complete(p);
3255 +- dmaengine_terminate_all(dma->rxchan);
3256 + }
3257 +
3258 + static int omap_8250_rx_dma(struct uart_8250_port *p)
3259 +@@ -1216,11 +1218,11 @@ static int omap8250_probe(struct platform_device *pdev)
3260 + spin_lock_init(&priv->rx_dma_lock);
3261 +
3262 + device_init_wakeup(&pdev->dev, true);
3263 ++ pm_runtime_enable(&pdev->dev);
3264 + pm_runtime_use_autosuspend(&pdev->dev);
3265 + pm_runtime_set_autosuspend_delay(&pdev->dev, -1);
3266 +
3267 + pm_runtime_irq_safe(&pdev->dev);
3268 +- pm_runtime_enable(&pdev->dev);
3269 +
3270 + pm_runtime_get_sync(&pdev->dev);
3271 +
3272 +diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
3273 +index 07d5925791e1c..9880a50d664fc 100644
3274 +--- a/drivers/tty/serial/8250/8250_port.c
3275 ++++ b/drivers/tty/serial/8250/8250_port.c
3276 +@@ -1865,6 +1865,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
3277 + unsigned char status;
3278 + unsigned long flags;
3279 + struct uart_8250_port *up = up_to_u8250p(port);
3280 ++ bool skip_rx = false;
3281 +
3282 + if (iir & UART_IIR_NO_INT)
3283 + return 0;
3284 +@@ -1873,7 +1874,20 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
3285 +
3286 + status = serial_port_in(port, UART_LSR);
3287 +
3288 +- if (status & (UART_LSR_DR | UART_LSR_BI)) {
3289 ++ /*
3290 ++ * If port is stopped and there are no error conditions in the
3291 ++ * FIFO, then don't drain the FIFO, as this may lead to TTY buffer
3292 ++ * overflow. Not servicing the RX FIFO would trigger auto HW flow
3293 ++ * control when FIFO occupancy reaches preset threshold, thus
3294 ++ * halting RX. This only works when auto HW flow control is
3295 ++ * available.
3296 ++ */
3297 ++ if (!(status & (UART_LSR_FIFOE | UART_LSR_BRK_ERROR_BITS)) &&
3298 ++ (port->status & (UPSTAT_AUTOCTS | UPSTAT_AUTORTS)) &&
3299 ++ !(port->read_status_mask & UART_LSR_DR))
3300 ++ skip_rx = true;
3301 ++
3302 ++ if (status & (UART_LSR_DR | UART_LSR_BI) && !skip_rx) {
3303 + if (!up->dma || handle_rx_dma(up, iir))
3304 + status = serial8250_rx_chars(up, status);
3305 + }
3306 +diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
3307 +index c67d39fea74ca..70d29b697e822 100644
3308 +--- a/drivers/tty/serial/samsung.c
3309 ++++ b/drivers/tty/serial/samsung.c
3310 +@@ -1165,14 +1165,14 @@ static unsigned int s3c24xx_serial_getclk(struct s3c24xx_uart_port *ourport,
3311 + struct s3c24xx_uart_info *info = ourport->info;
3312 + struct clk *clk;
3313 + unsigned long rate;
3314 +- unsigned int cnt, baud, quot, clk_sel, best_quot = 0;
3315 ++ unsigned int cnt, baud, quot, best_quot = 0;
3316 + char clkname[MAX_CLK_NAME_LENGTH];
3317 + int calc_deviation, deviation = (1 << 30) - 1;
3318 +
3319 +- clk_sel = (ourport->cfg->clk_sel) ? ourport->cfg->clk_sel :
3320 +- ourport->info->def_clk_sel;
3321 + for (cnt = 0; cnt < info->num_clks; cnt++) {
3322 +- if (!(clk_sel & (1 << cnt)))
3323 ++ /* Keep selected clock if provided */
3324 ++ if (ourport->cfg->clk_sel &&
3325 ++ !(ourport->cfg->clk_sel & (1 << cnt)))
3326 + continue;
3327 +
3328 + sprintf(clkname, "clk_uart_baud%d", cnt);
3329 +diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
3330 +index 81657f09761cd..00a740b8ad273 100644
3331 +--- a/drivers/tty/serial/xilinx_uartps.c
3332 ++++ b/drivers/tty/serial/xilinx_uartps.c
3333 +@@ -1282,6 +1282,7 @@ static int cdns_uart_console_setup(struct console *co, char *options)
3334 + int bits = 8;
3335 + int parity = 'n';
3336 + int flow = 'n';
3337 ++ unsigned long time_out;
3338 +
3339 + if (co->index < 0 || co->index >= CDNS_UART_NR_PORTS)
3340 + return -EINVAL;
3341 +@@ -1295,6 +1296,13 @@ static int cdns_uart_console_setup(struct console *co, char *options)
3342 + if (options)
3343 + uart_parse_options(options, &baud, &parity, &bits, &flow);
3344 +
3345 ++ /* Wait for tx_empty before setting up the console */
3346 ++ time_out = jiffies + usecs_to_jiffies(TX_TIMEOUT);
3347 ++
3348 ++ while (time_before(jiffies, time_out) &&
3349 ++ cdns_uart_tx_empty(port) != TIOCSER_TEMT)
3350 ++ cpu_relax();
3351 ++
3352 + return uart_set_options(port, co, baud, parity, bits, flow);
3353 + }
3354 +
3355 +diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c
3356 +index ef01d24858cd6..4e795353192bc 100644
3357 +--- a/drivers/tty/vcc.c
3358 ++++ b/drivers/tty/vcc.c
3359 +@@ -612,6 +612,7 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
3360 + port->index = vcc_table_add(port);
3361 + if (port->index == -1) {
3362 + pr_err("VCC: no more TTY indices left for allocation\n");
3363 ++ rv = -ENOMEM;
3364 + goto free_ldc;
3365 + }
3366 +
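
The vcc fix sets rv = -ENOMEM before jumping to cleanup; previously the function could take the failure path while still carrying the earlier success value. A minimal illustration:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int probe(size_t n)
    {
        int rv = 0;
        char *buf = malloc(n);

        if (!buf) {
            rv = -ENOMEM;   /* without this we would "fail" with rv == 0 */
            goto out;
        }
        /* ... use buf ... */
        free(buf);
    out:
        return rv;
    }

    int main(void)
    {
        printf("small: %d, huge: %d\n", probe(16), probe((size_t)-1));
        return 0;
    }
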
3367 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
3368 +index 4149d751719e3..4a42368734644 100644
3369 +--- a/drivers/usb/dwc3/gadget.c
3370 ++++ b/drivers/usb/dwc3/gadget.c
3371 +@@ -276,7 +276,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
3372 + {
3373 + const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
3374 + struct dwc3 *dwc = dep->dwc;
3375 +- u32 timeout = 1000;
3376 ++ u32 timeout = 5000;
3377 + u32 saved_config = 0;
3378 + u32 reg;
3379 +
3380 +diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
3381 +index 849806a75f1ce..b29610899c9f6 100644
3382 +--- a/drivers/usb/host/ehci-mv.c
3383 ++++ b/drivers/usb/host/ehci-mv.c
3384 +@@ -196,12 +196,10 @@ static int mv_ehci_probe(struct platform_device *pdev)
3385 + hcd->rsrc_len = resource_size(r);
3386 + hcd->regs = ehci_mv->op_regs;
3387 +
3388 +- hcd->irq = platform_get_irq(pdev, 0);
3389 +- if (!hcd->irq) {
3390 +- dev_err(&pdev->dev, "Cannot get irq.");
3391 +- retval = -ENODEV;
3392 ++ retval = platform_get_irq(pdev, 0);
3393 ++ if (retval < 0)
3394 + goto err_disable_clk;
3395 +- }
3396 ++ hcd->irq = retval;
3397 +
3398 + ehci = hcd_to_ehci(hcd);
3399 + ehci->caps = (struct ehci_caps *) ehci_mv->cap_regs;
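
platform_get_irq() reports failure as a negative errno, so the old "if (!hcd->irq)" test both missed real errors and substituted -ENODEV for the actual one; the fix checks retval < 0 and only then treats the value as an IRQ number. A sketch with a stand-in lookup function:

    #include <errno.h>
    #include <stdio.h>

    /* Stand-in: returns an IRQ number >= 0, or a negative errno. */
    static int get_irq(int present)
    {
        return present ? 42 : -ENXIO;
    }

    int main(void)
    {
        int ret = get_irq(0);

        if (ret < 0) {                  /* not "if (!ret)" */
            fprintf(stderr, "no irq: %d\n", ret);
            return 1;
        }
        printf("irq = %d\n", ret);      /* only now a valid number */
        return 0;
    }
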
3400 +diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
3401 +index 794dc90aa5c95..ac1c54bcfe8fb 100644
3402 +--- a/drivers/vfio/pci/vfio_pci.c
3403 ++++ b/drivers/vfio/pci/vfio_pci.c
3404 +@@ -399,6 +399,19 @@ static void vfio_pci_release(void *device_data)
3405 + if (!(--vdev->refcnt)) {
3406 + vfio_spapr_pci_eeh_release(vdev->pdev);
3407 + vfio_pci_disable(vdev);
3408 ++ mutex_lock(&vdev->igate);
3409 ++ if (vdev->err_trigger) {
3410 ++ eventfd_ctx_put(vdev->err_trigger);
3411 ++ vdev->err_trigger = NULL;
3412 ++ }
3413 ++ mutex_unlock(&vdev->igate);
3414 ++
3415 ++ mutex_lock(&vdev->igate);
3416 ++ if (vdev->req_trigger) {
3417 ++ eventfd_ctx_put(vdev->req_trigger);
3418 ++ vdev->req_trigger = NULL;
3419 ++ }
3420 ++ mutex_unlock(&vdev->igate);
3421 + }
3422 +
3423 + mutex_unlock(&driver_lock);
3424 +diff --git a/fs/block_dev.c b/fs/block_dev.c
3425 +index 77ce77a283247..23fb999b49e15 100644
3426 +--- a/fs/block_dev.c
3427 ++++ b/fs/block_dev.c
3428 +@@ -1777,6 +1777,16 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
3429 + struct gendisk *disk = bdev->bd_disk;
3430 + struct block_device *victim = NULL;
3431 +
3432 ++ /*
3433 ++ * Sync early if it looks like we're the last one. If someone else
3434 ++ * opens the block device between now and the decrement of bd_openers
3435 ++ * then we did a sync that we didn't need to, but that's not the end
3436 ++ * of the world and we want to avoid long (could be several minute)
3437 ++ * syncs while holding the mutex.
3438 ++ */
3439 ++ if (bdev->bd_openers == 1)
3440 ++ sync_blockdev(bdev);
3441 ++
3442 + mutex_lock_nested(&bdev->bd_mutex, for_part);
3443 + if (for_part)
3444 + bdev->bd_part_count--;
3445 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
3446 +index cb46ad4b2b0d1..00481cfe6cfce 100644
3447 +--- a/fs/btrfs/extent-tree.c
3448 ++++ b/fs/btrfs/extent-tree.c
3449 +@@ -9364,8 +9364,6 @@ out:
3450 + */
3451 + if (!for_reloc && root_dropped == false)
3452 + btrfs_add_dead_root(root);
3453 +- if (err && err != -EAGAIN)
3454 +- btrfs_handle_fs_error(fs_info, err, NULL);
3455 + return err;
3456 + }
3457 +
3458 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
3459 +index 17856e92b93d1..c9e7b92d0f212 100644
3460 +--- a/fs/btrfs/inode.c
3461 ++++ b/fs/btrfs/inode.c
3462 +@@ -9204,20 +9204,17 @@ again:
3463 + /*
3464 + * Qgroup reserved space handler
3465 + * Page here will be either
3466 +- * 1) Already written to disk
3467 +- * In this case, its reserved space is released from data rsv map
3468 +- * and will be freed by delayed_ref handler finally.
3469 +- * So even we call qgroup_free_data(), it won't decrease reserved
3470 +- * space.
3471 +- * 2) Not written to disk
3472 +- * This means the reserved space should be freed here. However,
3473 +- * if a truncate invalidates the page (by clearing PageDirty)
3474 +- * and the page is accounted for while allocating extent
3475 +- * in btrfs_check_data_free_space() we let delayed_ref to
3476 +- * free the entire extent.
3477 ++ * 1) Already written to disk or ordered extent already submitted
3478 ++ * Then its QGROUP_RESERVED bit in io_tree is already cleaned.
3479 ++ * Qgroup will be handled by its qgroup_record then.
3480 ++ * btrfs_qgroup_free_data() call will do nothing here.
3481 ++ *
3482 ++ * 2) Not written to disk yet
3483 ++ * Then btrfs_qgroup_free_data() call will clear the QGROUP_RESERVED
3484 ++ * bit of its io_tree, and free the qgroup reserved data space.
3485 ++ * Since the IO will never happen for this page.
3486 + */
3487 +- if (PageDirty(page))
3488 +- btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE);
3489 ++ btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE);
3490 + if (!inode_evicting) {
3491 + clear_extent_bit(tree, page_start, page_end,
3492 + EXTENT_LOCKED | EXTENT_DIRTY |
3493 +diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
3494 +index 589cfe3ed873b..ce94d09f6abf9 100644
3495 +--- a/fs/ceph/caps.c
3496 ++++ b/fs/ceph/caps.c
3497 +@@ -1906,12 +1906,24 @@ ack:
3498 + if (mutex_trylock(&session->s_mutex) == 0) {
3499 + dout("inverting session/ino locks on %p\n",
3500 + session);
3501 ++ session = ceph_get_mds_session(session);
3502 + spin_unlock(&ci->i_ceph_lock);
3503 + if (took_snap_rwsem) {
3504 + up_read(&mdsc->snap_rwsem);
3505 + took_snap_rwsem = 0;
3506 + }
3507 +- mutex_lock(&session->s_mutex);
3508 ++ if (session) {
3509 ++ mutex_lock(&session->s_mutex);
3510 ++ ceph_put_mds_session(session);
3511 ++ } else {
3512 ++ /*
3513 ++ * Because we take the reference while
3514 ++ * holding the i_ceph_lock, it should
3515 ++ * never be NULL. Throw a warning if it
3516 ++ * ever is.
3517 ++ */
3518 ++ WARN_ON_ONCE(true);
3519 ++ }
3520 + goto retry;
3521 + }
3522 + }
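
The ceph fix pins the session with a counted reference while i_ceph_lock is still held, so the session cannot go away in the window between dropping the spinlock and taking s_mutex. A userspace sketch of that take-ref-then-invert-locks ordering (simplified: nothing is actually freed here, and the lock types are ordinary pthread mutexes):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct session {
        atomic_int ref;
        pthread_mutex_t s_mutex;
    };

    static pthread_mutex_t i_lock = PTHREAD_MUTEX_INITIALIZER;

    static struct session *session_get(struct session *s)
    {
        atomic_fetch_add(&s->ref, 1);
        return s;
    }

    static void session_put(struct session *s)
    {
        if (atomic_fetch_sub(&s->ref, 1) == 1)
            printf("last reference dropped\n"); /* kernel would free here */
    }

    static void inverted_lock(struct session *s)
    {
        pthread_mutex_lock(&i_lock);
        /* Pin the session *before* letting go of the lock protecting it. */
        session_get(s);
        pthread_mutex_unlock(&i_lock);

        pthread_mutex_lock(&s->s_mutex);    /* safe: we hold a reference */
        session_put(s);                     /* main's ref keeps s alive */
        pthread_mutex_unlock(&s->s_mutex);
    }

    int main(void)
    {
        struct session s = { .ref = 1,
                             .s_mutex = PTHREAD_MUTEX_INITIALIZER };
        inverted_lock(&s);
        session_put(&s);
        return 0;
    }
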
3523 +diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
3524 +index 600bb838c15b8..f166fcb48ac0e 100644
3525 +--- a/fs/cifs/cifsglob.h
3526 ++++ b/fs/cifs/cifsglob.h
3527 +@@ -246,8 +246,9 @@ struct smb_version_operations {
3528 + int (*check_message)(char *, unsigned int, struct TCP_Server_Info *);
3529 + bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
3530 + int (*handle_cancelled_mid)(char *, struct TCP_Server_Info *);
3531 +- void (*downgrade_oplock)(struct TCP_Server_Info *,
3532 +- struct cifsInodeInfo *, bool);
3533 ++ void (*downgrade_oplock)(struct TCP_Server_Info *server,
3534 ++ struct cifsInodeInfo *cinode, __u32 oplock,
3535 ++ unsigned int epoch, bool *purge_cache);
3536 + /* process transaction2 response */
3537 + bool (*check_trans2)(struct mid_q_entry *, struct TCP_Server_Info *,
3538 + char *, int);
3539 +@@ -1092,6 +1093,8 @@ struct cifsFileInfo {
3540 + unsigned int f_flags;
3541 + bool invalidHandle:1; /* file closed via session abend */
3542 + bool oplock_break_cancelled:1;
3543 ++ unsigned int oplock_epoch; /* epoch from the lease break */
3544 ++ __u32 oplock_level; /* oplock/lease level from the lease break */
3545 + int count;
3546 + spinlock_t file_info_lock; /* protects four flag/count fields above */
3547 + struct mutex fh_mutex; /* prevents reopen race after dead ses*/
3548 +@@ -1223,7 +1226,7 @@ struct cifsInodeInfo {
3549 + unsigned int epoch; /* used to track lease state changes */
3550 + #define CIFS_INODE_PENDING_OPLOCK_BREAK (0) /* oplock break in progress */
3551 + #define CIFS_INODE_PENDING_WRITERS (1) /* Writes in progress */
3552 +-#define CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2 (2) /* Downgrade oplock to L2 */
3553 ++#define CIFS_INODE_FLAG_UNUSED (2) /* Unused flag */
3554 + #define CIFS_INO_DELETE_PENDING (3) /* delete pending on server */
3555 + #define CIFS_INO_INVALID_MAPPING (4) /* pagecache is invalid */
3556 + #define CIFS_INO_LOCK (5) /* lock bit for synchronization */
3557 +diff --git a/fs/cifs/file.c b/fs/cifs/file.c
3558 +index 72e7cbfb325a6..6c77a96437e61 100644
3559 +--- a/fs/cifs/file.c
3560 ++++ b/fs/cifs/file.c
3561 +@@ -3753,7 +3753,8 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
3562 + break;
3563 +
3564 + __SetPageLocked(page);
3565 +- if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
3566 ++ rc = add_to_page_cache_locked(page, mapping, page->index, gfp);
3567 ++ if (rc) {
3568 + __ClearPageLocked(page);
3569 + break;
3570 + }
3571 +@@ -3769,6 +3770,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
3572 + struct list_head *page_list, unsigned num_pages)
3573 + {
3574 + int rc;
3575 ++ int err = 0;
3576 + struct list_head tmplist;
3577 + struct cifsFileInfo *open_file = file->private_data;
3578 + struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
3579 +@@ -3809,7 +3811,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
3580 + * the order of declining indexes. When we put the pages in
3581 + * the rdata->pages, then we want them in increasing order.
3582 + */
3583 +- while (!list_empty(page_list)) {
3584 ++ while (!list_empty(page_list) && !err) {
3585 + unsigned int i, nr_pages, bytes, rsize;
3586 + loff_t offset;
3587 + struct page *page, *tpage;
3588 +@@ -3832,9 +3834,10 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
3589 + return 0;
3590 + }
3591 +
3592 +- rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
3593 ++ nr_pages = 0;
3594 ++ err = readpages_get_pages(mapping, page_list, rsize, &tmplist,
3595 + &nr_pages, &offset, &bytes);
3596 +- if (rc) {
3597 ++ if (!nr_pages) {
3598 + add_credits_and_wake_if(server, credits, 0);
3599 + break;
3600 + }
3601 +@@ -4135,12 +4138,13 @@ void cifs_oplock_break(struct work_struct *work)
3602 + struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
3603 + struct TCP_Server_Info *server = tcon->ses->server;
3604 + int rc = 0;
3605 ++ bool purge_cache = false;
3606 +
3607 + wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
3608 + TASK_UNINTERRUPTIBLE);
3609 +
3610 +- server->ops->downgrade_oplock(server, cinode,
3611 +- test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));
3612 ++ server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
3613 ++ cfile->oplock_epoch, &purge_cache);
3614 +
3615 + if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
3616 + cifs_has_mand_locks(cinode)) {
3617 +@@ -4155,18 +4159,21 @@ void cifs_oplock_break(struct work_struct *work)
3618 + else
3619 + break_lease(inode, O_WRONLY);
3620 + rc = filemap_fdatawrite(inode->i_mapping);
3621 +- if (!CIFS_CACHE_READ(cinode)) {
3622 ++ if (!CIFS_CACHE_READ(cinode) || purge_cache) {
3623 + rc = filemap_fdatawait(inode->i_mapping);
3624 + mapping_set_error(inode->i_mapping, rc);
3625 + cifs_zap_mapping(inode);
3626 + }
3627 + cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
3628 ++ if (CIFS_CACHE_WRITE(cinode))
3629 ++ goto oplock_break_ack;
3630 + }
3631 +
3632 + rc = cifs_push_locks(cfile);
3633 + if (rc)
3634 + cifs_dbg(VFS, "Push locks rc = %d\n", rc);
3635 +
3636 ++oplock_break_ack:
3637 + /*
3638 + * releasing stale oplock after recent reconnect of smb session using
3639 + * a now incorrect file handle is not a data integrity issue but do
3640 +diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
3641 +index 76f1649ab444f..d0e024856c0d4 100644
3642 +--- a/fs/cifs/misc.c
3643 ++++ b/fs/cifs/misc.c
3644 +@@ -473,21 +473,10 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
3645 + set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
3646 + &pCifsInode->flags);
3647 +
3648 +- /*
3649 +- * Set flag if the server downgrades the oplock
3650 +- * to L2 else clear.
3651 +- */
3652 +- if (pSMB->OplockLevel)
3653 +- set_bit(
3654 +- CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
3655 +- &pCifsInode->flags);
3656 +- else
3657 +- clear_bit(
3658 +- CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
3659 +- &pCifsInode->flags);
3660 +-
3661 +- cifs_queue_oplock_break(netfile);
3662 ++ netfile->oplock_epoch = 0;
3663 ++ netfile->oplock_level = pSMB->OplockLevel;
3664 + netfile->oplock_break_cancelled = false;
3665 ++ cifs_queue_oplock_break(netfile);
3666 +
3667 + spin_unlock(&tcon->open_file_lock);
3668 + spin_unlock(&cifs_tcp_ses_lock);
3669 +diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
3670 +index 483458340b10c..9b271ae641c19 100644
3671 +--- a/fs/cifs/smb1ops.c
3672 ++++ b/fs/cifs/smb1ops.c
3673 +@@ -379,12 +379,10 @@ coalesce_t2(char *second_buf, struct smb_hdr *target_hdr)
3674 +
3675 + static void
3676 + cifs_downgrade_oplock(struct TCP_Server_Info *server,
3677 +- struct cifsInodeInfo *cinode, bool set_level2)
3678 ++ struct cifsInodeInfo *cinode, __u32 oplock,
3679 ++ unsigned int epoch, bool *purge_cache)
3680 + {
3681 +- if (set_level2)
3682 +- cifs_set_oplock_level(cinode, OPLOCK_READ);
3683 +- else
3684 +- cifs_set_oplock_level(cinode, 0);
3685 ++ cifs_set_oplock_level(cinode, oplock);
3686 + }
3687 +
3688 + static bool
3689 +diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
3690 +index ff2ad15f67d63..0c6e5450ff765 100644
3691 +--- a/fs/cifs/smb2misc.c
3692 ++++ b/fs/cifs/smb2misc.c
3693 +@@ -496,7 +496,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
3694 +
3695 + cifs_dbg(FYI, "found in the open list\n");
3696 + cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
3697 +- le32_to_cpu(rsp->NewLeaseState));
3698 ++ lease_state);
3699 +
3700 + if (ack_req)
3701 + cfile->oplock_break_cancelled = false;
3702 +@@ -505,17 +505,8 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
3703 +
3704 + set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
3705 +
3706 +- /*
3707 +- * Set or clear flags depending on the lease state being READ.
3708 +- * HANDLE caching flag should be added when the client starts
3709 +- * to defer closing remote file handles with HANDLE leases.
3710 +- */
3711 +- if (lease_state & SMB2_LEASE_READ_CACHING_HE)
3712 +- set_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
3713 +- &cinode->flags);
3714 +- else
3715 +- clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
3716 +- &cinode->flags);
3717 ++ cfile->oplock_epoch = le16_to_cpu(rsp->Epoch);
3718 ++ cfile->oplock_level = lease_state;
3719 +
3720 + cifs_queue_oplock_break(cfile);
3721 + kfree(lw);
3722 +@@ -538,7 +529,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
3723 +
3724 + cifs_dbg(FYI, "found in the pending open list\n");
3725 + cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
3726 +- le32_to_cpu(rsp->NewLeaseState));
3727 ++ lease_state);
3728 +
3729 + open->oplock = lease_state;
3730 + }
3731 +@@ -650,18 +641,9 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
3732 + set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
3733 + &cinode->flags);
3734 +
3735 +- /*
3736 +- * Set flag if the server downgrades the oplock
3737 +- * to L2 else clear.
3738 +- */
3739 +- if (rsp->OplockLevel)
3740 +- set_bit(
3741 +- CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
3742 +- &cinode->flags);
3743 +- else
3744 +- clear_bit(
3745 +- CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
3746 +- &cinode->flags);
3747 ++ cfile->oplock_epoch = 0;
3748 ++ cfile->oplock_level = rsp->OplockLevel;
3749 ++
3750 + spin_unlock(&cfile->file_info_lock);
3751 +
3752 + cifs_queue_oplock_break(cfile);
3753 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
3754 +index b46fdb2b8d349..90d4288907a61 100644
3755 +--- a/fs/cifs/smb2ops.c
3756 ++++ b/fs/cifs/smb2ops.c
3757 +@@ -1935,22 +1935,38 @@ static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
3758 +
3759 + static void
3760 + smb2_downgrade_oplock(struct TCP_Server_Info *server,
3761 +- struct cifsInodeInfo *cinode, bool set_level2)
3762 ++ struct cifsInodeInfo *cinode, __u32 oplock,
3763 ++ unsigned int epoch, bool *purge_cache)
3764 + {
3765 +- if (set_level2)
3766 +- server->ops->set_oplock_level(cinode, SMB2_OPLOCK_LEVEL_II,
3767 +- 0, NULL);
3768 +- else
3769 +- server->ops->set_oplock_level(cinode, 0, 0, NULL);
3770 ++ server->ops->set_oplock_level(cinode, oplock, 0, NULL);
3771 + }
3772 +
3773 + static void
3774 +-smb21_downgrade_oplock(struct TCP_Server_Info *server,
3775 +- struct cifsInodeInfo *cinode, bool set_level2)
3776 ++smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3777 ++ unsigned int epoch, bool *purge_cache);
3778 ++
3779 ++static void
3780 ++smb3_downgrade_oplock(struct TCP_Server_Info *server,
3781 ++ struct cifsInodeInfo *cinode, __u32 oplock,
3782 ++ unsigned int epoch, bool *purge_cache)
3783 + {
3784 +- server->ops->set_oplock_level(cinode,
3785 +- set_level2 ? SMB2_LEASE_READ_CACHING_HE :
3786 +- 0, 0, NULL);
3787 ++ unsigned int old_state = cinode->oplock;
3788 ++ unsigned int old_epoch = cinode->epoch;
3789 ++ unsigned int new_state;
3790 ++
3791 ++ if (epoch > old_epoch) {
3792 ++ smb21_set_oplock_level(cinode, oplock, 0, NULL);
3793 ++ cinode->epoch = epoch;
3794 ++ }
3795 ++
3796 ++ new_state = cinode->oplock;
3797 ++ *purge_cache = false;
3798 ++
3799 ++ if ((old_state & CIFS_CACHE_READ_FLG) != 0 &&
3800 ++ (new_state & CIFS_CACHE_READ_FLG) == 0)
3801 ++ *purge_cache = true;
3802 ++ else if (old_state == new_state && (epoch - old_epoch > 1))
3803 ++ *purge_cache = true;
3804 + }
3805 +
3806 + static void
3807 +@@ -2953,7 +2969,7 @@ struct smb_version_operations smb21_operations = {
3808 + .print_stats = smb2_print_stats,
3809 + .is_oplock_break = smb2_is_valid_oplock_break,
3810 + .handle_cancelled_mid = smb2_handle_cancelled_mid,
3811 +- .downgrade_oplock = smb21_downgrade_oplock,
3812 ++ .downgrade_oplock = smb2_downgrade_oplock,
3813 + .need_neg = smb2_need_neg,
3814 + .negotiate = smb2_negotiate,
3815 + .negotiate_wsize = smb2_negotiate_wsize,
3816 +@@ -3048,7 +3064,7 @@ struct smb_version_operations smb30_operations = {
3817 + .dump_share_caps = smb2_dump_share_caps,
3818 + .is_oplock_break = smb2_is_valid_oplock_break,
3819 + .handle_cancelled_mid = smb2_handle_cancelled_mid,
3820 +- .downgrade_oplock = smb21_downgrade_oplock,
3821 ++ .downgrade_oplock = smb3_downgrade_oplock,
3822 + .need_neg = smb2_need_neg,
3823 + .negotiate = smb2_negotiate,
3824 + .negotiate_wsize = smb2_negotiate_wsize,
3825 +@@ -3153,7 +3169,7 @@ struct smb_version_operations smb311_operations = {
3826 + .dump_share_caps = smb2_dump_share_caps,
3827 + .is_oplock_break = smb2_is_valid_oplock_break,
3828 + .handle_cancelled_mid = smb2_handle_cancelled_mid,
3829 +- .downgrade_oplock = smb21_downgrade_oplock,
3830 ++ .downgrade_oplock = smb3_downgrade_oplock,
3831 + .need_neg = smb2_need_neg,
3832 + .negotiate = smb2_negotiate,
3833 + .negotiate_wsize = smb2_negotiate_wsize,
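
For SMB3 the new smb3_downgrade_oplock() uses the lease Epoch (the smb2pdu.h hunk below renames the Reserved field accordingly) to decide whether the break is current and whether cached data must be purged: purge when read caching is lost, or when the state looks unchanged but more than one epoch was skipped. A standalone sketch of that decision logic, with invented flag values:

    #include <stdbool.h>
    #include <stdio.h>

    #define CACHE_READ  0x1
    #define CACHE_WRITE 0x2

    struct cinode { unsigned int oplock, epoch; };

    static void downgrade(struct cinode *c, unsigned int new_state,
                          unsigned int epoch, bool *purge_cache)
    {
        unsigned int old_state = c->oplock;
        unsigned int old_epoch = c->epoch;

        if (epoch > old_epoch) {        /* only apply newer lease state */
            c->oplock = new_state;
            c->epoch = epoch;
        }

        *purge_cache = false;
        if ((old_state & CACHE_READ) && !(c->oplock & CACHE_READ))
            *purge_cache = true;        /* lost read caching */
        else if (old_state == c->oplock && (epoch - old_epoch > 1))
            *purge_cache = true;        /* a lease break was missed */
    }

    int main(void)
    {
        struct cinode c = { CACHE_READ | CACHE_WRITE, 1 };
        bool purge;

        downgrade(&c, CACHE_WRITE, 2, &purge);
        printf("state=%#x epoch=%u purge=%d\n", c.oplock, c.epoch, purge);
        return 0;
    }
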
3834 +diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
3835 +index bad458a2b579e..f8baa54c83008 100644
3836 +--- a/fs/cifs/smb2pdu.h
3837 ++++ b/fs/cifs/smb2pdu.h
3838 +@@ -1046,7 +1046,7 @@ struct smb2_oplock_break {
3839 + struct smb2_lease_break {
3840 + struct smb2_hdr hdr;
3841 + __le16 StructureSize; /* Must be 44 */
3842 +- __le16 Reserved;
3843 ++ __le16 Epoch;
3844 + __le32 Flags;
3845 + __u8 LeaseKey[16];
3846 + __le32 CurrentLeaseState;
3847 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
3848 +index 845b8620afcf6..34da8d341c0c4 100644
3849 +--- a/fs/ext4/inode.c
3850 ++++ b/fs/ext4/inode.c
3851 +@@ -5179,7 +5179,7 @@ static int ext4_do_update_inode(handle_t *handle,
3852 + raw_inode->i_file_acl_high =
3853 + cpu_to_le16(ei->i_file_acl >> 32);
3854 + raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
3855 +- if (ei->i_disksize != ext4_isize(inode->i_sb, raw_inode)) {
3856 ++ if (READ_ONCE(ei->i_disksize) != ext4_isize(inode->i_sb, raw_inode)) {
3857 + ext4_isize_set(raw_inode, ei->i_disksize);
3858 + need_datasync = 1;
3859 + }
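
The ext4 one-liner wraps a field that is read without the protecting lock in READ_ONCE(), forcing the compiler to emit exactly one load instead of possibly refetching it mid-comparison while another CPU updates it; the seqlock.h hunk later in this patch documents the matching WRITE_ONCE() side. Roughly how the two macros behave, as a userspace sketch (the kernel definitions are more elaborate):

    #include <stdio.h>
    #include <stdint.h>

    /* Simplified userspace analogs; the kernel macros do more. */
    #define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
    #define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

    static uint64_t i_disksize;   /* imagine another thread updating this */

    int main(void)
    {
        WRITE_ONCE(i_disksize, 4096);

        /* One guaranteed load; a plain read could legally be refetched. */
        if (READ_ONCE(i_disksize) != 4096)
            puts("size changed under us");
        else
            puts("size matches");
        return 0;
    }
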
3860 +diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
3861 +index f580695b7bb9a..1b9c4c19bed29 100644
3862 +--- a/fs/fuse/dev.c
3863 ++++ b/fs/fuse/dev.c
3864 +@@ -824,7 +824,6 @@ static int fuse_check_page(struct page *page)
3865 + {
3866 + if (page_mapcount(page) ||
3867 + page->mapping != NULL ||
3868 +- page_count(page) != 1 ||
3869 + (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
3870 + ~(1 << PG_locked |
3871 + 1 << PG_referenced |
3872 +diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
3873 +index 6c6401084d3d8..e893b1fbde98b 100644
3874 +--- a/fs/gfs2/inode.c
3875 ++++ b/fs/gfs2/inode.c
3876 +@@ -714,7 +714,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
3877 +
3878 + error = gfs2_trans_begin(sdp, blocks, 0);
3879 + if (error)
3880 +- goto fail_gunlock2;
3881 ++ goto fail_free_inode;
3882 +
3883 + if (blocks > 1) {
3884 + ip->i_eattr = ip->i_no_addr + 1;
3885 +@@ -725,7 +725,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
3886 +
3887 + error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
3888 + if (error)
3889 +- goto fail_gunlock2;
3890 ++ goto fail_free_inode;
3891 +
3892 + BUG_ON(test_and_set_bit(GLF_INODE_CREATING, &io_gl->gl_flags));
3893 +
3894 +@@ -734,7 +734,6 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
3895 + goto fail_gunlock2;
3896 +
3897 + glock_set_object(ip->i_iopen_gh.gh_gl, ip);
3898 +- gfs2_glock_put(io_gl);
3899 + gfs2_set_iop(inode);
3900 + insert_inode_hash(inode);
3901 +
3902 +@@ -767,6 +766,8 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
3903 +
3904 + mark_inode_dirty(inode);
3905 + d_instantiate(dentry, inode);
3906 ++ /* After instantiate, errors should result in evict which will destroy
3907 ++ * both inode and iopen glocks properly. */
3908 + if (file) {
3909 + *opened |= FILE_CREATED;
3910 + error = finish_open(file, dentry, gfs2_open_common, opened);
3911 +@@ -774,15 +775,15 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
3912 + gfs2_glock_dq_uninit(ghs);
3913 + gfs2_glock_dq_uninit(ghs + 1);
3914 + clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
3915 ++ gfs2_glock_put(io_gl);
3916 + return error;
3917 +
3918 + fail_gunlock3:
3919 + glock_clear_object(io_gl, ip);
3920 + gfs2_glock_dq_uninit(&ip->i_iopen_gh);
3921 +- gfs2_glock_put(io_gl);
3922 + fail_gunlock2:
3923 +- if (io_gl)
3924 +- clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
3925 ++ clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
3926 ++ gfs2_glock_put(io_gl);
3927 + fail_free_inode:
3928 + if (ip->i_gl) {
3929 + glock_clear_object(ip->i_gl, ip);
3930 +diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
3931 +index 7c01936be7c70..37358dba3b033 100644
3932 +--- a/fs/nfs/pagelist.c
3933 ++++ b/fs/nfs/pagelist.c
3934 +@@ -132,47 +132,70 @@ nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx)
3935 + EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);
3936 +
3937 + /*
3938 +- * nfs_page_group_lock - lock the head of the page group
3939 +- * @req - request in group that is to be locked
3940 ++ * nfs_page_set_headlock - set the request PG_HEADLOCK
3941 ++ * @req: request that is to be locked
3942 + *
3943 +- * this lock must be held when traversing or modifying the page
3944 +- * group list
3945 ++ * this lock must be held when modifying req->wb_head
3946 + *
3947 + * return 0 on success, < 0 on error
3948 + */
3949 + int
3950 +-nfs_page_group_lock(struct nfs_page *req)
3951 ++nfs_page_set_headlock(struct nfs_page *req)
3952 + {
3953 +- struct nfs_page *head = req->wb_head;
3954 +-
3955 +- WARN_ON_ONCE(head != head->wb_head);
3956 +-
3957 +- if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags))
3958 ++ if (!test_and_set_bit(PG_HEADLOCK, &req->wb_flags))
3959 + return 0;
3960 +
3961 +- set_bit(PG_CONTENDED1, &head->wb_flags);
3962 ++ set_bit(PG_CONTENDED1, &req->wb_flags);
3963 + smp_mb__after_atomic();
3964 +- return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
3965 ++ return wait_on_bit_lock(&req->wb_flags, PG_HEADLOCK,
3966 + TASK_UNINTERRUPTIBLE);
3967 + }
3968 +
3969 + /*
3970 +- * nfs_page_group_unlock - unlock the head of the page group
3971 +- * @req - request in group that is to be unlocked
3972 ++ * nfs_page_clear_headlock - clear the request PG_HEADLOCK
3973 ++ * @req: request that is to be locked
3974 + */
3975 + void
3976 +-nfs_page_group_unlock(struct nfs_page *req)
3977 ++nfs_page_clear_headlock(struct nfs_page *req)
3978 + {
3979 +- struct nfs_page *head = req->wb_head;
3980 +-
3981 +- WARN_ON_ONCE(head != head->wb_head);
3982 +-
3983 + smp_mb__before_atomic();
3984 +- clear_bit(PG_HEADLOCK, &head->wb_flags);
3985 ++ clear_bit(PG_HEADLOCK, &req->wb_flags);
3986 + smp_mb__after_atomic();
3987 +- if (!test_bit(PG_CONTENDED1, &head->wb_flags))
3988 ++ if (!test_bit(PG_CONTENDED1, &req->wb_flags))
3989 + return;
3990 +- wake_up_bit(&head->wb_flags, PG_HEADLOCK);
3991 ++ wake_up_bit(&req->wb_flags, PG_HEADLOCK);
3992 ++}
3993 ++
3994 ++/*
3995 ++ * nfs_page_group_lock - lock the head of the page group
3996 ++ * @req: request in group that is to be locked
3997 ++ *
3998 ++ * this lock must be held when traversing or modifying the page
3999 ++ * group list
4000 ++ *
4001 ++ * return 0 on success, < 0 on error
4002 ++ */
4003 ++int
4004 ++nfs_page_group_lock(struct nfs_page *req)
4005 ++{
4006 ++ int ret;
4007 ++
4008 ++ ret = nfs_page_set_headlock(req);
4009 ++ if (ret || req->wb_head == req)
4010 ++ return ret;
4011 ++ return nfs_page_set_headlock(req->wb_head);
4012 ++}
4013 ++
4014 ++/*
4015 ++ * nfs_page_group_unlock - unlock the head of the page group
4016 ++ * @req: request in group that is to be unlocked
4017 ++ */
4018 ++void
4019 ++nfs_page_group_unlock(struct nfs_page *req)
4020 ++{
4021 ++ if (req != req->wb_head)
4022 ++ nfs_page_clear_headlock(req->wb_head);
4023 ++ nfs_page_clear_headlock(req);
4024 + }
4025 +
4026 + /*
4027 +diff --git a/fs/nfs/write.c b/fs/nfs/write.c
4028 +index 7b6bda68aa86a..767e46c09074b 100644
4029 +--- a/fs/nfs/write.c
4030 ++++ b/fs/nfs/write.c
4031 +@@ -406,22 +406,28 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
4032 + destroy_list = (subreq->wb_this_page == old_head) ?
4033 + NULL : subreq->wb_this_page;
4034 +
4035 ++ /* Note: lock subreq in order to change subreq->wb_head */
4036 ++ nfs_page_set_headlock(subreq);
4037 + WARN_ON_ONCE(old_head != subreq->wb_head);
4038 +
4039 + /* make sure old group is not used */
4040 + subreq->wb_this_page = subreq;
4041 ++ subreq->wb_head = subreq;
4042 +
4043 + clear_bit(PG_REMOVE, &subreq->wb_flags);
4044 +
4045 + /* Note: races with nfs_page_group_destroy() */
4046 + if (!kref_read(&subreq->wb_kref)) {
4047 + /* Check if we raced with nfs_page_group_destroy() */
4048 +- if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags))
4049 ++ if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags)) {
4050 ++ nfs_page_clear_headlock(subreq);
4051 + nfs_free_request(subreq);
4052 ++ } else
4053 ++ nfs_page_clear_headlock(subreq);
4054 + continue;
4055 + }
4056 ++ nfs_page_clear_headlock(subreq);
4057 +
4058 +- subreq->wb_head = subreq;
4059 + nfs_release_request(old_head);
4060 +
4061 + if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) {
4062 +diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
4063 +index 3be28900bf375..135e95950f513 100644
4064 +--- a/fs/ubifs/io.c
4065 ++++ b/fs/ubifs/io.c
4066 +@@ -237,7 +237,7 @@ int ubifs_is_mapped(const struct ubifs_info *c, int lnum)
4067 + int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
4068 + int offs, int quiet, int must_chk_crc)
4069 + {
4070 +- int err = -EINVAL, type, node_len;
4071 ++ int err = -EINVAL, type, node_len, dump_node = 1;
4072 + uint32_t crc, node_crc, magic;
4073 + const struct ubifs_ch *ch = buf;
4074 +
4075 +@@ -290,10 +290,22 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
4076 + out_len:
4077 + if (!quiet)
4078 + ubifs_err(c, "bad node length %d", node_len);
4079 ++ if (type == UBIFS_DATA_NODE && node_len > UBIFS_DATA_NODE_SZ)
4080 ++ dump_node = 0;
4081 + out:
4082 + if (!quiet) {
4083 + ubifs_err(c, "bad node at LEB %d:%d", lnum, offs);
4084 +- ubifs_dump_node(c, buf);
4085 ++ if (dump_node) {
4086 ++ ubifs_dump_node(c, buf);
4087 ++ } else {
4088 ++ int safe_len = min3(node_len, c->leb_size - offs,
4089 ++ (int)UBIFS_MAX_DATA_NODE_SZ);
4090 ++ pr_err("\tprevent out-of-bounds memory access\n");
4091 ++ pr_err("\ttruncated data node length %d\n", safe_len);
4092 ++ pr_err("\tcorrupted data node:\n");
4093 ++ print_hex_dump(KERN_ERR, "\t", DUMP_PREFIX_OFFSET, 32, 1,
4094 ++ buf, safe_len, 0);
4095 ++ }
4096 + dump_stack();
4097 + }
4098 + return err;
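
The ubifs change refuses to ubifs_dump_node() a data node whose length field is corrupt, clamping the dump to the smallest of the claimed length, the bytes left in the erase block, and the maximum legal node size. A sketch of the clamp plus a plain hex dump, with invented constants:

    #include <stdio.h>

    #define MAX_NODE_SZ 16   /* stand-in for UBIFS_MAX_DATA_NODE_SZ */

    static int min3i(int a, int b, int c)
    {
        int m = a < b ? a : b;
        return m < c ? m : c;
    }

    int main(void)
    {
        unsigned char buf[32] = "corrupted node payload";
        int node_len = 1 << 20;          /* absurd length from a bad header */
        int leb_bytes_left = (int)sizeof(buf);
        int safe_len = min3i(node_len, leb_bytes_left, MAX_NODE_SZ);

        printf("truncated dump, %d bytes:\n", safe_len);
        for (int i = 0; i < safe_len; i++)
            printf("%02x%c", buf[i], (i % 8 == 7) ? '\n' : ' ');
        putchar('\n');
        return 0;
    }
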
4099 +diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
4100 +index 299d17b088e21..facb83031ba77 100644
4101 +--- a/fs/xfs/libxfs/xfs_attr_leaf.c
4102 ++++ b/fs/xfs/libxfs/xfs_attr_leaf.c
4103 +@@ -1335,7 +1335,9 @@ xfs_attr3_leaf_add_work(
4104 + for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
4105 + if (ichdr->freemap[i].base == tmp) {
4106 + ichdr->freemap[i].base += sizeof(xfs_attr_leaf_entry_t);
4107 +- ichdr->freemap[i].size -= sizeof(xfs_attr_leaf_entry_t);
4108 ++ ichdr->freemap[i].size -=
4109 ++ min_t(uint16_t, ichdr->freemap[i].size,
4110 ++ sizeof(xfs_attr_leaf_entry_t));
4111 + }
4112 + }
4113 + ichdr->usedbytes += xfs_attr_leaf_entsize(leaf, args->index);
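
The xfs_attr_leaf fix guards a uint16_t subtraction: if freemap[i].size were ever smaller than the entry size, the unsigned arithmetic would wrap to a huge value instead of going negative. A demonstration of the wraparound and the min_t()-style clamp (MIN_T below is a userspace stand-in):

    #include <stdio.h>
    #include <stdint.h>

    #define MIN_T(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

    int main(void)
    {
        uint16_t size  = 4;
        uint16_t entry = 8;   /* plays the role of the entry size */

        uint16_t wrapped = size - entry;               /* 4 - 8 -> 65532 */
        uint16_t clamped = size - MIN_T(uint16_t, size, entry);  /* -> 0 */

        printf("wrapped=%u clamped=%u\n", wrapped, clamped);
        return 0;
    }
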
4114 +diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c
4115 +index 682e2bf370c72..ee4ebc2dd7492 100644
4116 +--- a/fs/xfs/libxfs/xfs_dir2_node.c
4117 ++++ b/fs/xfs/libxfs/xfs_dir2_node.c
4118 +@@ -212,6 +212,7 @@ __xfs_dir3_free_read(
4119 + xfs_buf_ioerror(*bpp, -EFSCORRUPTED);
4120 + xfs_verifier_error(*bpp);
4121 + xfs_trans_brelse(tp, *bpp);
4122 ++ *bpp = NULL;
4123 + return -EFSCORRUPTED;
4124 + }
4125 +
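
The xfs_dir2_node fix NULLs the caller's buffer pointer after releasing it on the corruption path, so a caller that mishandles the error cannot dereference freed memory. A tiny sketch of the poison-the-out-param idiom:

    #include <stdio.h>
    #include <stdlib.h>

    static int read_block(char **bpp)
    {
        *bpp = malloc(16);
        if (!*bpp)
            return -1;

        /* Pretend verification failed: release and poison the out-param. */
        free(*bpp);
        *bpp = NULL;      /* caller can't touch the freed buffer by mistake */
        return -117;      /* stand-in for -EFSCORRUPTED */
    }

    int main(void)
    {
        char *bp;
        int err = read_block(&bp);

        printf("err=%d bp=%p\n", err, (void *)bp);
        return 0;
    }
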
4126 +diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
4127 +index b93efc8feecd1..755033acd2b0d 100644
4128 +--- a/include/linux/debugfs.h
4129 ++++ b/include/linux/debugfs.h
4130 +@@ -77,6 +77,8 @@ static const struct file_operations __fops = { \
4131 + .llseek = no_llseek, \
4132 + }
4133 +
4134 ++typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *);
4135 ++
4136 + #if defined(CONFIG_DEBUG_FS)
4137 +
4138 + struct dentry *debugfs_lookup(const char *name, struct dentry *parent);
4139 +@@ -98,7 +100,6 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent);
4140 + struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent,
4141 + const char *dest);
4142 +
4143 +-typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *);
4144 + struct dentry *debugfs_create_automount(const char *name,
4145 + struct dentry *parent,
4146 + debugfs_automount_t f,
4147 +@@ -227,7 +228,7 @@ static inline struct dentry *debugfs_create_symlink(const char *name,
4148 +
4149 + static inline struct dentry *debugfs_create_automount(const char *name,
4150 + struct dentry *parent,
4151 +- struct vfsmount *(*f)(void *),
4152 ++ debugfs_automount_t f,
4153 + void *data)
4154 + {
4155 + return ERR_PTR(-ENODEV);
4156 +diff --git a/include/linux/libata.h b/include/linux/libata.h
4157 +index f772c55ed901d..c5c34fd78c5a5 100644
4158 +--- a/include/linux/libata.h
4159 ++++ b/include/linux/libata.h
4160 +@@ -504,6 +504,7 @@ enum hsm_task_states {
4161 + };
4162 +
4163 + enum ata_completion_errors {
4164 ++ AC_ERR_OK = 0, /* no error */
4165 + AC_ERR_DEV = (1 << 0), /* device reported error */
4166 + AC_ERR_HSM = (1 << 1), /* host state machine violation */
4167 + AC_ERR_TIMEOUT = (1 << 2), /* timeout */
4168 +@@ -909,9 +910,9 @@ struct ata_port_operations {
4169 + /*
4170 + * Command execution
4171 + */
4172 +- int (*qc_defer)(struct ata_queued_cmd *qc);
4173 +- int (*check_atapi_dma)(struct ata_queued_cmd *qc);
4174 +- void (*qc_prep)(struct ata_queued_cmd *qc);
4175 ++ int (*qc_defer)(struct ata_queued_cmd *qc);
4176 ++ int (*check_atapi_dma)(struct ata_queued_cmd *qc);
4177 ++ enum ata_completion_errors (*qc_prep)(struct ata_queued_cmd *qc);
4178 + unsigned int (*qc_issue)(struct ata_queued_cmd *qc);
4179 + bool (*qc_fill_rtf)(struct ata_queued_cmd *qc);
4180 +
4181 +@@ -1175,7 +1176,7 @@ extern int ata_xfer_mode2shift(unsigned long xfer_mode);
4182 + extern const char *ata_mode_string(unsigned long xfer_mask);
4183 + extern unsigned long ata_id_xfermask(const u16 *id);
4184 + extern int ata_std_qc_defer(struct ata_queued_cmd *qc);
4185 +-extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
4186 ++extern enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc);
4187 + extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4188 + unsigned int n_elem);
4189 + extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
4190 +@@ -1889,9 +1890,9 @@ extern const struct ata_port_operations ata_bmdma_port_ops;
4191 + .sg_tablesize = LIBATA_MAX_PRD, \
4192 + .dma_boundary = ATA_DMA_BOUNDARY
4193 +
4194 +-extern void ata_bmdma_qc_prep(struct ata_queued_cmd *qc);
4195 ++extern enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc);
4196 + extern unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc);
4197 +-extern void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc);
4198 ++extern enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc);
4199 + extern unsigned int ata_bmdma_port_intr(struct ata_port *ap,
4200 + struct ata_queued_cmd *qc);
4201 + extern irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance);
4202 +diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
4203 +index 279b39008a33b..de81ed857ea37 100644
4204 +--- a/include/linux/mmc/card.h
4205 ++++ b/include/linux/mmc/card.h
4206 +@@ -226,7 +226,7 @@ struct mmc_queue_req;
4207 + * MMC Physical partitions
4208 + */
4209 + struct mmc_part {
4210 +- unsigned int size; /* partition size (in bytes) */
4211 ++ u64 size; /* partition size (in bytes) */
4212 + unsigned int part_cfg; /* partition type */
4213 + char name[MAX_MMC_PART_NAME_LEN];
4214 + bool force_ro; /* to make boot parts RO by default */
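
The mmc_part change widens size to u64: a partition size in bytes derived from sector counts can exceed 4 GiB and silently truncate in an unsigned int on the way. A quick demonstration of the truncation:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t sectors = 16u << 20;         /* 16M sectors * 512 B = 8 GiB */
        uint64_t bytes64 = sectors * 512;
        unsigned int bytes32 = (unsigned int)bytes64;  /* 32-bit field */

        printf("u64: %llu bytes, unsigned int: %u bytes\n",
               (unsigned long long)bytes64, bytes32);
        return 0;
    }
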
4215 +diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
4216 +index ad69430fd0eb5..5162fc1533c2f 100644
4217 +--- a/include/linux/nfs_page.h
4218 ++++ b/include/linux/nfs_page.h
4219 +@@ -142,6 +142,8 @@ extern void nfs_unlock_and_release_request(struct nfs_page *);
4220 + extern int nfs_page_group_lock(struct nfs_page *);
4221 + extern void nfs_page_group_unlock(struct nfs_page *);
4222 + extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
4223 ++extern int nfs_page_set_headlock(struct nfs_page *req);
4224 ++extern void nfs_page_clear_headlock(struct nfs_page *req);
4225 + extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *);
4226 +
4227 + /*
4228 +diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
4229 +index f189a8a3bbb88..7b3b5d05ab0de 100644
4230 +--- a/include/linux/seqlock.h
4231 ++++ b/include/linux/seqlock.h
4232 +@@ -243,6 +243,13 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
4233 + * usual consistency guarantee. It is one wmb cheaper, because we can
4234 + * collapse the two back-to-back wmb()s.
4235 + *
4236 ++ * Note that, writes surrounding the barrier should be declared atomic (e.g.
4237 ++ * via WRITE_ONCE): a) to ensure the writes become visible to other threads
4238 ++ * atomically, avoiding compiler optimizations; b) to document which writes are
4239 ++ * meant to propagate to the reader critical section. This is necessary because
4240 ++ * neither writes before and after the barrier are enclosed in a seq-writer
4241 ++ * critical section that would ensure readers are aware of ongoing writes.
4242 ++ *
4243 + * seqcount_t seq;
4244 + * bool X = true, Y = false;
4245 + *
4246 +@@ -262,11 +269,11 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
4247 + *
4248 + * void write(void)
4249 + * {
4250 +- * Y = true;
4251 ++ * WRITE_ONCE(Y, true);
4252 + *
4253 + * raw_write_seqcount_barrier(seq);
4254 + *
4255 +- * X = false;
4256 ++ * WRITE_ONCE(X, false);
4257 + * }
4258 + */
4259 + static inline void raw_write_seqcount_barrier(seqcount_t *s)
4260 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
4261 +index a9a764a17c289..3690985e24a8a 100644
4262 +--- a/include/linux/skbuff.h
4263 ++++ b/include/linux/skbuff.h
4264 +@@ -1674,6 +1674,18 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
4265 + return list_->qlen;
4266 + }
4267 +
4268 ++/**
4269 ++ * skb_queue_len_lockless - get queue length
4270 ++ * @list_: list to measure
4271 ++ *
4272 ++ * Return the length of an &sk_buff queue.
4273 ++ * This variant can be used in lockless contexts.
4274 ++ */
4275 ++static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_)
4276 ++{
4277 ++ return READ_ONCE(list_->qlen);
4278 ++}
4279 ++
4280 + /**
4281 + * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
4282 + * @list: queue to initialize
4283 +@@ -1881,7 +1893,7 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
4284 + {
4285 + struct sk_buff *next, *prev;
4286 +
4287 +- list->qlen--;
4288 ++ WRITE_ONCE(list->qlen, list->qlen - 1);
4289 + next = skb->next;
4290 + prev = skb->prev;
4291 + skb->next = skb->prev = NULL;
4292 +@@ -2999,8 +3011,9 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len)
4293 + * is untouched. Otherwise it is extended. Returns zero on
4294 + * success. The skb is freed on error if @free_on_error is true.
4295 + */
4296 +-static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
4297 +- bool free_on_error)
4298 ++static inline int __must_check __skb_put_padto(struct sk_buff *skb,
4299 ++ unsigned int len,
4300 ++ bool free_on_error)
4301 + {
4302 + unsigned int size = skb->len;
4303 +
4304 +@@ -3023,7 +3036,7 @@ static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
4305 + * is untouched. Otherwise it is extended. Returns zero on
4306 + * success. The skb is freed on error.
4307 + */
4308 +-static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
4309 ++static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int len)
4310 + {
4311 + return __skb_put_padto(skb, len, true);
4312 + }
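
Two distinct hardenings land in this file: reads of qlen outside the queue lock get a READ_ONCE() accessor while the locked writer in __skb_unlink() switches to WRITE_ONCE() so the marked accesses pair up (the af_unix hunk later in this patch is a user of the new accessor), and the padto helpers gain __must_check so callers cannot ignore that the skb may already be freed on error. A minimal userspace sketch of the marked-access discipline on a toy queue (volatile-cast stand-ins as in the seqlock sketch above; all names are illustrative):

    #include <pthread.h>

    #define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
    #define READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))

    struct toy_queue {
            pthread_mutex_t lock;
            unsigned int    qlen;
    };

    static void toy_unlink_one(struct toy_queue *q)
    {
            pthread_mutex_lock(&q->lock);
            /* Marked store: lockless readers may load qlen concurrently. */
            WRITE_ONCE(q->qlen, q->qlen - 1);
            pthread_mutex_unlock(&q->lock);
    }

    /* Safe without q->lock held; the result is only a snapshot. */
    static unsigned int toy_len_lockless(const struct toy_queue *q)
    {
            return READ_ONCE(q->qlen);
    }
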
4313 +diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
4314 +index 35f1d706bd5b4..ac87820cc0825 100644
4315 +--- a/kernel/audit_watch.c
4316 ++++ b/kernel/audit_watch.c
4317 +@@ -316,8 +316,6 @@ static void audit_update_watch(struct audit_parent *parent,
4318 + if (oentry->rule.exe)
4319 + audit_remove_mark(oentry->rule.exe);
4320 +
4321 +- audit_watch_log_rule_change(r, owatch, "updated_rules");
4322 +-
4323 + call_rcu(&oentry->rcu, audit_free_rule_rcu);
4324 + }
4325 +
4326 +diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
4327 +index 6cc090d015f66..ecc58137525bc 100644
4328 +--- a/kernel/bpf/hashtab.c
4329 ++++ b/kernel/bpf/hashtab.c
4330 +@@ -645,15 +645,7 @@ static void htab_elem_free_rcu(struct rcu_head *head)
4331 + struct htab_elem *l = container_of(head, struct htab_elem, rcu);
4332 + struct bpf_htab *htab = l->htab;
4333 +
4334 +- /* must increment bpf_prog_active to avoid kprobe+bpf triggering while
4335 +- * we're calling kfree, otherwise deadlock is possible if kprobes
4336 +- * are placed somewhere inside of slub
4337 +- */
4338 +- preempt_disable();
4339 +- __this_cpu_inc(bpf_prog_active);
4340 + htab_elem_free(htab, l);
4341 +- __this_cpu_dec(bpf_prog_active);
4342 +- preempt_enable();
4343 + }
4344 +
4345 + static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
4346 +diff --git a/kernel/kprobes.c b/kernel/kprobes.c
4347 +index 7b3a5c35904a0..fdf6656cce292 100644
4348 +--- a/kernel/kprobes.c
4349 ++++ b/kernel/kprobes.c
4350 +@@ -2117,6 +2117,9 @@ static void kill_kprobe(struct kprobe *p)
4351 + {
4352 + struct kprobe *kp;
4353 +
4354 ++ if (WARN_ON_ONCE(kprobe_gone(p)))
4355 ++ return;
4356 ++
4357 + p->flags |= KPROBE_FLAG_GONE;
4358 + if (kprobe_aggrprobe(p)) {
4359 + /*
4360 +@@ -2137,9 +2140,10 @@ static void kill_kprobe(struct kprobe *p)
4361 +
4362 + /*
4363 + * The module is going away. We should disarm the kprobe which
4364 +- * is using ftrace.
4365 ++ * is using ftrace, because ftrace framework is still available at
4366 ++ * MODULE_STATE_GOING notification.
4367 + */
4368 +- if (kprobe_ftrace(p))
4369 ++ if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed)
4370 + disarm_kprobe_ftrace(p);
4371 + }
4372 +
4373 +@@ -2259,7 +2263,10 @@ static int kprobes_module_callback(struct notifier_block *nb,
4374 + mutex_lock(&kprobe_mutex);
4375 + for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
4376 + head = &kprobe_table[i];
4377 +- hlist_for_each_entry_rcu(p, head, hlist)
4378 ++ hlist_for_each_entry_rcu(p, head, hlist) {
4379 ++ if (kprobe_gone(p))
4380 ++ continue;
4381 ++
4382 + if (within_module_init((unsigned long)p->addr, mod) ||
4383 + (checkcore &&
4384 + within_module_core((unsigned long)p->addr, mod))) {
4385 +@@ -2276,6 +2283,7 @@ static int kprobes_module_callback(struct notifier_block *nb,
4386 + */
4387 + kill_kprobe(p);
4388 + }
4389 ++ }
4390 + }
4391 + mutex_unlock(&kprobe_mutex);
4392 + return NOTIFY_DONE;
4393 +diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
4394 +index 4e50beb162c00..f96b22db5fe70 100644
4395 +--- a/kernel/printk/printk.c
4396 ++++ b/kernel/printk/printk.c
4397 +@@ -2092,6 +2092,9 @@ static int __init console_setup(char *str)
4398 + char *s, *options, *brl_options = NULL;
4399 + int idx;
4400 +
4401 ++ if (str[0] == 0)
4402 ++ return 1;
4403 ++
4404 + if (_braille_console_setup(&str, &brl_options))
4405 + return 1;
4406 +
4407 +diff --git a/kernel/sys.c b/kernel/sys.c
4408 +index ab96b98823473..2e4f017f7c5aa 100644
4409 +--- a/kernel/sys.c
4410 ++++ b/kernel/sys.c
4411 +@@ -1217,11 +1217,13 @@ SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
4412 +
4413 + SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
4414 + {
4415 +- struct oldold_utsname tmp = {};
4416 ++ struct oldold_utsname tmp;
4417 +
4418 + if (!name)
4419 + return -EFAULT;
4420 +
4421 ++ memset(&tmp, 0, sizeof(tmp));
4422 ++
4423 + down_read(&uts_sem);
4424 + memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN);
4425 + memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
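
The change from "= {}" to memset() matters because an empty initializer on an automatic struct only guarantees the named members are zeroed; any padding bytes between them may keep stale stack contents, which the later copy to userspace would then leak. A userspace sketch of the distinction (the struct layout is hypothetical):

    #include <string.h>

    struct padded {
            char a;   /* typically followed by padding bytes ...   */
            long b;   /* ... so that b ends up naturally aligned   */
    };

    void prepare(struct padded *out)
    {
            struct padded tmp;

            /* memset() zeroes padding as well as members, so copying
             * sizeof(tmp) bytes out cannot disclose old stack data.
             * "struct padded tmp = {};" would only zero a and b. */
            memset(&tmp, 0, sizeof(tmp));
            tmp.a = 'x';
            tmp.b = 42;
            memcpy(out, &tmp, sizeof(tmp));
    }
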
4426 +diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
4427 +index 1ce7c404d0b03..5b6f815a74ee3 100644
4428 +--- a/kernel/time/timekeeping.c
4429 ++++ b/kernel/time/timekeeping.c
4430 +@@ -988,9 +988,8 @@ static int scale64_check_overflow(u64 mult, u64 div, u64 *base)
4431 + ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem)))
4432 + return -EOVERFLOW;
4433 + tmp *= mult;
4434 +- rem *= mult;
4435 +
4436 +- do_div(rem, div);
4437 ++ rem = div64_u64(rem * mult, div);
4438 + *base = tmp + rem;
4439 + return 0;
4440 + }
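
The do_div() call this hunk removes takes a 32-bit divisor, so a 64-bit div would have been silently truncated; div64_u64() divides by the full 64-bit value. A userspace sketch of what the truncation would cost (div64_u64 is trivially reimplemented here, which is all it is on 64-bit targets; the kernel helper exists because 32-bit targets lack fast 64/64 division):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t div64_u64(uint64_t dividend, uint64_t divisor)
    {
            return dividend / divisor;
    }

    int main(void)
    {
            uint64_t div = 0x100000001ULL;          /* needs 33 bits */

            /* Keeping only the low 32 bits of div would divide by 1
             * here; the full 64-bit division yields 255 as expected. */
            printf("%llu\n", (unsigned long long)div64_u64(1ULL << 40, div));
            return 0;
    }
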
4441 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
4442 +index 95ede1f7ffdf3..19526297a5b1c 100644
4443 +--- a/kernel/trace/trace.c
4444 ++++ b/kernel/trace/trace.c
4445 +@@ -3035,6 +3035,9 @@ int trace_array_printk(struct trace_array *tr,
4446 + if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
4447 + return 0;
4448 +
4449 ++ if (!tr)
4450 ++ return -ENOENT;
4451 ++
4452 + va_start(ap, fmt);
4453 + ret = trace_array_vprintk(tr, ip, fmt, ap);
4454 + va_end(ap);
4455 +@@ -8343,7 +8346,7 @@ __init static int tracer_alloc_buffers(void)
4456 + goto out_free_buffer_mask;
4457 +
4458 + /* Only allocate trace_printk buffers if a trace_printk exists */
4459 +- if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
4460 ++ if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
4461 + /* Must be called before global_trace.buffer is allocated */
4462 + trace_printk_init_buffers();
4463 +
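
Here, and in the kmemleak hunk further down, the operands are linker-generated section boundary symbols rather than real C objects. Spelling the comparison with an explicit address-of keeps the compiler from treating it as a comparison of two distinct objects that can be folded (or warned about) as always-unequal. A sketch of the usual shape; the declarations follow the kernel's, and the snippet only links against a linker script that actually provides the symbols:

    /* Provided by the linker script around the collected section;
     * no C definition exists for either name. */
    extern const char *__start___trace_bprintk_fmt[];
    extern const char *__stop___trace_bprintk_fmt[];

    static inline int trace_bprintk_fmt_nonempty(void)
    {
            /* If nothing was placed in the section, the two boundary
             * addresses coincide, so this is a genuine runtime test. */
            return &__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt;
    }
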
4464 +diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
4465 +index e3a658bac10fe..ff91acff72946 100644
4466 +--- a/kernel/trace/trace_entries.h
4467 ++++ b/kernel/trace/trace_entries.h
4468 +@@ -179,7 +179,7 @@ FTRACE_ENTRY(kernel_stack, stack_entry,
4469 +
4470 + F_STRUCT(
4471 + __field( int, size )
4472 +- __dynamic_array(unsigned long, caller )
4473 ++ __array( unsigned long, caller, FTRACE_STACK_ENTRIES )
4474 + ),
4475 +
4476 + F_printk("\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
4477 +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
4478 +index e72a44ecb81da..d69c79ac97986 100644
4479 +--- a/kernel/trace/trace_events.c
4480 ++++ b/kernel/trace/trace_events.c
4481 +@@ -799,6 +799,8 @@ static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
4482 + char *event = NULL, *sub = NULL, *match;
4483 + int ret;
4484 +
4485 ++ if (!tr)
4486 ++ return -ENOENT;
4487 + /*
4488 + * The buf format can be <subsystem>:<event-name>
4489 + * *:<event-name> means any event by that name.
4490 +diff --git a/lib/string.c b/lib/string.c
4491 +index 33befc6ba3faf..db9abc18b2165 100644
4492 +--- a/lib/string.c
4493 ++++ b/lib/string.c
4494 +@@ -236,6 +236,30 @@ ssize_t strscpy(char *dest, const char *src, size_t count)
4495 + EXPORT_SYMBOL(strscpy);
4496 + #endif
4497 +
4498 ++/**
4499 ++ * stpcpy - copy a string from src to dest returning a pointer to the new end
4500 ++ * of dest, including src's %NUL-terminator. May overrun dest.
4501 ++ * @dest: pointer to end of string being copied into. Must be large enough
4502 ++ * to receive copy.
4503 ++ * @src: pointer to the beginning of string being copied from. Must not overlap
4504 ++ * dest.
4505 ++ *
4506 ++ * stpcpy differs from strcpy in a key way: the return value is a pointer
4507 ++ * to the new %NUL-terminating character in @dest. (For strcpy, the return
4508 ++ * value is a pointer to the start of @dest). This interface is considered
4509 ++ * unsafe as it doesn't perform bounds checking of the inputs. As such it's
4510 ++ * not recommended for use. Instead, its definition is provided in case
4511 ++ * the compiler lowers other libcalls to stpcpy.
4512 ++ */
4513 ++char *stpcpy(char *__restrict__ dest, const char *__restrict__ src);
4514 ++char *stpcpy(char *__restrict__ dest, const char *__restrict__ src)
4515 ++{
4516 ++ while ((*dest++ = *src++) != '\0')
4517 ++ /* nothing */;
4518 ++ return --dest;
4519 ++}
4520 ++EXPORT_SYMBOL(stpcpy);
4521 ++
4522 + #ifndef __HAVE_ARCH_STRCAT
4523 + /**
4524 + * strcat - Append one %NUL-terminated string to another
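
As context for why a compiler would emit calls to stpcpy at all: its return value lets chained concatenation resume at the previous NUL instead of rescanning, which is the lowering GCC applies to some strcpy/strcat sequences. A small userspace usage sketch (stpcpy is POSIX.1-2008; the buffer size here is known to be sufficient):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char buf[32];
            char *p = buf;

            /* Each call returns the position of the new NUL, so the
             * next copy starts there with no strlen() rescans. */
            p = stpcpy(p, "Linux ");
            p = stpcpy(p, "4.14.200");

            printf("%s (%zu bytes used)\n", buf, (size_t)(p - buf) + 1);
            return 0;
    }
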
4525 +diff --git a/mm/filemap.c b/mm/filemap.c
4526 +index a30dbf93de992..471f8091d782a 100644
4527 +--- a/mm/filemap.c
4528 ++++ b/mm/filemap.c
4529 +@@ -2774,6 +2774,14 @@ filler:
4530 + unlock_page(page);
4531 + goto out;
4532 + }
4533 ++
4534 ++ /*
4535 ++ * A previous I/O error may have been due to temporary
4536 ++ * failures.
4537 ++ * Clear the page error before the actual read; PG_error will be
4538 ++ * set again if the read fails.
4539 ++ */
4540 ++ ClearPageError(page);
4541 + goto filler;
4542 +
4543 + out:
4544 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
4545 +index 9f3d4f84032bc..51068ef1dff5a 100644
4546 +--- a/mm/huge_memory.c
4547 ++++ b/mm/huge_memory.c
4548 +@@ -2078,7 +2078,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
4549 + put_page(page);
4550 + add_mm_counter(mm, MM_FILEPAGES, -HPAGE_PMD_NR);
4551 + return;
4552 +- } else if (is_huge_zero_pmd(*pmd)) {
4553 ++ } else if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
4554 + return __split_huge_zero_page_pmd(vma, haddr, pmd);
4555 + }
4556 +
4557 +@@ -2131,27 +2131,33 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
4558 + pte = pte_offset_map(&_pmd, addr);
4559 + BUG_ON(!pte_none(*pte));
4560 + set_pte_at(mm, addr, pte, entry);
4561 +- atomic_inc(&page[i]._mapcount);
4562 +- pte_unmap(pte);
4563 +- }
4564 +-
4565 +- /*
4566 +- * Set PG_double_map before dropping compound_mapcount to avoid
4567 +- * false-negative page_mapped().
4568 +- */
4569 +- if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) {
4570 +- for (i = 0; i < HPAGE_PMD_NR; i++)
4571 ++ if (!pmd_migration)
4572 + atomic_inc(&page[i]._mapcount);
4573 ++ pte_unmap(pte);
4574 + }
4575 +
4576 +- if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
4577 +- /* Last compound_mapcount is gone. */
4578 +- __dec_node_page_state(page, NR_ANON_THPS);
4579 +- if (TestClearPageDoubleMap(page)) {
4580 +- /* No need in mapcount reference anymore */
4581 ++ if (!pmd_migration) {
4582 ++ /*
4583 ++ * Set PG_double_map before dropping compound_mapcount to avoid
4584 ++ * false-negative page_mapped().
4585 ++ */
4586 ++ if (compound_mapcount(page) > 1 &&
4587 ++ !TestSetPageDoubleMap(page)) {
4588 + for (i = 0; i < HPAGE_PMD_NR; i++)
4589 +- atomic_dec(&page[i]._mapcount);
4590 ++ atomic_inc(&page[i]._mapcount);
4591 ++ }
4592 ++
4593 ++ lock_page_memcg(page);
4594 ++ if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
4595 ++ /* Last compound_mapcount is gone. */
4596 ++ __dec_lruvec_page_state(page, NR_ANON_THPS);
4597 ++ if (TestClearPageDoubleMap(page)) {
4598 ++ /* No need in mapcount reference anymore */
4599 ++ for (i = 0; i < HPAGE_PMD_NR; i++)
4600 ++ atomic_dec(&page[i]._mapcount);
4601 ++ }
4602 + }
4603 ++ unlock_page_memcg(page);
4604 + }
4605 +
4606 + smp_wmb(); /* make pte visible before pmd */
4607 +diff --git a/mm/kmemleak.c b/mm/kmemleak.c
4608 +index d779181bed4d8..706f705c2e0a4 100644
4609 +--- a/mm/kmemleak.c
4610 ++++ b/mm/kmemleak.c
4611 +@@ -2030,7 +2030,7 @@ void __init kmemleak_init(void)
4612 + create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
4613 + KMEMLEAK_GREY, GFP_ATOMIC);
4614 + /* only register .data..ro_after_init if not within .data */
4615 +- if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata)
4616 ++ if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
4617 + create_object((unsigned long)__start_ro_after_init,
4618 + __end_ro_after_init - __start_ro_after_init,
4619 + KMEMLEAK_GREY, GFP_ATOMIC);
4620 +diff --git a/mm/memory.c b/mm/memory.c
4621 +index e9bce27bc18c3..caefa5526b20c 100644
4622 +--- a/mm/memory.c
4623 ++++ b/mm/memory.c
4624 +@@ -117,6 +117,18 @@ int randomize_va_space __read_mostly =
4625 + 2;
4626 + #endif
4627 +
4628 ++#ifndef arch_faults_on_old_pte
4629 ++static inline bool arch_faults_on_old_pte(void)
4630 ++{
4631 ++ /*
4632 ++ * Architectures without a hardware "accessed" flag need to
4633 ++ * implement their own helper. By default, "true" means a page
4634 ++ * fault will be taken on an old pte.
4635 ++ */
4636 ++ return true;
4637 ++}
4638 ++#endif
4639 ++
4640 + static int __init disable_randmaps(char *s)
4641 + {
4642 + randomize_va_space = 0;
4643 +@@ -2324,32 +2336,101 @@ static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
4644 + return same;
4645 + }
4646 +
4647 +-static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
4648 ++static inline bool cow_user_page(struct page *dst, struct page *src,
4649 ++ struct vm_fault *vmf)
4650 + {
4651 ++ bool ret;
4652 ++ void *kaddr;
4653 ++ void __user *uaddr;
4654 ++ bool locked = false;
4655 ++ struct vm_area_struct *vma = vmf->vma;
4656 ++ struct mm_struct *mm = vma->vm_mm;
4657 ++ unsigned long addr = vmf->address;
4658 ++
4659 + debug_dma_assert_idle(src);
4660 +
4661 ++ if (likely(src)) {
4662 ++ copy_user_highpage(dst, src, addr, vma);
4663 ++ return true;
4664 ++ }
4665 ++
4666 + /*
4667 + * If the source page was a PFN mapping, we don't have
4668 + * a "struct page" for it. We do a best-effort copy by
4669 + * just copying from the original user address. If that
4670 + * fails, we just zero-fill it. Live with it.
4671 + */
4672 +- if (unlikely(!src)) {
4673 +- void *kaddr = kmap_atomic(dst);
4674 +- void __user *uaddr = (void __user *)(va & PAGE_MASK);
4675 ++ kaddr = kmap_atomic(dst);
4676 ++ uaddr = (void __user *)(addr & PAGE_MASK);
4677 ++
4678 ++ /*
4679 ++ * On architectures with software "accessed" bits, we would
4680 ++ * take a double page fault, so mark it accessed here.
4681 ++ */
4682 ++ if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {
4683 ++ pte_t entry;
4684 ++
4685 ++ vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
4686 ++ locked = true;
4687 ++ if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
4688 ++ /*
4689 ++ * Another thread has already handled the fault
4690 ++ * and we don't need to do anything. If it's
4691 ++ * not the case, the fault will be triggered
4692 ++ * again on the same address.
4693 ++ */
4694 ++ ret = false;
4695 ++ goto pte_unlock;
4696 ++ }
4697 ++
4698 ++ entry = pte_mkyoung(vmf->orig_pte);
4699 ++ if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
4700 ++ update_mmu_cache(vma, addr, vmf->pte);
4701 ++ }
4702 ++
4703 ++ /*
4704 ++ * This really shouldn't fail, because the page is there
4705 ++ * in the page tables. But it might just be unreadable,
4706 ++ * in which case we just give up and fill the result with
4707 ++ * zeroes.
4708 ++ */
4709 ++ if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
4710 ++ if (locked)
4711 ++ goto warn;
4712 ++
4713 ++ /* Re-validate under PTL if the page is still mapped */
4714 ++ vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
4715 ++ locked = true;
4716 ++ if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
4717 ++ /* The PTE changed under us. Retry page fault. */
4718 ++ ret = false;
4719 ++ goto pte_unlock;
4720 ++ }
4721 +
4722 + /*
4723 +- * This really shouldn't fail, because the page is there
4724 +- * in the page tables. But it might just be unreadable,
4725 +- * in which case we just give up and fill the result with
4726 +- * zeroes.
4727 ++ * The same page may have been mapped back since the last copy attempt.
4728 ++ * Try to copy again under PTL.
4729 + */
4730 +- if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
4731 ++ if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
4732 ++ /*
4733 ++ * Warn in case there is some obscure use-case
4734 ++ * hitting this path
4735 ++ */
4736 ++warn:
4737 ++ WARN_ON_ONCE(1);
4738 + clear_page(kaddr);
4739 +- kunmap_atomic(kaddr);
4740 +- flush_dcache_page(dst);
4741 +- } else
4742 +- copy_user_highpage(dst, src, va, vma);
4743 ++ }
4744 ++ }
4745 ++
4746 ++ ret = true;
4747 ++
4748 ++pte_unlock:
4749 ++ if (locked)
4750 ++ pte_unmap_unlock(vmf->pte, vmf->ptl);
4751 ++ kunmap_atomic(kaddr);
4752 ++ flush_dcache_page(dst);
4753 ++
4754 ++ return ret;
4755 + }
4756 +
4757 + static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
4758 +@@ -2503,7 +2584,19 @@ static int wp_page_copy(struct vm_fault *vmf)
4759 + vmf->address);
4760 + if (!new_page)
4761 + goto oom;
4762 +- cow_user_page(new_page, old_page, vmf->address, vma);
4763 ++
4764 ++ if (!cow_user_page(new_page, old_page, vmf)) {
4765 ++ /*
4766 ++ * COW failed; if the fault was resolved by another
4767 ++ * thread, that's fine. If not, userspace will
4768 ++ * re-fault on the same address and we will handle
4769 ++ * the fault from the second attempt.
4770 ++ */
4771 ++ put_page(new_page);
4772 ++ if (old_page)
4773 ++ put_page(old_page);
4774 ++ return 0;
4775 ++ }
4776 + }
4777 +
4778 + if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false))
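
The restructured cow_user_page() follows an optimistic pattern worth calling out: try the copy locklessly, and only on failure take the PTE lock, re-validate that the PTE is unchanged, and retry (the real function additionally warns and zero-fills if even the locked retry fails). A toy sketch of that shape, with pthreads standing in for the PTE lock and stub predicates standing in for the real checks:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t pte_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Stubs: the first copy attempt "races", the re-check passes. */
    static bool try_copy(void)       { return false; }
    static bool pte_still_same(void) { return true; }

    /* Returns false to mean "caller should just re-fault", not error. */
    static bool copy_with_retry(void)
    {
            bool ok;

            if (try_copy())                 /* optimistic, lockless */
                    return true;

            pthread_mutex_lock(&pte_lock);
            if (!pte_still_same())
                    ok = false;             /* someone else resolved it */
            else
                    ok = try_copy();        /* retry with a stable PTE */
            pthread_mutex_unlock(&pte_lock);
            return ok;
    }
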
4779 +diff --git a/mm/mmap.c b/mm/mmap.c
4780 +index 724b7c4f1a5b5..c389fd258384f 100644
4781 +--- a/mm/mmap.c
4782 ++++ b/mm/mmap.c
4783 +@@ -2034,6 +2034,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4784 + info.low_limit = mm->mmap_base;
4785 + info.high_limit = TASK_SIZE;
4786 + info.align_mask = 0;
4787 ++ info.align_offset = 0;
4788 + return vm_unmapped_area(&info);
4789 + }
4790 + #endif
4791 +@@ -2075,6 +2076,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4792 + info.low_limit = max(PAGE_SIZE, mmap_min_addr);
4793 + info.high_limit = mm->mmap_base;
4794 + info.align_mask = 0;
4795 ++ info.align_offset = 0;
4796 + addr = vm_unmapped_area(&info);
4797 +
4798 + /*
4799 +diff --git a/mm/pagewalk.c b/mm/pagewalk.c
4800 +index 23a3e415ac2ce..84bdb2bac3dc6 100644
4801 +--- a/mm/pagewalk.c
4802 ++++ b/mm/pagewalk.c
4803 +@@ -15,9 +15,9 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
4804 + err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
4805 + if (err)
4806 + break;
4807 +- addr += PAGE_SIZE;
4808 +- if (addr == end)
4809 ++ if (addr >= end - PAGE_SIZE)
4810 + break;
4811 ++ addr += PAGE_SIZE;
4812 + pte++;
4813 + }
4814 +
4815 +diff --git a/mm/swap_state.c b/mm/swap_state.c
4816 +index 755be95d52f9c..3ceea86818bd4 100644
4817 +--- a/mm/swap_state.c
4818 ++++ b/mm/swap_state.c
4819 +@@ -524,10 +524,11 @@ static unsigned long swapin_nr_pages(unsigned long offset)
4820 + return 1;
4821 +
4822 + hits = atomic_xchg(&swapin_readahead_hits, 0);
4823 +- pages = __swapin_nr_pages(prev_offset, offset, hits, max_pages,
4824 ++ pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
4825 ++ max_pages,
4826 + atomic_read(&last_readahead_pages));
4827 + if (!hits)
4828 +- prev_offset = offset;
4829 ++ WRITE_ONCE(prev_offset, offset);
4830 + atomic_set(&last_readahead_pages, pages);
4831 +
4832 + return pages;
4833 +diff --git a/mm/swapfile.c b/mm/swapfile.c
4834 +index 4f9e522643a2b..f79f0d938274a 100644
4835 +--- a/mm/swapfile.c
4836 ++++ b/mm/swapfile.c
4837 +@@ -973,7 +973,7 @@ start_over:
4838 + goto nextsi;
4839 + }
4840 + if (cluster) {
4841 +- if (!(si->flags & SWP_FILE))
4842 ++ if (si->flags & SWP_BLKDEV)
4843 + n_ret = swap_alloc_cluster(si, swp_entries);
4844 + } else
4845 + n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
4846 +diff --git a/mm/vmscan.c b/mm/vmscan.c
4847 +index c6962aa5ddb40..5ee6fbdec8a8d 100644
4848 +--- a/mm/vmscan.c
4849 ++++ b/mm/vmscan.c
4850 +@@ -2950,8 +2950,9 @@ static bool allow_direct_reclaim(pg_data_t *pgdat)
4851 +
4852 + /* kswapd must be awake if processes are being throttled */
4853 + if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
4854 +- pgdat->kswapd_classzone_idx = min(pgdat->kswapd_classzone_idx,
4855 +- (enum zone_type)ZONE_NORMAL);
4856 ++ if (READ_ONCE(pgdat->kswapd_classzone_idx) > ZONE_NORMAL)
4857 ++ WRITE_ONCE(pgdat->kswapd_classzone_idx, ZONE_NORMAL);
4858 ++
4859 + wake_up_interruptible(&pgdat->kswapd_wait);
4860 + }
4861 +
4862 +@@ -3451,9 +3452,9 @@ out:
4863 + static enum zone_type kswapd_classzone_idx(pg_data_t *pgdat,
4864 + enum zone_type prev_classzone_idx)
4865 + {
4866 +- if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
4867 +- return prev_classzone_idx;
4868 +- return pgdat->kswapd_classzone_idx;
4869 ++ enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_classzone_idx);
4870 ++
4871 ++ return curr_idx == MAX_NR_ZONES ? prev_classzone_idx : curr_idx;
4872 + }
4873 +
4874 + static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
4875 +@@ -3497,8 +3498,11 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_o
4876 + * the previous request that slept prematurely.
4877 + */
4878 + if (remaining) {
4879 +- pgdat->kswapd_classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
4880 +- pgdat->kswapd_order = max(pgdat->kswapd_order, reclaim_order);
4881 ++ WRITE_ONCE(pgdat->kswapd_classzone_idx,
4882 ++ kswapd_classzone_idx(pgdat, classzone_idx));
4883 ++
4884 ++ if (READ_ONCE(pgdat->kswapd_order) < reclaim_order)
4885 ++ WRITE_ONCE(pgdat->kswapd_order, reclaim_order);
4886 + }
4887 +
4888 + finish_wait(&pgdat->kswapd_wait, &wait);
4889 +@@ -3580,12 +3584,12 @@ static int kswapd(void *p)
4890 + tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
4891 + set_freezable();
4892 +
4893 +- pgdat->kswapd_order = 0;
4894 +- pgdat->kswapd_classzone_idx = MAX_NR_ZONES;
4895 ++ WRITE_ONCE(pgdat->kswapd_order, 0);
4896 ++ WRITE_ONCE(pgdat->kswapd_classzone_idx, MAX_NR_ZONES);
4897 + for ( ; ; ) {
4898 + bool ret;
4899 +
4900 +- alloc_order = reclaim_order = pgdat->kswapd_order;
4901 ++ alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);
4902 + classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
4903 +
4904 + kswapd_try_sleep:
4905 +@@ -3593,10 +3597,10 @@ kswapd_try_sleep:
4906 + classzone_idx);
4907 +
4908 + /* Read the new order and classzone_idx */
4909 +- alloc_order = reclaim_order = pgdat->kswapd_order;
4910 ++ alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);
4911 + classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
4912 +- pgdat->kswapd_order = 0;
4913 +- pgdat->kswapd_classzone_idx = MAX_NR_ZONES;
4914 ++ WRITE_ONCE(pgdat->kswapd_order, 0);
4915 ++ WRITE_ONCE(pgdat->kswapd_classzone_idx, MAX_NR_ZONES);
4916 +
4917 + ret = try_to_freeze();
4918 + if (kthread_should_stop())
4919 +@@ -3638,20 +3642,23 @@ kswapd_try_sleep:
4920 + void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
4921 + {
4922 + pg_data_t *pgdat;
4923 ++ enum zone_type curr_idx;
4924 +
4925 + if (!managed_zone(zone))
4926 + return;
4927 +
4928 + if (!cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_HARDWALL))
4929 + return;
4930 ++
4931 + pgdat = zone->zone_pgdat;
4932 ++ curr_idx = READ_ONCE(pgdat->kswapd_classzone_idx);
4933 ++
4934 ++ if (curr_idx == MAX_NR_ZONES || curr_idx < classzone_idx)
4935 ++ WRITE_ONCE(pgdat->kswapd_classzone_idx, classzone_idx);
4936 ++
4937 ++ if (READ_ONCE(pgdat->kswapd_order) < order)
4938 ++ WRITE_ONCE(pgdat->kswapd_order, order);
4939 +
4940 +- if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
4941 +- pgdat->kswapd_classzone_idx = classzone_idx;
4942 +- else
4943 +- pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx,
4944 +- classzone_idx);
4945 +- pgdat->kswapd_order = max(pgdat->kswapd_order, order);
4946 + if (!waitqueue_active(&pgdat->kswapd_wait))
4947 + return;
4948 +
4949 +diff --git a/net/atm/lec.c b/net/atm/lec.c
4950 +index 85ce89c8a35c9..0b0794b6a8149 100644
4951 +--- a/net/atm/lec.c
4952 ++++ b/net/atm/lec.c
4953 +@@ -1282,6 +1282,12 @@ static void lec_arp_clear_vccs(struct lec_arp_table *entry)
4954 + entry->vcc = NULL;
4955 + }
4956 + if (entry->recv_vcc) {
4957 ++ struct atm_vcc *vcc = entry->recv_vcc;
4958 ++ struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
4959 ++
4960 ++ kfree(vpriv);
4961 ++ vcc->user_back = NULL;
4962 ++
4963 + entry->recv_vcc->push = entry->old_recv_push;
4964 + vcc_release_async(entry->recv_vcc, -EPIPE);
4965 + entry->recv_vcc = NULL;
4966 +diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
4967 +index ae647fa69ce85..ae1147b8710f3 100644
4968 +--- a/net/batman-adv/bridge_loop_avoidance.c
4969 ++++ b/net/batman-adv/bridge_loop_avoidance.c
4970 +@@ -36,6 +36,7 @@
4971 + #include <linux/lockdep.h>
4972 + #include <linux/netdevice.h>
4973 + #include <linux/netlink.h>
4974 ++#include <linux/preempt.h>
4975 + #include <linux/rculist.h>
4976 + #include <linux/rcupdate.h>
4977 + #include <linux/seq_file.h>
4978 +@@ -95,11 +96,12 @@ static inline u32 batadv_choose_claim(const void *data, u32 size)
4979 + */
4980 + static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
4981 + {
4982 +- const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
4983 ++ const struct batadv_bla_backbone_gw *gw;
4984 + u32 hash = 0;
4985 +
4986 +- hash = jhash(&claim->addr, sizeof(claim->addr), hash);
4987 +- hash = jhash(&claim->vid, sizeof(claim->vid), hash);
4988 ++ gw = (struct batadv_bla_backbone_gw *)data;
4989 ++ hash = jhash(&gw->orig, sizeof(gw->orig), hash);
4990 ++ hash = jhash(&gw->vid, sizeof(gw->vid), hash);
4991 +
4992 + return hash % size;
4993 + }
4994 +@@ -1825,7 +1827,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
4995 + * @bat_priv: the bat priv with all the soft interface information
4996 + * @skb: the frame to be checked
4997 + * @vid: the VLAN ID of the frame
4998 +- * @is_bcast: the packet came in a broadcast packet type.
4999 ++ * @packet_type: the batman packet type this frame came in
5000 + *
5001 + * batadv_bla_rx avoidance checks if:
5002 + * * we have to race for a claim
5003 +@@ -1837,7 +1839,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
5004 + * further process the skb.
5005 + */
5006 + bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
5007 +- unsigned short vid, bool is_bcast)
5008 ++ unsigned short vid, int packet_type)
5009 + {
5010 + struct batadv_bla_backbone_gw *backbone_gw;
5011 + struct ethhdr *ethhdr;
5012 +@@ -1859,9 +1861,24 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
5013 + goto handled;
5014 +
5015 + if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
5016 +- /* don't allow broadcasts while requests are in flight */
5017 +- if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
5018 +- goto handled;
5019 ++ /* don't allow multicast packets while requests are in flight */
5020 ++ if (is_multicast_ether_addr(ethhdr->h_dest))
5021 ++ /* Both broadcast flooding and multicast-via-unicasts
5022 ++ * delivery might send to multiple backbone gateways
5023 ++ * sharing the same LAN and therefore need to coordinate
5024 ++ * which backbone gateway forwards into the LAN,
5025 ++ * by claiming the payload source address.
5026 ++ *
5027 ++ * Broadcast flooding and multicast-via-unicasts
5028 ++ * delivery use the following two batman packet types.
5029 ++ * Note: explicitly exclude BATADV_UNICAST_4ADDR,
5030 ++ * as the DHCP gateway feature will send explicitly
5031 ++ * to only one BLA gateway, so the claiming process
5032 ++ * should be avoided there.
5033 ++ */
5034 ++ if (packet_type == BATADV_BCAST ||
5035 ++ packet_type == BATADV_UNICAST)
5036 ++ goto handled;
5037 +
5038 + ether_addr_copy(search_claim.addr, ethhdr->h_source);
5039 + search_claim.vid = vid;
5040 +@@ -1896,13 +1913,14 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
5041 + goto allow;
5042 + }
5043 +
5044 +- /* if it is a broadcast ... */
5045 +- if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
5046 ++ /* if it is a multicast ... */
5047 ++ if (is_multicast_ether_addr(ethhdr->h_dest) &&
5048 ++ (packet_type == BATADV_BCAST || packet_type == BATADV_UNICAST)) {
5049 + /* ... drop it. the responsible gateway is in charge.
5050 + *
5051 +- * We need to check is_bcast because with the gateway
5052 ++ * We need to check packet type because with the gateway
5053 + * feature, broadcasts (like DHCP requests) may be sent
5054 +- * using a unicast packet type.
5055 ++ * using a unicast 4 address packet type. See comment above.
5056 + */
5057 + goto handled;
5058 + } else {
5059 +diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
5060 +index 234775748b8ea..e05809abf79cd 100644
5061 +--- a/net/batman-adv/bridge_loop_avoidance.h
5062 ++++ b/net/batman-adv/bridge_loop_avoidance.h
5063 +@@ -47,7 +47,7 @@ static inline bool batadv_bla_is_loopdetect_mac(const uint8_t *mac)
5064 +
5065 + #ifdef CONFIG_BATMAN_ADV_BLA
5066 + bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
5067 +- unsigned short vid, bool is_bcast);
5068 ++ unsigned short vid, int packet_type);
5069 + bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
5070 + unsigned short vid);
5071 + bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
5072 +@@ -78,7 +78,7 @@ bool batadv_bla_check_claim(struct batadv_priv *bat_priv, u8 *addr,
5073 +
5074 + static inline bool batadv_bla_rx(struct batadv_priv *bat_priv,
5075 + struct sk_buff *skb, unsigned short vid,
5076 +- bool is_bcast)
5077 ++ int packet_type)
5078 + {
5079 + return false;
5080 + }
5081 +diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
5082 +index f59aac06733e1..83f73f840ff91 100644
5083 +--- a/net/batman-adv/routing.c
5084 ++++ b/net/batman-adv/routing.c
5085 +@@ -822,6 +822,10 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
5086 + vid = batadv_get_vid(skb, hdr_len);
5087 + ethhdr = (struct ethhdr *)(skb->data + hdr_len);
5088 +
5089 ++ /* do not reroute multicast frames in a unicast header */
5090 ++ if (is_multicast_ether_addr(ethhdr->h_dest))
5091 ++ return true;
5092 ++
5093 + /* check if the destination client was served by this node and it is now
5094 + * roaming. In this case, it means that the node has got a ROAM_ADV
5095 + * message and that it knows the new destination in the mesh to re-route
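
For context on the test this hunk adds: an Ethernet destination counts as multicast when the I/G bit, the least-significant bit of the first address octet, is set, and that is all is_multicast_ether_addr() examines (broadcast ff:ff:ff:ff:ff:ff is just the extreme case). A freestanding equivalent:

    #include <stdbool.h>
    #include <stdint.h>

    /* Mirrors the kernel helper: bit 0 of octet 0 is the group bit. */
    static bool is_multicast_mac(const uint8_t addr[6])
    {
            return addr[0] & 0x01;
    }
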
5096 +diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
5097 +index 7c883420485b8..ba9dce04343a3 100644
5098 +--- a/net/batman-adv/soft-interface.c
5099 ++++ b/net/batman-adv/soft-interface.c
5100 +@@ -418,10 +418,10 @@ void batadv_interface_rx(struct net_device *soft_iface,
5101 + struct vlan_ethhdr *vhdr;
5102 + struct ethhdr *ethhdr;
5103 + unsigned short vid;
5104 +- bool is_bcast;
5105 ++ int packet_type;
5106 +
5107 + batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data;
5108 +- is_bcast = (batadv_bcast_packet->packet_type == BATADV_BCAST);
5109 ++ packet_type = batadv_bcast_packet->packet_type;
5110 +
5111 + skb_pull_rcsum(skb, hdr_size);
5112 + skb_reset_mac_header(skb);
5113 +@@ -464,7 +464,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
5114 + /* Let the bridge loop avoidance check the packet. If it will
5115 + * not handle it, we can safely push it up.
5116 + */
5117 +- if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
5118 ++ if (batadv_bla_rx(bat_priv, skb, vid, packet_type))
5119 + goto out;
5120 +
5121 + if (orig_node)
5122 +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
5123 +index 70b8e2de40cf4..587b674bbcd64 100644
5124 +--- a/net/bluetooth/hci_event.c
5125 ++++ b/net/bluetooth/hci_event.c
5126 +@@ -41,12 +41,27 @@
5127 +
5128 + /* Handle HCI Event packets */
5129 +
5130 +-static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
5131 ++static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
5132 ++ u8 *new_status)
5133 + {
5134 + __u8 status = *((__u8 *) skb->data);
5135 +
5136 + BT_DBG("%s status 0x%2.2x", hdev->name, status);
5137 +
5138 ++ /* It is possible that we receive the Inquiry Complete event right
5139 ++ * before we receive the Inquiry Cancel Command Complete event, in
5140 ++ * which case the latter event should have a status of Command
5141 ++ * Disallowed (0x0c). This should not be treated as an error, since
5142 ++ * we actually achieve what Inquiry Cancel was meant to achieve,
5143 ++ * which is to end the last Inquiry session.
5144 ++ */
5145 ++ if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
5146 ++ bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
5147 ++ status = 0x00;
5148 ++ }
5149 ++
5150 ++ *new_status = status;
5151 ++
5152 + if (status)
5153 + return;
5154 +
5155 +@@ -2772,7 +2787,7 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
5156 +
5157 + switch (*opcode) {
5158 + case HCI_OP_INQUIRY_CANCEL:
5159 +- hci_cc_inquiry_cancel(hdev, skb);
5160 ++ hci_cc_inquiry_cancel(hdev, skb, status);
5161 + break;
5162 +
5163 + case HCI_OP_PERIODIC_INQ:
5164 +@@ -5257,6 +5272,11 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
5165 + u8 status = 0, event = hdr->evt, req_evt = 0;
5166 + u16 opcode = HCI_OP_NOP;
5167 +
5168 ++ if (!event) {
5169 ++ bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
5170 ++ goto done;
5171 ++ }
5172 ++
5173 + if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
5174 + struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
5175 + opcode = __le16_to_cpu(cmd_hdr->opcode);
5176 +@@ -5468,6 +5488,7 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
5177 + req_complete_skb(hdev, status, opcode, orig_skb);
5178 + }
5179 +
5180 ++done:
5181 + kfree_skb(orig_skb);
5182 + kfree_skb(skb);
5183 + hdev->stat.evt_rx++;
5184 +diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
5185 +index ebdf1b0e576a5..c301b9debea7c 100644
5186 +--- a/net/bluetooth/l2cap_core.c
5187 ++++ b/net/bluetooth/l2cap_core.c
5188 +@@ -414,6 +414,9 @@ static void l2cap_chan_timeout(struct work_struct *work)
5189 + BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
5190 +
5191 + mutex_lock(&conn->chan_lock);
5192 ++ /* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
5193 ++ * this work. No need to call l2cap_chan_hold(chan) here again.
5194 ++ */
5195 + l2cap_chan_lock(chan);
5196 +
5197 + if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
5198 +@@ -426,12 +429,12 @@ static void l2cap_chan_timeout(struct work_struct *work)
5199 +
5200 + l2cap_chan_close(chan, reason);
5201 +
5202 +- l2cap_chan_unlock(chan);
5203 +-
5204 + chan->ops->close(chan);
5205 +- mutex_unlock(&conn->chan_lock);
5206 +
5207 ++ l2cap_chan_unlock(chan);
5208 + l2cap_chan_put(chan);
5209 ++
5210 ++ mutex_unlock(&conn->chan_lock);
5211 + }
5212 +
5213 + struct l2cap_chan *l2cap_chan_create(void)
5214 +@@ -1725,9 +1728,9 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
5215 +
5216 + l2cap_chan_del(chan, err);
5217 +
5218 +- l2cap_chan_unlock(chan);
5219 +-
5220 + chan->ops->close(chan);
5221 ++
5222 ++ l2cap_chan_unlock(chan);
5223 + l2cap_chan_put(chan);
5224 + }
5225 +
5226 +@@ -4114,7 +4117,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
5227 + return 0;
5228 + }
5229 +
5230 +- if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
5231 ++ if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
5232 ++ chan->state != BT_CONNECTED) {
5233 + cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
5234 + chan->dcid);
5235 + goto unlock;
5236 +@@ -4337,6 +4341,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
5237 + return 0;
5238 + }
5239 +
5240 ++ l2cap_chan_hold(chan);
5241 + l2cap_chan_lock(chan);
5242 +
5243 + rsp.dcid = cpu_to_le16(chan->scid);
5244 +@@ -4345,12 +4350,11 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
5245 +
5246 + chan->ops->set_shutdown(chan);
5247 +
5248 +- l2cap_chan_hold(chan);
5249 + l2cap_chan_del(chan, ECONNRESET);
5250 +
5251 +- l2cap_chan_unlock(chan);
5252 +-
5253 + chan->ops->close(chan);
5254 ++
5255 ++ l2cap_chan_unlock(chan);
5256 + l2cap_chan_put(chan);
5257 +
5258 + mutex_unlock(&conn->chan_lock);
5259 +@@ -4382,20 +4386,21 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
5260 + return 0;
5261 + }
5262 +
5263 ++ l2cap_chan_hold(chan);
5264 + l2cap_chan_lock(chan);
5265 +
5266 + if (chan->state != BT_DISCONN) {
5267 + l2cap_chan_unlock(chan);
5268 ++ l2cap_chan_put(chan);
5269 + mutex_unlock(&conn->chan_lock);
5270 + return 0;
5271 + }
5272 +
5273 +- l2cap_chan_hold(chan);
5274 + l2cap_chan_del(chan, 0);
5275 +
5276 +- l2cap_chan_unlock(chan);
5277 +-
5278 + chan->ops->close(chan);
5279 ++
5280 ++ l2cap_chan_unlock(chan);
5281 + l2cap_chan_put(chan);
5282 +
5283 + mutex_unlock(&conn->chan_lock);
5284 +diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
5285 +index 8c329c549ea60..511a1da6ca971 100644
5286 +--- a/net/bluetooth/l2cap_sock.c
5287 ++++ b/net/bluetooth/l2cap_sock.c
5288 +@@ -1040,7 +1040,7 @@ done:
5289 + }
5290 +
5291 + /* Kill socket (only if zapped and orphan)
5292 +- * Must be called on unlocked socket.
5293 ++ * Must be called on an unlocked socket, with the l2cap channel lock held.
5294 + */
5295 + static void l2cap_sock_kill(struct sock *sk)
5296 + {
5297 +@@ -1191,6 +1191,7 @@ static int l2cap_sock_release(struct socket *sock)
5298 + {
5299 + struct sock *sk = sock->sk;
5300 + int err;
5301 ++ struct l2cap_chan *chan;
5302 +
5303 + BT_DBG("sock %p, sk %p", sock, sk);
5304 +
5305 +@@ -1200,9 +1201,17 @@ static int l2cap_sock_release(struct socket *sock)
5306 + bt_sock_unlink(&l2cap_sk_list, sk);
5307 +
5308 + err = l2cap_sock_shutdown(sock, 2);
5309 ++ chan = l2cap_pi(sk)->chan;
5310 ++
5311 ++ l2cap_chan_hold(chan);
5312 ++ l2cap_chan_lock(chan);
5313 +
5314 + sock_orphan(sk);
5315 + l2cap_sock_kill(sk);
5316 ++
5317 ++ l2cap_chan_unlock(chan);
5318 ++ l2cap_chan_put(chan);
5319 ++
5320 + return err;
5321 + }
5322 +
5323 +@@ -1220,12 +1229,15 @@ static void l2cap_sock_cleanup_listen(struct sock *parent)
5324 + BT_DBG("child chan %p state %s", chan,
5325 + state_to_string(chan->state));
5326 +
5327 ++ l2cap_chan_hold(chan);
5328 + l2cap_chan_lock(chan);
5329 ++
5330 + __clear_chan_timer(chan);
5331 + l2cap_chan_close(chan, ECONNRESET);
5332 +- l2cap_chan_unlock(chan);
5333 +-
5334 + l2cap_sock_kill(sk);
5335 ++
5336 ++ l2cap_chan_unlock(chan);
5337 ++ l2cap_chan_put(chan);
5338 + }
5339 + }
5340 +
5341 +diff --git a/net/core/neighbour.c b/net/core/neighbour.c
5342 +index 567e431813e59..20f6c634ad68a 100644
5343 +--- a/net/core/neighbour.c
5344 ++++ b/net/core/neighbour.c
5345 +@@ -2836,6 +2836,7 @@ static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
5346 + *pos = cpu+1;
5347 + return per_cpu_ptr(tbl->stats, cpu);
5348 + }
5349 ++ (*pos)++;
5350 + return NULL;
5351 + }
5352 +
5353 +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
5354 +index 73cd64c7692f9..819d51101cbd9 100644
5355 +--- a/net/ipv4/ip_output.c
5356 ++++ b/net/ipv4/ip_output.c
5357 +@@ -73,6 +73,7 @@
5358 + #include <net/icmp.h>
5359 + #include <net/checksum.h>
5360 + #include <net/inetpeer.h>
5361 ++#include <net/inet_ecn.h>
5362 + #include <net/lwtunnel.h>
5363 + #include <linux/bpf-cgroup.h>
5364 + #include <linux/igmp.h>
5365 +@@ -1562,7 +1563,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
5366 + if (IS_ERR(rt))
5367 + return;
5368 +
5369 +- inet_sk(sk)->tos = arg->tos;
5370 ++ inet_sk(sk)->tos = arg->tos & ~INET_ECN_MASK;
5371 +
5372 + sk->sk_priority = skb->priority;
5373 + sk->sk_protocol = ip_hdr(skb)->protocol;
5374 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
5375 +index a894adbb6c9b5..6fcb12e083d99 100644
5376 +--- a/net/ipv4/route.c
5377 ++++ b/net/ipv4/route.c
5378 +@@ -276,6 +276,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
5379 + *pos = cpu+1;
5380 + return &per_cpu(rt_cache_stat, cpu);
5381 + }
5382 ++ (*pos)++;
5383 + return NULL;
5384 +
5385 + }
5386 +@@ -794,6 +795,8 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
5387 + if (fib_lookup(net, fl4, &res, 0) == 0) {
5388 + struct fib_nh *nh = &FIB_RES_NH(res);
5389 +
5390 ++ fib_select_path(net, &res, fl4, skb);
5391 ++ nh = &FIB_RES_NH(res);
5392 + update_or_create_fnhe(nh, fl4->daddr, new_gw,
5393 + 0, false,
5394 + jiffies + ip_rt_gc_timeout);
5395 +@@ -1010,6 +1013,7 @@ out: kfree_skb(skb);
5396 + static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
5397 + {
5398 + struct dst_entry *dst = &rt->dst;
5399 ++ struct net *net = dev_net(dst->dev);
5400 + u32 old_mtu = ipv4_mtu(dst);
5401 + struct fib_result res;
5402 + bool lock = false;
5403 +@@ -1030,9 +1034,11 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
5404 + return;
5405 +
5406 + rcu_read_lock();
5407 +- if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
5408 +- struct fib_nh *nh = &FIB_RES_NH(res);
5409 ++ if (fib_lookup(net, fl4, &res, 0) == 0) {
5410 ++ struct fib_nh *nh;
5411 +
5412 ++ fib_select_path(net, &res, fl4, NULL);
5413 ++ nh = &FIB_RES_NH(res);
5414 + update_or_create_fnhe(nh, fl4->daddr, 0, mtu, lock,
5415 + jiffies + ip_rt_mtu_expires);
5416 + }
5417 +@@ -2505,8 +2511,6 @@ struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
5418 + fib_select_path(net, res, fl4, skb);
5419 +
5420 + dev_out = FIB_RES_DEV(*res);
5421 +- fl4->flowi4_oif = dev_out->ifindex;
5422 +-
5423 +
5424 + make_route:
5425 + rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);
5426 +diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
5427 +index 2d36fd0972990..a941f09a3fce9 100644
5428 +--- a/net/ipv6/Kconfig
5429 ++++ b/net/ipv6/Kconfig
5430 +@@ -321,6 +321,7 @@ config IPV6_SEG6_LWTUNNEL
5431 + config IPV6_SEG6_HMAC
5432 + bool "IPv6: Segment Routing HMAC support"
5433 + depends on IPV6
5434 ++ select CRYPTO
5435 + select CRYPTO_HMAC
5436 + select CRYPTO_SHA1
5437 + select CRYPTO_SHA256
5438 +diff --git a/net/key/af_key.c b/net/key/af_key.c
5439 +index f8f7065f7b627..0747747fffe58 100644
5440 +--- a/net/key/af_key.c
5441 ++++ b/net/key/af_key.c
5442 +@@ -1855,6 +1855,13 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
5443 + if (ext_hdrs[SADB_X_EXT_FILTER - 1]) {
5444 + struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1];
5445 +
5446 ++ if ((xfilter->sadb_x_filter_splen >=
5447 ++ (sizeof(xfrm_address_t) << 3)) ||
5448 ++ (xfilter->sadb_x_filter_dplen >=
5449 ++ (sizeof(xfrm_address_t) << 3))) {
5450 ++ mutex_unlock(&pfk->dump_lock);
5451 ++ return -EINVAL;
5452 ++ }
5453 + filter = kmalloc(sizeof(*filter), GFP_KERNEL);
5454 + if (filter == NULL) {
5455 + mutex_unlock(&pfk->dump_lock);
5456 +diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
5457 +index bcd1a5e6ebf42..2f873a0dc5836 100644
5458 +--- a/net/mac802154/tx.c
5459 ++++ b/net/mac802154/tx.c
5460 +@@ -42,11 +42,11 @@ void ieee802154_xmit_worker(struct work_struct *work)
5461 + if (res)
5462 + goto err_tx;
5463 +
5464 +- ieee802154_xmit_complete(&local->hw, skb, false);
5465 +-
5466 + dev->stats.tx_packets++;
5467 + dev->stats.tx_bytes += skb->len;
5468 +
5469 ++ ieee802154_xmit_complete(&local->hw, skb, false);
5470 ++
5471 + return;
5472 +
5473 + err_tx:
5474 +@@ -86,6 +86,8 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
5475 +
5476 + /* async is priority, otherwise sync is fallback */
5477 + if (local->ops->xmit_async) {
5478 ++ unsigned int len = skb->len;
5479 ++
5480 + ret = drv_xmit_async(local, skb);
5481 + if (ret) {
5482 + ieee802154_wake_queue(&local->hw);
5483 +@@ -93,7 +95,7 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
5484 + }
5485 +
5486 + dev->stats.tx_packets++;
5487 +- dev->stats.tx_bytes += skb->len;
5488 ++ dev->stats.tx_bytes += len;
5489 + } else {
5490 + local->tx_skb = skb;
5491 + queue_work(local->workqueue, &local->tx_work);
5492 +diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
5493 +index 7e5f849b44cdb..b293827b2a583 100644
5494 +--- a/net/sunrpc/svc_xprt.c
5495 ++++ b/net/sunrpc/svc_xprt.c
5496 +@@ -103,8 +103,17 @@ void svc_unreg_xprt_class(struct svc_xprt_class *xcl)
5497 + }
5498 + EXPORT_SYMBOL_GPL(svc_unreg_xprt_class);
5499 +
5500 +-/*
5501 +- * Format the transport list for printing
5502 ++/**
5503 ++ * svc_print_xprts - Format the transport list for printing
5504 ++ * @buf: target buffer for formatted address
5505 ++ * @maxlen: length of target buffer
5506 ++ *
5507 ++ * Fills in @buf with a string containing a list of transport names, each name
5508 ++ * terminated with '\n'. If the buffer is too small, some entries may be
5509 ++ * missing, but it is guaranteed that all lines in the output buffer are
5510 ++ * complete.
5511 ++ *
5512 ++ * Returns the positive length of the filled-in string.
5513 + */
5514 + int svc_print_xprts(char *buf, int maxlen)
5515 + {
5516 +@@ -117,9 +126,9 @@ int svc_print_xprts(char *buf, int maxlen)
5517 + list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
5518 + int slen;
5519 +
5520 +- sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload);
5521 +- slen = strlen(tmpstr);
5522 +- if (len + slen > maxlen)
5523 ++ slen = snprintf(tmpstr, sizeof(tmpstr), "%s %d\n",
5524 ++ xcl->xcl_name, xcl->xcl_max_payload);
5525 ++ if (slen >= sizeof(tmpstr) || len + slen >= maxlen)
5526 + break;
5527 + len += slen;
5528 + strcat(buf, tmpstr);
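
The rewrite leans on snprintf()'s return convention: it reports the length the string would have had, so "slen >= sizeof(tmpstr)" detects truncation of the scratch buffer and "len + slen >= maxlen" detects that appending would leave no room for the terminator. The perf cpumap hunks at the end of this patch fix the same class of check (">= PATH_MAX" rather than "== PATH_MAX"). A standalone sketch of the idiom:

    #include <stdio.h>
    #include <string.h>

    /* Appends "name payload\n" to buf; returns -1 instead of truncating. */
    static int append_line(char *buf, size_t maxlen, const char *name, int payload)
    {
            char tmp[64];
            int slen;

            slen = snprintf(tmp, sizeof(tmp), "%s %d\n", name, payload);
            if (slen < 0 || slen >= (int)sizeof(tmp))  /* tmp was truncated */
                    return -1;
            if (strlen(buf) + slen >= maxlen)          /* buf would overflow */
                    return -1;
            strcat(buf, tmp);
            return slen;
    }

    int main(void)
    {
            char buf[32] = "";

            if (append_line(buf, sizeof(buf), "tcp", 1048576) > 0)
                    fputs(buf, stdout);
            return 0;
    }
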
5529 +diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
5530 +index af7893501e40a..4b9aaf487327c 100644
5531 +--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
5532 ++++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
5533 +@@ -270,6 +270,7 @@ xprt_rdma_bc_put(struct rpc_xprt *xprt)
5534 + {
5535 + dprintk("svcrdma: %s: xprt %p\n", __func__, xprt);
5536 +
5537 ++ xprt_rdma_free_addresses(xprt);
5538 + xprt_free(xprt);
5539 + module_put(THIS_MODULE);
5540 + }
5541 +diff --git a/net/tipc/msg.c b/net/tipc/msg.c
5542 +index e383960258742..d048ec6dab12a 100644
5543 +--- a/net/tipc/msg.c
5544 ++++ b/net/tipc/msg.c
5545 +@@ -140,7 +140,8 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
5546 + if (fragid == FIRST_FRAGMENT) {
5547 + if (unlikely(head))
5548 + goto err;
5549 +- if (unlikely(skb_unclone(frag, GFP_ATOMIC)))
5550 ++ frag = skb_unshare(frag, GFP_ATOMIC);
5551 ++ if (unlikely(!frag))
5552 + goto err;
5553 + head = *headbuf = frag;
5554 + *buf = NULL;
5555 +diff --git a/net/tipc/socket.c b/net/tipc/socket.c
5556 +index cbec2242f79ab..44ede9ab78985 100644
5557 +--- a/net/tipc/socket.c
5558 ++++ b/net/tipc/socket.c
5559 +@@ -2126,10 +2126,7 @@ static int tipc_shutdown(struct socket *sock, int how)
5560 + lock_sock(sk);
5561 +
5562 + __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
5563 +- if (tipc_sk_type_connectionless(sk))
5564 +- sk->sk_shutdown = SHUTDOWN_MASK;
5565 +- else
5566 +- sk->sk_shutdown = SEND_SHUTDOWN;
5567 ++ sk->sk_shutdown = SHUTDOWN_MASK;
5568 +
5569 + if (sk->sk_state == TIPC_DISCONNECTING) {
5570 + /* Discard any unreceived messages */
5571 +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
5572 +index 091e93798eacc..44ff3f5c22dfd 100644
5573 +--- a/net/unix/af_unix.c
5574 ++++ b/net/unix/af_unix.c
5575 +@@ -192,11 +192,17 @@ static inline int unix_may_send(struct sock *sk, struct sock *osk)
5576 + return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
5577 + }
5578 +
5579 +-static inline int unix_recvq_full(struct sock const *sk)
5580 ++static inline int unix_recvq_full(const struct sock *sk)
5581 + {
5582 + return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
5583 + }
5584 +
5585 ++static inline int unix_recvq_full_lockless(const struct sock *sk)
5586 ++{
5587 ++ return skb_queue_len_lockless(&sk->sk_receive_queue) >
5588 ++ READ_ONCE(sk->sk_max_ack_backlog);
5589 ++}
5590 ++
5591 + struct sock *unix_peer_get(struct sock *s)
5592 + {
5593 + struct sock *peer;
5594 +@@ -1792,7 +1798,8 @@ restart_locked:
5595 + * - unix_peer(sk) == sk by time of get but disconnected before lock
5596 + */
5597 + if (other != sk &&
5598 +- unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
5599 ++ unlikely(unix_peer(other) != sk &&
5600 ++ unix_recvq_full_lockless(other))) {
5601 + if (timeo) {
5602 + timeo = unix_wait_for_peer(other, timeo);
5603 +
5604 +diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
5605 +index 00eed842c491c..bf50fead9f8c0 100644
5606 +--- a/security/selinux/selinuxfs.c
5607 ++++ b/security/selinux/selinuxfs.c
5608 +@@ -1425,6 +1425,7 @@ static struct avc_cache_stats *sel_avc_get_stat_idx(loff_t *idx)
5609 + *idx = cpu + 1;
5610 + return &per_cpu(avc_cache_stats, cpu);
5611 + }
5612 ++ (*idx)++;
5613 + return NULL;
5614 + }
5615 +
5616 +diff --git a/sound/hda/hdac_bus.c b/sound/hda/hdac_bus.c
5617 +index 714a51721a313..ab9236e4c157e 100644
5618 +--- a/sound/hda/hdac_bus.c
5619 ++++ b/sound/hda/hdac_bus.c
5620 +@@ -155,6 +155,7 @@ static void process_unsol_events(struct work_struct *work)
5621 + struct hdac_driver *drv;
5622 + unsigned int rp, caddr, res;
5623 +
5624 ++ spin_lock_irq(&bus->reg_lock);
5625 + while (bus->unsol_rp != bus->unsol_wp) {
5626 + rp = (bus->unsol_rp + 1) % HDA_UNSOL_QUEUE_SIZE;
5627 + bus->unsol_rp = rp;
5628 +@@ -166,10 +167,13 @@ static void process_unsol_events(struct work_struct *work)
5629 + codec = bus->caddr_tbl[caddr & 0x0f];
5630 + if (!codec || !codec->dev.driver)
5631 + continue;
5632 ++ spin_unlock_irq(&bus->reg_lock);
5633 + drv = drv_to_hdac_driver(codec->dev.driver);
5634 + if (drv->unsol_event)
5635 + drv->unsol_event(codec, res);
5636 ++ spin_lock_irq(&bus->reg_lock);
5637 + }
5638 ++ spin_unlock_irq(&bus->reg_lock);
5639 + }
5640 +
5641 + /**
5642 +diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c
5643 +index b1a2a7ea41723..b4ccd9f92400e 100644
5644 +--- a/sound/pci/asihpi/hpioctl.c
5645 ++++ b/sound/pci/asihpi/hpioctl.c
5646 +@@ -350,7 +350,7 @@ int asihpi_adapter_probe(struct pci_dev *pci_dev,
5647 + struct hpi_message hm;
5648 + struct hpi_response hr;
5649 + struct hpi_adapter adapter;
5650 +- struct hpi_pci pci;
5651 ++ struct hpi_pci pci = { 0 };
5652 +
5653 + memset(&adapter, 0, sizeof(adapter));
5654 +
5655 +@@ -506,7 +506,7 @@ int asihpi_adapter_probe(struct pci_dev *pci_dev,
5656 + return 0;
5657 +
5658 + err:
5659 +- for (idx = 0; idx < HPI_MAX_ADAPTER_MEM_SPACES; idx++) {
5660 ++ while (--idx >= 0) {
5661 + if (pci.ap_mem_base[idx]) {
5662 + iounmap(pci.ap_mem_base[idx]);
5663 + pci.ap_mem_base[idx] = NULL;
5664 +diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
5665 +index fa261b27d8588..8198d2e53b7df 100644
5666 +--- a/sound/pci/hda/hda_controller.c
5667 ++++ b/sound/pci/hda/hda_controller.c
5668 +@@ -1169,16 +1169,23 @@ irqreturn_t azx_interrupt(int irq, void *dev_id)
5669 + if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update))
5670 + active = true;
5671 +
5672 +- /* clear rirb int */
5673 + status = azx_readb(chip, RIRBSTS);
5674 + if (status & RIRB_INT_MASK) {
5675 ++ /*
5676 ++ * Clearing the interrupt status here ensures that no
5677 ++ * interrupt gets masked after the RIRB wp is read in
5678 ++ * snd_hdac_bus_update_rirb. This avoids a possible
5679 ++ * race condition where codec response in RIRB may
5680 ++ * remain unserviced by IRQ, eventually falling back
5681 ++ * to polling mode in azx_rirb_get_response.
5682 ++ */
5683 ++ azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
5684 + active = true;
5685 + if (status & RIRB_INT_RESPONSE) {
5686 + if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
5687 + udelay(80);
5688 + snd_hdac_bus_update_rirb(bus);
5689 + }
5690 +- azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
5691 + }
5692 + } while (active && ++repeat < 10);
5693 +
5694 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5695 +index 98110fd65b9bb..c27623052264b 100644
5696 +--- a/sound/pci/hda/patch_realtek.c
5697 ++++ b/sound/pci/hda/patch_realtek.c
5698 +@@ -3154,7 +3154,11 @@ static void alc256_shutup(struct hda_codec *codec)
5699 +
5700 + /* 3k pull low control for Headset jack. */
5701 + /* NOTE: call this before clearing the pin, otherwise codec stalls */
5702 +- alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
5703 ++ /* If the 3k pulldown control is disabled for alc257, mic detection will not work
5704 ++ * correctly when booting with a headset plugged in. So skip setting it for alc257
5705 ++ */
5706 ++ if (codec->core.vendor_id != 0x10ec0257)
5707 ++ alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
5708 +
5709 + snd_hda_codec_write(codec, hp_pin, 0,
5710 + AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
5711 +diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c
5712 +index cf23af159acf4..35ca8e8bb5e52 100644
5713 +--- a/sound/soc/kirkwood/kirkwood-dma.c
5714 ++++ b/sound/soc/kirkwood/kirkwood-dma.c
5715 +@@ -136,7 +136,7 @@ static int kirkwood_dma_open(struct snd_pcm_substream *substream)
5716 + err = request_irq(priv->irq, kirkwood_dma_irq, IRQF_SHARED,
5717 + "kirkwood-i2s", priv);
5718 + if (err)
5719 +- return -EBUSY;
5720 ++ return err;
5721 +
5722 + /*
5723 + * Enable Error interrupts. We're only ack'ing them but
5724 +diff --git a/sound/usb/midi.c b/sound/usb/midi.c
5725 +index 1bfae7a1c32f1..a3d1c0c1b4a67 100644
5726 +--- a/sound/usb/midi.c
5727 ++++ b/sound/usb/midi.c
5728 +@@ -1805,6 +1805,28 @@ static int snd_usbmidi_create_endpoints(struct snd_usb_midi *umidi,
5729 + return 0;
5730 + }
5731 +
5732 ++static struct usb_ms_endpoint_descriptor *find_usb_ms_endpoint_descriptor(
5733 ++ struct usb_host_endpoint *hostep)
5734 ++{
5735 ++ unsigned char *extra = hostep->extra;
5736 ++ int extralen = hostep->extralen;
5737 ++
5738 ++ while (extralen > 3) {
5739 ++ struct usb_ms_endpoint_descriptor *ms_ep =
5740 ++ (struct usb_ms_endpoint_descriptor *)extra;
5741 ++
5742 ++ if (ms_ep->bLength > 3 &&
5743 ++ ms_ep->bDescriptorType == USB_DT_CS_ENDPOINT &&
5744 ++ ms_ep->bDescriptorSubtype == UAC_MS_GENERAL)
5745 ++ return ms_ep;
5746 ++ if (!extra[0])
5747 ++ break;
5748 ++ extralen -= extra[0];
5749 ++ extra += extra[0];
5750 ++ }
5751 ++ return NULL;
5752 ++}
5753 ++
5754 + /*
5755 + * Returns MIDIStreaming device capabilities.
5756 + */
5757 +@@ -1842,11 +1864,8 @@ static int snd_usbmidi_get_ms_info(struct snd_usb_midi *umidi,
5758 + ep = get_ep_desc(hostep);
5759 + if (!usb_endpoint_xfer_bulk(ep) && !usb_endpoint_xfer_int(ep))
5760 + continue;
5761 +- ms_ep = (struct usb_ms_endpoint_descriptor *)hostep->extra;
5762 +- if (hostep->extralen < 4 ||
5763 +- ms_ep->bLength < 4 ||
5764 +- ms_ep->bDescriptorType != USB_DT_CS_ENDPOINT ||
5765 +- ms_ep->bDescriptorSubtype != UAC_MS_GENERAL)
5766 ++ ms_ep = find_usb_ms_endpoint_descriptor(hostep);
5767 ++ if (!ms_ep)
5768 + continue;
5769 + if (usb_endpoint_dir_out(ep)) {
5770 + if (endpoints[epidx].out_ep) {
5771 +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
5772 +index 4f4dc43e56a7b..224e0a7604282 100644
5773 +--- a/sound/usb/quirks.c
5774 ++++ b/sound/usb/quirks.c
5775 +@@ -1322,12 +1322,13 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
5776 + && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
5777 + mdelay(20);
5778 +
5779 +- /* Zoom R16/24, Logitech H650e, Jabra 550a, Kingston HyperX needs a tiny
5780 +- * delay here, otherwise requests like get/set frequency return as
5781 +- * failed despite actually succeeding.
5782 ++ /* Zoom R16/24, Logitech H650e/H570e, Jabra 550a, Kingston HyperX
5783 ++ * needs a tiny delay here, otherwise requests like get/set
5784 ++ * frequency return as failed despite actually succeeding.
5785 + */
5786 + if ((chip->usb_id == USB_ID(0x1686, 0x00dd) ||
5787 + chip->usb_id == USB_ID(0x046d, 0x0a46) ||
5788 ++ chip->usb_id == USB_ID(0x046d, 0x0a56) ||
5789 + chip->usb_id == USB_ID(0x0b0e, 0x0349) ||
5790 + chip->usb_id == USB_ID(0x0951, 0x16ad)) &&
5791 + (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
5792 +diff --git a/tools/gpio/gpio-hammer.c b/tools/gpio/gpio-hammer.c
5793 +index 4bcb234c0fcab..3da5462a0c7d3 100644
5794 +--- a/tools/gpio/gpio-hammer.c
5795 ++++ b/tools/gpio/gpio-hammer.c
5796 +@@ -138,7 +138,14 @@ int main(int argc, char **argv)
5797 + device_name = optarg;
5798 + break;
5799 + case 'o':
5800 +- lines[i] = strtoul(optarg, NULL, 10);
5801 ++ /*
5802 ++			/*
5803 ++			 * Avoid overflow. Do not error out immediately; keep
5804 ++			 * counting so the error message below can report exactly
5805 ++			 * how many times '-o' was given.
5805 ++ */
5806 ++ if (i < GPIOHANDLES_MAX)
5807 ++ lines[i] = strtoul(optarg, NULL, 10);
5808 ++
5809 + i++;
5810 + break;
5811 + case '?':
5812 +@@ -146,6 +153,14 @@ int main(int argc, char **argv)
5813 + return -1;
5814 + }
5815 + }
5816 ++
5817 ++ if (i >= GPIOHANDLES_MAX) {
5818 ++ fprintf(stderr,
5819 ++			"Only %d occurrences of '-o' are allowed, %d were found\n",
5820 ++ GPIOHANDLES_MAX, i + 1);
5821 ++ return -1;
5822 ++ }
5823 ++
5824 + nlines = i;
5825 +
5826 + if (!device_name || !nlines) {
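
The gpio-hammer fix clamps writes to lines[] while still counting every '-o', so the check after the loop can report the true number of occurrences instead of crashing on an out-of-bounds write. The same clamp-but-keep-counting pattern in a self-contained sketch (MAX_LINES and the argument loop are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_LINES 64    /* stand-in for GPIOHANDLES_MAX */

    int main(int argc, char **argv)
    {
        unsigned int lines[MAX_LINES];
        int i = 0, j;

        for (j = 1; j < argc; j++) {
            if (i < MAX_LINES)                  /* never write past the array */
                lines[i] = strtoul(argv[j], NULL, 10);
            i++;                                /* ...but keep the true count */
        }

        if (i > MAX_LINES) {
            fprintf(stderr, "Only %d lines are allowed, %d were given\n",
                    MAX_LINES, i);
            return 1;
        }
        printf("parsed %d line offsets\n", i);
        return 0;
    }
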
5827 +diff --git a/tools/objtool/check.c b/tools/objtool/check.c
5828 +index 247fbb5f6a389..2c8e2dae17016 100644
5829 +--- a/tools/objtool/check.c
5830 ++++ b/tools/objtool/check.c
5831 +@@ -502,7 +502,7 @@ static int add_jump_destinations(struct objtool_file *file)
5832 + insn->type != INSN_JUMP_UNCONDITIONAL)
5833 + continue;
5834 +
5835 +- if (insn->ignore || insn->offset == FAKE_JUMP_OFFSET)
5836 ++ if (insn->offset == FAKE_JUMP_OFFSET)
5837 + continue;
5838 +
5839 + rela = find_rela_by_dest_range(insn->sec, insn->offset,
5840 +diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
5841 +index 068d463e5cbfc..4b0922a209701 100644
5842 +--- a/tools/perf/tests/shell/lib/probe_vfs_getname.sh
5843 ++++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
5844 +@@ -14,7 +14,7 @@ add_probe_vfs_getname() {
5845 + if [ $had_vfs_getname -eq 1 ] ; then
5846 + line=$(perf probe -L getname_flags 2>&1 | egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/')
5847 + perf probe -q "vfs_getname=getname_flags:${line} pathname=result->name:string" || \
5848 +- perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:string"
5849 ++ perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:ustring"
5850 + fi
5851 + }
5852 +
5853 +diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
5854 +index f93846edc1e0d..827d844f4efb1 100644
5855 +--- a/tools/perf/util/cpumap.c
5856 ++++ b/tools/perf/util/cpumap.c
5857 +@@ -462,7 +462,7 @@ static void set_max_cpu_num(void)
5858 +
5859 + /* get the highest possible cpu number for a sparse allocation */
5860 + ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt);
5861 +- if (ret == PATH_MAX) {
5862 ++ if (ret >= PATH_MAX) {
5863 + pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
5864 + goto out;
5865 + }
5866 +@@ -473,7 +473,7 @@ static void set_max_cpu_num(void)
5867 +
5868 + /* get the highest present cpu number for a sparse allocation */
5869 + ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/present", mnt);
5870 +- if (ret == PATH_MAX) {
5871 ++ if (ret >= PATH_MAX) {
5872 + pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
5873 + goto out;
5874 + }
5875 +@@ -501,7 +501,7 @@ static void set_max_node_num(void)
5876 +
5877 + /* get the highest possible cpu number for a sparse allocation */
5878 + ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt);
5879 +- if (ret == PATH_MAX) {
5880 ++ if (ret >= PATH_MAX) {
5881 + pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
5882 + goto out;
5883 + }
5884 +@@ -586,7 +586,7 @@ int cpu__setup_cpunode_map(void)
5885 + return 0;
5886 +
5887 + n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt);
5888 +- if (n == PATH_MAX) {
5889 ++ if (n >= PATH_MAX) {
5890 + pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
5891 + return -1;
5892 + }
5893 +@@ -601,7 +601,7 @@ int cpu__setup_cpunode_map(void)
5894 + continue;
5895 +
5896 + n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name);
5897 +- if (n == PATH_MAX) {
5898 ++ if (n >= PATH_MAX) {
5899 + pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
5900 + continue;
5901 + }
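
All four cpumap.c hunks fix the same truncation test: snprintf() returns the length the fully formatted string would have needed, excluding the terminating NUL, so any return value greater than or equal to the buffer size means truncation; comparing with '== PATH_MAX' misses every longer path. A sketch of the corrected check:

    #include <limits.h>
    #include <stdio.h>

    /* Build "<mnt>/<rest>" into path[PATH_MAX]; returns 0 on success, -1
     * on output error or truncation. */
    int build_sysfs_path(char *path, const char *mnt, const char *rest)
    {
        int ret = snprintf(path, PATH_MAX, "%s/%s", mnt, rest);

        if (ret < 0)
            return -1;          /* output error */
        if (ret >= PATH_MAX)    /* '== PATH_MAX' would miss ret > PATH_MAX */
            return -1;          /* truncated */
        return 0;
    }
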
5902 +diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
5903 +index 84a33f1e9ec92..cd870129131e1 100644
5904 +--- a/tools/perf/util/sort.c
5905 ++++ b/tools/perf/util/sort.c
5906 +@@ -2667,7 +2667,7 @@ static char *prefix_if_not_in(const char *pre, char *str)
5907 + return str;
5908 +
5909 + if (asprintf(&n, "%s,%s", pre, str) < 0)
5910 +- return NULL;
5911 ++ n = NULL;
5912 +
5913 + free(str);
5914 + return n;
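
The sort.c change matters for two reasons: on failure asprintf() leaves its output pointer undefined, and the old early 'return NULL' skipped the free(str) below, leaking the input string. Setting n = NULL and falling through handles both. A condensed sketch of the patched shape (without the original's check for the prefix already being present):

    #define _GNU_SOURCE     /* asprintf() is a GNU extension */
    #include <stdio.h>
    #include <stdlib.h>

    static char *prefix_string(const char *pre, char *str)
    {
        char *n;

        if (asprintf(&n, "%s,%s", pre, str) < 0)
            n = NULL;   /* 'n' is undefined on failure; don't return it as-is */

        free(str);      /* release the old string on both paths */
        return n;
    }
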
5915 +diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
5916 +index 3d39332b3a06a..a0a4afa7e6781 100644
5917 +--- a/tools/perf/util/symbol-elf.c
5918 ++++ b/tools/perf/util/symbol-elf.c
5919 +@@ -1420,6 +1420,7 @@ struct kcore_copy_info {
5920 + u64 first_symbol;
5921 + u64 last_symbol;
5922 + u64 first_module;
5923 ++ u64 first_module_symbol;
5924 + u64 last_module_symbol;
5925 + struct phdr_data kernel_map;
5926 + struct phdr_data modules_map;
5927 +@@ -1434,6 +1435,8 @@ static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
5928 + return 0;
5929 +
5930 + if (strchr(name, '[')) {
5931 ++ if (!kci->first_module_symbol || start < kci->first_module_symbol)
5932 ++ kci->first_module_symbol = start;
5933 + if (start > kci->last_module_symbol)
5934 + kci->last_module_symbol = start;
5935 + return 0;
5936 +@@ -1558,6 +1561,10 @@ static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
5937 + kci->etext += page_size;
5938 + }
5939 +
5940 ++ if (kci->first_module_symbol &&
5941 ++ (!kci->first_module || kci->first_module_symbol < kci->first_module))
5942 ++ kci->first_module = kci->first_module_symbol;
5943 ++
5944 + kci->first_module = round_down(kci->first_module, page_size);
5945 +
5946 + if (kci->last_module_symbol) {
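
The symbol-elf.c hunks record the lowest module symbol seen in kallsyms and, if it lies below the module start parsed elsewhere, use it as the start of the modules map before rounding down to a page boundary. The "first seen" idiom used for first_module_symbol, as a sketch:

    /* 0 doubles as the "unset" sentinel, which is safe here because no
     * module symbol lives at address 0. */
    static void track_first(unsigned long long *first, unsigned long long addr)
    {
        if (!*first || addr < *first)
            *first = addr;
    }
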
5947 +diff --git a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
5948 +index 0b24dd9d01ff2..98c9f2df8aa71 100755
5949 +--- a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
5950 ++++ b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
5951 +@@ -10,11 +10,11 @@ then this utility enables and collects trace data for a user specified interval
5952 + and generates performance plots.
5953 +
5954 + Prerequisites:
5955 +- Python version 2.7.x
5956 ++ Python version 2.7.x or higher
5957 + gnuplot 5.0 or higher
5958 +- gnuplot-py 1.8
5959 ++ gnuplot-py 1.8 or higher
5960 + (Most of the distributions have these required packages. They may be called
5961 +- gnuplot-py, phython-gnuplot. )
5962 ++	gnuplot-py, python-gnuplot or python3-gnuplot, gnuplot-nox, ... )
5963 +
5964 + HWP (Hardware P-States are disabled)
5965 + Kernel config for Linux trace is enabled
5966 +@@ -178,7 +178,7 @@ def plot_pstate_cpu_with_sample():
5967 + g_plot('set xlabel "Samples"')
5968 + g_plot('set ylabel "P-State"')
5969 + g_plot('set title "{} : cpu pstate vs. sample : {:%F %H:%M}"'.format(testname, datetime.now()))
5970 +- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
5971 ++ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
5972 + plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_SAMPLE, C_TO)
5973 + g_plot('title_list = "{}"'.format(title_list))
5974 + g_plot(plot_str)
5975 +@@ -195,7 +195,7 @@ def plot_pstate_cpu():
5976 + # the following command is really cool, but doesn't work with the CPU masking option because it aborts on the first missing file.
5977 + # plot_str = 'plot for [i=0:*] file=sprintf("cpu%03d.csv",i) title_s=sprintf("cpu%03d",i) file using 16:7 pt 7 ps 1 title title_s'
5978 + #
5979 +- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
5980 ++ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
5981 + plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_TO)
5982 + g_plot('title_list = "{}"'.format(title_list))
5983 + g_plot(plot_str)
5984 +@@ -209,7 +209,7 @@ def plot_load_cpu():
5985 + g_plot('set ylabel "CPU load (percent)"')
5986 + g_plot('set title "{} : cpu loads : {:%F %H:%M}"'.format(testname, datetime.now()))
5987 +
5988 +- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
5989 ++ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
5990 + plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_LOAD)
5991 + g_plot('title_list = "{}"'.format(title_list))
5992 + g_plot(plot_str)
5993 +@@ -223,7 +223,7 @@ def plot_frequency_cpu():
5994 + g_plot('set ylabel "CPU Frequency (GHz)"')
5995 + g_plot('set title "{} : cpu frequencies : {:%F %H:%M}"'.format(testname, datetime.now()))
5996 +
5997 +- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
5998 ++ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
5999 + plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_FREQ)
6000 + g_plot('title_list = "{}"'.format(title_list))
6001 + g_plot(plot_str)
6002 +@@ -238,7 +238,7 @@ def plot_duration_cpu():
6003 + g_plot('set ylabel "Timer Duration (MilliSeconds)"')
6004 + g_plot('set title "{} : cpu durations : {:%F %H:%M}"'.format(testname, datetime.now()))
6005 +
6006 +- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
6007 ++ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
6008 + plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_DURATION)
6009 + g_plot('title_list = "{}"'.format(title_list))
6010 + g_plot(plot_str)
6011 +@@ -252,7 +252,7 @@ def plot_scaled_cpu():
6012 + g_plot('set ylabel "Scaled Busy (Unitless)"')
6013 + g_plot('set title "{} : cpu scaled busy : {:%F %H:%M}"'.format(testname, datetime.now()))
6014 +
6015 +- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
6016 ++ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
6017 + plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_SCALED)
6018 + g_plot('title_list = "{}"'.format(title_list))
6019 + g_plot(plot_str)
6020 +@@ -266,7 +266,7 @@ def plot_boost_cpu():
6021 + g_plot('set ylabel "CPU IO Boost (percent)"')
6022 + g_plot('set title "{} : cpu io boost : {:%F %H:%M}"'.format(testname, datetime.now()))
6023 +
6024 +- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
6025 ++ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
6026 + plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_BOOST)
6027 + g_plot('title_list = "{}"'.format(title_list))
6028 + g_plot(plot_str)
6029 +@@ -280,7 +280,7 @@ def plot_ghz_cpu():
6030 + g_plot('set ylabel "TSC Frequency (GHz)"')
6031 + g_plot('set title "{} : cpu TSC Frequencies (Sanity check calculation) : {:%F %H:%M}"'.format(testname, datetime.now()))
6032 +
6033 +- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
6034 ++ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
6035 + plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_GHZ)
6036 + g_plot('title_list = "{}"'.format(title_list))
6037 + g_plot(plot_str)
6038 +diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
6039 +index 27a54a17da65d..f4e92afab14b2 100644
6040 +--- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
6041 ++++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
6042 +@@ -30,7 +30,7 @@ ftrace_filter_check '*schedule*' '^.*schedule.*$'
6043 + ftrace_filter_check 'schedule*' '^schedule.*$'
6044 +
6045 + # filter by *mid*end
6046 +-ftrace_filter_check '*aw*lock' '.*aw.*lock$'
6047 ++ftrace_filter_check '*pin*lock' '.*pin.*lock$'
6048 +
6049 + # filter by start*mid*
6050 + ftrace_filter_check 'mutex*try*' '^mutex.*try.*'
6051 +diff --git a/tools/testing/selftests/x86/syscall_nt.c b/tools/testing/selftests/x86/syscall_nt.c
6052 +index 43fcab367fb0a..74e6b3fc2d09e 100644
6053 +--- a/tools/testing/selftests/x86/syscall_nt.c
6054 ++++ b/tools/testing/selftests/x86/syscall_nt.c
6055 +@@ -67,6 +67,7 @@ static void do_it(unsigned long extraflags)
6056 + set_eflags(get_eflags() | extraflags);
6057 + syscall(SYS_getpid);
6058 + flags = get_eflags();
6059 ++ set_eflags(X86_EFLAGS_IF | X86_EFLAGS_FIXED);
6060 + if ((flags & extraflags) == extraflags) {
6061 + printf("[OK]\tThe syscall worked and flags are still set\n");
6062 + } else {
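
The syscall_nt.c fix restores EFLAGS to a known baseline (IF plus the architecturally always-set bit 1, everything else clear) right after sampling the flags, so a sticky bit such as TF left over from one iteration cannot leak into the next. The helpers involved look roughly like this on x86-64 (a sketch; the constants mirror arch/x86's processor-flags.h):

    #define X86_EFLAGS_FIXED 0x0002UL   /* bit 1: architecturally always 1 */
    #define X86_EFLAGS_IF    0x0200UL   /* interrupt enable flag */

    static unsigned long get_eflags(void)
    {
        unsigned long eflags;

        asm volatile ("pushf\n\tpop %0" : "=rm" (eflags));
        return eflags;
    }

    static void set_eflags(unsigned long eflags)
    {
        asm volatile ("push %0\n\tpopf" : : "rm" (eflags) : "cc");
    }
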
6063 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
6064 +index 71f77ae6c2a66..c1ca4d40157b1 100644
6065 +--- a/virt/kvm/kvm_main.c
6066 ++++ b/virt/kvm/kvm_main.c
6067 +@@ -165,6 +165,7 @@ bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
6068 + */
6069 + if (pfn_valid(pfn))
6070 + return PageReserved(pfn_to_page(pfn)) &&
6071 ++ !is_zero_pfn(pfn) &&
6072 + !kvm_is_zone_device_pfn(pfn);
6073 +
6074 + return true;
6075 +@@ -3688,7 +3689,7 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
6076 + void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
6077 + struct kvm_io_device *dev)
6078 + {
6079 +- int i;
6080 ++ int i, j;
6081 + struct kvm_io_bus *new_bus, *bus;
6082 +
6083 + bus = kvm_get_bus(kvm, bus_idx);
6084 +@@ -3705,17 +3706,20 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
6085 +
6086 + new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) *
6087 + sizeof(struct kvm_io_range)), GFP_KERNEL);
6088 +- if (!new_bus) {
6089 ++ if (new_bus) {
6090 ++ memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
6091 ++ new_bus->dev_count--;
6092 ++ memcpy(new_bus->range + i, bus->range + i + 1,
6093 ++ (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
6094 ++ } else {
6095 + pr_err("kvm: failed to shrink bus, removing it completely\n");
6096 +- goto broken;
6097 ++ for (j = 0; j < bus->dev_count; j++) {
6098 ++ if (j == i)
6099 ++ continue;
6100 ++ kvm_iodevice_destructor(bus->range[j].dev);
6101 ++ }
6102 + }
6103 +
6104 +- memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
6105 +- new_bus->dev_count--;
6106 +- memcpy(new_bus->range + i, bus->range + i + 1,
6107 +- (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
6108 +-
6109 +-broken:
6110 + rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
6111 + synchronize_srcu_expedited(&kvm->srcu);
6112 + kfree(bus);
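
Finally, the kvm_main.c change reworks kvm_io_bus_unregister_dev() so a failed allocation no longer leaves the old bus in place with a dangling device: on success the device's slot is cut out of the range array with two copies, and on failure every remaining device is destroyed before the bus pointer is replaced with NULL. The slot-removal arithmetic, as a standalone sketch with a plain flexible-array struct (names are illustrative):

    #include <stdlib.h>
    #include <string.h>

    struct range { unsigned long base, len; };

    struct bus {
        int dev_count;
        struct range range[];   /* flexible array, as in struct kvm_io_bus */
    };

    /* Return a copy of 'bus' with slot 'i' (0 <= i < dev_count) removed,
     * or NULL if allocation fails and the caller must tear the bus down. */
    static struct bus *bus_without_slot(const struct bus *bus, int i)
    {
        int n = bus->dev_count - 1;
        struct bus *new_bus = malloc(sizeof(*bus) + n * sizeof(struct range));

        if (!new_bus)
            return NULL;

        /* header plus entries [0, i) ... */
        memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct range));
        new_bus->dev_count = n;
        /* ... then entries [i + 1, old count), closing the gap */
        memcpy(new_bus->range + i, bus->range + i + 1,
               (n - i) * sizeof(struct range));
        return new_bus;
    }
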