From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Tue, 31 Dec 2019 17:46:14
Message-Id: 1577814349.631919c872608f1b997f2023d125b191f0b807d2.mpagano@gentoo
1 commit: 631919c872608f1b997f2023d125b191f0b807d2
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Tue Dec 31 17:45:49 2019 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Tue Dec 31 17:45:49 2019 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=631919c8
7
8 Linux patch 4.19.92
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1091_linux-4.19.92.patch | 6138 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 6142 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 304b54d..af10ce2 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -403,6 +403,10 @@ Patch: 1090_linux-4.19.91.patch
21 From: https://www.kernel.org
22 Desc: Linux 4.19.91
23
24 +Patch: 1091_linux-4.19.92.patch
25 +From: https://www.kernel.org
26 +Desc: Linux 4.19.92
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1091_linux-4.19.92.patch b/1091_linux-4.19.92.patch
33 new file mode 100644
34 index 0000000..76c22e3
35 --- /dev/null
36 +++ b/1091_linux-4.19.92.patch
37 @@ -0,0 +1,6138 @@
38 +diff --git a/Makefile b/Makefile
39 +index 4e07d786bec2..080232ef6716 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 4
45 + PATCHLEVEL = 19
46 +-SUBLEVEL = 91
47 ++SUBLEVEL = 92
48 + EXTRAVERSION =
49 + NAME = "People's Front"
50 +
51 +diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
52 +index e8edbf13302a..3856d51c645b 100644
53 +--- a/arch/arm64/kernel/psci.c
54 ++++ b/arch/arm64/kernel/psci.c
55 +@@ -84,7 +84,8 @@ static void cpu_psci_cpu_die(unsigned int cpu)
56 +
57 + static int cpu_psci_cpu_kill(unsigned int cpu)
58 + {
59 +- int err, i;
60 ++ int err;
61 ++ unsigned long start, end;
62 +
63 + if (!psci_ops.affinity_info)
64 + return 0;
65 +@@ -94,16 +95,18 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
66 + * while it is dying. So, try again a few times.
67 + */
68 +
69 +- for (i = 0; i < 10; i++) {
70 ++ start = jiffies;
71 ++ end = start + msecs_to_jiffies(100);
72 ++ do {
73 + err = psci_ops.affinity_info(cpu_logical_map(cpu), 0);
74 + if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) {
75 +- pr_info("CPU%d killed.\n", cpu);
76 ++ pr_info("CPU%d killed (polled %d ms)\n", cpu,
77 ++ jiffies_to_msecs(jiffies - start));
78 + return 0;
79 + }
80 +
81 +- msleep(10);
82 +- pr_info("Retrying again to check for CPU kill\n");
83 +- }
84 ++ usleep_range(100, 1000);
85 ++ } while (time_before(jiffies, end));
86 +
87 + pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n",
88 + cpu, err);
89 +diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
90 +index 6da2bbdb9648..0c073f3ca122 100644
91 +--- a/arch/arm64/kvm/sys_regs.c
92 ++++ b/arch/arm64/kvm/sys_regs.c
93 +@@ -2174,8 +2174,11 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
94 + if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
95 + return NULL;
96 +
97 ++ if (!index_to_params(id, &params))
98 ++ return NULL;
99 ++
100 + table = get_target_table(vcpu->arch.target, true, &num);
101 +- r = find_reg_by_id(id, &params, table, num);
102 ++ r = find_reg(&params, table, num);
103 + if (!r)
104 + r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
105 +
106 +diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
107 +index 93a9dce31f25..813dfe5f45a5 100644
108 +--- a/arch/mips/include/asm/pgtable-64.h
109 ++++ b/arch/mips/include/asm/pgtable-64.h
110 +@@ -18,10 +18,12 @@
111 + #include <asm/fixmap.h>
112 +
113 + #define __ARCH_USE_5LEVEL_HACK
114 +-#if defined(CONFIG_PAGE_SIZE_64KB) && !defined(CONFIG_MIPS_VA_BITS_48)
115 ++#if CONFIG_PGTABLE_LEVELS == 2
116 + #include <asm-generic/pgtable-nopmd.h>
117 +-#elif !(defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_MIPS_VA_BITS_48))
118 ++#elif CONFIG_PGTABLE_LEVELS == 3
119 + #include <asm-generic/pgtable-nopud.h>
120 ++#else
121 ++#include <asm-generic/5level-fixup.h>
122 + #endif
123 +
124 + /*
125 +@@ -216,6 +218,9 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
126 + return pgd_val(pgd);
127 + }
128 +
129 ++#define pgd_phys(pgd) virt_to_phys((void *)pgd_val(pgd))
130 ++#define pgd_page(pgd) (pfn_to_page(pgd_phys(pgd) >> PAGE_SHIFT))
131 ++
132 + static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
133 + {
134 + return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
135 +diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
136 +index 685c72310f5d..bfaf175db54d 100644
137 +--- a/arch/powerpc/include/asm/spinlock.h
138 ++++ b/arch/powerpc/include/asm/spinlock.h
139 +@@ -53,10 +53,12 @@
140 + #endif
141 +
142 + #ifdef CONFIG_PPC_PSERIES
143 ++DECLARE_STATIC_KEY_FALSE(shared_processor);
144 ++
145 + #define vcpu_is_preempted vcpu_is_preempted
146 + static inline bool vcpu_is_preempted(int cpu)
147 + {
148 +- if (!firmware_has_feature(FW_FEATURE_SPLPAR))
149 ++ if (!static_branch_unlikely(&shared_processor))
150 + return false;
151 + return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
152 + }
153 +diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
154 +index 916ddc4aac44..d37704ebccdb 100644
155 +--- a/arch/powerpc/kernel/irq.c
156 ++++ b/arch/powerpc/kernel/irq.c
157 +@@ -634,8 +634,6 @@ void __do_irq(struct pt_regs *regs)
158 +
159 + trace_irq_entry(regs);
160 +
161 +- check_stack_overflow();
162 +-
163 + /*
164 + * Query the platform PIC for the interrupt & ack it.
165 + *
166 +@@ -667,6 +665,8 @@ void do_IRQ(struct pt_regs *regs)
167 + irqtp = hardirq_ctx[raw_smp_processor_id()];
168 + sirqtp = softirq_ctx[raw_smp_processor_id()];
169 +
170 ++ check_stack_overflow();
171 ++
172 + /* Already there ? */
173 + if (unlikely(curtp == irqtp || curtp == sirqtp)) {
174 + __do_irq(regs);
175 +diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
176 +index 67f49159ea70..c2d318d1df02 100644
177 +--- a/arch/powerpc/platforms/pseries/setup.c
178 ++++ b/arch/powerpc/platforms/pseries/setup.c
179 +@@ -75,6 +75,9 @@
180 + #include "pseries.h"
181 + #include "../../../../drivers/pci/pci.h"
182 +
183 ++DEFINE_STATIC_KEY_FALSE(shared_processor);
184 ++EXPORT_SYMBOL_GPL(shared_processor);
185 ++
186 + int CMO_PrPSP = -1;
187 + int CMO_SecPSP = -1;
188 + unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K);
189 +@@ -761,6 +764,10 @@ static void __init pSeries_setup_arch(void)
190 +
191 + if (firmware_has_feature(FW_FEATURE_LPAR)) {
192 + vpa_init(boot_cpuid);
193 ++
194 ++ if (lppaca_shared_proc(get_lppaca()))
195 ++ static_branch_enable(&shared_processor);
196 ++
197 + ppc_md.power_save = pseries_lpar_idle;
198 + ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
199 + #ifdef CONFIG_PCI_IOV
200 +diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
201 +index 5ee733720a57..67838df3f3f9 100644
202 +--- a/arch/s390/include/asm/pgalloc.h
203 ++++ b/arch/s390/include/asm/pgalloc.h
204 +@@ -56,7 +56,12 @@ static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
205 + crst_table_init(table, _REGION2_ENTRY_EMPTY);
206 + return (p4d_t *) table;
207 + }
208 +-#define p4d_free(mm, p4d) crst_table_free(mm, (unsigned long *) p4d)
209 ++
210 ++static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
211 ++{
212 ++ if (!mm_p4d_folded(mm))
213 ++ crst_table_free(mm, (unsigned long *) p4d);
214 ++}
215 +
216 + static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
217 + {
218 +@@ -65,7 +70,12 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
219 + crst_table_init(table, _REGION3_ENTRY_EMPTY);
220 + return (pud_t *) table;
221 + }
222 +-#define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud)
223 ++
224 ++static inline void pud_free(struct mm_struct *mm, pud_t *pud)
225 ++{
226 ++ if (!mm_pud_folded(mm))
227 ++ crst_table_free(mm, (unsigned long *) pud);
228 ++}
229 +
230 + static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
231 + {
232 +@@ -83,6 +93,8 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
233 +
234 + static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
235 + {
236 ++ if (mm_pmd_folded(mm))
237 ++ return;
238 + pgtable_pmd_page_dtor(virt_to_page(pmd));
239 + crst_table_free(mm, (unsigned long *) pmd);
240 + }
241 +diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
242 +index 64539c221672..2dc9eb4e1acc 100644
243 +--- a/arch/s390/include/asm/timex.h
244 ++++ b/arch/s390/include/asm/timex.h
245 +@@ -10,8 +10,9 @@
246 + #ifndef _ASM_S390_TIMEX_H
247 + #define _ASM_S390_TIMEX_H
248 +
249 +-#include <asm/lowcore.h>
250 ++#include <linux/preempt.h>
251 + #include <linux/time64.h>
252 ++#include <asm/lowcore.h>
253 +
254 + /* The value of the TOD clock for 1.1.1970. */
255 + #define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
256 +@@ -186,15 +187,18 @@ extern unsigned char tod_clock_base[16] __aligned(8);
257 + /**
258 + * get_clock_monotonic - returns current time in clock rate units
259 + *
260 +- * The caller must ensure that preemption is disabled.
261 + * The clock and tod_clock_base get changed via stop_machine.
262 +- * Therefore preemption must be disabled when calling this
263 +- * function, otherwise the returned value is not guaranteed to
264 +- * be monotonic.
265 ++ * Therefore preemption must be disabled, otherwise the returned
266 ++ * value is not guaranteed to be monotonic.
267 + */
268 + static inline unsigned long long get_tod_clock_monotonic(void)
269 + {
270 +- return get_tod_clock() - *(unsigned long long *) &tod_clock_base[1];
271 ++ unsigned long long tod;
272 ++
273 ++ preempt_disable_notrace();
274 ++ tod = get_tod_clock() - *(unsigned long long *) &tod_clock_base[1];
275 ++ preempt_enable_notrace();
276 ++ return tod;
277 + }
278 +
279 + /**
280 +diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
281 +index b2c68fbf2634..41925f220694 100644
282 +--- a/arch/s390/kernel/dis.c
283 ++++ b/arch/s390/kernel/dis.c
284 +@@ -462,10 +462,11 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
285 + ptr += sprintf(ptr, "%%c%i", value);
286 + else if (operand->flags & OPERAND_VR)
287 + ptr += sprintf(ptr, "%%v%i", value);
288 +- else if (operand->flags & OPERAND_PCREL)
289 +- ptr += sprintf(ptr, "%lx", (signed int) value
290 +- + addr);
291 +- else if (operand->flags & OPERAND_SIGNED)
292 ++ else if (operand->flags & OPERAND_PCREL) {
293 ++ void *pcrel = (void *)((int)value + addr);
294 ++
295 ++ ptr += sprintf(ptr, "%px", pcrel);
296 ++ } else if (operand->flags & OPERAND_SIGNED)
297 + ptr += sprintf(ptr, "%i", value);
298 + else
299 + ptr += sprintf(ptr, "%u", value);
300 +@@ -537,7 +538,7 @@ void show_code(struct pt_regs *regs)
301 + else
302 + *ptr++ = ' ';
303 + addr = regs->psw.addr + start - 32;
304 +- ptr += sprintf(ptr, "%016lx: ", addr);
305 ++ ptr += sprintf(ptr, "%px: ", (void *)addr);
306 + if (start + opsize >= end)
307 + break;
308 + for (i = 0; i < opsize; i++)
309 +@@ -565,7 +566,7 @@ void print_fn_code(unsigned char *code, unsigned long len)
310 + opsize = insn_length(*code);
311 + if (opsize > len)
312 + break;
313 +- ptr += sprintf(ptr, "%p: ", code);
314 ++ ptr += sprintf(ptr, "%px: ", code);
315 + for (i = 0; i < opsize; i++)
316 + ptr += sprintf(ptr, "%02x", code[i]);
317 + *ptr++ = '\t';
318 +diff --git a/arch/sh/include/cpu-sh4/cpu/sh7734.h b/arch/sh/include/cpu-sh4/cpu/sh7734.h
319 +index 96f0246ad2f2..82b63208135a 100644
320 +--- a/arch/sh/include/cpu-sh4/cpu/sh7734.h
321 ++++ b/arch/sh/include/cpu-sh4/cpu/sh7734.h
322 +@@ -134,7 +134,7 @@ enum {
323 + GPIO_FN_EX_WAIT1, GPIO_FN_SD1_DAT0_A, GPIO_FN_DREQ2, GPIO_FN_CAN1_TX_C,
324 + GPIO_FN_ET0_LINK_C, GPIO_FN_ET0_ETXD5_A,
325 + GPIO_FN_EX_WAIT0, GPIO_FN_TCLK1_B,
326 +- GPIO_FN_RD_WR, GPIO_FN_TCLK0,
327 ++ GPIO_FN_RD_WR, GPIO_FN_TCLK0, GPIO_FN_CAN_CLK_B, GPIO_FN_ET0_ETXD4,
328 + GPIO_FN_EX_CS5, GPIO_FN_SD1_CMD_A, GPIO_FN_ATADIR, GPIO_FN_QSSL_B,
329 + GPIO_FN_ET0_ETXD3_A,
330 + GPIO_FN_EX_CS4, GPIO_FN_SD1_WP_A, GPIO_FN_ATAWR, GPIO_FN_QMI_QIO1_B,
331 +diff --git a/arch/x86/include/asm/crash.h b/arch/x86/include/asm/crash.h
332 +index a7adb2bfbf0b..6b8ad6fa3979 100644
333 +--- a/arch/x86/include/asm/crash.h
334 ++++ b/arch/x86/include/asm/crash.h
335 +@@ -2,6 +2,8 @@
336 + #ifndef _ASM_X86_CRASH_H
337 + #define _ASM_X86_CRASH_H
338 +
339 ++struct kimage;
340 ++
341 + int crash_load_segments(struct kimage *image);
342 + int crash_copy_backup_region(struct kimage *image);
343 + int crash_setup_memmap_entries(struct kimage *image,
344 +diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
345 +index 6390bd8c141b..5e12b2319d7a 100644
346 +--- a/arch/x86/include/asm/fixmap.h
347 ++++ b/arch/x86/include/asm/fixmap.h
348 +@@ -159,7 +159,7 @@ extern pte_t *kmap_pte;
349 + extern pte_t *pkmap_page_table;
350 +
351 + void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
352 +-void native_set_fixmap(enum fixed_addresses idx,
353 ++void native_set_fixmap(unsigned /* enum fixed_addresses */ idx,
354 + phys_addr_t phys, pgprot_t flags);
355 +
356 + #ifndef CONFIG_PARAVIRT
357 +diff --git a/arch/x86/include/asm/syscall_wrapper.h b/arch/x86/include/asm/syscall_wrapper.h
358 +index e046a405743d..90eb70df0b18 100644
359 +--- a/arch/x86/include/asm/syscall_wrapper.h
360 ++++ b/arch/x86/include/asm/syscall_wrapper.h
361 +@@ -48,12 +48,13 @@
362 + * To keep the naming coherent, re-define SYSCALL_DEFINE0 to create an alias
363 + * named __ia32_sys_*()
364 + */
365 +-#define SYSCALL_DEFINE0(sname) \
366 +- SYSCALL_METADATA(_##sname, 0); \
367 +- asmlinkage long __x64_sys_##sname(void); \
368 +- ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO); \
369 +- SYSCALL_ALIAS(__ia32_sys_##sname, __x64_sys_##sname); \
370 +- asmlinkage long __x64_sys_##sname(void)
371 ++
372 ++#define SYSCALL_DEFINE0(sname) \
373 ++ SYSCALL_METADATA(_##sname, 0); \
374 ++ asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused);\
375 ++ ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO); \
376 ++ SYSCALL_ALIAS(__ia32_sys_##sname, __x64_sys_##sname); \
377 ++ asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused)
378 +
379 + #define COND_SYSCALL(name) \
380 + cond_syscall(__x64_sys_##name); \
381 +@@ -181,11 +182,11 @@
382 + * macros to work correctly.
383 + */
384 + #ifndef SYSCALL_DEFINE0
385 +-#define SYSCALL_DEFINE0(sname) \
386 +- SYSCALL_METADATA(_##sname, 0); \
387 +- asmlinkage long __x64_sys_##sname(void); \
388 +- ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO); \
389 +- asmlinkage long __x64_sys_##sname(void)
390 ++#define SYSCALL_DEFINE0(sname) \
391 ++ SYSCALL_METADATA(_##sname, 0); \
392 ++ asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused);\
393 ++ ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO); \
394 ++ asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused)
395 + #endif
396 +
397 + #ifndef COND_SYSCALL
398 +diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
399 +index ab22eded61d2..fa3b85b222e3 100644
400 +--- a/arch/x86/kernel/apic/io_apic.c
401 ++++ b/arch/x86/kernel/apic/io_apic.c
402 +@@ -1724,9 +1724,10 @@ static bool io_apic_level_ack_pending(struct mp_chip_data *data)
403 +
404 + static inline bool ioapic_irqd_mask(struct irq_data *data)
405 + {
406 +- /* If we are moving the irq we need to mask it */
407 ++ /* If we are moving the IRQ we need to mask it */
408 + if (unlikely(irqd_is_setaffinity_pending(data))) {
409 +- mask_ioapic_irq(data);
410 ++ if (!irqd_irq_masked(data))
411 ++ mask_ioapic_irq(data);
412 + return true;
413 + }
414 + return false;
415 +@@ -1763,7 +1764,9 @@ static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked)
416 + */
417 + if (!io_apic_level_ack_pending(data->chip_data))
418 + irq_move_masked_irq(data);
419 +- unmask_ioapic_irq(data);
420 ++ /* If the IRQ is masked in the core, leave it: */
421 ++ if (!irqd_irq_masked(data))
422 ++ unmask_ioapic_irq(data);
423 + }
424 + }
425 + #else
426 +diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
427 +index 5bdfe52b2c9d..da0b6967349a 100644
428 +--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
429 ++++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
430 +@@ -228,10 +228,10 @@ static void smca_configure(unsigned int bank, unsigned int cpu)
431 + }
432 +
433 + /* Return early if this bank was already initialized. */
434 +- if (smca_banks[bank].hwid)
435 ++ if (smca_banks[bank].hwid && smca_banks[bank].hwid->hwid_mcatype != 0)
436 + return;
437 +
438 +- if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) {
439 ++ if (rdmsr_safe(MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) {
440 + pr_warn("Failed to read MCA_IPID for bank %d\n", bank);
441 + return;
442 + }
443 +diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
444 +index ee229ceee745..ec6a07b04fdb 100644
445 +--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
446 ++++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
447 +@@ -185,7 +185,7 @@ static void therm_throt_process(bool new_event, int event, int level)
448 + /* if we just entered the thermal event */
449 + if (new_event) {
450 + if (event == THERMAL_THROTTLING_EVENT)
451 +- pr_crit("CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
452 ++ pr_warn("CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
453 + this_cpu,
454 + level == CORE_LEVEL ? "Core" : "Package",
455 + state->count);
456 +diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
457 +index e0b85930dd77..0a0e9112f284 100644
458 +--- a/arch/x86/lib/x86-opcode-map.txt
459 ++++ b/arch/x86/lib/x86-opcode-map.txt
460 +@@ -333,7 +333,7 @@ AVXcode: 1
461 + 06: CLTS
462 + 07: SYSRET (o64)
463 + 08: INVD
464 +-09: WBINVD
465 ++09: WBINVD | WBNOINVD (F3)
466 + 0a:
467 + 0b: UD2 (1B)
468 + 0c:
469 +@@ -364,7 +364,7 @@ AVXcode: 1
470 + # a ModR/M byte.
471 + 1a: BNDCL Gv,Ev (F3) | BNDCU Gv,Ev (F2) | BNDMOV Gv,Ev (66) | BNDLDX Gv,Ev
472 + 1b: BNDCN Gv,Ev (F2) | BNDMOV Ev,Gv (66) | BNDMK Gv,Ev (F3) | BNDSTX Ev,Gv
473 +-1c:
474 ++1c: Grp20 (1A),(1C)
475 + 1d:
476 + 1e:
477 + 1f: NOP Ev
478 +@@ -792,6 +792,8 @@ f3: Grp17 (1A)
479 + f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v)
480 + f6: ADCX Gy,Ey (66) | ADOX Gy,Ey (F3) | MULX By,Gy,rDX,Ey (F2),(v)
481 + f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v)
482 ++f8: MOVDIR64B Gv,Mdqq (66) | ENQCMD Gv,Mdqq (F2) | ENQCMDS Gv,Mdqq (F3)
483 ++f9: MOVDIRI My,Gy
484 + EndTable
485 +
486 + Table: 3-byte opcode 2 (0x0f 0x3a)
487 +@@ -943,9 +945,9 @@ GrpTable: Grp6
488 + EndTable
489 +
490 + GrpTable: Grp7
491 +-0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B)
492 +-1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B)
493 +-2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B)
494 ++0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) | PCONFIG (101),(11B) | ENCLV (000),(11B)
495 ++1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B) | ENCLS (111),(11B)
496 ++2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B) | ENCLU (111),(11B)
497 + 3: LIDT Ms
498 + 4: SMSW Mw/Rv
499 + 5: rdpkru (110),(11B) | wrpkru (111),(11B)
500 +@@ -1020,7 +1022,7 @@ GrpTable: Grp15
501 + 3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
502 + 4: XSAVE | ptwrite Ey (F3),(11B)
503 + 5: XRSTOR | lfence (11B)
504 +-6: XSAVEOPT | clwb (66) | mfence (11B)
505 ++6: XSAVEOPT | clwb (66) | mfence (11B) | TPAUSE Rd (66),(11B) | UMONITOR Rv (F3),(11B) | UMWAIT Rd (F2),(11B)
506 + 7: clflush | clflushopt (66) | sfence (11B)
507 + EndTable
508 +
509 +@@ -1051,6 +1053,10 @@ GrpTable: Grp19
510 + 6: vscatterpf1qps/d Wx (66),(ev)
511 + EndTable
512 +
513 ++GrpTable: Grp20
514 ++0: cldemote Mb
515 ++EndTable
516 ++
517 + # AMD's Prefetch Group
518 + GrpTable: GrpP
519 + 0: PREFETCH
520 +diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
521 +index 59274e2c1ac4..bf52106ab9c4 100644
522 +--- a/arch/x86/mm/pgtable.c
523 ++++ b/arch/x86/mm/pgtable.c
524 +@@ -660,8 +660,8 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
525 + fixmaps_set++;
526 + }
527 +
528 +-void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
529 +- pgprot_t flags)
530 ++void native_set_fixmap(unsigned /* enum fixed_addresses */ idx,
531 ++ phys_addr_t phys, pgprot_t flags)
532 + {
533 + /* Sanitize 'prot' against any unsupported bits: */
534 + pgprot_val(flags) &= __default_kernel_pte_mask;
535 +diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
536 +index a19ff3977ac4..870eb5c7516a 100644
537 +--- a/drivers/acpi/button.c
538 ++++ b/drivers/acpi/button.c
539 +@@ -91,6 +91,17 @@ static const struct dmi_system_id lid_blacklst[] = {
540 + DMI_MATCH(DMI_BIOS_VERSION, "BYT70A.YNCHENG.WIN.007"),
541 + },
542 + },
543 ++ {
544 ++ /*
545 ++ * Medion Akoya E2215T, notification of the LID device only
546 ++ * happens on close, not on open and _LID always returns closed.
547 ++ */
548 ++ .matches = {
549 ++ DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
550 ++ DMI_MATCH(DMI_PRODUCT_NAME, "E2215T MD60198"),
551 ++ },
552 ++ .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
553 ++ },
554 + {}
555 + };
556 +
557 +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
558 +index 133fed8e4a8b..85ee6c35d8e0 100644
559 +--- a/drivers/ata/libata-core.c
560 ++++ b/drivers/ata/libata-core.c
561 +@@ -6726,6 +6726,9 @@ void ata_host_detach(struct ata_host *host)
562 + {
563 + int i;
564 +
565 ++ /* Ensure ata_port probe has completed */
566 ++ async_synchronize_full();
567 ++
568 + for (i = 0; i < host->n_ports; i++)
569 + ata_port_detach(host->ports[i]);
570 +
571 +diff --git a/drivers/block/loop.c b/drivers/block/loop.c
572 +index 126c2c514673..9cd231a27328 100644
573 +--- a/drivers/block/loop.c
574 ++++ b/drivers/block/loop.c
575 +@@ -416,18 +416,20 @@ out_free_page:
576 + return ret;
577 + }
578 +
579 +-static int lo_discard(struct loop_device *lo, struct request *rq, loff_t pos)
580 ++static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
581 ++ int mode)
582 + {
583 + /*
584 +- * We use punch hole to reclaim the free space used by the
585 +- * image a.k.a. discard. However we do not support discard if
586 +- * encryption is enabled, because it may give an attacker
587 +- * useful information.
588 ++ * We use fallocate to manipulate the space mappings used by the image
589 ++ * a.k.a. discard/zerorange. However we do not support this if
590 ++ * encryption is enabled, because it may give an attacker useful
591 ++ * information.
592 + */
593 + struct file *file = lo->lo_backing_file;
594 +- int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
595 + int ret;
596 +
597 ++ mode |= FALLOC_FL_KEEP_SIZE;
598 ++
599 + if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) {
600 + ret = -EOPNOTSUPP;
601 + goto out;
602 +@@ -596,9 +598,17 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
603 + switch (req_op(rq)) {
604 + case REQ_OP_FLUSH:
605 + return lo_req_flush(lo, rq);
606 +- case REQ_OP_DISCARD:
607 + case REQ_OP_WRITE_ZEROES:
608 +- return lo_discard(lo, rq, pos);
609 ++ /*
610 ++ * If the caller doesn't want deallocation, call zeroout to
611 ++ * write zeroes the range. Otherwise, punch them out.
612 ++ */
613 ++ return lo_fallocate(lo, rq, pos,
614 ++ (rq->cmd_flags & REQ_NOUNMAP) ?
615 ++ FALLOC_FL_ZERO_RANGE :
616 ++ FALLOC_FL_PUNCH_HOLE);
617 ++ case REQ_OP_DISCARD:
618 ++ return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
619 + case REQ_OP_WRITE:
620 + if (lo->transfer)
621 + return lo_write_transfer(lo, rq, pos);
622 +diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
623 +index 996b1ef5f076..b9d321bdaa8a 100644
624 +--- a/drivers/block/nbd.c
625 ++++ b/drivers/block/nbd.c
626 +@@ -1247,10 +1247,10 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
627 + mutex_unlock(&nbd->config_lock);
628 + ret = wait_event_interruptible(config->recv_wq,
629 + atomic_read(&config->recv_threads) == 0);
630 +- if (ret) {
631 ++ if (ret)
632 + sock_shutdown(nbd);
633 +- flush_workqueue(nbd->recv_workq);
634 +- }
635 ++ flush_workqueue(nbd->recv_workq);
636 ++
637 + mutex_lock(&nbd->config_lock);
638 + nbd_bdev_reset(bdev);
639 + /* user requested, ignore socket errors */
640 +diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c
641 +index 38b719017186..648e39ce6bd9 100644
642 +--- a/drivers/char/hw_random/omap3-rom-rng.c
643 ++++ b/drivers/char/hw_random/omap3-rom-rng.c
644 +@@ -121,7 +121,8 @@ static int omap3_rom_rng_remove(struct platform_device *pdev)
645 + {
646 + cancel_delayed_work_sync(&idle_work);
647 + hwrng_unregister(&omap3_rom_rng_ops);
648 +- clk_disable_unprepare(rng_clk);
649 ++ if (!rng_idle)
650 ++ clk_disable_unprepare(rng_clk);
651 + return 0;
652 + }
653 +
654 +diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
655 +index 84c17f936c09..91f2d9219489 100644
656 +--- a/drivers/char/ipmi/ipmi_msghandler.c
657 ++++ b/drivers/char/ipmi/ipmi_msghandler.c
658 +@@ -447,6 +447,8 @@ enum ipmi_stat_indexes {
659 +
660 + #define IPMI_IPMB_NUM_SEQ 64
661 + struct ipmi_smi {
662 ++ struct module *owner;
663 ++
664 + /* What interface number are we? */
665 + int intf_num;
666 +
667 +@@ -1139,6 +1141,11 @@ int ipmi_create_user(unsigned int if_num,
668 + if (rv)
669 + goto out_kfree;
670 +
671 ++ if (!try_module_get(intf->owner)) {
672 ++ rv = -ENODEV;
673 ++ goto out_kfree;
674 ++ }
675 ++
676 + /* Note that each existing user holds a refcount to the interface. */
677 + kref_get(&intf->refcount);
678 +
679 +@@ -1269,6 +1276,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
680 + }
681 +
682 + kref_put(&intf->refcount, intf_free);
683 ++ module_put(intf->owner);
684 + }
685 +
686 + int ipmi_destroy_user(struct ipmi_user *user)
687 +@@ -2384,7 +2392,7 @@ static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
688 + * been recently fetched, this will just use the cached data. Otherwise
689 + * it will run a new fetch.
690 + *
691 +- * Except for the first time this is called (in ipmi_register_smi()),
692 ++ * Except for the first time this is called (in ipmi_add_smi()),
693 + * this will always return good data;
694 + */
695 + static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
696 +@@ -3304,10 +3312,11 @@ static void redo_bmc_reg(struct work_struct *work)
697 + kref_put(&intf->refcount, intf_free);
698 + }
699 +
700 +-int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
701 +- void *send_info,
702 +- struct device *si_dev,
703 +- unsigned char slave_addr)
704 ++int ipmi_add_smi(struct module *owner,
705 ++ const struct ipmi_smi_handlers *handlers,
706 ++ void *send_info,
707 ++ struct device *si_dev,
708 ++ unsigned char slave_addr)
709 + {
710 + int i, j;
711 + int rv;
712 +@@ -3333,7 +3342,7 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
713 + return rv;
714 + }
715 +
716 +-
717 ++ intf->owner = owner;
718 + intf->bmc = &intf->tmp_bmc;
719 + INIT_LIST_HEAD(&intf->bmc->intfs);
720 + mutex_init(&intf->bmc->dyn_mutex);
721 +@@ -3440,7 +3449,7 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
722 +
723 + return rv;
724 + }
725 +-EXPORT_SYMBOL(ipmi_register_smi);
726 ++EXPORT_SYMBOL(ipmi_add_smi);
727 +
728 + static void deliver_smi_err_response(struct ipmi_smi *intf,
729 + struct ipmi_smi_msg *msg,
730 +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
731 +index 9d8d64f706e0..e35c397b1259 100644
732 +--- a/drivers/cpufreq/cpufreq.c
733 ++++ b/drivers/cpufreq/cpufreq.c
734 +@@ -2480,6 +2480,13 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
735 + if (cpufreq_disabled())
736 + return -ENODEV;
737 +
738 ++ /*
739 ++ * The cpufreq core depends heavily on the availability of device
740 ++ * structure, make sure they are available before proceeding further.
741 ++ */
742 ++ if (!get_cpu_device(0))
743 ++ return -EPROBE_DEFER;
744 ++
745 + if (!driver_data || !driver_data->verify || !driver_data->init ||
746 + !(driver_data->setpolicy || driver_data->target_index ||
747 + driver_data->target) ||
748 +diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
749 +index f662914d87b8..53a78035381d 100644
750 +--- a/drivers/crypto/atmel-aes.c
751 ++++ b/drivers/crypto/atmel-aes.c
752 +@@ -148,7 +148,7 @@ struct atmel_aes_xts_ctx {
753 + u32 key2[AES_KEYSIZE_256 / sizeof(u32)];
754 + };
755 +
756 +-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
757 ++#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
758 + struct atmel_aes_authenc_ctx {
759 + struct atmel_aes_base_ctx base;
760 + struct atmel_sha_authenc_ctx *auth;
761 +@@ -160,7 +160,7 @@ struct atmel_aes_reqctx {
762 + u32 lastc[AES_BLOCK_SIZE / sizeof(u32)];
763 + };
764 +
765 +-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
766 ++#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
767 + struct atmel_aes_authenc_reqctx {
768 + struct atmel_aes_reqctx base;
769 +
770 +@@ -489,7 +489,7 @@ static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd)
771 + return (dd->flags & AES_FLAGS_ENCRYPT);
772 + }
773 +
774 +-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
775 ++#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
776 + static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err);
777 + #endif
778 +
779 +@@ -518,7 +518,7 @@ static void atmel_aes_set_iv_as_last_ciphertext_block(struct atmel_aes_dev *dd)
780 +
781 + static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
782 + {
783 +-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
784 ++#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
785 + if (dd->ctx->is_aead)
786 + atmel_aes_authenc_complete(dd, err);
787 + #endif
788 +@@ -1983,7 +1983,7 @@ static struct crypto_alg aes_xts_alg = {
789 + }
790 + };
791 +
792 +-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
793 ++#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
794 + /* authenc aead functions */
795 +
796 + static int atmel_aes_authenc_start(struct atmel_aes_dev *dd);
797 +@@ -2470,7 +2470,7 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
798 + {
799 + int i;
800 +
801 +-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
802 ++#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
803 + if (dd->caps.has_authenc)
804 + for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++)
805 + crypto_unregister_aead(&aes_authenc_algs[i]);
806 +@@ -2517,7 +2517,7 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
807 + goto err_aes_xts_alg;
808 + }
809 +
810 +-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
811 ++#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
812 + if (dd->caps.has_authenc) {
813 + for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++) {
814 + err = crypto_register_aead(&aes_authenc_algs[i]);
815 +@@ -2529,7 +2529,7 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
816 +
817 + return 0;
818 +
819 +-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
820 ++#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
821 + /* i = ARRAY_SIZE(aes_authenc_algs); */
822 + err_aes_authenc_alg:
823 + for (j = 0; j < i; j++)
824 +@@ -2720,7 +2720,7 @@ static int atmel_aes_probe(struct platform_device *pdev)
825 +
826 + atmel_aes_get_cap(aes_dd);
827 +
828 +-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
829 ++#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
830 + if (aes_dd->caps.has_authenc && !atmel_sha_authenc_is_ready()) {
831 + err = -EPROBE_DEFER;
832 + goto iclk_unprepare;
833 +diff --git a/drivers/crypto/atmel-authenc.h b/drivers/crypto/atmel-authenc.h
834 +index 2a60d1224143..7f6742d35dd5 100644
835 +--- a/drivers/crypto/atmel-authenc.h
836 ++++ b/drivers/crypto/atmel-authenc.h
837 +@@ -23,7 +23,7 @@
838 + #ifndef __ATMEL_AUTHENC_H__
839 + #define __ATMEL_AUTHENC_H__
840 +
841 +-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
842 ++#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
843 +
844 + #include <crypto/authenc.h>
845 + #include <crypto/hash.h>
846 +diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
847 +index 8a19df2fba6a..ef125d4be8fc 100644
848 +--- a/drivers/crypto/atmel-sha.c
849 ++++ b/drivers/crypto/atmel-sha.c
850 +@@ -2215,7 +2215,7 @@ static struct ahash_alg sha_hmac_algs[] = {
851 + },
852 + };
853 +
854 +-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
855 ++#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
856 + /* authenc functions */
857 +
858 + static int atmel_sha_authenc_init2(struct atmel_sha_dev *dd);
859 +diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
860 +index 5cf64746731a..22e491857925 100644
861 +--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
862 ++++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
863 +@@ -81,7 +81,8 @@ static int sun4i_ss_opti_poll(struct skcipher_request *areq)
864 + oi = 0;
865 + oo = 0;
866 + do {
867 +- todo = min3(rx_cnt, ileft, (mi.length - oi) / 4);
868 ++ todo = min(rx_cnt, ileft);
869 ++ todo = min_t(size_t, todo, (mi.length - oi) / 4);
870 + if (todo) {
871 + ileft -= todo;
872 + writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
873 +@@ -96,7 +97,8 @@ static int sun4i_ss_opti_poll(struct skcipher_request *areq)
874 + rx_cnt = SS_RXFIFO_SPACES(spaces);
875 + tx_cnt = SS_TXFIFO_SPACES(spaces);
876 +
877 +- todo = min3(tx_cnt, oleft, (mo.length - oo) / 4);
878 ++ todo = min(tx_cnt, oleft);
879 ++ todo = min_t(size_t, todo, (mo.length - oo) / 4);
880 + if (todo) {
881 + oleft -= todo;
882 + readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
883 +@@ -220,7 +222,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
884 + * todo is the number of consecutive 4byte word that we
885 + * can read from current SG
886 + */
887 +- todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4);
888 ++ todo = min(rx_cnt, ileft / 4);
889 ++ todo = min_t(size_t, todo, (mi.length - oi) / 4);
890 + if (todo && !ob) {
891 + writesl(ss->base + SS_RXFIFO, mi.addr + oi,
892 + todo);
893 +@@ -234,8 +237,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
894 + * we need to be able to write all buf in one
895 + * pass, so it is why we min() with rx_cnt
896 + */
897 +- todo = min3(rx_cnt * 4 - ob, ileft,
898 +- mi.length - oi);
899 ++ todo = min(rx_cnt * 4 - ob, ileft);
900 ++ todo = min_t(size_t, todo, mi.length - oi);
901 + memcpy(buf + ob, mi.addr + oi, todo);
902 + ileft -= todo;
903 + oi += todo;
904 +@@ -255,7 +258,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
905 + spaces = readl(ss->base + SS_FCSR);
906 + rx_cnt = SS_RXFIFO_SPACES(spaces);
907 + tx_cnt = SS_TXFIFO_SPACES(spaces);
908 +- dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n",
909 ++ dev_dbg(ss->dev,
910 ++ "%x %u/%zu %u/%u cnt=%u %u/%zu %u/%u cnt=%u %u\n",
911 + mode,
912 + oi, mi.length, ileft, areq->cryptlen, rx_cnt,
913 + oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob);
914 +@@ -263,7 +267,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
915 + if (!tx_cnt)
916 + continue;
917 + /* todo in 4bytes word */
918 +- todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4);
919 ++ todo = min(tx_cnt, oleft / 4);
920 ++ todo = min_t(size_t, todo, (mo.length - oo) / 4);
921 + if (todo) {
922 + readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
923 + oleft -= todo * 4;
924 +@@ -287,7 +292,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
925 + * no more than remaining buffer
926 + * no need to test against oleft
927 + */
928 +- todo = min(mo.length - oo, obl - obo);
929 ++ todo = min_t(size_t,
930 ++ mo.length - oo, obl - obo);
931 + memcpy(mo.addr + oo, bufo + obo, todo);
932 + oleft -= todo;
933 + obo += todo;
934 +diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
935 +index f6936bb3b7be..1a724263761b 100644
936 +--- a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
937 ++++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
938 +@@ -276,8 +276,8 @@ static int sun4i_hash(struct ahash_request *areq)
939 + */
940 + while (op->len < 64 && i < end) {
941 + /* how many bytes we can read from current SG */
942 +- in_r = min3(mi.length - in_i, end - i,
943 +- 64 - op->len);
944 ++ in_r = min(end - i, 64 - op->len);
945 ++ in_r = min_t(size_t, mi.length - in_i, in_r);
946 + memcpy(op->buf + op->len, mi.addr + in_i, in_r);
947 + op->len += in_r;
948 + i += in_r;
949 +@@ -297,8 +297,8 @@ static int sun4i_hash(struct ahash_request *areq)
950 + }
951 + if (mi.length - in_i > 3 && i < end) {
952 + /* how many bytes we can read from current SG */
953 +- in_r = min3(mi.length - in_i, areq->nbytes - i,
954 +- ((mi.length - in_i) / 4) * 4);
955 ++ in_r = min_t(size_t, mi.length - in_i, areq->nbytes - i);
956 ++ in_r = min_t(size_t, ((mi.length - in_i) / 4) * 4, in_r);
957 + /* how many bytes we can write in the device*/
958 + todo = min3((u32)(end - i) / 4, rx_cnt, (u32)in_r / 4);
959 + writesl(ss->base + SS_RXFIFO, mi.addr + in_i, todo);
960 +@@ -324,8 +324,8 @@ static int sun4i_hash(struct ahash_request *areq)
961 + if ((areq->nbytes - i) < 64) {
962 + while (i < areq->nbytes && in_i < mi.length && op->len < 64) {
963 + /* how many bytes we can read from current SG */
964 +- in_r = min3(mi.length - in_i, areq->nbytes - i,
965 +- 64 - op->len);
966 ++ in_r = min(areq->nbytes - i, 64 - op->len);
967 ++ in_r = min_t(size_t, mi.length - in_i, in_r);
968 + memcpy(op->buf + op->len, mi.addr + in_i, in_r);
969 + op->len += in_r;
970 + i += in_r;
971 +diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
972 +index 2c573d1aaa64..523b712770ac 100644
973 +--- a/drivers/crypto/virtio/virtio_crypto_algs.c
974 ++++ b/drivers/crypto/virtio/virtio_crypto_algs.c
975 +@@ -117,8 +117,6 @@ virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
976 + *alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
977 + break;
978 + default:
979 +- pr_err("virtio_crypto: Unsupported key length: %d\n",
980 +- key_len);
981 + return -EINVAL;
982 + }
983 + return 0;
984 +@@ -498,6 +496,11 @@ static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
985 + /* Use the first data virtqueue as default */
986 + struct data_queue *data_vq = &vcrypto->data_vq[0];
987 +
988 ++ if (!req->nbytes)
989 ++ return 0;
990 ++ if (req->nbytes % AES_BLOCK_SIZE)
991 ++ return -EINVAL;
992 ++
993 + vc_req->dataq = data_vq;
994 + vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
995 + vc_sym_req->ablkcipher_ctx = ctx;
996 +@@ -518,6 +521,11 @@ static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
997 + /* Use the first data virtqueue as default */
998 + struct data_queue *data_vq = &vcrypto->data_vq[0];
999 +
1000 ++ if (!req->nbytes)
1001 ++ return 0;
1002 ++ if (req->nbytes % AES_BLOCK_SIZE)
1003 ++ return -EINVAL;
1004 ++
1005 + vc_req->dataq = data_vq;
1006 + vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
1007 + vc_sym_req->ablkcipher_ctx = ctx;
1008 +diff --git a/drivers/crypto/vmx/Makefile b/drivers/crypto/vmx/Makefile
1009 +index cab32cfec9c4..709670d2b553 100644
1010 +--- a/drivers/crypto/vmx/Makefile
1011 ++++ b/drivers/crypto/vmx/Makefile
1012 +@@ -3,13 +3,13 @@ obj-$(CONFIG_CRYPTO_DEV_VMX_ENCRYPT) += vmx-crypto.o
1013 + vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o aes_xts.o ghash.o
1014 +
1015 + ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
1016 +-TARGET := linux-ppc64le
1017 ++override flavour := linux-ppc64le
1018 + else
1019 +-TARGET := linux-ppc64
1020 ++override flavour := linux-ppc64
1021 + endif
1022 +
1023 + quiet_cmd_perl = PERL $@
1024 +- cmd_perl = $(PERL) $(<) $(TARGET) > $(@)
1025 ++ cmd_perl = $(PERL) $(<) $(flavour) > $(@)
1026 +
1027 + targets += aesp8-ppc.S ghashp8-ppc.S
1028 +
1029 +diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
1030 +index 574bce603337..78c339da19b5 100644
1031 +--- a/drivers/edac/ghes_edac.c
1032 ++++ b/drivers/edac/ghes_edac.c
1033 +@@ -210,6 +210,7 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
1034 + /* Cleans the error report buffer */
1035 + memset(e, 0, sizeof (*e));
1036 + e->error_count = 1;
1037 ++ e->grain = 1;
1038 + strcpy(e->label, "unknown label");
1039 + e->msg = pvt->msg;
1040 + e->other_detail = pvt->other_detail;
1041 +@@ -305,7 +306,7 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
1042 +
1043 + /* Error grain */
1044 + if (mem_err->validation_bits & CPER_MEM_VALID_PA_MASK)
1045 +- e->grain = ~(mem_err->physical_addr_mask & ~PAGE_MASK);
1046 ++ e->grain = ~mem_err->physical_addr_mask + 1;
1047 +
1048 + /* Memory error location, mapped on e->location */
1049 + p = e->location;
1050 +@@ -412,8 +413,13 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
1051 + if (p > pvt->other_detail)
1052 + *(p - 1) = '\0';
1053 +
1054 ++ /* Sanity-check driver-supplied grain value. */
1055 ++ if (WARN_ON_ONCE(!e->grain))
1056 ++ e->grain = 1;
1057 ++
1058 ++ grain_bits = fls_long(e->grain - 1);
1059 ++
1060 + /* Generate the trace event */
1061 +- grain_bits = fls_long(e->grain);
1062 + snprintf(pvt->detail_location, sizeof(pvt->detail_location),
1063 + "APEI location: %s %s", e->location, e->other_detail);
1064 + trace_mc_event(type, e->msg, e->label, e->error_count,
1065 +diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
1066 +index 0cfb5a3efdf6..2efcd94f74fc 100644
1067 +--- a/drivers/extcon/extcon-sm5502.c
1068 ++++ b/drivers/extcon/extcon-sm5502.c
1069 +@@ -69,6 +69,10 @@ struct sm5502_muic_info {
1070 + /* Default value of SM5502 register to bring up MUIC device. */
1071 + static struct reg_data sm5502_reg_data[] = {
1072 + {
1073 ++ .reg = SM5502_REG_RESET,
1074 ++ .val = SM5502_REG_RESET_MASK,
1075 ++ .invert = true,
1076 ++ }, {
1077 + .reg = SM5502_REG_CONTROL,
1078 + .val = SM5502_REG_CONTROL_MASK_INT_MASK,
1079 + .invert = false,
1080 +diff --git a/drivers/extcon/extcon-sm5502.h b/drivers/extcon/extcon-sm5502.h
1081 +index 974b53222f56..12f8b01e5753 100644
1082 +--- a/drivers/extcon/extcon-sm5502.h
1083 ++++ b/drivers/extcon/extcon-sm5502.h
1084 +@@ -241,6 +241,8 @@ enum sm5502_reg {
1085 + #define DM_DP_SWITCH_UART ((DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
1086 + | (DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
1087 +
1088 ++#define SM5502_REG_RESET_MASK (0x1)
1089 ++
1090 + /* SM5502 Interrupts */
1091 + enum sm5502_irq {
1092 + /* INT1 */
1093 +diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
1094 +index 2c31563fdcae..c6fa9b393e84 100644
1095 +--- a/drivers/fsi/fsi-core.c
1096 ++++ b/drivers/fsi/fsi-core.c
1097 +@@ -552,6 +552,31 @@ static int fsi_slave_scan(struct fsi_slave *slave)
1098 + return 0;
1099 + }
1100 +
1101 ++static unsigned long aligned_access_size(size_t offset, size_t count)
1102 ++{
1103 ++ unsigned long offset_unit, count_unit;
1104 ++
1105 ++ /* Criteria:
1106 ++ *
1107 ++ * 1. Access size must be less than or equal to the maximum access
1108 ++ * width or the highest power-of-two factor of offset
1109 ++ * 2. Access size must be less than or equal to the amount specified by
1110 ++ * count
1111 ++ *
1112 ++ * The access width is optimal if we can calculate 1 to be strictly
1113 ++ * equal while still satisfying 2.
1114 ++ */
1115 ++
1116 ++ /* Find 1 by the bottom bit of offset (with a 4 byte access cap) */
1117 ++ offset_unit = BIT(__builtin_ctzl(offset | 4));
1118 ++
1119 ++ /* Find 2 by the top bit of count */
1120 ++ count_unit = BIT(8 * sizeof(unsigned long) - 1 - __builtin_clzl(count));
1121 ++
1122 ++ /* Constrain the maximum access width to the minimum of both criteria */
1123 ++ return BIT(__builtin_ctzl(offset_unit | count_unit));
1124 ++}
1125 ++
1126 + static ssize_t fsi_slave_sysfs_raw_read(struct file *file,
1127 + struct kobject *kobj, struct bin_attribute *attr, char *buf,
1128 + loff_t off, size_t count)
1129 +@@ -567,8 +592,7 @@ static ssize_t fsi_slave_sysfs_raw_read(struct file *file,
1130 + return -EINVAL;
1131 +
1132 + for (total_len = 0; total_len < count; total_len += read_len) {
1133 +- read_len = min_t(size_t, count, 4);
1134 +- read_len -= off & 0x3;
1135 ++ read_len = aligned_access_size(off, count - total_len);
1136 +
1137 + rc = fsi_slave_read(slave, off, buf + total_len, read_len);
1138 + if (rc)
1139 +@@ -595,8 +619,7 @@ static ssize_t fsi_slave_sysfs_raw_write(struct file *file,
1140 + return -EINVAL;
1141 +
1142 + for (total_len = 0; total_len < count; total_len += write_len) {
1143 +- write_len = min_t(size_t, count, 4);
1144 +- write_len -= off & 0x3;
1145 ++ write_len = aligned_access_size(off, count - total_len);
1146 +
1147 + rc = fsi_slave_write(slave, off, buf + total_len, write_len);
1148 + if (rc)
1149 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
1150 +index 8904e62dca7a..41d3142ef3cf 100644
1151 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
1152 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
1153 +@@ -138,6 +138,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
1154 + }
1155 +
1156 + dma_fence_put(fence);
1157 ++ fence = NULL;
1158 +
1159 + r = amdgpu_bo_kmap(vram_obj, &vram_map);
1160 + if (r) {
1161 +@@ -183,6 +184,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
1162 + }
1163 +
1164 + dma_fence_put(fence);
1165 ++ fence = NULL;
1166 +
1167 + r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
1168 + if (r) {
1169 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1170 +index 49fe5084c53d..f67c332b16a4 100644
1171 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1172 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1173 +@@ -700,10 +700,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
1174 + id->oa_base != job->oa_base ||
1175 + id->oa_size != job->oa_size);
1176 + bool vm_flush_needed = job->vm_needs_flush;
1177 +- bool pasid_mapping_needed = id->pasid != job->pasid ||
1178 +- !id->pasid_mapping ||
1179 +- !dma_fence_is_signaled(id->pasid_mapping);
1180 + struct dma_fence *fence = NULL;
1181 ++ bool pasid_mapping_needed = false;
1182 + unsigned patch_offset = 0;
1183 + int r;
1184 +
1185 +@@ -713,6 +711,12 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
1186 + pasid_mapping_needed = true;
1187 + }
1188 +
1189 ++ mutex_lock(&id_mgr->lock);
1190 ++ if (id->pasid != job->pasid || !id->pasid_mapping ||
1191 ++ !dma_fence_is_signaled(id->pasid_mapping))
1192 ++ pasid_mapping_needed = true;
1193 ++ mutex_unlock(&id_mgr->lock);
1194 ++
1195 + gds_switch_needed &= !!ring->funcs->emit_gds_switch;
1196 + vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
1197 + job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
1198 +@@ -752,9 +756,11 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
1199 + }
1200 +
1201 + if (pasid_mapping_needed) {
1202 ++ mutex_lock(&id_mgr->lock);
1203 + id->pasid = job->pasid;
1204 + dma_fence_put(id->pasid_mapping);
1205 + id->pasid_mapping = dma_fence_get(fence);
1206 ++ mutex_unlock(&id_mgr->lock);
1207 + }
1208 + dma_fence_put(fence);
1209 +
1210 +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
1211 +index 782411649816..28794b1b15c1 100644
1212 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
1213 ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
1214 +@@ -2187,7 +2187,8 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
1215 + * And it's needed by gfxoff feature.
1216 + */
1217 + if (adev->gfx.rlc.is_rlc_v2_1) {
1218 +- gfx_v9_1_init_rlc_save_restore_list(adev);
1219 ++ if (adev->asic_type == CHIP_VEGA12)
1220 ++ gfx_v9_1_init_rlc_save_restore_list(adev);
1221 + gfx_v9_0_enable_save_restore_machine(adev);
1222 + }
1223 +
1224 +diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
1225 +index 60dad63098a2..e40a3fbc3e76 100644
1226 +--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
1227 ++++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
1228 +@@ -62,7 +62,8 @@ static int si_ih_irq_init(struct amdgpu_device *adev)
1229 + u64 wptr_off;
1230 +
1231 + si_ih_disable_interrupts(adev);
1232 +- WREG32(INTERRUPT_CNTL2, adev->irq.ih.gpu_addr >> 8);
1233 ++ /* set dummy read address to dummy page address */
1234 ++ WREG32(INTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
1235 + interrupt_cntl = RREG32(INTERRUPT_CNTL);
1236 + interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
1237 + interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
1238 +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
1239 +index c56ac47cd318..bc47f6a44456 100644
1240 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
1241 ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
1242 +@@ -62,6 +62,11 @@ int kfd_interrupt_init(struct kfd_dev *kfd)
1243 + }
1244 +
1245 + kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1);
1246 ++ if (unlikely(!kfd->ih_wq)) {
1247 ++ kfifo_free(&kfd->ih_fifo);
1248 ++ dev_err(kfd_chardev(), "Failed to allocate KFD IH workqueue\n");
1249 ++ return -ENOMEM;
1250 ++ }
1251 + spin_lock_init(&kfd->interrupt_lock);
1252 +
1253 + INIT_WORK(&kfd->interrupt_work, interrupt_wq);
1254 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
1255 +index 23a7ef97afdd..c6f7c1344a9b 100644
1256 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
1257 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
1258 +@@ -1950,7 +1950,7 @@ static bool dp_active_dongle_validate_timing(
1259 + break;
1260 + }
1261 +
1262 +- if (dongle_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
1263 ++ if (dpcd_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
1264 + dongle_caps->extendedCapValid == false)
1265 + return true;
1266 +
1267 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
1268 +index 05840f5bddd5..122249da03ab 100644
1269 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
1270 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
1271 +@@ -2172,6 +2172,7 @@ static void get_active_converter_info(
1272 + uint8_t data, struct dc_link *link)
1273 + {
1274 + union dp_downstream_port_present ds_port = { .byte = data };
1275 ++ memset(&link->dpcd_caps.dongle_caps, 0, sizeof(link->dpcd_caps.dongle_caps));
1276 +
1277 + /* decode converter info*/
1278 + if (!ds_port.fields.PORT_PRESENT) {
1279 +diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c
1280 +index f8433c93f463..cc820e9aea1d 100644
1281 +--- a/drivers/gpu/drm/bridge/analogix-anx78xx.c
1282 ++++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c
1283 +@@ -725,7 +725,9 @@ static int anx78xx_init_pdata(struct anx78xx *anx78xx)
1284 + /* 1.0V digital core power regulator */
1285 + pdata->dvdd10 = devm_regulator_get(dev, "dvdd10");
1286 + if (IS_ERR(pdata->dvdd10)) {
1287 +- DRM_ERROR("DVDD10 regulator not found\n");
1288 ++ if (PTR_ERR(pdata->dvdd10) != -EPROBE_DEFER)
1289 ++ DRM_ERROR("DVDD10 regulator not found\n");
1290 ++
1291 + return PTR_ERR(pdata->dvdd10);
1292 + }
1293 +
1294 +@@ -1341,7 +1343,9 @@ static int anx78xx_i2c_probe(struct i2c_client *client,
1295 +
1296 + err = anx78xx_init_pdata(anx78xx);
1297 + if (err) {
1298 +- DRM_ERROR("Failed to initialize pdata: %d\n", err);
1299 ++ if (err != -EPROBE_DEFER)
1300 ++ DRM_ERROR("Failed to initialize pdata: %d\n", err);
1301 ++
1302 + return err;
1303 + }
1304 +
1305 +diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
1306 +index 5971976284bf..2a0a1654d3ce 100644
1307 +--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
1308 ++++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
1309 +@@ -39,6 +39,7 @@
1310 +
1311 + #include <media/cec-notifier.h>
1312 +
1313 ++#define DDC_CI_ADDR 0x37
1314 + #define DDC_SEGMENT_ADDR 0x30
1315 +
1316 + #define HDMI_EDID_LEN 512
1317 +@@ -320,6 +321,15 @@ static int dw_hdmi_i2c_xfer(struct i2c_adapter *adap,
1318 + u8 addr = msgs[0].addr;
1319 + int i, ret = 0;
1320 +
1321 ++ if (addr == DDC_CI_ADDR)
1322 ++ /*
1323 ++ * The internal I2C controller does not support the multi-byte
1324 ++ * read and write operations needed for DDC/CI.
1325 ++ * TOFIX: Blacklist the DDC/CI address until we filter out
1326 ++ * unsupported I2C operations.
1327 ++ */
1328 ++ return -EOPNOTSUPP;
1329 ++
1330 + dev_dbg(hdmi->dev, "xfer: num: %d, addr: %#x\n", num, addr);
1331 +
1332 + for (i = 0; i < num; i++) {
1333 +@@ -1747,7 +1757,7 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
1334 +
1335 + /* HDMI Initialization Step E - Configure audio */
1336 + hdmi_clk_regenerator_update_pixel_clock(hdmi);
1337 +- hdmi_enable_audio_clk(hdmi, true);
1338 ++ hdmi_enable_audio_clk(hdmi, hdmi->audio_enable);
1339 + }
1340 +
1341 + /* not for DVI mode */
1342 +diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
1343 +index d1859bcc7ccb..33a72a84361e 100644
1344 +--- a/drivers/gpu/drm/drm_vblank.c
1345 ++++ b/drivers/gpu/drm/drm_vblank.c
1346 +@@ -1572,7 +1572,7 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
1347 + unsigned int flags, pipe, high_pipe;
1348 +
1349 + if (!dev->irq_enabled)
1350 +- return -EINVAL;
1351 ++ return -EOPNOTSUPP;
1352 +
1353 + if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
1354 + return -EINVAL;
1355 +@@ -1813,7 +1813,7 @@ int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data,
1356 + return -EINVAL;
1357 +
1358 + if (!dev->irq_enabled)
1359 +- return -EINVAL;
1360 ++ return -EOPNOTSUPP;
1361 +
1362 + crtc = drm_crtc_find(dev, file_priv, get_seq->crtc_id);
1363 + if (!crtc)
1364 +@@ -1871,7 +1871,7 @@ int drm_crtc_queue_sequence_ioctl(struct drm_device *dev, void *data,
1365 + return -EINVAL;
1366 +
1367 + if (!dev->irq_enabled)
1368 +- return -EINVAL;
1369 ++ return -EOPNOTSUPP;
1370 +
1371 + crtc = drm_crtc_find(dev, file_priv, queue_seq->crtc_id);
1372 + if (!crtc)
1373 +diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
1374 +index 1b7fd6a9d8a5..f73a02a2a5b3 100644
1375 +--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
1376 ++++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
1377 +@@ -139,6 +139,7 @@ static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit,
1378 + s32 freq_error, min_error = 100000;
1379 +
1380 + memset(best_clock, 0, sizeof(*best_clock));
1381 ++ memset(&clock, 0, sizeof(clock));
1382 +
1383 + for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
1384 + for (clock.n = limit->n.min; clock.n <= limit->n.max;
1385 +@@ -195,6 +196,7 @@ static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
1386 + int err = target;
1387 +
1388 + memset(best_clock, 0, sizeof(*best_clock));
1389 ++ memset(&clock, 0, sizeof(clock));
1390 +
1391 + for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
1392 + for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
1393 +diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
1394 +index 9a2cb8aeab3a..aab6a70ece7f 100644
1395 +--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
1396 ++++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
1397 +@@ -427,6 +427,7 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
1398 + return PTR_ERR(ts->dsi);
1399 + }
1400 +
1401 ++ drm_panel_init(&ts->base);
1402 + ts->base.dev = dev;
1403 + ts->base.funcs = &rpi_touchscreen_funcs;
1404 +
1405 +diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
1406 +index 74284e5afc5d..89fa17877b33 100644
1407 +--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
1408 ++++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
1409 +@@ -380,6 +380,7 @@ static int st7789v_probe(struct spi_device *spi)
1410 + spi_set_drvdata(spi, ctx);
1411 + ctx->spi = spi;
1412 +
1413 ++ drm_panel_init(&ctx->panel);
1414 + ctx->panel.dev = &spi->dev;
1415 + ctx->panel.funcs = &st7789v_drm_funcs;
1416 +
1417 +diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
1418 +index d7fe9f15def1..89cb70da2bfe 100644
1419 +--- a/drivers/gpu/drm/tegra/sor.c
1420 ++++ b/drivers/gpu/drm/tegra/sor.c
1421 +@@ -2922,6 +2922,11 @@ static int tegra_sor_parse_dt(struct tegra_sor *sor)
1422 + * earlier
1423 + */
1424 + sor->pad = TEGRA_IO_PAD_HDMI_DP0 + sor->index;
1425 ++ } else {
1426 ++ if (sor->soc->supports_edp)
1427 ++ sor->index = 0;
1428 ++ else
1429 ++ sor->index = 1;
1430 + }
1431 +
1432 + return 0;
1433 +diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
1434 +index 527a1cddb14f..916b2355e11a 100644
1435 +--- a/drivers/gpu/host1x/job.c
1436 ++++ b/drivers/gpu/host1x/job.c
1437 +@@ -447,7 +447,8 @@ out:
1438 + return err;
1439 + }
1440 +
1441 +-static inline int copy_gathers(struct host1x_job *job, struct device *dev)
1442 ++static inline int copy_gathers(struct device *host, struct host1x_job *job,
1443 ++ struct device *dev)
1444 + {
1445 + struct host1x_firewall fw;
1446 + size_t size = 0;
1447 +@@ -470,12 +471,12 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev)
1448 + * Try a non-blocking allocation from a higher priority pools first,
1449 + * as awaiting for the allocation here is a major performance hit.
1450 + */
1451 +- job->gather_copy_mapped = dma_alloc_wc(dev, size, &job->gather_copy,
1452 ++ job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy,
1453 + GFP_NOWAIT);
1454 +
1455 + /* the higher priority allocation failed, try the generic-blocking */
1456 + if (!job->gather_copy_mapped)
1457 +- job->gather_copy_mapped = dma_alloc_wc(dev, size,
1458 ++ job->gather_copy_mapped = dma_alloc_wc(host, size,
1459 + &job->gather_copy,
1460 + GFP_KERNEL);
1461 + if (!job->gather_copy_mapped)
1462 +@@ -523,7 +524,7 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev)
1463 + goto out;
1464 +
1465 + if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
1466 +- err = copy_gathers(job, dev);
1467 ++ err = copy_gathers(host->dev, job, dev);
1468 + if (err)
1469 + goto out;
1470 + }
1471 +@@ -584,7 +585,7 @@ void host1x_job_unpin(struct host1x_job *job)
1472 + job->num_unpins = 0;
1473 +
1474 + if (job->gather_copy_size)
1475 +- dma_free_wc(job->channel->dev, job->gather_copy_size,
1476 ++ dma_free_wc(host->dev, job->gather_copy_size,
1477 + job->gather_copy_mapped, job->gather_copy);
1478 + }
1479 + EXPORT_SYMBOL(host1x_job_unpin);
1480 +diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
1481 +index 24ab3cb426a7..e63a0c24e76b 100644
1482 +--- a/drivers/hwtracing/intel_th/pci.c
1483 ++++ b/drivers/hwtracing/intel_th/pci.c
1484 +@@ -180,6 +180,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
1485 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x06a6),
1486 + .driver_data = (kernel_ulong_t)&intel_th_2x,
1487 + },
1488 ++ {
1489 ++ /* Comet Lake PCH-V */
1490 ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa3a6),
1491 ++ .driver_data = (kernel_ulong_t)&intel_th_2x,
1492 ++ },
1493 + {
1494 + /* Ice Lake NNPI */
1495 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x45c5),
1496 +@@ -205,6 +210,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
1497 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4da6),
1498 + .driver_data = (kernel_ulong_t)&intel_th_2x,
1499 + },
1500 ++ {
1501 ++ /* Elkhart Lake */
1502 ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4b26),
1503 ++ .driver_data = (kernel_ulong_t)&intel_th_2x,
1504 ++ },
1505 + { 0 },
1506 + };
1507 +
1508 +diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c
1509 +index c64c6675cae6..4ab052d76d9f 100644
1510 +--- a/drivers/iio/adc/dln2-adc.c
1511 ++++ b/drivers/iio/adc/dln2-adc.c
1512 +@@ -527,6 +527,10 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
1513 + u16 conflict;
1514 + unsigned int trigger_chan;
1515 +
1516 ++ ret = iio_triggered_buffer_postenable(indio_dev);
1517 ++ if (ret)
1518 ++ return ret;
1519 ++
1520 + mutex_lock(&dln2->mutex);
1521 +
1522 + /* Enable ADC */
1523 +@@ -540,6 +544,7 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
1524 + (int)conflict);
1525 + ret = -EBUSY;
1526 + }
1527 ++ iio_triggered_buffer_predisable(indio_dev);
1528 + return ret;
1529 + }
1530 +
1531 +@@ -553,6 +558,7 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
1532 + mutex_unlock(&dln2->mutex);
1533 + if (ret < 0) {
1534 + dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
1535 ++ iio_triggered_buffer_predisable(indio_dev);
1536 + return ret;
1537 + }
1538 + } else {
1539 +@@ -560,12 +566,12 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
1540 + mutex_unlock(&dln2->mutex);
1541 + }
1542 +
1543 +- return iio_triggered_buffer_postenable(indio_dev);
1544 ++ return 0;
1545 + }
1546 +
1547 + static int dln2_adc_triggered_buffer_predisable(struct iio_dev *indio_dev)
1548 + {
1549 +- int ret;
1550 ++ int ret, ret2;
1551 + struct dln2_adc *dln2 = iio_priv(indio_dev);
1552 +
1553 + mutex_lock(&dln2->mutex);
1554 +@@ -580,12 +586,14 @@ static int dln2_adc_triggered_buffer_predisable(struct iio_dev *indio_dev)
1555 + ret = dln2_adc_set_port_enabled(dln2, false, NULL);
1556 +
1557 + mutex_unlock(&dln2->mutex);
1558 +- if (ret < 0) {
1559 ++ if (ret < 0)
1560 + dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
1561 +- return ret;
1562 +- }
1563 +
1564 +- return iio_triggered_buffer_predisable(indio_dev);
1565 ++ ret2 = iio_triggered_buffer_predisable(indio_dev);
1566 ++ if (ret == 0)
1567 ++ ret = ret2;
1568 ++
1569 ++ return ret;
1570 + }
1571 +
1572 + static const struct iio_buffer_setup_ops dln2_adc_buffer_setup_ops = {
1573 +diff --git a/drivers/iio/adc/max1027.c b/drivers/iio/adc/max1027.c
1574 +index 311c1a89c329..0939eb0384f1 100644
1575 +--- a/drivers/iio/adc/max1027.c
1576 ++++ b/drivers/iio/adc/max1027.c
1577 +@@ -460,6 +460,14 @@ static int max1027_probe(struct spi_device *spi)
1578 + goto fail_dev_register;
1579 + }
1580 +
1581 ++ /* Internal reset */
1582 ++ st->reg = MAX1027_RST_REG;
1583 ++ ret = spi_write(st->spi, &st->reg, 1);
1584 ++ if (ret < 0) {
1585 ++ dev_err(&indio_dev->dev, "Failed to reset the ADC\n");
1586 ++ return ret;
1587 ++ }
1588 ++
1589 + /* Disable averaging */
1590 + st->reg = MAX1027_AVG_REG;
1591 + ret = spi_write(st->spi, &st->reg, 1);
1592 +diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
1593 +index 80beb64e9e0c..69f4cfa6494b 100644
1594 +--- a/drivers/iio/dac/Kconfig
1595 ++++ b/drivers/iio/dac/Kconfig
1596 +@@ -59,8 +59,8 @@ config AD5446
1597 + help
1598 + Say yes here to build support for Analog Devices AD5300, AD5301, AD5310,
1599 + AD5311, AD5320, AD5321, AD5444, AD5446, AD5450, AD5451, AD5452, AD5453,
1600 +- AD5512A, AD5541A, AD5542A, AD5543, AD5553, AD5601, AD5602, AD5611, AD5612,
1601 +- AD5620, AD5621, AD5622, AD5640, AD5641, AD5660, AD5662 DACs
1602 ++ AD5512A, AD5541A, AD5542A, AD5543, AD5553, AD5600, AD5601, AD5602, AD5611,
1603 ++ AD5612, AD5620, AD5621, AD5622, AD5640, AD5641, AD5660, AD5662 DACs
1604 + as well as Texas Instruments DAC081S101, DAC101S101, DAC121S101.
1605 +
1606 + To compile this driver as a module, choose M here: the
1607 +diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
1608 +index fd26a4272fc5..d3ce5def4f65 100644
1609 +--- a/drivers/iio/dac/ad5446.c
1610 ++++ b/drivers/iio/dac/ad5446.c
1611 +@@ -328,6 +328,7 @@ enum ad5446_supported_spi_device_ids {
1612 + ID_AD5541A,
1613 + ID_AD5512A,
1614 + ID_AD5553,
1615 ++ ID_AD5600,
1616 + ID_AD5601,
1617 + ID_AD5611,
1618 + ID_AD5621,
1619 +@@ -382,6 +383,10 @@ static const struct ad5446_chip_info ad5446_spi_chip_info[] = {
1620 + .channel = AD5446_CHANNEL(14, 16, 0),
1621 + .write = ad5446_write,
1622 + },
1623 ++ [ID_AD5600] = {
1624 ++ .channel = AD5446_CHANNEL(16, 16, 0),
1625 ++ .write = ad5446_write,
1626 ++ },
1627 + [ID_AD5601] = {
1628 + .channel = AD5446_CHANNEL_POWERDOWN(8, 16, 6),
1629 + .write = ad5446_write,
1630 +@@ -449,6 +454,7 @@ static const struct spi_device_id ad5446_spi_ids[] = {
1631 + {"ad5542a", ID_AD5541A}, /* ad5541a and ad5542a are compatible */
1632 + {"ad5543", ID_AD5541A}, /* ad5541a and ad5543 are compatible */
1633 + {"ad5553", ID_AD5553},
1634 ++ {"ad5600", ID_AD5600},
1635 + {"ad5601", ID_AD5601},
1636 + {"ad5611", ID_AD5611},
1637 + {"ad5621", ID_AD5621},
1638 +diff --git a/drivers/iio/light/bh1750.c b/drivers/iio/light/bh1750.c
1639 +index a814828e69f5..5f5d54ce882b 100644
1640 +--- a/drivers/iio/light/bh1750.c
1641 ++++ b/drivers/iio/light/bh1750.c
1642 +@@ -62,9 +62,9 @@ struct bh1750_chip_info {
1643 +
1644 + u16 int_time_low_mask;
1645 + u16 int_time_high_mask;
1646 +-}
1647 ++};
1648 +
1649 +-static const bh1750_chip_info_tbl[] = {
1650 ++static const struct bh1750_chip_info bh1750_chip_info_tbl[] = {
1651 + [BH1710] = { 140, 1022, 300, 400, 250000000, 2, 0x001F, 0x03E0 },
1652 + [BH1721] = { 140, 1020, 300, 400, 250000000, 2, 0x0010, 0x03E0 },
1653 + [BH1750] = { 31, 254, 69, 1740, 57500000, 1, 0x001F, 0x00E0 },
1654 +diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
1655 +index 8cc3df24e04e..9167a1c40bcf 100644
1656 +--- a/drivers/infiniband/hw/qedr/verbs.c
1657 ++++ b/drivers/infiniband/hw/qedr/verbs.c
1658 +@@ -1701,6 +1701,14 @@ static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_qp *qp)
1659 + if (qp->urq.umem)
1660 + ib_umem_release(qp->urq.umem);
1661 + qp->urq.umem = NULL;
1662 ++
1663 ++ if (rdma_protocol_roce(&dev->ibdev, 1)) {
1664 ++ qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
1665 ++ qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
1666 ++ } else {
1667 ++ kfree(qp->usq.pbl_tbl);
1668 ++ kfree(qp->urq.pbl_tbl);
1669 ++ }
1670 + }
1671 +
1672 + static int qedr_create_user_qp(struct qedr_dev *dev,
1673 +@@ -2809,8 +2817,8 @@ int qedr_dereg_mr(struct ib_mr *ib_mr)
1674 +
1675 + dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
1676 +
1677 +- if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
1678 +- qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
1679 ++ if (mr->type != QEDR_MR_DMA)
1680 ++ free_mr_info(dev, &mr->info);
1681 +
1682 + /* it could be user registered memory. */
1683 + if (mr->umem)
1684 +diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
1685 +index 3fecd87c9f2b..b4e0ae024575 100644
1686 +--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
1687 ++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
1688 +@@ -646,6 +646,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
1689 + if (ib_conn->pi_support) {
1690 + u32 sig_caps = ib_conn->device->ib_device->attrs.sig_prot_cap;
1691 +
1692 ++ shost->sg_prot_tablesize = shost->sg_tablesize;
1693 + scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps));
1694 + scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP |
1695 + SHOST_DIX_GUARD_CRC);
1696 +diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
1697 +index 9c3beb1e382b..46794cac167e 100644
1698 +--- a/drivers/md/bcache/alloc.c
1699 ++++ b/drivers/md/bcache/alloc.c
1700 +@@ -377,7 +377,10 @@ retry_invalidate:
1701 + if (!fifo_full(&ca->free_inc))
1702 + goto retry_invalidate;
1703 +
1704 +- bch_prio_write(ca);
1705 ++ if (bch_prio_write(ca, false) < 0) {
1706 ++ ca->invalidate_needs_gc = 1;
1707 ++ wake_up_gc(ca->set);
1708 ++ }
1709 + }
1710 + }
1711 + out:
1712 +diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
1713 +index 83f0b91aeb90..4677b18ac281 100644
1714 +--- a/drivers/md/bcache/bcache.h
1715 ++++ b/drivers/md/bcache/bcache.h
1716 +@@ -959,7 +959,7 @@ bool bch_cached_dev_error(struct cached_dev *dc);
1717 + __printf(2, 3)
1718 + bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...);
1719 +
1720 +-void bch_prio_write(struct cache *ca);
1721 ++int bch_prio_write(struct cache *ca, bool wait);
1722 + void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
1723 +
1724 + extern struct workqueue_struct *bcache_wq;
1725 +diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
1726 +index 14d381cc6d74..c45d9ad01077 100644
1727 +--- a/drivers/md/bcache/super.c
1728 ++++ b/drivers/md/bcache/super.c
1729 +@@ -525,12 +525,29 @@ static void prio_io(struct cache *ca, uint64_t bucket, int op,
1730 + closure_sync(cl);
1731 + }
1732 +
1733 +-void bch_prio_write(struct cache *ca)
1734 ++int bch_prio_write(struct cache *ca, bool wait)
1735 + {
1736 + int i;
1737 + struct bucket *b;
1738 + struct closure cl;
1739 +
1740 ++ pr_debug("free_prio=%zu, free_none=%zu, free_inc=%zu",
1741 ++ fifo_used(&ca->free[RESERVE_PRIO]),
1742 ++ fifo_used(&ca->free[RESERVE_NONE]),
1743 ++ fifo_used(&ca->free_inc));
1744 ++
1745 ++ /*
1746 ++ * Pre-check if there are enough free buckets. In the non-blocking
1747 ++ * scenario it's better to fail early rather than starting to allocate
1748 ++ * buckets and do a cleanup later in case of failure.
1749 ++ */
1750 ++ if (!wait) {
1751 ++ size_t avail = fifo_used(&ca->free[RESERVE_PRIO]) +
1752 ++ fifo_used(&ca->free[RESERVE_NONE]);
1753 ++ if (prio_buckets(ca) > avail)
1754 ++ return -ENOMEM;
1755 ++ }
1756 ++
1757 + closure_init_stack(&cl);
1758 +
1759 + lockdep_assert_held(&ca->set->bucket_lock);
1760 +@@ -540,9 +557,6 @@ void bch_prio_write(struct cache *ca)
1761 + atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
1762 + &ca->meta_sectors_written);
1763 +
1764 +- //pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
1765 +- // fifo_used(&ca->free_inc), fifo_used(&ca->unused));
1766 +-
1767 + for (i = prio_buckets(ca) - 1; i >= 0; --i) {
1768 + long bucket;
1769 + struct prio_set *p = ca->disk_buckets;
1770 +@@ -560,7 +574,7 @@ void bch_prio_write(struct cache *ca)
1771 + p->magic = pset_magic(&ca->sb);
1772 + p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8);
1773 +
1774 +- bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true);
1775 ++ bucket = bch_bucket_alloc(ca, RESERVE_PRIO, wait);
1776 + BUG_ON(bucket == -1);
1777 +
1778 + mutex_unlock(&ca->set->bucket_lock);
1779 +@@ -589,6 +603,7 @@ void bch_prio_write(struct cache *ca)
1780 +
1781 + ca->prio_last_buckets[i] = ca->prio_buckets[i];
1782 + }
1783 ++ return 0;
1784 + }
1785 +
1786 + static void prio_read(struct cache *ca, uint64_t bucket)
1787 +@@ -747,20 +762,28 @@ static inline int idx_to_first_minor(int idx)
1788 +
1789 + static void bcache_device_free(struct bcache_device *d)
1790 + {
1791 ++ struct gendisk *disk = d->disk;
1792 ++
1793 + lockdep_assert_held(&bch_register_lock);
1794 +
1795 +- pr_info("%s stopped", d->disk->disk_name);
1796 ++ if (disk)
1797 ++ pr_info("%s stopped", disk->disk_name);
1798 ++ else
1799 ++ pr_err("bcache device (NULL gendisk) stopped");
1800 +
1801 + if (d->c)
1802 + bcache_device_detach(d);
1803 +- if (d->disk && d->disk->flags & GENHD_FL_UP)
1804 +- del_gendisk(d->disk);
1805 +- if (d->disk && d->disk->queue)
1806 +- blk_cleanup_queue(d->disk->queue);
1807 +- if (d->disk) {
1808 ++
1809 ++ if (disk) {
1810 ++ if (disk->flags & GENHD_FL_UP)
1811 ++ del_gendisk(disk);
1812 ++
1813 ++ if (disk->queue)
1814 ++ blk_cleanup_queue(disk->queue);
1815 ++
1816 + ida_simple_remove(&bcache_device_idx,
1817 +- first_minor_to_idx(d->disk->first_minor));
1818 +- put_disk(d->disk);
1819 ++ first_minor_to_idx(disk->first_minor));
1820 ++ put_disk(disk);
1821 + }
1822 +
1823 + bioset_exit(&d->bio_split);
1824 +@@ -1876,7 +1899,7 @@ static int run_cache_set(struct cache_set *c)
1825 +
1826 + mutex_lock(&c->bucket_lock);
1827 + for_each_cache(ca, c, i)
1828 +- bch_prio_write(ca);
1829 ++ bch_prio_write(ca, true);
1830 + mutex_unlock(&c->bucket_lock);
1831 +
1832 + err = "cannot allocate new UUID bucket";
1833 +diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
1834 +index 2fc8c113977f..fd8607124bdb 100644
1835 +--- a/drivers/md/md-bitmap.c
1836 ++++ b/drivers/md/md-bitmap.c
1837 +@@ -2132,6 +2132,7 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
1838 + memcpy(page_address(store.sb_page),
1839 + page_address(bitmap->storage.sb_page),
1840 + sizeof(bitmap_super_t));
1841 ++ spin_lock_irq(&bitmap->counts.lock);
1842 + md_bitmap_file_unmap(&bitmap->storage);
1843 + bitmap->storage = store;
1844 +
1845 +@@ -2147,7 +2148,6 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
1846 + blocks = min(old_counts.chunks << old_counts.chunkshift,
1847 + chunks << chunkshift);
1848 +
1849 +- spin_lock_irq(&bitmap->counts.lock);
1850 + /* For cluster raid, need to pre-allocate bitmap */
1851 + if (mddev_is_clustered(bitmap->mddev)) {
1852 + unsigned long page;
1853 +diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
1854 +index e6a8b5669b9c..4b6be3b0fd52 100644
1855 +--- a/drivers/media/i2c/ov2659.c
1856 ++++ b/drivers/media/i2c/ov2659.c
1857 +@@ -419,10 +419,14 @@ static struct sensor_register ov2659_720p[] = {
1858 + { REG_TIMING_YINC, 0x11 },
1859 + { REG_TIMING_VERT_FORMAT, 0x80 },
1860 + { REG_TIMING_HORIZ_FORMAT, 0x00 },
1861 ++ { 0x370a, 0x12 },
1862 + { 0x3a03, 0xe8 },
1863 + { 0x3a09, 0x6f },
1864 + { 0x3a0b, 0x5d },
1865 + { 0x3a15, 0x9a },
1866 ++ { REG_VFIFO_READ_START_H, 0x00 },
1867 ++ { REG_VFIFO_READ_START_L, 0x80 },
1868 ++ { REG_ISP_CTRL02, 0x00 },
1869 + { REG_NULL, 0x00 },
1870 + };
1871 +
1872 +@@ -1203,11 +1207,15 @@ static int ov2659_s_stream(struct v4l2_subdev *sd, int on)
1873 + goto unlock;
1874 + }
1875 +
1876 +- ov2659_set_pixel_clock(ov2659);
1877 +- ov2659_set_frame_size(ov2659);
1878 +- ov2659_set_format(ov2659);
1879 +- ov2659_set_streaming(ov2659, 1);
1880 +- ov2659->streaming = on;
1881 ++ ret = ov2659_set_pixel_clock(ov2659);
1882 ++ if (!ret)
1883 ++ ret = ov2659_set_frame_size(ov2659);
1884 ++ if (!ret)
1885 ++ ret = ov2659_set_format(ov2659);
1886 ++ if (!ret) {
1887 ++ ov2659_set_streaming(ov2659, 1);
1888 ++ ov2659->streaming = on;
1889 ++ }
1890 +
1891 + unlock:
1892 + mutex_unlock(&ov2659->lock);
1893 +diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
1894 +index edded869d792..c5aadd8dd23f 100644
1895 +--- a/drivers/media/i2c/ov6650.c
1896 ++++ b/drivers/media/i2c/ov6650.c
1897 +@@ -469,38 +469,39 @@ static int ov6650_set_selection(struct v4l2_subdev *sd,
1898 + {
1899 + struct i2c_client *client = v4l2_get_subdevdata(sd);
1900 + struct ov6650 *priv = to_ov6650(client);
1901 +- struct v4l2_rect rect = sel->r;
1902 + int ret;
1903 +
1904 + if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE ||
1905 + sel->target != V4L2_SEL_TGT_CROP)
1906 + return -EINVAL;
1907 +
1908 +- v4l_bound_align_image(&rect.width, 2, W_CIF, 1,
1909 +- &rect.height, 2, H_CIF, 1, 0);
1910 +- v4l_bound_align_image(&rect.left, DEF_HSTRT << 1,
1911 +- (DEF_HSTRT << 1) + W_CIF - (__s32)rect.width, 1,
1912 +- &rect.top, DEF_VSTRT << 1,
1913 +- (DEF_VSTRT << 1) + H_CIF - (__s32)rect.height, 1,
1914 +- 0);
1915 ++ v4l_bound_align_image(&sel->r.width, 2, W_CIF, 1,
1916 ++ &sel->r.height, 2, H_CIF, 1, 0);
1917 ++ v4l_bound_align_image(&sel->r.left, DEF_HSTRT << 1,
1918 ++ (DEF_HSTRT << 1) + W_CIF - (__s32)sel->r.width, 1,
1919 ++ &sel->r.top, DEF_VSTRT << 1,
1920 ++ (DEF_VSTRT << 1) + H_CIF - (__s32)sel->r.height,
1921 ++ 1, 0);
1922 +
1923 +- ret = ov6650_reg_write(client, REG_HSTRT, rect.left >> 1);
1924 ++ ret = ov6650_reg_write(client, REG_HSTRT, sel->r.left >> 1);
1925 + if (!ret) {
1926 +- priv->rect.left = rect.left;
1927 ++ priv->rect.width += priv->rect.left - sel->r.left;
1928 ++ priv->rect.left = sel->r.left;
1929 + ret = ov6650_reg_write(client, REG_HSTOP,
1930 +- (rect.left + rect.width) >> 1);
1931 ++ (sel->r.left + sel->r.width) >> 1);
1932 + }
1933 + if (!ret) {
1934 +- priv->rect.width = rect.width;
1935 +- ret = ov6650_reg_write(client, REG_VSTRT, rect.top >> 1);
1936 ++ priv->rect.width = sel->r.width;
1937 ++ ret = ov6650_reg_write(client, REG_VSTRT, sel->r.top >> 1);
1938 + }
1939 + if (!ret) {
1940 +- priv->rect.top = rect.top;
1941 ++ priv->rect.height += priv->rect.top - sel->r.top;
1942 ++ priv->rect.top = sel->r.top;
1943 + ret = ov6650_reg_write(client, REG_VSTOP,
1944 +- (rect.top + rect.height) >> 1);
1945 ++ (sel->r.top + sel->r.height) >> 1);
1946 + }
1947 + if (!ret)
1948 +- priv->rect.height = rect.height;
1949 ++ priv->rect.height = sel->r.height;
1950 +
1951 + return ret;
1952 + }
1953 +@@ -614,7 +615,6 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
1954 + dev_err(&client->dev, "Pixel format not handled: 0x%x\n", code);
1955 + return -EINVAL;
1956 + }
1957 +- priv->code = code;
1958 +
1959 + if (code == MEDIA_BUS_FMT_Y8_1X8 ||
1960 + code == MEDIA_BUS_FMT_SBGGR8_1X8) {
1961 +@@ -640,7 +640,6 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
1962 + dev_dbg(&client->dev, "max resolution: CIF\n");
1963 + coma_mask |= COMA_QCIF;
1964 + }
1965 +- priv->half_scale = half_scale;
1966 +
1967 + clkrc = CLKRC_12MHz;
1968 + mclk = 12000000;
1969 +@@ -658,8 +657,13 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
1970 + ret = ov6650_reg_rmw(client, REG_COMA, coma_set, coma_mask);
1971 + if (!ret)
1972 + ret = ov6650_reg_write(client, REG_CLKRC, clkrc);
1973 +- if (!ret)
1974 ++ if (!ret) {
1975 ++ priv->half_scale = half_scale;
1976 ++
1977 + ret = ov6650_reg_rmw(client, REG_COML, coml_set, coml_mask);
1978 ++ }
1979 ++ if (!ret)
1980 ++ priv->code = code;
1981 +
1982 + if (!ret) {
1983 + mf->colorspace = priv->colorspace;
1984 +diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
1985 +index 1236683da8f7..4731e1c72f96 100644
1986 +--- a/drivers/media/i2c/smiapp/smiapp-core.c
1987 ++++ b/drivers/media/i2c/smiapp/smiapp-core.c
1988 +@@ -3108,19 +3108,23 @@ static int smiapp_probe(struct i2c_client *client,
1989 + if (rval < 0)
1990 + goto out_media_entity_cleanup;
1991 +
1992 +- rval = v4l2_async_register_subdev_sensor_common(&sensor->src->sd);
1993 +- if (rval < 0)
1994 +- goto out_media_entity_cleanup;
1995 +-
1996 + pm_runtime_set_active(&client->dev);
1997 + pm_runtime_get_noresume(&client->dev);
1998 + pm_runtime_enable(&client->dev);
1999 ++
2000 ++ rval = v4l2_async_register_subdev_sensor_common(&sensor->src->sd);
2001 ++ if (rval < 0)
2002 ++ goto out_disable_runtime_pm;
2003 ++
2004 + pm_runtime_set_autosuspend_delay(&client->dev, 1000);
2005 + pm_runtime_use_autosuspend(&client->dev);
2006 + pm_runtime_put_autosuspend(&client->dev);
2007 +
2008 + return 0;
2009 +
2010 ++out_disable_runtime_pm:
2011 ++ pm_runtime_disable(&client->dev);
2012 ++
2013 + out_media_entity_cleanup:
2014 + media_entity_cleanup(&sensor->src->sd.entity);
2015 +
2016 +diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
2017 +index 7b113bad70d2..248fb3b6833c 100644
2018 +--- a/drivers/media/pci/cx88/cx88-video.c
2019 ++++ b/drivers/media/pci/cx88/cx88-video.c
2020 +@@ -1312,7 +1312,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
2021 + core = cx88_core_get(dev->pci);
2022 + if (!core) {
2023 + err = -EINVAL;
2024 +- goto fail_free;
2025 ++ goto fail_disable;
2026 + }
2027 + dev->core = core;
2028 +
2029 +@@ -1358,7 +1358,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
2030 + cc->step, cc->default_value);
2031 + if (!vc) {
2032 + err = core->audio_hdl.error;
2033 +- goto fail_core;
2034 ++ goto fail_irq;
2035 + }
2036 + vc->priv = (void *)cc;
2037 + }
2038 +@@ -1372,7 +1372,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
2039 + cc->step, cc->default_value);
2040 + if (!vc) {
2041 + err = core->video_hdl.error;
2042 +- goto fail_core;
2043 ++ goto fail_irq;
2044 + }
2045 + vc->priv = (void *)cc;
2046 + if (vc->id == V4L2_CID_CHROMA_AGC)
2047 +@@ -1535,11 +1535,14 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
2048 +
2049 + fail_unreg:
2050 + cx8800_unregister_video(dev);
2051 +- free_irq(pci_dev->irq, dev);
2052 + mutex_unlock(&core->lock);
2053 ++fail_irq:
2054 ++ free_irq(pci_dev->irq, dev);
2055 + fail_core:
2056 + core->v4ldev = NULL;
2057 + cx88_core_put(core, dev->pci);
2058 ++fail_disable:
2059 ++ pci_disable_device(pci_dev);
2060 + fail_free:
2061 + kfree(dev);
2062 + return err;
2063 +diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
2064 +index b05738a95e55..809320decdeb 100644
2065 +--- a/drivers/media/platform/am437x/am437x-vpfe.c
2066 ++++ b/drivers/media/platform/am437x/am437x-vpfe.c
2067 +@@ -1848,6 +1848,10 @@ static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
2068 + if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD))
2069 + return -ENODATA;
2070 +
2071 ++ /* if trying to set the same std then nothing to do */
2072 ++ if (vpfe_standards[vpfe->std_index].std_id == std_id)
2073 ++ return 0;
2074 ++
2075 + /* If streaming is started, return error */
2076 + if (vb2_is_busy(&vpfe->buffer_queue)) {
2077 + vpfe_err(vpfe, "%s device busy\n", __func__);
2078 +diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
2079 +index 5b8350e87e75..60069869596c 100644
2080 +--- a/drivers/media/platform/qcom/venus/core.c
2081 ++++ b/drivers/media/platform/qcom/venus/core.c
2082 +@@ -430,10 +430,11 @@ static const struct venus_resources msm8916_res = {
2083 + };
2084 +
2085 + static const struct freq_tbl msm8996_freq_table[] = {
2086 +- { 1944000, 490000000 }, /* 4k UHD @ 60 */
2087 +- { 972000, 320000000 }, /* 4k UHD @ 30 */
2088 +- { 489600, 150000000 }, /* 1080p @ 60 */
2089 +- { 244800, 75000000 }, /* 1080p @ 30 */
2090 ++ { 1944000, 520000000 }, /* 4k UHD @ 60 (decode only) */
2091 ++ { 972000, 520000000 }, /* 4k UHD @ 30 */
2092 ++ { 489600, 346666667 }, /* 1080p @ 60 */
2093 ++ { 244800, 150000000 }, /* 1080p @ 30 */
2094 ++ { 108000, 75000000 }, /* 720p @ 30 */
2095 + };
2096 +
2097 + static const struct reg_val msm8996_reg_preset[] = {
2098 +diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
2099 +index 124085556b94..fbcc67c10993 100644
2100 +--- a/drivers/media/platform/qcom/venus/hfi_venus.c
2101 ++++ b/drivers/media/platform/qcom/venus/hfi_venus.c
2102 +@@ -1484,6 +1484,7 @@ static int venus_suspend_3xx(struct venus_core *core)
2103 + {
2104 + struct venus_hfi_device *hdev = to_hfi_priv(core);
2105 + struct device *dev = core->dev;
2106 ++ u32 ctrl_status;
2107 + bool val;
2108 + int ret;
2109 +
2110 +@@ -1499,6 +1500,10 @@ static int venus_suspend_3xx(struct venus_core *core)
2111 + return -EINVAL;
2112 + }
2113 +
2114 ++ ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
2115 ++ if (ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)
2116 ++ goto power_off;
2117 ++
2118 + /*
2119 + * Power collapse sequence for Venus 3xx and 4xx versions:
2120 + * 1. Check for ARM9 and video core to be idle by checking WFI bit
2121 +@@ -1523,6 +1528,7 @@ static int venus_suspend_3xx(struct venus_core *core)
2122 + if (ret)
2123 + return ret;
2124 +
2125 ++power_off:
2126 + mutex_lock(&hdev->lock);
2127 +
2128 + ret = venus_power_off(hdev);
2129 +diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
2130 +index 81413ab52475..b677d014e7ba 100644
2131 +--- a/drivers/media/platform/rcar_drif.c
2132 ++++ b/drivers/media/platform/rcar_drif.c
2133 +@@ -912,6 +912,7 @@ static int rcar_drif_g_fmt_sdr_cap(struct file *file, void *priv,
2134 + {
2135 + struct rcar_drif_sdr *sdr = video_drvdata(file);
2136 +
2137 ++ memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
2138 + f->fmt.sdr.pixelformat = sdr->fmt->pixelformat;
2139 + f->fmt.sdr.buffersize = sdr->fmt->buffersize;
2140 +
2141 +diff --git a/drivers/media/platform/ti-vpe/vpdma.h b/drivers/media/platform/ti-vpe/vpdma.h
2142 +index 7e611501c291..f29074c84915 100644
2143 +--- a/drivers/media/platform/ti-vpe/vpdma.h
2144 ++++ b/drivers/media/platform/ti-vpe/vpdma.h
2145 +@@ -60,6 +60,7 @@ struct vpdma_data_format {
2146 + * line stride of source and dest
2147 + * buffers should be 16 byte aligned
2148 + */
2149 ++#define VPDMA_MAX_STRIDE 65520 /* Max line stride 16 byte aligned */
2150 + #define VPDMA_DTD_DESC_SIZE 32 /* 8 words */
2151 + #define VPDMA_CFD_CTD_DESC_SIZE 16 /* 4 words */
2152 +
2153 +diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
2154 +index d70871d0ad2d..a285b9db7ee8 100644
2155 +--- a/drivers/media/platform/ti-vpe/vpe.c
2156 ++++ b/drivers/media/platform/ti-vpe/vpe.c
2157 +@@ -352,20 +352,25 @@ enum {
2158 + };
2159 +
2160 + /* find our format description corresponding to the passed v4l2_format */
2161 +-static struct vpe_fmt *find_format(struct v4l2_format *f)
2162 ++static struct vpe_fmt *__find_format(u32 fourcc)
2163 + {
2164 + struct vpe_fmt *fmt;
2165 + unsigned int k;
2166 +
2167 + for (k = 0; k < ARRAY_SIZE(vpe_formats); k++) {
2168 + fmt = &vpe_formats[k];
2169 +- if (fmt->fourcc == f->fmt.pix.pixelformat)
2170 ++ if (fmt->fourcc == fourcc)
2171 + return fmt;
2172 + }
2173 +
2174 + return NULL;
2175 + }
2176 +
2177 ++static struct vpe_fmt *find_format(struct v4l2_format *f)
2178 ++{
2179 ++ return __find_format(f->fmt.pix.pixelformat);
2180 ++}
2181 ++
2182 + /*
2183 + * there is one vpe_dev structure in the driver, it is shared by
2184 + * all instances.
2185 +@@ -1027,11 +1032,14 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port)
2186 + dma_addr_t dma_addr;
2187 + u32 flags = 0;
2188 + u32 offset = 0;
2189 ++ u32 stride;
2190 +
2191 + if (port == VPE_PORT_MV_OUT) {
2192 + vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
2193 + dma_addr = ctx->mv_buf_dma[mv_buf_selector];
2194 + q_data = &ctx->q_data[Q_DATA_SRC];
2195 ++ stride = ALIGN((q_data->width * vpdma_fmt->depth) >> 3,
2196 ++ VPDMA_STRIDE_ALIGN);
2197 + } else {
2198 + /* to incorporate interleaved formats */
2199 + int plane = fmt->coplanar ? p_data->vb_part : 0;
2200 +@@ -1058,6 +1066,7 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port)
2201 + }
2202 + /* Apply the offset */
2203 + dma_addr += offset;
2204 ++ stride = q_data->bytesperline[VPE_LUMA];
2205 + }
2206 +
2207 + if (q_data->flags & Q_DATA_FRAME_1D)
2208 +@@ -1069,7 +1078,7 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port)
2209 + MAX_W, MAX_H);
2210 +
2211 + vpdma_add_out_dtd(&ctx->desc_list, q_data->width,
2212 +- q_data->bytesperline[VPE_LUMA], &q_data->c_rect,
2213 ++ stride, &q_data->c_rect,
2214 + vpdma_fmt, dma_addr, MAX_OUT_WIDTH_REG1,
2215 + MAX_OUT_HEIGHT_REG1, p_data->channel, flags);
2216 + }
2217 +@@ -1088,10 +1097,13 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
2218 + dma_addr_t dma_addr;
2219 + u32 flags = 0;
2220 + u32 offset = 0;
2221 ++ u32 stride;
2222 +
2223 + if (port == VPE_PORT_MV_IN) {
2224 + vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
2225 + dma_addr = ctx->mv_buf_dma[mv_buf_selector];
2226 ++ stride = ALIGN((q_data->width * vpdma_fmt->depth) >> 3,
2227 ++ VPDMA_STRIDE_ALIGN);
2228 + } else {
2229 + /* to incorporate interleaved formats */
2230 + int plane = fmt->coplanar ? p_data->vb_part : 0;
2231 +@@ -1118,6 +1130,7 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
2232 + }
2233 + /* Apply the offset */
2234 + dma_addr += offset;
2235 ++ stride = q_data->bytesperline[VPE_LUMA];
2236 +
2237 + if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB) {
2238 + /*
2239 +@@ -1153,10 +1166,10 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
2240 + if (p_data->vb_part && fmt->fourcc == V4L2_PIX_FMT_NV12)
2241 + frame_height /= 2;
2242 +
2243 +- vpdma_add_in_dtd(&ctx->desc_list, q_data->width,
2244 +- q_data->bytesperline[VPE_LUMA], &q_data->c_rect,
2245 +- vpdma_fmt, dma_addr, p_data->channel, field, flags, frame_width,
2246 +- frame_height, 0, 0);
2247 ++ vpdma_add_in_dtd(&ctx->desc_list, q_data->width, stride,
2248 ++ &q_data->c_rect, vpdma_fmt, dma_addr,
2249 ++ p_data->channel, field, flags, frame_width,
2250 ++ frame_height, 0, 0);
2251 + }
2252 +
2253 + /*
2254 +@@ -1405,9 +1418,6 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
2255 + /* the previous dst mv buffer becomes the next src mv buffer */
2256 + ctx->src_mv_buf_selector = !ctx->src_mv_buf_selector;
2257 +
2258 +- if (ctx->aborting)
2259 +- goto finished;
2260 +-
2261 + s_vb = ctx->src_vbs[0];
2262 + d_vb = ctx->dst_vb;
2263 +
2264 +@@ -1418,6 +1428,7 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
2265 + d_vb->timecode = s_vb->timecode;
2266 +
2267 + d_vb->sequence = ctx->sequence;
2268 ++ s_vb->sequence = ctx->sequence;
2269 +
2270 + d_q_data = &ctx->q_data[Q_DATA_DST];
2271 + if (d_q_data->flags & Q_IS_INTERLACED) {
2272 +@@ -1471,6 +1482,9 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
2273 + ctx->src_vbs[0] = NULL;
2274 + ctx->dst_vb = NULL;
2275 +
2276 ++ if (ctx->aborting)
2277 ++ goto finished;
2278 ++
2279 + ctx->bufs_completed++;
2280 + if (ctx->bufs_completed < ctx->bufs_per_job && job_ready(ctx)) {
2281 + device_run(ctx);
2282 +@@ -1583,9 +1597,9 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
2283 + unsigned int stride = 0;
2284 +
2285 + if (!fmt || !(fmt->types & type)) {
2286 +- vpe_err(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
2287 ++ vpe_dbg(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
2288 + pix->pixelformat);
2289 +- return -EINVAL;
2290 ++ fmt = __find_format(V4L2_PIX_FMT_YUYV);
2291 + }
2292 +
2293 + if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE
2294 +@@ -1632,7 +1646,7 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
2295 + &pix->height, MIN_H, MAX_H, H_ALIGN,
2296 + S_ALIGN);
2297 +
2298 +- if (!pix->num_planes)
2299 ++ if (!pix->num_planes || pix->num_planes > 2)
2300 + pix->num_planes = fmt->coplanar ? 2 : 1;
2301 + else if (pix->num_planes > 1 && !fmt->coplanar)
2302 + pix->num_planes = 1;
2303 +@@ -1671,6 +1685,10 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
2304 + if (stride > plane_fmt->bytesperline)
2305 + plane_fmt->bytesperline = stride;
2306 +
2307 ++ plane_fmt->bytesperline = clamp_t(u32, plane_fmt->bytesperline,
2308 ++ stride,
2309 ++ VPDMA_MAX_STRIDE);
2310 ++
2311 + plane_fmt->bytesperline = ALIGN(plane_fmt->bytesperline,
2312 + VPDMA_STRIDE_ALIGN);
2313 +
2314 +@@ -2291,7 +2309,7 @@ static int vpe_open(struct file *file)
2315 + v4l2_ctrl_handler_setup(hdl);
2316 +
2317 + s_q_data = &ctx->q_data[Q_DATA_SRC];
2318 +- s_q_data->fmt = &vpe_formats[2];
2319 ++ s_q_data->fmt = __find_format(V4L2_PIX_FMT_YUYV);
2320 + s_q_data->width = 1920;
2321 + s_q_data->height = 1080;
2322 + s_q_data->nplanes = 1;
2323 +@@ -2369,6 +2387,12 @@ static int vpe_release(struct file *file)
2324 +
2325 + mutex_lock(&dev->dev_mutex);
2326 + free_mv_buffers(ctx);
2327 ++
2328 ++ vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf);
2329 ++ vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb);
2330 ++ vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_h);
2331 ++ vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_v);
2332 ++
2333 + vpdma_free_desc_list(&ctx->desc_list);
2334 + vpdma_free_desc_buf(&ctx->mmr_adb);
2335 +
2336 +diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
2337 +index e3b3ecd14a4d..ae7540b765e1 100644
2338 +--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
2339 ++++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
2340 +@@ -485,6 +485,8 @@ static int si470x_i2c_remove(struct i2c_client *client)
2341 + video_unregister_device(&radio->videodev);
2342 + kfree(radio);
2343 +
2344 ++ v4l2_ctrl_handler_free(&radio->hdl);
2345 ++ v4l2_device_unregister(&radio->v4l2_dev);
2346 + return 0;
2347 + }
2348 +
2349 +diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
2350 +index ac4fddfd0a43..f1807c16438d 100644
2351 +--- a/drivers/media/usb/b2c2/flexcop-usb.c
2352 ++++ b/drivers/media/usb/b2c2/flexcop-usb.c
2353 +@@ -503,7 +503,13 @@ urb_error:
2354 + static int flexcop_usb_init(struct flexcop_usb *fc_usb)
2355 + {
2356 + /* use the alternate setting with the larges buffer */
2357 +- usb_set_interface(fc_usb->udev,0,1);
2358 ++ int ret = usb_set_interface(fc_usb->udev, 0, 1);
2359 ++
2360 ++ if (ret) {
2361 ++ err("set interface failed.");
2362 ++ return ret;
2363 ++ }
2364 ++
2365 + switch (fc_usb->udev->speed) {
2366 + case USB_SPEED_LOW:
2367 + err("cannot handle USB speed because it is too slow.");
2368 +diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
2369 +index e53a80b589a1..04d334152eae 100644
2370 +--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
2371 ++++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
2372 +@@ -916,8 +916,12 @@ static void pvr2_v4l2_internal_check(struct pvr2_channel *chp)
2373 + pvr2_v4l2_dev_disassociate_parent(vp->dev_video);
2374 + pvr2_v4l2_dev_disassociate_parent(vp->dev_radio);
2375 + if (!list_empty(&vp->dev_video->devbase.fh_list) ||
2376 +- !list_empty(&vp->dev_radio->devbase.fh_list))
2377 ++ (vp->dev_radio &&
2378 ++ !list_empty(&vp->dev_radio->devbase.fh_list))) {
2379 ++ pvr2_trace(PVR2_TRACE_STRUCT,
2380 ++ "pvr2_v4l2 internal_check exit-empty id=%p", vp);
2381 + return;
2382 ++ }
2383 + pvr2_v4l2_destroy_no_lock(vp);
2384 + }
2385 +
2386 +@@ -953,7 +957,8 @@ static int pvr2_v4l2_release(struct file *file)
2387 + kfree(fhp);
2388 + if (vp->channel.mc_head->disconnect_flag &&
2389 + list_empty(&vp->dev_video->devbase.fh_list) &&
2390 +- list_empty(&vp->dev_radio->devbase.fh_list)) {
2391 ++ (!vp->dev_radio ||
2392 ++ list_empty(&vp->dev_radio->devbase.fh_list))) {
2393 + pvr2_v4l2_destroy_no_lock(vp);
2394 + }
2395 + return 0;
2396 +diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
2397 +index a4d3e94a400c..7675b645db2e 100644
2398 +--- a/drivers/media/v4l2-core/v4l2-ioctl.c
2399 ++++ b/drivers/media/v4l2-core/v4l2-ioctl.c
2400 +@@ -1415,10 +1415,26 @@ static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops,
2401 + return ret;
2402 + }
2403 +
2404 ++static void v4l_pix_format_touch(struct v4l2_pix_format *p)
2405 ++{
2406 ++ /*
2407 ++ * The v4l2_pix_format structure contains fields that make no sense for
2408 ++ * touch. Set them to default values in this case.
2409 ++ */
2410 ++
2411 ++ p->field = V4L2_FIELD_NONE;
2412 ++ p->colorspace = V4L2_COLORSPACE_RAW;
2413 ++ p->flags = 0;
2414 ++ p->ycbcr_enc = 0;
2415 ++ p->quantization = 0;
2416 ++ p->xfer_func = 0;
2417 ++}
2418 ++
2419 + static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
2420 + struct file *file, void *fh, void *arg)
2421 + {
2422 + struct v4l2_format *p = arg;
2423 ++ struct video_device *vfd = video_devdata(file);
2424 + int ret = check_fmt(file, p->type);
2425 +
2426 + if (ret)
2427 +@@ -1456,6 +1472,8 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
2428 + ret = ops->vidioc_g_fmt_vid_cap(file, fh, arg);
2429 + /* just in case the driver zeroed it again */
2430 + p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
2431 ++ if (vfd->vfl_type == VFL_TYPE_TOUCH)
2432 ++ v4l_pix_format_touch(&p->fmt.pix);
2433 + return ret;
2434 + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
2435 + return ops->vidioc_g_fmt_vid_cap_mplane(file, fh, arg);
2436 +@@ -1491,21 +1509,6 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
2437 + return -EINVAL;
2438 + }
2439 +
2440 +-static void v4l_pix_format_touch(struct v4l2_pix_format *p)
2441 +-{
2442 +- /*
2443 +- * The v4l2_pix_format structure contains fields that make no sense for
2444 +- * touch. Set them to default values in this case.
2445 +- */
2446 +-
2447 +- p->field = V4L2_FIELD_NONE;
2448 +- p->colorspace = V4L2_COLORSPACE_RAW;
2449 +- p->flags = 0;
2450 +- p->ycbcr_enc = 0;
2451 +- p->quantization = 0;
2452 +- p->xfer_func = 0;
2453 +-}
2454 +-
2455 + static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
2456 + struct file *file, void *fh, void *arg)
2457 + {
2458 +diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
2459 +index 673f6a9616cd..9ecf86ba4bb0 100644
2460 +--- a/drivers/mmc/host/mtk-sd.c
2461 ++++ b/drivers/mmc/host/mtk-sd.c
2462 +@@ -228,6 +228,7 @@
2463 + #define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */
2464 + #define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */
2465 +
2466 ++#define MSDC_PATCH_BIT1_CMDTA (0x7 << 3) /* RW */
2467 + #define MSDC_PATCH_BIT1_STOP_DLY (0xf << 8) /* RW */
2468 +
2469 + #define MSDC_PATCH_BIT2_CFGRESP (0x1 << 15) /* RW */
2470 +@@ -1673,6 +1674,7 @@ static int hs400_tune_response(struct mmc_host *mmc, u32 opcode)
2471 +
2472 + /* select EMMC50 PAD CMD tune */
2473 + sdr_set_bits(host->base + PAD_CMD_TUNE, BIT(0));
2474 ++ sdr_set_field(host->base + MSDC_PATCH_BIT1, MSDC_PATCH_BIT1_CMDTA, 2);
2475 +
2476 + if (mmc->ios.timing == MMC_TIMING_MMC_HS200 ||
2477 + mmc->ios.timing == MMC_TIMING_UHS_SDR104)
2478 +diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
2479 +index ad0275191d91..17b2054d9b62 100644
2480 +--- a/drivers/mmc/host/sdhci-msm.c
2481 ++++ b/drivers/mmc/host/sdhci-msm.c
2482 +@@ -108,7 +108,7 @@
2483 +
2484 + #define CORE_PWRSAVE_DLL BIT(3)
2485 +
2486 +-#define DDR_CONFIG_POR_VAL 0x80040853
2487 ++#define DDR_CONFIG_POR_VAL 0x80040873
2488 +
2489 +
2490 + #define INVALID_TUNING_PHASE -1
2491 +@@ -157,8 +157,9 @@ struct sdhci_msm_offset {
2492 + u32 core_ddr_200_cfg;
2493 + u32 core_vendor_spec3;
2494 + u32 core_dll_config_2;
2495 ++ u32 core_dll_config_3;
2496 ++ u32 core_ddr_config_old; /* Applicable to sdcc minor ver < 0x49 */
2497 + u32 core_ddr_config;
2498 +- u32 core_ddr_config_2;
2499 + };
2500 +
2501 + static const struct sdhci_msm_offset sdhci_msm_v5_offset = {
2502 +@@ -186,8 +187,8 @@ static const struct sdhci_msm_offset sdhci_msm_v5_offset = {
2503 + .core_ddr_200_cfg = 0x224,
2504 + .core_vendor_spec3 = 0x250,
2505 + .core_dll_config_2 = 0x254,
2506 +- .core_ddr_config = 0x258,
2507 +- .core_ddr_config_2 = 0x25c,
2508 ++ .core_dll_config_3 = 0x258,
2509 ++ .core_ddr_config = 0x25c,
2510 + };
2511 +
2512 + static const struct sdhci_msm_offset sdhci_msm_mci_offset = {
2513 +@@ -216,8 +217,8 @@ static const struct sdhci_msm_offset sdhci_msm_mci_offset = {
2514 + .core_ddr_200_cfg = 0x184,
2515 + .core_vendor_spec3 = 0x1b0,
2516 + .core_dll_config_2 = 0x1b4,
2517 +- .core_ddr_config = 0x1b8,
2518 +- .core_ddr_config_2 = 0x1bc,
2519 ++ .core_ddr_config_old = 0x1b8,
2520 ++ .core_ddr_config = 0x1bc,
2521 + };
2522 +
2523 + struct sdhci_msm_variant_ops {
2524 +@@ -260,6 +261,7 @@ struct sdhci_msm_host {
2525 + const struct sdhci_msm_offset *offset;
2526 + bool use_cdr;
2527 + u32 transfer_mode;
2528 ++ bool updated_ddr_cfg;
2529 + };
2530 +
2531 + static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host)
2532 +@@ -931,8 +933,10 @@ out:
2533 + static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
2534 + {
2535 + struct mmc_host *mmc = host->mmc;
2536 +- u32 dll_status, config;
2537 ++ u32 dll_status, config, ddr_cfg_offset;
2538 + int ret;
2539 ++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2540 ++ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
2541 + const struct sdhci_msm_offset *msm_offset =
2542 + sdhci_priv_msm_offset(host);
2543 +
2544 +@@ -945,8 +949,11 @@ static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
2545 + * bootloaders. In the future, if this changes, then the desired
2546 + * values will need to be programmed appropriately.
2547 + */
2548 +- writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr +
2549 +- msm_offset->core_ddr_config);
2550 ++ if (msm_host->updated_ddr_cfg)
2551 ++ ddr_cfg_offset = msm_offset->core_ddr_config;
2552 ++ else
2553 ++ ddr_cfg_offset = msm_offset->core_ddr_config_old;
2554 ++ writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr + ddr_cfg_offset);
2555 +
2556 + if (mmc->ios.enhanced_strobe) {
2557 + config = readl_relaxed(host->ioaddr +
2558 +@@ -1862,6 +1869,9 @@ static int sdhci_msm_probe(struct platform_device *pdev)
2559 + msm_offset->core_vendor_spec_capabilities0);
2560 + }
2561 +
2562 ++ if (core_major == 1 && core_minor >= 0x49)
2563 ++ msm_host->updated_ddr_cfg = true;
2564 ++
2565 + /*
2566 + * Power on reset state may trigger power irq if previous status of
2567 + * PWRCTL was either BUS_ON or IO_HIGH_V. So before enabling pwr irq
2568 +diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
2569 +index 6627523e728b..2c9110fee1cc 100644
2570 +--- a/drivers/mmc/host/sdhci-of-esdhc.c
2571 ++++ b/drivers/mmc/host/sdhci-of-esdhc.c
2572 +@@ -648,9 +648,6 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask)
2573 + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2574 + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2575 +
2576 +- if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc"))
2577 +- mdelay(5);
2578 +-
2579 + if (mask & SDHCI_RESET_ALL) {
2580 + val = sdhci_readl(host, ESDHC_TBCTL);
2581 + val &= ~ESDHC_TB_EN;
2582 +@@ -926,8 +923,8 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
2583 + host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
2584 +
2585 + if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) {
2586 +- host->quirks2 |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
2587 +- host->quirks2 |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
2588 ++ host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
2589 ++ host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
2590 + }
2591 +
2592 + if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
2593 +diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
2594 +index 71794391f48f..65985dc3e1a7 100644
2595 +--- a/drivers/mmc/host/sdhci-pci-core.c
2596 ++++ b/drivers/mmc/host/sdhci-pci-core.c
2597 +@@ -30,6 +30,7 @@
2598 + #include <linux/mmc/slot-gpio.h>
2599 + #include <linux/mmc/sdhci-pci-data.h>
2600 + #include <linux/acpi.h>
2601 ++#include <linux/dmi.h>
2602 +
2603 + #include "cqhci.h"
2604 +
2605 +@@ -732,11 +733,18 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
2606 + return 0;
2607 + }
2608 +
2609 ++static bool glk_broken_cqhci(struct sdhci_pci_slot *slot)
2610 ++{
2611 ++ return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
2612 ++ dmi_match(DMI_BIOS_VENDOR, "LENOVO");
2613 ++}
2614 ++
2615 + static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)
2616 + {
2617 + int ret = byt_emmc_probe_slot(slot);
2618 +
2619 +- slot->host->mmc->caps2 |= MMC_CAP2_CQE;
2620 ++ if (!glk_broken_cqhci(slot))
2621 ++ slot->host->mmc->caps2 |= MMC_CAP2_CQE;
2622 +
2623 + if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) {
2624 + slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES,
2625 +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
2626 +index e99d5632d8fa..369817a29c22 100644
2627 +--- a/drivers/mmc/host/sdhci.c
2628 ++++ b/drivers/mmc/host/sdhci.c
2629 +@@ -1713,9 +1713,7 @@ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
2630 + ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
2631 + else if (timing == MMC_TIMING_UHS_SDR12)
2632 + ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
2633 +- else if (timing == MMC_TIMING_SD_HS ||
2634 +- timing == MMC_TIMING_MMC_HS ||
2635 +- timing == MMC_TIMING_UHS_SDR25)
2636 ++ else if (timing == MMC_TIMING_UHS_SDR25)
2637 + ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
2638 + else if (timing == MMC_TIMING_UHS_SDR50)
2639 + ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
2640 +@@ -2246,8 +2244,8 @@ static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
2641 + sdhci_send_tuning(host, opcode);
2642 +
2643 + if (!host->tuning_done) {
2644 +- pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n",
2645 +- mmc_hostname(host->mmc));
2646 ++ pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n",
2647 ++ mmc_hostname(host->mmc));
2648 + sdhci_abort_tuning(host, opcode);
2649 + return;
2650 + }
2651 +@@ -3551,6 +3549,9 @@ int sdhci_setup_host(struct sdhci_host *host)
2652 + mmc_hostname(mmc), host->version);
2653 + }
2654 +
2655 ++ if (host->quirks & SDHCI_QUIRK_BROKEN_CQE)
2656 ++ mmc->caps2 &= ~MMC_CAP2_CQE;
2657 ++
2658 + if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
2659 + host->flags |= SDHCI_USE_SDMA;
2660 + else if (!(host->caps & SDHCI_CAN_DO_SDMA))
2661 +diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
2662 +index 0f8c4f3ccafc..c0372e3443fd 100644
2663 +--- a/drivers/mmc/host/sdhci.h
2664 ++++ b/drivers/mmc/host/sdhci.h
2665 +@@ -391,6 +391,8 @@ struct sdhci_host {
2666 + #define SDHCI_QUIRK_BROKEN_CARD_DETECTION (1<<15)
2667 + /* Controller reports inverted write-protect state */
2668 + #define SDHCI_QUIRK_INVERTED_WRITE_PROTECT (1<<16)
2669 ++/* Controller has unusable command queue engine */
2670 ++#define SDHCI_QUIRK_BROKEN_CQE (1<<17)
2671 + /* Controller does not like fast PIO transfers */
2672 + #define SDHCI_QUIRK_PIO_NEEDS_DELAY (1<<18)
2673 + /* Controller has to be forced to use block size of 2048 bytes */
2674 +diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
2675 +index 35630ccbe9e5..33c9ca8f14a9 100644
2676 +--- a/drivers/mmc/host/tmio_mmc_core.c
2677 ++++ b/drivers/mmc/host/tmio_mmc_core.c
2678 +@@ -1267,7 +1267,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
2679 + return ret;
2680 + }
2681 +
2682 +- mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
2683 ++ mmc->caps |= MMC_CAP_ERASE | MMC_CAP_4_BIT_DATA | pdata->capabilities;
2684 + mmc->caps2 |= pdata->capabilities2;
2685 + mmc->max_segs = pdata->max_segs ? : 32;
2686 + mmc->max_blk_size = 512;
2687 +diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
2688 +index 07d2f3aa2c02..ae4c37e1bb75 100644
2689 +--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
2690 ++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
2691 +@@ -608,7 +608,7 @@ static int kvaser_usb_leaf_simple_cmd_async(struct kvaser_usb_net_priv *priv,
2692 + struct kvaser_cmd *cmd;
2693 + int err;
2694 +
2695 +- cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
2696 ++ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
2697 + if (!cmd)
2698 + return -ENOMEM;
2699 +
2700 +@@ -1140,7 +1140,7 @@ static int kvaser_usb_leaf_set_opt_mode(const struct kvaser_usb_net_priv *priv)
2701 + struct kvaser_cmd *cmd;
2702 + int rc;
2703 +
2704 +- cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
2705 ++ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2706 + if (!cmd)
2707 + return -ENOMEM;
2708 +
2709 +@@ -1206,7 +1206,7 @@ static int kvaser_usb_leaf_flush_queue(struct kvaser_usb_net_priv *priv)
2710 + struct kvaser_cmd *cmd;
2711 + int rc;
2712 +
2713 +- cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
2714 ++ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2715 + if (!cmd)
2716 + return -ENOMEM;
2717 +
2718 +diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
2719 +index d3ce1e4cb4d3..dbfb6ad80fac 100644
2720 +--- a/drivers/net/dsa/Kconfig
2721 ++++ b/drivers/net/dsa/Kconfig
2722 +@@ -66,6 +66,7 @@ config NET_DSA_REALTEK_SMI
2723 + config NET_DSA_SMSC_LAN9303
2724 + tristate
2725 + select NET_DSA_TAG_LAN9303
2726 ++ select REGMAP
2727 + ---help---
2728 + This enables support for the SMSC/Microchip LAN9303 3 port ethernet
2729 + switch chips.
2730 +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
2731 +index 62da46537734..ab60f4f9cc24 100644
2732 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
2733 ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
2734 +@@ -2394,15 +2394,21 @@ static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
2735 + /* send the ramrod on all the queues of the PF */
2736 + for_each_eth_queue(bp, i) {
2737 + struct bnx2x_fastpath *fp = &bp->fp[i];
2738 ++ int tx_idx;
2739 +
2740 + /* Set the appropriate Queue object */
2741 + q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
2742 +
2743 +- /* Update the Queue state */
2744 +- rc = bnx2x_queue_state_change(bp, &q_params);
2745 +- if (rc) {
2746 +- BNX2X_ERR("Failed to configure Tx switching\n");
2747 +- return rc;
2748 ++ for (tx_idx = FIRST_TX_COS_INDEX;
2749 ++ tx_idx < fp->max_cos; tx_idx++) {
2750 ++ q_params.params.update.cid_index = tx_idx;
2751 ++
2752 ++ /* Update the Queue state */
2753 ++ rc = bnx2x_queue_state_change(bp, &q_params);
2754 ++ if (rc) {
2755 ++ BNX2X_ERR("Failed to configure Tx switching\n");
2756 ++ return rc;
2757 ++ }
2758 + }
2759 + }
2760 +
2761 +diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
2762 +index 4af78de0e077..01a212097836 100644
2763 +--- a/drivers/net/ethernet/cortina/gemini.c
2764 ++++ b/drivers/net/ethernet/cortina/gemini.c
2765 +@@ -577,6 +577,8 @@ static int gmac_setup_txqs(struct net_device *netdev)
2766 +
2767 + if (port->txq_dma_base & ~DMA_Q_BASE_MASK) {
2768 + dev_warn(geth->dev, "TX queue base is not aligned\n");
2769 ++ dma_free_coherent(geth->dev, len * sizeof(*desc_ring),
2770 ++ desc_ring, port->txq_dma_base);
2771 + kfree(skb_tab);
2772 + return -ENOMEM;
2773 + }
2774 +diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
2775 +index e8936ae46add..718afa4be2a0 100644
2776 +--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
2777 ++++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
2778 +@@ -456,9 +456,9 @@ hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2779 + skb_tx_timestamp(skb);
2780 +
2781 + hip04_set_xmit_desc(priv, phys);
2782 +- priv->tx_head = TX_NEXT(tx_head);
2783 + count++;
2784 + netdev_sent_queue(ndev, skb->len);
2785 ++ priv->tx_head = TX_NEXT(tx_head);
2786 +
2787 + stats->tx_bytes += skb->len;
2788 + stats->tx_packets++;
2789 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
2790 +index 0788e78747d9..1aaf6e2a3b39 100644
2791 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
2792 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
2793 +@@ -1474,6 +1474,9 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
2794 + time_after(jiffies,
2795 + (trans_start + ndev->watchdog_timeo))) {
2796 + timeout_queue = i;
2797 ++ netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n",
2798 ++ q->state,
2799 ++ jiffies_to_msecs(jiffies - trans_start));
2800 + break;
2801 + }
2802 + }
2803 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
2804 +index 1a66373184d6..23b31b2ff5cc 100644
2805 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
2806 ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
2807 +@@ -3441,14 +3441,14 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
2808 + q_vector->rx.target_itr =
2809 + ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
2810 + wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
2811 +- q_vector->rx.target_itr);
2812 ++ q_vector->rx.target_itr >> 1);
2813 + q_vector->rx.current_itr = q_vector->rx.target_itr;
2814 +
2815 + q_vector->tx.next_update = jiffies + 1;
2816 + q_vector->tx.target_itr =
2817 + ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
2818 + wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
2819 +- q_vector->tx.target_itr);
2820 ++ q_vector->tx.target_itr >> 1);
2821 + q_vector->tx.current_itr = q_vector->tx.target_itr;
2822 +
2823 + wr32(hw, I40E_PFINT_RATEN(vector - 1),
2824 +@@ -3553,11 +3553,11 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
2825 + /* set the ITR configuration */
2826 + q_vector->rx.next_update = jiffies + 1;
2827 + q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
2828 +- wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr);
2829 ++ wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1);
2830 + q_vector->rx.current_itr = q_vector->rx.target_itr;
2831 + q_vector->tx.next_update = jiffies + 1;
2832 + q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
2833 +- wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr);
2834 ++ wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1);
2835 + q_vector->tx.current_itr = q_vector->tx.target_itr;
2836 +
2837 + i40e_enable_misc_int_causes(pf);
2838 +@@ -10735,7 +10735,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
2839 +
2840 + /* associate no queues to the misc vector */
2841 + wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
2842 +- wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
2843 ++ wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K >> 1);
2844 +
2845 + i40e_flush(hw);
2846 +
2847 +diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
2848 +index 89f18fe18fe3..921cc0c9a30d 100644
2849 +--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
2850 ++++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
2851 +@@ -911,7 +911,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
2852 + if (ice_sq_done(hw, cq))
2853 + break;
2854 +
2855 +- mdelay(1);
2856 ++ udelay(ICE_CTL_Q_SQ_CMD_USEC);
2857 + total_delay++;
2858 + } while (total_delay < cq->sq_cmd_timeout);
2859 +
2860 +diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
2861 +index ea02b89243e2..0f2cdb06e6ef 100644
2862 +--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
2863 ++++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
2864 +@@ -30,8 +30,9 @@ enum ice_ctl_q {
2865 + ICE_CTL_Q_ADMIN,
2866 + };
2867 +
2868 +-/* Control Queue default settings */
2869 +-#define ICE_CTL_Q_SQ_CMD_TIMEOUT 250 /* msecs */
2870 ++/* Control Queue timeout settings - max delay 250ms */
2871 ++#define ICE_CTL_Q_SQ_CMD_TIMEOUT 2500 /* Count 2500 times */
2872 ++#define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */
2873 +
2874 + struct ice_ctl_q_ring {
2875 + void *dma_head; /* Virtual address to dma head */
2876 +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2877 +index b45a6e2ed8d1..de65ca1e6558 100644
2878 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2879 ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2880 +@@ -8551,7 +8551,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
2881 +
2882 + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2883 + adapter->ptp_clock) {
2884 +- if (!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
2885 ++ if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
2886 ++ !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
2887 + &adapter->state)) {
2888 + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2889 + tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
2890 +diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
2891 +index b16ce7d93caf..c3d5d40afec0 100644
2892 +--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
2893 ++++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
2894 +@@ -1230,7 +1230,7 @@ qede_configure_mcast_filtering(struct net_device *ndev,
2895 + netif_addr_lock_bh(ndev);
2896 +
2897 + mc_count = netdev_mc_count(ndev);
2898 +- if (mc_count < 64) {
2899 ++ if (mc_count <= 64) {
2900 + netdev_for_each_mc_addr(ha, ndev) {
2901 + ether_addr_copy(temp, ha->addr);
2902 + temp += ETH_ALEN;
2903 +diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
2904 +index 630b13a9c3d5..0d8e39ffbcd1 100644
2905 +--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
2906 ++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
2907 +@@ -1362,6 +1362,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
2908 + rxq->rx_buf_seg_size = roundup_pow_of_two(size);
2909 + } else {
2910 + rxq->rx_buf_seg_size = PAGE_SIZE;
2911 ++ edev->ndev->features &= ~NETIF_F_GRO_HW;
2912 + }
2913 +
2914 + /* Allocate the parallel driver ring for Rx buffers */
2915 +@@ -1406,6 +1407,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
2916 + }
2917 + }
2918 +
2919 ++ edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
2920 + if (!edev->gro_disable)
2921 + qede_set_tpa_param(rxq);
2922 + err:
2923 +@@ -1606,8 +1608,6 @@ static void qede_init_fp(struct qede_dev *edev)
2924 + snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
2925 + edev->ndev->name, queue_id);
2926 + }
2927 +-
2928 +- edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
2929 + }
2930 +
2931 + static int qede_set_real_num_queues(struct qede_dev *edev)
2932 +diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
2933 +index 783ee6a32b5d..1b5e098b2367 100644
2934 +--- a/drivers/net/ethernet/qlogic/qla3xxx.c
2935 ++++ b/drivers/net/ethernet/qlogic/qla3xxx.c
2936 +@@ -2757,6 +2757,9 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2937 + int err;
2938 +
2939 + for (i = 0; i < qdev->num_large_buffers; i++) {
2940 ++ lrg_buf_cb = &qdev->lrg_buf[i];
2941 ++ memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2942 ++
2943 + skb = netdev_alloc_skb(qdev->ndev,
2944 + qdev->lrg_buffer_len);
2945 + if (unlikely(!skb)) {
2946 +@@ -2767,11 +2770,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2947 + ql_free_large_buffers(qdev);
2948 + return -ENOMEM;
2949 + } else {
2950 +-
2951 +- lrg_buf_cb = &qdev->lrg_buf[i];
2952 +- memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2953 + lrg_buf_cb->index = i;
2954 +- lrg_buf_cb->skb = skb;
2955 + /*
2956 + * We save some space to copy the ethhdr from first
2957 + * buffer
2958 +@@ -2793,6 +2792,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2959 + return -ENOMEM;
2960 + }
2961 +
2962 ++ lrg_buf_cb->skb = skb;
2963 + dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
2964 + dma_unmap_len_set(lrg_buf_cb, maplen,
2965 + qdev->lrg_buffer_len -
2966 +diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
2967 +index 5766225a4ce1..c245629a38c7 100644
2968 +--- a/drivers/net/ethernet/ti/cpsw_ale.c
2969 ++++ b/drivers/net/ethernet/ti/cpsw_ale.c
2970 +@@ -793,6 +793,7 @@ EXPORT_SYMBOL_GPL(cpsw_ale_start);
2971 + void cpsw_ale_stop(struct cpsw_ale *ale)
2972 + {
2973 + del_timer_sync(&ale->timer);
2974 ++ cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
2975 + cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
2976 + }
2977 + EXPORT_SYMBOL_GPL(cpsw_ale_stop);
2978 +@@ -877,6 +878,7 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
2979 + ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS;
2980 + }
2981 +
2982 ++ cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
2983 + return ale;
2984 + }
2985 + EXPORT_SYMBOL_GPL(cpsw_ale_create);
2986 +diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
2987 +index 61a9843346ad..1979f8f8dac7 100644
2988 +--- a/drivers/net/fjes/fjes_main.c
2989 ++++ b/drivers/net/fjes/fjes_main.c
2990 +@@ -181,6 +181,9 @@ static int fjes_acpi_add(struct acpi_device *device)
2991 + /* create platform_device */
2992 + plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
2993 + ARRAY_SIZE(fjes_resource));
2994 ++ if (IS_ERR(plat_dev))
2995 ++ return PTR_ERR(plat_dev);
2996 ++
2997 + device->driver_data = plat_dev;
2998 +
2999 + return 0;
3000 +diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
3001 +index eeadfde15940..879096d3ff41 100644
3002 +--- a/drivers/net/phy/dp83867.c
3003 ++++ b/drivers/net/phy/dp83867.c
3004 +@@ -86,6 +86,10 @@
3005 + #define DP83867_IO_MUX_CFG_CLK_O_SEL_MASK (0x1f << 8)
3006 + #define DP83867_IO_MUX_CFG_CLK_O_SEL_SHIFT 8
3007 +
3008 ++/* CFG3 bits */
3009 ++#define DP83867_CFG3_INT_OE BIT(7)
3010 ++#define DP83867_CFG3_ROBUST_AUTO_MDIX BIT(9)
3011 ++
3012 + /* CFG4 bits */
3013 + #define DP83867_CFG4_PORT_MIRROR_EN BIT(0)
3014 +
3015 +@@ -331,12 +335,13 @@ static int dp83867_config_init(struct phy_device *phydev)
3016 + return ret;
3017 + }
3018 +
3019 ++ val = phy_read(phydev, DP83867_CFG3);
3020 + /* Enable Interrupt output INT_OE in CFG3 register */
3021 +- if (phy_interrupt_is_valid(phydev)) {
3022 +- val = phy_read(phydev, DP83867_CFG3);
3023 +- val |= BIT(7);
3024 +- phy_write(phydev, DP83867_CFG3, val);
3025 +- }
3026 ++ if (phy_interrupt_is_valid(phydev))
3027 ++ val |= DP83867_CFG3_INT_OE;
3028 ++
3029 ++ val |= DP83867_CFG3_ROBUST_AUTO_MDIX;
3030 ++ phy_write(phydev, DP83867_CFG3, val);
3031 +
3032 + if (dp83867->port_mirroring != DP83867_PORT_MIRROING_KEEP)
3033 + dp83867_config_port_mirroring(phydev);
3034 +diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
3035 +index 6144146aec29..43c4f358eeb8 100644
3036 +--- a/drivers/net/phy/phy_device.c
3037 ++++ b/drivers/net/phy/phy_device.c
3038 +@@ -420,8 +420,8 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
3039 + mdiodev->device_free = phy_mdio_device_free;
3040 + mdiodev->device_remove = phy_mdio_device_remove;
3041 +
3042 +- dev->speed = 0;
3043 +- dev->duplex = -1;
3044 ++ dev->speed = SPEED_UNKNOWN;
3045 ++ dev->duplex = DUPLEX_UNKNOWN;
3046 + dev->pause = 0;
3047 + dev->asym_pause = 0;
3048 + dev->link = 0;
3049 +diff --git a/drivers/net/tun.c b/drivers/net/tun.c
3050 +index e1ac1c57089f..bbd92221c6ca 100644
3051 +--- a/drivers/net/tun.c
3052 ++++ b/drivers/net/tun.c
3053 +@@ -319,8 +319,8 @@ static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
3054 + tfile->napi_enabled = napi_en;
3055 + tfile->napi_frags_enabled = napi_en && napi_frags;
3056 + if (napi_en) {
3057 +- netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
3058 +- NAPI_POLL_WEIGHT);
3059 ++ netif_tx_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
3060 ++ NAPI_POLL_WEIGHT);
3061 + napi_enable(&tfile->napi);
3062 + }
3063 + }
3064 +diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
3065 +index 50bf4b2080d5..6dd24a1ca10d 100644
3066 +--- a/drivers/net/usb/lan78xx.c
3067 ++++ b/drivers/net/usb/lan78xx.c
3068 +@@ -1823,6 +1823,7 @@ static int lan78xx_mdio_init(struct lan78xx_net *dev)
3069 + dev->mdiobus->read = lan78xx_mdiobus_read;
3070 + dev->mdiobus->write = lan78xx_mdiobus_write;
3071 + dev->mdiobus->name = "lan78xx-mdiobus";
3072 ++ dev->mdiobus->parent = &dev->udev->dev;
3073 +
3074 + snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
3075 + dev->udev->bus->busnum, dev->udev->devnum);
3076 +diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c
3077 +index 4d28063052fe..385b84f24322 100644
3078 +--- a/drivers/net/wireless/ath/ath10k/coredump.c
3079 ++++ b/drivers/net/wireless/ath/ath10k/coredump.c
3080 +@@ -1105,9 +1105,11 @@ static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar)
3081 + dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
3082 + dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_RAM_DATA);
3083 + dump_tlv->tlv_len = cpu_to_le32(crash_data->ramdump_buf_len);
3084 +- memcpy(dump_tlv->tlv_data, crash_data->ramdump_buf,
3085 +- crash_data->ramdump_buf_len);
3086 +- sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
3087 ++ if (crash_data->ramdump_buf_len) {
3088 ++ memcpy(dump_tlv->tlv_data, crash_data->ramdump_buf,
3089 ++ crash_data->ramdump_buf_len);
3090 ++ sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
3091 ++ }
3092 + }
3093 +
3094 + spin_unlock_bh(&ar->data_lock);
3095 +@@ -1154,6 +1156,9 @@ int ath10k_coredump_register(struct ath10k *ar)
3096 + if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) {
3097 + crash_data->ramdump_buf_len = ath10k_coredump_get_ramdump_size(ar);
3098 +
3099 ++ if (!crash_data->ramdump_buf_len)
3100 ++ return 0;
3101 ++
3102 + crash_data->ramdump_buf = vzalloc(crash_data->ramdump_buf_len);
3103 + if (!crash_data->ramdump_buf)
3104 + return -ENOMEM;
3105 +diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
3106 +index 613ca74f1b28..448e3a8c33a6 100644
3107 +--- a/drivers/net/wireless/ath/ath10k/mac.c
3108 ++++ b/drivers/net/wireless/ath/ath10k/mac.c
3109 +@@ -3651,7 +3651,7 @@ static int ath10k_mac_tx(struct ath10k *ar,
3110 + struct ieee80211_vif *vif,
3111 + enum ath10k_hw_txrx_mode txmode,
3112 + enum ath10k_mac_tx_path txpath,
3113 +- struct sk_buff *skb)
3114 ++ struct sk_buff *skb, bool noque_offchan)
3115 + {
3116 + struct ieee80211_hw *hw = ar->hw;
3117 + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3118 +@@ -3679,10 +3679,10 @@ static int ath10k_mac_tx(struct ath10k *ar,
3119 + }
3120 + }
3121 +
3122 +- if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
3123 ++ if (!noque_offchan && info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
3124 + if (!ath10k_mac_tx_frm_has_freq(ar)) {
3125 +- ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %pK\n",
3126 +- skb);
3127 ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac queued offchannel skb %pK len %d\n",
3128 ++ skb, skb->len);
3129 +
3130 + skb_queue_tail(&ar->offchan_tx_queue, skb);
3131 + ieee80211_queue_work(hw, &ar->offchan_tx_work);
3132 +@@ -3744,8 +3744,8 @@ void ath10k_offchan_tx_work(struct work_struct *work)
3133 +
3134 + mutex_lock(&ar->conf_mutex);
3135 +
3136 +- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK\n",
3137 +- skb);
3138 ++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK len %d\n",
3139 ++ skb, skb->len);
3140 +
3141 + hdr = (struct ieee80211_hdr *)skb->data;
3142 + peer_addr = ieee80211_get_DA(hdr);
3143 +@@ -3791,7 +3791,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
3144 + txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
3145 + txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
3146 +
3147 +- ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb);
3148 ++ ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, true);
3149 + if (ret) {
3150 + ath10k_warn(ar, "failed to transmit offchannel frame: %d\n",
3151 + ret);
3152 +@@ -3801,8 +3801,8 @@ void ath10k_offchan_tx_work(struct work_struct *work)
3153 + time_left =
3154 + wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
3155 + if (time_left == 0)
3156 +- ath10k_warn(ar, "timed out waiting for offchannel skb %pK\n",
3157 +- skb);
3158 ++ ath10k_warn(ar, "timed out waiting for offchannel skb %pK, len: %d\n",
3159 ++ skb, skb->len);
3160 +
3161 + if (!peer && tmp_peer_created) {
3162 + ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
3163 +@@ -3844,8 +3844,10 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
3164 + ar->running_fw->fw_file.fw_features)) {
3165 + paddr = dma_map_single(ar->dev, skb->data,
3166 + skb->len, DMA_TO_DEVICE);
3167 +- if (!paddr)
3168 ++ if (dma_mapping_error(ar->dev, paddr)) {
3169 ++ ieee80211_free_txskb(ar->hw, skb);
3170 + continue;
3171 ++ }
3172 + ret = ath10k_wmi_mgmt_tx_send(ar, skb, paddr);
3173 + if (ret) {
3174 + ath10k_warn(ar, "failed to transmit management frame by ref via WMI: %d\n",
3175 +@@ -3998,7 +4000,7 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
3176 + spin_unlock_bh(&ar->htt.tx_lock);
3177 + }
3178 +
3179 +- ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb);
3180 ++ ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, false);
3181 + if (unlikely(ret)) {
3182 + ath10k_warn(ar, "failed to push frame: %d\n", ret);
3183 +
3184 +@@ -4280,7 +4282,7 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
3185 + spin_unlock_bh(&ar->htt.tx_lock);
3186 + }
3187 +
3188 +- ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb);
3189 ++ ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, false);
3190 + if (ret) {
3191 + ath10k_warn(ar, "failed to transmit frame: %d\n", ret);
3192 + if (is_htt) {
3193 +diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
3194 +index 6f62ddc0494c..6c47e4b6aa6c 100644
3195 +--- a/drivers/net/wireless/ath/ath10k/txrx.c
3196 ++++ b/drivers/net/wireless/ath/ath10k/txrx.c
3197 +@@ -101,6 +101,8 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
3198 +
3199 + info = IEEE80211_SKB_CB(msdu);
3200 + memset(&info->status, 0, sizeof(info->status));
3201 ++ info->status.rates[0].idx = -1;
3202 ++
3203 + trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id);
3204 +
3205 + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
3206 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
3207 +index 0f56be13c7ad..584e05fdca6a 100644
3208 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
3209 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
3210 +@@ -1246,6 +1246,11 @@ void brcmf_detach(struct device *dev)
3211 +
3212 + brcmf_proto_detach_pre_delif(drvr);
3213 +
3214 ++ if (drvr->mon_if) {
3215 ++ brcmf_net_detach(drvr->mon_if->ndev, false);
3216 ++ drvr->mon_if = NULL;
3217 ++ }
3218 ++
3219 + /* make sure primary interface removed last */
3220 + for (i = BRCMF_MAX_IFS-1; i > -1; i--)
3221 + brcmf_remove_interface(drvr->iflist[i], false);
3222 +diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/led.c b/drivers/net/wireless/intel/iwlwifi/dvm/led.c
3223 +index 1bbd17ada974..20e16c423990 100644
3224 +--- a/drivers/net/wireless/intel/iwlwifi/dvm/led.c
3225 ++++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.c
3226 +@@ -185,6 +185,9 @@ void iwl_leds_init(struct iwl_priv *priv)
3227 +
3228 + priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
3229 + wiphy_name(priv->hw->wiphy));
3230 ++ if (!priv->led.name)
3231 ++ return;
3232 ++
3233 + priv->led.brightness_set = iwl_led_brightness_set;
3234 + priv->led.blink_set = iwl_led_blink_set;
3235 + priv->led.max_brightness = 1;
3236 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/led.c b/drivers/net/wireless/intel/iwlwifi/mvm/led.c
3237 +index b27269504a62..072f80c90ce4 100644
3238 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/led.c
3239 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/led.c
3240 +@@ -131,6 +131,9 @@ int iwl_mvm_leds_init(struct iwl_mvm *mvm)
3241 +
3242 + mvm->led.name = kasprintf(GFP_KERNEL, "%s-led",
3243 + wiphy_name(mvm->hw->wiphy));
3244 ++ if (!mvm->led.name)
3245 ++ return -ENOMEM;
3246 ++
3247 + mvm->led.brightness_set = iwl_led_brightness_set;
3248 + mvm->led.max_brightness = 1;
3249 +
3250 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
3251 +index bfb163419c67..e6a67bc02209 100644
3252 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
3253 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
3254 +@@ -62,6 +62,7 @@
3255 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
3256 + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3257 + *****************************************************************************/
3258 ++#include <asm/unaligned.h>
3259 + #include <linux/etherdevice.h>
3260 + #include <linux/skbuff.h>
3261 + #include "iwl-trans.h"
3262 +@@ -360,7 +361,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
3263 + rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data;
3264 + hdr = (struct ieee80211_hdr *)(pkt->data + sizeof(*rx_res));
3265 + len = le16_to_cpu(rx_res->byte_count);
3266 +- rx_pkt_status = le32_to_cpup((__le32 *)
3267 ++ rx_pkt_status = get_unaligned_le32((__le32 *)
3268 + (pkt->data + sizeof(*rx_res) + len));
3269 +
3270 + /* Dont use dev_alloc_skb(), we'll have enough headroom once
3271 +diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
3272 +index 39bf85d0ade0..c7f8a29d2606 100644
3273 +--- a/drivers/net/wireless/marvell/libertas/if_sdio.c
3274 ++++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
3275 +@@ -1183,6 +1183,10 @@ static int if_sdio_probe(struct sdio_func *func,
3276 +
3277 + spin_lock_init(&card->lock);
3278 + card->workqueue = alloc_workqueue("libertas_sdio", WQ_MEM_RECLAIM, 0);
3279 ++ if (unlikely(!card->workqueue)) {
3280 ++ ret = -ENOMEM;
3281 ++ goto err_queue;
3282 ++ }
3283 + INIT_WORK(&card->packet_worker, if_sdio_host_to_card_worker);
3284 + init_waitqueue_head(&card->pwron_waitq);
3285 +
3286 +@@ -1234,6 +1238,7 @@ err_activate_card:
3287 + lbs_remove_card(priv);
3288 + free:
3289 + destroy_workqueue(card->workqueue);
3290 ++err_queue:
3291 + while (card->packets) {
3292 + packet = card->packets;
3293 + card->packets = card->packets->next;
3294 +diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
3295 +index 3fe81b2a929a..918c69936540 100644
3296 +--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
3297 ++++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
3298 +@@ -691,8 +691,11 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter)
3299 + skb_put(skb, MAX_EVENT_SIZE);
3300 +
3301 + if (mwifiex_map_pci_memory(adapter, skb, MAX_EVENT_SIZE,
3302 +- PCI_DMA_FROMDEVICE))
3303 ++ PCI_DMA_FROMDEVICE)) {
3304 ++ kfree_skb(skb);
3305 ++ kfree(card->evtbd_ring_vbase);
3306 + return -1;
3307 ++ }
3308 +
3309 + buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
3310 +
3311 +diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
3312 +index 8828baf26e7b..47c2bfe06d03 100644
3313 +--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
3314 ++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
3315 +@@ -1349,6 +1349,7 @@ struct rtl8xxxu_fileops {
3316 + u8 has_s0s1:1;
3317 + u8 has_tx_report:1;
3318 + u8 gen2_thermal_meter:1;
3319 ++ u8 needs_full_init:1;
3320 + u32 adda_1t_init;
3321 + u32 adda_1t_path_on;
3322 + u32 adda_2t_path_on_a;
3323 +diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
3324 +index 26b674aca125..14e207f2466c 100644
3325 +--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
3326 ++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
3327 +@@ -1673,6 +1673,7 @@ struct rtl8xxxu_fileops rtl8723bu_fops = {
3328 + .has_s0s1 = 1,
3329 + .has_tx_report = 1,
3330 + .gen2_thermal_meter = 1,
3331 ++ .needs_full_init = 1,
3332 + .adda_1t_init = 0x01c00014,
3333 + .adda_1t_path_on = 0x01c00014,
3334 + .adda_2t_path_on_a = 0x01c00014,
3335 +diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
3336 +index 2b4fcdf4ec5b..66c6ee70f00a 100644
3337 +--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
3338 ++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
3339 +@@ -3905,6 +3905,9 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
3340 + else
3341 + macpower = true;
3342 +
3343 ++ if (fops->needs_full_init)
3344 ++ macpower = false;
3345 ++
3346 + ret = fops->power_on(priv);
3347 + if (ret < 0) {
3348 + dev_warn(dev, "%s: Failed power on\n", __func__);
3349 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
3350 +index 1e60f70481f5..8c60a84941d5 100644
3351 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
3352 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
3353 +@@ -1556,6 +1556,8 @@ static bool usb_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb)
3354 + * This is maybe necessary:
3355 + * rtlpriv->cfg->ops->fill_tx_cmddesc(hw, buffer, 1, 1, skb);
3356 + */
3357 ++ dev_kfree_skb(skb);
3358 ++
3359 + return true;
3360 + }
3361 +
3362 +diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
3363 +index 5adb939afee8..1181b725f503 100644
3364 +--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
3365 ++++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
3366 +@@ -1050,8 +1050,10 @@ int rtl_usb_probe(struct usb_interface *intf,
3367 + rtlpriv->hw = hw;
3368 + rtlpriv->usb_data = kcalloc(RTL_USB_MAX_RX_COUNT, sizeof(u32),
3369 + GFP_KERNEL);
3370 +- if (!rtlpriv->usb_data)
3371 ++ if (!rtlpriv->usb_data) {
3372 ++ ieee80211_free_hw(hw);
3373 + return -ENOMEM;
3374 ++ }
3375 +
3376 + /* this spin lock must be initialized early */
3377 + spin_lock_init(&rtlpriv->locks.usb_lock);
3378 +@@ -1112,6 +1114,7 @@ error_out2:
3379 + _rtl_usb_io_handler_release(hw);
3380 + usb_put_dev(udev);
3381 + complete(&rtlpriv->firmware_loading_complete);
3382 ++ kfree(rtlpriv->usb_data);
3383 + return -ENODEV;
3384 + }
3385 + EXPORT_SYMBOL(rtl_usb_probe);
3386 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
3387 +index b2d9bd564960..b7bd89b3b2f9 100644
3388 +--- a/drivers/nvme/host/core.c
3389 ++++ b/drivers/nvme/host/core.c
3390 +@@ -551,8 +551,14 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
3391 + struct nvme_dsm_range *range;
3392 + struct bio *bio;
3393 +
3394 +- range = kmalloc_array(segments, sizeof(*range),
3395 +- GFP_ATOMIC | __GFP_NOWARN);
3396 ++ /*
3397 ++ * Some devices do not consider the DSM 'Number of Ranges' field when
3398 ++ * determining how much data to DMA. Always allocate memory for maximum
3399 ++ * number of segments to prevent device reading beyond end of buffer.
3400 ++ */
3401 ++ static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;
3402 ++
3403 ++ range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
3404 + if (!range) {
3405 + /*
3406 + * If we fail allocation our range, fallback to the controller
3407 +@@ -593,7 +599,7 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
3408 +
3409 + req->special_vec.bv_page = virt_to_page(range);
3410 + req->special_vec.bv_offset = offset_in_page(range);
3411 +- req->special_vec.bv_len = sizeof(*range) * segments;
3412 ++ req->special_vec.bv_len = alloc_size;
3413 + req->rq_flags |= RQF_SPECIAL_PAYLOAD;
3414 +
3415 + return BLK_STS_OK;
3416 +diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
3417 +index afb429a417fe..926d9cc080cf 100644
3418 +--- a/drivers/nvmem/imx-ocotp.c
3419 ++++ b/drivers/nvmem/imx-ocotp.c
3420 +@@ -466,6 +466,10 @@ static int imx_ocotp_probe(struct platform_device *pdev)
3421 + if (IS_ERR(priv->clk))
3422 + return PTR_ERR(priv->clk);
3423 +
3424 ++ clk_prepare_enable(priv->clk);
3425 ++ imx_ocotp_clr_err_if_set(priv->base);
3426 ++ clk_disable_unprepare(priv->clk);
3427 ++
3428 + priv->params = of_device_get_match_data(&pdev->dev);
3429 + imx_ocotp_nvmem_config.size = 4 * priv->params->nregs;
3430 + imx_ocotp_nvmem_config.dev = dev;
3431 +diff --git a/drivers/parport/share.c b/drivers/parport/share.c
3432 +index 7b4ee33c1935..15c81cffd2de 100644
3433 +--- a/drivers/parport/share.c
3434 ++++ b/drivers/parport/share.c
3435 +@@ -230,6 +230,18 @@ static int port_check(struct device *dev, void *dev_drv)
3436 + return 0;
3437 + }
3438 +
3439 ++/*
3440 ++ * Iterates through all the devices connected to the bus and return 1
3441 ++ * if the device is a parallel port.
3442 ++ */
3443 ++
3444 ++static int port_detect(struct device *dev, void *dev_drv)
3445 ++{
3446 ++ if (is_parport(dev))
3447 ++ return 1;
3448 ++ return 0;
3449 ++}
3450 ++
3451 + /**
3452 + * parport_register_driver - register a parallel port device driver
3453 + * @drv: structure describing the driver
3454 +@@ -282,6 +294,15 @@ int __parport_register_driver(struct parport_driver *drv, struct module *owner,
3455 + if (ret)
3456 + return ret;
3457 +
3458 ++ /*
3459 ++ * check if bus has any parallel port registered, if
3460 ++ * none is found then load the lowlevel driver.
3461 ++ */
3462 ++ ret = bus_for_each_dev(&parport_bus_type, NULL, NULL,
3463 ++ port_detect);
3464 ++ if (!ret)
3465 ++ get_lowlevel_driver();
3466 ++
3467 + mutex_lock(&registration_lock);
3468 + if (drv->match_port)
3469 + bus_for_each_dev(&parport_bus_type, NULL, drv,
3470 +diff --git a/drivers/phy/qualcomm/phy-qcom-usb-hs.c b/drivers/phy/qualcomm/phy-qcom-usb-hs.c
3471 +index abbbe75070da..5629d56a6257 100644
3472 +--- a/drivers/phy/qualcomm/phy-qcom-usb-hs.c
3473 ++++ b/drivers/phy/qualcomm/phy-qcom-usb-hs.c
3474 +@@ -160,8 +160,8 @@ static int qcom_usb_hs_phy_power_on(struct phy *phy)
3475 + /* setup initial state */
3476 + qcom_usb_hs_phy_vbus_notifier(&uphy->vbus_notify, state,
3477 + uphy->vbus_edev);
3478 +- ret = devm_extcon_register_notifier(&ulpi->dev, uphy->vbus_edev,
3479 +- EXTCON_USB, &uphy->vbus_notify);
3480 ++ ret = extcon_register_notifier(uphy->vbus_edev, EXTCON_USB,
3481 ++ &uphy->vbus_notify);
3482 + if (ret)
3483 + goto err_ulpi;
3484 + }
3485 +@@ -182,6 +182,9 @@ static int qcom_usb_hs_phy_power_off(struct phy *phy)
3486 + {
3487 + struct qcom_usb_hs_phy *uphy = phy_get_drvdata(phy);
3488 +
3489 ++ if (uphy->vbus_edev)
3490 ++ extcon_unregister_notifier(uphy->vbus_edev, EXTCON_USB,
3491 ++ &uphy->vbus_notify);
3492 + regulator_disable(uphy->v3p3);
3493 + regulator_disable(uphy->v1p8);
3494 + clk_disable_unprepare(uphy->sleep_clk);
3495 +diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
3496 +index 2969ff3162c3..177ee1136e34 100644
3497 +--- a/drivers/pinctrl/devicetree.c
3498 ++++ b/drivers/pinctrl/devicetree.c
3499 +@@ -40,6 +40,13 @@ struct pinctrl_dt_map {
3500 + static void dt_free_map(struct pinctrl_dev *pctldev,
3501 + struct pinctrl_map *map, unsigned num_maps)
3502 + {
3503 ++ int i;
3504 ++
3505 ++ for (i = 0; i < num_maps; ++i) {
3506 ++ kfree_const(map[i].dev_name);
3507 ++ map[i].dev_name = NULL;
3508 ++ }
3509 ++
3510 + if (pctldev) {
3511 + const struct pinctrl_ops *ops = pctldev->desc->pctlops;
3512 + if (ops->dt_free_map)
3513 +@@ -74,7 +81,13 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
3514 +
3515 + /* Initialize common mapping table entry fields */
3516 + for (i = 0; i < num_maps; i++) {
3517 +- map[i].dev_name = dev_name(p->dev);
3518 ++ const char *devname;
3519 ++
3520 ++ devname = kstrdup_const(dev_name(p->dev), GFP_KERNEL);
3521 ++ if (!devname)
3522 ++ goto err_free_map;
3523 ++
3524 ++ map[i].dev_name = devname;
3525 + map[i].name = statename;
3526 + if (pctldev)
3527 + map[i].ctrl_dev_name = dev_name(pctldev->dev);
3528 +@@ -82,10 +95,8 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
3529 +
3530 + /* Remember the converted mapping table entries */
3531 + dt_map = kzalloc(sizeof(*dt_map), GFP_KERNEL);
3532 +- if (!dt_map) {
3533 +- dt_free_map(pctldev, map, num_maps);
3534 +- return -ENOMEM;
3535 +- }
3536 ++ if (!dt_map)
3537 ++ goto err_free_map;
3538 +
3539 + dt_map->pctldev = pctldev;
3540 + dt_map->map = map;
3541 +@@ -93,6 +104,10 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
3542 + list_add_tail(&dt_map->node, &p->dt_maps);
3543 +
3544 + return pinctrl_register_map(map, num_maps, false);
3545 ++
3546 ++err_free_map:
3547 ++ dt_free_map(pctldev, map, num_maps);
3548 ++ return -ENOMEM;
3549 + }
3550 +
3551 + struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
3552 +diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
3553 +index cd7a5d95b499..b1ffdd3f6d07 100644
3554 +--- a/drivers/pinctrl/pinctrl-amd.c
3555 ++++ b/drivers/pinctrl/pinctrl-amd.c
3556 +@@ -544,7 +544,8 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
3557 + irqreturn_t ret = IRQ_NONE;
3558 + unsigned int i, irqnr;
3559 + unsigned long flags;
3560 +- u32 *regs, regval;
3561 ++ u32 __iomem *regs;
3562 ++ u32 regval;
3563 + u64 status, mask;
3564 +
3565 + /* Read the wake status */
3566 +diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7734.c b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
3567 +index 33232041ee86..3eccc9b3ca84 100644
3568 +--- a/drivers/pinctrl/sh-pfc/pfc-sh7734.c
3569 ++++ b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
3570 +@@ -1453,7 +1453,7 @@ static const struct pinmux_func pinmux_func_gpios[] = {
3571 + GPIO_FN(ET0_ETXD2_A),
3572 + GPIO_FN(EX_CS5), GPIO_FN(SD1_CMD_A), GPIO_FN(ATADIR), GPIO_FN(QSSL_B),
3573 + GPIO_FN(ET0_ETXD3_A),
3574 +- GPIO_FN(RD_WR), GPIO_FN(TCLK1_B),
3575 ++ GPIO_FN(RD_WR), GPIO_FN(TCLK0), GPIO_FN(CAN_CLK_B), GPIO_FN(ET0_ETXD4),
3576 + GPIO_FN(EX_WAIT0), GPIO_FN(TCLK1_B),
3577 + GPIO_FN(EX_WAIT1), GPIO_FN(SD1_DAT0_A), GPIO_FN(DREQ2),
3578 + GPIO_FN(CAN1_TX_C), GPIO_FN(ET0_LINK_C), GPIO_FN(ET0_ETXD5_A),
3579 +@@ -1949,7 +1949,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
3580 + /* IP3_20 [1] */
3581 + FN_EX_WAIT0, FN_TCLK1_B,
3582 + /* IP3_19_18 [2] */
3583 +- FN_RD_WR, FN_TCLK1_B, 0, 0,
3584 ++ FN_RD_WR, FN_TCLK0, FN_CAN_CLK_B, FN_ET0_ETXD4,
3585 + /* IP3_17_15 [3] */
3586 + FN_EX_CS5, FN_SD1_CMD_A, FN_ATADIR, FN_QSSL_B,
3587 + FN_ET0_ETXD3_A, 0, 0, 0,
3588 +diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
3589 +index d0ffdd5d9199..06a3c1ef8eee 100644
3590 +--- a/drivers/platform/x86/hp-wmi.c
3591 ++++ b/drivers/platform/x86/hp-wmi.c
3592 +@@ -313,7 +313,7 @@ static int __init hp_wmi_bios_2008_later(void)
3593 +
3594 + static int __init hp_wmi_bios_2009_later(void)
3595 + {
3596 +- int state = 0;
3597 ++ u8 state[128];
3598 + int ret = hp_wmi_perform_query(HPWMI_FEATURE2_QUERY, HPWMI_READ, &state,
3599 + sizeof(state), sizeof(state));
3600 + if (!ret)
3601 +diff --git a/drivers/regulator/max8907-regulator.c b/drivers/regulator/max8907-regulator.c
3602 +index 860400d2cd85..a8f2f07239fb 100644
3603 +--- a/drivers/regulator/max8907-regulator.c
3604 ++++ b/drivers/regulator/max8907-regulator.c
3605 +@@ -299,7 +299,10 @@ static int max8907_regulator_probe(struct platform_device *pdev)
3606 + memcpy(pmic->desc, max8907_regulators, sizeof(pmic->desc));
3607 +
3608 + /* Backwards compatibility with MAX8907B; SD1 uses different voltages */
3609 +- regmap_read(max8907->regmap_gen, MAX8907_REG_II2RR, &val);
3610 ++ ret = regmap_read(max8907->regmap_gen, MAX8907_REG_II2RR, &val);
3611 ++ if (ret)
3612 ++ return ret;
3613 ++
3614 + if ((val & MAX8907_II2RR_VERSION_MASK) ==
3615 + MAX8907_II2RR_VERSION_REV_B) {
3616 + pmic->desc[MAX8907_SD1].min_uV = 637500;
3617 +@@ -336,14 +339,20 @@ static int max8907_regulator_probe(struct platform_device *pdev)
3618 + }
3619 +
3620 + if (pmic->desc[i].ops == &max8907_ldo_ops) {
3621 +- regmap_read(config.regmap, pmic->desc[i].enable_reg,
3622 ++ ret = regmap_read(config.regmap, pmic->desc[i].enable_reg,
3623 + &val);
3624 ++ if (ret)
3625 ++ return ret;
3626 ++
3627 + if ((val & MAX8907_MASK_LDO_SEQ) !=
3628 + MAX8907_MASK_LDO_SEQ)
3629 + pmic->desc[i].ops = &max8907_ldo_hwctl_ops;
3630 + } else if (pmic->desc[i].ops == &max8907_out5v_ops) {
3631 +- regmap_read(config.regmap, pmic->desc[i].enable_reg,
3632 ++ ret = regmap_read(config.regmap, pmic->desc[i].enable_reg,
3633 + &val);
3634 ++ if (ret)
3635 ++ return ret;
3636 ++
3637 + if ((val & (MAX8907_MASK_OUT5V_VINEN |
3638 + MAX8907_MASK_OUT5V_ENSRC)) !=
3639 + MAX8907_MASK_OUT5V_ENSRC)
3640 +diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
3641 +index 29bc99c4a7b6..e49d3c810677 100644
3642 +--- a/drivers/soundwire/intel.c
3643 ++++ b/drivers/soundwire/intel.c
3644 +@@ -352,7 +352,10 @@ intel_pdi_shim_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
3645 + unsigned int link_id = sdw->instance;
3646 + int pdi_conf = 0;
3647 +
3648 +- pdi->intel_alh_id = (link_id * 16) + pdi->num + 5;
3649 ++ /* the Bulk and PCM streams are not contiguous */
3650 ++ pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
3651 ++ if (pdi->num >= 2)
3652 ++ pdi->intel_alh_id += 2;
3653 +
3654 + /*
3655 + * Program stream parameters to stream SHIM register
3656 +@@ -381,7 +384,10 @@ intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
3657 + unsigned int link_id = sdw->instance;
3658 + unsigned int conf;
3659 +
3660 +- pdi->intel_alh_id = (link_id * 16) + pdi->num + 5;
3661 ++ /* the Bulk and PCM streams are not contiguous */
3662 ++ pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
3663 ++ if (pdi->num >= 2)
3664 ++ pdi->intel_alh_id += 2;
3665 +
3666 + /* Program Stream config ALH register */
3667 + conf = intel_readl(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id));
3668 +diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
3669 +index e6eb979f1b8a..e4b31d6e6e33 100644
3670 +--- a/drivers/spi/spi-img-spfi.c
3671 ++++ b/drivers/spi/spi-img-spfi.c
3672 +@@ -676,6 +676,8 @@ static int img_spfi_probe(struct platform_device *pdev)
3673 + dma_release_channel(spfi->tx_ch);
3674 + if (spfi->rx_ch)
3675 + dma_release_channel(spfi->rx_ch);
3676 ++ spfi->tx_ch = NULL;
3677 ++ spfi->rx_ch = NULL;
3678 + dev_warn(spfi->dev, "Failed to get DMA channels, falling back to PIO mode\n");
3679 + } else {
3680 + master->dma_tx = spfi->tx_ch;
3681 +diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
3682 +index f41333817c50..525388126e26 100644
3683 +--- a/drivers/spi/spi-pxa2xx.c
3684 ++++ b/drivers/spi/spi-pxa2xx.c
3685 +@@ -1470,7 +1470,13 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
3686 + }
3687 +
3688 + ssp->clk = devm_clk_get(&pdev->dev, NULL);
3689 ++ if (IS_ERR(ssp->clk))
3690 ++ return NULL;
3691 ++
3692 + ssp->irq = platform_get_irq(pdev, 0);
3693 ++ if (ssp->irq < 0)
3694 ++ return NULL;
3695 ++
3696 + ssp->type = type;
3697 + ssp->pdev = pdev;
3698 + ssp->port_id = pxa2xx_spi_get_port_id(adev);
3699 +diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c
3700 +index df5960bddfe6..f1fc2bde6ef3 100644
3701 +--- a/drivers/spi/spi-sprd-adi.c
3702 ++++ b/drivers/spi/spi-sprd-adi.c
3703 +@@ -367,6 +367,9 @@ static int sprd_adi_restart_handler(struct notifier_block *this,
3704 + val |= BIT_WDG_RUN | BIT_WDG_RST;
3705 + sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_CTRL, val);
3706 +
3707 ++ /* Lock the watchdog */
3708 ++ sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOCK, ~WDG_UNLOCK_KEY);
3709 ++
3710 + mdelay(1000);
3711 +
3712 + dev_emerg(sadi->dev, "Unable to restart system\n");
3713 +diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c
3714 +index a4e43fc19ece..5df01ffdef46 100644
3715 +--- a/drivers/spi/spi-st-ssc4.c
3716 ++++ b/drivers/spi/spi-st-ssc4.c
3717 +@@ -385,6 +385,7 @@ static int spi_st_probe(struct platform_device *pdev)
3718 + return 0;
3719 +
3720 + clk_disable:
3721 ++ pm_runtime_disable(&pdev->dev);
3722 + clk_disable_unprepare(spi_st->clk);
3723 + put_master:
3724 + spi_master_put(master);
3725 +@@ -396,6 +397,8 @@ static int spi_st_remove(struct platform_device *pdev)
3726 + struct spi_master *master = platform_get_drvdata(pdev);
3727 + struct spi_st *spi_st = spi_master_get_devdata(master);
3728 +
3729 ++ pm_runtime_disable(&pdev->dev);
3730 ++
3731 + clk_disable_unprepare(spi_st->clk);
3732 +
3733 + pinctrl_pm_select_sleep_state(&pdev->dev);
3734 +diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
3735 +index 1427f343b39a..d1187317bb5d 100644
3736 +--- a/drivers/spi/spi-tegra20-slink.c
3737 ++++ b/drivers/spi/spi-tegra20-slink.c
3738 +@@ -1078,7 +1078,7 @@ static int tegra_slink_probe(struct platform_device *pdev)
3739 + ret = clk_enable(tspi->clk);
3740 + if (ret < 0) {
3741 + dev_err(&pdev->dev, "Clock enable failed %d\n", ret);
3742 +- goto exit_free_master;
3743 ++ goto exit_clk_unprepare;
3744 + }
3745 +
3746 + spi_irq = platform_get_irq(pdev, 0);
3747 +@@ -1151,6 +1151,8 @@ exit_free_irq:
3748 + free_irq(spi_irq, tspi);
3749 + exit_clk_disable:
3750 + clk_disable(tspi->clk);
3751 ++exit_clk_unprepare:
3752 ++ clk_unprepare(tspi->clk);
3753 + exit_free_master:
3754 + spi_master_put(master);
3755 + return ret;
3756 +@@ -1164,6 +1166,7 @@ static int tegra_slink_remove(struct platform_device *pdev)
3757 + free_irq(tspi->irq, tspi);
3758 +
3759 + clk_disable(tspi->clk);
3760 ++ clk_unprepare(tspi->clk);
3761 +
3762 + if (tspi->tx_dma_chan)
3763 + tegra_slink_deinit_dma_param(tspi, false);
3764 +diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
3765 +index c5fe08bc34a0..028725573e63 100644
3766 +--- a/drivers/spi/spidev.c
3767 ++++ b/drivers/spi/spidev.c
3768 +@@ -634,6 +634,9 @@ static int spidev_release(struct inode *inode, struct file *filp)
3769 + if (dofree)
3770 + kfree(spidev);
3771 + }
3772 ++#ifdef CONFIG_SPI_SLAVE
3773 ++ spi_slave_abort(spidev->spi);
3774 ++#endif
3775 + mutex_unlock(&device_list_lock);
3776 +
3777 + return 0;
3778 +diff --git a/drivers/staging/comedi/drivers/gsc_hpdi.c b/drivers/staging/comedi/drivers/gsc_hpdi.c
3779 +index 4bdf44d82879..dc62db1ee1dd 100644
3780 +--- a/drivers/staging/comedi/drivers/gsc_hpdi.c
3781 ++++ b/drivers/staging/comedi/drivers/gsc_hpdi.c
3782 +@@ -623,6 +623,11 @@ static int gsc_hpdi_auto_attach(struct comedi_device *dev,
3783 + dma_alloc_coherent(&pcidev->dev, DMA_BUFFER_SIZE,
3784 + &devpriv->dio_buffer_phys_addr[i],
3785 + GFP_KERNEL);
3786 ++ if (!devpriv->dio_buffer[i]) {
3787 ++ dev_warn(dev->class_dev,
3788 ++ "failed to allocate DMA buffer\n");
3789 ++ return -ENOMEM;
3790 ++ }
3791 + }
3792 + /* allocate dma descriptors */
3793 + devpriv->dma_desc = dma_alloc_coherent(&pcidev->dev,
3794 +@@ -630,6 +635,11 @@ static int gsc_hpdi_auto_attach(struct comedi_device *dev,
3795 + NUM_DMA_DESCRIPTORS,
3796 + &devpriv->dma_desc_phys_addr,
3797 + GFP_KERNEL);
3798 ++ if (!devpriv->dma_desc) {
3799 ++ dev_warn(dev->class_dev,
3800 ++ "failed to allocate DMA descriptors\n");
3801 ++ return -ENOMEM;
3802 ++ }
3803 + if (devpriv->dma_desc_phys_addr & 0xf) {
3804 + dev_warn(dev->class_dev,
3805 + " dma descriptors not quad-word aligned (bug)\n");
3806 +diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
3807 +index 16fcf633e60f..3fe4738720ea 100644
3808 +--- a/drivers/staging/fbtft/fbtft-core.c
3809 ++++ b/drivers/staging/fbtft/fbtft-core.c
3810 +@@ -771,7 +771,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
3811 + fbdefio->deferred_io = fbtft_deferred_io;
3812 + fb_deferred_io_init(info);
3813 +
3814 +- strncpy(info->fix.id, dev->driver->name, 16);
3815 ++ snprintf(info->fix.id, sizeof(info->fix.id), "%s", dev->driver->name);
3816 + info->fix.type = FB_TYPE_PACKED_PIXELS;
3817 + info->fix.visual = FB_VISUAL_TRUECOLOR;
3818 + info->fix.xpanstep = 0;
3819 +diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
3820 +index dd9b02d316f3..c6a5b62cb363 100644
3821 +--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
3822 ++++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
3823 +@@ -778,7 +778,7 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
3824 + memcpy(pwlanhdr->addr2, get_bssid(pmlmepriv), ETH_ALEN);
3825 + memcpy(pwlanhdr->addr3, pattrib->src, ETH_ALEN);
3826 +
3827 +- if (psta->qos_option)
3828 ++ if (psta && psta->qos_option)
3829 + qos_option = true;
3830 + } else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ||
3831 + check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
3832 +@@ -786,7 +786,7 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
3833 + memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
3834 + memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv), ETH_ALEN);
3835 +
3836 +- if (psta->qos_option)
3837 ++ if (psta && psta->qos_option)
3838 + qos_option = true;
3839 + } else {
3840 + RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("fw_state:%x is not allowed to xmit frame\n", get_fwstate(pmlmepriv)));
3841 +diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
3842 +index e218b5c20642..2066a1d9bc84 100644
3843 +--- a/drivers/staging/rtl8192u/r8192U_core.c
3844 ++++ b/drivers/staging/rtl8192u/r8192U_core.c
3845 +@@ -1467,7 +1467,7 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
3846 + (struct tx_fwinfo_819x_usb *)(skb->data + USB_HWDESC_HEADER_LEN);
3847 + struct usb_device *udev = priv->udev;
3848 + int pend;
3849 +- int status;
3850 ++ int status, rt = -1;
3851 + struct urb *tx_urb = NULL, *tx_urb_zero = NULL;
3852 + unsigned int idx_pipe;
3853 +
3854 +@@ -1611,8 +1611,10 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
3855 + }
3856 + if (bSend0Byte) {
3857 + tx_urb_zero = usb_alloc_urb(0, GFP_ATOMIC);
3858 +- if (!tx_urb_zero)
3859 +- return -ENOMEM;
3860 ++ if (!tx_urb_zero) {
3861 ++ rt = -ENOMEM;
3862 ++ goto error;
3863 ++ }
3864 + usb_fill_bulk_urb(tx_urb_zero, udev,
3865 + usb_sndbulkpipe(udev, idx_pipe),
3866 + &zero, 0, tx_zero_isr, dev);
3867 +@@ -1622,7 +1624,7 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
3868 + "Error TX URB for zero byte %d, error %d",
3869 + atomic_read(&priv->tx_pending[tcb_desc->queue_index]),
3870 + status);
3871 +- return -1;
3872 ++ goto error;
3873 + }
3874 + }
3875 + netif_trans_update(dev);
3876 +@@ -1633,7 +1635,12 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
3877 + RT_TRACE(COMP_ERR, "Error TX URB %d, error %d",
3878 + atomic_read(&priv->tx_pending[tcb_desc->queue_index]),
3879 + status);
3880 +- return -1;
3881 ++
3882 ++error:
3883 ++ dev_kfree_skb_any(skb);
3884 ++ usb_free_urb(tx_urb);
3885 ++ usb_free_urb(tx_urb_zero);
3886 ++ return rt;
3887 + }
3888 +
3889 + static short rtl8192_usb_initendpoints(struct net_device *dev)
3890 +diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
3891 +index 29c6414f48f1..00204824bffd 100644
3892 +--- a/drivers/usb/core/devio.c
3893 ++++ b/drivers/usb/core/devio.c
3894 +@@ -739,8 +739,15 @@ static int claimintf(struct usb_dev_state *ps, unsigned int ifnum)
3895 + intf = usb_ifnum_to_if(dev, ifnum);
3896 + if (!intf)
3897 + err = -ENOENT;
3898 +- else
3899 ++ else {
3900 ++ unsigned int old_suppress;
3901 ++
3902 ++ /* suppress uevents while claiming interface */
3903 ++ old_suppress = dev_get_uevent_suppress(&intf->dev);
3904 ++ dev_set_uevent_suppress(&intf->dev, 1);
3905 + err = usb_driver_claim_interface(&usbfs_driver, intf, ps);
3906 ++ dev_set_uevent_suppress(&intf->dev, old_suppress);
3907 ++ }
3908 + if (err == 0)
3909 + set_bit(ifnum, &ps->ifclaimed);
3910 + return err;
3911 +@@ -760,7 +767,13 @@ static int releaseintf(struct usb_dev_state *ps, unsigned int ifnum)
3912 + if (!intf)
3913 + err = -ENOENT;
3914 + else if (test_and_clear_bit(ifnum, &ps->ifclaimed)) {
3915 ++ unsigned int old_suppress;
3916 ++
3917 ++ /* suppress uevents while releasing interface */
3918 ++ old_suppress = dev_get_uevent_suppress(&intf->dev);
3919 ++ dev_set_uevent_suppress(&intf->dev, 1);
3920 + usb_driver_release_interface(&usbfs_driver, intf);
3921 ++ dev_set_uevent_suppress(&intf->dev, old_suppress);
3922 + err = 0;
3923 + }
3924 + return err;
3925 +diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
3926 +index 327630405695..f643603c8de6 100644
3927 +--- a/drivers/usb/host/ehci-q.c
3928 ++++ b/drivers/usb/host/ehci-q.c
3929 +@@ -27,6 +27,10 @@
3930 +
3931 + /*-------------------------------------------------------------------------*/
3932 +
3933 ++/* PID Codes that are used here, from EHCI specification, Table 3-16. */
3934 ++#define PID_CODE_IN 1
3935 ++#define PID_CODE_SETUP 2
3936 ++
3937 + /* fill a qtd, returning how much of the buffer we were able to queue up */
3938 +
3939 + static int
3940 +@@ -190,7 +194,7 @@ static int qtd_copy_status (
3941 + int status = -EINPROGRESS;
3942 +
3943 + /* count IN/OUT bytes, not SETUP (even short packets) */
3944 +- if (likely (QTD_PID (token) != 2))
3945 ++ if (likely(QTD_PID(token) != PID_CODE_SETUP))
3946 + urb->actual_length += length - QTD_LENGTH (token);
3947 +
3948 + /* don't modify error codes */
3949 +@@ -206,6 +210,13 @@ static int qtd_copy_status (
3950 + if (token & QTD_STS_BABBLE) {
3951 + /* FIXME "must" disable babbling device's port too */
3952 + status = -EOVERFLOW;
3953 ++ /*
3954 ++ * When MMF is active and PID Code is IN, queue is halted.
3955 ++ * EHCI Specification, Table 4-13.
3956 ++ */
3957 ++ } else if ((token & QTD_STS_MMF) &&
3958 ++ (QTD_PID(token) == PID_CODE_IN)) {
3959 ++ status = -EPROTO;
3960 + /* CERR nonzero + halt --> stall */
3961 + } else if (QTD_CERR(token)) {
3962 + status = -EPIPE;
3963 +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
3964 +index 74aeaa61f5c6..075c49cfe60f 100644
3965 +--- a/drivers/usb/host/xhci-pci.c
3966 ++++ b/drivers/usb/host/xhci-pci.c
3967 +@@ -493,7 +493,6 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
3968 + retval = xhci_resume(xhci, hibernated);
3969 + return retval;
3970 + }
3971 +-#endif /* CONFIG_PM */
3972 +
3973 + static void xhci_pci_shutdown(struct usb_hcd *hcd)
3974 + {
3975 +@@ -506,6 +505,7 @@ static void xhci_pci_shutdown(struct usb_hcd *hcd)
3976 + if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
3977 + pci_set_power_state(pdev, PCI_D3hot);
3978 + }
3979 ++#endif /* CONFIG_PM */
3980 +
3981 + /*-------------------------------------------------------------------------*/
3982 +
3983 +diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h
3984 +index c47b721b8bca..63a75fd9fa0c 100644
3985 +--- a/drivers/usb/renesas_usbhs/common.h
3986 ++++ b/drivers/usb/renesas_usbhs/common.h
3987 +@@ -157,11 +157,12 @@ struct usbhs_priv;
3988 + #define VBSTS (1 << 7) /* VBUS_0 and VBUSIN_0 Input Status */
3989 + #define VALID (1 << 3) /* USB Request Receive */
3990 +
3991 +-#define DVSQ_MASK (0x3 << 4) /* Device State */
3992 ++#define DVSQ_MASK (0x7 << 4) /* Device State */
3993 + #define POWER_STATE (0 << 4)
3994 + #define DEFAULT_STATE (1 << 4)
3995 + #define ADDRESS_STATE (2 << 4)
3996 + #define CONFIGURATION_STATE (3 << 4)
3997 ++#define SUSPENDED_STATE (4 << 4)
3998 +
3999 + #define CTSQ_MASK (0x7) /* Control Transfer Stage */
4000 + #define IDLE_SETUP_STAGE 0 /* Idle stage or setup stage */
4001 +diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
4002 +index 7feac4128a2d..f36248e9387d 100644
4003 +--- a/drivers/usb/renesas_usbhs/mod_gadget.c
4004 ++++ b/drivers/usb/renesas_usbhs/mod_gadget.c
4005 +@@ -456,12 +456,18 @@ static int usbhsg_irq_dev_state(struct usbhs_priv *priv,
4006 + {
4007 + struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
4008 + struct device *dev = usbhsg_gpriv_to_dev(gpriv);
4009 ++ int state = usbhs_status_get_device_state(irq_state);
4010 +
4011 + gpriv->gadget.speed = usbhs_bus_get_speed(priv);
4012 +
4013 +- dev_dbg(dev, "state = %x : speed : %d\n",
4014 +- usbhs_status_get_device_state(irq_state),
4015 +- gpriv->gadget.speed);
4016 ++ dev_dbg(dev, "state = %x : speed : %d\n", state, gpriv->gadget.speed);
4017 ++
4018 ++ if (gpriv->gadget.speed != USB_SPEED_UNKNOWN &&
4019 ++ (state & SUSPENDED_STATE)) {
4020 ++ if (gpriv->driver && gpriv->driver->suspend)
4021 ++ gpriv->driver->suspend(&gpriv->gadget);
4022 ++ usb_gadget_set_state(&gpriv->gadget, USB_STATE_SUSPENDED);
4023 ++ }
4024 +
4025 + return 0;
4026 + }
4027 +diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
4028 +index d88a5b15f073..88eaf3c45dd5 100644
4029 +--- a/drivers/usb/usbip/usbip_common.c
4030 ++++ b/drivers/usb/usbip/usbip_common.c
4031 +@@ -727,6 +727,9 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
4032 +
4033 + copy -= recv;
4034 + ret += recv;
4035 ++
4036 ++ if (!copy)
4037 ++ break;
4038 + }
4039 +
4040 + if (ret != size)
4041 +diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
4042 +index 33f8972ba842..00fc98741c5d 100644
4043 +--- a/drivers/usb/usbip/vhci_rx.c
4044 ++++ b/drivers/usb/usbip/vhci_rx.c
4045 +@@ -77,16 +77,21 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
4046 + usbip_pack_pdu(pdu, urb, USBIP_RET_SUBMIT, 0);
4047 +
4048 + /* recv transfer buffer */
4049 +- if (usbip_recv_xbuff(ud, urb) < 0)
4050 +- return;
4051 ++ if (usbip_recv_xbuff(ud, urb) < 0) {
4052 ++ urb->status = -EPROTO;
4053 ++ goto error;
4054 ++ }
4055 +
4056 + /* recv iso_packet_descriptor */
4057 +- if (usbip_recv_iso(ud, urb) < 0)
4058 +- return;
4059 ++ if (usbip_recv_iso(ud, urb) < 0) {
4060 ++ urb->status = -EPROTO;
4061 ++ goto error;
4062 ++ }
4063 +
4064 + /* restore the padding in iso packets */
4065 + usbip_pad_iso(ud, urb);
4066 +
4067 ++error:
4068 + if (usbip_dbg_flag_vhci_rx)
4069 + usbip_dump_urb(urb);
4070 +
4071 +diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
4072 +index 90d387b50ab7..0505eeb593b5 100644
4073 +--- a/drivers/xen/Kconfig
4074 ++++ b/drivers/xen/Kconfig
4075 +@@ -158,7 +158,8 @@ config XEN_GNTDEV
4076 +
4077 + config XEN_GNTDEV_DMABUF
4078 + bool "Add support for dma-buf grant access device driver extension"
4079 +- depends on XEN_GNTDEV && XEN_GRANT_DMA_ALLOC && DMA_SHARED_BUFFER
4080 ++ depends on XEN_GNTDEV && XEN_GRANT_DMA_ALLOC
4081 ++ select DMA_SHARED_BUFFER
4082 + help
4083 + Allows userspace processes and kernel modules to use Xen backed
4084 + dma-buf implementation. With this extension grant references to
4085 +diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
4086 +index d522494698fa..02e4e903dfe9 100644
4087 +--- a/fs/btrfs/async-thread.c
4088 ++++ b/fs/btrfs/async-thread.c
4089 +@@ -252,16 +252,17 @@ out:
4090 + }
4091 + }
4092 +
4093 +-static void run_ordered_work(struct __btrfs_workqueue *wq)
4094 ++static void run_ordered_work(struct __btrfs_workqueue *wq,
4095 ++ struct btrfs_work *self)
4096 + {
4097 + struct list_head *list = &wq->ordered_list;
4098 + struct btrfs_work *work;
4099 + spinlock_t *lock = &wq->list_lock;
4100 + unsigned long flags;
4101 ++ void *wtag;
4102 ++ bool free_self = false;
4103 +
4104 + while (1) {
4105 +- void *wtag;
4106 +-
4107 + spin_lock_irqsave(lock, flags);
4108 + if (list_empty(list))
4109 + break;
4110 +@@ -287,16 +288,47 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
4111 + list_del(&work->ordered_list);
4112 + spin_unlock_irqrestore(lock, flags);
4113 +
4114 +- /*
4115 +- * We don't want to call the ordered free functions with the
4116 +- * lock held though. Save the work as tag for the trace event,
4117 +- * because the callback could free the structure.
4118 +- */
4119 +- wtag = work;
4120 +- work->ordered_free(work);
4121 +- trace_btrfs_all_work_done(wq->fs_info, wtag);
4122 ++ if (work == self) {
4123 ++ /*
4124 ++ * This is the work item that the worker is currently
4125 ++ * executing.
4126 ++ *
4127 ++ * The kernel workqueue code guarantees non-reentrancy
4128 ++ * of work items. I.e., if a work item with the same
4129 ++ * address and work function is queued twice, the second
4130 ++ * execution is blocked until the first one finishes. A
4131 ++ * work item may be freed and recycled with the same
4132 ++ * work function; the workqueue code assumes that the
4133 ++ * original work item cannot depend on the recycled work
4134 ++ * item in that case (see find_worker_executing_work()).
4135 ++ *
4136 ++ * Note that the work of one Btrfs filesystem may depend
4137 ++ * on the work of another Btrfs filesystem via, e.g., a
4138 ++ * loop device. Therefore, we must not allow the current
4139 ++ * work item to be recycled until we are really done,
4140 ++ * otherwise we break the above assumption and can
4141 ++ * deadlock.
4142 ++ */
4143 ++ free_self = true;
4144 ++ } else {
4145 ++ /*
4146 ++ * We don't want to call the ordered free functions with
4147 ++ * the lock held though. Save the work as tag for the
4148 ++ * trace event, because the callback could free the
4149 ++ * structure.
4150 ++ */
4151 ++ wtag = work;
4152 ++ work->ordered_free(work);
4153 ++ trace_btrfs_all_work_done(wq->fs_info, wtag);
4154 ++ }
4155 + }
4156 + spin_unlock_irqrestore(lock, flags);
4157 ++
4158 ++ if (free_self) {
4159 ++ wtag = self;
4160 ++ self->ordered_free(self);
4161 ++ trace_btrfs_all_work_done(wq->fs_info, wtag);
4162 ++ }
4163 + }
4164 +
4165 + static void normal_work_helper(struct btrfs_work *work)
4166 +@@ -324,7 +356,7 @@ static void normal_work_helper(struct btrfs_work *work)
4167 + work->func(work);
4168 + if (need_order) {
4169 + set_bit(WORK_DONE_BIT, &work->flags);
4170 +- run_ordered_work(wq);
4171 ++ run_ordered_work(wq, work);
4172 + }
4173 + if (!need_order)
4174 + trace_btrfs_all_work_done(wq->fs_info, wtag);
4175 +diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
4176 +index fc764f350f05..84ff398ae70b 100644
4177 +--- a/fs/btrfs/ctree.c
4178 ++++ b/fs/btrfs/ctree.c
4179 +@@ -390,7 +390,7 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
4180 + for (node = rb_first(tm_root); node; node = next) {
4181 + next = rb_next(node);
4182 + tm = rb_entry(node, struct tree_mod_elem, node);
4183 +- if (tm->seq > min_seq)
4184 ++ if (tm->seq >= min_seq)
4185 + continue;
4186 + rb_erase(node, tm_root);
4187 + kfree(tm);
4188 +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
4189 +index cc2d268e2cd7..d24ecbf938b6 100644
4190 +--- a/fs/btrfs/ctree.h
4191 ++++ b/fs/btrfs/ctree.h
4192 +@@ -3101,7 +3101,7 @@ int btrfs_find_name_in_ext_backref(struct extent_buffer *leaf, int slot,
4193 + /* file-item.c */
4194 + struct btrfs_dio_private;
4195 + int btrfs_del_csums(struct btrfs_trans_handle *trans,
4196 +- struct btrfs_fs_info *fs_info, u64 bytenr, u64 len);
4197 ++ struct btrfs_root *root, u64 bytenr, u64 len);
4198 + blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst);
4199 + blk_status_t btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio,
4200 + u64 logical_offset);
4201 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
4202 +index 96296dc7d2ea..e12c37f457e0 100644
4203 +--- a/fs/btrfs/disk-io.c
4204 ++++ b/fs/btrfs/disk-io.c
4205 +@@ -1660,8 +1660,8 @@ static void end_workqueue_fn(struct btrfs_work *work)
4206 + bio->bi_status = end_io_wq->status;
4207 + bio->bi_private = end_io_wq->private;
4208 + bio->bi_end_io = end_io_wq->end_io;
4209 +- kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
4210 + bio_endio(bio);
4211 ++ kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
4212 + }
4213 +
4214 + static int cleaner_kthread(void *arg)
4215 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
4216 +index 4bda5c09cdfe..47ca1ebda056 100644
4217 +--- a/fs/btrfs/extent-tree.c
4218 ++++ b/fs/btrfs/extent-tree.c
4219 +@@ -2492,8 +2492,8 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
4220 + btrfs_pin_extent(fs_info, head->bytenr,
4221 + head->num_bytes, 1);
4222 + if (head->is_data) {
4223 +- ret = btrfs_del_csums(trans, fs_info, head->bytenr,
4224 +- head->num_bytes);
4225 ++ ret = btrfs_del_csums(trans, fs_info->csum_root,
4226 ++ head->bytenr, head->num_bytes);
4227 + }
4228 + }
4229 +
4230 +@@ -6880,7 +6880,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4231 + btrfs_release_path(path);
4232 +
4233 + if (is_data) {
4234 +- ret = btrfs_del_csums(trans, info, bytenr, num_bytes);
4235 ++ ret = btrfs_del_csums(trans, info->csum_root, bytenr,
4236 ++ num_bytes);
4237 + if (ret) {
4238 + btrfs_abort_transaction(trans, ret);
4239 + goto out;
4240 +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
4241 +index 88fc5a0c573f..fed44390c049 100644
4242 +--- a/fs/btrfs/extent_io.c
4243 ++++ b/fs/btrfs/extent_io.c
4244 +@@ -4888,12 +4888,14 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
4245 + return eb;
4246 + eb = alloc_dummy_extent_buffer(fs_info, start);
4247 + if (!eb)
4248 +- return NULL;
4249 ++ return ERR_PTR(-ENOMEM);
4250 + eb->fs_info = fs_info;
4251 + again:
4252 + ret = radix_tree_preload(GFP_NOFS);
4253 +- if (ret)
4254 ++ if (ret) {
4255 ++ exists = ERR_PTR(ret);
4256 + goto free_eb;
4257 ++ }
4258 + spin_lock(&fs_info->buffer_lock);
4259 + ret = radix_tree_insert(&fs_info->buffer_radix,
4260 + start >> PAGE_SHIFT, eb);
4261 +diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
4262 +index ba74827beb32..4cf2817ab120 100644
4263 +--- a/fs/btrfs/file-item.c
4264 ++++ b/fs/btrfs/file-item.c
4265 +@@ -577,9 +577,9 @@ static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
4266 + * range of bytes.
4267 + */
4268 + int btrfs_del_csums(struct btrfs_trans_handle *trans,
4269 +- struct btrfs_fs_info *fs_info, u64 bytenr, u64 len)
4270 ++ struct btrfs_root *root, u64 bytenr, u64 len)
4271 + {
4272 +- struct btrfs_root *root = fs_info->csum_root;
4273 ++ struct btrfs_fs_info *fs_info = trans->fs_info;
4274 + struct btrfs_path *path;
4275 + struct btrfs_key key;
4276 + u64 end_byte = bytenr + len;
4277 +@@ -589,6 +589,9 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
4278 + u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
4279 + int blocksize_bits = fs_info->sb->s_blocksize_bits;
4280 +
4281 ++ ASSERT(root == fs_info->csum_root ||
4282 ++ root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
4283 ++
4284 + path = btrfs_alloc_path();
4285 + if (!path)
4286 + return -ENOMEM;
4287 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
4288 +index 7fd4458cf0a7..70085e98d372 100644
4289 +--- a/fs/btrfs/inode.c
4290 ++++ b/fs/btrfs/inode.c
4291 +@@ -5665,7 +5665,6 @@ static void inode_tree_add(struct inode *inode)
4292 +
4293 + static void inode_tree_del(struct inode *inode)
4294 + {
4295 +- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4296 + struct btrfs_root *root = BTRFS_I(inode)->root;
4297 + int empty = 0;
4298 +
4299 +@@ -5678,7 +5677,6 @@ static void inode_tree_del(struct inode *inode)
4300 + spin_unlock(&root->inode_lock);
4301 +
4302 + if (empty && btrfs_root_refs(&root->root_item) == 0) {
4303 +- synchronize_srcu(&fs_info->subvol_srcu);
4304 + spin_lock(&root->inode_lock);
4305 + empty = RB_EMPTY_ROOT(&root->inode_tree);
4306 + spin_unlock(&root->inode_lock);
4307 +@@ -9491,9 +9489,8 @@ static int btrfs_rename_exchange(struct inode *old_dir,
4308 + btrfs_init_log_ctx(&ctx_dest, new_inode);
4309 +
4310 + /* close the race window with snapshot create/destroy ioctl */
4311 +- if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
4312 +- down_read(&fs_info->subvol_sem);
4313 +- if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
4314 ++ if (old_ino == BTRFS_FIRST_FREE_OBJECTID ||
4315 ++ new_ino == BTRFS_FIRST_FREE_OBJECTID)
4316 + down_read(&fs_info->subvol_sem);
4317 +
4318 + /*
4319 +@@ -9727,9 +9724,8 @@ out_fail:
4320 + ret = ret ? ret : ret2;
4321 + }
4322 + out_notrans:
4323 +- if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
4324 +- up_read(&fs_info->subvol_sem);
4325 +- if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
4326 ++ if (new_ino == BTRFS_FIRST_FREE_OBJECTID ||
4327 ++ old_ino == BTRFS_FIRST_FREE_OBJECTID)
4328 + up_read(&fs_info->subvol_sem);
4329 +
4330 + ASSERT(list_empty(&ctx_root.list));
4331 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
4332 +index 00ff4349b457..199c70b8f7d8 100644
4333 +--- a/fs/btrfs/ioctl.c
4334 ++++ b/fs/btrfs/ioctl.c
4335 +@@ -709,11 +709,17 @@ static noinline int create_subvol(struct inode *dir,
4336 +
4337 + btrfs_i_size_write(BTRFS_I(dir), dir->i_size + namelen * 2);
4338 + ret = btrfs_update_inode(trans, root, dir);
4339 +- BUG_ON(ret);
4340 ++ if (ret) {
4341 ++ btrfs_abort_transaction(trans, ret);
4342 ++ goto fail;
4343 ++ }
4344 +
4345 + ret = btrfs_add_root_ref(trans, objectid, root->root_key.objectid,
4346 + btrfs_ino(BTRFS_I(dir)), index, name, namelen);
4347 +- BUG_ON(ret);
4348 ++ if (ret) {
4349 ++ btrfs_abort_transaction(trans, ret);
4350 ++ goto fail;
4351 ++ }
4352 +
4353 + ret = btrfs_uuid_tree_add(trans, root_item->uuid,
4354 + BTRFS_UUID_KEY_SUBVOL, objectid);
4355 +diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
4356 +index 859274e38417..4c81ffe12385 100644
4357 +--- a/fs/btrfs/reada.c
4358 ++++ b/fs/btrfs/reada.c
4359 +@@ -720,21 +720,19 @@ static int reada_start_machine_dev(struct btrfs_device *dev)
4360 + static void reada_start_machine_worker(struct btrfs_work *work)
4361 + {
4362 + struct reada_machine_work *rmw;
4363 +- struct btrfs_fs_info *fs_info;
4364 + int old_ioprio;
4365 +
4366 + rmw = container_of(work, struct reada_machine_work, work);
4367 +- fs_info = rmw->fs_info;
4368 +-
4369 +- kfree(rmw);
4370 +
4371 + old_ioprio = IOPRIO_PRIO_VALUE(task_nice_ioclass(current),
4372 + task_nice_ioprio(current));
4373 + set_task_ioprio(current, BTRFS_IOPRIO_READA);
4374 +- __reada_start_machine(fs_info);
4375 ++ __reada_start_machine(rmw->fs_info);
4376 + set_task_ioprio(current, old_ioprio);
4377 +
4378 +- atomic_dec(&fs_info->reada_works_cnt);
4379 ++ atomic_dec(&rmw->fs_info->reada_works_cnt);
4380 ++
4381 ++ kfree(rmw);
4382 + }
4383 +
4384 + static void __reada_start_machine(struct btrfs_fs_info *fs_info)
4385 +diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
4386 +index b4958f724ce5..f98913061a40 100644
4387 +--- a/fs/btrfs/relocation.c
4388 ++++ b/fs/btrfs/relocation.c
4389 +@@ -4474,6 +4474,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
4390 + fs_root = read_fs_root(fs_info, reloc_root->root_key.offset);
4391 + if (IS_ERR(fs_root)) {
4392 + err = PTR_ERR(fs_root);
4393 ++ list_add_tail(&reloc_root->root_list, &reloc_roots);
4394 + goto out_free;
4395 + }
4396 +
4397 +diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
4398 +index 916c39770467..6b6008db3e03 100644
4399 +--- a/fs/btrfs/scrub.c
4400 ++++ b/fs/btrfs/scrub.c
4401 +@@ -2145,14 +2145,13 @@ static void scrub_missing_raid56_worker(struct btrfs_work *work)
4402 + scrub_write_block_to_dev_replace(sblock);
4403 + }
4404 +
4405 +- scrub_block_put(sblock);
4406 +-
4407 + if (sctx->is_dev_replace && sctx->flush_all_writes) {
4408 + mutex_lock(&sctx->wr_lock);
4409 + scrub_wr_submit(sctx);
4410 + mutex_unlock(&sctx->wr_lock);
4411 + }
4412 +
4413 ++ scrub_block_put(sblock);
4414 + scrub_pending_bio_dec(sctx);
4415 + }
4416 +
4417 +diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
4418 +index 10532463e92a..931a7d1ddc95 100644
4419 +--- a/fs/btrfs/send.c
4420 ++++ b/fs/btrfs/send.c
4421 +@@ -6638,12 +6638,6 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
4422 + send_root->send_in_progress++;
4423 + spin_unlock(&send_root->root_item_lock);
4424 +
4425 +- /*
4426 +- * This is done when we lookup the root, it should already be complete
4427 +- * by the time we get here.
4428 +- */
4429 +- WARN_ON(send_root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE);
4430 +-
4431 + /*
4432 + * Userspace tools do the checks and warn the user if it's
4433 + * not RO.
4434 +diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c
4435 +index 89346da890cf..de8fef91ac48 100644
4436 +--- a/fs/btrfs/tests/free-space-tree-tests.c
4437 ++++ b/fs/btrfs/tests/free-space-tree-tests.c
4438 +@@ -462,9 +462,9 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize,
4439 + root->fs_info->tree_root = root;
4440 +
4441 + root->node = alloc_test_extent_buffer(root->fs_info, nodesize);
4442 +- if (!root->node) {
4443 ++ if (IS_ERR(root->node)) {
4444 + test_err("couldn't allocate dummy buffer");
4445 +- ret = -ENOMEM;
4446 ++ ret = PTR_ERR(root->node);
4447 + goto out;
4448 + }
4449 + btrfs_set_header_level(root->node, 0);
4450 +diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
4451 +index 412b910b04cc..d07dd26194b1 100644
4452 +--- a/fs/btrfs/tests/qgroup-tests.c
4453 ++++ b/fs/btrfs/tests/qgroup-tests.c
4454 +@@ -484,9 +484,9 @@ int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
4455 + * *cough*backref walking code*cough*
4456 + */
4457 + root->node = alloc_test_extent_buffer(root->fs_info, nodesize);
4458 +- if (!root->node) {
4459 ++ if (IS_ERR(root->node)) {
4460 + test_err("couldn't allocate dummy buffer");
4461 +- ret = -ENOMEM;
4462 ++ ret = PTR_ERR(root->node);
4463 + goto out;
4464 + }
4465 + btrfs_set_header_level(root->node, 0);
4466 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
4467 +index 4d4f57f0f08c..fe7165c9d875 100644
4468 +--- a/fs/btrfs/tree-log.c
4469 ++++ b/fs/btrfs/tree-log.c
4470 +@@ -795,7 +795,8 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
4471 + struct btrfs_ordered_sum,
4472 + list);
4473 + if (!ret)
4474 +- ret = btrfs_del_csums(trans, fs_info,
4475 ++ ret = btrfs_del_csums(trans,
4476 ++ fs_info->csum_root,
4477 + sums->bytenr,
4478 + sums->len);
4479 + if (!ret)
4480 +@@ -3866,6 +3867,28 @@ static int log_inode_item(struct btrfs_trans_handle *trans,
4481 + return 0;
4482 + }
4483 +
4484 ++static int log_csums(struct btrfs_trans_handle *trans,
4485 ++ struct btrfs_root *log_root,
4486 ++ struct btrfs_ordered_sum *sums)
4487 ++{
4488 ++ int ret;
4489 ++
4490 ++ /*
4491 ++ * Due to extent cloning, we might have logged a csum item that covers a
4492 ++ * subrange of a cloned extent, and later we can end up logging a csum
4493 ++ * item for a larger subrange of the same extent or the entire range.
4494 ++ * This would leave csum items in the log tree that cover the same range
4495 ++ * and break the searches for checksums in the log tree, resulting in
4496 ++ * some checksums missing in the fs/subvolume tree. So just delete (or
4497 ++ * trim and adjust) any existing csum items in the log for this range.
4498 ++ */
4499 ++ ret = btrfs_del_csums(trans, log_root, sums->bytenr, sums->len);
4500 ++ if (ret)
4501 ++ return ret;
4502 ++
4503 ++ return btrfs_csum_file_blocks(trans, log_root, sums);
4504 ++}
4505 ++
4506 + static noinline int copy_items(struct btrfs_trans_handle *trans,
4507 + struct btrfs_inode *inode,
4508 + struct btrfs_path *dst_path,
4509 +@@ -4011,7 +4034,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
4510 + struct btrfs_ordered_sum,
4511 + list);
4512 + if (!ret)
4513 +- ret = btrfs_csum_file_blocks(trans, log, sums);
4514 ++ ret = log_csums(trans, log, sums);
4515 + list_del(&sums->list);
4516 + kfree(sums);
4517 + }
4518 +@@ -4231,7 +4254,7 @@ static int log_extent_csums(struct btrfs_trans_handle *trans,
4519 + struct btrfs_ordered_sum,
4520 + list);
4521 + if (!ret)
4522 +- ret = btrfs_csum_file_blocks(trans, log_root, sums);
4523 ++ ret = log_csums(trans, log_root, sums);
4524 + list_del(&sums->list);
4525 + kfree(sums);
4526 + }
4527 +@@ -5997,9 +6020,28 @@ again:
4528 + wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
4529 + if (IS_ERR(wc.replay_dest)) {
4530 + ret = PTR_ERR(wc.replay_dest);
4531 ++
4532 ++ /*
4533 ++ * We didn't find the subvol, likely because it was
4534 ++ * deleted. This is ok, simply skip this log and go to
4535 ++ * the next one.
4536 ++ *
4537 ++ * We need to exclude the root because we can't have
4538 ++ * other log replays overwriting this log as we'll read
4539 ++ * it back in a few more times. This will keep our
4540 ++ * block from being modified, and we'll just bail for
4541 ++ * each subsequent pass.
4542 ++ */
4543 ++ if (ret == -ENOENT)
4544 ++ ret = btrfs_pin_extent_for_log_replay(fs_info,
4545 ++ log->node->start,
4546 ++ log->node->len);
4547 + free_extent_buffer(log->node);
4548 + free_extent_buffer(log->commit_root);
4549 + kfree(log);
4550 ++
4551 ++ if (!ret)
4552 ++ goto next;
4553 + btrfs_handle_fs_error(fs_info, ret,
4554 + "Couldn't read target root for tree log recovery.");
4555 + goto error;
4556 +@@ -6031,7 +6073,6 @@ again:
4557 + &root->highest_objectid);
4558 + }
4559 +
4560 +- key.offset = found_key.offset - 1;
4561 + wc.replay_dest->log_root = NULL;
4562 + free_extent_buffer(log->node);
4563 + free_extent_buffer(log->commit_root);
4564 +@@ -6039,9 +6080,10 @@ again:
4565 +
4566 + if (ret)
4567 + goto error;
4568 +-
4569 ++next:
4570 + if (found_key.offset == 0)
4571 + break;
4572 ++ key.offset = found_key.offset - 1;
4573 + }
4574 + btrfs_release_path(path);
4575 +
4576 +diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
4577 +index 3b2ae342e649..5bbb977b5189 100644
4578 +--- a/fs/btrfs/uuid-tree.c
4579 ++++ b/fs/btrfs/uuid-tree.c
4580 +@@ -324,6 +324,8 @@ again_search_slot:
4581 + }
4582 + if (ret < 0 && ret != -ENOENT)
4583 + goto out;
4584 ++ key.offset++;
4585 ++ goto again_search_slot;
4586 + }
4587 + item_size -= sizeof(subid_le);
4588 + offset += sizeof(subid_le);
4589 +diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
4590 +index 46d5c40f2835..d947c5e439cf 100644
4591 +--- a/fs/ext4/dir.c
4592 ++++ b/fs/ext4/dir.c
4593 +@@ -77,6 +77,11 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
4594 + error_msg = "rec_len is too small for name_len";
4595 + else if (unlikely(((char *) de - buf) + rlen > size))
4596 + error_msg = "directory entry overrun";
4597 ++ else if (unlikely(((char *) de - buf) + rlen >
4598 ++ size - EXT4_DIR_REC_LEN(1) &&
4599 ++ ((char *) de - buf) + rlen != size)) {
4600 ++ error_msg = "directory entry too close to block end";
4601 ++ }
4602 + else if (unlikely(le32_to_cpu(de->inode) >
4603 + le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count)))
4604 + error_msg = "inode out of bounds";
4605 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
4606 +index ab16b7c6068c..215802cbc42b 100644
4607 +--- a/fs/ext4/inode.c
4608 ++++ b/fs/ext4/inode.c
4609 +@@ -6027,7 +6027,7 @@ int ext4_expand_extra_isize(struct inode *inode,
4610 + error = ext4_journal_get_write_access(handle, iloc->bh);
4611 + if (error) {
4612 + brelse(iloc->bh);
4613 +- goto out_stop;
4614 ++ goto out_unlock;
4615 + }
4616 +
4617 + error = __ext4_expand_extra_isize(inode, new_extra_isize, iloc,
4618 +@@ -6037,8 +6037,8 @@ int ext4_expand_extra_isize(struct inode *inode,
4619 + if (!error)
4620 + error = rc;
4621 +
4622 ++out_unlock:
4623 + ext4_write_unlock_xattr(inode, &no_expand);
4624 +-out_stop:
4625 + ext4_journal_stop(handle);
4626 + return error;
4627 + }
4628 +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
4629 +index f56d6f1950b9..43dcb91d63f4 100644
4630 +--- a/fs/ext4/namei.c
4631 ++++ b/fs/ext4/namei.c
4632 +@@ -2693,7 +2693,7 @@ bool ext4_empty_dir(struct inode *inode)
4633 + {
4634 + unsigned int offset;
4635 + struct buffer_head *bh;
4636 +- struct ext4_dir_entry_2 *de, *de1;
4637 ++ struct ext4_dir_entry_2 *de;
4638 + struct super_block *sb;
4639 +
4640 + if (ext4_has_inline_data(inode)) {
4641 +@@ -2718,19 +2718,25 @@ bool ext4_empty_dir(struct inode *inode)
4642 + return true;
4643 +
4644 + de = (struct ext4_dir_entry_2 *) bh->b_data;
4645 +- de1 = ext4_next_entry(de, sb->s_blocksize);
4646 +- if (le32_to_cpu(de->inode) != inode->i_ino ||
4647 +- le32_to_cpu(de1->inode) == 0 ||
4648 +- strcmp(".", de->name) || strcmp("..", de1->name)) {
4649 +- ext4_warning_inode(inode, "directory missing '.' and/or '..'");
4650 ++ if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size,
4651 ++ 0) ||
4652 ++ le32_to_cpu(de->inode) != inode->i_ino || strcmp(".", de->name)) {
4653 ++ ext4_warning_inode(inode, "directory missing '.'");
4654 ++ brelse(bh);
4655 ++ return true;
4656 ++ }
4657 ++ offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize);
4658 ++ de = ext4_next_entry(de, sb->s_blocksize);
4659 ++ if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size,
4660 ++ offset) ||
4661 ++ le32_to_cpu(de->inode) == 0 || strcmp("..", de->name)) {
4662 ++ ext4_warning_inode(inode, "directory missing '..'");
4663 + brelse(bh);
4664 + return true;
4665 + }
4666 +- offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize) +
4667 +- ext4_rec_len_from_disk(de1->rec_len, sb->s_blocksize);
4668 +- de = ext4_next_entry(de1, sb->s_blocksize);
4669 ++ offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize);
4670 + while (offset < inode->i_size) {
4671 +- if ((void *) de >= (void *) (bh->b_data+sb->s_blocksize)) {
4672 ++ if (!(offset & (sb->s_blocksize - 1))) {
4673 + unsigned int lblock;
4674 + brelse(bh);
4675 + lblock = offset >> EXT4_BLOCK_SIZE_BITS(sb);
4676 +@@ -2741,12 +2747,11 @@ bool ext4_empty_dir(struct inode *inode)
4677 + }
4678 + if (IS_ERR(bh))
4679 + return true;
4680 +- de = (struct ext4_dir_entry_2 *) bh->b_data;
4681 + }
4682 ++ de = (struct ext4_dir_entry_2 *) (bh->b_data +
4683 ++ (offset & (sb->s_blocksize - 1)));
4684 + if (ext4_check_dir_entry(inode, NULL, de, bh,
4685 + bh->b_data, bh->b_size, offset)) {
4686 +- de = (struct ext4_dir_entry_2 *)(bh->b_data +
4687 +- sb->s_blocksize);
4688 + offset = (offset | (sb->s_blocksize - 1)) + 1;
4689 + continue;
4690 + }
4691 +@@ -2755,7 +2760,6 @@ bool ext4_empty_dir(struct inode *inode)
4692 + return false;
4693 + }
4694 + offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize);
4695 +- de = ext4_next_entry(de, sb->s_blocksize);
4696 + }
4697 + brelse(bh);
4698 + return true;
4699 +diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
4700 +index 7f78d26a0766..0f7439f0bb2f 100644
4701 +--- a/include/drm/drm_dp_mst_helper.h
4702 ++++ b/include/drm/drm_dp_mst_helper.h
4703 +@@ -313,7 +313,7 @@ struct drm_dp_resource_status_notify {
4704 +
4705 + struct drm_dp_query_payload_ack_reply {
4706 + u8 port_number;
4707 +- u8 allocated_pbn;
4708 ++ u16 allocated_pbn;
4709 + };
4710 +
4711 + struct drm_dp_sideband_msg_req_body {
4712 +diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
4713 +index 72f59e8321e7..3361663144a1 100644
4714 +--- a/include/linux/cpufreq.h
4715 ++++ b/include/linux/cpufreq.h
4716 +@@ -563,17 +563,6 @@ struct governor_attr {
4717 + size_t count);
4718 + };
4719 +
4720 +-static inline bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy)
4721 +-{
4722 +- /*
4723 +- * Allow remote callbacks if:
4724 +- * - dvfs_possible_from_any_cpu flag is set
4725 +- * - the local and remote CPUs share cpufreq policy
4726 +- */
4727 +- return policy->dvfs_possible_from_any_cpu ||
4728 +- cpumask_test_cpu(smp_processor_id(), policy->cpus);
4729 +-}
4730 +-
4731 + /*********************************************************************
4732 + * FREQUENCY TABLE HELPERS *
4733 + *********************************************************************/
4734 +diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
4735 +index 7d5fd38d5282..1995ce146789 100644
4736 +--- a/include/linux/ipmi_smi.h
4737 ++++ b/include/linux/ipmi_smi.h
4738 +@@ -211,10 +211,14 @@ static inline int ipmi_demangle_device_id(uint8_t netfn, uint8_t cmd,
4739 + * is called, and the lower layer must get the interface from that
4740 + * call.
4741 + */
4742 +-int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
4743 +- void *send_info,
4744 +- struct device *dev,
4745 +- unsigned char slave_addr);
4746 ++int ipmi_add_smi(struct module *owner,
4747 ++ const struct ipmi_smi_handlers *handlers,
4748 ++ void *send_info,
4749 ++ struct device *dev,
4750 ++ unsigned char slave_addr);
4751 ++
4752 ++#define ipmi_register_smi(handlers, send_info, dev, slave_addr) \
4753 ++ ipmi_add_smi(THIS_MODULE, handlers, send_info, dev, slave_addr)
4754 +
4755 + /*
4756 + * Remove a low-level interface from the IPMI driver. This will
4757 +diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
4758 +index 3247a3dc7934..b06b75776a32 100644
4759 +--- a/include/linux/miscdevice.h
4760 ++++ b/include/linux/miscdevice.h
4761 +@@ -57,6 +57,7 @@
4762 + #define UHID_MINOR 239
4763 + #define USERIO_MINOR 240
4764 + #define VHOST_VSOCK_MINOR 241
4765 ++#define RFKILL_MINOR 242
4766 + #define MISC_DYNAMIC_MINOR 255
4767 +
4768 + struct device;
4769 +diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
4770 +index 01797cb4587e..867db9b9384c 100644
4771 +--- a/include/linux/mod_devicetable.h
4772 ++++ b/include/linux/mod_devicetable.h
4773 +@@ -551,9 +551,9 @@ struct platform_device_id {
4774 + #define MDIO_NAME_SIZE 32
4775 + #define MDIO_MODULE_PREFIX "mdio:"
4776 +
4777 +-#define MDIO_ID_FMT "%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d"
4778 ++#define MDIO_ID_FMT "%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u"
4779 + #define MDIO_ID_ARGS(_id) \
4780 +- (_id)>>31, ((_id)>>30) & 1, ((_id)>>29) & 1, ((_id)>>28) & 1, \
4781 ++ ((_id)>>31) & 1, ((_id)>>30) & 1, ((_id)>>29) & 1, ((_id)>>28) & 1, \
4782 + ((_id)>>27) & 1, ((_id)>>26) & 1, ((_id)>>25) & 1, ((_id)>>24) & 1, \
4783 + ((_id)>>23) & 1, ((_id)>>22) & 1, ((_id)>>21) & 1, ((_id)>>20) & 1, \
4784 + ((_id)>>19) & 1, ((_id)>>18) & 1, ((_id)>>17) & 1, ((_id)>>16) & 1, \
4785 +diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h
4786 +index 59667444669f..a4530d782353 100644
4787 +--- a/include/linux/sched/cpufreq.h
4788 ++++ b/include/linux/sched/cpufreq.h
4789 +@@ -12,6 +12,8 @@
4790 + #define SCHED_CPUFREQ_MIGRATION (1U << 1)
4791 +
4792 + #ifdef CONFIG_CPU_FREQ
4793 ++struct cpufreq_policy;
4794 ++
4795 + struct update_util_data {
4796 + void (*func)(struct update_util_data *data, u64 time, unsigned int flags);
4797 + };
4798 +@@ -20,6 +22,7 @@ void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
4799 + void (*func)(struct update_util_data *data, u64 time,
4800 + unsigned int flags));
4801 + void cpufreq_remove_update_util_hook(int cpu);
4802 ++bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy);
4803 + #endif /* CONFIG_CPU_FREQ */
4804 +
4805 + #endif /* _LINUX_SCHED_CPUFREQ_H */
4806 +diff --git a/include/net/dst.h b/include/net/dst.h
4807 +index ffc8ee0ea5e5..851cf1124240 100644
4808 +--- a/include/net/dst.h
4809 ++++ b/include/net/dst.h
4810 +@@ -93,7 +93,7 @@ struct dst_entry {
4811 + struct dst_metrics {
4812 + u32 metrics[RTAX_MAX];
4813 + refcount_t refcnt;
4814 +-};
4815 ++} __aligned(4); /* Low pointer bits contain DST_METRICS_FLAGS */
4816 + extern const struct dst_metrics dst_default_metrics;
4817 +
4818 + u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
4819 +diff --git a/include/trace/events/wbt.h b/include/trace/events/wbt.h
4820 +index b048694070e2..37342a13c9cb 100644
4821 +--- a/include/trace/events/wbt.h
4822 ++++ b/include/trace/events/wbt.h
4823 +@@ -33,7 +33,8 @@ TRACE_EVENT(wbt_stat,
4824 + ),
4825 +
4826 + TP_fast_assign(
4827 +- strncpy(__entry->name, dev_name(bdi->dev), 32);
4828 ++ strlcpy(__entry->name, dev_name(bdi->dev),
4829 ++ ARRAY_SIZE(__entry->name));
4830 + __entry->rmean = stat[0].mean;
4831 + __entry->rmin = stat[0].min;
4832 + __entry->rmax = stat[0].max;
4833 +@@ -67,7 +68,8 @@ TRACE_EVENT(wbt_lat,
4834 + ),
4835 +
4836 + TP_fast_assign(
4837 +- strncpy(__entry->name, dev_name(bdi->dev), 32);
4838 ++ strlcpy(__entry->name, dev_name(bdi->dev),
4839 ++ ARRAY_SIZE(__entry->name));
4840 + __entry->lat = div_u64(lat, 1000);
4841 + ),
4842 +
4843 +@@ -103,7 +105,8 @@ TRACE_EVENT(wbt_step,
4844 + ),
4845 +
4846 + TP_fast_assign(
4847 +- strncpy(__entry->name, dev_name(bdi->dev), 32);
4848 ++ strlcpy(__entry->name, dev_name(bdi->dev),
4849 ++ ARRAY_SIZE(__entry->name));
4850 + __entry->msg = msg;
4851 + __entry->step = step;
4852 + __entry->window = div_u64(window, 1000);
4853 +@@ -138,7 +141,8 @@ TRACE_EVENT(wbt_timer,
4854 + ),
4855 +
4856 + TP_fast_assign(
4857 +- strncpy(__entry->name, dev_name(bdi->dev), 32);
4858 ++ strlcpy(__entry->name, dev_name(bdi->dev),
4859 ++ ARRAY_SIZE(__entry->name));
4860 + __entry->status = status;
4861 + __entry->step = step;
4862 + __entry->inflight = inflight;
4863 +diff --git a/include/uapi/linux/cec-funcs.h b/include/uapi/linux/cec-funcs.h
4864 +index 8997d5068c08..4511b85c84df 100644
4865 +--- a/include/uapi/linux/cec-funcs.h
4866 ++++ b/include/uapi/linux/cec-funcs.h
4867 +@@ -923,7 +923,8 @@ static inline void cec_msg_give_deck_status(struct cec_msg *msg,
4868 + msg->len = 3;
4869 + msg->msg[1] = CEC_MSG_GIVE_DECK_STATUS;
4870 + msg->msg[2] = status_req;
4871 +- msg->reply = reply ? CEC_MSG_DECK_STATUS : 0;
4872 ++ msg->reply = (reply && status_req != CEC_OP_STATUS_REQ_OFF) ?
4873 ++ CEC_MSG_DECK_STATUS : 0;
4874 + }
4875 +
4876 + static inline void cec_ops_give_deck_status(const struct cec_msg *msg,
4877 +@@ -1027,7 +1028,8 @@ static inline void cec_msg_give_tuner_device_status(struct cec_msg *msg,
4878 + msg->len = 3;
4879 + msg->msg[1] = CEC_MSG_GIVE_TUNER_DEVICE_STATUS;
4880 + msg->msg[2] = status_req;
4881 +- msg->reply = reply ? CEC_MSG_TUNER_DEVICE_STATUS : 0;
4882 ++ msg->reply = (reply && status_req != CEC_OP_STATUS_REQ_OFF) ?
4883 ++ CEC_MSG_TUNER_DEVICE_STATUS : 0;
4884 + }
4885 +
4886 + static inline void cec_ops_give_tuner_device_status(const struct cec_msg *msg,
4887 +diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
4888 +index 7cb7a7f98a37..55fff5e6d983 100644
4889 +--- a/kernel/bpf/stackmap.c
4890 ++++ b/kernel/bpf/stackmap.c
4891 +@@ -292,7 +292,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
4892 + bool irq_work_busy = false;
4893 + struct stack_map_irq_work *work = NULL;
4894 +
4895 +- if (in_nmi()) {
4896 ++ if (irqs_disabled()) {
4897 + work = this_cpu_ptr(&up_read_work);
4898 + if (work->irq_work.flags & IRQ_WORK_BUSY)
4899 + /* cannot queue more up_read, fallback */
4900 +@@ -300,8 +300,9 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
4901 + }
4902 +
4903 + /*
4904 +- * We cannot do up_read() in nmi context. To do build_id lookup
4905 +- * in nmi context, we need to run up_read() in irq_work. We use
4906 ++ * We cannot do up_read() when the irq is disabled, because of
4907 ++ * risk to deadlock with rq_lock. To do build_id lookup when the
4908 ++ * irqs are disabled, we need to run up_read() in irq_work. We use
4909 + * a percpu variable to do the irq_work. If the irq_work is
4910 + * already used by another lookup, we fall back to report ips.
4911 + *
4912 +diff --git a/kernel/sched/cpufreq.c b/kernel/sched/cpufreq.c
4913 +index 5e54cbcae673..42ce32a1abad 100644
4914 +--- a/kernel/sched/cpufreq.c
4915 ++++ b/kernel/sched/cpufreq.c
4916 +@@ -8,6 +8,8 @@
4917 + * it under the terms of the GNU General Public License version 2 as
4918 + * published by the Free Software Foundation.
4919 + */
4920 ++#include <linux/cpufreq.h>
4921 ++
4922 + #include "sched.h"
4923 +
4924 + DEFINE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
4925 +@@ -60,3 +62,19 @@ void cpufreq_remove_update_util_hook(int cpu)
4926 + rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), NULL);
4927 + }
4928 + EXPORT_SYMBOL_GPL(cpufreq_remove_update_util_hook);
4929 ++
4930 ++/**
4931 ++ * cpufreq_this_cpu_can_update - Check if cpufreq policy can be updated.
4932 ++ * @policy: cpufreq policy to check.
4933 ++ *
4934 ++ * Return 'true' if:
4935 ++ * - the local and remote CPUs share @policy,
4936 ++ * - dvfs_possible_from_any_cpu is set in @policy and the local CPU is not going
4937 ++ * offline (in which case it is not expected to run cpufreq updates any more).
4938 ++ */
4939 ++bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy)
4940 ++{
4941 ++ return cpumask_test_cpu(smp_processor_id(), policy->cpus) ||
4942 ++ (policy->dvfs_possible_from_any_cpu &&
4943 ++ rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data)));
4944 ++}
4945 +diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
4946 +index 54fcff656ecd..1b7ec822dc75 100644
4947 +--- a/kernel/sched/cpufreq_schedutil.c
4948 ++++ b/kernel/sched/cpufreq_schedutil.c
4949 +@@ -83,12 +83,10 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
4950 + * by the hardware, as calculating the frequency is pointless if
4951 + * we cannot in fact act on it.
4952 + *
4953 +- * For the slow switching platforms, the kthread is always scheduled on
4954 +- * the right set of CPUs and any CPU can find the next frequency and
4955 +- * schedule the kthread.
4956 ++ * This is needed on the slow switching platforms too to prevent CPUs
4957 ++ * going offline from leaving stale IRQ work items behind.
4958 + */
4959 +- if (sg_policy->policy->fast_switch_enabled &&
4960 +- !cpufreq_this_cpu_can_update(sg_policy->policy))
4961 ++ if (!cpufreq_this_cpu_can_update(sg_policy->policy))
4962 + return false;
4963 +
4964 + if (unlikely(sg_policy->limits_changed)) {
4965 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
4966 +index bdd7f3d78724..b6ff2f84df17 100644
4967 +--- a/kernel/trace/trace.c
4968 ++++ b/kernel/trace/trace.c
4969 +@@ -4389,7 +4389,7 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
4970 +
4971 + if (mask == TRACE_ITER_RECORD_TGID) {
4972 + if (!tgid_map)
4973 +- tgid_map = kcalloc(PID_MAX_DEFAULT + 1,
4974 ++ tgid_map = kvcalloc(PID_MAX_DEFAULT + 1,
4975 + sizeof(*tgid_map),
4976 + GFP_KERNEL);
4977 + if (!tgid_map) {
4978 +diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
4979 +index f9a0cd094b81..c61b2b0a99e9 100644
4980 +--- a/kernel/trace/trace_kprobe.c
4981 ++++ b/kernel/trace/trace_kprobe.c
4982 +@@ -519,11 +519,10 @@ disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
4983 +
4984 + #if defined(CONFIG_KPROBES_ON_FTRACE) && \
4985 + !defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
4986 +-static bool within_notrace_func(struct trace_kprobe *tk)
4987 ++static bool __within_notrace_func(unsigned long addr)
4988 + {
4989 +- unsigned long offset, size, addr;
4990 ++ unsigned long offset, size;
4991 +
4992 +- addr = trace_kprobe_address(tk);
4993 + if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
4994 + return false;
4995 +
4996 +@@ -536,6 +535,28 @@ static bool within_notrace_func(struct trace_kprobe *tk)
4997 + */
4998 + return !ftrace_location_range(addr, addr + size - 1);
4999 + }
5000 ++
5001 ++static bool within_notrace_func(struct trace_kprobe *tk)
5002 ++{
5003 ++ unsigned long addr = addr = trace_kprobe_address(tk);
5004 ++ char symname[KSYM_NAME_LEN], *p;
5005 ++
5006 ++ if (!__within_notrace_func(addr))
5007 ++ return false;
5008 ++
5009 ++ /* Check if the address is on a suffixed-symbol */
5010 ++ if (!lookup_symbol_name(addr, symname)) {
5011 ++ p = strchr(symname, '.');
5012 ++ if (!p)
5013 ++ return true;
5014 ++ *p = '\0';
5015 ++ addr = (unsigned long)kprobe_lookup_name(symname, 0);
5016 ++ if (addr)
5017 ++ return __within_notrace_func(addr);
5018 ++ }
5019 ++
5020 ++ return true;
5021 ++}
5022 + #else
5023 + #define within_notrace_func(tk) (false)
5024 + #endif
5025 +diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
5026 +index 15d1cb5aee18..f5c27065ad44 100644
5027 +--- a/net/bluetooth/hci_conn.c
5028 ++++ b/net/bluetooth/hci_conn.c
5029 +@@ -931,6 +931,14 @@ static void hci_req_directed_advertising(struct hci_request *req,
5030 + return;
5031 +
5032 + memset(&cp, 0, sizeof(cp));
5033 ++
5034 ++ /* Some controllers might reject command if intervals are not
5035 ++ * within range for undirected advertising.
5036 ++ * BCM20702A0 is known to be affected by this.
5037 ++ */
5038 ++ cp.min_interval = cpu_to_le16(0x0020);
5039 ++ cp.max_interval = cpu_to_le16(0x0020);
5040 ++
5041 + cp.type = LE_ADV_DIRECT_IND;
5042 + cp.own_address_type = own_addr_type;
5043 + cp.direct_addr_type = conn->dst_type;
5044 +diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
5045 +index 5afd67ef797a..e03faca84919 100644
5046 +--- a/net/bluetooth/hci_core.c
5047 ++++ b/net/bluetooth/hci_core.c
5048 +@@ -841,8 +841,8 @@ static int hci_init4_req(struct hci_request *req, unsigned long opt)
5049 + if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
5050 + struct hci_cp_le_write_def_data_len cp;
5051 +
5052 +- cp.tx_len = hdev->le_max_tx_len;
5053 +- cp.tx_time = hdev->le_max_tx_time;
5054 ++ cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
5055 ++ cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
5056 + hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
5057 + }
5058 +
5059 +@@ -4330,7 +4330,14 @@ static void hci_rx_work(struct work_struct *work)
5060 + hci_send_to_sock(hdev, skb);
5061 + }
5062 +
5063 +- if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
5064 ++ /* If the device has been opened in HCI_USER_CHANNEL,
5065 ++ * the userspace has exclusive access to device.
5066 ++ * When device is HCI_INIT, we still need to process
5067 ++ * the data packets to the driver in order
5068 ++ * to complete its setup().
5069 ++ */
5070 ++ if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
5071 ++ !test_bit(HCI_INIT, &hdev->flags)) {
5072 + kfree_skb(skb);
5073 + continue;
5074 + }
5075 +diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
5076 +index 9448ebd3780a..a8ddd211e94c 100644
5077 +--- a/net/bluetooth/hci_request.c
5078 ++++ b/net/bluetooth/hci_request.c
5079 +@@ -1258,6 +1258,14 @@ static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
5080 +
5081 + instance_flags = get_adv_instance_flags(hdev, instance);
5082 +
5083 ++ /* If instance already has the flags set skip adding it once
5084 ++ * again.
5085 ++ */
5086 ++ if (adv_instance && eir_get_data(adv_instance->adv_data,
5087 ++ adv_instance->adv_data_len, EIR_FLAGS,
5088 ++ NULL))
5089 ++ goto skip_flags;
5090 ++
5091 + /* The Add Advertising command allows userspace to set both the general
5092 + * and limited discoverable flags.
5093 + */
5094 +@@ -1290,6 +1298,7 @@ static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
5095 + }
5096 + }
5097 +
5098 ++skip_flags:
5099 + if (adv_instance) {
5100 + memcpy(ptr, adv_instance->adv_data,
5101 + adv_instance->adv_data_len);
5102 +diff --git a/net/mac80211/status.c b/net/mac80211/status.c
5103 +index 534a604b75c2..f895c656407b 100644
5104 +--- a/net/mac80211/status.c
5105 ++++ b/net/mac80211/status.c
5106 +@@ -867,7 +867,8 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
5107 + I802_DEBUG_INC(local->dot11FailedCount);
5108 + }
5109 +
5110 +- if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc) &&
5111 ++ if ((ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
5112 ++ ieee80211_has_pm(fc) &&
5113 + ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS) &&
5114 + !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
5115 + local->ps_sdata && !(local->scanning)) {
5116 +diff --git a/net/nfc/nci/uart.c b/net/nfc/nci/uart.c
5117 +index a66f102c6c01..040576dd73bb 100644
5118 +--- a/net/nfc/nci/uart.c
5119 ++++ b/net/nfc/nci/uart.c
5120 +@@ -348,7 +348,7 @@ static int nci_uart_default_recv_buf(struct nci_uart *nu, const u8 *data,
5121 + nu->rx_packet_len = -1;
5122 + nu->rx_skb = nci_skb_alloc(nu->ndev,
5123 + NCI_MAX_PACKET_SIZE,
5124 +- GFP_KERNEL);
5125 ++ GFP_ATOMIC);
5126 + if (!nu->rx_skb)
5127 + return -ENOMEM;
5128 + }
5129 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
5130 +index 7204e7bbebb0..ac65e66d1d72 100644
5131 +--- a/net/packet/af_packet.c
5132 ++++ b/net/packet/af_packet.c
5133 +@@ -552,7 +552,8 @@ static int prb_calc_retire_blk_tmo(struct packet_sock *po,
5134 + msec = 1;
5135 + div = ecmd.base.speed / 1000;
5136 + }
5137 +- }
5138 ++ } else
5139 ++ return DEFAULT_PRB_RETIRE_TOV;
5140 +
5141 + mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
5142 +
5143 +diff --git a/net/rfkill/core.c b/net/rfkill/core.c
5144 +index 1355f5ca8d22..7fbc8314f626 100644
5145 +--- a/net/rfkill/core.c
5146 ++++ b/net/rfkill/core.c
5147 +@@ -1328,10 +1328,12 @@ static const struct file_operations rfkill_fops = {
5148 + .llseek = no_llseek,
5149 + };
5150 +
5151 ++#define RFKILL_NAME "rfkill"
5152 ++
5153 + static struct miscdevice rfkill_miscdev = {
5154 +- .name = "rfkill",
5155 + .fops = &rfkill_fops,
5156 +- .minor = MISC_DYNAMIC_MINOR,
5157 ++ .name = RFKILL_NAME,
5158 ++ .minor = RFKILL_MINOR,
5159 + };
5160 +
5161 + static int __init rfkill_init(void)
5162 +@@ -1383,3 +1385,6 @@ static void __exit rfkill_exit(void)
5163 + class_unregister(&rfkill_class);
5164 + }
5165 + module_exit(rfkill_exit);
5166 ++
5167 ++MODULE_ALIAS_MISCDEV(RFKILL_MINOR);
5168 ++MODULE_ALIAS("devname:" RFKILL_NAME);
5169 +diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
5170 +index 6d36f74ad295..269b528e50b9 100644
5171 +--- a/net/sctp/protocol.c
5172 ++++ b/net/sctp/protocol.c
5173 +@@ -242,6 +242,7 @@ static void sctp_v4_from_skb(union sctp_addr *addr, struct sk_buff *skb,
5174 + sa->sin_port = sh->dest;
5175 + sa->sin_addr.s_addr = ip_hdr(skb)->daddr;
5176 + }
5177 ++ memset(sa->sin_zero, 0, sizeof(sa->sin_zero));
5178 + }
5179 +
5180 + /* Initialize an sctp_addr from a socket. */
5181 +@@ -250,6 +251,7 @@ static void sctp_v4_from_sk(union sctp_addr *addr, struct sock *sk)
5182 + addr->v4.sin_family = AF_INET;
5183 + addr->v4.sin_port = 0;
5184 + addr->v4.sin_addr.s_addr = inet_sk(sk)->inet_rcv_saddr;
5185 ++ memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
5186 + }
5187 +
5188 + /* Initialize sk->sk_rcv_saddr from sctp_addr. */
5189 +@@ -272,6 +274,7 @@ static void sctp_v4_from_addr_param(union sctp_addr *addr,
5190 + addr->v4.sin_family = AF_INET;
5191 + addr->v4.sin_port = port;
5192 + addr->v4.sin_addr.s_addr = param->v4.addr.s_addr;
5193 ++ memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
5194 + }
5195 +
5196 + /* Initialize an address parameter from a sctp_addr and return the length
5197 +@@ -296,6 +299,7 @@ static void sctp_v4_dst_saddr(union sctp_addr *saddr, struct flowi4 *fl4,
5198 + saddr->v4.sin_family = AF_INET;
5199 + saddr->v4.sin_port = port;
5200 + saddr->v4.sin_addr.s_addr = fl4->saddr;
5201 ++ memset(saddr->v4.sin_zero, 0, sizeof(saddr->v4.sin_zero));
5202 + }
5203 +
5204 + /* Compare two addresses exactly. */
5205 +@@ -318,6 +322,7 @@ static void sctp_v4_inaddr_any(union sctp_addr *addr, __be16 port)
5206 + addr->v4.sin_family = AF_INET;
5207 + addr->v4.sin_addr.s_addr = htonl(INADDR_ANY);
5208 + addr->v4.sin_port = port;
5209 ++ memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
5210 + }
5211 +
5212 + /* Is this a wildcard address? */
5213 +diff --git a/samples/pktgen/functions.sh b/samples/pktgen/functions.sh
5214 +index f8bb3cd0f4ce..7d928571b25c 100644
5215 +--- a/samples/pktgen/functions.sh
5216 ++++ b/samples/pktgen/functions.sh
5217 +@@ -5,6 +5,8 @@
5218 + # Author: Jesper Dangaaard Brouer
5219 + # License: GPL
5220 +
5221 ++set -o errexit
5222 ++
5223 + ## -- General shell logging cmds --
5224 + function err() {
5225 + local exitcode=$1
5226 +@@ -58,6 +60,7 @@ function pg_set() {
5227 + function proc_cmd() {
5228 + local result
5229 + local proc_file=$1
5230 ++ local status=0
5231 + # after shift, the remaining args are contained in $@
5232 + shift
5233 + local proc_ctrl=${PROC_DIR}/$proc_file
5234 +@@ -73,13 +76,13 @@ function proc_cmd() {
5235 + echo "cmd: $@ > $proc_ctrl"
5236 + fi
5237 + # Quoting of "$@" is important for space expansion
5238 +- echo "$@" > "$proc_ctrl"
5239 +- local status=$?
5240 ++ echo "$@" > "$proc_ctrl" || status=$?
5241 +
5242 +- result=$(grep "Result: OK:" $proc_ctrl)
5243 +- # Due to pgctrl, cannot use exit code $? from grep
5244 +- if [[ "$result" == "" ]]; then
5245 +- grep "Result:" $proc_ctrl >&2
5246 ++ if [[ "$proc_file" != "pgctrl" ]]; then
5247 ++ result=$(grep "Result: OK:" $proc_ctrl) || true
5248 ++ if [[ "$result" == "" ]]; then
5249 ++ grep "Result:" $proc_ctrl >&2
5250 ++ fi
5251 + fi
5252 + if (( $status != 0 )); then
5253 + err 5 "Write error($status) occurred cmd: \"$@ > $proc_ctrl\""
5254 +@@ -105,6 +108,8 @@ function pgset() {
5255 + fi
5256 + }
5257 +
5258 ++[[ $EUID -eq 0 ]] && trap 'pg_ctrl "reset"' EXIT
5259 ++
5260 + ## -- General shell tricks --
5261 +
5262 + function root_check_run_with_sudo() {
5263 +diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
5264 +index e08c6c6ca029..f03ceaff75b5 100644
5265 +--- a/sound/core/pcm_native.c
5266 ++++ b/sound/core/pcm_native.c
5267 +@@ -752,6 +752,10 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
5268 + while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size)
5269 + runtime->boundary *= 2;
5270 +
5271 ++ /* clear the buffer for avoiding possible kernel info leaks */
5272 ++ if (runtime->dma_area && !substream->ops->copy_user)
5273 ++ memset(runtime->dma_area, 0, runtime->dma_bytes);
5274 ++
5275 + snd_pcm_timer_resolution_change(substream);
5276 + snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);
5277 +
5278 +diff --git a/sound/core/timer.c b/sound/core/timer.c
5279 +index 86a31e69fc7d..b5dc51030316 100644
5280 +--- a/sound/core/timer.c
5281 ++++ b/sound/core/timer.c
5282 +@@ -88,6 +88,9 @@ static LIST_HEAD(snd_timer_slave_list);
5283 + /* lock for slave active lists */
5284 + static DEFINE_SPINLOCK(slave_active_lock);
5285 +
5286 ++#define MAX_SLAVE_INSTANCES 1000
5287 ++static int num_slaves;
5288 ++
5289 + static DEFINE_MUTEX(register_mutex);
5290 +
5291 + static int snd_timer_free(struct snd_timer *timer);
5292 +@@ -266,6 +269,10 @@ int snd_timer_open(struct snd_timer_instance **ti,
5293 + err = -EINVAL;
5294 + goto unlock;
5295 + }
5296 ++ if (num_slaves >= MAX_SLAVE_INSTANCES) {
5297 ++ err = -EBUSY;
5298 ++ goto unlock;
5299 ++ }
5300 + timeri = snd_timer_instance_new(owner, NULL);
5301 + if (!timeri) {
5302 + err = -ENOMEM;
5303 +@@ -275,6 +282,7 @@ int snd_timer_open(struct snd_timer_instance **ti,
5304 + timeri->slave_id = tid->device;
5305 + timeri->flags |= SNDRV_TIMER_IFLG_SLAVE;
5306 + list_add_tail(&timeri->open_list, &snd_timer_slave_list);
5307 ++ num_slaves++;
5308 + err = snd_timer_check_slave(timeri);
5309 + if (err < 0) {
5310 + snd_timer_close_locked(timeri, &card_dev_to_put);
5311 +@@ -364,6 +372,8 @@ static int snd_timer_close_locked(struct snd_timer_instance *timeri,
5312 + struct snd_timer_instance *slave, *tmp;
5313 +
5314 + list_del(&timeri->open_list);
5315 ++ if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
5316 ++ num_slaves--;
5317 +
5318 + /* force to stop the timer */
5319 + snd_timer_stop(timeri);
5320 +diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
5321 +index 6a9b89e05dae..bc4edc5607c7 100644
5322 +--- a/sound/pci/hda/patch_ca0132.c
5323 ++++ b/sound/pci/hda/patch_ca0132.c
5324 +@@ -1683,13 +1683,14 @@ struct scp_msg {
5325 +
5326 + static void dspio_clear_response_queue(struct hda_codec *codec)
5327 + {
5328 ++ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
5329 + unsigned int dummy = 0;
5330 +- int status = -1;
5331 ++ int status;
5332 +
5333 + /* clear all from the response queue */
5334 + do {
5335 + status = dspio_read(codec, &dummy);
5336 +- } while (status == 0);
5337 ++ } while (status == 0 && time_before(jiffies, timeout));
5338 + }
5339 +
5340 + static int dspio_get_response_data(struct hda_codec *codec)
5341 +@@ -6754,12 +6755,14 @@ static void ca0132_process_dsp_response(struct hda_codec *codec,
5342 + struct ca0132_spec *spec = codec->spec;
5343 +
5344 + codec_dbg(codec, "ca0132_process_dsp_response\n");
5345 ++ snd_hda_power_up_pm(codec);
5346 + if (spec->wait_scp) {
5347 + if (dspio_get_response_data(codec) >= 0)
5348 + spec->wait_scp = 0;
5349 + }
5350 +
5351 + dspio_clear_response_queue(codec);
5352 ++ snd_hda_power_down_pm(codec);
5353 + }
5354 +
5355 + static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
5356 +@@ -6770,11 +6773,10 @@ static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
5357 + /* Delay enabling the HP amp, to let the mic-detection
5358 + * state machine run.
5359 + */
5360 +- cancel_delayed_work(&spec->unsol_hp_work);
5361 +- schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500));
5362 + tbl = snd_hda_jack_tbl_get(codec, cb->nid);
5363 + if (tbl)
5364 + tbl->block_report = 1;
5365 ++ schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500));
5366 + }
5367 +
5368 + static void amic_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
5369 +@@ -7408,12 +7410,25 @@ static void ca0132_reboot_notify(struct hda_codec *codec)
5370 + codec->patch_ops.free(codec);
5371 + }
5372 +
5373 ++#ifdef CONFIG_PM
5374 ++static int ca0132_suspend(struct hda_codec *codec)
5375 ++{
5376 ++ struct ca0132_spec *spec = codec->spec;
5377 ++
5378 ++ cancel_delayed_work_sync(&spec->unsol_hp_work);
5379 ++ return 0;
5380 ++}
5381 ++#endif
5382 ++
5383 + static const struct hda_codec_ops ca0132_patch_ops = {
5384 + .build_controls = ca0132_build_controls,
5385 + .build_pcms = ca0132_build_pcms,
5386 + .init = ca0132_init,
5387 + .free = ca0132_free,
5388 + .unsol_event = snd_hda_jack_unsol_event,
5389 ++#ifdef CONFIG_PM
5390 ++ .suspend = ca0132_suspend,
5391 ++#endif
5392 + .reboot_notify = ca0132_reboot_notify,
5393 + };
5394 +
5395 +diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
5396 +index 9b7a1833d331..71b7b881df39 100644
5397 +--- a/sound/soc/codecs/rt5677.c
5398 ++++ b/sound/soc/codecs/rt5677.c
5399 +@@ -297,6 +297,7 @@ static bool rt5677_volatile_register(struct device *dev, unsigned int reg)
5400 + case RT5677_I2C_MASTER_CTRL7:
5401 + case RT5677_I2C_MASTER_CTRL8:
5402 + case RT5677_HAP_GENE_CTRL2:
5403 ++ case RT5677_PWR_ANLG2: /* Modified by DSP firmware */
5404 + case RT5677_PWR_DSP_ST:
5405 + case RT5677_PRIV_DATA:
5406 + case RT5677_ASRC_22:
5407 +diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c
5408 +index deff65161504..0a3b746fb909 100644
5409 +--- a/sound/soc/codecs/wm2200.c
5410 ++++ b/sound/soc/codecs/wm2200.c
5411 +@@ -2413,6 +2413,8 @@ static int wm2200_i2c_probe(struct i2c_client *i2c,
5412 +
5413 + err_pm_runtime:
5414 + pm_runtime_disable(&i2c->dev);
5415 ++ if (i2c->irq)
5416 ++ free_irq(i2c->irq, wm2200);
5417 + err_reset:
5418 + if (wm2200->pdata.reset)
5419 + gpio_set_value_cansleep(wm2200->pdata.reset, 0);
5420 +@@ -2429,12 +2431,15 @@ static int wm2200_i2c_remove(struct i2c_client *i2c)
5421 + {
5422 + struct wm2200_priv *wm2200 = i2c_get_clientdata(i2c);
5423 +
5424 ++ pm_runtime_disable(&i2c->dev);
5425 + if (i2c->irq)
5426 + free_irq(i2c->irq, wm2200);
5427 + if (wm2200->pdata.reset)
5428 + gpio_set_value_cansleep(wm2200->pdata.reset, 0);
5429 + if (wm2200->pdata.ldo_ena)
5430 + gpio_set_value_cansleep(wm2200->pdata.ldo_ena, 0);
5431 ++ regulator_bulk_disable(ARRAY_SIZE(wm2200->core_supplies),
5432 ++ wm2200->core_supplies);
5433 +
5434 + return 0;
5435 + }
5436 +diff --git a/sound/soc/codecs/wm5100.c b/sound/soc/codecs/wm5100.c
5437 +index ba89d9d711f7..b793701aafcd 100644
5438 +--- a/sound/soc/codecs/wm5100.c
5439 ++++ b/sound/soc/codecs/wm5100.c
5440 +@@ -2620,6 +2620,7 @@ static int wm5100_i2c_probe(struct i2c_client *i2c,
5441 + return ret;
5442 +
5443 + err_reset:
5444 ++ pm_runtime_disable(&i2c->dev);
5445 + if (i2c->irq)
5446 + free_irq(i2c->irq, wm5100);
5447 + wm5100_free_gpio(i2c);
5448 +@@ -2643,6 +2644,7 @@ static int wm5100_i2c_remove(struct i2c_client *i2c)
5449 + {
5450 + struct wm5100_priv *wm5100 = i2c_get_clientdata(i2c);
5451 +
5452 ++ pm_runtime_disable(&i2c->dev);
5453 + if (i2c->irq)
5454 + free_irq(i2c->irq, wm5100);
5455 + wm5100_free_gpio(i2c);
5456 +diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
5457 +index 1965635ec07c..d14e851b9160 100644
5458 +--- a/sound/soc/codecs/wm8904.c
5459 ++++ b/sound/soc/codecs/wm8904.c
5460 +@@ -1902,6 +1902,7 @@ static int wm8904_set_bias_level(struct snd_soc_component *component,
5461 + snd_soc_component_update_bits(component, WM8904_BIAS_CONTROL_0,
5462 + WM8904_BIAS_ENA, 0);
5463 +
5464 ++ snd_soc_component_write(component, WM8904_SW_RESET_AND_ID, 0);
5465 + regcache_cache_only(wm8904->regmap, true);
5466 + regcache_mark_dirty(wm8904->regmap);
5467 +
5468 +diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
5469 +index b6dc524830b2..6acd5dd599dc 100644
5470 +--- a/sound/soc/intel/boards/bytcr_rt5640.c
5471 ++++ b/sound/soc/intel/boards/bytcr_rt5640.c
5472 +@@ -414,10 +414,12 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
5473 + DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
5474 + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"),
5475 + },
5476 +- .driver_data = (void *)(BYT_RT5640_IN1_MAP |
5477 +- BYT_RT5640_MCLK_EN |
5478 +- BYT_RT5640_SSP0_AIF1),
5479 +-
5480 ++ .driver_data = (void *)(BYT_RT5640_DMIC1_MAP |
5481 ++ BYT_RT5640_JD_SRC_JD2_IN4N |
5482 ++ BYT_RT5640_OVCD_TH_2000UA |
5483 ++ BYT_RT5640_OVCD_SF_0P75 |
5484 ++ BYT_RT5640_SSP0_AIF1 |
5485 ++ BYT_RT5640_MCLK_EN),
5486 + },
5487 + {
5488 + .matches = {
5489 +diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
5490 +index b8a03f58ac8c..f36e33a14728 100644
5491 +--- a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
5492 ++++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
5493 +@@ -423,6 +423,9 @@ static int kabylake_dmic_startup(struct snd_pcm_substream *substream)
5494 + snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
5495 + dmic_constraints);
5496 +
5497 ++ runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
5498 ++ snd_pcm_hw_constraint_msbits(runtime, 0, 16, 16);
5499 ++
5500 + return snd_pcm_hw_constraint_list(substream->runtime, 0,
5501 + SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
5502 + }
5503 +diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
5504 +index a62be78fc07b..249fa8d7376e 100644
5505 +--- a/tools/lib/bpf/libbpf.c
5506 ++++ b/tools/lib/bpf/libbpf.c
5507 +@@ -1073,16 +1073,22 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd)
5508 + return -errno;
5509 +
5510 + new_fd = open("/", O_RDONLY | O_CLOEXEC);
5511 +- if (new_fd < 0)
5512 ++ if (new_fd < 0) {
5513 ++ err = -errno;
5514 + goto err_free_new_name;
5515 ++ }
5516 +
5517 + new_fd = dup3(fd, new_fd, O_CLOEXEC);
5518 +- if (new_fd < 0)
5519 ++ if (new_fd < 0) {
5520 ++ err = -errno;
5521 + goto err_close_new_fd;
5522 ++ }
5523 +
5524 + err = zclose(map->fd);
5525 +- if (err)
5526 ++ if (err) {
5527 ++ err = -errno;
5528 + goto err_close_new_fd;
5529 ++ }
5530 + free(map->name);
5531 +
5532 + map->fd = new_fd;
5533 +@@ -1101,7 +1107,7 @@ err_close_new_fd:
5534 + close(new_fd);
5535 + err_free_new_name:
5536 + free(new_name);
5537 +- return -errno;
5538 ++ return err;
5539 + }
5540 +
5541 + static int
5542 +diff --git a/tools/lib/subcmd/Makefile b/tools/lib/subcmd/Makefile
5543 +index 5b2cd5e58df0..5dbb0dde208c 100644
5544 +--- a/tools/lib/subcmd/Makefile
5545 ++++ b/tools/lib/subcmd/Makefile
5546 +@@ -28,7 +28,9 @@ ifeq ($(DEBUG),0)
5547 + endif
5548 + endif
5549 +
5550 +-ifeq ($(CC_NO_CLANG), 0)
5551 ++ifeq ($(DEBUG),1)
5552 ++ CFLAGS += -O0
5553 ++else ifeq ($(CC_NO_CLANG), 0)
5554 + CFLAGS += -O3
5555 + else
5556 + CFLAGS += -O6
5557 +diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
5558 +index e76154c02ee7..2700f1f17876 100644
5559 +--- a/tools/lib/traceevent/parse-filter.c
5560 ++++ b/tools/lib/traceevent/parse-filter.c
5561 +@@ -1475,8 +1475,10 @@ static int copy_filter_type(struct event_filter *filter,
5562 + if (strcmp(str, "TRUE") == 0 || strcmp(str, "FALSE") == 0) {
5563 + /* Add trivial event */
5564 + arg = allocate_arg();
5565 +- if (arg == NULL)
5566 ++ if (arg == NULL) {
5567 ++ free(str);
5568 + return -1;
5569 ++ }
5570 +
5571 + arg->type = FILTER_ARG_BOOLEAN;
5572 + if (strcmp(str, "TRUE") == 0)
5573 +@@ -1485,8 +1487,11 @@ static int copy_filter_type(struct event_filter *filter,
5574 + arg->boolean.value = 0;
5575 +
5576 + filter_type = add_filter_type(filter, event->id);
5577 +- if (filter_type == NULL)
5578 ++ if (filter_type == NULL) {
5579 ++ free(str);
5580 ++ free_arg(arg);
5581 + return -1;
5582 ++ }
5583 +
5584 + filter_type->filter = arg;
5585 +
5586 +diff --git a/tools/objtool/arch/x86/lib/x86-opcode-map.txt b/tools/objtool/arch/x86/lib/x86-opcode-map.txt
5587 +index e0b85930dd77..0a0e9112f284 100644
5588 +--- a/tools/objtool/arch/x86/lib/x86-opcode-map.txt
5589 ++++ b/tools/objtool/arch/x86/lib/x86-opcode-map.txt
5590 +@@ -333,7 +333,7 @@ AVXcode: 1
5591 + 06: CLTS
5592 + 07: SYSRET (o64)
5593 + 08: INVD
5594 +-09: WBINVD
5595 ++09: WBINVD | WBNOINVD (F3)
5596 + 0a:
5597 + 0b: UD2 (1B)
5598 + 0c:
5599 +@@ -364,7 +364,7 @@ AVXcode: 1
5600 + # a ModR/M byte.
5601 + 1a: BNDCL Gv,Ev (F3) | BNDCU Gv,Ev (F2) | BNDMOV Gv,Ev (66) | BNDLDX Gv,Ev
5602 + 1b: BNDCN Gv,Ev (F2) | BNDMOV Ev,Gv (66) | BNDMK Gv,Ev (F3) | BNDSTX Ev,Gv
5603 +-1c:
5604 ++1c: Grp20 (1A),(1C)
5605 + 1d:
5606 + 1e:
5607 + 1f: NOP Ev
5608 +@@ -792,6 +792,8 @@ f3: Grp17 (1A)
5609 + f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v)
5610 + f6: ADCX Gy,Ey (66) | ADOX Gy,Ey (F3) | MULX By,Gy,rDX,Ey (F2),(v)
5611 + f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v)
5612 ++f8: MOVDIR64B Gv,Mdqq (66) | ENQCMD Gv,Mdqq (F2) | ENQCMDS Gv,Mdqq (F3)
5613 ++f9: MOVDIRI My,Gy
5614 + EndTable
5615 +
5616 + Table: 3-byte opcode 2 (0x0f 0x3a)
5617 +@@ -943,9 +945,9 @@ GrpTable: Grp6
5618 + EndTable
5619 +
5620 + GrpTable: Grp7
5621 +-0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B)
5622 +-1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B)
5623 +-2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B)
5624 ++0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) | PCONFIG (101),(11B) | ENCLV (000),(11B)
5625 ++1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B) | ENCLS (111),(11B)
5626 ++2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B) | ENCLU (111),(11B)
5627 + 3: LIDT Ms
5628 + 4: SMSW Mw/Rv
5629 + 5: rdpkru (110),(11B) | wrpkru (111),(11B)
5630 +@@ -1020,7 +1022,7 @@ GrpTable: Grp15
5631 + 3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
5632 + 4: XSAVE | ptwrite Ey (F3),(11B)
5633 + 5: XRSTOR | lfence (11B)
5634 +-6: XSAVEOPT | clwb (66) | mfence (11B)
5635 ++6: XSAVEOPT | clwb (66) | mfence (11B) | TPAUSE Rd (66),(11B) | UMONITOR Rv (F3),(11B) | UMWAIT Rd (F2),(11B)
5636 + 7: clflush | clflushopt (66) | sfence (11B)
5637 + EndTable
5638 +
5639 +@@ -1051,6 +1053,10 @@ GrpTable: Grp19
5640 + 6: vscatterpf1qps/d Wx (66),(ev)
5641 + EndTable
5642 +
5643 ++GrpTable: Grp20
5644 ++0: cldemote Mb
5645 ++EndTable
5646 ++
5647 + # AMD's Prefetch Group
5648 + GrpTable: GrpP
5649 + 0: PREFETCH
5650 +diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
5651 +index b2188e623e22..2f94f7ad65bd 100644
5652 +--- a/tools/perf/builtin-report.c
5653 ++++ b/tools/perf/builtin-report.c
5654 +@@ -383,6 +383,13 @@ static int report__setup_sample_type(struct report *rep)
5655 + PERF_SAMPLE_BRANCH_ANY))
5656 + rep->nonany_branch_mode = true;
5657 +
5658 ++#ifndef HAVE_LIBUNWIND_SUPPORT
5659 ++ if (dwarf_callchain_users) {
5660 ++ ui__warning("Please install libunwind development packages "
5661 ++ "during the perf build.\n");
5662 ++ }
5663 ++#endif
5664 ++
5665 + return 0;
5666 + }
5667 +
5668 +diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
5669 +index 6cd9623ebc93..38b5888ef7b3 100644
5670 +--- a/tools/perf/pmu-events/jevents.c
5671 ++++ b/tools/perf/pmu-events/jevents.c
5672 +@@ -754,6 +754,7 @@ static int process_mapfile(FILE *outfp, char *fpath)
5673 + char *line, *p;
5674 + int line_num;
5675 + char *tblname;
5676 ++ int ret = 0;
5677 +
5678 + pr_info("%s: Processing mapfile %s\n", prog, fpath);
5679 +
5680 +@@ -765,6 +766,7 @@ static int process_mapfile(FILE *outfp, char *fpath)
5681 + if (!mapfp) {
5682 + pr_info("%s: Error %s opening %s\n", prog, strerror(errno),
5683 + fpath);
5684 ++ free(line);
5685 + return -1;
5686 + }
5687 +
5688 +@@ -791,7 +793,8 @@ static int process_mapfile(FILE *outfp, char *fpath)
5689 + /* TODO Deal with lines longer than 16K */
5690 + pr_info("%s: Mapfile %s: line %d too long, aborting\n",
5691 + prog, fpath, line_num);
5692 +- return -1;
5693 ++ ret = -1;
5694 ++ goto out;
5695 + }
5696 + line[strlen(line)-1] = '\0';
5697 +
5698 +@@ -821,7 +824,9 @@ static int process_mapfile(FILE *outfp, char *fpath)
5699 +
5700 + out:
5701 + print_mapping_table_suffix(outfp);
5702 +- return 0;
5703 ++ fclose(mapfp);
5704 ++ free(line);
5705 ++ return ret;
5706 + }
5707 +
5708 + /*
5709 +@@ -1118,6 +1123,7 @@ int main(int argc, char *argv[])
5710 + goto empty_map;
5711 + } else if (rc < 0) {
5712 + /* Make build fail */
5713 ++ fclose(eventsfp);
5714 + free_arch_std_events();
5715 + return 1;
5716 + } else if (rc) {
5717 +@@ -1130,6 +1136,7 @@ int main(int argc, char *argv[])
5718 + goto empty_map;
5719 + } else if (rc < 0) {
5720 + /* Make build fail */
5721 ++ fclose(eventsfp);
5722 + free_arch_std_events();
5723 + return 1;
5724 + } else if (rc) {
5725 +@@ -1147,6 +1154,8 @@ int main(int argc, char *argv[])
5726 + if (process_mapfile(eventsfp, mapfile)) {
5727 + pr_info("%s: Error processing mapfile %s\n", prog, mapfile);
5728 + /* Make build fail */
5729 ++ fclose(eventsfp);
5730 ++ free_arch_std_events();
5731 + return 1;
5732 + }
5733 +
5734 +diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c
5735 +index 910e25e64188..6cf00650602e 100644
5736 +--- a/tools/perf/tests/bp_signal.c
5737 ++++ b/tools/perf/tests/bp_signal.c
5738 +@@ -48,14 +48,6 @@ asm (
5739 + "__test_function:\n"
5740 + "incq (%rdi)\n"
5741 + "ret\n");
5742 +-#elif defined (__aarch64__)
5743 +-extern void __test_function(volatile long *ptr);
5744 +-asm (
5745 +- ".globl __test_function\n"
5746 +- "__test_function:\n"
5747 +- "str x30, [x0]\n"
5748 +- "ret\n");
5749 +-
5750 + #else
5751 + static void __test_function(volatile long *ptr)
5752 + {
5753 +@@ -301,10 +293,15 @@ bool test__bp_signal_is_supported(void)
5754 + * stepping into the SIGIO handler and getting stuck on the
5755 + * breakpointed instruction.
5756 + *
5757 ++ * Since arm64 has the same issue as arm with single-step
5758 ++ * handling, this case also gets stuck on the breakpointed
5759 ++ * instruction.
5760 ++ *
5761 + * Just disable the test for these architectures until these
5762 + * issues are resolved.
5763 + */
5764 +-#if defined(__powerpc__) || defined(__s390x__) || defined(__arm__)
5765 ++#if defined(__powerpc__) || defined(__s390x__) || defined(__arm__) || \
5766 ++ defined(__aarch64__)
5767 + return false;
5768 + #else
5769 + return true;
5770 +diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
5771 +index e92fa6029ac7..788b0805d2f1 100644
5772 +--- a/tools/perf/tests/task-exit.c
5773 ++++ b/tools/perf/tests/task-exit.c
5774 +@@ -105,6 +105,7 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
5775 + if (perf_evlist__mmap(evlist, 128) < 0) {
5776 + pr_debug("failed to mmap events: %d (%s)\n", errno,
5777 + str_error_r(errno, sbuf, sizeof(sbuf)));
5778 ++ err = -1;
5779 + goto out_delete_evlist;
5780 + }
5781 +
5782 +diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
5783 +index 7eb7de5aee44..29e75c051d04 100644
5784 +--- a/tools/perf/util/dwarf-aux.c
5785 ++++ b/tools/perf/util/dwarf-aux.c
5786 +@@ -320,21 +320,51 @@ bool die_is_func_def(Dwarf_Die *dw_die)
5787 + dwarf_attr(dw_die, DW_AT_declaration, &attr) == NULL);
5788 + }
5789 +
5790 ++/**
5791 ++ * die_entrypc - Returns entry PC (the lowest address) of a DIE
5792 ++ * @dw_die: a DIE
5793 ++ * @addr: where to store entry PC
5794 ++ *
5795 ++ * Since dwarf_entrypc() does not return the entry PC if the DIE has only an
5796 ++ * address range, we have to use this to retrieve the lowest address from the
5797 ++ * address range attribute.
5798 ++ */
5799 ++int die_entrypc(Dwarf_Die *dw_die, Dwarf_Addr *addr)
5800 ++{
5801 ++ Dwarf_Addr base, end;
5802 ++
5803 ++ if (!addr)
5804 ++ return -EINVAL;
5805 ++
5806 ++ if (dwarf_entrypc(dw_die, addr) == 0)
5807 ++ return 0;
5808 ++
5809 ++ return dwarf_ranges(dw_die, 0, &base, addr, &end) < 0 ? -ENOENT : 0;
5810 ++}
5811 ++
5812 + /**
5813 + * die_is_func_instance - Ensure that this DIE is an instance of a subprogram
5814 + * @dw_die: a DIE
5815 + *
5816 + * Ensure that this DIE is an instance (which has an entry address).
5817 +- * This returns true if @dw_die is a function instance. If not, you need to
5818 +- * call die_walk_instances() to find actual instances.
5819 ++ * This returns true if @dw_die is a function instance. If not, @dw_die
5820 ++ * must be a prototype. You can use die_walk_instances() to find actual
5821 ++ * instances.
5822 + **/
5823 + bool die_is_func_instance(Dwarf_Die *dw_die)
5824 + {
5825 + Dwarf_Addr tmp;
5826 ++ Dwarf_Attribute attr_mem;
5827 ++ int tag = dwarf_tag(dw_die);
5828 +
5829 +- /* Actually gcc optimizes non-inline as like as inlined */
5830 +- return !dwarf_func_inline(dw_die) && dwarf_entrypc(dw_die, &tmp) == 0;
5831 ++ if (tag != DW_TAG_subprogram &&
5832 ++ tag != DW_TAG_inlined_subroutine)
5833 ++ return false;
5834 ++
5835 ++ return dwarf_entrypc(dw_die, &tmp) == 0 ||
5836 ++ dwarf_attr(dw_die, DW_AT_ranges, &attr_mem) != NULL;
5837 + }
5838 ++
5839 + /**
5840 + * die_get_data_member_location - Get the data-member offset
5841 + * @mb_die: a DIE of a member of a data structure
5842 +@@ -611,6 +641,9 @@ static int __die_walk_instances_cb(Dwarf_Die *inst, void *data)
5843 + Dwarf_Die *origin;
5844 + int tmp;
5845 +
5846 ++ if (!die_is_func_instance(inst))
5847 ++ return DIE_FIND_CB_CONTINUE;
5848 ++
5849 + attr = dwarf_attr(inst, DW_AT_abstract_origin, &attr_mem);
5850 + if (attr == NULL)
5851 + return DIE_FIND_CB_CONTINUE;
5852 +@@ -682,15 +715,14 @@ static int __die_walk_funclines_cb(Dwarf_Die *in_die, void *data)
5853 + if (dwarf_tag(in_die) == DW_TAG_inlined_subroutine) {
5854 + fname = die_get_call_file(in_die);
5855 + lineno = die_get_call_lineno(in_die);
5856 +- if (fname && lineno > 0 && dwarf_entrypc(in_die, &addr) == 0) {
5857 ++ if (fname && lineno > 0 && die_entrypc(in_die, &addr) == 0) {
5858 + lw->retval = lw->callback(fname, lineno, addr, lw->data);
5859 + if (lw->retval != 0)
5860 + return DIE_FIND_CB_END;
5861 + }
5862 ++ if (!lw->recursive)
5863 ++ return DIE_FIND_CB_SIBLING;
5864 + }
5865 +- if (!lw->recursive)
5866 +- /* Don't need to search recursively */
5867 +- return DIE_FIND_CB_SIBLING;
5868 +
5869 + if (addr) {
5870 + fname = dwarf_decl_file(in_die);
5871 +@@ -723,7 +755,7 @@ static int __die_walk_funclines(Dwarf_Die *sp_die, bool recursive,
5872 + /* Handle function declaration line */
5873 + fname = dwarf_decl_file(sp_die);
5874 + if (fname && dwarf_decl_line(sp_die, &lineno) == 0 &&
5875 +- dwarf_entrypc(sp_die, &addr) == 0) {
5876 ++ die_entrypc(sp_die, &addr) == 0) {
5877 + lw.retval = callback(fname, lineno, addr, data);
5878 + if (lw.retval != 0)
5879 + goto done;
5880 +@@ -737,6 +769,10 @@ static int __die_walk_culines_cb(Dwarf_Die *sp_die, void *data)
5881 + {
5882 + struct __line_walk_param *lw = data;
5883 +
5884 ++ /*
5885 ++ * Since an inlined function can include another inlined function in
5886 ++ * the same file, we need to walk it recursively.
5887 ++ */
5888 + lw->retval = __die_walk_funclines(sp_die, true, lw->callback, lw->data);
5889 + if (lw->retval != 0)
5890 + return DWARF_CB_ABORT;
5891 +@@ -761,11 +797,12 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data)
5892 + Dwarf_Lines *lines;
5893 + Dwarf_Line *line;
5894 + Dwarf_Addr addr;
5895 +- const char *fname, *decf = NULL;
5896 ++ const char *fname, *decf = NULL, *inf = NULL;
5897 + int lineno, ret = 0;
5898 + int decl = 0, inl;
5899 + Dwarf_Die die_mem, *cu_die;
5900 + size_t nlines, i;
5901 ++ bool flag;
5902 +
5903 + /* Get the CU die */
5904 + if (dwarf_tag(rt_die) != DW_TAG_compile_unit) {
5905 +@@ -796,6 +833,12 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data)
5906 + "Possible error in debuginfo.\n");
5907 + continue;
5908 + }
5909 ++ /* Skip end-of-sequence */
5910 ++ if (dwarf_lineendsequence(line, &flag) != 0 || flag)
5911 ++ continue;
5912 ++ /* Skip non-statement line info */
5913 ++ if (dwarf_linebeginstatement(line, &flag) != 0 || !flag)
5914 ++ continue;
5915 + /* Filter lines based on address */
5916 + if (rt_die != cu_die) {
5917 + /*
5918 +@@ -805,13 +848,21 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data)
5919 + */
5920 + if (!dwarf_haspc(rt_die, addr))
5921 + continue;
5922 ++
5923 + if (die_find_inlinefunc(rt_die, addr, &die_mem)) {
5924 ++ /* Call-site check */
5925 ++ inf = die_get_call_file(&die_mem);
5926 ++ if ((inf && !strcmp(inf, decf)) &&
5927 ++ die_get_call_lineno(&die_mem) == lineno)
5928 ++ goto found;
5929 ++
5930 + dwarf_decl_line(&die_mem, &inl);
5931 + if (inl != decl ||
5932 + decf != dwarf_decl_file(&die_mem))
5933 + continue;
5934 + }
5935 + }
5936 ++found:
5937 + /* Get source line */
5938 + fname = dwarf_linesrc(line, NULL, NULL);
5939 +
5940 +@@ -826,8 +877,9 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data)
5941 + */
5942 + if (rt_die != cu_die)
5943 + /*
5944 +- * Don't need walk functions recursively, because nested
5945 +- * inlined functions don't have lines of the specified DIE.
5946 ++ * Don't need to walk inlined functions recursively, because
5947 ++ * inner inlined functions don't have the lines of the
5948 ++ * specified function.
5949 + */
5950 + ret = __die_walk_funclines(rt_die, false, callback, data);
5951 + else {
5952 +@@ -1002,7 +1054,7 @@ static int die_get_var_innermost_scope(Dwarf_Die *sp_die, Dwarf_Die *vr_die,
5953 + bool first = true;
5954 + const char *name;
5955 +
5956 +- ret = dwarf_entrypc(sp_die, &entry);
5957 ++ ret = die_entrypc(sp_die, &entry);
5958 + if (ret)
5959 + return ret;
5960 +
5961 +@@ -1065,7 +1117,7 @@ int die_get_var_range(Dwarf_Die *sp_die, Dwarf_Die *vr_die, struct strbuf *buf)
5962 + bool first = true;
5963 + const char *name;
5964 +
5965 +- ret = dwarf_entrypc(sp_die, &entry);
5966 ++ ret = die_entrypc(sp_die, &entry);
5967 + if (ret)
5968 + return ret;
5969 +
5970 +diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h
5971 +index 8ac53bf1ec4e..ee15fac4e1d0 100644
5972 +--- a/tools/perf/util/dwarf-aux.h
5973 ++++ b/tools/perf/util/dwarf-aux.h
5974 +@@ -41,6 +41,9 @@ int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr,
5975 + /* Get DW_AT_linkage_name (should be NULL for C binary) */
5976 + const char *die_get_linkage_name(Dwarf_Die *dw_die);
5977 +
5978 ++/* Get the lowest PC in DIE (including range list) */
5979 ++int die_entrypc(Dwarf_Die *dw_die, Dwarf_Addr *addr);
5980 ++
5981 + /* Ensure that this DIE is a subprogram and definition (not declaration) */
5982 + bool die_is_func_def(Dwarf_Die *dw_die);
5983 +
5984 +diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
5985 +index 1a7c76d2baa8..95043cae5774 100644
5986 +--- a/tools/perf/util/parse-events.c
5987 ++++ b/tools/perf/util/parse-events.c
5988 +@@ -1282,8 +1282,15 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
5989 + if (get_config_terms(head_config, &config_terms))
5990 + return -ENOMEM;
5991 +
5992 +- if (perf_pmu__config(pmu, &attr, head_config, parse_state->error))
5993 ++ if (perf_pmu__config(pmu, &attr, head_config, parse_state->error)) {
5994 ++ struct perf_evsel_config_term *pos, *tmp;
5995 ++
5996 ++ list_for_each_entry_safe(pos, tmp, &config_terms, list) {
5997 ++ list_del_init(&pos->list);
5998 ++ free(pos);
5999 ++ }
6000 + return -EINVAL;
6001 ++ }
6002 +
6003 + evsel = __add_event(list, &parse_state->idx, &attr,
6004 + get_config_name(head_config), pmu,
6005 +@@ -1843,15 +1850,20 @@ int parse_events(struct perf_evlist *evlist, const char *str,
6006 +
6007 + ret = parse_events__scanner(str, &parse_state, PE_START_EVENTS);
6008 + perf_pmu__parse_cleanup();
6009 ++
6010 ++ if (!ret && list_empty(&parse_state.list)) {
6011 ++ WARN_ONCE(true, "WARNING: event parser found nothing\n");
6012 ++ return -1;
6013 ++ }
6014 ++
6015 ++ /*
6016 ++ * Add list to the evlist even with errors to allow callers to clean up.
6017 ++ */
6018 ++ perf_evlist__splice_list_tail(evlist, &parse_state.list);
6019 ++
6020 + if (!ret) {
6021 + struct perf_evsel *last;
6022 +
6023 +- if (list_empty(&parse_state.list)) {
6024 +- WARN_ONCE(true, "WARNING: event parser found nothing\n");
6025 +- return -1;
6026 +- }
6027 +-
6028 +- perf_evlist__splice_list_tail(evlist, &parse_state.list);
6029 + evlist->nr_groups += parse_state.nr_groups;
6030 + last = perf_evlist__last(evlist);
6031 + last->cmdline_group_boundary = true;
6032 +diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
6033 +index c37fbef1711d..7ccabb891e5a 100644
6034 +--- a/tools/perf/util/probe-finder.c
6035 ++++ b/tools/perf/util/probe-finder.c
6036 +@@ -764,6 +764,16 @@ static int find_best_scope_cb(Dwarf_Die *fn_die, void *data)
6037 + return 0;
6038 + }
6039 +
6040 ++/* Return innermost DIE */
6041 ++static int find_inner_scope_cb(Dwarf_Die *fn_die, void *data)
6042 ++{
6043 ++ struct find_scope_param *fsp = data;
6044 ++
6045 ++ memcpy(fsp->die_mem, fn_die, sizeof(Dwarf_Die));
6046 ++ fsp->found = true;
6047 ++ return 1;
6048 ++}
6049 ++
6050 + /* Find an appropriate scope fits to given conditions */
6051 + static Dwarf_Die *find_best_scope(struct probe_finder *pf, Dwarf_Die *die_mem)
6052 + {
6053 +@@ -775,8 +785,13 @@ static Dwarf_Die *find_best_scope(struct probe_finder *pf, Dwarf_Die *die_mem)
6054 + .die_mem = die_mem,
6055 + .found = false,
6056 + };
6057 ++ int ret;
6058 +
6059 +- cu_walk_functions_at(&pf->cu_die, pf->addr, find_best_scope_cb, &fsp);
6060 ++ ret = cu_walk_functions_at(&pf->cu_die, pf->addr, find_best_scope_cb,
6061 ++ &fsp);
6062 ++ if (!ret && !fsp.found)
6063 ++ cu_walk_functions_at(&pf->cu_die, pf->addr,
6064 ++ find_inner_scope_cb, &fsp);
6065 +
6066 + return fsp.found ? die_mem : NULL;
6067 + }
6068 +@@ -950,7 +965,7 @@ static int probe_point_inline_cb(Dwarf_Die *in_die, void *data)
6069 + ret = find_probe_point_lazy(in_die, pf);
6070 + else {
6071 + /* Get probe address */
6072 +- if (dwarf_entrypc(in_die, &addr) != 0) {
6073 ++ if (die_entrypc(in_die, &addr) != 0) {
6074 + pr_warning("Failed to get entry address of %s.\n",
6075 + dwarf_diename(in_die));
6076 + return -ENOENT;
6077 +@@ -1002,7 +1017,7 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
6078 + param->retval = find_probe_point_by_line(pf);
6079 + } else if (die_is_func_instance(sp_die)) {
6080 + /* Instances always have the entry address */
6081 +- dwarf_entrypc(sp_die, &pf->addr);
6082 ++ die_entrypc(sp_die, &pf->addr);
6083 + /* But in some case the entry address is 0 */
6084 + if (pf->addr == 0) {
6085 + pr_debug("%s has no entry PC. Skipped\n",
6086 +@@ -1414,6 +1429,18 @@ error:
6087 + return DIE_FIND_CB_END;
6088 + }
6089 +
6090 ++static bool available_var_finder_overlap(struct available_var_finder *af)
6091 ++{
6092 ++ int i;
6093 ++
6094 ++ for (i = 0; i < af->nvls; i++) {
6095 ++ if (af->pf.addr == af->vls[i].point.address)
6096 ++ return true;
6097 ++ }
6098 ++ return false;
6099 ++
6100 ++}
6101 ++
6102 + /* Add a found vars into available variables list */
6103 + static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf)
6104 + {
6105 +@@ -1424,6 +1451,14 @@ static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf)
6106 + Dwarf_Die die_mem;
6107 + int ret;
6108 +
6109 ++ /*
6110 ++ * For some reason (e.g. a different column assigned to the same address),
6111 ++ * this callback can be called with an address that has already been
6112 ++ * processed. Ignore it in that case.
6113 ++ */
6114 ++ if (available_var_finder_overlap(af))
6115 ++ return 0;
6116 ++
6117 + /* Check number of tevs */
6118 + if (af->nvls == af->max_vls) {
6119 + pr_warning("Too many( > %d) probe point found.\n", af->max_vls);
6120 +@@ -1567,7 +1602,7 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
6121 + /* Get function entry information */
6122 + func = basefunc = dwarf_diename(&spdie);
6123 + if (!func ||
6124 +- dwarf_entrypc(&spdie, &baseaddr) != 0 ||
6125 ++ die_entrypc(&spdie, &baseaddr) != 0 ||
6126 + dwarf_decl_line(&spdie, &baseline) != 0) {
6127 + lineno = 0;
6128 + goto post;
6129 +@@ -1584,7 +1619,7 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
6130 + while (die_find_top_inlinefunc(&spdie, (Dwarf_Addr)addr,
6131 + &indie)) {
6132 + /* There is an inline function */
6133 +- if (dwarf_entrypc(&indie, &_addr) == 0 &&
6134 ++ if (die_entrypc(&indie, &_addr) == 0 &&
6135 + _addr == addr) {
6136 + /*
6137 + * addr is at an inline function entry.
6138 +diff --git a/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c b/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
6139 +index f794d6bbb7e9..3e4ff4a1cdf4 100644
6140 +--- a/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
6141 ++++ b/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
6142 +@@ -40,7 +40,6 @@ static cstate_t hsw_ext_cstates[HSW_EXT_CSTATE_COUNT] = {
6143 + {
6144 + .name = "PC9",
6145 + .desc = N_("Processor Package C9"),
6146 +- .desc = N_("Processor Package C2"),
6147 + .id = PC9,
6148 + .range = RANGE_PACKAGE,
6149 + .get_count_percent = hsw_ext_get_count_percent,
6150 +diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c
6151 +index cf16948aad4a..6af24f9a780d 100644
6152 +--- a/tools/testing/selftests/bpf/cgroup_helpers.c
6153 ++++ b/tools/testing/selftests/bpf/cgroup_helpers.c
6154 +@@ -44,7 +44,7 @@
6155 + */
6156 + int setup_cgroup_environment(void)
6157 + {
6158 +- char cgroup_workdir[PATH_MAX + 1];
6159 ++ char cgroup_workdir[PATH_MAX - 24];
6160 +
6161 + format_cgroup_path(cgroup_workdir, "");
6162 +
6163 +diff --git a/tools/testing/selftests/net/forwarding/router_bridge_vlan.sh b/tools/testing/selftests/net/forwarding/router_bridge_vlan.sh
6164 +index fef88eb4b873..fa6a88c50750 100755
6165 +--- a/tools/testing/selftests/net/forwarding/router_bridge_vlan.sh
6166 ++++ b/tools/testing/selftests/net/forwarding/router_bridge_vlan.sh
6167 +@@ -36,7 +36,7 @@ h2_destroy()
6168 + {
6169 + ip -6 route del 2001:db8:1::/64 vrf v$h2
6170 + ip -4 route del 192.0.2.0/28 vrf v$h2
6171 +- simple_if_fini $h2 192.0.2.130/28
6172 ++ simple_if_fini $h2 192.0.2.130/28 2001:db8:2::2/64
6173 + }
6174 +
6175 + router_create()