
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.18 commit in: /
Date: Sat, 15 Sep 2018 10:12:59
Message-Id: 1537006366.f69bd2c4a51cabfc16f5a44334d1cd82613e7157.mpagano@gentoo
1 commit: f69bd2c4a51cabfc16f5a44334d1cd82613e7157
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Sat Sep 15 10:12:46 2018 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Sat Sep 15 10:12:46 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=f69bd2c4
7
8 Linux patch 4.18.8
9
10 0000_README | 4 +
11 1007_linux-4.18.8.patch | 6654 +++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 6658 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index f3682ca..597262e 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -71,6 +71,10 @@ Patch: 1006_linux-4.18.7.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.18.7
21
22 +Patch: 1007_linux-4.18.8.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.18.8
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1007_linux-4.18.8.patch b/1007_linux-4.18.8.patch
31 new file mode 100644
32 index 0000000..8a888c7
33 --- /dev/null
34 +++ b/1007_linux-4.18.8.patch
35 @@ -0,0 +1,6654 @@
36 +diff --git a/Makefile b/Makefile
37 +index 711b04d00e49..0d73431f66cd 100644
38 +--- a/Makefile
39 ++++ b/Makefile
40 +@@ -1,7 +1,7 @@
41 + # SPDX-License-Identifier: GPL-2.0
42 + VERSION = 4
43 + PATCHLEVEL = 18
44 +-SUBLEVEL = 7
45 ++SUBLEVEL = 8
46 + EXTRAVERSION =
47 + NAME = Merciless Moray
48 +
49 +diff --git a/arch/arm/mach-rockchip/Kconfig b/arch/arm/mach-rockchip/Kconfig
50 +index fafd3d7f9f8c..8ca926522026 100644
51 +--- a/arch/arm/mach-rockchip/Kconfig
52 ++++ b/arch/arm/mach-rockchip/Kconfig
53 +@@ -17,6 +17,7 @@ config ARCH_ROCKCHIP
54 + select ARM_GLOBAL_TIMER
55 + select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
56 + select ZONE_DMA if ARM_LPAE
57 ++ select PM
58 + help
59 + Support for Rockchip's Cortex-A9 Single-to-Quad-Core-SoCs
60 + containing the RK2928, RK30xx and RK31xx series.
61 +diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
62 +index d5aeac351fc3..21a715ad8222 100644
63 +--- a/arch/arm64/Kconfig.platforms
64 ++++ b/arch/arm64/Kconfig.platforms
65 +@@ -151,6 +151,7 @@ config ARCH_ROCKCHIP
66 + select GPIOLIB
67 + select PINCTRL
68 + select PINCTRL_ROCKCHIP
69 ++ select PM
70 + select ROCKCHIP_TIMER
71 + help
72 + This enables support for the ARMv8 based Rockchip chipsets,
73 +diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
74 +index 16b077801a5f..a4a718dbfec6 100644
75 +--- a/arch/powerpc/include/asm/topology.h
76 ++++ b/arch/powerpc/include/asm/topology.h
77 +@@ -92,6 +92,7 @@ extern int stop_topology_update(void);
78 + extern int prrn_is_enabled(void);
79 + extern int find_and_online_cpu_nid(int cpu);
80 + extern int timed_topology_update(int nsecs);
81 ++extern void __init shared_proc_topology_init(void);
82 + #else
83 + static inline int start_topology_update(void)
84 + {
85 +@@ -113,6 +114,10 @@ static inline int timed_topology_update(int nsecs)
86 + {
87 + return 0;
88 + }
89 ++
90 ++#ifdef CONFIG_SMP
91 ++static inline void shared_proc_topology_init(void) {}
92 ++#endif
93 + #endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */
94 +
95 + #include <asm-generic/topology.h>
96 +diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
97 +index 468653ce844c..327f6112fe8e 100644
98 +--- a/arch/powerpc/include/asm/uaccess.h
99 ++++ b/arch/powerpc/include/asm/uaccess.h
100 +@@ -250,10 +250,17 @@ do { \
101 + } \
102 + } while (0)
103 +
104 ++/*
105 ++ * This is a type: either unsigned long, if the argument fits into
106 ++ * that type, or otherwise unsigned long long.
107 ++ */
108 ++#define __long_type(x) \
109 ++ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
110 ++
111 + #define __get_user_nocheck(x, ptr, size) \
112 + ({ \
113 + long __gu_err; \
114 +- unsigned long __gu_val; \
115 ++ __long_type(*(ptr)) __gu_val; \
116 + const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
117 + __chk_user_ptr(ptr); \
118 + if (!is_kernel_addr((unsigned long)__gu_addr)) \
119 +@@ -267,7 +274,7 @@ do { \
120 + #define __get_user_check(x, ptr, size) \
121 + ({ \
122 + long __gu_err = -EFAULT; \
123 +- unsigned long __gu_val = 0; \
124 ++ __long_type(*(ptr)) __gu_val = 0; \
125 + const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
126 + might_fault(); \
127 + if (access_ok(VERIFY_READ, __gu_addr, (size))) { \
128 +@@ -281,7 +288,7 @@ do { \
129 + #define __get_user_nosleep(x, ptr, size) \
130 + ({ \
131 + long __gu_err; \
132 +- unsigned long __gu_val; \
133 ++ __long_type(*(ptr)) __gu_val; \
134 + const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
135 + __chk_user_ptr(ptr); \
136 + barrier_nospec(); \
137 +diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
138 +index 285c6465324a..f817342aab8f 100644
139 +--- a/arch/powerpc/kernel/exceptions-64s.S
140 ++++ b/arch/powerpc/kernel/exceptions-64s.S
141 +@@ -1526,6 +1526,8 @@ TRAMP_REAL_BEGIN(stf_barrier_fallback)
142 + TRAMP_REAL_BEGIN(rfi_flush_fallback)
143 + SET_SCRATCH0(r13);
144 + GET_PACA(r13);
145 ++ std r1,PACA_EXRFI+EX_R12(r13)
146 ++ ld r1,PACAKSAVE(r13)
147 + std r9,PACA_EXRFI+EX_R9(r13)
148 + std r10,PACA_EXRFI+EX_R10(r13)
149 + std r11,PACA_EXRFI+EX_R11(r13)
150 +@@ -1560,12 +1562,15 @@ TRAMP_REAL_BEGIN(rfi_flush_fallback)
151 + ld r9,PACA_EXRFI+EX_R9(r13)
152 + ld r10,PACA_EXRFI+EX_R10(r13)
153 + ld r11,PACA_EXRFI+EX_R11(r13)
154 ++ ld r1,PACA_EXRFI+EX_R12(r13)
155 + GET_SCRATCH0(r13);
156 + rfid
157 +
158 + TRAMP_REAL_BEGIN(hrfi_flush_fallback)
159 + SET_SCRATCH0(r13);
160 + GET_PACA(r13);
161 ++ std r1,PACA_EXRFI+EX_R12(r13)
162 ++ ld r1,PACAKSAVE(r13)
163 + std r9,PACA_EXRFI+EX_R9(r13)
164 + std r10,PACA_EXRFI+EX_R10(r13)
165 + std r11,PACA_EXRFI+EX_R11(r13)
166 +@@ -1600,6 +1605,7 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback)
167 + ld r9,PACA_EXRFI+EX_R9(r13)
168 + ld r10,PACA_EXRFI+EX_R10(r13)
169 + ld r11,PACA_EXRFI+EX_R11(r13)
170 ++ ld r1,PACA_EXRFI+EX_R12(r13)
171 + GET_SCRATCH0(r13);
172 + hrfid
173 +
174 +diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
175 +index 4794d6b4f4d2..b3142c7b9c31 100644
176 +--- a/arch/powerpc/kernel/smp.c
177 ++++ b/arch/powerpc/kernel/smp.c
178 +@@ -1156,6 +1156,11 @@ void __init smp_cpus_done(unsigned int max_cpus)
179 + if (smp_ops && smp_ops->bringup_done)
180 + smp_ops->bringup_done();
181 +
182 ++ /*
183 ++ * On a shared LPAR, associativity needs to be requested.
184 ++ * Hence, get numa topology before dumping cpu topology
185 ++ */
186 ++ shared_proc_topology_init();
187 + dump_numa_cpu_topology();
188 +
189 + /*
190 +diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
191 +index 0c7e05d89244..35ac5422903a 100644
192 +--- a/arch/powerpc/mm/numa.c
193 ++++ b/arch/powerpc/mm/numa.c
194 +@@ -1078,7 +1078,6 @@ static int prrn_enabled;
195 + static void reset_topology_timer(void);
196 + static int topology_timer_secs = 1;
197 + static int topology_inited;
198 +-static int topology_update_needed;
199 +
200 + /*
201 + * Change polling interval for associativity changes.
202 +@@ -1306,11 +1305,8 @@ int numa_update_cpu_topology(bool cpus_locked)
203 + struct device *dev;
204 + int weight, new_nid, i = 0;
205 +
206 +- if (!prrn_enabled && !vphn_enabled) {
207 +- if (!topology_inited)
208 +- topology_update_needed = 1;
209 ++ if (!prrn_enabled && !vphn_enabled && topology_inited)
210 + return 0;
211 +- }
212 +
213 + weight = cpumask_weight(&cpu_associativity_changes_mask);
214 + if (!weight)
215 +@@ -1423,7 +1419,6 @@ int numa_update_cpu_topology(bool cpus_locked)
216 +
217 + out:
218 + kfree(updates);
219 +- topology_update_needed = 0;
220 + return changed;
221 + }
222 +
223 +@@ -1551,6 +1546,15 @@ int prrn_is_enabled(void)
224 + return prrn_enabled;
225 + }
226 +
227 ++void __init shared_proc_topology_init(void)
228 ++{
229 ++ if (lppaca_shared_proc(get_lppaca())) {
230 ++ bitmap_fill(cpumask_bits(&cpu_associativity_changes_mask),
231 ++ nr_cpumask_bits);
232 ++ numa_update_cpu_topology(false);
233 ++ }
234 ++}
235 ++
236 + static int topology_read(struct seq_file *file, void *v)
237 + {
238 + if (vphn_enabled || prrn_enabled)
239 +@@ -1608,10 +1612,6 @@ static int topology_update_init(void)
240 + return -ENOMEM;
241 +
242 + topology_inited = 1;
243 +- if (topology_update_needed)
244 +- bitmap_fill(cpumask_bits(&cpu_associativity_changes_mask),
245 +- nr_cpumask_bits);
246 +-
247 + return 0;
248 + }
249 + device_initcall(topology_update_init);
250 +diff --git a/arch/powerpc/platforms/85xx/t1042rdb_diu.c b/arch/powerpc/platforms/85xx/t1042rdb_diu.c
251 +index 58fa3d319f1c..dac36ba82fea 100644
252 +--- a/arch/powerpc/platforms/85xx/t1042rdb_diu.c
253 ++++ b/arch/powerpc/platforms/85xx/t1042rdb_diu.c
254 +@@ -9,8 +9,10 @@
255 + * option) any later version.
256 + */
257 +
258 ++#include <linux/init.h>
259 + #include <linux/io.h>
260 + #include <linux/kernel.h>
261 ++#include <linux/module.h>
262 + #include <linux/of.h>
263 + #include <linux/of_address.h>
264 +
265 +@@ -150,3 +152,5 @@ static int __init t1042rdb_diu_init(void)
266 + }
267 +
268 + early_initcall(t1042rdb_diu_init);
269 ++
270 ++MODULE_LICENSE("GPL");
271 +diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
272 +index 2edc673be137..99d1152ae224 100644
273 +--- a/arch/powerpc/platforms/pseries/ras.c
274 ++++ b/arch/powerpc/platforms/pseries/ras.c
275 +@@ -371,7 +371,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
276 + int len, error_log_length;
277 +
278 + error_log_length = 8 + rtas_error_extended_log_length(h);
279 +- len = max_t(int, error_log_length, RTAS_ERROR_LOG_MAX);
280 ++ len = min_t(int, error_log_length, RTAS_ERROR_LOG_MAX);
281 + memset(global_mce_data_buf, 0, RTAS_ERROR_LOG_MAX);
282 + memcpy(global_mce_data_buf, h, len);
283 + errhdr = (struct rtas_error_log *)global_mce_data_buf;
284 +diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c
285 +index eb69a5186243..280e964e1aa8 100644
286 +--- a/arch/powerpc/sysdev/mpic_msgr.c
287 ++++ b/arch/powerpc/sysdev/mpic_msgr.c
288 +@@ -196,7 +196,7 @@ static int mpic_msgr_probe(struct platform_device *dev)
289 +
290 + /* IO map the message register block. */
291 + of_address_to_resource(np, 0, &rsrc);
292 +- msgr_block_addr = ioremap(rsrc.start, rsrc.end - rsrc.start);
293 ++ msgr_block_addr = ioremap(rsrc.start, resource_size(&rsrc));
294 + if (!msgr_block_addr) {
295 + dev_err(&dev->dev, "Failed to iomap MPIC message registers");
296 + return -EFAULT;
297 +diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
298 +index f6561b783b61..eed1c137f618 100644
299 +--- a/arch/riscv/kernel/vdso/Makefile
300 ++++ b/arch/riscv/kernel/vdso/Makefile
301 +@@ -52,8 +52,8 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
302 + # Add -lgcc so rv32 gets static muldi3 and lshrdi3 definitions.
303 + # Make sure only to export the intended __vdso_xxx symbol offsets.
304 + quiet_cmd_vdsold = VDSOLD $@
305 +- cmd_vdsold = $(CC) $(KCFLAGS) $(call cc-option, -no-pie) -nostdlib $(SYSCFLAGS_$(@F)) \
306 +- -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp -lgcc && \
307 ++ cmd_vdsold = $(CC) $(KBUILD_CFLAGS) $(call cc-option, -no-pie) -nostdlib -nostartfiles $(SYSCFLAGS_$(@F)) \
308 ++ -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp && \
309 + $(CROSS_COMPILE)objcopy \
310 + $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@
311 +
312 +diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
313 +index 9f5ea9d87069..9b0216d571ad 100644
314 +--- a/arch/s390/kernel/crash_dump.c
315 ++++ b/arch/s390/kernel/crash_dump.c
316 +@@ -404,11 +404,13 @@ static void *get_vmcoreinfo_old(unsigned long *size)
317 + if (copy_oldmem_kernel(nt_name, addr + sizeof(note),
318 + sizeof(nt_name) - 1))
319 + return NULL;
320 +- if (strcmp(nt_name, "VMCOREINFO") != 0)
321 ++ if (strcmp(nt_name, VMCOREINFO_NOTE_NAME) != 0)
322 + return NULL;
323 + vmcoreinfo = kzalloc_panic(note.n_descsz);
324 +- if (copy_oldmem_kernel(vmcoreinfo, addr + 24, note.n_descsz))
325 ++ if (copy_oldmem_kernel(vmcoreinfo, addr + 24, note.n_descsz)) {
326 ++ kfree(vmcoreinfo);
327 + return NULL;
328 ++ }
329 + *size = note.n_descsz;
330 + return vmcoreinfo;
331 + }
332 +@@ -418,15 +420,20 @@ static void *get_vmcoreinfo_old(unsigned long *size)
333 + */
334 + static void *nt_vmcoreinfo(void *ptr)
335 + {
336 ++ const char *name = VMCOREINFO_NOTE_NAME;
337 + unsigned long size;
338 + void *vmcoreinfo;
339 +
340 + vmcoreinfo = os_info_old_entry(OS_INFO_VMCOREINFO, &size);
341 +- if (!vmcoreinfo)
342 +- vmcoreinfo = get_vmcoreinfo_old(&size);
343 ++ if (vmcoreinfo)
344 ++ return nt_init_name(ptr, 0, vmcoreinfo, size, name);
345 ++
346 ++ vmcoreinfo = get_vmcoreinfo_old(&size);
347 + if (!vmcoreinfo)
348 + return ptr;
349 +- return nt_init_name(ptr, 0, vmcoreinfo, size, "VMCOREINFO");
350 ++ ptr = nt_init_name(ptr, 0, vmcoreinfo, size, name);
351 ++ kfree(vmcoreinfo);
352 ++ return ptr;
353 + }
354 +
355 + /*
356 +diff --git a/arch/um/Makefile b/arch/um/Makefile
357 +index e54dda8a0363..de340e41f3b2 100644
358 +--- a/arch/um/Makefile
359 ++++ b/arch/um/Makefile
360 +@@ -122,8 +122,7 @@ archheaders:
361 + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-generic \
362 + kbuild-file=$(HOST_DIR)/include/uapi/asm/Kbuild \
363 + obj=$(HOST_DIR)/include/generated/uapi/asm
364 +- $(Q)$(MAKE) KBUILD_SRC= ARCH=$(HEADER_ARCH) archheaders
365 +-
366 ++ $(Q)$(MAKE) -f $(srctree)/Makefile ARCH=$(HEADER_ARCH) archheaders
367 +
368 + archprepare: include/generated/user_constants.h
369 +
370 +diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
371 +index 8c7b3e5a2d01..3a17107594c8 100644
372 +--- a/arch/x86/include/asm/mce.h
373 ++++ b/arch/x86/include/asm/mce.h
374 +@@ -148,6 +148,7 @@ enum mce_notifier_prios {
375 + MCE_PRIO_LOWEST = 0,
376 + };
377 +
378 ++struct notifier_block;
379 + extern void mce_register_decode_chain(struct notifier_block *nb);
380 + extern void mce_unregister_decode_chain(struct notifier_block *nb);
381 +
382 +diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
383 +index bb035a4cbc8c..9eeb1359ec75 100644
384 +--- a/arch/x86/include/asm/pgtable-3level.h
385 ++++ b/arch/x86/include/asm/pgtable-3level.h
386 +@@ -2,6 +2,8 @@
387 + #ifndef _ASM_X86_PGTABLE_3LEVEL_H
388 + #define _ASM_X86_PGTABLE_3LEVEL_H
389 +
390 ++#include <asm/atomic64_32.h>
391 ++
392 + /*
393 + * Intel Physical Address Extension (PAE) Mode - three-level page
394 + * tables on PPro+ CPUs.
395 +@@ -147,10 +149,7 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
396 + {
397 + pte_t res;
398 +
399 +- /* xchg acts as a barrier before the setting of the high bits */
400 +- res.pte_low = xchg(&ptep->pte_low, 0);
401 +- res.pte_high = ptep->pte_high;
402 +- ptep->pte_high = 0;
403 ++ res.pte = (pteval_t)arch_atomic64_xchg((atomic64_t *)ptep, 0);
404 +
405 + return res;
406 + }
407 +diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
408 +index 74392d9d51e0..a10481656d82 100644
409 +--- a/arch/x86/kernel/tsc.c
410 ++++ b/arch/x86/kernel/tsc.c
411 +@@ -1343,7 +1343,7 @@ device_initcall(init_tsc_clocksource);
412 +
413 + void __init tsc_early_delay_calibrate(void)
414 + {
415 +- unsigned long lpj;
416 ++ u64 lpj;
417 +
418 + if (!boot_cpu_has(X86_FEATURE_TSC))
419 + return;
420 +@@ -1355,7 +1355,7 @@ void __init tsc_early_delay_calibrate(void)
421 + if (!tsc_khz)
422 + return;
423 +
424 +- lpj = tsc_khz * 1000;
425 ++ lpj = (u64)tsc_khz * 1000;
426 + do_div(lpj, HZ);
427 + loops_per_jiffy = lpj;
428 + }
429 +diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
430 +index a44e568363a4..42f1ba92622a 100644
431 +--- a/arch/x86/kvm/mmu.c
432 ++++ b/arch/x86/kvm/mmu.c
433 +@@ -221,6 +221,17 @@ static const u64 shadow_acc_track_saved_bits_mask = PT64_EPT_READABLE_MASK |
434 + PT64_EPT_EXECUTABLE_MASK;
435 + static const u64 shadow_acc_track_saved_bits_shift = PT64_SECOND_AVAIL_BITS_SHIFT;
436 +
437 ++/*
438 ++ * This mask must be set on all non-zero Non-Present or Reserved SPTEs in order
439 ++ * to guard against L1TF attacks.
440 ++ */
441 ++static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
442 ++
443 ++/*
444 ++ * The number of high-order 1 bits to use in the mask above.
445 ++ */
446 ++static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;
447 ++
448 + static void mmu_spte_set(u64 *sptep, u64 spte);
449 +
450 + void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value)
451 +@@ -308,9 +319,13 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
452 + {
453 + unsigned int gen = kvm_current_mmio_generation(vcpu);
454 + u64 mask = generation_mmio_spte_mask(gen);
455 ++ u64 gpa = gfn << PAGE_SHIFT;
456 +
457 + access &= ACC_WRITE_MASK | ACC_USER_MASK;
458 +- mask |= shadow_mmio_value | access | gfn << PAGE_SHIFT;
459 ++ mask |= shadow_mmio_value | access;
460 ++ mask |= gpa | shadow_nonpresent_or_rsvd_mask;
461 ++ mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
462 ++ << shadow_nonpresent_or_rsvd_mask_len;
463 +
464 + trace_mark_mmio_spte(sptep, gfn, access, gen);
465 + mmu_spte_set(sptep, mask);
466 +@@ -323,8 +338,14 @@ static bool is_mmio_spte(u64 spte)
467 +
468 + static gfn_t get_mmio_spte_gfn(u64 spte)
469 + {
470 +- u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask;
471 +- return (spte & ~mask) >> PAGE_SHIFT;
472 ++ u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask |
473 ++ shadow_nonpresent_or_rsvd_mask;
474 ++ u64 gpa = spte & ~mask;
475 ++
476 ++ gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
477 ++ & shadow_nonpresent_or_rsvd_mask;
478 ++
479 ++ return gpa >> PAGE_SHIFT;
480 + }
481 +
482 + static unsigned get_mmio_spte_access(u64 spte)
483 +@@ -381,7 +402,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
484 + }
485 + EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
486 +
487 +-static void kvm_mmu_clear_all_pte_masks(void)
488 ++static void kvm_mmu_reset_all_pte_masks(void)
489 + {
490 + shadow_user_mask = 0;
491 + shadow_accessed_mask = 0;
492 +@@ -391,6 +412,18 @@ static void kvm_mmu_clear_all_pte_masks(void)
493 + shadow_mmio_mask = 0;
494 + shadow_present_mask = 0;
495 + shadow_acc_track_mask = 0;
496 ++
497 ++ /*
498 ++ * If the CPU has 46 or less physical address bits, then set an
499 ++ * appropriate mask to guard against L1TF attacks. Otherwise, it is
500 ++ * assumed that the CPU is not vulnerable to L1TF.
501 ++ */
502 ++ if (boot_cpu_data.x86_phys_bits <
503 ++ 52 - shadow_nonpresent_or_rsvd_mask_len)
504 ++ shadow_nonpresent_or_rsvd_mask =
505 ++ rsvd_bits(boot_cpu_data.x86_phys_bits -
506 ++ shadow_nonpresent_or_rsvd_mask_len,
507 ++ boot_cpu_data.x86_phys_bits - 1);
508 + }
509 +
510 + static int is_cpuid_PSE36(void)
511 +@@ -5500,7 +5533,7 @@ int kvm_mmu_module_init(void)
512 + {
513 + int ret = -ENOMEM;
514 +
515 +- kvm_mmu_clear_all_pte_masks();
516 ++ kvm_mmu_reset_all_pte_masks();
517 +
518 + pte_list_desc_cache = kmem_cache_create("pte_list_desc",
519 + sizeof(struct pte_list_desc),
520 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
521 +index bedabcf33a3e..9869bfd0c601 100644
522 +--- a/arch/x86/kvm/vmx.c
523 ++++ b/arch/x86/kvm/vmx.c
524 +@@ -939,17 +939,21 @@ struct vcpu_vmx {
525 + /*
526 + * loaded_vmcs points to the VMCS currently used in this vcpu. For a
527 + * non-nested (L1) guest, it always points to vmcs01. For a nested
528 +- * guest (L2), it points to a different VMCS.
529 ++ * guest (L2), it points to a different VMCS. loaded_cpu_state points
530 ++ * to the VMCS whose state is loaded into the CPU registers that only
531 ++ * need to be switched when transitioning to/from the kernel; a NULL
532 ++ * value indicates that host state is loaded.
533 + */
534 + struct loaded_vmcs vmcs01;
535 + struct loaded_vmcs *loaded_vmcs;
536 ++ struct loaded_vmcs *loaded_cpu_state;
537 + bool __launched; /* temporary, used in vmx_vcpu_run */
538 + struct msr_autoload {
539 + struct vmx_msrs guest;
540 + struct vmx_msrs host;
541 + } msr_autoload;
542 ++
543 + struct {
544 +- int loaded;
545 + u16 fs_sel, gs_sel, ldt_sel;
546 + #ifdef CONFIG_X86_64
547 + u16 ds_sel, es_sel;
548 +@@ -2750,10 +2754,11 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
549 + #endif
550 + int i;
551 +
552 +- if (vmx->host_state.loaded)
553 ++ if (vmx->loaded_cpu_state)
554 + return;
555 +
556 +- vmx->host_state.loaded = 1;
557 ++ vmx->loaded_cpu_state = vmx->loaded_vmcs;
558 ++
559 + /*
560 + * Set host fs and gs selectors. Unfortunately, 22.2.3 does not
561 + * allow segment selectors with cpl > 0 or ti == 1.
562 +@@ -2815,11 +2820,14 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
563 +
564 + static void __vmx_load_host_state(struct vcpu_vmx *vmx)
565 + {
566 +- if (!vmx->host_state.loaded)
567 ++ if (!vmx->loaded_cpu_state)
568 + return;
569 +
570 ++ WARN_ON_ONCE(vmx->loaded_cpu_state != vmx->loaded_vmcs);
571 ++
572 + ++vmx->vcpu.stat.host_state_reload;
573 +- vmx->host_state.loaded = 0;
574 ++ vmx->loaded_cpu_state = NULL;
575 ++
576 + #ifdef CONFIG_X86_64
577 + if (is_long_mode(&vmx->vcpu))
578 + rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
579 +@@ -8115,7 +8123,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
580 +
581 + /* CPL=0 must be checked manually. */
582 + if (vmx_get_cpl(vcpu)) {
583 +- kvm_queue_exception(vcpu, UD_VECTOR);
584 ++ kvm_inject_gp(vcpu, 0);
585 + return 1;
586 + }
587 +
588 +@@ -8179,7 +8187,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
589 + static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
590 + {
591 + if (vmx_get_cpl(vcpu)) {
592 +- kvm_queue_exception(vcpu, UD_VECTOR);
593 ++ kvm_inject_gp(vcpu, 0);
594 + return 0;
595 + }
596 +
597 +@@ -10517,8 +10525,8 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
598 + return;
599 +
600 + cpu = get_cpu();
601 +- vmx->loaded_vmcs = vmcs;
602 + vmx_vcpu_put(vcpu);
603 ++ vmx->loaded_vmcs = vmcs;
604 + vmx_vcpu_load(vcpu, cpu);
605 + put_cpu();
606 + }
607 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
608 +index 24c84aa87049..94cd63081471 100644
609 +--- a/arch/x86/kvm/x86.c
610 ++++ b/arch/x86/kvm/x86.c
611 +@@ -6506,20 +6506,22 @@ static void kvm_set_mmio_spte_mask(void)
612 + * Set the reserved bits and the present bit of an paging-structure
613 + * entry to generate page fault with PFER.RSV = 1.
614 + */
615 +- /* Mask the reserved physical address bits. */
616 +- mask = rsvd_bits(maxphyaddr, 51);
617 ++
618 ++ /*
619 ++ * Mask the uppermost physical address bit, which would be reserved as
620 ++ * long as the supported physical address width is less than 52.
621 ++ */
622 ++ mask = 1ull << 51;
623 +
624 + /* Set the present bit. */
625 + mask |= 1ull;
626 +
627 +-#ifdef CONFIG_X86_64
628 + /*
629 + * If reserved bit is not supported, clear the present bit to disable
630 + * mmio page fault.
631 + */
632 +- if (maxphyaddr == 52)
633 ++ if (IS_ENABLED(CONFIG_X86_64) && maxphyaddr == 52)
634 + mask &= ~1ull;
635 +-#endif
636 +
637 + kvm_mmu_set_mmio_spte_mask(mask, mask);
638 + }
639 +diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
640 +index 2c30cabfda90..071d82ec9abb 100644
641 +--- a/arch/x86/xen/mmu_pv.c
642 ++++ b/arch/x86/xen/mmu_pv.c
643 +@@ -434,14 +434,13 @@ static void xen_set_pud(pud_t *ptr, pud_t val)
644 + static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
645 + {
646 + trace_xen_mmu_set_pte_atomic(ptep, pte);
647 +- set_64bit((u64 *)ptep, native_pte_val(pte));
648 ++ __xen_set_pte(ptep, pte);
649 + }
650 +
651 + static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
652 + {
653 + trace_xen_mmu_pte_clear(mm, addr, ptep);
654 +- if (!xen_batched_set_pte(ptep, native_make_pte(0)))
655 +- native_pte_clear(mm, addr, ptep);
656 ++ __xen_set_pte(ptep, native_make_pte(0));
657 + }
658 +
659 + static void xen_pmd_clear(pmd_t *pmdp)
660 +@@ -1571,7 +1570,7 @@ static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
661 + pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
662 + pte_val_ma(pte));
663 + #endif
664 +- native_set_pte(ptep, pte);
665 ++ __xen_set_pte(ptep, pte);
666 + }
667 +
668 + /* Early in boot, while setting up the initial pagetable, assume
669 +diff --git a/block/bio.c b/block/bio.c
670 +index 047c5dca6d90..ff94640bc734 100644
671 +--- a/block/bio.c
672 ++++ b/block/bio.c
673 +@@ -156,7 +156,7 @@ out:
674 +
675 + unsigned int bvec_nr_vecs(unsigned short idx)
676 + {
677 +- return bvec_slabs[idx].nr_vecs;
678 ++ return bvec_slabs[--idx].nr_vecs;
679 + }
680 +
681 + void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
682 +diff --git a/block/blk-core.c b/block/blk-core.c
683 +index 1646ea85dade..746a5eac4541 100644
684 +--- a/block/blk-core.c
685 ++++ b/block/blk-core.c
686 +@@ -2159,7 +2159,9 @@ static inline bool should_fail_request(struct hd_struct *part,
687 +
688 + static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
689 + {
690 +- if (part->policy && op_is_write(bio_op(bio))) {
691 ++ const int op = bio_op(bio);
692 ++
693 ++ if (part->policy && (op_is_write(op) && !op_is_flush(op))) {
694 + char b[BDEVNAME_SIZE];
695 +
696 + WARN_ONCE(1,
697 +diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
698 +index 3de0836163c2..d5f2c21d8531 100644
699 +--- a/block/blk-mq-tag.c
700 ++++ b/block/blk-mq-tag.c
701 +@@ -23,6 +23,9 @@ bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
702 +
703 + /*
704 + * If a previously inactive queue goes active, bump the active user count.
705 ++ * We need to do this before try to allocate driver tag, then even if fail
706 ++ * to get tag when first time, the other shared-tag users could reserve
707 ++ * budget for it.
708 + */
709 + bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
710 + {
711 +diff --git a/block/blk-mq.c b/block/blk-mq.c
712 +index 654b0dc7e001..2f9e14361673 100644
713 +--- a/block/blk-mq.c
714 ++++ b/block/blk-mq.c
715 +@@ -285,7 +285,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
716 + rq->tag = -1;
717 + rq->internal_tag = tag;
718 + } else {
719 +- if (blk_mq_tag_busy(data->hctx)) {
720 ++ if (data->hctx->flags & BLK_MQ_F_TAG_SHARED) {
721 + rq_flags = RQF_MQ_INFLIGHT;
722 + atomic_inc(&data->hctx->nr_active);
723 + }
724 +@@ -367,6 +367,8 @@ static struct request *blk_mq_get_request(struct request_queue *q,
725 + if (!op_is_flush(op) && e->type->ops.mq.limit_depth &&
726 + !(data->flags & BLK_MQ_REQ_RESERVED))
727 + e->type->ops.mq.limit_depth(op, data);
728 ++ } else {
729 ++ blk_mq_tag_busy(data->hctx);
730 + }
731 +
732 + tag = blk_mq_get_tag(data);
733 +@@ -970,6 +972,7 @@ bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
734 + .hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
735 + .flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
736 + };
737 ++ bool shared;
738 +
739 + might_sleep_if(wait);
740 +
741 +@@ -979,9 +982,10 @@ bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
742 + if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
743 + data.flags |= BLK_MQ_REQ_RESERVED;
744 +
745 ++ shared = blk_mq_tag_busy(data.hctx);
746 + rq->tag = blk_mq_get_tag(&data);
747 + if (rq->tag >= 0) {
748 +- if (blk_mq_tag_busy(data.hctx)) {
749 ++ if (shared) {
750 + rq->rq_flags |= RQF_MQ_INFLIGHT;
751 + atomic_inc(&data.hctx->nr_active);
752 + }
753 +diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
754 +index 82b6c27b3245..f6f180f3aa1c 100644
755 +--- a/block/cfq-iosched.c
756 ++++ b/block/cfq-iosched.c
757 +@@ -4735,12 +4735,13 @@ USEC_SHOW_FUNCTION(cfq_target_latency_us_show, cfqd->cfq_target_latency);
758 + static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
759 + { \
760 + struct cfq_data *cfqd = e->elevator_data; \
761 +- unsigned int __data; \
762 ++ unsigned int __data, __min = (MIN), __max = (MAX); \
763 ++ \
764 + cfq_var_store(&__data, (page)); \
765 +- if (__data < (MIN)) \
766 +- __data = (MIN); \
767 +- else if (__data > (MAX)) \
768 +- __data = (MAX); \
769 ++ if (__data < __min) \
770 ++ __data = __min; \
771 ++ else if (__data > __max) \
772 ++ __data = __max; \
773 + if (__CONV) \
774 + *(__PTR) = (u64)__data * NSEC_PER_MSEC; \
775 + else \
776 +@@ -4769,12 +4770,13 @@ STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX,
777 + static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
778 + { \
779 + struct cfq_data *cfqd = e->elevator_data; \
780 +- unsigned int __data; \
781 ++ unsigned int __data, __min = (MIN), __max = (MAX); \
782 ++ \
783 + cfq_var_store(&__data, (page)); \
784 +- if (__data < (MIN)) \
785 +- __data = (MIN); \
786 +- else if (__data > (MAX)) \
787 +- __data = (MAX); \
788 ++ if (__data < __min) \
789 ++ __data = __min; \
790 ++ else if (__data > __max) \
791 ++ __data = __max; \
792 + *(__PTR) = (u64)__data * NSEC_PER_USEC; \
793 + return count; \
794 + }
795 +diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
796 +index 3de794bcf8fa..69603ba52a3a 100644
797 +--- a/drivers/acpi/acpica/hwregs.c
798 ++++ b/drivers/acpi/acpica/hwregs.c
799 +@@ -528,13 +528,18 @@ acpi_status acpi_hw_register_read(u32 register_id, u32 *return_value)
800 +
801 + status =
802 + acpi_hw_read(&value64, &acpi_gbl_FADT.xpm2_control_block);
803 +- value = (u32)value64;
804 ++ if (ACPI_SUCCESS(status)) {
805 ++ value = (u32)value64;
806 ++ }
807 + break;
808 +
809 + case ACPI_REGISTER_PM_TIMER: /* 32-bit access */
810 +
811 + status = acpi_hw_read(&value64, &acpi_gbl_FADT.xpm_timer_block);
812 +- value = (u32)value64;
813 ++ if (ACPI_SUCCESS(status)) {
814 ++ value = (u32)value64;
815 ++ }
816 ++
817 + break;
818 +
819 + case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */
820 +diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
821 +index 970dd87d347c..6799d00dd790 100644
822 +--- a/drivers/acpi/scan.c
823 ++++ b/drivers/acpi/scan.c
824 +@@ -1612,7 +1612,8 @@ static int acpi_add_single_object(struct acpi_device **child,
825 + * Note this must be done before the get power-/wakeup_dev-flags calls.
826 + */
827 + if (type == ACPI_BUS_TYPE_DEVICE)
828 +- acpi_bus_get_status(device);
829 ++ if (acpi_bus_get_status(device) < 0)
830 ++ acpi_set_device_status(device, 0);
831 +
832 + acpi_bus_get_power_flags(device);
833 + acpi_bus_get_wakeup_device_flags(device);
834 +@@ -1690,7 +1691,7 @@ static int acpi_bus_type_and_status(acpi_handle handle, int *type,
835 + * acpi_add_single_object updates this once we've an acpi_device
836 + * so that acpi_bus_get_status' quirk handling can be used.
837 + */
838 +- *sta = 0;
839 ++ *sta = ACPI_STA_DEFAULT;
840 + break;
841 + case ACPI_TYPE_PROCESSOR:
842 + *type = ACPI_BUS_TYPE_PROCESSOR;
843 +diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
844 +index 2a8634a52856..5a628148f3f0 100644
845 +--- a/drivers/clk/rockchip/clk-rk3399.c
846 ++++ b/drivers/clk/rockchip/clk-rk3399.c
847 +@@ -1523,6 +1523,7 @@ static const char *const rk3399_pmucru_critical_clocks[] __initconst = {
848 + "pclk_pmu_src",
849 + "fclk_cm0s_src_pmu",
850 + "clk_timer_src_pmu",
851 ++ "pclk_rkpwm_pmu",
852 + };
853 +
854 + static void __init rk3399_clk_init(struct device_node *np)
855 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
856 +index 7dcbac8af9a7..b60aa7d43cb7 100644
857 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
858 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
859 +@@ -1579,9 +1579,9 @@ struct amdgpu_device {
860 + DECLARE_HASHTABLE(mn_hash, 7);
861 +
862 + /* tracking pinned memory */
863 +- u64 vram_pin_size;
864 +- u64 invisible_pin_size;
865 +- u64 gart_pin_size;
866 ++ atomic64_t vram_pin_size;
867 ++ atomic64_t visible_pin_size;
868 ++ atomic64_t gart_pin_size;
869 +
870 + /* amdkfd interface */
871 + struct kfd_dev *kfd;
872 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
873 +index 9c85a90be293..5a196ec49be8 100644
874 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
875 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
876 +@@ -257,7 +257,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
877 + return;
878 + }
879 +
880 +- total_vram = adev->gmc.real_vram_size - adev->vram_pin_size;
881 ++ total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
882 + used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
883 + free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
884 +
885 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
886 +index 91517b166a3b..063f9aa96946 100644
887 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
888 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
889 +@@ -494,13 +494,13 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
890 + case AMDGPU_INFO_VRAM_GTT: {
891 + struct drm_amdgpu_info_vram_gtt vram_gtt;
892 +
893 +- vram_gtt.vram_size = adev->gmc.real_vram_size;
894 +- vram_gtt.vram_size -= adev->vram_pin_size;
895 +- vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size;
896 +- vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
897 ++ vram_gtt.vram_size = adev->gmc.real_vram_size -
898 ++ atomic64_read(&adev->vram_pin_size);
899 ++ vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size -
900 ++ atomic64_read(&adev->visible_pin_size);
901 + vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
902 + vram_gtt.gtt_size *= PAGE_SIZE;
903 +- vram_gtt.gtt_size -= adev->gart_pin_size;
904 ++ vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
905 + return copy_to_user(out, &vram_gtt,
906 + min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
907 + }
908 +@@ -509,17 +509,16 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
909 +
910 + memset(&mem, 0, sizeof(mem));
911 + mem.vram.total_heap_size = adev->gmc.real_vram_size;
912 +- mem.vram.usable_heap_size =
913 +- adev->gmc.real_vram_size - adev->vram_pin_size;
914 ++ mem.vram.usable_heap_size = adev->gmc.real_vram_size -
915 ++ atomic64_read(&adev->vram_pin_size);
916 + mem.vram.heap_usage =
917 + amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
918 + mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
919 +
920 + mem.cpu_accessible_vram.total_heap_size =
921 + adev->gmc.visible_vram_size;
922 +- mem.cpu_accessible_vram.usable_heap_size =
923 +- adev->gmc.visible_vram_size -
924 +- (adev->vram_pin_size - adev->invisible_pin_size);
925 ++ mem.cpu_accessible_vram.usable_heap_size = adev->gmc.visible_vram_size -
926 ++ atomic64_read(&adev->visible_pin_size);
927 + mem.cpu_accessible_vram.heap_usage =
928 + amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
929 + mem.cpu_accessible_vram.max_allocation =
930 +@@ -527,8 +526,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
931 +
932 + mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
933 + mem.gtt.total_heap_size *= PAGE_SIZE;
934 +- mem.gtt.usable_heap_size = mem.gtt.total_heap_size
935 +- - adev->gart_pin_size;
936 ++ mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
937 ++ atomic64_read(&adev->gart_pin_size);
938 + mem.gtt.heap_usage =
939 + amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
940 + mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
941 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
942 +index 3526efa8960e..3873c3353020 100644
943 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
944 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
945 +@@ -50,11 +50,35 @@ static bool amdgpu_need_backup(struct amdgpu_device *adev)
946 + return true;
947 + }
948 +
949 ++/**
950 ++ * amdgpu_bo_subtract_pin_size - Remove BO from pin_size accounting
951 ++ *
952 ++ * @bo: &amdgpu_bo buffer object
953 ++ *
954 ++ * This function is called when a BO stops being pinned, and updates the
955 ++ * &amdgpu_device pin_size values accordingly.
956 ++ */
957 ++static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo)
958 ++{
959 ++ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
960 ++
961 ++ if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
962 ++ atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
963 ++ atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
964 ++ &adev->visible_pin_size);
965 ++ } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
966 ++ atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
967 ++ }
968 ++}
969 ++
970 + static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
971 + {
972 + struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
973 + struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
974 +
975 ++ if (bo->pin_count > 0)
976 ++ amdgpu_bo_subtract_pin_size(bo);
977 ++
978 + if (bo->kfd_bo)
979 + amdgpu_amdkfd_unreserve_system_memory_limit(bo);
980 +
981 +@@ -761,10 +785,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
982 +
983 + domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
984 + if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
985 +- adev->vram_pin_size += amdgpu_bo_size(bo);
986 +- adev->invisible_pin_size += amdgpu_vram_mgr_bo_invisible_size(bo);
987 ++ atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
988 ++ atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
989 ++ &adev->visible_pin_size);
990 + } else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
991 +- adev->gart_pin_size += amdgpu_bo_size(bo);
992 ++ atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
993 + }
994 +
995 + error:
996 +@@ -790,12 +815,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
997 + if (bo->pin_count)
998 + return 0;
999 +
1000 +- if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
1001 +- adev->vram_pin_size -= amdgpu_bo_size(bo);
1002 +- adev->invisible_pin_size -= amdgpu_vram_mgr_bo_invisible_size(bo);
1003 +- } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
1004 +- adev->gart_pin_size -= amdgpu_bo_size(bo);
1005 +- }
1006 ++ amdgpu_bo_subtract_pin_size(bo);
1007 +
1008 + for (i = 0; i < bo->placement.num_placement; i++) {
1009 + bo->placements[i].lpfn = 0;
1010 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
1011 +index a44c3d58fef4..2ec20348b983 100644
1012 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
1013 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
1014 +@@ -1157,7 +1157,7 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
1015 + int r, size = sizeof(vddnb);
1016 +
1017 + /* only APUs have vddnb */
1018 +- if (adev->flags & AMD_IS_APU)
1019 ++ if (!(adev->flags & AMD_IS_APU))
1020 + return -EINVAL;
1021 +
1022 + /* Can't get voltage when the card is off */
1023 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
1024 +index 9f1a5bd39ae8..5b39d1399630 100644
1025 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
1026 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
1027 +@@ -131,6 +131,11 @@ psp_cmd_submit_buf(struct psp_context *psp,
1028 + msleep(1);
1029 + }
1030 +
1031 ++ if (ucode) {
1032 ++ ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
1033 ++ ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
1034 ++ }
1035 ++
1036 + return ret;
1037 + }
1038 +
1039 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
1040 +index 86a0715d9431..1cafe8d83a4d 100644
1041 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
1042 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
1043 +@@ -53,9 +53,8 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
1044 + int fd,
1045 + enum drm_sched_priority priority)
1046 + {
1047 +- struct file *filp = fcheck(fd);
1048 ++ struct file *filp = fget(fd);
1049 + struct drm_file *file;
1050 +- struct pid *pid;
1051 + struct amdgpu_fpriv *fpriv;
1052 + struct amdgpu_ctx *ctx;
1053 + uint32_t id;
1054 +@@ -63,20 +62,12 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
1055 + if (!filp)
1056 + return -EINVAL;
1057 +
1058 +- pid = get_pid(((struct drm_file *)filp->private_data)->pid);
1059 ++ file = filp->private_data;
1060 ++ fpriv = file->driver_priv;
1061 ++ idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
1062 ++ amdgpu_ctx_priority_override(ctx, priority);
1063 +
1064 +- mutex_lock(&adev->ddev->filelist_mutex);
1065 +- list_for_each_entry(file, &adev->ddev->filelist, lhead) {
1066 +- if (file->pid != pid)
1067 +- continue;
1068 +-
1069 +- fpriv = file->driver_priv;
1070 +- idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
1071 +- amdgpu_ctx_priority_override(ctx, priority);
1072 +- }
1073 +- mutex_unlock(&adev->ddev->filelist_mutex);
1074 +-
1075 +- put_pid(pid);
1076 ++ fput(filp);
1077 +
1078 + return 0;
1079 + }
1080 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
1081 +index e5da4654b630..8b3cc6687769 100644
1082 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
1083 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
1084 +@@ -73,7 +73,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
1085 + uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
1086 + int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
1087 +
1088 +-u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo);
1089 ++u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
1090 + uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
1091 + uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
1092 +
1093 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
1094 +index 08e38579af24..bdc472b6e641 100644
1095 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
1096 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
1097 +@@ -194,6 +194,7 @@ enum AMDGPU_UCODE_ID {
1098 + AMDGPU_UCODE_ID_SMC,
1099 + AMDGPU_UCODE_ID_UVD,
1100 + AMDGPU_UCODE_ID_VCE,
1101 ++ AMDGPU_UCODE_ID_VCN,
1102 + AMDGPU_UCODE_ID_MAXIMUM,
1103 + };
1104 +
1105 +@@ -226,6 +227,9 @@ struct amdgpu_firmware_info {
1106 + void *kaddr;
1107 + /* ucode_size_bytes */
1108 + uint32_t ucode_size;
1109 ++ /* starting tmr mc address */
1110 ++ uint32_t tmr_mc_addr_lo;
1111 ++ uint32_t tmr_mc_addr_hi;
1112 + };
1113 +
1114 + void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
1115 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
1116 +index 1b4ad9b2a755..bee49991c1ff 100644
1117 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
1118 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
1119 +@@ -111,9 +111,10 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
1120 + version_major, version_minor, family_id);
1121 + }
1122 +
1123 +- bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
1124 +- + AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
1125 ++ bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
1126 + + AMDGPU_VCN_SESSION_SIZE * 40;
1127 ++ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1128 ++ bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
1129 + r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
1130 + AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
1131 + &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
1132 +@@ -187,11 +188,13 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
1133 + unsigned offset;
1134 +
1135 + hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
1136 +- offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
1137 +- memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
1138 +- le32_to_cpu(hdr->ucode_size_bytes));
1139 +- size -= le32_to_cpu(hdr->ucode_size_bytes);
1140 +- ptr += le32_to_cpu(hdr->ucode_size_bytes);
1141 ++ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1142 ++ offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
1143 ++ memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
1144 ++ le32_to_cpu(hdr->ucode_size_bytes));
1145 ++ size -= le32_to_cpu(hdr->ucode_size_bytes);
1146 ++ ptr += le32_to_cpu(hdr->ucode_size_bytes);
1147 ++ }
1148 + memset_io(ptr, 0, size);
1149 + }
1150 +
1151 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
1152 +index b6333f92ba45..ef4784458800 100644
1153 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
1154 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
1155 +@@ -97,33 +97,29 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
1156 + }
1157 +
1158 + /**
1159 +- * amdgpu_vram_mgr_bo_invisible_size - CPU invisible BO size
1160 ++ * amdgpu_vram_mgr_bo_visible_size - CPU visible BO size
1161 + *
1162 + * @bo: &amdgpu_bo buffer object (must be in VRAM)
1163 + *
1164 + * Returns:
1165 +- * How much of the given &amdgpu_bo buffer object lies in CPU invisible VRAM.
1166 ++ * How much of the given &amdgpu_bo buffer object lies in CPU visible VRAM.
1167 + */
1168 +-u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo)
1169 ++u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
1170 + {
1171 + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1172 + struct ttm_mem_reg *mem = &bo->tbo.mem;
1173 + struct drm_mm_node *nodes = mem->mm_node;
1174 + unsigned pages = mem->num_pages;
1175 +- u64 usage = 0;
1176 ++ u64 usage;
1177 +
1178 + if (adev->gmc.visible_vram_size == adev->gmc.real_vram_size)
1179 +- return 0;
1180 ++ return amdgpu_bo_size(bo);
1181 +
1182 + if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
1183 +- return amdgpu_bo_size(bo);
1184 ++ return 0;
1185 +
1186 +- while (nodes && pages) {
1187 +- usage += nodes->size << PAGE_SHIFT;
1188 +- usage -= amdgpu_vram_mgr_vis_size(adev, nodes);
1189 +- pages -= nodes->size;
1190 +- ++nodes;
1191 +- }
1192 ++ for (usage = 0; nodes && pages; pages -= nodes->size, nodes++)
1193 ++ usage += amdgpu_vram_mgr_vis_size(adev, nodes);
1194 +
1195 + return usage;
1196 + }
1197 +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
1198 +index a69153435ea7..8f0ac805ecd2 100644
1199 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
1200 ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
1201 +@@ -3433,7 +3433,7 @@ static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
1202 +
1203 + /* wait for RLC_SAFE_MODE */
1204 + for (i = 0; i < adev->usec_timeout; i++) {
1205 +- if (!REG_GET_FIELD(SOC15_REG_OFFSET(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
1206 ++ if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
1207 + break;
1208 + udelay(1);
1209 + }
1210 +diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
1211 +index 0ff136d02d9b..02be34e72ed9 100644
1212 +--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
1213 ++++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
1214 +@@ -88,6 +88,9 @@ psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *
1215 + case AMDGPU_UCODE_ID_VCE:
1216 + *type = GFX_FW_TYPE_VCE;
1217 + break;
1218 ++ case AMDGPU_UCODE_ID_VCN:
1219 ++ *type = GFX_FW_TYPE_VCN;
1220 ++ break;
1221 + case AMDGPU_UCODE_ID_MAXIMUM:
1222 + default:
1223 + return -EINVAL;
1224 +diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
1225 +index bfddf97dd13e..a16eebc05d12 100644
1226 +--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
1227 ++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
1228 +@@ -1569,7 +1569,6 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
1229 + static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
1230 + .type = AMDGPU_RING_TYPE_UVD,
1231 + .align_mask = 0xf,
1232 +- .nop = PACKET0(mmUVD_NO_OP, 0),
1233 + .support_64bit_ptrs = false,
1234 + .get_rptr = uvd_v6_0_ring_get_rptr,
1235 + .get_wptr = uvd_v6_0_ring_get_wptr,
1236 +@@ -1587,7 +1586,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
1237 + .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
1238 + .test_ring = uvd_v6_0_ring_test_ring,
1239 + .test_ib = amdgpu_uvd_ring_test_ib,
1240 +- .insert_nop = amdgpu_ring_insert_nop,
1241 ++ .insert_nop = uvd_v6_0_ring_insert_nop,
1242 + .pad_ib = amdgpu_ring_generic_pad_ib,
1243 + .begin_use = amdgpu_uvd_ring_begin_use,
1244 + .end_use = amdgpu_uvd_ring_end_use,
1245 +diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
1246 +index 29684c3ea4ef..700119168067 100644
1247 +--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
1248 ++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
1249 +@@ -90,6 +90,16 @@ static int vcn_v1_0_sw_init(void *handle)
1250 + if (r)
1251 + return r;
1252 +
1253 ++ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1254 ++ const struct common_firmware_header *hdr;
1255 ++ hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
1256 ++ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
1257 ++ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
1258 ++ adev->firmware.fw_size +=
1259 ++ ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
1260 ++ DRM_INFO("PSP loading VCN firmware\n");
1261 ++ }
1262 ++
1263 + r = amdgpu_vcn_resume(adev);
1264 + if (r)
1265 + return r;
1266 +@@ -241,26 +251,38 @@ static int vcn_v1_0_resume(void *handle)
1267 + static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
1268 + {
1269 + uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
1270 +-
1271 +- WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
1272 ++ uint32_t offset;
1273 ++
1274 ++ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1275 ++ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
1276 ++ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
1277 ++ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
1278 ++ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
1279 ++ WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
1280 ++ offset = 0;
1281 ++ } else {
1282 ++ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
1283 + lower_32_bits(adev->vcn.gpu_addr));
1284 +- WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
1285 ++ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
1286 + upper_32_bits(adev->vcn.gpu_addr));
1287 +- WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
1288 +- AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
1289 ++ offset = size;
1290 ++ WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
1291 ++ AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
1292 ++ }
1293 ++
1294 + WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
1295 +
1296 + WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
1297 +- lower_32_bits(adev->vcn.gpu_addr + size));
1298 ++ lower_32_bits(adev->vcn.gpu_addr + offset));
1299 + WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
1300 +- upper_32_bits(adev->vcn.gpu_addr + size));
1301 ++ upper_32_bits(adev->vcn.gpu_addr + offset));
1302 + WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
1303 + WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_HEAP_SIZE);
1304 +
1305 + WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
1306 +- lower_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
1307 ++ lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE));
1308 + WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
1309 +- upper_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
1310 ++ upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE));
1311 + WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
1312 + WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
1313 + AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40));
1314 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1315 +index 770c6b24be0b..e484d0a94bdc 100644
1316 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1317 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1318 +@@ -1334,6 +1334,7 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
1319 + struct backlight_properties props = { 0 };
1320 +
1321 + props.max_brightness = AMDGPU_MAX_BL_LEVEL;
1322 ++ props.brightness = AMDGPU_MAX_BL_LEVEL;
1323 + props.type = BACKLIGHT_RAW;
1324 +
1325 + snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
1326 +@@ -2123,13 +2124,8 @@ convert_color_depth_from_display_info(const struct drm_connector *connector)
1327 + static enum dc_aspect_ratio
1328 + get_aspect_ratio(const struct drm_display_mode *mode_in)
1329 + {
1330 +- int32_t width = mode_in->crtc_hdisplay * 9;
1331 +- int32_t height = mode_in->crtc_vdisplay * 16;
1332 +-
1333 +- if ((width - height) < 10 && (width - height) > -10)
1334 +- return ASPECT_RATIO_16_9;
1335 +- else
1336 +- return ASPECT_RATIO_4_3;
1337 ++ /* 1-1 mapping, since both enums follow the HDMI spec. */
1338 ++ return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
1339 + }
1340 +
1341 + static enum dc_color_space
1342 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
1343 +index 52f2c01349e3..9bfb040352e9 100644
1344 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
1345 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
1346 +@@ -98,10 +98,16 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name,
1347 + */
1348 + void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
1349 + {
1350 +- struct dm_crtc_state *crtc_state = to_dm_crtc_state(crtc->state);
1351 +- struct dc_stream_state *stream_state = crtc_state->stream;
1352 ++ struct dm_crtc_state *crtc_state;
1353 ++ struct dc_stream_state *stream_state;
1354 + uint32_t crcs[3];
1355 +
1356 ++ if (crtc == NULL)
1357 ++ return;
1358 ++
1359 ++ crtc_state = to_dm_crtc_state(crtc->state);
1360 ++ stream_state = crtc_state->stream;
1361 ++
1362 + /* Early return if CRC capture is not enabled. */
1363 + if (!crtc_state->crc_enabled)
1364 + return;
1365 +diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
1366 +index 651e1fd4622f..a558bfaa0c46 100644
1367 +--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
1368 ++++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
1369 +@@ -808,6 +808,24 @@ static enum bp_result transmitter_control_v1_5(
1370 + * (=1: 8bpp, =1.25: 10bpp, =1.5:12bpp, =2: 16bpp)
1371 + * LVDS mode: usPixelClock = pixel clock
1372 + */
1373 ++ if (cntl->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
1374 ++ switch (cntl->color_depth) {
1375 ++ case COLOR_DEPTH_101010:
1376 ++ params.usSymClock =
1377 ++ cpu_to_le16((le16_to_cpu(params.usSymClock) * 30) / 24);
1378 ++ break;
1379 ++ case COLOR_DEPTH_121212:
1380 ++ params.usSymClock =
1381 ++ cpu_to_le16((le16_to_cpu(params.usSymClock) * 36) / 24);
1382 ++ break;
1383 ++ case COLOR_DEPTH_161616:
1384 ++ params.usSymClock =
1385 ++ cpu_to_le16((le16_to_cpu(params.usSymClock) * 48) / 24);
1386 ++ break;
1387 ++ default:
1388 ++ break;
1389 ++ }
1390 ++ }
1391 +
1392 + if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
1393 + result = BP_RESULT_OK;
1394 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
1395 +index 2fa521812d23..8a7890b03d97 100644
1396 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
1397 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
1398 +@@ -728,6 +728,17 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
1399 + break;
1400 + case EDID_NO_RESPONSE:
1401 + DC_LOG_ERROR("No EDID read.\n");
1402 ++
1403 ++ /*
1404 ++ * Abort detection for non-DP connectors if we have
1405 ++ * no EDID
1406 ++ *
1407 ++ * DP needs to report as connected if HDP is high
1408 ++ * even if we have no EDID in order to go to
1409 ++ * fail-safe mode
1410 ++ */
1411 ++ if (!dc_is_dp_signal(link->connector_signal))
1412 ++ return false;
1413 + default:
1414 + break;
1415 + }
1416 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
1417 +index 751f3ac9d921..754b4c2fc90a 100644
1418 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
1419 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
1420 +@@ -268,24 +268,30 @@ bool resource_construct(
1421 +
1422 + return true;
1423 + }
1424 ++static int find_matching_clock_source(
1425 ++ const struct resource_pool *pool,
1426 ++ struct clock_source *clock_source)
1427 ++{
1428 +
1429 ++ int i;
1430 ++
1431 ++ for (i = 0; i < pool->clk_src_count; i++) {
1432 ++ if (pool->clock_sources[i] == clock_source)
1433 ++ return i;
1434 ++ }
1435 ++ return -1;
1436 ++}
1437 +
1438 + void resource_unreference_clock_source(
1439 + struct resource_context *res_ctx,
1440 + const struct resource_pool *pool,
1441 + struct clock_source *clock_source)
1442 + {
1443 +- int i;
1444 +-
1445 +- for (i = 0; i < pool->clk_src_count; i++) {
1446 +- if (pool->clock_sources[i] != clock_source)
1447 +- continue;
1448 ++ int i = find_matching_clock_source(pool, clock_source);
1449 +
1450 ++ if (i > -1)
1451 + res_ctx->clock_source_ref_count[i]--;
1452 +
1453 +- break;
1454 +- }
1455 +-
1456 + if (pool->dp_clock_source == clock_source)
1457 + res_ctx->dp_clock_source_ref_count--;
1458 + }
1459 +@@ -295,19 +301,31 @@ void resource_reference_clock_source(
1460 + const struct resource_pool *pool,
1461 + struct clock_source *clock_source)
1462 + {
1463 +- int i;
1464 +- for (i = 0; i < pool->clk_src_count; i++) {
1465 +- if (pool->clock_sources[i] != clock_source)
1466 +- continue;
1467 ++ int i = find_matching_clock_source(pool, clock_source);
1468 +
1469 ++ if (i > -1)
1470 + res_ctx->clock_source_ref_count[i]++;
1471 +- break;
1472 +- }
1473 +
1474 + if (pool->dp_clock_source == clock_source)
1475 + res_ctx->dp_clock_source_ref_count++;
1476 + }
1477 +
1478 ++int resource_get_clock_source_reference(
1479 ++ struct resource_context *res_ctx,
1480 ++ const struct resource_pool *pool,
1481 ++ struct clock_source *clock_source)
1482 ++{
1483 ++ int i = find_matching_clock_source(pool, clock_source);
1484 ++
1485 ++ if (i > -1)
1486 ++ return res_ctx->clock_source_ref_count[i];
1487 ++
1488 ++ if (pool->dp_clock_source == clock_source)
1489 ++ return res_ctx->dp_clock_source_ref_count;
1490 ++
1491 ++ return -1;
1492 ++}
1493 ++
1494 + bool resource_are_streams_timing_synchronizable(
1495 + struct dc_stream_state *stream1,
1496 + struct dc_stream_state *stream2)
1497 +@@ -330,6 +348,9 @@ bool resource_are_streams_timing_synchronizable(
1498 + != stream2->timing.pix_clk_khz)
1499 + return false;
1500 +
1501 ++ if (stream1->clamping.c_depth != stream2->clamping.c_depth)
1502 ++ return false;
1503 ++
1504 + if (stream1->phy_pix_clk != stream2->phy_pix_clk
1505 + && (!dc_is_dp_signal(stream1->signal)
1506 + || !dc_is_dp_signal(stream2->signal)))
1507 +@@ -337,6 +358,20 @@ bool resource_are_streams_timing_synchronizable(
1508 +
1509 + return true;
1510 + }
1511 ++static bool is_dp_and_hdmi_sharable(
1512 ++ struct dc_stream_state *stream1,
1513 ++ struct dc_stream_state *stream2)
1514 ++{
1515 ++ if (stream1->ctx->dc->caps.disable_dp_clk_share)
1516 ++ return false;
1517 ++
1518 ++ if (stream1->clamping.c_depth != COLOR_DEPTH_888 ||
1519 ++ stream2->clamping.c_depth != COLOR_DEPTH_888)
1520 ++ return false;
1521 ++
1522 ++ return true;
1523 ++
1524 ++}
1525 +
1526 + static bool is_sharable_clk_src(
1527 + const struct pipe_ctx *pipe_with_clk_src,
1528 +@@ -348,7 +383,10 @@ static bool is_sharable_clk_src(
1529 + if (pipe_with_clk_src->stream->signal == SIGNAL_TYPE_VIRTUAL)
1530 + return false;
1531 +
1532 +- if (dc_is_dp_signal(pipe_with_clk_src->stream->signal))
1533 ++ if (dc_is_dp_signal(pipe_with_clk_src->stream->signal) ||
1534 ++ (dc_is_dp_signal(pipe->stream->signal) &&
1535 ++ !is_dp_and_hdmi_sharable(pipe_with_clk_src->stream,
1536 ++ pipe->stream)))
1537 + return false;
1538 +
1539 + if (dc_is_hdmi_signal(pipe_with_clk_src->stream->signal)
1540 +diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
1541 +index 53c71296f3dd..efe155d50668 100644
1542 +--- a/drivers/gpu/drm/amd/display/dc/dc.h
1543 ++++ b/drivers/gpu/drm/amd/display/dc/dc.h
1544 +@@ -77,6 +77,7 @@ struct dc_caps {
1545 + bool dual_link_dvi;
1546 + bool post_blend_color_processing;
1547 + bool force_dp_tps4_for_cp2520;
1548 ++ bool disable_dp_clk_share;
1549 + };
1550 +
1551 + struct dc_dcc_surface_param {
1552 +diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
1553 +index dbe3b26b6d9e..f6ec1d3dfd0c 100644
1554 +--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
1555 ++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
1556 +@@ -919,7 +919,7 @@ void dce110_link_encoder_enable_tmds_output(
1557 + enum bp_result result;
1558 +
1559 + /* Enable the PHY */
1560 +-
1561 ++ cntl.connector_obj_id = enc110->base.connector;
1562 + cntl.action = TRANSMITTER_CONTROL_ENABLE;
1563 + cntl.engine_id = enc->preferred_engine;
1564 + cntl.transmitter = enc110->base.transmitter;
1565 +@@ -961,7 +961,7 @@ void dce110_link_encoder_enable_dp_output(
1566 + * We need to set number of lanes manually.
1567 + */
1568 + configure_encoder(enc110, link_settings);
1569 +-
1570 ++ cntl.connector_obj_id = enc110->base.connector;
1571 + cntl.action = TRANSMITTER_CONTROL_ENABLE;
1572 + cntl.engine_id = enc->preferred_engine;
1573 + cntl.transmitter = enc110->base.transmitter;
1574 +diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
1575 +index 344dd2e69e7c..aa2f03eb46fe 100644
1576 +--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
1577 ++++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
1578 +@@ -884,7 +884,7 @@ static bool construct(
1579 + dc->caps.i2c_speed_in_khz = 40;
1580 + dc->caps.max_cursor_size = 128;
1581 + dc->caps.dual_link_dvi = true;
1582 +-
1583 ++ dc->caps.disable_dp_clk_share = true;
1584 + for (i = 0; i < pool->base.pipe_count; i++) {
1585 + pool->base.timing_generators[i] =
1586 + dce100_timing_generator_create(
1587 +diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
1588 +index e2994d337044..111c4921987f 100644
1589 +--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
1590 ++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
1591 +@@ -143,7 +143,7 @@ static void wait_for_fbc_state_changed(
1592 + struct dce110_compressor *cp110,
1593 + bool enabled)
1594 + {
1595 +- uint8_t counter = 0;
1596 ++ uint16_t counter = 0;
1597 + uint32_t addr = mmFBC_STATUS;
1598 + uint32_t value;
1599 +
1600 +diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
1601 +index c29052b6da5a..7c0b1d7aa9b8 100644
1602 +--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
1603 ++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
1604 +@@ -1939,7 +1939,9 @@ static void dce110_reset_hw_ctx_wrap(
1605 + pipe_ctx_old->plane_res.mi->funcs->free_mem_input(
1606 + pipe_ctx_old->plane_res.mi, dc->current_state->stream_count);
1607 +
1608 +- if (old_clk)
1609 ++ if (old_clk && 0 == resource_get_clock_source_reference(&context->res_ctx,
1610 ++ dc->res_pool,
1611 ++ old_clk))
1612 + old_clk->funcs->cs_power_down(old_clk);
1613 +
1614 + dc->hwss.disable_plane(dc, pipe_ctx_old);
1615 +diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
1616 +index 48a068964722..6f4992bdc9ce 100644
1617 +--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
1618 ++++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
1619 +@@ -902,6 +902,7 @@ static bool dce80_construct(
1620 + }
1621 +
1622 + dc->caps.max_planes = pool->base.pipe_count;
1623 ++ dc->caps.disable_dp_clk_share = true;
1624 +
1625 + if (!resource_construct(num_virtual_links, dc, &pool->base,
1626 + &res_create_funcs))
1627 +@@ -1087,6 +1088,7 @@ static bool dce81_construct(
1628 + }
1629 +
1630 + dc->caps.max_planes = pool->base.pipe_count;
1631 ++ dc->caps.disable_dp_clk_share = true;
1632 +
1633 + if (!resource_construct(num_virtual_links, dc, &pool->base,
1634 + &res_create_funcs))
1635 +@@ -1268,6 +1270,7 @@ static bool dce83_construct(
1636 + }
1637 +
1638 + dc->caps.max_planes = pool->base.pipe_count;
1639 ++ dc->caps.disable_dp_clk_share = true;
1640 +
1641 + if (!resource_construct(num_virtual_links, dc, &pool->base,
1642 + &res_create_funcs))
1643 +diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
1644 +index 640a647f4611..abf42a7d0859 100644
1645 +--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
1646 ++++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
1647 +@@ -102,6 +102,11 @@ void resource_reference_clock_source(
1648 + const struct resource_pool *pool,
1649 + struct clock_source *clock_source);
1650 +
1651 ++int resource_get_clock_source_reference(
1652 ++ struct resource_context *res_ctx,
1653 ++ const struct resource_pool *pool,
1654 ++ struct clock_source *clock_source);
1655 ++
1656 + bool resource_are_streams_timing_synchronizable(
1657 + struct dc_stream_state *stream1,
1658 + struct dc_stream_state *stream2);
1659 +diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
1660 +index c952845833d7..5e19f5977eb1 100644
1661 +--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
1662 ++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
1663 +@@ -403,6 +403,49 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris12[] = {
1664 + { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
1665 +
1666 + { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
1667 ++ { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
1668 ++
1669 ++ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
1670 ++ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
1671 ++ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND },
1672 ++ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
1673 ++ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
1674 ++ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
1675 ++
1676 ++ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
1677 ++ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
1678 ++ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
1679 ++ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, GPU_CONFIGREG_DIDT_IND },
1680 ++ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
1681 ++
1682 ++ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
1683 ++ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
1684 ++ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, GPU_CONFIGREG_DIDT_IND },
1685 ++ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
1686 ++
1687 ++ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
1688 ++ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
1689 ++ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
1690 ++ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
1691 ++ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
1692 ++ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
1693 ++ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
1694 ++ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
1695 ++
1696 ++ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND },
1697 ++ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
1698 ++ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, GPU_CONFIGREG_DIDT_IND },
1699 ++ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, GPU_CONFIGREG_DIDT_IND },
1700 ++
1701 ++ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND },
1702 ++ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, GPU_CONFIGREG_DIDT_IND },
1703 ++ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
1704 ++ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
1705 ++
1706 ++ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
1707 ++ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
1708 ++
1709 ++ { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
1710 + { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },
1711 +
1712 + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND },
1713 +diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
1714 +index 50690c72b2ea..617557bd8c24 100644
1715 +--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
1716 ++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
1717 +@@ -244,6 +244,7 @@ static int smu8_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
1718 + return 0;
1719 + }
1720 +
1721 ++/* convert from 8bit vid to real voltage in mV*4 */
1722 + static uint32_t smu8_convert_8Bit_index_to_voltage(
1723 + struct pp_hwmgr *hwmgr, uint16_t voltage)
1724 + {
1725 +@@ -1702,13 +1703,13 @@ static int smu8_read_sensor(struct pp_hwmgr *hwmgr, int idx,
1726 + case AMDGPU_PP_SENSOR_VDDNB:
1727 + tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
1728 + CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
1729 +- vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp);
1730 ++ vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp) / 4;
1731 + *((uint32_t *)value) = vddnb;
1732 + return 0;
1733 + case AMDGPU_PP_SENSOR_VDDGFX:
1734 + tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
1735 + CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
1736 +- vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
1737 ++ vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp) / 4;
1738 + *((uint32_t *)value) = vddgfx;
1739 + return 0;
1740 + case AMDGPU_PP_SENSOR_UVD_VCLK:
1741 +diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
1742 +index c98e5de777cd..fcd2808874bf 100644
1743 +--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
1744 ++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
1745 +@@ -490,7 +490,7 @@ static int vega12_get_number_dpm_level(struct pp_hwmgr *hwmgr,
1746 + static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
1747 + PPCLK_e clkID, uint32_t index, uint32_t *clock)
1748 + {
1749 +- int result;
1750 ++ int result = 0;
1751 +
1752 + /*
1753 + *SMU expects the Clock ID to be in the top 16 bits.
1754 +diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
1755 +index a5808382bdf0..c7b4481c90d7 100644
1756 +--- a/drivers/gpu/drm/drm_edid.c
1757 ++++ b/drivers/gpu/drm/drm_edid.c
1758 +@@ -116,6 +116,9 @@ static const struct edid_quirk {
1759 + /* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */
1760 + { "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC },
1761 +
1762 ++ /* SDC panel of Lenovo B50-80 reports 8 bpc, but is a 6 bpc panel */
1763 ++ { "SDC", 0x3652, EDID_QUIRK_FORCE_6BPC },
1764 ++
1765 + /* Belinea 10 15 55 */
1766 + { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
1767 + { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
1768 +@@ -163,8 +166,9 @@ static const struct edid_quirk {
1769 + /* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
1770 + { "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
1771 +
1772 +- /* HTC Vive VR Headset */
1773 ++ /* HTC Vive and Vive Pro VR Headsets */
1774 + { "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
1775 ++ { "HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP },
1776 +
1777 + /* Oculus Rift DK1, DK2, and CV1 VR Headsets */
1778 + { "OVR", 0x0001, EDID_QUIRK_NON_DESKTOP },
1779 +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
1780 +index 686f6552db48..3ef440b235e5 100644
1781 +--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
1782 ++++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
1783 +@@ -799,6 +799,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
1784 +
1785 + free_buffer:
1786 + etnaviv_cmdbuf_free(&gpu->buffer);
1787 ++ gpu->buffer.suballoc = NULL;
1788 + destroy_iommu:
1789 + etnaviv_iommu_destroy(gpu->mmu);
1790 + gpu->mmu = NULL;
1791 +diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
1792 +index 9c449b8d8eab..015f9e93419d 100644
1793 +--- a/drivers/gpu/drm/i915/i915_drv.c
1794 ++++ b/drivers/gpu/drm/i915/i915_drv.c
1795 +@@ -919,7 +919,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
1796 + spin_lock_init(&dev_priv->uncore.lock);
1797 +
1798 + mutex_init(&dev_priv->sb_lock);
1799 +- mutex_init(&dev_priv->modeset_restore_lock);
1800 + mutex_init(&dev_priv->av_mutex);
1801 + mutex_init(&dev_priv->wm.wm_mutex);
1802 + mutex_init(&dev_priv->pps_mutex);
1803 +@@ -1560,11 +1559,6 @@ static int i915_drm_suspend(struct drm_device *dev)
1804 + pci_power_t opregion_target_state;
1805 + int error;
1806 +
1807 +- /* ignore lid events during suspend */
1808 +- mutex_lock(&dev_priv->modeset_restore_lock);
1809 +- dev_priv->modeset_restore = MODESET_SUSPENDED;
1810 +- mutex_unlock(&dev_priv->modeset_restore_lock);
1811 +-
1812 + disable_rpm_wakeref_asserts(dev_priv);
1813 +
1814 + /* We do a lot of poking in a lot of registers, make sure they work
1815 +@@ -1764,10 +1758,6 @@ static int i915_drm_resume(struct drm_device *dev)
1816 +
1817 + intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
1818 +
1819 +- mutex_lock(&dev_priv->modeset_restore_lock);
1820 +- dev_priv->modeset_restore = MODESET_DONE;
1821 +- mutex_unlock(&dev_priv->modeset_restore_lock);
1822 +-
1823 + intel_opregion_notify_adapter(dev_priv, PCI_D0);
1824 +
1825 + enable_rpm_wakeref_asserts(dev_priv);
1826 +diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
1827 +index 71e1aa54f774..7c22fac3aa04 100644
1828 +--- a/drivers/gpu/drm/i915/i915_drv.h
1829 ++++ b/drivers/gpu/drm/i915/i915_drv.h
1830 +@@ -1003,12 +1003,6 @@ struct i915_gem_mm {
1831 + #define I915_ENGINE_DEAD_TIMEOUT (4 * HZ) /* Seqno, head and subunits dead */
1832 + #define I915_SEQNO_DEAD_TIMEOUT (12 * HZ) /* Seqno dead with active head */
1833 +
1834 +-enum modeset_restore {
1835 +- MODESET_ON_LID_OPEN,
1836 +- MODESET_DONE,
1837 +- MODESET_SUSPENDED,
1838 +-};
1839 +-
1840 + #define DP_AUX_A 0x40
1841 + #define DP_AUX_B 0x10
1842 + #define DP_AUX_C 0x20
1843 +@@ -1740,8 +1734,6 @@ struct drm_i915_private {
1844 +
1845 + unsigned long quirks;
1846 +
1847 +- enum modeset_restore modeset_restore;
1848 +- struct mutex modeset_restore_lock;
1849 + struct drm_atomic_state *modeset_restore_state;
1850 + struct drm_modeset_acquire_ctx reset_ctx;
1851 +
1852 +diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
1853 +index 7720569f2024..6e048ee88e3f 100644
1854 +--- a/drivers/gpu/drm/i915/i915_reg.h
1855 ++++ b/drivers/gpu/drm/i915/i915_reg.h
1856 +@@ -8825,6 +8825,7 @@ enum skl_power_gate {
1857 + #define TRANS_MSA_10_BPC (2<<5)
1858 + #define TRANS_MSA_12_BPC (3<<5)
1859 + #define TRANS_MSA_16_BPC (4<<5)
1860 ++#define TRANS_MSA_CEA_RANGE (1<<3)
1861 +
1862 + /* LCPLL Control */
1863 + #define LCPLL_CTL _MMIO(0x130040)
1864 +diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
1865 +index fed26d6e4e27..e195c287c263 100644
1866 +--- a/drivers/gpu/drm/i915/intel_ddi.c
1867 ++++ b/drivers/gpu/drm/i915/intel_ddi.c
1868 +@@ -1659,6 +1659,10 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
1869 + WARN_ON(transcoder_is_dsi(cpu_transcoder));
1870 +
1871 + temp = TRANS_MSA_SYNC_CLK;
1872 ++
1873 ++ if (crtc_state->limited_color_range)
1874 ++ temp |= TRANS_MSA_CEA_RANGE;
1875 ++
1876 + switch (crtc_state->pipe_bpp) {
1877 + case 18:
1878 + temp |= TRANS_MSA_6_BPC;
1879 +diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
1880 +index 16faea30114a..8e465095fe06 100644
1881 +--- a/drivers/gpu/drm/i915/intel_dp.c
1882 ++++ b/drivers/gpu/drm/i915/intel_dp.c
1883 +@@ -4293,18 +4293,6 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
1884 + return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
1885 + }
1886 +
1887 +-/*
1888 +- * If display is now connected check links status,
1889 +- * there has been known issues of link loss triggering
1890 +- * long pulse.
1891 +- *
1892 +- * Some sinks (eg. ASUS PB287Q) seem to perform some
1893 +- * weird HPD ping pong during modesets. So we can apparently
1894 +- * end up with HPD going low during a modeset, and then
1895 +- * going back up soon after. And once that happens we must
1896 +- * retrain the link to get a picture. That's in case no
1897 +- * userspace component reacted to intermittent HPD dip.
1898 +- */
1899 + int intel_dp_retrain_link(struct intel_encoder *encoder,
1900 + struct drm_modeset_acquire_ctx *ctx)
1901 + {
1902 +@@ -4794,7 +4782,8 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
1903 + }
1904 +
1905 + static int
1906 +-intel_dp_long_pulse(struct intel_connector *connector)
1907 ++intel_dp_long_pulse(struct intel_connector *connector,
1908 ++ struct drm_modeset_acquire_ctx *ctx)
1909 + {
1910 + struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1911 + struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
1912 +@@ -4853,6 +4842,22 @@ intel_dp_long_pulse(struct intel_connector *connector)
1913 + */
1914 + status = connector_status_disconnected;
1915 + goto out;
1916 ++ } else {
1917 ++ /*
1918 ++		 * If the display is now connected, check link status;
1919 ++		 * there have been known issues of link loss triggering
1920 ++		 * long pulse.
1921 ++ *
1922 ++ * Some sinks (eg. ASUS PB287Q) seem to perform some
1923 ++ * weird HPD ping pong during modesets. So we can apparently
1924 ++ * end up with HPD going low during a modeset, and then
1925 ++ * going back up soon after. And once that happens we must
1926 ++ * retrain the link to get a picture. That's in case no
1927 ++ * userspace component reacted to intermittent HPD dip.
1928 ++ */
1929 ++ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
1930 ++
1931 ++ intel_dp_retrain_link(encoder, ctx);
1932 + }
1933 +
1934 + /*
1935 +@@ -4914,7 +4919,7 @@ intel_dp_detect(struct drm_connector *connector,
1936 + return ret;
1937 + }
1938 +
1939 +- status = intel_dp_long_pulse(intel_dp->attached_connector);
1940 ++ status = intel_dp_long_pulse(intel_dp->attached_connector, ctx);
1941 + }
1942 +
1943 + intel_dp->detect_done = false;
1944 +diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
1945 +index d8cb53ef4351..c8640959a7fc 100644
1946 +--- a/drivers/gpu/drm/i915/intel_hdmi.c
1947 ++++ b/drivers/gpu/drm/i915/intel_hdmi.c
1948 +@@ -933,8 +933,12 @@ static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port,
1949 +
1950 + ret = i2c_transfer(adapter, &msg, 1);
1951 + if (ret == 1)
1952 +- return 0;
1953 +- return ret >= 0 ? -EIO : ret;
1954 ++ ret = 0;
1955 ++ else if (ret >= 0)
1956 ++ ret = -EIO;
1957 ++
1958 ++ kfree(write_buf);
1959 ++ return ret;
1960 + }
1961 +
1962 + static
1963 +diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
1964 +index b4941101f21a..cdf19553ffac 100644
1965 +--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
1966 ++++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
1967 +@@ -127,9 +127,7 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
1968 + return platdev;
1969 + }
1970 +
1971 +- pm_runtime_forbid(&platdev->dev);
1972 +- pm_runtime_set_active(&platdev->dev);
1973 +- pm_runtime_enable(&platdev->dev);
1974 ++ pm_runtime_no_callbacks(&platdev->dev);
1975 +
1976 + return platdev;
1977 + }
1978 +diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
1979 +index 8ae8f42f430a..6b6758419fb3 100644
1980 +--- a/drivers/gpu/drm/i915/intel_lspcon.c
1981 ++++ b/drivers/gpu/drm/i915/intel_lspcon.c
1982 +@@ -74,7 +74,7 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon,
1983 + DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n",
1984 + lspcon_mode_name(mode));
1985 +
1986 +- wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 100);
1987 ++ wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 400);
1988 + if (current_mode != mode)
1989 + DRM_ERROR("LSPCON mode hasn't settled\n");
1990 +
1991 +diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
1992 +index 48f618dc9abb..63d7faa99946 100644
1993 +--- a/drivers/gpu/drm/i915/intel_lvds.c
1994 ++++ b/drivers/gpu/drm/i915/intel_lvds.c
1995 +@@ -44,8 +44,6 @@
1996 + /* Private structure for the integrated LVDS support */
1997 + struct intel_lvds_connector {
1998 + struct intel_connector base;
1999 +-
2000 +- struct notifier_block lid_notifier;
2001 + };
2002 +
2003 + struct intel_lvds_pps {
2004 +@@ -454,26 +452,9 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
2005 + return true;
2006 + }
2007 +
2008 +-/*
2009 +- * Detect the LVDS connection.
2010 +- *
2011 +- * Since LVDS doesn't have hotlug, we use the lid as a proxy. Open means
2012 +- * connected and closed means disconnected. We also send hotplug events as
2013 +- * needed, using lid status notification from the input layer.
2014 +- */
2015 + static enum drm_connector_status
2016 + intel_lvds_detect(struct drm_connector *connector, bool force)
2017 + {
2018 +- struct drm_i915_private *dev_priv = to_i915(connector->dev);
2019 +- enum drm_connector_status status;
2020 +-
2021 +- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
2022 +- connector->base.id, connector->name);
2023 +-
2024 +- status = intel_panel_detect(dev_priv);
2025 +- if (status != connector_status_unknown)
2026 +- return status;
2027 +-
2028 + return connector_status_connected;
2029 + }
2030 +
2031 +@@ -498,117 +479,6 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
2032 + return 1;
2033 + }
2034 +
2035 +-static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
2036 +-{
2037 +- DRM_INFO("Skipping forced modeset for %s\n", id->ident);
2038 +- return 1;
2039 +-}
2040 +-
2041 +-/* The GPU hangs up on these systems if modeset is performed on LID open */
2042 +-static const struct dmi_system_id intel_no_modeset_on_lid[] = {
2043 +- {
2044 +- .callback = intel_no_modeset_on_lid_dmi_callback,
2045 +- .ident = "Toshiba Tecra A11",
2046 +- .matches = {
2047 +- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
2048 +- DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"),
2049 +- },
2050 +- },
2051 +-
2052 +- { } /* terminating entry */
2053 +-};
2054 +-
2055 +-/*
2056 +- * Lid events. Note the use of 'modeset':
2057 +- * - we set it to MODESET_ON_LID_OPEN on lid close,
2058 +- * and set it to MODESET_DONE on open
2059 +- * - we use it as a "only once" bit (ie we ignore
2060 +- * duplicate events where it was already properly set)
2061 +- * - the suspend/resume paths will set it to
2062 +- * MODESET_SUSPENDED and ignore the lid open event,
2063 +- * because they restore the mode ("lid open").
2064 +- */
2065 +-static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
2066 +- void *unused)
2067 +-{
2068 +- struct intel_lvds_connector *lvds_connector =
2069 +- container_of(nb, struct intel_lvds_connector, lid_notifier);
2070 +- struct drm_connector *connector = &lvds_connector->base.base;
2071 +- struct drm_device *dev = connector->dev;
2072 +- struct drm_i915_private *dev_priv = to_i915(dev);
2073 +-
2074 +- if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
2075 +- return NOTIFY_OK;
2076 +-
2077 +- mutex_lock(&dev_priv->modeset_restore_lock);
2078 +- if (dev_priv->modeset_restore == MODESET_SUSPENDED)
2079 +- goto exit;
2080 +- /*
2081 +- * check and update the status of LVDS connector after receiving
2082 +- * the LID nofication event.
2083 +- */
2084 +- connector->status = connector->funcs->detect(connector, false);
2085 +-
2086 +- /* Don't force modeset on machines where it causes a GPU lockup */
2087 +- if (dmi_check_system(intel_no_modeset_on_lid))
2088 +- goto exit;
2089 +- if (!acpi_lid_open()) {
2090 +- /* do modeset on next lid open event */
2091 +- dev_priv->modeset_restore = MODESET_ON_LID_OPEN;
2092 +- goto exit;
2093 +- }
2094 +-
2095 +- if (dev_priv->modeset_restore == MODESET_DONE)
2096 +- goto exit;
2097 +-
2098 +- /*
2099 +- * Some old platform's BIOS love to wreak havoc while the lid is closed.
2100 +- * We try to detect this here and undo any damage. The split for PCH
2101 +- * platforms is rather conservative and a bit arbitrary expect that on
2102 +- * those platforms VGA disabling requires actual legacy VGA I/O access,
2103 +- * and as part of the cleanup in the hw state restore we also redisable
2104 +- * the vga plane.
2105 +- */
2106 +- if (!HAS_PCH_SPLIT(dev_priv))
2107 +- intel_display_resume(dev);
2108 +-
2109 +- dev_priv->modeset_restore = MODESET_DONE;
2110 +-
2111 +-exit:
2112 +- mutex_unlock(&dev_priv->modeset_restore_lock);
2113 +- return NOTIFY_OK;
2114 +-}
2115 +-
2116 +-static int
2117 +-intel_lvds_connector_register(struct drm_connector *connector)
2118 +-{
2119 +- struct intel_lvds_connector *lvds = to_lvds_connector(connector);
2120 +- int ret;
2121 +-
2122 +- ret = intel_connector_register(connector);
2123 +- if (ret)
2124 +- return ret;
2125 +-
2126 +- lvds->lid_notifier.notifier_call = intel_lid_notify;
2127 +- if (acpi_lid_notifier_register(&lvds->lid_notifier)) {
2128 +- DRM_DEBUG_KMS("lid notifier registration failed\n");
2129 +- lvds->lid_notifier.notifier_call = NULL;
2130 +- }
2131 +-
2132 +- return 0;
2133 +-}
2134 +-
2135 +-static void
2136 +-intel_lvds_connector_unregister(struct drm_connector *connector)
2137 +-{
2138 +- struct intel_lvds_connector *lvds = to_lvds_connector(connector);
2139 +-
2140 +- if (lvds->lid_notifier.notifier_call)
2141 +- acpi_lid_notifier_unregister(&lvds->lid_notifier);
2142 +-
2143 +- intel_connector_unregister(connector);
2144 +-}
2145 +-
2146 + /**
2147 + * intel_lvds_destroy - unregister and free LVDS structures
2148 + * @connector: connector to free
2149 +@@ -641,8 +511,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
2150 + .fill_modes = drm_helper_probe_single_connector_modes,
2151 + .atomic_get_property = intel_digital_connector_atomic_get_property,
2152 + .atomic_set_property = intel_digital_connector_atomic_set_property,
2153 +- .late_register = intel_lvds_connector_register,
2154 +- .early_unregister = intel_lvds_connector_unregister,
2155 ++ .late_register = intel_connector_register,
2156 ++ .early_unregister = intel_connector_unregister,
2157 + .destroy = intel_lvds_destroy,
2158 + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
2159 + .atomic_duplicate_state = intel_digital_connector_duplicate_state,
2160 +@@ -1108,8 +978,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
2161 + * 2) check for VBT data
2162 + * 3) check to see if LVDS is already on
2163 + * if none of the above, no panel
2164 +- * 4) make sure lid is open
2165 +- * if closed, act like it's not there for now
2166 + */
2167 +
2168 + /*
2169 +diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
2170 +index 2121345a61af..78ce3d232c4d 100644
2171 +--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
2172 ++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
2173 +@@ -486,6 +486,31 @@ static void vop_line_flag_irq_disable(struct vop *vop)
2174 + spin_unlock_irqrestore(&vop->irq_lock, flags);
2175 + }
2176 +
2177 ++static int vop_core_clks_enable(struct vop *vop)
2178 ++{
2179 ++ int ret;
2180 ++
2181 ++ ret = clk_enable(vop->hclk);
2182 ++ if (ret < 0)
2183 ++ return ret;
2184 ++
2185 ++ ret = clk_enable(vop->aclk);
2186 ++ if (ret < 0)
2187 ++ goto err_disable_hclk;
2188 ++
2189 ++ return 0;
2190 ++
2191 ++err_disable_hclk:
2192 ++ clk_disable(vop->hclk);
2193 ++ return ret;
2194 ++}
2195 ++
2196 ++static void vop_core_clks_disable(struct vop *vop)
2197 ++{
2198 ++ clk_disable(vop->aclk);
2199 ++ clk_disable(vop->hclk);
2200 ++}
2201 ++
2202 + static int vop_enable(struct drm_crtc *crtc)
2203 + {
2204 + struct vop *vop = to_vop(crtc);
2205 +@@ -497,17 +522,13 @@ static int vop_enable(struct drm_crtc *crtc)
2206 + return ret;
2207 + }
2208 +
2209 +- ret = clk_enable(vop->hclk);
2210 ++ ret = vop_core_clks_enable(vop);
2211 + if (WARN_ON(ret < 0))
2212 + goto err_put_pm_runtime;
2213 +
2214 + ret = clk_enable(vop->dclk);
2215 + if (WARN_ON(ret < 0))
2216 +- goto err_disable_hclk;
2217 +-
2218 +- ret = clk_enable(vop->aclk);
2219 +- if (WARN_ON(ret < 0))
2220 +- goto err_disable_dclk;
2221 ++ goto err_disable_core;
2222 +
2223 + /*
2224 + * Slave iommu shares power, irq and clock with vop. It was associated
2225 +@@ -519,7 +540,7 @@ static int vop_enable(struct drm_crtc *crtc)
2226 + if (ret) {
2227 + DRM_DEV_ERROR(vop->dev,
2228 + "failed to attach dma mapping, %d\n", ret);
2229 +- goto err_disable_aclk;
2230 ++ goto err_disable_dclk;
2231 + }
2232 +
2233 + spin_lock(&vop->reg_lock);
2234 +@@ -552,18 +573,14 @@ static int vop_enable(struct drm_crtc *crtc)
2235 +
2236 + spin_unlock(&vop->reg_lock);
2237 +
2238 +- enable_irq(vop->irq);
2239 +-
2240 + drm_crtc_vblank_on(crtc);
2241 +
2242 + return 0;
2243 +
2244 +-err_disable_aclk:
2245 +- clk_disable(vop->aclk);
2246 + err_disable_dclk:
2247 + clk_disable(vop->dclk);
2248 +-err_disable_hclk:
2249 +- clk_disable(vop->hclk);
2250 ++err_disable_core:
2251 ++ vop_core_clks_disable(vop);
2252 + err_put_pm_runtime:
2253 + pm_runtime_put_sync(vop->dev);
2254 + return ret;
2255 +@@ -599,8 +616,6 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
2256 +
2257 + vop_dsp_hold_valid_irq_disable(vop);
2258 +
2259 +- disable_irq(vop->irq);
2260 +-
2261 + vop->is_enabled = false;
2262 +
2263 + /*
2264 +@@ -609,8 +624,7 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
2265 + rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);
2266 +
2267 + clk_disable(vop->dclk);
2268 +- clk_disable(vop->aclk);
2269 +- clk_disable(vop->hclk);
2270 ++ vop_core_clks_disable(vop);
2271 + pm_runtime_put(vop->dev);
2272 + mutex_unlock(&vop->vop_lock);
2273 +
2274 +@@ -1177,6 +1191,18 @@ static irqreturn_t vop_isr(int irq, void *data)
2275 + uint32_t active_irqs;
2276 + int ret = IRQ_NONE;
2277 +
2278 ++ /*
2279 ++ * The irq is shared with the iommu. If the runtime-pm state of the
2280 ++	 * vop-device is disabled, the irq has to be targeted at the iommu.
2281 ++ */
2282 ++ if (!pm_runtime_get_if_in_use(vop->dev))
2283 ++ return IRQ_NONE;
2284 ++
2285 ++ if (vop_core_clks_enable(vop)) {
2286 ++ DRM_DEV_ERROR_RATELIMITED(vop->dev, "couldn't enable clocks\n");
2287 ++ goto out;
2288 ++ }
2289 ++
2290 + /*
2291 + * interrupt register has interrupt status, enable and clear bits, we
2292 + * must hold irq_lock to avoid a race with enable/disable_vblank().
2293 +@@ -1192,7 +1218,7 @@ static irqreturn_t vop_isr(int irq, void *data)
2294 +
2295 + /* This is expected for vop iommu irqs, since the irq is shared */
2296 + if (!active_irqs)
2297 +- return IRQ_NONE;
2298 ++ goto out_disable;
2299 +
2300 + if (active_irqs & DSP_HOLD_VALID_INTR) {
2301 + complete(&vop->dsp_hold_completion);
2302 +@@ -1218,6 +1244,10 @@ static irqreturn_t vop_isr(int irq, void *data)
2303 + DRM_DEV_ERROR(vop->dev, "Unknown VOP IRQs: %#02x\n",
2304 + active_irqs);
2305 +
2306 ++out_disable:
2307 ++ vop_core_clks_disable(vop);
2308 ++out:
2309 ++ pm_runtime_put(vop->dev);
2310 + return ret;
2311 + }
2312 +
2313 +@@ -1596,9 +1626,6 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
2314 + if (ret)
2315 + goto err_disable_pm_runtime;
2316 +
2317 +- /* IRQ is initially disabled; it gets enabled in power_on */
2318 +- disable_irq(vop->irq);
2319 +-
2320 + return 0;
2321 +
2322 + err_disable_pm_runtime:
2323 +diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
2324 +index e67f4ea28c0e..051b8be3dc0f 100644
2325 +--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
2326 ++++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
2327 +@@ -363,8 +363,10 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
2328 + of_property_read_u32(endpoint, "reg", &endpoint_id);
2329 + ret = drm_of_find_panel_or_bridge(dev->of_node, 1, endpoint_id,
2330 + &lvds->panel, &lvds->bridge);
2331 +- if (!ret)
2332 ++ if (!ret) {
2333 ++ of_node_put(endpoint);
2334 + break;
2335 ++ }
2336 + }
2337 + if (!child_count) {
2338 + DRM_DEV_ERROR(dev, "lvds port does not have any children\n");
2339 +diff --git a/drivers/hid/hid-redragon.c b/drivers/hid/hid-redragon.c
2340 +index daf59578bf93..73c9d4c4fa34 100644
2341 +--- a/drivers/hid/hid-redragon.c
2342 ++++ b/drivers/hid/hid-redragon.c
2343 +@@ -44,29 +44,6 @@ static __u8 *redragon_report_fixup(struct hid_device *hdev, __u8 *rdesc,
2344 + return rdesc;
2345 + }
2346 +
2347 +-static int redragon_probe(struct hid_device *dev,
2348 +- const struct hid_device_id *id)
2349 +-{
2350 +- int ret;
2351 +-
2352 +- ret = hid_parse(dev);
2353 +- if (ret) {
2354 +- hid_err(dev, "parse failed\n");
2355 +- return ret;
2356 +- }
2357 +-
2358 +- /* do not register unused input device */
2359 +- if (dev->maxapplication == 1)
2360 +- return 0;
2361 +-
2362 +- ret = hid_hw_start(dev, HID_CONNECT_DEFAULT);
2363 +- if (ret) {
2364 +- hid_err(dev, "hw start failed\n");
2365 +- return ret;
2366 +- }
2367 +-
2368 +- return 0;
2369 +-}
2370 + static const struct hid_device_id redragon_devices[] = {
2371 + {HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_REDRAGON_ASURA)},
2372 + {}
2373 +@@ -77,8 +54,7 @@ MODULE_DEVICE_TABLE(hid, redragon_devices);
2374 + static struct hid_driver redragon_driver = {
2375 + .name = "redragon",
2376 + .id_table = redragon_devices,
2377 +- .report_fixup = redragon_report_fixup,
2378 +- .probe = redragon_probe
2379 ++ .report_fixup = redragon_report_fixup
2380 + };
2381 +
2382 + module_hid_driver(redragon_driver);
2383 +diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
2384 +index b8f303dea305..32affd3fa8bd 100644
2385 +--- a/drivers/i2c/i2c-core-acpi.c
2386 ++++ b/drivers/i2c/i2c-core-acpi.c
2387 +@@ -453,8 +453,12 @@ static int acpi_gsb_i2c_read_bytes(struct i2c_client *client,
2388 + else
2389 + dev_err(&client->adapter->dev, "i2c read %d bytes from client@%#x starting at reg %#x failed, error: %d\n",
2390 + data_len, client->addr, cmd, ret);
2391 +- } else {
2392 ++ /* 2 transfers must have completed successfully */
2393 ++ } else if (ret == 2) {
2394 + memcpy(data, buffer, data_len);
2395 ++ ret = 0;
2396 ++ } else {
2397 ++ ret = -EIO;
2398 + }
2399 +
2400 + kfree(buffer);
2401 +@@ -595,8 +599,6 @@ i2c_acpi_space_handler(u32 function, acpi_physical_address command,
2402 + if (action == ACPI_READ) {
2403 + status = acpi_gsb_i2c_read_bytes(client, command,
2404 + gsb->data, info->access_length);
2405 +- if (status > 0)
2406 +- status = 0;
2407 + } else {
2408 + status = acpi_gsb_i2c_write_bytes(client, command,
2409 + gsb->data, info->access_length);
2410 +diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
2411 +index fbe7198a715a..bedd5fba33b0 100644
2412 +--- a/drivers/infiniband/hw/hfi1/affinity.c
2413 ++++ b/drivers/infiniband/hw/hfi1/affinity.c
2414 +@@ -198,7 +198,7 @@ int node_affinity_init(void)
2415 + while ((dev = pci_get_device(ids->vendor, ids->device, dev))) {
2416 + node = pcibus_to_node(dev->bus);
2417 + if (node < 0)
2418 +- node = numa_node_id();
2419 ++ goto out;
2420 +
2421 + hfi1_per_node_cntr[node]++;
2422 + }
2423 +@@ -206,6 +206,18 @@ int node_affinity_init(void)
2424 + }
2425 +
2426 + return 0;
2427 ++
2428 ++out:
2429 ++ /*
2430 ++ * Invalid PCI NUMA node information found, note it, and populate
2431 ++ * our database 1:1.
2432 ++ */
2433 ++ pr_err("HFI: Invalid PCI NUMA node. Performance may be affected\n");
2434 ++ pr_err("HFI: System BIOS may need to be upgraded\n");
2435 ++ for (node = 0; node < node_affinity.num_possible_nodes; node++)
2436 ++ hfi1_per_node_cntr[node] = 1;
2437 ++
2438 ++ return 0;
2439 + }
2440 +
2441 + static void node_affinity_destroy(struct hfi1_affinity_node *entry)
2442 +@@ -622,8 +634,14 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
2443 + int curr_cpu, possible, i, ret;
2444 + bool new_entry = false;
2445 +
2446 +- if (node < 0)
2447 +- node = numa_node_id();
2448 ++ /*
2449 ++ * If the BIOS does not have the NUMA node information set, select
2450 ++ * NUMA 0 so we get consistent performance.
2451 ++ */
2452 ++ if (node < 0) {
2453 ++ dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n");
2454 ++ node = 0;
2455 ++ }
2456 + dd->node = node;
2457 +
2458 + local_mask = cpumask_of_node(dd->node);
2459 +diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
2460 +index b9f2c871ff9a..e11c149da04d 100644
2461 +--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
2462 ++++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
2463 +@@ -37,7 +37,7 @@
2464 +
2465 + static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
2466 + {
2467 +- return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn);
2468 ++ return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn) ? -ENOMEM : 0;
2469 + }
2470 +
2471 + static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)
2472 +diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
2473 +index baaf906f7c2e..97664570c5ac 100644
2474 +--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
2475 ++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
2476 +@@ -115,7 +115,10 @@ static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
2477 + {
2478 + struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
2479 +
2480 +- return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, base);
2481 ++ return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align,
2482 ++ base) ?
2483 ++ -ENOMEM :
2484 ++ 0;
2485 + }
2486 +
2487 + enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
2488 +diff --git a/drivers/input/input.c b/drivers/input/input.c
2489 +index 6365c1958264..3304aaaffe87 100644
2490 +--- a/drivers/input/input.c
2491 ++++ b/drivers/input/input.c
2492 +@@ -480,11 +480,19 @@ EXPORT_SYMBOL(input_inject_event);
2493 + */
2494 + void input_alloc_absinfo(struct input_dev *dev)
2495 + {
2496 +- if (!dev->absinfo)
2497 +- dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo),
2498 +- GFP_KERNEL);
2499 ++ if (dev->absinfo)
2500 ++ return;
2501 +
2502 +- WARN(!dev->absinfo, "%s(): kcalloc() failed?\n", __func__);
2503 ++ dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo), GFP_KERNEL);
2504 ++ if (!dev->absinfo) {
2505 ++ dev_err(dev->dev.parent ?: &dev->dev,
2506 ++ "%s: unable to allocate memory\n", __func__);
2507 ++ /*
2508 ++ * We will handle this allocation failure in
2509 ++ * input_register_device() when we refuse to register input
2510 ++ * device with ABS bits but without absinfo.
2511 ++ */
2512 ++ }
2513 + }
2514 + EXPORT_SYMBOL(input_alloc_absinfo);
2515 +
2516 +diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
2517 +index af4a8e7fcd27..3b05117118c3 100644
2518 +--- a/drivers/iommu/omap-iommu.c
2519 ++++ b/drivers/iommu/omap-iommu.c
2520 +@@ -550,7 +550,7 @@ static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd,
2521 +
2522 + pte_ready:
2523 + iopte = iopte_offset(iopgd, da);
2524 +- *pt_dma = virt_to_phys(iopte);
2525 ++ *pt_dma = iopgd_page_paddr(iopgd);
2526 + dev_vdbg(obj->dev,
2527 + "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
2528 + __func__, da, iopgd, *iopgd, iopte, *iopte);
2529 +@@ -738,7 +738,7 @@ static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
2530 + }
2531 + bytes *= nent;
2532 + memset(iopte, 0, nent * sizeof(*iopte));
2533 +- pt_dma = virt_to_phys(iopte);
2534 ++ pt_dma = iopgd_page_paddr(iopgd);
2535 + flush_iopte_range(obj->dev, pt_dma, pt_offset, nent);
2536 +
2537 + /*
2538 +diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
2539 +index 054cd2c8e9c8..2b1724e8d307 100644
2540 +--- a/drivers/iommu/rockchip-iommu.c
2541 ++++ b/drivers/iommu/rockchip-iommu.c
2542 +@@ -521,10 +521,11 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
2543 + u32 int_status;
2544 + dma_addr_t iova;
2545 + irqreturn_t ret = IRQ_NONE;
2546 +- int i;
2547 ++ int i, err;
2548 +
2549 +- if (WARN_ON(!pm_runtime_get_if_in_use(iommu->dev)))
2550 +- return 0;
2551 ++ err = pm_runtime_get_if_in_use(iommu->dev);
2552 ++ if (WARN_ON_ONCE(err <= 0))
2553 ++ return ret;
2554 +
2555 + if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
2556 + goto out;
2557 +@@ -620,11 +621,15 @@ static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
2558 + spin_lock_irqsave(&rk_domain->iommus_lock, flags);
2559 + list_for_each(pos, &rk_domain->iommus) {
2560 + struct rk_iommu *iommu;
2561 ++ int ret;
2562 +
2563 + iommu = list_entry(pos, struct rk_iommu, node);
2564 +
2565 + /* Only zap TLBs of IOMMUs that are powered on. */
2566 +- if (pm_runtime_get_if_in_use(iommu->dev)) {
2567 ++ ret = pm_runtime_get_if_in_use(iommu->dev);
2568 ++ if (WARN_ON_ONCE(ret < 0))
2569 ++ continue;
2570 ++ if (ret) {
2571 + WARN_ON(clk_bulk_enable(iommu->num_clocks,
2572 + iommu->clocks));
2573 + rk_iommu_zap_lines(iommu, iova, size);
2574 +@@ -891,6 +896,7 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
2575 + struct rk_iommu *iommu;
2576 + struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
2577 + unsigned long flags;
2578 ++ int ret;
2579 +
2580 + /* Allow 'virtual devices' (eg drm) to detach from domain */
2581 + iommu = rk_iommu_from_dev(dev);
2582 +@@ -909,7 +915,9 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
2583 + list_del_init(&iommu->node);
2584 + spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
2585 +
2586 +- if (pm_runtime_get_if_in_use(iommu->dev)) {
2587 ++ ret = pm_runtime_get_if_in_use(iommu->dev);
2588 ++ WARN_ON_ONCE(ret < 0);
2589 ++ if (ret > 0) {
2590 + rk_iommu_disable(iommu);
2591 + pm_runtime_put(iommu->dev);
2592 + }
2593 +@@ -946,7 +954,8 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
2594 + list_add_tail(&iommu->node, &rk_domain->iommus);
2595 + spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
2596 +
2597 +- if (!pm_runtime_get_if_in_use(iommu->dev))
2598 ++ ret = pm_runtime_get_if_in_use(iommu->dev);
2599 ++ if (!ret || WARN_ON_ONCE(ret < 0))
2600 + return 0;
2601 +
2602 + ret = rk_iommu_enable(iommu);
2603 +@@ -1152,17 +1161,6 @@ static int rk_iommu_probe(struct platform_device *pdev)
2604 + if (iommu->num_mmu == 0)
2605 + return PTR_ERR(iommu->bases[0]);
2606 +
2607 +- i = 0;
2608 +- while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) {
2609 +- if (irq < 0)
2610 +- return irq;
2611 +-
2612 +- err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
2613 +- IRQF_SHARED, dev_name(dev), iommu);
2614 +- if (err)
2615 +- return err;
2616 +- }
2617 +-
2618 + iommu->reset_disabled = device_property_read_bool(dev,
2619 + "rockchip,disable-mmu-reset");
2620 +
2621 +@@ -1219,6 +1217,19 @@ static int rk_iommu_probe(struct platform_device *pdev)
2622 +
2623 + pm_runtime_enable(dev);
2624 +
2625 ++ i = 0;
2626 ++ while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) {
2627 ++ if (irq < 0)
2628 ++ return irq;
2629 ++
2630 ++ err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
2631 ++ IRQF_SHARED, dev_name(dev), iommu);
2632 ++ if (err) {
2633 ++ pm_runtime_disable(dev);
2634 ++ goto err_remove_sysfs;
2635 ++ }
2636 ++ }
2637 ++
2638 + return 0;
2639 + err_remove_sysfs:
2640 + iommu_device_sysfs_remove(&iommu->iommu);
2641 +diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
2642 +index faf734ff4cf3..0f6e30e9009d 100644
2643 +--- a/drivers/irqchip/irq-bcm7038-l1.c
2644 ++++ b/drivers/irqchip/irq-bcm7038-l1.c
2645 +@@ -217,6 +217,7 @@ static int bcm7038_l1_set_affinity(struct irq_data *d,
2646 + return 0;
2647 + }
2648 +
2649 ++#ifdef CONFIG_SMP
2650 + static void bcm7038_l1_cpu_offline(struct irq_data *d)
2651 + {
2652 + struct cpumask *mask = irq_data_get_affinity_mask(d);
2653 +@@ -241,6 +242,7 @@ static void bcm7038_l1_cpu_offline(struct irq_data *d)
2654 + }
2655 + irq_set_affinity_locked(d, &new_affinity, false);
2656 + }
2657 ++#endif
2658 +
2659 + static int __init bcm7038_l1_init_one(struct device_node *dn,
2660 + unsigned int idx,
2661 +@@ -293,7 +295,9 @@ static struct irq_chip bcm7038_l1_irq_chip = {
2662 + .irq_mask = bcm7038_l1_mask,
2663 + .irq_unmask = bcm7038_l1_unmask,
2664 + .irq_set_affinity = bcm7038_l1_set_affinity,
2665 ++#ifdef CONFIG_SMP
2666 + .irq_cpu_offline = bcm7038_l1_cpu_offline,
2667 ++#endif
2668 + };
2669 +
2670 + static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
2671 +diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
2672 +index 3a7e8905a97e..880e48947576 100644
2673 +--- a/drivers/irqchip/irq-stm32-exti.c
2674 ++++ b/drivers/irqchip/irq-stm32-exti.c
2675 +@@ -602,17 +602,24 @@ stm32_exti_host_data *stm32_exti_host_init(const struct stm32_exti_drv_data *dd,
2676 + sizeof(struct stm32_exti_chip_data),
2677 + GFP_KERNEL);
2678 + if (!host_data->chips_data)
2679 +- return NULL;
2680 ++ goto free_host_data;
2681 +
2682 + host_data->base = of_iomap(node, 0);
2683 + if (!host_data->base) {
2684 + pr_err("%pOF: Unable to map registers\n", node);
2685 +- return NULL;
2686 ++ goto free_chips_data;
2687 + }
2688 +
2689 + stm32_host_data = host_data;
2690 +
2691 + return host_data;
2692 ++
2693 ++free_chips_data:
2694 ++ kfree(host_data->chips_data);
2695 ++free_host_data:
2696 ++ kfree(host_data);
2697 ++
2698 ++ return NULL;
2699 + }
2700 +
2701 + static struct
2702 +@@ -664,10 +671,8 @@ static int __init stm32_exti_init(const struct stm32_exti_drv_data *drv_data,
2703 + struct irq_domain *domain;
2704 +
2705 + host_data = stm32_exti_host_init(drv_data, node);
2706 +- if (!host_data) {
2707 +- ret = -ENOMEM;
2708 +- goto out_free_mem;
2709 +- }
2710 ++ if (!host_data)
2711 ++ return -ENOMEM;
2712 +
2713 + domain = irq_domain_add_linear(node, drv_data->bank_nr * IRQS_PER_BANK,
2714 + &irq_exti_domain_ops, NULL);
2715 +@@ -724,7 +729,6 @@ out_free_domain:
2716 + irq_domain_remove(domain);
2717 + out_unmap:
2718 + iounmap(host_data->base);
2719 +-out_free_mem:
2720 + kfree(host_data->chips_data);
2721 + kfree(host_data);
2722 + return ret;
2723 +@@ -751,10 +755,8 @@ __init stm32_exti_hierarchy_init(const struct stm32_exti_drv_data *drv_data,
2724 + }
2725 +
2726 + host_data = stm32_exti_host_init(drv_data, node);
2727 +- if (!host_data) {
2728 +- ret = -ENOMEM;
2729 +- goto out_free_mem;
2730 +- }
2731 ++ if (!host_data)
2732 ++ return -ENOMEM;
2733 +
2734 + for (i = 0; i < drv_data->bank_nr; i++)
2735 + stm32_exti_chip_init(host_data, i, node);
2736 +@@ -776,7 +778,6 @@ __init stm32_exti_hierarchy_init(const struct stm32_exti_drv_data *drv_data,
2737 +
2738 + out_unmap:
2739 + iounmap(host_data->base);
2740 +-out_free_mem:
2741 + kfree(host_data->chips_data);
2742 + kfree(host_data);
2743 + return ret;
2744 +diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
2745 +index 3c7547a3c371..d7b9cdafd1c3 100644
2746 +--- a/drivers/md/dm-kcopyd.c
2747 ++++ b/drivers/md/dm-kcopyd.c
2748 +@@ -487,6 +487,8 @@ static int run_complete_job(struct kcopyd_job *job)
2749 + if (atomic_dec_and_test(&kc->nr_jobs))
2750 + wake_up(&kc->destroyq);
2751 +
2752 ++ cond_resched();
2753 ++
2754 + return 0;
2755 + }
2756 +
2757 +diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
2758 +index 2a87b0d2f21f..a530972c5a7e 100644
2759 +--- a/drivers/mfd/sm501.c
2760 ++++ b/drivers/mfd/sm501.c
2761 +@@ -715,6 +715,7 @@ sm501_create_subdev(struct sm501_devdata *sm, char *name,
2762 + smdev->pdev.name = name;
2763 + smdev->pdev.id = sm->pdev_id;
2764 + smdev->pdev.dev.parent = sm->dev;
2765 ++ smdev->pdev.dev.coherent_dma_mask = 0xffffffff;
2766 +
2767 + if (res_count) {
2768 + smdev->pdev.resource = (struct resource *)(smdev+1);
2769 +diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
2770 +index 94d7a865b135..7504f430c011 100644
2771 +--- a/drivers/mtd/ubi/vtbl.c
2772 ++++ b/drivers/mtd/ubi/vtbl.c
2773 +@@ -578,6 +578,16 @@ static int init_volumes(struct ubi_device *ubi,
2774 + vol->ubi = ubi;
2775 + reserved_pebs += vol->reserved_pebs;
2776 +
2777 ++ /*
2778 ++ * We use ubi->peb_count and not vol->reserved_pebs because
2779 ++ * we want to keep the code simple. Otherwise we'd have to
2780 ++ * resize/check the bitmap upon volume resize too.
2781 ++ * Allocating a few bytes more does not hurt.
2782 ++ */
2783 ++ err = ubi_fastmap_init_checkmap(vol, ubi->peb_count);
2784 ++ if (err)
2785 ++ return err;
2786 ++
2787 + /*
2788 + * In case of dynamic volume UBI knows nothing about how many
2789 + * data is stored there. So assume the whole volume is used.
2790 +@@ -620,16 +630,6 @@ static int init_volumes(struct ubi_device *ubi,
2791 + (long long)(vol->used_ebs - 1) * vol->usable_leb_size;
2792 + vol->used_bytes += av->last_data_size;
2793 + vol->last_eb_bytes = av->last_data_size;
2794 +-
2795 +- /*
2796 +- * We use ubi->peb_count and not vol->reserved_pebs because
2797 +- * we want to keep the code simple. Otherwise we'd have to
2798 +- * resize/check the bitmap upon volume resize too.
2799 +- * Allocating a few bytes more does not hurt.
2800 +- */
2801 +- err = ubi_fastmap_init_checkmap(vol, ubi->peb_count);
2802 +- if (err)
2803 +- return err;
2804 + }
2805 +
2806 + /* And add the layout volume */
2807 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2808 +index 4394c1162be4..4fdf3d33aa59 100644
2809 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2810 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2811 +@@ -5907,12 +5907,12 @@ unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
2812 + return bp->hw_resc.max_cp_rings;
2813 + }
2814 +
2815 +-void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
2816 ++unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
2817 + {
2818 +- bp->hw_resc.max_cp_rings = max;
2819 ++ return bp->hw_resc.max_cp_rings - bnxt_get_ulp_msix_num(bp);
2820 + }
2821 +
2822 +-unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
2823 ++static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
2824 + {
2825 + struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
2826 +
2827 +@@ -8492,7 +8492,8 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
2828 +
2829 + *max_tx = hw_resc->max_tx_rings;
2830 + *max_rx = hw_resc->max_rx_rings;
2831 +- *max_cp = min_t(int, hw_resc->max_irqs, hw_resc->max_cp_rings);
2832 ++ *max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp),
2833 ++ hw_resc->max_irqs);
2834 + *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
2835 + max_ring_grps = hw_resc->max_hw_ring_grps;
2836 + if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
2837 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
2838 +index 91575ef97c8c..ea1246a94b38 100644
2839 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
2840 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
2841 +@@ -1468,8 +1468,7 @@ int bnxt_hwrm_set_coal(struct bnxt *);
2842 + unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
2843 + void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max);
2844 + unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
2845 +-void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max);
2846 +-unsigned int bnxt_get_max_func_irqs(struct bnxt *bp);
2847 ++unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp);
2848 + int bnxt_get_avail_msix(struct bnxt *bp, int num);
2849 + int bnxt_reserve_rings(struct bnxt *bp);
2850 + void bnxt_tx_disable(struct bnxt *bp);
2851 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
2852 +index a64910892c25..2c77004a022b 100644
2853 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
2854 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
2855 +@@ -451,7 +451,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
2856 +
2857 + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
2858 +
2859 +- vf_cp_rings = hw_resc->max_cp_rings - bp->cp_nr_rings;
2860 ++ vf_cp_rings = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings;
2861 + vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
2862 + if (bp->flags & BNXT_FLAG_AGG_RINGS)
2863 + vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
2864 +@@ -544,7 +544,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
2865 + max_stat_ctxs = hw_resc->max_stat_ctxs;
2866 +
2867 + /* Remaining rings are distributed equally amongs VF's for now */
2868 +- vf_cp_rings = (hw_resc->max_cp_rings - bp->cp_nr_rings) / num_vfs;
2869 ++ vf_cp_rings = (bnxt_get_max_func_cp_rings_for_en(bp) -
2870 ++ bp->cp_nr_rings) / num_vfs;
2871 + vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
2872 + if (bp->flags & BNXT_FLAG_AGG_RINGS)
2873 + vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
2874 +@@ -638,7 +639,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
2875 + */
2876 + vfs_supported = *num_vfs;
2877 +
2878 +- avail_cp = hw_resc->max_cp_rings - bp->cp_nr_rings;
2879 ++ avail_cp = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings;
2880 + avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
2881 + avail_cp = min_t(int, avail_cp, avail_stat);
2882 +
2883 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
2884 +index 840f6e505f73..4209cfd73971 100644
2885 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
2886 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
2887 +@@ -169,7 +169,6 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
2888 + edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
2889 + }
2890 + bnxt_fill_msix_vecs(bp, ent);
2891 +- bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix);
2892 + edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
2893 + return avail_msix;
2894 + }
2895 +@@ -178,7 +177,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
2896 + {
2897 + struct net_device *dev = edev->net;
2898 + struct bnxt *bp = netdev_priv(dev);
2899 +- int max_cp_rings, msix_requested;
2900 +
2901 + ASSERT_RTNL();
2902 + if (ulp_id != BNXT_ROCE_ULP)
2903 +@@ -187,9 +185,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
2904 + if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
2905 + return 0;
2906 +
2907 +- max_cp_rings = bnxt_get_max_func_cp_rings(bp);
2908 +- msix_requested = edev->ulp_tbl[ulp_id].msix_requested;
2909 +- bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested);
2910 + edev->ulp_tbl[ulp_id].msix_requested = 0;
2911 + edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
2912 + if (netif_running(dev)) {
2913 +@@ -220,21 +215,6 @@ int bnxt_get_ulp_msix_base(struct bnxt *bp)
2914 + return 0;
2915 + }
2916 +
2917 +-void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id)
2918 +-{
2919 +- ASSERT_RTNL();
2920 +- if (bnxt_ulp_registered(bp->edev, ulp_id)) {
2921 +- struct bnxt_en_dev *edev = bp->edev;
2922 +- unsigned int msix_req, max;
2923 +-
2924 +- msix_req = edev->ulp_tbl[ulp_id].msix_requested;
2925 +- max = bnxt_get_max_func_cp_rings(bp);
2926 +- bnxt_set_max_func_cp_rings(bp, max - msix_req);
2927 +- max = bnxt_get_max_func_stat_ctxs(bp);
2928 +- bnxt_set_max_func_stat_ctxs(bp, max - 1);
2929 +- }
2930 +-}
2931 +-
2932 + static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
2933 + struct bnxt_fw_msg *fw_msg)
2934 + {
2935 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
2936 +index df48ac71729f..d9bea37cd211 100644
2937 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
2938 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
2939 +@@ -90,7 +90,6 @@ static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id)
2940 +
2941 + int bnxt_get_ulp_msix_num(struct bnxt *bp);
2942 + int bnxt_get_ulp_msix_base(struct bnxt *bp);
2943 +-void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id);
2944 + void bnxt_ulp_stop(struct bnxt *bp);
2945 + void bnxt_ulp_start(struct bnxt *bp);
2946 + void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
2947 +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
2948 +index b773bc07edf7..14b49612aa86 100644
2949 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
2950 ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
2951 +@@ -186,6 +186,9 @@ struct bcmgenet_mib_counters {
2952 + #define UMAC_MAC1 0x010
2953 + #define UMAC_MAX_FRAME_LEN 0x014
2954 +
2955 ++#define UMAC_MODE 0x44
2956 ++#define MODE_LINK_STATUS (1 << 5)
2957 ++
2958 + #define UMAC_EEE_CTRL 0x064
2959 + #define EN_LPI_RX_PAUSE (1 << 0)
2960 + #define EN_LPI_TX_PFC (1 << 1)
2961 +diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
2962 +index 5333274a283c..4241ae928d4a 100644
2963 +--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
2964 ++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
2965 +@@ -115,8 +115,14 @@ void bcmgenet_mii_setup(struct net_device *dev)
2966 + static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
2967 + struct fixed_phy_status *status)
2968 + {
2969 +- if (dev && dev->phydev && status)
2970 +- status->link = dev->phydev->link;
2971 ++ struct bcmgenet_priv *priv;
2972 ++ u32 reg;
2973 ++
2974 ++ if (dev && dev->phydev && status) {
2975 ++ priv = netdev_priv(dev);
2976 ++ reg = bcmgenet_umac_readl(priv, UMAC_MODE);
2977 ++ status->link = !!(reg & MODE_LINK_STATUS);
2978 ++ }
2979 +
2980 + return 0;
2981 + }
2982 +diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
2983 +index a6c911bb5ce2..515d96e32143 100644
2984 +--- a/drivers/net/ethernet/cadence/macb_main.c
2985 ++++ b/drivers/net/ethernet/cadence/macb_main.c
2986 +@@ -481,11 +481,6 @@ static int macb_mii_probe(struct net_device *dev)
2987 +
2988 + if (np) {
2989 + if (of_phy_is_fixed_link(np)) {
2990 +- if (of_phy_register_fixed_link(np) < 0) {
2991 +- dev_err(&bp->pdev->dev,
2992 +- "broken fixed-link specification\n");
2993 +- return -ENODEV;
2994 +- }
2995 + bp->phy_node = of_node_get(np);
2996 + } else {
2997 + bp->phy_node = of_parse_phandle(np, "phy-handle", 0);
2998 +@@ -568,7 +563,7 @@ static int macb_mii_init(struct macb *bp)
2999 + {
3000 + struct macb_platform_data *pdata;
3001 + struct device_node *np;
3002 +- int err;
3003 ++ int err = -ENXIO;
3004 +
3005 + /* Enable management port */
3006 + macb_writel(bp, NCR, MACB_BIT(MPE));
3007 +@@ -591,12 +586,23 @@ static int macb_mii_init(struct macb *bp)
3008 + dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
3009 +
3010 + np = bp->pdev->dev.of_node;
3011 +- if (pdata)
3012 +- bp->mii_bus->phy_mask = pdata->phy_mask;
3013 ++ if (np && of_phy_is_fixed_link(np)) {
3014 ++ if (of_phy_register_fixed_link(np) < 0) {
3015 ++ dev_err(&bp->pdev->dev,
3016 ++ "broken fixed-link specification %pOF\n", np);
3017 ++ goto err_out_free_mdiobus;
3018 ++ }
3019 ++
3020 ++ err = mdiobus_register(bp->mii_bus);
3021 ++ } else {
3022 ++ if (pdata)
3023 ++ bp->mii_bus->phy_mask = pdata->phy_mask;
3024 ++
3025 ++ err = of_mdiobus_register(bp->mii_bus, np);
3026 ++ }
3027 +
3028 +- err = of_mdiobus_register(bp->mii_bus, np);
3029 + if (err)
3030 +- goto err_out_free_mdiobus;
3031 ++ goto err_out_free_fixed_link;
3032 +
3033 + err = macb_mii_probe(bp->dev);
3034 + if (err)
3035 +@@ -606,6 +612,7 @@ static int macb_mii_init(struct macb *bp)
3036 +
3037 + err_out_unregister_bus:
3038 + mdiobus_unregister(bp->mii_bus);
3039 ++err_out_free_fixed_link:
3040 + if (np && of_phy_is_fixed_link(np))
3041 + of_phy_deregister_fixed_link(np);
3042 + err_out_free_mdiobus:
3043 +@@ -1957,14 +1964,17 @@ static void macb_reset_hw(struct macb *bp)
3044 + {
3045 + struct macb_queue *queue;
3046 + unsigned int q;
3047 ++ u32 ctrl = macb_readl(bp, NCR);
3048 +
3049 + /* Disable RX and TX (XXX: Should we halt the transmission
3050 + * more gracefully?)
3051 + */
3052 +- macb_writel(bp, NCR, 0);
3053 ++ ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
3054 +
3055 + /* Clear the stats registers (XXX: Update stats first?) */
3056 +- macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
3057 ++ ctrl |= MACB_BIT(CLRSTAT);
3058 ++
3059 ++ macb_writel(bp, NCR, ctrl);
3060 +
3061 + /* Clear all status flags */
3062 + macb_writel(bp, TSR, -1);
3063 +@@ -2152,7 +2162,7 @@ static void macb_init_hw(struct macb *bp)
3064 + }
3065 +
3066 + /* Enable TX and RX */
3067 +- macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
3068 ++ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
3069 + }
3070 +
3071 + /* The hash address register is 64 bits long and takes up two
3072 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
3073 +index d318d35e598f..6fd7ea8074b0 100644
3074 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
3075 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
3076 +@@ -3911,7 +3911,7 @@ static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
3077 + #define HCLGE_FUNC_NUMBER_PER_DESC 6
3078 + int i, j;
3079 +
3080 +- for (i = 0; i < HCLGE_DESC_NUMBER; i++)
3081 ++ for (i = 1; i < HCLGE_DESC_NUMBER; i++)
3082 + for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
3083 + if (desc[i].data[j])
3084 + return false;
3085 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
3086 +index 9f7932e423b5..6315e8ad8467 100644
3087 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
3088 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
3089 +@@ -208,6 +208,8 @@ int hclge_mac_start_phy(struct hclge_dev *hdev)
3090 + if (!phydev)
3091 + return 0;
3092 +
3093 ++ phydev->supported &= ~SUPPORTED_FIBRE;
3094 ++
3095 + ret = phy_connect_direct(netdev, phydev,
3096 + hclge_mac_adjust_link,
3097 + PHY_INTERFACE_MODE_SGMII);
3098 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
3099 +index 86478a6b99c5..c8c315eb5128 100644
3100 +--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
3101 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
3102 +@@ -139,14 +139,15 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
3103 + struct mlx5_wq_ctrl *wq_ctrl)
3104 + {
3105 + u32 sq_strides_offset;
3106 ++ u32 rq_pg_remainder;
3107 + int err;
3108 +
3109 + mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
3110 + MLX5_GET(qpc, qpc, log_rq_size),
3111 + &wq->rq.fbc);
3112 +
3113 +- sq_strides_offset =
3114 +- ((wq->rq.fbc.frag_sz_m1 + 1) % PAGE_SIZE) / MLX5_SEND_WQE_BB;
3115 ++ rq_pg_remainder = mlx5_wq_cyc_get_byte_size(&wq->rq) % PAGE_SIZE;
3116 ++ sq_strides_offset = rq_pg_remainder / MLX5_SEND_WQE_BB;
3117 +
3118 + mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB),
3119 + MLX5_GET(qpc, qpc, log_sq_size),
3120 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
3121 +index 4a519d8edec8..3500c79e29cd 100644
3122 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
3123 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
3124 +@@ -433,6 +433,8 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
3125 + void
3126 + mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
3127 + void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
3128 ++void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
3129 ++ struct net_device *dev);
3130 +
3131 + /* spectrum_kvdl.c */
3132 + int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp);
3133 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
3134 +index 77b2adb29341..cb43d17097fa 100644
3135 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
3136 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
3137 +@@ -6228,6 +6228,17 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
3138 + mlxsw_sp_vr_put(mlxsw_sp, vr);
3139 + }
3140 +
3141 ++void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
3142 ++ struct net_device *dev)
3143 ++{
3144 ++ struct mlxsw_sp_rif *rif;
3145 ++
3146 ++ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3147 ++ if (!rif)
3148 ++ return;
3149 ++ mlxsw_sp_rif_destroy(rif);
3150 ++}
3151 ++
3152 + static void
3153 + mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
3154 + struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
3155 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
3156 +index eea5666a86b2..6cb43dda8232 100644
3157 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
3158 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
3159 +@@ -160,6 +160,24 @@ bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
3160 + return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3161 + }
3162 +
3163 ++static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
3164 ++ void *data)
3165 ++{
3166 ++ struct mlxsw_sp *mlxsw_sp = data;
3167 ++
3168 ++ mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
3169 ++ return 0;
3170 ++}
3171 ++
3172 ++static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
3173 ++ struct net_device *dev)
3174 ++{
3175 ++ mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
3176 ++ netdev_walk_all_upper_dev_rcu(dev,
3177 ++ mlxsw_sp_bridge_device_upper_rif_destroy,
3178 ++ mlxsw_sp);
3179 ++}
3180 ++
3181 + static struct mlxsw_sp_bridge_device *
3182 + mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
3183 + struct net_device *br_dev)
3184 +@@ -198,6 +216,8 @@ static void
3185 + mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
3186 + struct mlxsw_sp_bridge_device *bridge_device)
3187 + {
3188 ++ mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
3189 ++ bridge_device->dev);
3190 + list_del(&bridge_device->list);
3191 + if (bridge_device->vlan_enabled)
3192 + bridge->vlan_enabled_exists = false;
3193 +diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
3194 +index d4c27f849f9b..c2a9e64bc57b 100644
3195 +--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
3196 ++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
3197 +@@ -227,29 +227,16 @@ done:
3198 + spin_unlock_bh(&nn->reconfig_lock);
3199 + }
3200 +
3201 +-/**
3202 +- * nfp_net_reconfig() - Reconfigure the firmware
3203 +- * @nn: NFP Net device to reconfigure
3204 +- * @update: The value for the update field in the BAR config
3205 +- *
3206 +- * Write the update word to the BAR and ping the reconfig queue. The
3207 +- * poll until the firmware has acknowledged the update by zeroing the
3208 +- * update word.
3209 +- *
3210 +- * Return: Negative errno on error, 0 on success
3211 +- */
3212 +-int nfp_net_reconfig(struct nfp_net *nn, u32 update)
3213 ++static void nfp_net_reconfig_sync_enter(struct nfp_net *nn)
3214 + {
3215 + bool cancelled_timer = false;
3216 + u32 pre_posted_requests;
3217 +- int ret;
3218 +
3219 + spin_lock_bh(&nn->reconfig_lock);
3220 +
3221 + nn->reconfig_sync_present = true;
3222 +
3223 + if (nn->reconfig_timer_active) {
3224 +- del_timer(&nn->reconfig_timer);
3225 + nn->reconfig_timer_active = false;
3226 + cancelled_timer = true;
3227 + }
3228 +@@ -258,14 +245,43 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
3229 +
3230 + spin_unlock_bh(&nn->reconfig_lock);
3231 +
3232 +- if (cancelled_timer)
3233 ++ if (cancelled_timer) {
3234 ++ del_timer_sync(&nn->reconfig_timer);
3235 + nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
3236 ++ }
3237 +
3238 + /* Run the posted reconfigs which were issued before we started */
3239 + if (pre_posted_requests) {
3240 + nfp_net_reconfig_start(nn, pre_posted_requests);
3241 + nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
3242 + }
3243 ++}
3244 ++
3245 ++static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
3246 ++{
3247 ++ nfp_net_reconfig_sync_enter(nn);
3248 ++
3249 ++ spin_lock_bh(&nn->reconfig_lock);
3250 ++ nn->reconfig_sync_present = false;
3251 ++ spin_unlock_bh(&nn->reconfig_lock);
3252 ++}
3253 ++
3254 ++/**
3255 ++ * nfp_net_reconfig() - Reconfigure the firmware
3256 ++ * @nn: NFP Net device to reconfigure
3257 ++ * @update: The value for the update field in the BAR config
3258 ++ *
3259 ++ * Write the update word to the BAR and ping the reconfig queue. The
3260 ++ * poll until the firmware has acknowledged the update by zeroing the
3261 ++ * update word.
3262 ++ *
3263 ++ * Return: Negative errno on error, 0 on success
3264 ++ */
3265 ++int nfp_net_reconfig(struct nfp_net *nn, u32 update)
3266 ++{
3267 ++ int ret;
3268 ++
3269 ++ nfp_net_reconfig_sync_enter(nn);
3270 +
3271 + nfp_net_reconfig_start(nn, update);
3272 + ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
3273 +@@ -3609,6 +3625,7 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
3274 + */
3275 + void nfp_net_free(struct nfp_net *nn)
3276 + {
3277 ++ WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
3278 + if (nn->dp.netdev)
3279 + free_netdev(nn->dp.netdev);
3280 + else
3281 +@@ -3893,4 +3910,5 @@ void nfp_net_clean(struct nfp_net *nn)
3282 + return;
3283 +
3284 + unregister_netdev(nn->dp.netdev);
3285 ++ nfp_net_reconfig_wait_posted(nn);
3286 + }
3287 +diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
3288 +index 353f1c129af1..059ba9429e51 100644
3289 +--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
3290 ++++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
3291 +@@ -2384,26 +2384,20 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev,
3292 + return status;
3293 + }
3294 +
3295 +-static netdev_features_t qlge_fix_features(struct net_device *ndev,
3296 +- netdev_features_t features)
3297 +-{
3298 +- int err;
3299 +-
3300 +- /* Update the behavior of vlan accel in the adapter */
3301 +- err = qlge_update_hw_vlan_features(ndev, features);
3302 +- if (err)
3303 +- return err;
3304 +-
3305 +- return features;
3306 +-}
3307 +-
3308 + static int qlge_set_features(struct net_device *ndev,
3309 + netdev_features_t features)
3310 + {
3311 + netdev_features_t changed = ndev->features ^ features;
3312 ++ int err;
3313 ++
3314 ++ if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
3315 ++ /* Update the behavior of vlan accel in the adapter */
3316 ++ err = qlge_update_hw_vlan_features(ndev, features);
3317 ++ if (err)
3318 ++ return err;
3319 +
3320 +- if (changed & NETIF_F_HW_VLAN_CTAG_RX)
3321 + qlge_vlan_mode(ndev, features);
3322 ++ }
3323 +
3324 + return 0;
3325 + }
3326 +@@ -4719,7 +4713,6 @@ static const struct net_device_ops qlge_netdev_ops = {
3327 + .ndo_set_mac_address = qlge_set_mac_address,
3328 + .ndo_validate_addr = eth_validate_addr,
3329 + .ndo_tx_timeout = qlge_tx_timeout,
3330 +- .ndo_fix_features = qlge_fix_features,
3331 + .ndo_set_features = qlge_set_features,
3332 + .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
3333 + .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
3334 +diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
3335 +index 9ceb34bac3a9..e5eb361b973c 100644
3336 +--- a/drivers/net/ethernet/realtek/r8169.c
3337 ++++ b/drivers/net/ethernet/realtek/r8169.c
3338 +@@ -303,6 +303,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
3339 + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 },
3340 + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
3341 + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
3342 ++ { PCI_DEVICE(PCI_VENDOR_ID_NCUBE, 0x8168), 0, 0, RTL_CFG_1 },
3343 + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
3344 + { PCI_VENDOR_ID_DLINK, 0x4300,
3345 + PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 },
3346 +@@ -5038,7 +5039,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
3347 + rtl_hw_reset(tp);
3348 + }
3349 +
3350 +-static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
3351 ++static void rtl_set_tx_config_registers(struct rtl8169_private *tp)
3352 + {
3353 + /* Set DMA burst size and Interframe Gap Time */
3354 + RTL_W32(tp, TxConfig, (TX_DMA_BURST << TxDMAShift) |
3355 +@@ -5149,12 +5150,14 @@ static void rtl_hw_start(struct rtl8169_private *tp)
3356 +
3357 + rtl_set_rx_max_size(tp);
3358 + rtl_set_rx_tx_desc_registers(tp);
3359 +- rtl_set_rx_tx_config_registers(tp);
3360 ++ rtl_set_tx_config_registers(tp);
3361 + RTL_W8(tp, Cfg9346, Cfg9346_Lock);
3362 +
3363 + /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
3364 + RTL_R8(tp, IntrMask);
3365 + RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
3366 ++ rtl_init_rxcfg(tp);
3367 ++
3368 + rtl_set_rx_mode(tp->dev);
3369 + /* no early-rx interrupts */
3370 + RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
3371 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
3372 +index 76649adf8fb0..c0a855b7ab3b 100644
3373 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
3374 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
3375 +@@ -112,7 +112,6 @@ struct stmmac_priv {
3376 + u32 tx_count_frames;
3377 + u32 tx_coal_frames;
3378 + u32 tx_coal_timer;
3379 +- bool tx_timer_armed;
3380 +
3381 + int tx_coalesce;
3382 + int hwts_tx_en;
3383 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
3384 +index ef6a8d39db2f..c579d98b9666 100644
3385 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
3386 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
3387 +@@ -3126,16 +3126,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3388 + * element in case of no SG.
3389 + */
3390 + priv->tx_count_frames += nfrags + 1;
3391 +- if (likely(priv->tx_coal_frames > priv->tx_count_frames) &&
3392 +- !priv->tx_timer_armed) {
3393 ++ if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
3394 + mod_timer(&priv->txtimer,
3395 + STMMAC_COAL_TIMER(priv->tx_coal_timer));
3396 +- priv->tx_timer_armed = true;
3397 + } else {
3398 + priv->tx_count_frames = 0;
3399 + stmmac_set_tx_ic(priv, desc);
3400 + priv->xstats.tx_set_ic_bit++;
3401 +- priv->tx_timer_armed = false;
3402 + }
3403 +
3404 + skb_tx_timestamp(skb);
3405 +diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
3406 +index dd1d6e115145..6d74cde68163 100644
3407 +--- a/drivers/net/hyperv/netvsc_drv.c
3408 ++++ b/drivers/net/hyperv/netvsc_drv.c
3409 +@@ -29,6 +29,7 @@
3410 + #include <linux/netdevice.h>
3411 + #include <linux/inetdevice.h>
3412 + #include <linux/etherdevice.h>
3413 ++#include <linux/pci.h>
3414 + #include <linux/skbuff.h>
3415 + #include <linux/if_vlan.h>
3416 + #include <linux/in.h>
3417 +@@ -1939,12 +1940,16 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
3418 + {
3419 + struct net_device *ndev;
3420 + struct net_device_context *net_device_ctx;
3421 ++ struct device *pdev = vf_netdev->dev.parent;
3422 + struct netvsc_device *netvsc_dev;
3423 + int ret;
3424 +
3425 + if (vf_netdev->addr_len != ETH_ALEN)
3426 + return NOTIFY_DONE;
3427 +
3428 ++ if (!pdev || !dev_is_pci(pdev) || dev_is_pf(pdev))
3429 ++ return NOTIFY_DONE;
3430 ++
3431 + /*
3432 + * We will use the MAC address to locate the synthetic interface to
3433 + * associate with the VF interface. If we don't find a matching
3434 +@@ -2101,6 +2106,16 @@ static int netvsc_probe(struct hv_device *dev,
3435 +
3436 + memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
3437 +
3438 ++ /* We must get rtnl lock before scheduling nvdev->subchan_work,
3439 ++ * otherwise netvsc_subchan_work() can get rtnl lock first and wait
3440 ++ * all subchannels to show up, but that may not happen because
3441 ++ * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
3442 ++ * -> ... -> device_add() -> ... -> __device_attach() can't get
3443 ++ * the device lock, so all the subchannels can't be processed --
3444 ++ * finally netvsc_subchan_work() hangs for ever.
3445 ++ */
3446 ++ rtnl_lock();
3447 ++
3448 + if (nvdev->num_chn > 1)
3449 + schedule_work(&nvdev->subchan_work);
3450 +
3451 +@@ -2119,7 +2134,6 @@ static int netvsc_probe(struct hv_device *dev,
3452 + else
3453 + net->max_mtu = ETH_DATA_LEN;
3454 +
3455 +- rtnl_lock();
3456 + ret = register_netdevice(net);
3457 + if (ret != 0) {
3458 + pr_err("Unable to register netdev.\n");
3459 +diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
3460 +index 2a58607a6aea..1b07bb5e110d 100644
3461 +--- a/drivers/net/usb/r8152.c
3462 ++++ b/drivers/net/usb/r8152.c
3463 +@@ -5214,8 +5214,8 @@ static int rtl8152_probe(struct usb_interface *intf,
3464 + netdev->hw_features &= ~NETIF_F_RXCSUM;
3465 + }
3466 +
3467 +- if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 &&
3468 +- udev->serial && !strcmp(udev->serial, "000001000000")) {
3469 ++ if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial &&
3470 ++ (!strcmp(udev->serial, "000001000000") || !strcmp(udev->serial, "000002000000"))) {
3471 + dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation");
3472 + set_bit(DELL_TB_RX_AGG_BUG, &tp->flags);
3473 + }
3474 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
3475 +index b6122aad639e..7569f9af8d47 100644
3476 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
3477 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
3478 +@@ -6926,15 +6926,15 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
3479 + cfg->d11inf.io_type = (u8)io_type;
3480 + brcmu_d11_attach(&cfg->d11inf);
3481 +
3482 +- err = brcmf_setup_wiphy(wiphy, ifp);
3483 +- if (err < 0)
3484 +- goto priv_out;
3485 +-
3486 + /* regulatory notifer below needs access to cfg so
3487 + * assign it now.
3488 + */
3489 + drvr->config = cfg;
3490 +
3491 ++ err = brcmf_setup_wiphy(wiphy, ifp);
3492 ++ if (err < 0)
3493 ++ goto priv_out;
3494 ++
3495 + brcmf_dbg(INFO, "Registering custom regulatory\n");
3496 + wiphy->reg_notifier = brcmf_cfg80211_reg_notifier;
3497 + wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
3498 +diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
3499 +index 23e270839e6a..f00df2384985 100644
3500 +--- a/drivers/pci/controller/pci-mvebu.c
3501 ++++ b/drivers/pci/controller/pci-mvebu.c
3502 +@@ -1219,7 +1219,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
3503 + pcie->realio.start = PCIBIOS_MIN_IO;
3504 + pcie->realio.end = min_t(resource_size_t,
3505 + IO_SPACE_LIMIT,
3506 +- resource_size(&pcie->io));
3507 ++ resource_size(&pcie->io) - 1);
3508 + } else
3509 + pcie->realio = pcie->io;
3510 +
3511 +diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
3512 +index b2857865c0aa..a1a243ee36bb 100644
3513 +--- a/drivers/pci/probe.c
3514 ++++ b/drivers/pci/probe.c
3515 +@@ -1725,7 +1725,7 @@ int pci_setup_device(struct pci_dev *dev)
3516 + static void pci_configure_mps(struct pci_dev *dev)
3517 + {
3518 + struct pci_dev *bridge = pci_upstream_bridge(dev);
3519 +- int mps, p_mps, rc;
3520 ++ int mps, mpss, p_mps, rc;
3521 +
3522 + if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
3523 + return;
3524 +@@ -1753,6 +1753,14 @@ static void pci_configure_mps(struct pci_dev *dev)
3525 + if (pcie_bus_config != PCIE_BUS_DEFAULT)
3526 + return;
3527 +
3528 ++ mpss = 128 << dev->pcie_mpss;
3529 ++ if (mpss < p_mps && pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) {
3530 ++ pcie_set_mps(bridge, mpss);
3531 ++ pci_info(dev, "Upstream bridge's Max Payload Size set to %d (was %d, max %d)\n",
3532 ++ mpss, p_mps, 128 << bridge->pcie_mpss);
3533 ++ p_mps = pcie_get_mps(bridge);
3534 ++ }
3535 ++
3536 + rc = pcie_set_mps(dev, p_mps);
3537 + if (rc) {
3538 + pci_warn(dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
3539 +@@ -1761,7 +1769,7 @@ static void pci_configure_mps(struct pci_dev *dev)
3540 + }
3541 +
3542 + pci_info(dev, "Max Payload Size set to %d (was %d, max %d)\n",
3543 +- p_mps, mps, 128 << dev->pcie_mpss);
3544 ++ p_mps, mps, mpss);
3545 + }
3546 +
3547 + static struct hpp_type0 pci_default_type0 = {
3548 +diff --git a/drivers/pinctrl/pinctrl-axp209.c b/drivers/pinctrl/pinctrl-axp209.c
3549 +index a52779f33ad4..afd0b533c40a 100644
3550 +--- a/drivers/pinctrl/pinctrl-axp209.c
3551 ++++ b/drivers/pinctrl/pinctrl-axp209.c
3552 +@@ -316,7 +316,7 @@ static const struct pinctrl_ops axp20x_pctrl_ops = {
3553 + .get_group_pins = axp20x_group_pins,
3554 + };
3555 +
3556 +-static void axp20x_funcs_groups_from_mask(struct device *dev, unsigned int mask,
3557 ++static int axp20x_funcs_groups_from_mask(struct device *dev, unsigned int mask,
3558 + unsigned int mask_len,
3559 + struct axp20x_pinctrl_function *func,
3560 + const struct pinctrl_pin_desc *pins)
3561 +@@ -331,18 +331,22 @@ static void axp20x_funcs_groups_from_mask(struct device *dev, unsigned int mask,
3562 + func->groups = devm_kcalloc(dev,
3563 + ngroups, sizeof(const char *),
3564 + GFP_KERNEL);
3565 ++ if (!func->groups)
3566 ++ return -ENOMEM;
3567 + group = func->groups;
3568 + for_each_set_bit(bit, &mask_cpy, mask_len) {
3569 + *group = pins[bit].name;
3570 + group++;
3571 + }
3572 + }
3573 ++
3574 ++ return 0;
3575 + }
3576 +
3577 +-static void axp20x_build_funcs_groups(struct platform_device *pdev)
3578 ++static int axp20x_build_funcs_groups(struct platform_device *pdev)
3579 + {
3580 + struct axp20x_pctl *pctl = platform_get_drvdata(pdev);
3581 +- int i, pin, npins = pctl->desc->npins;
3582 ++ int i, ret, pin, npins = pctl->desc->npins;
3583 +
3584 + pctl->funcs[AXP20X_FUNC_GPIO_OUT].name = "gpio_out";
3585 + pctl->funcs[AXP20X_FUNC_GPIO_OUT].muxval = AXP20X_MUX_GPIO_OUT;
3586 +@@ -366,13 +370,19 @@ static void axp20x_build_funcs_groups(struct platform_device *pdev)
3587 + pctl->funcs[i].groups[pin] = pctl->desc->pins[pin].name;
3588 + }
3589 +
3590 +- axp20x_funcs_groups_from_mask(&pdev->dev, pctl->desc->ldo_mask,
3591 ++ ret = axp20x_funcs_groups_from_mask(&pdev->dev, pctl->desc->ldo_mask,
3592 + npins, &pctl->funcs[AXP20X_FUNC_LDO],
3593 + pctl->desc->pins);
3594 ++ if (ret)
3595 ++ return ret;
3596 +
3597 +- axp20x_funcs_groups_from_mask(&pdev->dev, pctl->desc->adc_mask,
3598 ++ ret = axp20x_funcs_groups_from_mask(&pdev->dev, pctl->desc->adc_mask,
3599 + npins, &pctl->funcs[AXP20X_FUNC_ADC],
3600 + pctl->desc->pins);
3601 ++ if (ret)
3602 ++ return ret;
3603 ++
3604 ++ return 0;
3605 + }
3606 +
3607 + static const struct of_device_id axp20x_pctl_match[] = {
3608 +@@ -424,7 +434,11 @@ static int axp20x_pctl_probe(struct platform_device *pdev)
3609 +
3610 + platform_set_drvdata(pdev, pctl);
3611 +
3612 +- axp20x_build_funcs_groups(pdev);
3613 ++ ret = axp20x_build_funcs_groups(pdev);
3614 ++ if (ret) {
3615 ++ dev_err(&pdev->dev, "failed to build groups\n");
3616 ++ return ret;
3617 ++ }
3618 +
3619 + pctrl_desc = devm_kzalloc(&pdev->dev, sizeof(*pctrl_desc), GFP_KERNEL);
3620 + if (!pctrl_desc)
3621 +diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
3622 +index 136ff2b4cce5..db2af09067db 100644
3623 +--- a/drivers/platform/x86/asus-nb-wmi.c
3624 ++++ b/drivers/platform/x86/asus-nb-wmi.c
3625 +@@ -496,6 +496,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
3626 + { KE_KEY, 0xC4, { KEY_KBDILLUMUP } },
3627 + { KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } },
3628 + { KE_IGNORE, 0xC6, }, /* Ambient Light Sensor notification */
3629 ++ { KE_KEY, 0xFA, { KEY_PROG2 } }, /* Lid flip action */
3630 + { KE_END, 0},
3631 + };
3632 +
3633 +diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
3634 +index b5b890127479..b7dfe06261f1 100644
3635 +--- a/drivers/platform/x86/intel_punit_ipc.c
3636 ++++ b/drivers/platform/x86/intel_punit_ipc.c
3637 +@@ -17,6 +17,7 @@
3638 + #include <linux/bitops.h>
3639 + #include <linux/device.h>
3640 + #include <linux/interrupt.h>
3641 ++#include <linux/io.h>
3642 + #include <linux/platform_device.h>
3643 + #include <asm/intel_punit_ipc.h>
3644 +
3645 +diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
3646 +index 822860b4801a..c1ed641b3e26 100644
3647 +--- a/drivers/pwm/pwm-meson.c
3648 ++++ b/drivers/pwm/pwm-meson.c
3649 +@@ -458,7 +458,6 @@ static int meson_pwm_init_channels(struct meson_pwm *meson,
3650 + struct meson_pwm_channel *channels)
3651 + {
3652 + struct device *dev = meson->chip.dev;
3653 +- struct device_node *np = dev->of_node;
3654 + struct clk_init_data init;
3655 + unsigned int i;
3656 + char name[255];
3657 +@@ -467,7 +466,7 @@ static int meson_pwm_init_channels(struct meson_pwm *meson,
3658 + for (i = 0; i < meson->chip.npwm; i++) {
3659 + struct meson_pwm_channel *channel = &channels[i];
3660 +
3661 +- snprintf(name, sizeof(name), "%pOF#mux%u", np, i);
3662 ++ snprintf(name, sizeof(name), "%s#mux%u", dev_name(dev), i);
3663 +
3664 + init.name = name;
3665 + init.ops = &clk_mux_ops;
3666 +diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
3667 +index bbf95b78ef5d..43e3398c9268 100644
3668 +--- a/drivers/s390/block/dasd_eckd.c
3669 ++++ b/drivers/s390/block/dasd_eckd.c
3670 +@@ -1780,6 +1780,9 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
3671 + struct dasd_eckd_private *private = device->private;
3672 + int i;
3673 +
3674 ++ if (!private)
3675 ++ return;
3676 ++
3677 + dasd_alias_disconnect_device_from_lcu(device);
3678 + private->ned = NULL;
3679 + private->sneq = NULL;
3680 +@@ -2035,8 +2038,11 @@ static int dasd_eckd_basic_to_ready(struct dasd_device *device)
3681 +
3682 + static int dasd_eckd_online_to_ready(struct dasd_device *device)
3683 + {
3684 +- cancel_work_sync(&device->reload_device);
3685 +- cancel_work_sync(&device->kick_validate);
3686 ++ if (cancel_work_sync(&device->reload_device))
3687 ++ dasd_put_device(device);
3688 ++ if (cancel_work_sync(&device->kick_validate))
3689 ++ dasd_put_device(device);
3690 ++
3691 + return 0;
3692 + };
3693 +
3694 +diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
3695 +index 80e5b283fd81..1391e5f35918 100644
3696 +--- a/drivers/scsi/aic94xx/aic94xx_init.c
3697 ++++ b/drivers/scsi/aic94xx/aic94xx_init.c
3698 +@@ -1030,8 +1030,10 @@ static int __init aic94xx_init(void)
3699 +
3700 + aic94xx_transport_template =
3701 + sas_domain_attach_transport(&aic94xx_transport_functions);
3702 +- if (!aic94xx_transport_template)
3703 ++ if (!aic94xx_transport_template) {
3704 ++ err = -ENOMEM;
3705 + goto out_destroy_caches;
3706 ++ }
3707 +
3708 + err = pci_register_driver(&aic94xx_pci_driver);
3709 + if (err)
3710 +diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
3711 +index e40a2c0a9543..d3da39a9f567 100644
3712 +--- a/drivers/staging/comedi/drivers/ni_mio_common.c
3713 ++++ b/drivers/staging/comedi/drivers/ni_mio_common.c
3714 +@@ -5446,11 +5446,11 @@ static int ni_E_init(struct comedi_device *dev,
3715 + /* Digital I/O (PFI) subdevice */
3716 + s = &dev->subdevices[NI_PFI_DIO_SUBDEV];
3717 + s->type = COMEDI_SUBD_DIO;
3718 +- s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
3719 + s->maxdata = 1;
3720 + if (devpriv->is_m_series) {
3721 + s->n_chan = 16;
3722 + s->insn_bits = ni_pfi_insn_bits;
3723 ++ s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
3724 +
3725 + ni_writew(dev, s->state, NI_M_PFI_DO_REG);
3726 + for (i = 0; i < NUM_PFI_OUTPUT_SELECT_REGS; ++i) {
3727 +@@ -5459,6 +5459,7 @@ static int ni_E_init(struct comedi_device *dev,
3728 + }
3729 + } else {
3730 + s->n_chan = 10;
3731 ++ s->subdev_flags = SDF_INTERNAL;
3732 + }
3733 + s->insn_config = ni_pfi_insn_config;
3734 +
3735 +diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
3736 +index ed3114556fda..560ed8711706 100644
3737 +--- a/drivers/vhost/vhost.c
3738 ++++ b/drivers/vhost/vhost.c
3739 +@@ -951,7 +951,7 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d,
3740 + list_for_each_entry_safe(node, n, &d->pending_list, node) {
3741 + struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
3742 + if (msg->iova <= vq_msg->iova &&
3743 +- msg->iova + msg->size - 1 > vq_msg->iova &&
3744 ++ msg->iova + msg->size - 1 >= vq_msg->iova &&
3745 + vq_msg->type == VHOST_IOTLB_MISS) {
3746 + vhost_poll_queue(&node->vq->poll);
3747 + list_del(&node->node);
3748 +diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
3749 +index 2780886e8ba3..de062fb201bc 100644
3750 +--- a/drivers/virtio/virtio_pci_legacy.c
3751 ++++ b/drivers/virtio/virtio_pci_legacy.c
3752 +@@ -122,6 +122,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
3753 + struct virtqueue *vq;
3754 + u16 num;
3755 + int err;
3756 ++ u64 q_pfn;
3757 +
3758 + /* Select the queue we're interested in */
3759 + iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
3760 +@@ -141,9 +142,17 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
3761 + if (!vq)
3762 + return ERR_PTR(-ENOMEM);
3763 +
3764 ++ q_pfn = virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
3765 ++ if (q_pfn >> 32) {
3766 ++ dev_err(&vp_dev->pci_dev->dev,
3767 ++ "platform bug: legacy virtio-mmio must not be used with RAM above 0x%llxGB\n",
3768 ++ 0x1ULL << (32 + PAGE_SHIFT - 30));
3769 ++ err = -E2BIG;
3770 ++ goto out_del_vq;
3771 ++ }
3772 ++
3773 + /* activate the queue */
3774 +- iowrite32(virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
3775 +- vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
3776 ++ iowrite32(q_pfn, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
3777 +
3778 + vq->priv = (void __force *)vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;
3779 +
3780 +@@ -160,6 +169,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
3781 +
3782 + out_deactivate:
3783 + iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
3784 ++out_del_vq:
3785 + vring_del_virtqueue(vq);
3786 + return ERR_PTR(err);
3787 + }
3788 +diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
3789 +index b437fccd4e62..294f35ce9e46 100644
3790 +--- a/drivers/xen/xen-balloon.c
3791 ++++ b/drivers/xen/xen-balloon.c
3792 +@@ -81,7 +81,7 @@ static void watch_target(struct xenbus_watch *watch,
3793 + static_max = new_target;
3794 + else
3795 + static_max >>= PAGE_SHIFT - 10;
3796 +- target_diff = xen_pv_domain() ? 0
3797 ++ target_diff = (xen_pv_domain() || xen_initial_domain()) ? 0
3798 + : static_max - balloon_stats.target_pages;
3799 + }
3800 +
3801 +diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
3802 +index a3fdb4fe967d..daf45472bef9 100644
3803 +--- a/fs/btrfs/check-integrity.c
3804 ++++ b/fs/btrfs/check-integrity.c
3805 +@@ -1539,7 +1539,12 @@ static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
3806 + }
3807 +
3808 + device = multi->stripes[0].dev;
3809 +- block_ctx_out->dev = btrfsic_dev_state_lookup(device->bdev->bd_dev);
3810 ++ if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state) ||
3811 ++ !device->bdev || !device->name)
3812 ++ block_ctx_out->dev = NULL;
3813 ++ else
3814 ++ block_ctx_out->dev = btrfsic_dev_state_lookup(
3815 ++ device->bdev->bd_dev);
3816 + block_ctx_out->dev_bytenr = multi->stripes[0].physical;
3817 + block_ctx_out->start = bytenr;
3818 + block_ctx_out->len = len;
3819 +diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
3820 +index e2ba0419297a..d20b244623f2 100644
3821 +--- a/fs/btrfs/dev-replace.c
3822 ++++ b/fs/btrfs/dev-replace.c
3823 +@@ -676,6 +676,12 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
3824 +
3825 + btrfs_rm_dev_replace_unblocked(fs_info);
3826 +
3827 ++ /*
3828 ++ * Increment dev_stats_ccnt so that btrfs_run_dev_stats() will
3829 ++ * update on-disk dev stats value during commit transaction
3830 ++ */
3831 ++ atomic_inc(&tgt_device->dev_stats_ccnt);
3832 ++
3833 + /*
3834 + * this is again a consistent state where no dev_replace procedure
3835 + * is running, the target device is part of the filesystem, the
3836 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
3837 +index 8aab7a6c1e58..53cac20650d8 100644
3838 +--- a/fs/btrfs/extent-tree.c
3839 ++++ b/fs/btrfs/extent-tree.c
3840 +@@ -10687,7 +10687,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
3841 + /* Don't want to race with allocators so take the groups_sem */
3842 + down_write(&space_info->groups_sem);
3843 + spin_lock(&block_group->lock);
3844 +- if (block_group->reserved ||
3845 ++ if (block_group->reserved || block_group->pinned ||
3846 + btrfs_block_group_used(&block_group->item) ||
3847 + block_group->ro ||
3848 + list_is_singular(&block_group->list)) {
3849 +diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
3850 +index 879b76fa881a..be94c65bb4d2 100644
3851 +--- a/fs/btrfs/relocation.c
3852 ++++ b/fs/btrfs/relocation.c
3853 +@@ -1321,18 +1321,19 @@ static void __del_reloc_root(struct btrfs_root *root)
3854 + struct mapping_node *node = NULL;
3855 + struct reloc_control *rc = fs_info->reloc_ctl;
3856 +
3857 +- spin_lock(&rc->reloc_root_tree.lock);
3858 +- rb_node = tree_search(&rc->reloc_root_tree.rb_root,
3859 +- root->node->start);
3860 +- if (rb_node) {
3861 +- node = rb_entry(rb_node, struct mapping_node, rb_node);
3862 +- rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
3863 ++ if (rc) {
3864 ++ spin_lock(&rc->reloc_root_tree.lock);
3865 ++ rb_node = tree_search(&rc->reloc_root_tree.rb_root,
3866 ++ root->node->start);
3867 ++ if (rb_node) {
3868 ++ node = rb_entry(rb_node, struct mapping_node, rb_node);
3869 ++ rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
3870 ++ }
3871 ++ spin_unlock(&rc->reloc_root_tree.lock);
3872 ++ if (!node)
3873 ++ return;
3874 ++ BUG_ON((struct btrfs_root *)node->data != root);
3875 + }
3876 +- spin_unlock(&rc->reloc_root_tree.lock);
3877 +-
3878 +- if (!node)
3879 +- return;
3880 +- BUG_ON((struct btrfs_root *)node->data != root);
3881 +
3882 + spin_lock(&fs_info->trans_lock);
3883 + list_del_init(&root->root_list);
3884 +diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
3885 +index bddfc28b27c0..9b25f29d0e73 100644
3886 +--- a/fs/btrfs/super.c
3887 ++++ b/fs/btrfs/super.c
3888 +@@ -892,6 +892,8 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
3889 + char *device_name, *opts, *orig, *p;
3890 + int error = 0;
3891 +
3892 ++ lockdep_assert_held(&uuid_mutex);
3893 ++
3894 + if (!options)
3895 + return 0;
3896 +
3897 +@@ -1526,12 +1528,6 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
3898 + if (!(flags & SB_RDONLY))
3899 + mode |= FMODE_WRITE;
3900 +
3901 +- error = btrfs_parse_early_options(data, mode, fs_type,
3902 +- &fs_devices);
3903 +- if (error) {
3904 +- return ERR_PTR(error);
3905 +- }
3906 +-
3907 + security_init_mnt_opts(&new_sec_opts);
3908 + if (data) {
3909 + error = parse_security_options(data, &new_sec_opts);
3910 +@@ -1539,10 +1535,6 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
3911 + return ERR_PTR(error);
3912 + }
3913 +
3914 +- error = btrfs_scan_one_device(device_name, mode, fs_type, &fs_devices);
3915 +- if (error)
3916 +- goto error_sec_opts;
3917 +-
3918 + /*
3919 + * Setup a dummy root and fs_info for test/set super. This is because
3920 + * we don't actually fill this stuff out until open_ctree, but we need
3921 +@@ -1555,8 +1547,6 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
3922 + goto error_sec_opts;
3923 + }
3924 +
3925 +- fs_info->fs_devices = fs_devices;
3926 +-
3927 + fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
3928 + fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
3929 + security_init_mnt_opts(&fs_info->security_opts);
3930 +@@ -1565,7 +1555,23 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
3931 + goto error_fs_info;
3932 + }
3933 +
3934 ++ mutex_lock(&uuid_mutex);
3935 ++ error = btrfs_parse_early_options(data, mode, fs_type, &fs_devices);
3936 ++ if (error) {
3937 ++ mutex_unlock(&uuid_mutex);
3938 ++ goto error_fs_info;
3939 ++ }
3940 ++
3941 ++ error = btrfs_scan_one_device(device_name, mode, fs_type, &fs_devices);
3942 ++ if (error) {
3943 ++ mutex_unlock(&uuid_mutex);
3944 ++ goto error_fs_info;
3945 ++ }
3946 ++
3947 ++ fs_info->fs_devices = fs_devices;
3948 ++
3949 + error = btrfs_open_devices(fs_devices, mode, fs_type);
3950 ++ mutex_unlock(&uuid_mutex);
3951 + if (error)
3952 + goto error_fs_info;
3953 +
3954 +@@ -2234,15 +2240,21 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
3955 +
3956 + switch (cmd) {
3957 + case BTRFS_IOC_SCAN_DEV:
3958 ++ mutex_lock(&uuid_mutex);
3959 + ret = btrfs_scan_one_device(vol->name, FMODE_READ,
3960 + &btrfs_root_fs_type, &fs_devices);
3961 ++ mutex_unlock(&uuid_mutex);
3962 + break;
3963 + case BTRFS_IOC_DEVICES_READY:
3964 ++ mutex_lock(&uuid_mutex);
3965 + ret = btrfs_scan_one_device(vol->name, FMODE_READ,
3966 + &btrfs_root_fs_type, &fs_devices);
3967 +- if (ret)
3968 ++ if (ret) {
3969 ++ mutex_unlock(&uuid_mutex);
3970 + break;
3971 ++ }
3972 + ret = !(fs_devices->num_devices == fs_devices->total_devices);
3973 ++ mutex_unlock(&uuid_mutex);
3974 + break;
3975 + case BTRFS_IOC_GET_SUPPORTED_FEATURES:
3976 + ret = btrfs_ioctl_get_supported_features((void __user*)arg);
3977 +@@ -2368,7 +2380,7 @@ static __cold void btrfs_interface_exit(void)
3978 +
3979 + static void __init btrfs_print_mod_info(void)
3980 + {
3981 +- pr_info("Btrfs loaded, crc32c=%s"
3982 ++ static const char options[] = ""
3983 + #ifdef CONFIG_BTRFS_DEBUG
3984 + ", debug=on"
3985 + #endif
3986 +@@ -2381,8 +2393,8 @@ static void __init btrfs_print_mod_info(void)
3987 + #ifdef CONFIG_BTRFS_FS_REF_VERIFY
3988 + ", ref-verify=on"
3989 + #endif
3990 +- "\n",
3991 +- crc32c_impl());
3992 ++ ;
3993 ++ pr_info("Btrfs loaded, crc32c=%s%s\n", crc32c_impl(), options);
3994 + }
3995 +
3996 + static int __init init_btrfs_fs(void)
3997 +diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
3998 +index 8d40e7dd8c30..d014af352ce0 100644
3999 +--- a/fs/btrfs/tree-checker.c
4000 ++++ b/fs/btrfs/tree-checker.c
4001 +@@ -396,9 +396,22 @@ static int check_leaf(struct btrfs_fs_info *fs_info, struct extent_buffer *leaf,
4002 + * skip this check for relocation trees.
4003 + */
4004 + if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) {
4005 ++ u64 owner = btrfs_header_owner(leaf);
4006 + struct btrfs_root *check_root;
4007 +
4008 +- key.objectid = btrfs_header_owner(leaf);
4009 ++ /* These trees must never be empty */
4010 ++ if (owner == BTRFS_ROOT_TREE_OBJECTID ||
4011 ++ owner == BTRFS_CHUNK_TREE_OBJECTID ||
4012 ++ owner == BTRFS_EXTENT_TREE_OBJECTID ||
4013 ++ owner == BTRFS_DEV_TREE_OBJECTID ||
4014 ++ owner == BTRFS_FS_TREE_OBJECTID ||
4015 ++ owner == BTRFS_DATA_RELOC_TREE_OBJECTID) {
4016 ++ generic_err(fs_info, leaf, 0,
4017 ++ "invalid root, root %llu must never be empty",
4018 ++ owner);
4019 ++ return -EUCLEAN;
4020 ++ }
4021 ++ key.objectid = owner;
4022 + key.type = BTRFS_ROOT_ITEM_KEY;
4023 + key.offset = (u64)-1;
4024 +
4025 +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
4026 +index 1da162928d1a..5304b8d6ceb8 100644
4027 +--- a/fs/btrfs/volumes.c
4028 ++++ b/fs/btrfs/volumes.c
4029 +@@ -634,44 +634,48 @@ static void pending_bios_fn(struct btrfs_work *work)
4030 + * devices.
4031 + */
4032 + static void btrfs_free_stale_devices(const char *path,
4033 +- struct btrfs_device *skip_dev)
4034 ++ struct btrfs_device *skip_device)
4035 + {
4036 +- struct btrfs_fs_devices *fs_devs, *tmp_fs_devs;
4037 +- struct btrfs_device *dev, *tmp_dev;
4038 ++ struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
4039 ++ struct btrfs_device *device, *tmp_device;
4040 +
4041 +- list_for_each_entry_safe(fs_devs, tmp_fs_devs, &fs_uuids, fs_list) {
4042 +-
4043 +- if (fs_devs->opened)
4044 ++ list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {
4045 ++ mutex_lock(&fs_devices->device_list_mutex);
4046 ++ if (fs_devices->opened) {
4047 ++ mutex_unlock(&fs_devices->device_list_mutex);
4048 + continue;
4049 ++ }
4050 +
4051 +- list_for_each_entry_safe(dev, tmp_dev,
4052 +- &fs_devs->devices, dev_list) {
4053 ++ list_for_each_entry_safe(device, tmp_device,
4054 ++ &fs_devices->devices, dev_list) {
4055 + int not_found = 0;
4056 +
4057 +- if (skip_dev && skip_dev == dev)
4058 ++ if (skip_device && skip_device == device)
4059 + continue;
4060 +- if (path && !dev->name)
4061 ++ if (path && !device->name)
4062 + continue;
4063 +
4064 + rcu_read_lock();
4065 + if (path)
4066 +- not_found = strcmp(rcu_str_deref(dev->name),
4067 ++ not_found = strcmp(rcu_str_deref(device->name),
4068 + path);
4069 + rcu_read_unlock();
4070 + if (not_found)
4071 + continue;
4072 +
4073 + /* delete the stale device */
4074 +- if (fs_devs->num_devices == 1) {
4075 +- btrfs_sysfs_remove_fsid(fs_devs);
4076 +- list_del(&fs_devs->fs_list);
4077 +- free_fs_devices(fs_devs);
4078 ++ fs_devices->num_devices--;
4079 ++ list_del(&device->dev_list);
4080 ++ btrfs_free_device(device);
4081 ++
4082 ++ if (fs_devices->num_devices == 0)
4083 + break;
4084 +- } else {
4085 +- fs_devs->num_devices--;
4086 +- list_del(&dev->dev_list);
4087 +- btrfs_free_device(dev);
4088 +- }
4089 ++ }
4090 ++ mutex_unlock(&fs_devices->device_list_mutex);
4091 ++ if (fs_devices->num_devices == 0) {
4092 ++ btrfs_sysfs_remove_fsid(fs_devices);
4093 ++ list_del(&fs_devices->fs_list);
4094 ++ free_fs_devices(fs_devices);
4095 + }
4096 + }
4097 + }
4098 +@@ -750,7 +754,8 @@ error_brelse:
4099 + * error pointer when failed
4100 + */
4101 + static noinline struct btrfs_device *device_list_add(const char *path,
4102 +- struct btrfs_super_block *disk_super)
4103 ++ struct btrfs_super_block *disk_super,
4104 ++ bool *new_device_added)
4105 + {
4106 + struct btrfs_device *device;
4107 + struct btrfs_fs_devices *fs_devices;
4108 +@@ -764,21 +769,26 @@ static noinline struct btrfs_device *device_list_add(const char *path,
4109 + if (IS_ERR(fs_devices))
4110 + return ERR_CAST(fs_devices);
4111 +
4112 ++ mutex_lock(&fs_devices->device_list_mutex);
4113 + list_add(&fs_devices->fs_list, &fs_uuids);
4114 +
4115 + device = NULL;
4116 + } else {
4117 ++ mutex_lock(&fs_devices->device_list_mutex);
4118 + device = find_device(fs_devices, devid,
4119 + disk_super->dev_item.uuid);
4120 + }
4121 +
4122 + if (!device) {
4123 +- if (fs_devices->opened)
4124 ++ if (fs_devices->opened) {
4125 ++ mutex_unlock(&fs_devices->device_list_mutex);
4126 + return ERR_PTR(-EBUSY);
4127 ++ }
4128 +
4129 + device = btrfs_alloc_device(NULL, &devid,
4130 + disk_super->dev_item.uuid);
4131 + if (IS_ERR(device)) {
4132 ++ mutex_unlock(&fs_devices->device_list_mutex);
4133 + /* we can safely leave the fs_devices entry around */
4134 + return device;
4135 + }
4136 +@@ -786,17 +796,16 @@ static noinline struct btrfs_device *device_list_add(const char *path,
4137 + name = rcu_string_strdup(path, GFP_NOFS);
4138 + if (!name) {
4139 + btrfs_free_device(device);
4140 ++ mutex_unlock(&fs_devices->device_list_mutex);
4141 + return ERR_PTR(-ENOMEM);
4142 + }
4143 + rcu_assign_pointer(device->name, name);
4144 +
4145 +- mutex_lock(&fs_devices->device_list_mutex);
4146 + list_add_rcu(&device->dev_list, &fs_devices->devices);
4147 + fs_devices->num_devices++;
4148 +- mutex_unlock(&fs_devices->device_list_mutex);
4149 +
4150 + device->fs_devices = fs_devices;
4151 +- btrfs_free_stale_devices(path, device);
4152 ++ *new_device_added = true;
4153 +
4154 + if (disk_super->label[0])
4155 + pr_info("BTRFS: device label %s devid %llu transid %llu %s\n",
4156 +@@ -840,12 +849,15 @@ static noinline struct btrfs_device *device_list_add(const char *path,
4157 + * with larger generation number or the last-in if
4158 + * generation are equal.
4159 + */
4160 ++ mutex_unlock(&fs_devices->device_list_mutex);
4161 + return ERR_PTR(-EEXIST);
4162 + }
4163 +
4164 + name = rcu_string_strdup(path, GFP_NOFS);
4165 +- if (!name)
4166 ++ if (!name) {
4167 ++ mutex_unlock(&fs_devices->device_list_mutex);
4168 + return ERR_PTR(-ENOMEM);
4169 ++ }
4170 + rcu_string_free(device->name);
4171 + rcu_assign_pointer(device->name, name);
4172 + if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
4173 +@@ -865,6 +877,7 @@ static noinline struct btrfs_device *device_list_add(const char *path,
4174 +
4175 + fs_devices->total_devices = btrfs_super_num_devices(disk_super);
4176 +
4177 ++ mutex_unlock(&fs_devices->device_list_mutex);
4178 + return device;
4179 + }
4180 +
4181 +@@ -1146,7 +1159,8 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
4182 + {
4183 + int ret;
4184 +
4185 +- mutex_lock(&uuid_mutex);
4186 ++ lockdep_assert_held(&uuid_mutex);
4187 ++
4188 + mutex_lock(&fs_devices->device_list_mutex);
4189 + if (fs_devices->opened) {
4190 + fs_devices->opened++;
4191 +@@ -1156,7 +1170,6 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
4192 + ret = open_fs_devices(fs_devices, flags, holder);
4193 + }
4194 + mutex_unlock(&fs_devices->device_list_mutex);
4195 +- mutex_unlock(&uuid_mutex);
4196 +
4197 + return ret;
4198 + }
4199 +@@ -1221,12 +1234,15 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
4200 + struct btrfs_fs_devices **fs_devices_ret)
4201 + {
4202 + struct btrfs_super_block *disk_super;
4203 ++ bool new_device_added = false;
4204 + struct btrfs_device *device;
4205 + struct block_device *bdev;
4206 + struct page *page;
4207 + int ret = 0;
4208 + u64 bytenr;
4209 +
4210 ++ lockdep_assert_held(&uuid_mutex);
4211 ++
4212 + /*
4213 + * we would like to check all the supers, but that would make
4214 + * a btrfs mount succeed after a mkfs from a different FS.
4215 +@@ -1245,13 +1261,14 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
4216 + goto error_bdev_put;
4217 + }
4218 +
4219 +- mutex_lock(&uuid_mutex);
4220 +- device = device_list_add(path, disk_super);
4221 +- if (IS_ERR(device))
4222 ++ device = device_list_add(path, disk_super, &new_device_added);
4223 ++ if (IS_ERR(device)) {
4224 + ret = PTR_ERR(device);
4225 +- else
4226 ++ } else {
4227 + *fs_devices_ret = device->fs_devices;
4228 +- mutex_unlock(&uuid_mutex);
4229 ++ if (new_device_added)
4230 ++ btrfs_free_stale_devices(path, device);
4231 ++ }
4232 +
4233 + btrfs_release_disk_super(page);
4234 +
4235 +@@ -2029,6 +2046,9 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
4236 +
4237 + cur_devices->num_devices--;
4238 + cur_devices->total_devices--;
4239 ++ /* Update total_devices of the parent fs_devices if it's seed */
4240 ++ if (cur_devices != fs_devices)
4241 ++ fs_devices->total_devices--;
4242 +
4243 + if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
4244 + cur_devices->missing_devices--;
4245 +@@ -6563,10 +6583,14 @@ static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
4246 + write_lock(&map_tree->map_tree.lock);
4247 + ret = add_extent_mapping(&map_tree->map_tree, em, 0);
4248 + write_unlock(&map_tree->map_tree.lock);
4249 +- BUG_ON(ret); /* Tree corruption */
4250 ++ if (ret < 0) {
4251 ++ btrfs_err(fs_info,
4252 ++ "failed to add chunk map, start=%llu len=%llu: %d",
4253 ++ em->start, em->len, ret);
4254 ++ }
4255 + free_extent_map(em);
4256 +
4257 +- return 0;
4258 ++ return ret;
4259 + }
4260 +
4261 + static void fill_device_from_item(struct extent_buffer *leaf,
4262 +diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
4263 +index 991bfb271908..b20297988fe0 100644
4264 +--- a/fs/cifs/cifs_debug.c
4265 ++++ b/fs/cifs/cifs_debug.c
4266 +@@ -383,6 +383,10 @@ static ssize_t cifs_stats_proc_write(struct file *file,
4267 + atomic_set(&totBufAllocCount, 0);
4268 + atomic_set(&totSmBufAllocCount, 0);
4269 + #endif /* CONFIG_CIFS_STATS2 */
4270 ++ spin_lock(&GlobalMid_Lock);
4271 ++ GlobalMaxActiveXid = 0;
4272 ++ GlobalCurrentXid = 0;
4273 ++ spin_unlock(&GlobalMid_Lock);
4274 + spin_lock(&cifs_tcp_ses_lock);
4275 + list_for_each(tmp1, &cifs_tcp_ses_list) {
4276 + server = list_entry(tmp1, struct TCP_Server_Info,
4277 +@@ -395,6 +399,10 @@ static ssize_t cifs_stats_proc_write(struct file *file,
4278 + struct cifs_tcon,
4279 + tcon_list);
4280 + atomic_set(&tcon->num_smbs_sent, 0);
4281 ++ spin_lock(&tcon->stat_lock);
4282 ++ tcon->bytes_read = 0;
4283 ++ tcon->bytes_written = 0;
4284 ++ spin_unlock(&tcon->stat_lock);
4285 + if (server->ops->clear_stats)
4286 + server->ops->clear_stats(tcon);
4287 + }
4288 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
4289 +index 5df2c0698cda..9d02563b2147 100644
4290 +--- a/fs/cifs/connect.c
4291 ++++ b/fs/cifs/connect.c
4292 +@@ -3031,11 +3031,15 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
4293 + }
4294 +
4295 + #ifdef CONFIG_CIFS_SMB311
4296 +- if ((volume_info->linux_ext) && (ses->server->posix_ext_supported)) {
4297 +- if (ses->server->vals->protocol_id == SMB311_PROT_ID) {
4298 ++ if (volume_info->linux_ext) {
4299 ++ if (ses->server->posix_ext_supported) {
4300 + tcon->posix_extensions = true;
4301 + printk_once(KERN_WARNING
4302 + "SMB3.11 POSIX Extensions are experimental\n");
4303 ++ } else {
4304 ++ cifs_dbg(VFS, "Server does not support mounting with posix SMB3.11 extensions.\n");
4305 ++ rc = -EOPNOTSUPP;
4306 ++ goto out_fail;
4307 + }
4308 + }
4309 + #endif /* 311 */
4310 +diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
4311 +index 3ff7cec2da81..239215dcc00b 100644
4312 +--- a/fs/cifs/smb2misc.c
4313 ++++ b/fs/cifs/smb2misc.c
4314 +@@ -240,6 +240,13 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr)
4315 + if (clc_len == len + 1)
4316 + return 0;
4317 +
4318 ++ /*
4319 ++ * Some windows servers (win2016) will pad also the final
4320 ++ * PDU in a compound to 8 bytes.
4321 ++ */
4322 ++ if (((clc_len + 7) & ~7) == len)
4323 ++ return 0;
4324 ++
4325 + /*
4326 + * MacOS server pads after SMB2.1 write response with 3 bytes
4327 + * of junk. Other servers match RFC1001 len to actual
4328 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
4329 +index ffce77e00a58..44e511a35559 100644
4330 +--- a/fs/cifs/smb2pdu.c
4331 ++++ b/fs/cifs/smb2pdu.c
4332 +@@ -360,7 +360,7 @@ smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
4333 + total_len);
4334 +
4335 + if (tcon != NULL) {
4336 +-#ifdef CONFIG_CIFS_STATS2
4337 ++#ifdef CONFIG_CIFS_STATS
4338 + uint16_t com_code = le16_to_cpu(smb2_command);
4339 + cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
4340 + #endif
4341 +@@ -1928,7 +1928,7 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
4342 + {
4343 + struct smb_rqst rqst;
4344 + struct smb2_create_req *req;
4345 +- struct smb2_create_rsp *rsp;
4346 ++ struct smb2_create_rsp *rsp = NULL;
4347 + struct TCP_Server_Info *server;
4348 + struct cifs_ses *ses = tcon->ses;
4349 + struct kvec iov[3]; /* make sure at least one for each open context */
4350 +@@ -1943,27 +1943,31 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
4351 + char *pc_buf = NULL;
4352 + int flags = 0;
4353 + unsigned int total_len;
4354 +- __le16 *path = cifs_convert_path_to_utf16(full_path, cifs_sb);
4355 +-
4356 +- if (!path)
4357 +- return -ENOMEM;
4358 ++ __le16 *utf16_path = NULL;
4359 +
4360 + cifs_dbg(FYI, "mkdir\n");
4361 +
4362 ++ /* resource #1: path allocation */
4363 ++ utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
4364 ++ if (!utf16_path)
4365 ++ return -ENOMEM;
4366 ++
4367 + if (ses && (ses->server))
4368 + server = ses->server;
4369 +- else
4370 +- return -EIO;
4371 ++ else {
4372 ++ rc = -EIO;
4373 ++ goto err_free_path;
4374 ++ }
4375 +
4376 ++ /* resource #2: request */
4377 + rc = smb2_plain_req_init(SMB2_CREATE, tcon, (void **) &req, &total_len);
4378 +-
4379 + if (rc)
4380 +- return rc;
4381 ++ goto err_free_path;
4382 ++
4383 +
4384 + if (smb3_encryption_required(tcon))
4385 + flags |= CIFS_TRANSFORM_REQ;
4386 +
4387 +-
4388 + req->ImpersonationLevel = IL_IMPERSONATION;
4389 + req->DesiredAccess = cpu_to_le32(FILE_WRITE_ATTRIBUTES);
4390 + /* File attributes ignored on open (used in create though) */
4391 +@@ -1992,50 +1996,44 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
4392 + req->sync_hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
4393 + rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
4394 + &name_len,
4395 +- tcon->treeName, path);
4396 +- if (rc) {
4397 +- cifs_small_buf_release(req);
4398 +- return rc;
4399 +- }
4400 ++ tcon->treeName, utf16_path);
4401 ++ if (rc)
4402 ++ goto err_free_req;
4403 ++
4404 + req->NameLength = cpu_to_le16(name_len * 2);
4405 + uni_path_len = copy_size;
4406 +- path = copy_path;
4407 ++ /* free before overwriting resource */
4408 ++ kfree(utf16_path);
4409 ++ utf16_path = copy_path;
4410 + } else {
4411 +- uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
4412 ++ uni_path_len = (2 * UniStrnlen((wchar_t *)utf16_path, PATH_MAX)) + 2;
4413 + /* MUST set path len (NameLength) to 0 opening root of share */
4414 + req->NameLength = cpu_to_le16(uni_path_len - 2);
4415 + if (uni_path_len % 8 != 0) {
4416 + copy_size = roundup(uni_path_len, 8);
4417 + copy_path = kzalloc(copy_size, GFP_KERNEL);
4418 + if (!copy_path) {
4419 +- cifs_small_buf_release(req);
4420 +- return -ENOMEM;
4421 ++ rc = -ENOMEM;
4422 ++ goto err_free_req;
4423 + }
4424 +- memcpy((char *)copy_path, (const char *)path,
4425 ++ memcpy((char *)copy_path, (const char *)utf16_path,
4426 + uni_path_len);
4427 + uni_path_len = copy_size;
4428 +- path = copy_path;
4429 ++ /* free before overwriting resource */
4430 ++ kfree(utf16_path);
4431 ++ utf16_path = copy_path;
4432 + }
4433 + }
4434 +
4435 + iov[1].iov_len = uni_path_len;
4436 +- iov[1].iov_base = path;
4437 ++ iov[1].iov_base = utf16_path;
4438 + req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_NONE;
4439 +
4440 + if (tcon->posix_extensions) {
4441 +- if (n_iov > 2) {
4442 +- struct create_context *ccontext =
4443 +- (struct create_context *)iov[n_iov-1].iov_base;
4444 +- ccontext->Next =
4445 +- cpu_to_le32(iov[n_iov-1].iov_len);
4446 +- }
4447 +-
4448 ++ /* resource #3: posix buf */
4449 + rc = add_posix_context(iov, &n_iov, mode);
4450 +- if (rc) {
4451 +- cifs_small_buf_release(req);
4452 +- kfree(copy_path);
4453 +- return rc;
4454 +- }
4455 ++ if (rc)
4456 ++ goto err_free_req;
4457 + pc_buf = iov[n_iov-1].iov_base;
4458 + }
4459 +
4460 +@@ -2044,32 +2042,33 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
4461 + rqst.rq_iov = iov;
4462 + rqst.rq_nvec = n_iov;
4463 +
4464 +- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
4465 +- &rsp_iov);
4466 +-
4467 +- cifs_small_buf_release(req);
4468 +- rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
4469 +-
4470 +- if (rc != 0) {
4471 ++ /* resource #4: response buffer */
4472 ++ rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
4473 ++ if (rc) {
4474 + cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
4475 + trace_smb3_posix_mkdir_err(xid, tcon->tid, ses->Suid,
4476 +- CREATE_NOT_FILE, FILE_WRITE_ATTRIBUTES, rc);
4477 +- goto smb311_mkdir_exit;
4478 +- } else
4479 +- trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid,
4480 +- ses->Suid, CREATE_NOT_FILE,
4481 +- FILE_WRITE_ATTRIBUTES);
4482 ++ CREATE_NOT_FILE,
4483 ++ FILE_WRITE_ATTRIBUTES, rc);
4484 ++ goto err_free_rsp_buf;
4485 ++ }
4486 ++
4487 ++ rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
4488 ++ trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid,
4489 ++ ses->Suid, CREATE_NOT_FILE,
4490 ++ FILE_WRITE_ATTRIBUTES);
4491 +
4492 + SMB2_close(xid, tcon, rsp->PersistentFileId, rsp->VolatileFileId);
4493 +
4494 + /* Eventually save off posix specific response info and timestaps */
4495 +
4496 +-smb311_mkdir_exit:
4497 +- kfree(copy_path);
4498 +- kfree(pc_buf);
4499 ++err_free_rsp_buf:
4500 + free_rsp_buf(resp_buftype, rsp);
4501 ++ kfree(pc_buf);
4502 ++err_free_req:
4503 ++ cifs_small_buf_release(req);
4504 ++err_free_path:
4505 ++ kfree(utf16_path);
4506 + return rc;
4507 +-
4508 + }
4509 + #endif /* SMB311 */
4510 +
4511 +diff --git a/fs/dcache.c b/fs/dcache.c
4512 +index ceb7b491d1b9..d19a0dc46c04 100644
4513 +--- a/fs/dcache.c
4514 ++++ b/fs/dcache.c
4515 +@@ -292,7 +292,8 @@ void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry
4516 + spin_unlock(&dentry->d_lock);
4517 + name->name = p->name;
4518 + } else {
4519 +- memcpy(name->inline_name, dentry->d_iname, DNAME_INLINE_LEN);
4520 ++ memcpy(name->inline_name, dentry->d_iname,
4521 ++ dentry->d_name.len + 1);
4522 + spin_unlock(&dentry->d_lock);
4523 + name->name = name->inline_name;
4524 + }
4525 +diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
4526 +index 8f931d699287..b61954d40c25 100644
4527 +--- a/fs/f2fs/data.c
4528 ++++ b/fs/f2fs/data.c
4529 +@@ -2149,8 +2149,12 @@ static void f2fs_write_failed(struct address_space *mapping, loff_t to)
4530 +
4531 + if (to > i_size) {
4532 + down_write(&F2FS_I(inode)->i_mmap_sem);
4533 ++ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4534 ++
4535 + truncate_pagecache(inode, i_size);
4536 + f2fs_truncate_blocks(inode, i_size, true);
4537 ++
4538 ++ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4539 + up_write(&F2FS_I(inode)->i_mmap_sem);
4540 + }
4541 + }
4542 +@@ -2490,6 +2494,10 @@ static int f2fs_set_data_page_dirty(struct page *page)
4543 + if (!PageUptodate(page))
4544 + SetPageUptodate(page);
4545 +
4546 ++ /* don't remain PG_checked flag which was set during GC */
4547 ++ if (is_cold_data(page))
4548 ++ clear_cold_data(page);
4549 ++
4550 + if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
4551 + if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
4552 + f2fs_register_inmem_page(inode, page);
4553 +diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
4554 +index 6880c6f78d58..3ffa341cf586 100644
4555 +--- a/fs/f2fs/file.c
4556 ++++ b/fs/f2fs/file.c
4557 +@@ -782,22 +782,26 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
4558 + }
4559 +
4560 + if (attr->ia_valid & ATTR_SIZE) {
4561 +- if (attr->ia_size <= i_size_read(inode)) {
4562 +- down_write(&F2FS_I(inode)->i_mmap_sem);
4563 +- truncate_setsize(inode, attr->ia_size);
4564 ++ bool to_smaller = (attr->ia_size <= i_size_read(inode));
4565 ++
4566 ++ down_write(&F2FS_I(inode)->i_mmap_sem);
4567 ++ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4568 ++
4569 ++ truncate_setsize(inode, attr->ia_size);
4570 ++
4571 ++ if (to_smaller)
4572 + err = f2fs_truncate(inode);
4573 +- up_write(&F2FS_I(inode)->i_mmap_sem);
4574 +- if (err)
4575 +- return err;
4576 +- } else {
4577 +- /*
4578 +- * do not trim all blocks after i_size if target size is
4579 +- * larger than i_size.
4580 +- */
4581 +- down_write(&F2FS_I(inode)->i_mmap_sem);
4582 +- truncate_setsize(inode, attr->ia_size);
4583 +- up_write(&F2FS_I(inode)->i_mmap_sem);
4584 ++ /*
4585 ++ * do not trim all blocks after i_size if target size is
4586 ++ * larger than i_size.
4587 ++ */
4588 ++ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4589 ++ up_write(&F2FS_I(inode)->i_mmap_sem);
4590 +
4591 ++ if (err)
4592 ++ return err;
4593 ++
4594 ++ if (!to_smaller) {
4595 + /* should convert inline inode here */
4596 + if (!f2fs_may_inline_data(inode)) {
4597 + err = f2fs_convert_inline_inode(inode);
4598 +@@ -944,13 +948,18 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
4599 +
4600 + blk_start = (loff_t)pg_start << PAGE_SHIFT;
4601 + blk_end = (loff_t)pg_end << PAGE_SHIFT;
4602 ++
4603 + down_write(&F2FS_I(inode)->i_mmap_sem);
4604 ++ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4605 ++
4606 + truncate_inode_pages_range(mapping, blk_start,
4607 + blk_end - 1);
4608 +
4609 + f2fs_lock_op(sbi);
4610 + ret = f2fs_truncate_hole(inode, pg_start, pg_end);
4611 + f2fs_unlock_op(sbi);
4612 ++
4613 ++ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4614 + up_write(&F2FS_I(inode)->i_mmap_sem);
4615 + }
4616 + }
4617 +@@ -1295,8 +1304,6 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
4618 + if (ret)
4619 + goto out_sem;
4620 +
4621 +- truncate_pagecache_range(inode, offset, offset + len - 1);
4622 +-
4623 + pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
4624 + pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
4625 +
4626 +@@ -1326,12 +1333,19 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
4627 + unsigned int end_offset;
4628 + pgoff_t end;
4629 +
4630 ++ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4631 ++
4632 ++ truncate_pagecache_range(inode,
4633 ++ (loff_t)index << PAGE_SHIFT,
4634 ++ ((loff_t)pg_end << PAGE_SHIFT) - 1);
4635 ++
4636 + f2fs_lock_op(sbi);
4637 +
4638 + set_new_dnode(&dn, inode, NULL, NULL, 0);
4639 + ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
4640 + if (ret) {
4641 + f2fs_unlock_op(sbi);
4642 ++ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4643 + goto out;
4644 + }
4645 +
4646 +@@ -1340,7 +1354,9 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
4647 +
4648 + ret = f2fs_do_zero_range(&dn, index, end);
4649 + f2fs_put_dnode(&dn);
4650 ++
4651 + f2fs_unlock_op(sbi);
4652 ++ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4653 +
4654 + f2fs_balance_fs(sbi, dn.node_changed);
4655 +
4656 +diff --git a/fs/fat/cache.c b/fs/fat/cache.c
4657 +index e9bed49df6b7..78d501c1fb65 100644
4658 +--- a/fs/fat/cache.c
4659 ++++ b/fs/fat/cache.c
4660 +@@ -225,7 +225,8 @@ static inline void cache_init(struct fat_cache_id *cid, int fclus, int dclus)
4661 + int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
4662 + {
4663 + struct super_block *sb = inode->i_sb;
4664 +- const int limit = sb->s_maxbytes >> MSDOS_SB(sb)->cluster_bits;
4665 ++ struct msdos_sb_info *sbi = MSDOS_SB(sb);
4666 ++ const int limit = sb->s_maxbytes >> sbi->cluster_bits;
4667 + struct fat_entry fatent;
4668 + struct fat_cache_id cid;
4669 + int nr;
4670 +@@ -234,6 +235,12 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
4671 +
4672 + *fclus = 0;
4673 + *dclus = MSDOS_I(inode)->i_start;
4674 ++ if (!fat_valid_entry(sbi, *dclus)) {
4675 ++ fat_fs_error_ratelimit(sb,
4676 ++ "%s: invalid start cluster (i_pos %lld, start %08x)",
4677 ++ __func__, MSDOS_I(inode)->i_pos, *dclus);
4678 ++ return -EIO;
4679 ++ }
4680 + if (cluster == 0)
4681 + return 0;
4682 +
4683 +@@ -250,9 +257,8 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
4684 + /* prevent the infinite loop of cluster chain */
4685 + if (*fclus > limit) {
4686 + fat_fs_error_ratelimit(sb,
4687 +- "%s: detected the cluster chain loop"
4688 +- " (i_pos %lld)", __func__,
4689 +- MSDOS_I(inode)->i_pos);
4690 ++ "%s: detected the cluster chain loop (i_pos %lld)",
4691 ++ __func__, MSDOS_I(inode)->i_pos);
4692 + nr = -EIO;
4693 + goto out;
4694 + }
4695 +@@ -262,9 +268,8 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
4696 + goto out;
4697 + else if (nr == FAT_ENT_FREE) {
4698 + fat_fs_error_ratelimit(sb,
4699 +- "%s: invalid cluster chain (i_pos %lld)",
4700 +- __func__,
4701 +- MSDOS_I(inode)->i_pos);
4702 ++ "%s: invalid cluster chain (i_pos %lld)",
4703 ++ __func__, MSDOS_I(inode)->i_pos);
4704 + nr = -EIO;
4705 + goto out;
4706 + } else if (nr == FAT_ENT_EOF) {
4707 +diff --git a/fs/fat/fat.h b/fs/fat/fat.h
4708 +index 8fc1093da47d..a0a00f3734bc 100644
4709 +--- a/fs/fat/fat.h
4710 ++++ b/fs/fat/fat.h
4711 +@@ -348,6 +348,11 @@ static inline void fatent_brelse(struct fat_entry *fatent)
4712 + fatent->fat_inode = NULL;
4713 + }
4714 +
4715 ++static inline bool fat_valid_entry(struct msdos_sb_info *sbi, int entry)
4716 ++{
4717 ++ return FAT_START_ENT <= entry && entry < sbi->max_cluster;
4718 ++}
4719 ++
4720 + extern void fat_ent_access_init(struct super_block *sb);
4721 + extern int fat_ent_read(struct inode *inode, struct fat_entry *fatent,
4722 + int entry);
4723 +diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
4724 +index bac10de678cc..3aef8630a4b9 100644
4725 +--- a/fs/fat/fatent.c
4726 ++++ b/fs/fat/fatent.c
4727 +@@ -23,7 +23,7 @@ static void fat12_ent_blocknr(struct super_block *sb, int entry,
4728 + {
4729 + struct msdos_sb_info *sbi = MSDOS_SB(sb);
4730 + int bytes = entry + (entry >> 1);
4731 +- WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry);
4732 ++ WARN_ON(!fat_valid_entry(sbi, entry));
4733 + *offset = bytes & (sb->s_blocksize - 1);
4734 + *blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
4735 + }
4736 +@@ -33,7 +33,7 @@ static void fat_ent_blocknr(struct super_block *sb, int entry,
4737 + {
4738 + struct msdos_sb_info *sbi = MSDOS_SB(sb);
4739 + int bytes = (entry << sbi->fatent_shift);
4740 +- WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry);
4741 ++ WARN_ON(!fat_valid_entry(sbi, entry));
4742 + *offset = bytes & (sb->s_blocksize - 1);
4743 + *blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
4744 + }
4745 +@@ -353,7 +353,7 @@ int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry)
4746 + int err, offset;
4747 + sector_t blocknr;
4748 +
4749 +- if (entry < FAT_START_ENT || sbi->max_cluster <= entry) {
4750 ++ if (!fat_valid_entry(sbi, entry)) {
4751 + fatent_brelse(fatent);
4752 + fat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", entry);
4753 + return -EIO;
4754 +diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
4755 +index ad04a5741016..9a8772465a90 100644
4756 +--- a/fs/hfs/brec.c
4757 ++++ b/fs/hfs/brec.c
4758 +@@ -75,9 +75,10 @@ int hfs_brec_insert(struct hfs_find_data *fd, void *entry, int entry_len)
4759 + if (!fd->bnode) {
4760 + if (!tree->root)
4761 + hfs_btree_inc_height(tree);
4762 +- fd->bnode = hfs_bnode_find(tree, tree->leaf_head);
4763 +- if (IS_ERR(fd->bnode))
4764 +- return PTR_ERR(fd->bnode);
4765 ++ node = hfs_bnode_find(tree, tree->leaf_head);
4766 ++ if (IS_ERR(node))
4767 ++ return PTR_ERR(node);
4768 ++ fd->bnode = node;
4769 + fd->record = -1;
4770 + }
4771 + new_node = NULL;
4772 +diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
4773 +index b5254378f011..cd017d7dbdfa 100644
4774 +--- a/fs/hfsplus/dir.c
4775 ++++ b/fs/hfsplus/dir.c
4776 +@@ -78,13 +78,13 @@ again:
4777 + cpu_to_be32(HFSP_HARDLINK_TYPE) &&
4778 + entry.file.user_info.fdCreator ==
4779 + cpu_to_be32(HFSP_HFSPLUS_CREATOR) &&
4780 ++ HFSPLUS_SB(sb)->hidden_dir &&
4781 + (entry.file.create_date ==
4782 + HFSPLUS_I(HFSPLUS_SB(sb)->hidden_dir)->
4783 + create_date ||
4784 + entry.file.create_date ==
4785 + HFSPLUS_I(d_inode(sb->s_root))->
4786 +- create_date) &&
4787 +- HFSPLUS_SB(sb)->hidden_dir) {
4788 ++ create_date)) {
4789 + struct qstr str;
4790 + char name[32];
4791 +
4792 +diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
4793 +index a6c0f54c48c3..80abba550bfa 100644
4794 +--- a/fs/hfsplus/super.c
4795 ++++ b/fs/hfsplus/super.c
4796 +@@ -524,8 +524,10 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
4797 + goto out_put_root;
4798 + if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
4799 + hfs_find_exit(&fd);
4800 +- if (entry.type != cpu_to_be16(HFSPLUS_FOLDER))
4801 ++ if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) {
4802 ++ err = -EINVAL;
4803 + goto out_put_root;
4804 ++ }
4805 + inode = hfsplus_iget(sb, be32_to_cpu(entry.folder.id));
4806 + if (IS_ERR(inode)) {
4807 + err = PTR_ERR(inode);
4808 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
4809 +index 464db0c0f5c8..ff98e2a3f3cc 100644
4810 +--- a/fs/nfs/nfs4proc.c
4811 ++++ b/fs/nfs/nfs4proc.c
4812 +@@ -7734,7 +7734,7 @@ static int nfs4_sp4_select_mode(struct nfs_client *clp,
4813 + }
4814 + out:
4815 + clp->cl_sp4_flags = flags;
4816 +- return 0;
4817 ++ return ret;
4818 + }
4819 +
4820 + struct nfs41_exchange_id_data {
4821 +diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
4822 +index e64ecb9f2720..66c373230e60 100644
4823 +--- a/fs/proc/kcore.c
4824 ++++ b/fs/proc/kcore.c
4825 +@@ -384,8 +384,10 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
4826 + phdr->p_flags = PF_R|PF_W|PF_X;
4827 + phdr->p_offset = kc_vaddr_to_offset(m->addr) + dataoff;
4828 + phdr->p_vaddr = (size_t)m->addr;
4829 +- if (m->type == KCORE_RAM || m->type == KCORE_TEXT)
4830 ++ if (m->type == KCORE_RAM)
4831 + phdr->p_paddr = __pa(m->addr);
4832 ++ else if (m->type == KCORE_TEXT)
4833 ++ phdr->p_paddr = __pa_symbol(m->addr);
4834 + else
4835 + phdr->p_paddr = (elf_addr_t)-1;
4836 + phdr->p_filesz = phdr->p_memsz = m->size;
4837 +diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
4838 +index cfb6674331fd..0651646dd04d 100644
4839 +--- a/fs/proc/vmcore.c
4840 ++++ b/fs/proc/vmcore.c
4841 +@@ -225,6 +225,7 @@ out_unlock:
4842 + return ret;
4843 + }
4844 +
4845 ++#ifdef CONFIG_MMU
4846 + static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
4847 + u64 start, size_t size)
4848 + {
4849 +@@ -259,6 +260,7 @@ out_unlock:
4850 + mutex_unlock(&vmcoredd_mutex);
4851 + return ret;
4852 + }
4853 ++#endif /* CONFIG_MMU */
4854 + #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
4855 +
4856 + /* Read from the ELF header and then the crash dump. On error, negative value is
4857 +diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
4858 +index ae4811fecc1f..6d670bd9ab6b 100644
4859 +--- a/fs/reiserfs/reiserfs.h
4860 ++++ b/fs/reiserfs/reiserfs.h
4861 +@@ -271,7 +271,7 @@ struct reiserfs_journal_list {
4862 +
4863 + struct mutex j_commit_mutex;
4864 + unsigned int j_trans_id;
4865 +- time_t j_timestamp;
4866 ++ time64_t j_timestamp; /* write-only but useful for crash dump analysis */
4867 + struct reiserfs_list_bitmap *j_list_bitmap;
4868 + struct buffer_head *j_commit_bh; /* commit buffer head */
4869 + struct reiserfs_journal_cnode *j_realblock;
4870 +diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
4871 +index 29502238e510..bf85e152af05 100644
4872 +--- a/include/linux/pci_ids.h
4873 ++++ b/include/linux/pci_ids.h
4874 +@@ -3082,4 +3082,6 @@
4875 +
4876 + #define PCI_VENDOR_ID_OCZ 0x1b85
4877 +
4878 ++#define PCI_VENDOR_ID_NCUBE 0x10ff
4879 ++
4880 + #endif /* _LINUX_PCI_IDS_H */
4881 +diff --git a/include/net/tcp.h b/include/net/tcp.h
4882 +index cd3ecda9386a..106e01c721e6 100644
4883 +--- a/include/net/tcp.h
4884 ++++ b/include/net/tcp.h
4885 +@@ -2023,6 +2023,10 @@ int tcp_set_ulp_id(struct sock *sk, const int ulp);
4886 + void tcp_get_available_ulp(char *buf, size_t len);
4887 + void tcp_cleanup_ulp(struct sock *sk);
4888 +
4889 ++#define MODULE_ALIAS_TCP_ULP(name) \
4890 ++ __MODULE_INFO(alias, alias_userspace, name); \
4891 ++ __MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name)
4892 ++
4893 + /* Call BPF_SOCK_OPS program that returns an int. If the return value
4894 + * is < 0, then the BPF op failed (for example if the loaded BPF
4895 + * program does not support the chosen operation or there is no BPF
4896 +diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h
4897 +index 7b8c9e19bad1..910cc4334b21 100644
4898 +--- a/include/uapi/linux/keyctl.h
4899 ++++ b/include/uapi/linux/keyctl.h
4900 +@@ -65,7 +65,7 @@
4901 +
4902 + /* keyctl structures */
4903 + struct keyctl_dh_params {
4904 +- __s32 private;
4905 ++ __s32 dh_private;
4906 + __s32 prime;
4907 + __s32 base;
4908 + };
4909 +diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
4910 +index 76efe9a183f5..fc5b103512e7 100644
4911 +--- a/kernel/bpf/inode.c
4912 ++++ b/kernel/bpf/inode.c
4913 +@@ -196,19 +196,21 @@ static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos)
4914 + {
4915 + struct bpf_map *map = seq_file_to_map(m);
4916 + void *key = map_iter(m)->key;
4917 ++ void *prev_key;
4918 +
4919 + if (map_iter(m)->done)
4920 + return NULL;
4921 +
4922 + if (unlikely(v == SEQ_START_TOKEN))
4923 +- goto done;
4924 ++ prev_key = NULL;
4925 ++ else
4926 ++ prev_key = key;
4927 +
4928 +- if (map->ops->map_get_next_key(map, key, key)) {
4929 ++ if (map->ops->map_get_next_key(map, prev_key, key)) {
4930 + map_iter(m)->done = true;
4931 + return NULL;
4932 + }
4933 +
4934 +-done:
4935 + ++(*pos);
4936 + return key;
4937 + }
4938 +diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
4939 +index c4d75c52b4fc..58899601fccf 100644
4940 +--- a/kernel/bpf/sockmap.c
4941 ++++ b/kernel/bpf/sockmap.c
4942 +@@ -58,6 +58,7 @@ struct bpf_stab {
4943 + struct bpf_map map;
4944 + struct sock **sock_map;
4945 + struct bpf_sock_progs progs;
4946 ++ raw_spinlock_t lock;
4947 + };
4948 +
4949 + struct bucket {
4950 +@@ -89,9 +90,9 @@ enum smap_psock_state {
4951 +
4952 + struct smap_psock_map_entry {
4953 + struct list_head list;
4954 ++ struct bpf_map *map;
4955 + struct sock **entry;
4956 + struct htab_elem __rcu *hash_link;
4957 +- struct bpf_htab __rcu *htab;
4958 + };
4959 +
4960 + struct smap_psock {
4961 +@@ -343,13 +344,18 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
4962 + e = psock_map_pop(sk, psock);
4963 + while (e) {
4964 + if (e->entry) {
4965 +- osk = cmpxchg(e->entry, sk, NULL);
4966 ++ struct bpf_stab *stab = container_of(e->map, struct bpf_stab, map);
4967 ++
4968 ++ raw_spin_lock_bh(&stab->lock);
4969 ++ osk = *e->entry;
4970 + if (osk == sk) {
4971 ++ *e->entry = NULL;
4972 + smap_release_sock(psock, sk);
4973 + }
4974 ++ raw_spin_unlock_bh(&stab->lock);
4975 + } else {
4976 + struct htab_elem *link = rcu_dereference(e->hash_link);
4977 +- struct bpf_htab *htab = rcu_dereference(e->htab);
4978 ++ struct bpf_htab *htab = container_of(e->map, struct bpf_htab, map);
4979 + struct hlist_head *head;
4980 + struct htab_elem *l;
4981 + struct bucket *b;
4982 +@@ -370,6 +376,7 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
4983 + }
4984 + raw_spin_unlock_bh(&b->lock);
4985 + }
4986 ++ kfree(e);
4987 + e = psock_map_pop(sk, psock);
4988 + }
4989 + rcu_read_unlock();
4990 +@@ -1644,6 +1651,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
4991 + return ERR_PTR(-ENOMEM);
4992 +
4993 + bpf_map_init_from_attr(&stab->map, attr);
4994 ++ raw_spin_lock_init(&stab->lock);
4995 +
4996 + /* make sure page count doesn't overflow */
4997 + cost = (u64) stab->map.max_entries * sizeof(struct sock *);
4998 +@@ -1678,8 +1686,10 @@ static void smap_list_map_remove(struct smap_psock *psock,
4999 +
5000 + spin_lock_bh(&psock->maps_lock);
5001 + list_for_each_entry_safe(e, tmp, &psock->maps, list) {
5002 +- if (e->entry == entry)
5003 ++ if (e->entry == entry) {
5004 + list_del(&e->list);
5005 ++ kfree(e);
5006 ++ }
5007 + }
5008 + spin_unlock_bh(&psock->maps_lock);
5009 + }
5010 +@@ -1693,8 +1703,10 @@ static void smap_list_hash_remove(struct smap_psock *psock,
5011 + list_for_each_entry_safe(e, tmp, &psock->maps, list) {
5012 + struct htab_elem *c = rcu_dereference(e->hash_link);
5013 +
5014 +- if (c == hash_link)
5015 ++ if (c == hash_link) {
5016 + list_del(&e->list);
5017 ++ kfree(e);
5018 ++ }
5019 + }
5020 + spin_unlock_bh(&psock->maps_lock);
5021 + }
5022 +@@ -1714,14 +1726,15 @@ static void sock_map_free(struct bpf_map *map)
5023 + * and a grace period expire to ensure psock is really safe to remove.
5024 + */
5025 + rcu_read_lock();
5026 ++ raw_spin_lock_bh(&stab->lock);
5027 + for (i = 0; i < stab->map.max_entries; i++) {
5028 + struct smap_psock *psock;
5029 + struct sock *sock;
5030 +
5031 +- sock = xchg(&stab->sock_map[i], NULL);
5032 ++ sock = stab->sock_map[i];
5033 + if (!sock)
5034 + continue;
5035 +-
5036 ++ stab->sock_map[i] = NULL;
5037 + psock = smap_psock_sk(sock);
5038 + /* This check handles a racing sock event that can get the
5039 + * sk_callback_lock before this case but after xchg happens
5040 +@@ -1733,6 +1746,7 @@ static void sock_map_free(struct bpf_map *map)
5041 + smap_release_sock(psock, sock);
5042 + }
5043 + }
5044 ++ raw_spin_unlock_bh(&stab->lock);
5045 + rcu_read_unlock();
5046 +
5047 + sock_map_remove_complete(stab);
5048 +@@ -1776,19 +1790,23 @@ static int sock_map_delete_elem(struct bpf_map *map, void *key)
5049 + if (k >= map->max_entries)
5050 + return -EINVAL;
5051 +
5052 +- sock = xchg(&stab->sock_map[k], NULL);
5053 ++ raw_spin_lock_bh(&stab->lock);
5054 ++ sock = stab->sock_map[k];
5055 ++ stab->sock_map[k] = NULL;
5056 ++ raw_spin_unlock_bh(&stab->lock);
5057 + if (!sock)
5058 + return -EINVAL;
5059 +
5060 + psock = smap_psock_sk(sock);
5061 + if (!psock)
5062 +- goto out;
5063 +-
5064 +- if (psock->bpf_parse)
5065 ++ return 0;
5066 ++ if (psock->bpf_parse) {
5067 ++ write_lock_bh(&sock->sk_callback_lock);
5068 + smap_stop_sock(psock, sock);
5069 ++ write_unlock_bh(&sock->sk_callback_lock);
5070 ++ }
5071 + smap_list_map_remove(psock, &stab->sock_map[k]);
5072 + smap_release_sock(psock, sock);
5073 +-out:
5074 + return 0;
5075 + }
5076 +
5077 +@@ -1824,11 +1842,9 @@ out:
5078 + static int __sock_map_ctx_update_elem(struct bpf_map *map,
5079 + struct bpf_sock_progs *progs,
5080 + struct sock *sock,
5081 +- struct sock **map_link,
5082 + void *key)
5083 + {
5084 + struct bpf_prog *verdict, *parse, *tx_msg;
5085 +- struct smap_psock_map_entry *e = NULL;
5086 + struct smap_psock *psock;
5087 + bool new = false;
5088 + int err = 0;
5089 +@@ -1901,14 +1917,6 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
5090 + new = true;
5091 + }
5092 +
5093 +- if (map_link) {
5094 +- e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
5095 +- if (!e) {
5096 +- err = -ENOMEM;
5097 +- goto out_free;
5098 +- }
5099 +- }
5100 +-
5101 + /* 3. At this point we have a reference to a valid psock that is
5102 + * running. Attach any BPF programs needed.
5103 + */
5104 +@@ -1930,17 +1938,6 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
5105 + write_unlock_bh(&sock->sk_callback_lock);
5106 + }
5107 +
5108 +- /* 4. Place psock in sockmap for use and stop any programs on
5109 +- * the old sock assuming its not the same sock we are replacing
5110 +- * it with. Because we can only have a single set of programs if
5111 +- * old_sock has a strp we can stop it.
5112 +- */
5113 +- if (map_link) {
5114 +- e->entry = map_link;
5115 +- spin_lock_bh(&psock->maps_lock);
5116 +- list_add_tail(&e->list, &psock->maps);
5117 +- spin_unlock_bh(&psock->maps_lock);
5118 +- }
5119 + return err;
5120 + out_free:
5121 + smap_release_sock(psock, sock);
5122 +@@ -1951,7 +1948,6 @@ out_progs:
5123 + }
5124 + if (tx_msg)
5125 + bpf_prog_put(tx_msg);
5126 +- kfree(e);
5127 + return err;
5128 + }
5129 +
5130 +@@ -1961,36 +1957,57 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
5131 + {
5132 + struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
5133 + struct bpf_sock_progs *progs = &stab->progs;
5134 +- struct sock *osock, *sock;
5135 ++ struct sock *osock, *sock = skops->sk;
5136 ++ struct smap_psock_map_entry *e;
5137 ++ struct smap_psock *psock;
5138 + u32 i = *(u32 *)key;
5139 + int err;
5140 +
5141 + if (unlikely(flags > BPF_EXIST))
5142 + return -EINVAL;
5143 +-
5144 + if (unlikely(i >= stab->map.max_entries))
5145 + return -E2BIG;
5146 +
5147 +- sock = READ_ONCE(stab->sock_map[i]);
5148 +- if (flags == BPF_EXIST && !sock)
5149 +- return -ENOENT;
5150 +- else if (flags == BPF_NOEXIST && sock)
5151 +- return -EEXIST;
5152 ++ e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
5153 ++ if (!e)
5154 ++ return -ENOMEM;
5155 +
5156 +- sock = skops->sk;
5157 +- err = __sock_map_ctx_update_elem(map, progs, sock, &stab->sock_map[i],
5158 +- key);
5159 ++ err = __sock_map_ctx_update_elem(map, progs, sock, key);
5160 + if (err)
5161 + goto out;
5162 +
5163 +- osock = xchg(&stab->sock_map[i], sock);
5164 +- if (osock) {
5165 +- struct smap_psock *opsock = smap_psock_sk(osock);
5166 ++ /* psock guaranteed to be present. */
5167 ++ psock = smap_psock_sk(sock);
5168 ++ raw_spin_lock_bh(&stab->lock);
5169 ++ osock = stab->sock_map[i];
5170 ++ if (osock && flags == BPF_NOEXIST) {
5171 ++ err = -EEXIST;
5172 ++ goto out_unlock;
5173 ++ }
5174 ++ if (!osock && flags == BPF_EXIST) {
5175 ++ err = -ENOENT;
5176 ++ goto out_unlock;
5177 ++ }
5178 +
5179 +- smap_list_map_remove(opsock, &stab->sock_map[i]);
5180 +- smap_release_sock(opsock, osock);
5181 ++ e->entry = &stab->sock_map[i];
5182 ++ e->map = map;
5183 ++ spin_lock_bh(&psock->maps_lock);
5184 ++ list_add_tail(&e->list, &psock->maps);
5185 ++ spin_unlock_bh(&psock->maps_lock);
5186 ++
5187 ++ stab->sock_map[i] = sock;
5188 ++ if (osock) {
5189 ++ psock = smap_psock_sk(osock);
5190 ++ smap_list_map_remove(psock, &stab->sock_map[i]);
5191 ++ smap_release_sock(psock, osock);
5192 + }
5193 ++ raw_spin_unlock_bh(&stab->lock);
5194 ++ return 0;
5195 ++out_unlock:
5196 ++ smap_release_sock(psock, sock);
5197 ++ raw_spin_unlock_bh(&stab->lock);
5198 + out:
5199 ++ kfree(e);
5200 + return err;
5201 + }
5202 +
5203 +@@ -2353,7 +2370,7 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
5204 + b = __select_bucket(htab, hash);
5205 + head = &b->head;
5206 +
5207 +- err = __sock_map_ctx_update_elem(map, progs, sock, NULL, key);
5208 ++ err = __sock_map_ctx_update_elem(map, progs, sock, key);
5209 + if (err)
5210 + goto err;
5211 +
5212 +@@ -2379,8 +2396,7 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
5213 + }
5214 +
5215 + rcu_assign_pointer(e->hash_link, l_new);
5216 +- rcu_assign_pointer(e->htab,
5217 +- container_of(map, struct bpf_htab, map));
5218 ++ e->map = map;
5219 + spin_lock_bh(&psock->maps_lock);
5220 + list_add_tail(&e->list, &psock->maps);
5221 + spin_unlock_bh(&psock->maps_lock);
5222 +diff --git a/kernel/fork.c b/kernel/fork.c
5223 +index 1b27babc4c78..8ed48ca2cc43 100644
5224 +--- a/kernel/fork.c
5225 ++++ b/kernel/fork.c
5226 +@@ -549,8 +549,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
5227 + goto out;
5228 + }
5229 + /* a new mm has just been created */
5230 +- arch_dup_mmap(oldmm, mm);
5231 +- retval = 0;
5232 ++ retval = arch_dup_mmap(oldmm, mm);
5233 + out:
5234 + up_write(&mm->mmap_sem);
5235 + flush_tlb_mm(oldmm);
5236 +@@ -1417,7 +1416,9 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
5237 + return -ENOMEM;
5238 +
5239 + atomic_set(&sig->count, 1);
5240 ++ spin_lock_irq(&current->sighand->siglock);
5241 + memcpy(sig->action, current->sighand->action, sizeof(sig->action));
5242 ++ spin_unlock_irq(&current->sighand->siglock);
5243 + return 0;
5244 + }
5245 +
5246 +diff --git a/kernel/workqueue.c b/kernel/workqueue.c
5247 +index 5f78c6e41796..0280deac392e 100644
5248 +--- a/kernel/workqueue.c
5249 ++++ b/kernel/workqueue.c
5250 +@@ -2652,6 +2652,9 @@ void flush_workqueue(struct workqueue_struct *wq)
5251 + if (WARN_ON(!wq_online))
5252 + return;
5253 +
5254 ++ lock_map_acquire(&wq->lockdep_map);
5255 ++ lock_map_release(&wq->lockdep_map);
5256 ++
5257 + mutex_lock(&wq->mutex);
5258 +
5259 + /*
5260 +@@ -2843,7 +2846,8 @@ reflush:
5261 + }
5262 + EXPORT_SYMBOL_GPL(drain_workqueue);
5263 +
5264 +-static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
5265 ++static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
5266 ++ bool from_cancel)
5267 + {
5268 + struct worker *worker = NULL;
5269 + struct worker_pool *pool;
5270 +@@ -2885,7 +2889,8 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
5271 + * workqueues the deadlock happens when the rescuer stalls, blocking
5272 + * forward progress.
5273 + */
5274 +- if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer) {
5275 ++ if (!from_cancel &&
5276 ++ (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
5277 + lock_map_acquire(&pwq->wq->lockdep_map);
5278 + lock_map_release(&pwq->wq->lockdep_map);
5279 + }
5280 +@@ -2896,6 +2901,27 @@ already_gone:
5281 + return false;
5282 + }
5283 +
5284 ++static bool __flush_work(struct work_struct *work, bool from_cancel)
5285 ++{
5286 ++ struct wq_barrier barr;
5287 ++
5288 ++ if (WARN_ON(!wq_online))
5289 ++ return false;
5290 ++
5291 ++ if (!from_cancel) {
5292 ++ lock_map_acquire(&work->lockdep_map);
5293 ++ lock_map_release(&work->lockdep_map);
5294 ++ }
5295 ++
5296 ++ if (start_flush_work(work, &barr, from_cancel)) {
5297 ++ wait_for_completion(&barr.done);
5298 ++ destroy_work_on_stack(&barr.work);
5299 ++ return true;
5300 ++ } else {
5301 ++ return false;
5302 ++ }
5303 ++}
5304 ++
5305 + /**
5306 + * flush_work - wait for a work to finish executing the last queueing instance
5307 + * @work: the work to flush
5308 +@@ -2909,18 +2935,7 @@ already_gone:
5309 + */
5310 + bool flush_work(struct work_struct *work)
5311 + {
5312 +- struct wq_barrier barr;
5313 +-
5314 +- if (WARN_ON(!wq_online))
5315 +- return false;
5316 +-
5317 +- if (start_flush_work(work, &barr)) {
5318 +- wait_for_completion(&barr.done);
5319 +- destroy_work_on_stack(&barr.work);
5320 +- return true;
5321 +- } else {
5322 +- return false;
5323 +- }
5324 ++ return __flush_work(work, false);
5325 + }
5326 + EXPORT_SYMBOL_GPL(flush_work);
5327 +
5328 +@@ -2986,7 +3001,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
5329 + * isn't executing.
5330 + */
5331 + if (wq_online)
5332 +- flush_work(work);
5333 ++ __flush_work(work, true);
5334 +
5335 + clear_work_data(work);
5336 +
5337 +diff --git a/lib/debugobjects.c b/lib/debugobjects.c
5338 +index 994be4805cec..24c1df0d7466 100644
5339 +--- a/lib/debugobjects.c
5340 ++++ b/lib/debugobjects.c
5341 +@@ -360,9 +360,12 @@ static void debug_object_is_on_stack(void *addr, int onstack)
5342 +
5343 + limit++;
5344 + if (is_on_stack)
5345 +- pr_warn("object is on stack, but not annotated\n");
5346 ++ pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
5347 ++ task_stack_page(current));
5348 + else
5349 +- pr_warn("object is not on stack, but annotated\n");
5350 ++ pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
5351 ++ task_stack_page(current));
5352 ++
5353 + WARN_ON(1);
5354 + }
5355 +
5356 +diff --git a/mm/Kconfig b/mm/Kconfig
5357 +index ce95491abd6a..94af022b7f3d 100644
5358 +--- a/mm/Kconfig
5359 ++++ b/mm/Kconfig
5360 +@@ -635,7 +635,7 @@ config DEFERRED_STRUCT_PAGE_INIT
5361 + bool "Defer initialisation of struct pages to kthreads"
5362 + default n
5363 + depends on NO_BOOTMEM
5364 +- depends on !FLATMEM
5365 ++ depends on SPARSEMEM
5366 + depends on !NEED_PER_CPU_KM
5367 + help
5368 + Ordinarily all struct pages are initialised during early boot in a
5369 +diff --git a/mm/fadvise.c b/mm/fadvise.c
5370 +index afa41491d324..2d8376e3c640 100644
5371 +--- a/mm/fadvise.c
5372 ++++ b/mm/fadvise.c
5373 +@@ -72,8 +72,12 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
5374 + goto out;
5375 + }
5376 +
5377 +- /* Careful about overflows. Len == 0 means "as much as possible" */
5378 +- endbyte = offset + len;
5379 ++ /*
5380 ++ * Careful about overflows. Len == 0 means "as much as possible". Use
5381 ++ * unsigned math because signed overflows are undefined and UBSan
5382 ++ * complains.
5383 ++ */
5384 ++ endbyte = (u64)offset + (u64)len;
5385 + if (!len || endbyte < len)
5386 + endbyte = -1;
5387 + else
5388 +diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
5389 +index ef456395645a..7fb60dd4be79 100644
5390 +--- a/net/9p/trans_fd.c
5391 ++++ b/net/9p/trans_fd.c
5392 +@@ -199,15 +199,14 @@ static void p9_mux_poll_stop(struct p9_conn *m)
5393 + static void p9_conn_cancel(struct p9_conn *m, int err)
5394 + {
5395 + struct p9_req_t *req, *rtmp;
5396 +- unsigned long flags;
5397 + LIST_HEAD(cancel_list);
5398 +
5399 + p9_debug(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
5400 +
5401 +- spin_lock_irqsave(&m->client->lock, flags);
5402 ++ spin_lock(&m->client->lock);
5403 +
5404 + if (m->err) {
5405 +- spin_unlock_irqrestore(&m->client->lock, flags);
5406 ++ spin_unlock(&m->client->lock);
5407 + return;
5408 + }
5409 +
5410 +@@ -219,7 +218,6 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
5411 + list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
5412 + list_move(&req->req_list, &cancel_list);
5413 + }
5414 +- spin_unlock_irqrestore(&m->client->lock, flags);
5415 +
5416 + list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
5417 + p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req);
5418 +@@ -228,6 +226,7 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
5419 + req->t_err = err;
5420 + p9_client_cb(m->client, req, REQ_STATUS_ERROR);
5421 + }
5422 ++ spin_unlock(&m->client->lock);
5423 + }
5424 +
5425 + static __poll_t
5426 +@@ -375,8 +374,9 @@ static void p9_read_work(struct work_struct *work)
5427 + if (m->req->status != REQ_STATUS_ERROR)
5428 + status = REQ_STATUS_RCVD;
5429 + list_del(&m->req->req_list);
5430 +- spin_unlock(&m->client->lock);
5431 ++ /* update req->status while holding client->lock */
5432 + p9_client_cb(m->client, m->req, status);
5433 ++ spin_unlock(&m->client->lock);
5434 + m->rc.sdata = NULL;
5435 + m->rc.offset = 0;
5436 + m->rc.capacity = 0;
5437 +diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
5438 +index 4c2da2513c8b..2dc1c293092b 100644
5439 +--- a/net/9p/trans_virtio.c
5440 ++++ b/net/9p/trans_virtio.c
5441 +@@ -571,7 +571,7 @@ static int p9_virtio_probe(struct virtio_device *vdev)
5442 + chan->vq = virtio_find_single_vq(vdev, req_done, "requests");
5443 + if (IS_ERR(chan->vq)) {
5444 + err = PTR_ERR(chan->vq);
5445 +- goto out_free_vq;
5446 ++ goto out_free_chan;
5447 + }
5448 + chan->vq->vdev->priv = chan;
5449 + spin_lock_init(&chan->lock);
5450 +@@ -624,6 +624,7 @@ out_free_tag:
5451 + kfree(tag);
5452 + out_free_vq:
5453 + vdev->config->del_vqs(vdev);
5454 ++out_free_chan:
5455 + kfree(chan);
5456 + fail:
5457 + return err;
5458 +diff --git a/net/core/xdp.c b/net/core/xdp.c
5459 +index 6771f1855b96..2657056130a4 100644
5460 +--- a/net/core/xdp.c
5461 ++++ b/net/core/xdp.c
5462 +@@ -95,23 +95,15 @@ static void __xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
5463 + {
5464 + struct xdp_mem_allocator *xa;
5465 + int id = xdp_rxq->mem.id;
5466 +- int err;
5467 +
5468 + if (id == 0)
5469 + return;
5470 +
5471 + mutex_lock(&mem_id_lock);
5472 +
5473 +- xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
5474 +- if (!xa) {
5475 +- mutex_unlock(&mem_id_lock);
5476 +- return;
5477 +- }
5478 +-
5479 +- err = rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params);
5480 +- WARN_ON(err);
5481 +-
5482 +- call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
5483 ++ xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params);
5484 ++ if (xa && !rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
5485 ++ call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
5486 +
5487 + mutex_unlock(&mem_id_lock);
5488 + }
5489 +diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
5490 +index 2d8efeecf619..055f4bbba86b 100644
5491 +--- a/net/ipv4/ip_gre.c
5492 ++++ b/net/ipv4/ip_gre.c
5493 +@@ -1511,11 +1511,14 @@ nla_put_failure:
5494 +
5495 + static void erspan_setup(struct net_device *dev)
5496 + {
5497 ++ struct ip_tunnel *t = netdev_priv(dev);
5498 ++
5499 + ether_setup(dev);
5500 + dev->netdev_ops = &erspan_netdev_ops;
5501 + dev->priv_flags &= ~IFF_TX_SKB_SHARING;
5502 + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
5503 + ip_tunnel_setup(dev, erspan_net_id);
5504 ++ t->erspan_ver = 1;
5505 + }
5506 +
5507 + static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
5508 +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
5509 +index 3b2711e33e4c..488b201851d7 100644
5510 +--- a/net/ipv4/tcp_ipv4.c
5511 ++++ b/net/ipv4/tcp_ipv4.c
5512 +@@ -2516,6 +2516,12 @@ static int __net_init tcp_sk_init(struct net *net)
5513 + if (res)
5514 + goto fail;
5515 + sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
5516 ++
5517 ++ /* Please enforce IP_DF and IPID==0 for RST and
5518 ++ * ACK sent in SYN-RECV and TIME-WAIT state.
5519 ++ */
5520 ++ inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;
5521 ++
5522 + *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
5523 + }
5524 +
5525 +diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
5526 +index 1dda1341a223..b690132f5da2 100644
5527 +--- a/net/ipv4/tcp_minisocks.c
5528 ++++ b/net/ipv4/tcp_minisocks.c
5529 +@@ -184,8 +184,9 @@ kill:
5530 + inet_twsk_deschedule_put(tw);
5531 + return TCP_TW_SUCCESS;
5532 + }
5533 ++ } else {
5534 ++ inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
5535 + }
5536 +- inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
5537 +
5538 + if (tmp_opt.saw_tstamp) {
5539 + tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
5540 +diff --git a/net/ipv4/tcp_ulp.c b/net/ipv4/tcp_ulp.c
5541 +index 622caa4039e0..a5995bb2eaca 100644
5542 +--- a/net/ipv4/tcp_ulp.c
5543 ++++ b/net/ipv4/tcp_ulp.c
5544 +@@ -51,7 +51,7 @@ static const struct tcp_ulp_ops *__tcp_ulp_find_autoload(const char *name)
5545 + #ifdef CONFIG_MODULES
5546 + if (!ulp && capable(CAP_NET_ADMIN)) {
5547 + rcu_read_unlock();
5548 +- request_module("%s", name);
5549 ++ request_module("tcp-ulp-%s", name);
5550 + rcu_read_lock();
5551 + ulp = tcp_ulp_find(name);
5552 + }
5553 +@@ -129,6 +129,8 @@ void tcp_cleanup_ulp(struct sock *sk)
5554 + if (icsk->icsk_ulp_ops->release)
5555 + icsk->icsk_ulp_ops->release(sk);
5556 + module_put(icsk->icsk_ulp_ops->owner);
5557 ++
5558 ++ icsk->icsk_ulp_ops = NULL;
5559 + }
5560 +
5561 + /* Change upper layer protocol for socket */
5562 +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
5563 +index d212738e9d10..5516f55e214b 100644
5564 +--- a/net/ipv6/ip6_fib.c
5565 ++++ b/net/ipv6/ip6_fib.c
5566 +@@ -198,6 +198,8 @@ void fib6_info_destroy_rcu(struct rcu_head *head)
5567 + }
5568 + }
5569 +
5570 ++ lwtstate_put(f6i->fib6_nh.nh_lwtstate);
5571 ++
5572 + if (f6i->fib6_nh.nh_dev)
5573 + dev_put(f6i->fib6_nh.nh_dev);
5574 +
5575 +@@ -987,7 +989,10 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
5576 + fib6_clean_expires(iter);
5577 + else
5578 + fib6_set_expires(iter, rt->expires);
5579 +- fib6_metric_set(iter, RTAX_MTU, rt->fib6_pmtu);
5580 ++
5581 ++ if (rt->fib6_pmtu)
5582 ++ fib6_metric_set(iter, RTAX_MTU,
5583 ++ rt->fib6_pmtu);
5584 + return -EEXIST;
5585 + }
5586 + /* If we have the same destination and the same metric,
5587 +diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
5588 +index cd2cfb04e5d8..7ec997fcbc43 100644
5589 +--- a/net/ipv6/ip6_gre.c
5590 ++++ b/net/ipv6/ip6_gre.c
5591 +@@ -1776,6 +1776,7 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
5592 + if (data[IFLA_GRE_COLLECT_METADATA])
5593 + parms->collect_md = true;
5594 +
5595 ++ parms->erspan_ver = 1;
5596 + if (data[IFLA_GRE_ERSPAN_VER])
5597 + parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
5598 +
5599 +diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
5600 +index c72ae3a4fe09..c31a7c4a9249 100644
5601 +--- a/net/ipv6/ip6_vti.c
5602 ++++ b/net/ipv6/ip6_vti.c
5603 +@@ -481,7 +481,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
5604 + }
5605 +
5606 + mtu = dst_mtu(dst);
5607 +- if (!skb->ignore_df && skb->len > mtu) {
5608 ++ if (skb->len > mtu) {
5609 + skb_dst_update_pmtu(skb, mtu);
5610 +
5611 + if (skb->protocol == htons(ETH_P_IPV6)) {
5612 +@@ -1102,7 +1102,8 @@ static void __net_exit vti6_destroy_tunnels(struct vti6_net *ip6n,
5613 + }
5614 +
5615 + t = rtnl_dereference(ip6n->tnls_wc[0]);
5616 +- unregister_netdevice_queue(t->dev, list);
5617 ++ if (t)
5618 ++ unregister_netdevice_queue(t->dev, list);
5619 + }
5620 +
5621 + static int __net_init vti6_init_net(struct net *net)
5622 +@@ -1114,6 +1115,8 @@ static int __net_init vti6_init_net(struct net *net)
5623 + ip6n->tnls[0] = ip6n->tnls_wc;
5624 + ip6n->tnls[1] = ip6n->tnls_r_l;
5625 +
5626 ++ if (!net_has_fallback_tunnels(net))
5627 ++ return 0;
5628 + err = -ENOMEM;
5629 + ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6_vti0",
5630 + NET_NAME_UNKNOWN, vti6_dev_setup);
5631 +diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
5632 +index 0fe61ede77c6..c3c6b09acdc4 100644
5633 +--- a/net/ipv6/netfilter/ip6t_rpfilter.c
5634 ++++ b/net/ipv6/netfilter/ip6t_rpfilter.c
5635 +@@ -26,6 +26,12 @@ static bool rpfilter_addr_unicast(const struct in6_addr *addr)
5636 + return addr_type & IPV6_ADDR_UNICAST;
5637 + }
5638 +
5639 ++static bool rpfilter_addr_linklocal(const struct in6_addr *addr)
5640 ++{
5641 ++ int addr_type = ipv6_addr_type(addr);
5642 ++ return addr_type & IPV6_ADDR_LINKLOCAL;
5643 ++}
5644 ++
5645 + static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
5646 + const struct net_device *dev, u8 flags)
5647 + {
5648 +@@ -48,7 +54,11 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
5649 + }
5650 +
5651 + fl6.flowi6_mark = flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
5652 +- if ((flags & XT_RPFILTER_LOOSE) == 0)
5653 ++
5654 ++ if (rpfilter_addr_linklocal(&iph->saddr)) {
5655 ++ lookup_flags |= RT6_LOOKUP_F_IFACE;
5656 ++ fl6.flowi6_oif = dev->ifindex;
5657 ++ } else if ((flags & XT_RPFILTER_LOOSE) == 0)
5658 + fl6.flowi6_oif = dev->ifindex;
5659 +
5660 + rt = (void *)ip6_route_lookup(net, &fl6, skb, lookup_flags);
5661 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
5662 +index 7208c16302f6..18e00ce1719a 100644
5663 +--- a/net/ipv6/route.c
5664 ++++ b/net/ipv6/route.c
5665 +@@ -956,7 +956,7 @@ static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort)
5666 + rt->dst.error = 0;
5667 + rt->dst.output = ip6_output;
5668 +
5669 +- if (ort->fib6_type == RTN_LOCAL) {
5670 ++ if (ort->fib6_type == RTN_LOCAL || ort->fib6_type == RTN_ANYCAST) {
5671 + rt->dst.input = ip6_input;
5672 + } else if (ipv6_addr_type(&ort->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
5673 + rt->dst.input = ip6_mc_input;
5674 +@@ -996,7 +996,6 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort)
5675 + rt->rt6i_src = ort->fib6_src;
5676 + #endif
5677 + rt->rt6i_prefsrc = ort->fib6_prefsrc;
5678 +- rt->dst.lwtstate = lwtstate_get(ort->fib6_nh.nh_lwtstate);
5679 + }
5680 +
5681 + static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
5682 +diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
5683 +index 0679dd101e72..7ca926a03b81 100644
5684 +--- a/net/netfilter/ipvs/ip_vs_core.c
5685 ++++ b/net/netfilter/ipvs/ip_vs_core.c
5686 +@@ -1972,13 +1972,20 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
5687 + if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
5688 + /* the destination server is not available */
5689 +
5690 +- if (sysctl_expire_nodest_conn(ipvs)) {
5691 ++ __u32 flags = cp->flags;
5692 ++
5693 ++ /* when timer already started, silently drop the packet.*/
5694 ++ if (timer_pending(&cp->timer))
5695 ++ __ip_vs_conn_put(cp);
5696 ++ else
5697 ++ ip_vs_conn_put(cp);
5698 ++
5699 ++ if (sysctl_expire_nodest_conn(ipvs) &&
5700 ++ !(flags & IP_VS_CONN_F_ONE_PACKET)) {
5701 + /* try to expire the connection immediately */
5702 + ip_vs_conn_expire_now(cp);
5703 + }
5704 +- /* don't restart its timer, and silently
5705 +- drop the packet. */
5706 +- __ip_vs_conn_put(cp);
5707 ++
5708 + return NF_DROP;
5709 + }
5710 +
5711 +diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
5712 +index 20a2e37c76d1..e952eedf44b4 100644
5713 +--- a/net/netfilter/nf_conntrack_netlink.c
5714 ++++ b/net/netfilter/nf_conntrack_netlink.c
5715 +@@ -821,6 +821,21 @@ ctnetlink_alloc_filter(const struct nlattr * const cda[])
5716 + #endif
5717 + }
5718 +
5719 ++static int ctnetlink_start(struct netlink_callback *cb)
5720 ++{
5721 ++ const struct nlattr * const *cda = cb->data;
5722 ++ struct ctnetlink_filter *filter = NULL;
5723 ++
5724 ++ if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
5725 ++ filter = ctnetlink_alloc_filter(cda);
5726 ++ if (IS_ERR(filter))
5727 ++ return PTR_ERR(filter);
5728 ++ }
5729 ++
5730 ++ cb->data = filter;
5731 ++ return 0;
5732 ++}
5733 ++
5734 + static int ctnetlink_filter_match(struct nf_conn *ct, void *data)
5735 + {
5736 + struct ctnetlink_filter *filter = data;
5737 +@@ -1240,19 +1255,12 @@ static int ctnetlink_get_conntrack(struct net *net, struct sock *ctnl,
5738 +
5739 + if (nlh->nlmsg_flags & NLM_F_DUMP) {
5740 + struct netlink_dump_control c = {
5741 ++ .start = ctnetlink_start,
5742 + .dump = ctnetlink_dump_table,
5743 + .done = ctnetlink_done,
5744 ++ .data = (void *)cda,
5745 + };
5746 +
5747 +- if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
5748 +- struct ctnetlink_filter *filter;
5749 +-
5750 +- filter = ctnetlink_alloc_filter(cda);
5751 +- if (IS_ERR(filter))
5752 +- return PTR_ERR(filter);
5753 +-
5754 +- c.data = filter;
5755 +- }
5756 + return netlink_dump_start(ctnl, skb, nlh, &c);
5757 + }
5758 +
5759 +diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
5760 +index a0e5adf0b3b6..8fa8bf7c48e6 100644
5761 +--- a/net/netfilter/nfnetlink_acct.c
5762 ++++ b/net/netfilter/nfnetlink_acct.c
5763 +@@ -238,29 +238,33 @@ static const struct nla_policy filter_policy[NFACCT_FILTER_MAX + 1] = {
5764 + [NFACCT_FILTER_VALUE] = { .type = NLA_U32 },
5765 + };
5766 +
5767 +-static struct nfacct_filter *
5768 +-nfacct_filter_alloc(const struct nlattr * const attr)
5769 ++static int nfnl_acct_start(struct netlink_callback *cb)
5770 + {
5771 +- struct nfacct_filter *filter;
5772 ++ const struct nlattr *const attr = cb->data;
5773 + struct nlattr *tb[NFACCT_FILTER_MAX + 1];
5774 ++ struct nfacct_filter *filter;
5775 + int err;
5776 +
5777 ++ if (!attr)
5778 ++ return 0;
5779 ++
5780 + err = nla_parse_nested(tb, NFACCT_FILTER_MAX, attr, filter_policy,
5781 + NULL);
5782 + if (err < 0)
5783 +- return ERR_PTR(err);
5784 ++ return err;
5785 +
5786 + if (!tb[NFACCT_FILTER_MASK] || !tb[NFACCT_FILTER_VALUE])
5787 +- return ERR_PTR(-EINVAL);
5788 ++ return -EINVAL;
5789 +
5790 + filter = kzalloc(sizeof(struct nfacct_filter), GFP_KERNEL);
5791 + if (!filter)
5792 +- return ERR_PTR(-ENOMEM);
5793 ++ return -ENOMEM;
5794 +
5795 + filter->mask = ntohl(nla_get_be32(tb[NFACCT_FILTER_MASK]));
5796 + filter->value = ntohl(nla_get_be32(tb[NFACCT_FILTER_VALUE]));
5797 ++ cb->data = filter;
5798 +
5799 +- return filter;
5800 ++ return 0;
5801 + }
5802 +
5803 + static int nfnl_acct_get(struct net *net, struct sock *nfnl,
5804 +@@ -275,18 +279,11 @@ static int nfnl_acct_get(struct net *net, struct sock *nfnl,
5805 + if (nlh->nlmsg_flags & NLM_F_DUMP) {
5806 + struct netlink_dump_control c = {
5807 + .dump = nfnl_acct_dump,
5808 ++ .start = nfnl_acct_start,
5809 + .done = nfnl_acct_done,
5810 ++ .data = (void *)tb[NFACCT_FILTER],
5811 + };
5812 +
5813 +- if (tb[NFACCT_FILTER]) {
5814 +- struct nfacct_filter *filter;
5815 +-
5816 +- filter = nfacct_filter_alloc(tb[NFACCT_FILTER]);
5817 +- if (IS_ERR(filter))
5818 +- return PTR_ERR(filter);
5819 +-
5820 +- c.data = filter;
5821 +- }
5822 + return netlink_dump_start(nfnl, skb, nlh, &c);
5823 + }
5824 +
5825 +diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
5826 +index d0d8397c9588..aecadd471e1d 100644
5827 +--- a/net/netfilter/x_tables.c
5828 ++++ b/net/netfilter/x_tables.c
5829 +@@ -1178,12 +1178,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
5830 + if (sz < sizeof(*info) || sz >= XT_MAX_TABLE_SIZE)
5831 + return NULL;
5832 +
5833 +- /* __GFP_NORETRY is not fully supported by kvmalloc but it should
5834 +- * work reasonably well if sz is too large and bail out rather
5835 +- * than shoot all processes down before realizing there is nothing
5836 +- * more to reclaim.
5837 +- */
5838 +- info = kvmalloc(sz, GFP_KERNEL | __GFP_NORETRY);
5839 ++ info = kvmalloc(sz, GFP_KERNEL_ACCOUNT);
5840 + if (!info)
5841 + return NULL;
5842 +
5843 +diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c
5844 +index d152e48ea371..8596eed6d9a8 100644
5845 +--- a/net/rds/ib_frmr.c
5846 ++++ b/net/rds/ib_frmr.c
5847 +@@ -61,6 +61,7 @@ static struct rds_ib_mr *rds_ib_alloc_frmr(struct rds_ib_device *rds_ibdev,
5848 + pool->fmr_attr.max_pages);
5849 + if (IS_ERR(frmr->mr)) {
5850 + pr_warn("RDS/IB: %s failed to allocate MR", __func__);
5851 ++ err = PTR_ERR(frmr->mr);
5852 + goto out_no_cigar;
5853 + }
5854 +
5855 +diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
5856 +index 20d7d36b2fc9..005cb21348c9 100644
5857 +--- a/net/sched/act_ife.c
5858 ++++ b/net/sched/act_ife.c
5859 +@@ -265,10 +265,8 @@ static const char *ife_meta_id2name(u32 metaid)
5860 + #endif
5861 +
5862 + /* called when adding new meta information
5863 +- * under ife->tcf_lock for existing action
5864 + */
5865 +-static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
5866 +- void *val, int len, bool exists)
5867 ++static int load_metaops_and_vet(u32 metaid, void *val, int len)
5868 + {
5869 + struct tcf_meta_ops *ops = find_ife_oplist(metaid);
5870 + int ret = 0;
5871 +@@ -276,13 +274,9 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
5872 + if (!ops) {
5873 + ret = -ENOENT;
5874 + #ifdef CONFIG_MODULES
5875 +- if (exists)
5876 +- spin_unlock_bh(&ife->tcf_lock);
5877 + rtnl_unlock();
5878 + request_module("ife-meta-%s", ife_meta_id2name(metaid));
5879 + rtnl_lock();
5880 +- if (exists)
5881 +- spin_lock_bh(&ife->tcf_lock);
5882 + ops = find_ife_oplist(metaid);
5883 + #endif
5884 + }
5885 +@@ -299,24 +293,17 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
5886 + }
5887 +
5888 + /* called when adding new meta information
5889 +- * under ife->tcf_lock for existing action
5890 + */
5891 +-static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
5892 +- int len, bool atomic)
5893 ++static int __add_metainfo(const struct tcf_meta_ops *ops,
5894 ++ struct tcf_ife_info *ife, u32 metaid, void *metaval,
5895 ++ int len, bool atomic, bool exists)
5896 + {
5897 + struct tcf_meta_info *mi = NULL;
5898 +- struct tcf_meta_ops *ops = find_ife_oplist(metaid);
5899 + int ret = 0;
5900 +
5901 +- if (!ops)
5902 +- return -ENOENT;
5903 +-
5904 + mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
5905 +- if (!mi) {
5906 +- /*put back what find_ife_oplist took */
5907 +- module_put(ops->owner);
5908 ++ if (!mi)
5909 + return -ENOMEM;
5910 +- }
5911 +
5912 + mi->metaid = metaid;
5913 + mi->ops = ops;
5914 +@@ -324,17 +311,49 @@ static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
5915 + ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
5916 + if (ret != 0) {
5917 + kfree(mi);
5918 +- module_put(ops->owner);
5919 + return ret;
5920 + }
5921 + }
5922 +
5923 ++ if (exists)
5924 ++ spin_lock_bh(&ife->tcf_lock);
5925 + list_add_tail(&mi->metalist, &ife->metalist);
5926 ++ if (exists)
5927 ++ spin_unlock_bh(&ife->tcf_lock);
5928 +
5929 + return ret;
5930 + }
5931 +
5932 +-static int use_all_metadata(struct tcf_ife_info *ife)
5933 ++static int add_metainfo_and_get_ops(const struct tcf_meta_ops *ops,
5934 ++ struct tcf_ife_info *ife, u32 metaid,
5935 ++ bool exists)
5936 ++{
5937 ++ int ret;
5938 ++
5939 ++ if (!try_module_get(ops->owner))
5940 ++ return -ENOENT;
5941 ++ ret = __add_metainfo(ops, ife, metaid, NULL, 0, true, exists);
5942 ++ if (ret)
5943 ++ module_put(ops->owner);
5944 ++ return ret;
5945 ++}
5946 ++
5947 ++static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
5948 ++ int len, bool exists)
5949 ++{
5950 ++ const struct tcf_meta_ops *ops = find_ife_oplist(metaid);
5951 ++ int ret;
5952 ++
5953 ++ if (!ops)
5954 ++ return -ENOENT;
5955 ++ ret = __add_metainfo(ops, ife, metaid, metaval, len, false, exists);
5956 ++ if (ret)
5957 ++ /*put back what find_ife_oplist took */
5958 ++ module_put(ops->owner);
5959 ++ return ret;
5960 ++}
5961 ++
5962 ++static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
5963 + {
5964 + struct tcf_meta_ops *o;
5965 + int rc = 0;
5966 +@@ -342,7 +361,7 @@ static int use_all_metadata(struct tcf_ife_info *ife)
5967 +
5968 + read_lock(&ife_mod_lock);
5969 + list_for_each_entry(o, &ifeoplist, list) {
5970 +- rc = add_metainfo(ife, o->metaid, NULL, 0, true);
5971 ++ rc = add_metainfo_and_get_ops(o, ife, o->metaid, exists);
5972 + if (rc == 0)
5973 + installed += 1;
5974 + }
5975 +@@ -393,7 +412,6 @@ static void _tcf_ife_cleanup(struct tc_action *a)
5976 + struct tcf_meta_info *e, *n;
5977 +
5978 + list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
5979 +- module_put(e->ops->owner);
5980 + list_del(&e->metalist);
5981 + if (e->metaval) {
5982 + if (e->ops->release)
5983 +@@ -401,6 +419,7 @@ static void _tcf_ife_cleanup(struct tc_action *a)
5984 + else
5985 + kfree(e->metaval);
5986 + }
5987 ++ module_put(e->ops->owner);
5988 + kfree(e);
5989 + }
5990 + }
5991 +@@ -419,7 +438,6 @@ static void tcf_ife_cleanup(struct tc_action *a)
5992 + kfree_rcu(p, rcu);
5993 + }
5994 +
5995 +-/* under ife->tcf_lock for existing action */
5996 + static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
5997 + bool exists)
5998 + {
5999 +@@ -433,7 +451,7 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
6000 + val = nla_data(tb[i]);
6001 + len = nla_len(tb[i]);
6002 +
6003 +- rc = load_metaops_and_vet(ife, i, val, len, exists);
6004 ++ rc = load_metaops_and_vet(i, val, len);
6005 + if (rc != 0)
6006 + return rc;
6007 +
6008 +@@ -531,8 +549,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
6009 + p->eth_type = ife_type;
6010 + }
6011 +
6012 +- if (exists)
6013 +- spin_lock_bh(&ife->tcf_lock);
6014 +
6015 + if (ret == ACT_P_CREATED)
6016 + INIT_LIST_HEAD(&ife->metalist);
6017 +@@ -544,9 +560,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
6018 + metadata_parse_err:
6019 + if (ret == ACT_P_CREATED)
6020 + tcf_idr_release(*a, bind);
6021 +-
6022 +- if (exists)
6023 +- spin_unlock_bh(&ife->tcf_lock);
6024 + kfree(p);
6025 + return err;
6026 + }
6027 +@@ -561,18 +574,17 @@ metadata_parse_err:
6028 + * as we can. You better have at least one else we are
6029 + * going to bail out
6030 + */
6031 +- err = use_all_metadata(ife);
6032 ++ err = use_all_metadata(ife, exists);
6033 + if (err) {
6034 + if (ret == ACT_P_CREATED)
6035 + tcf_idr_release(*a, bind);
6036 +-
6037 +- if (exists)
6038 +- spin_unlock_bh(&ife->tcf_lock);
6039 + kfree(p);
6040 + return err;
6041 + }
6042 + }
6043 +
6044 ++ if (exists)
6045 ++ spin_lock_bh(&ife->tcf_lock);
6046 + ife->tcf_action = parm->action;
6047 + if (exists)
6048 + spin_unlock_bh(&ife->tcf_lock);
6049 +diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
6050 +index 8a925c72db5f..bad475c87688 100644
6051 +--- a/net/sched/act_pedit.c
6052 ++++ b/net/sched/act_pedit.c
6053 +@@ -109,16 +109,18 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb,
6054 + {
6055 + struct nlattr *keys_start = nla_nest_start(skb, TCA_PEDIT_KEYS_EX);
6056 +
6057 ++ if (!keys_start)
6058 ++ goto nla_failure;
6059 + for (; n > 0; n--) {
6060 + struct nlattr *key_start;
6061 +
6062 + key_start = nla_nest_start(skb, TCA_PEDIT_KEY_EX);
6063 ++ if (!key_start)
6064 ++ goto nla_failure;
6065 +
6066 + if (nla_put_u16(skb, TCA_PEDIT_KEY_EX_HTYPE, keys_ex->htype) ||
6067 +- nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd)) {
6068 +- nlmsg_trim(skb, keys_start);
6069 +- return -EINVAL;
6070 +- }
6071 ++ nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd))
6072 ++ goto nla_failure;
6073 +
6074 + nla_nest_end(skb, key_start);
6075 +
6076 +@@ -128,6 +130,9 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb,
6077 + nla_nest_end(skb, keys_start);
6078 +
6079 + return 0;
6080 ++nla_failure:
6081 ++ nla_nest_cancel(skb, keys_start);
6082 ++ return -EINVAL;
6083 + }
6084 +
6085 + static int tcf_pedit_init(struct net *net, struct nlattr *nla,
6086 +@@ -395,7 +400,10 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
6087 + opt->bindcnt = p->tcf_bindcnt - bind;
6088 +
6089 + if (p->tcfp_keys_ex) {
6090 +- tcf_pedit_key_ex_dump(skb, p->tcfp_keys_ex, p->tcfp_nkeys);
6091 ++ if (tcf_pedit_key_ex_dump(skb,
6092 ++ p->tcfp_keys_ex,
6093 ++ p->tcfp_nkeys))
6094 ++ goto nla_put_failure;
6095 +
6096 + if (nla_put(skb, TCA_PEDIT_PARMS_EX, s, opt))
6097 + goto nla_put_failure;
6098 +diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
6099 +index fb861f90fde6..260749956ef3 100644
6100 +--- a/net/sched/cls_u32.c
6101 ++++ b/net/sched/cls_u32.c
6102 +@@ -912,6 +912,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
6103 + struct nlattr *opt = tca[TCA_OPTIONS];
6104 + struct nlattr *tb[TCA_U32_MAX + 1];
6105 + u32 htid, flags = 0;
6106 ++ size_t sel_size;
6107 + int err;
6108 + #ifdef CONFIG_CLS_U32_PERF
6109 + size_t size;
6110 +@@ -1074,8 +1075,13 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
6111 + }
6112 +
6113 + s = nla_data(tb[TCA_U32_SEL]);
6114 ++ sel_size = struct_size(s, keys, s->nkeys);
6115 ++ if (nla_len(tb[TCA_U32_SEL]) < sel_size) {
6116 ++ err = -EINVAL;
6117 ++ goto erridr;
6118 ++ }
6119 +
6120 +- n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
6121 ++ n = kzalloc(offsetof(typeof(*n), sel) + sel_size, GFP_KERNEL);
6122 + if (n == NULL) {
6123 + err = -ENOBUFS;
6124 + goto erridr;
6125 +@@ -1090,7 +1096,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
6126 + }
6127 + #endif
6128 +
6129 +- memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
6130 ++ memcpy(&n->sel, s, sel_size);
6131 + RCU_INIT_POINTER(n->ht_up, ht);
6132 + n->handle = handle;
6133 + n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
6134 +diff --git a/net/sctp/proc.c b/net/sctp/proc.c
6135 +index ef5c9a82d4e8..a644292f9faf 100644
6136 +--- a/net/sctp/proc.c
6137 ++++ b/net/sctp/proc.c
6138 +@@ -215,7 +215,6 @@ static const struct seq_operations sctp_eps_ops = {
6139 + struct sctp_ht_iter {
6140 + struct seq_net_private p;
6141 + struct rhashtable_iter hti;
6142 +- int start_fail;
6143 + };
6144 +
6145 + static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos)
6146 +@@ -224,7 +223,6 @@ static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos)
6147 +
6148 + sctp_transport_walk_start(&iter->hti);
6149 +
6150 +- iter->start_fail = 0;
6151 + return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos);
6152 + }
6153 +
6154 +@@ -232,8 +230,6 @@ static void sctp_transport_seq_stop(struct seq_file *seq, void *v)
6155 + {
6156 + struct sctp_ht_iter *iter = seq->private;
6157 +
6158 +- if (iter->start_fail)
6159 +- return;
6160 + sctp_transport_walk_stop(&iter->hti);
6161 + }
6162 +
6163 +@@ -264,8 +260,6 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
6164 + }
6165 +
6166 + transport = (struct sctp_transport *)v;
6167 +- if (!sctp_transport_hold(transport))
6168 +- return 0;
6169 + assoc = transport->asoc;
6170 + epb = &assoc->base;
6171 + sk = epb->sk;
6172 +@@ -322,8 +316,6 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
6173 + }
6174 +
6175 + transport = (struct sctp_transport *)v;
6176 +- if (!sctp_transport_hold(transport))
6177 +- return 0;
6178 + assoc = transport->asoc;
6179 +
6180 + list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list,
6181 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
6182 +index ce620e878538..50ee07cd20c4 100644
6183 +--- a/net/sctp/socket.c
6184 ++++ b/net/sctp/socket.c
6185 +@@ -4881,9 +4881,14 @@ struct sctp_transport *sctp_transport_get_next(struct net *net,
6186 + break;
6187 + }
6188 +
6189 ++ if (!sctp_transport_hold(t))
6190 ++ continue;
6191 ++
6192 + if (net_eq(sock_net(t->asoc->base.sk), net) &&
6193 + t->asoc->peer.primary_path == t)
6194 + break;
6195 ++
6196 ++ sctp_transport_put(t);
6197 + }
6198 +
6199 + return t;
6200 +@@ -4893,13 +4898,18 @@ struct sctp_transport *sctp_transport_get_idx(struct net *net,
6201 + struct rhashtable_iter *iter,
6202 + int pos)
6203 + {
6204 +- void *obj = SEQ_START_TOKEN;
6205 ++ struct sctp_transport *t;
6206 +
6207 +- while (pos && (obj = sctp_transport_get_next(net, iter)) &&
6208 +- !IS_ERR(obj))
6209 +- pos--;
6210 ++ if (!pos)
6211 ++ return SEQ_START_TOKEN;
6212 +
6213 +- return obj;
6214 ++ while ((t = sctp_transport_get_next(net, iter)) && !IS_ERR(t)) {
6215 ++ if (!--pos)
6216 ++ break;
6217 ++ sctp_transport_put(t);
6218 ++ }
6219 ++
6220 ++ return t;
6221 + }
6222 +
6223 + int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
6224 +@@ -4958,8 +4968,6 @@ again:
6225 +
6226 + tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
6227 + for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
6228 +- if (!sctp_transport_hold(tsp))
6229 +- continue;
6230 + ret = cb(tsp, p);
6231 + if (ret)
6232 + break;
6233 +diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
6234 +index 8654494b4d0a..834eb2b9e41b 100644
6235 +--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
6236 ++++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
6237 +@@ -169,7 +169,7 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
6238 + struct scatterlist sg[1];
6239 + int err = -1;
6240 + u8 *checksumdata;
6241 +- u8 rc4salt[4];
6242 ++ u8 *rc4salt;
6243 + struct crypto_ahash *md5;
6244 + struct crypto_ahash *hmac_md5;
6245 + struct ahash_request *req;
6246 +@@ -183,14 +183,18 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
6247 + return GSS_S_FAILURE;
6248 + }
6249 +
6250 ++ rc4salt = kmalloc_array(4, sizeof(*rc4salt), GFP_NOFS);
6251 ++ if (!rc4salt)
6252 ++ return GSS_S_FAILURE;
6253 ++
6254 + if (arcfour_hmac_md5_usage_to_salt(usage, rc4salt)) {
6255 + dprintk("%s: invalid usage value %u\n", __func__, usage);
6256 +- return GSS_S_FAILURE;
6257 ++ goto out_free_rc4salt;
6258 + }
6259 +
6260 + checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
6261 + if (!checksumdata)
6262 +- return GSS_S_FAILURE;
6263 ++ goto out_free_rc4salt;
6264 +
6265 + md5 = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
6266 + if (IS_ERR(md5))
6267 +@@ -258,6 +262,8 @@ out_free_md5:
6268 + crypto_free_ahash(md5);
6269 + out_free_cksum:
6270 + kfree(checksumdata);
6271 ++out_free_rc4salt:
6272 ++ kfree(rc4salt);
6273 + return err ? GSS_S_FAILURE : 0;
6274 + }
6275 +
6276 +diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
6277 +index bebe88cae07b..ff968c7afef6 100644
6278 +--- a/net/tipc/name_table.c
6279 ++++ b/net/tipc/name_table.c
6280 +@@ -980,20 +980,17 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
6281 +
6282 + struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port)
6283 + {
6284 +- u64 value = (u64)node << 32 | port;
6285 + struct tipc_dest *dst;
6286 +
6287 + list_for_each_entry(dst, l, list) {
6288 +- if (dst->value != value)
6289 +- continue;
6290 +- return dst;
6291 ++ if (dst->node == node && dst->port == port)
6292 ++ return dst;
6293 + }
6294 + return NULL;
6295 + }
6296 +
6297 + bool tipc_dest_push(struct list_head *l, u32 node, u32 port)
6298 + {
6299 +- u64 value = (u64)node << 32 | port;
6300 + struct tipc_dest *dst;
6301 +
6302 + if (tipc_dest_find(l, node, port))
6303 +@@ -1002,7 +999,8 @@ bool tipc_dest_push(struct list_head *l, u32 node, u32 port)
6304 + dst = kmalloc(sizeof(*dst), GFP_ATOMIC);
6305 + if (unlikely(!dst))
6306 + return false;
6307 +- dst->value = value;
6308 ++ dst->node = node;
6309 ++ dst->port = port;
6310 + list_add(&dst->list, l);
6311 + return true;
6312 + }
6313 +diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
6314 +index 0febba41da86..892bd750b85f 100644
6315 +--- a/net/tipc/name_table.h
6316 ++++ b/net/tipc/name_table.h
6317 +@@ -133,13 +133,8 @@ void tipc_nametbl_stop(struct net *net);
6318 +
6319 + struct tipc_dest {
6320 + struct list_head list;
6321 +- union {
6322 +- struct {
6323 +- u32 port;
6324 +- u32 node;
6325 +- };
6326 +- u64 value;
6327 +- };
6328 ++ u32 port;
6329 ++ u32 node;
6330 + };
6331 +
6332 + struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port);
6333 +diff --git a/net/tipc/socket.c b/net/tipc/socket.c
6334 +index 930852c54d7a..0a5fa347135e 100644
6335 +--- a/net/tipc/socket.c
6336 ++++ b/net/tipc/socket.c
6337 +@@ -2675,6 +2675,8 @@ void tipc_sk_reinit(struct net *net)
6338 +
6339 + rhashtable_walk_stop(&iter);
6340 + } while (tsk == ERR_PTR(-EAGAIN));
6341 ++
6342 ++ rhashtable_walk_exit(&iter);
6343 + }
6344 +
6345 + static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
6346 +diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
6347 +index 301f22430469..45188d920013 100644
6348 +--- a/net/tls/tls_main.c
6349 ++++ b/net/tls/tls_main.c
6350 +@@ -45,6 +45,7 @@
6351 + MODULE_AUTHOR("Mellanox Technologies");
6352 + MODULE_DESCRIPTION("Transport Layer Security Support");
6353 + MODULE_LICENSE("Dual BSD/GPL");
6354 ++MODULE_ALIAS_TCP_ULP("tls");
6355 +
6356 + enum {
6357 + TLSV4,
6358 +diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c
6359 +index 4b4d78fffe30..da9070889223 100644
6360 +--- a/samples/bpf/xdp_redirect_cpu_user.c
6361 ++++ b/samples/bpf/xdp_redirect_cpu_user.c
6362 +@@ -679,8 +679,9 @@ int main(int argc, char **argv)
6363 + return EXIT_FAIL_OPTION;
6364 + }
6365 +
6366 +- /* Remove XDP program when program is interrupted */
6367 ++ /* Remove XDP program when program is interrupted or killed */
6368 + signal(SIGINT, int_exit);
6369 ++ signal(SIGTERM, int_exit);
6370 +
6371 + if (bpf_set_link_xdp_fd(ifindex, prog_fd[prog_num], xdp_flags) < 0) {
6372 + fprintf(stderr, "link set xdp fd failed\n");
6373 +diff --git a/samples/bpf/xdp_rxq_info_user.c b/samples/bpf/xdp_rxq_info_user.c
6374 +index e4e9ba52bff0..bb278447299c 100644
6375 +--- a/samples/bpf/xdp_rxq_info_user.c
6376 ++++ b/samples/bpf/xdp_rxq_info_user.c
6377 +@@ -534,8 +534,9 @@ int main(int argc, char **argv)
6378 + exit(EXIT_FAIL_BPF);
6379 + }
6380 +
6381 +- /* Remove XDP program when program is interrupted */
6382 ++ /* Remove XDP program when program is interrupted or killed */
6383 + signal(SIGINT, int_exit);
6384 ++ signal(SIGTERM, int_exit);
6385 +
6386 + if (bpf_set_link_xdp_fd(ifindex, prog_fd, xdp_flags) < 0) {
6387 + fprintf(stderr, "link set xdp fd failed\n");
6388 +diff --git a/scripts/coccicheck b/scripts/coccicheck
6389 +index 9fedca611b7f..e04d328210ac 100755
6390 +--- a/scripts/coccicheck
6391 ++++ b/scripts/coccicheck
6392 +@@ -128,9 +128,10 @@ run_cmd_parmap() {
6393 + fi
6394 + echo $@ >>$DEBUG_FILE
6395 + $@ 2>>$DEBUG_FILE
6396 +- if [[ $? -ne 0 ]]; then
6397 ++ err=$?
6398 ++ if [[ $err -ne 0 ]]; then
6399 + echo "coccicheck failed"
6400 +- exit $?
6401 ++ exit $err
6402 + fi
6403 + }
6404 +
6405 +diff --git a/scripts/depmod.sh b/scripts/depmod.sh
6406 +index 999d585eaa73..e5f0aad75b96 100755
6407 +--- a/scripts/depmod.sh
6408 ++++ b/scripts/depmod.sh
6409 +@@ -15,9 +15,9 @@ if ! test -r System.map ; then
6410 + fi
6411 +
6412 + if [ -z $(command -v $DEPMOD) ]; then
6413 +- echo "'make modules_install' requires $DEPMOD. Please install it." >&2
6414 ++ echo "Warning: 'make modules_install' requires $DEPMOD. Please install it." >&2
6415 + echo "This is probably in the kmod package." >&2
6416 +- exit 1
6417 ++ exit 0
6418 + fi
6419 +
6420 + # older versions of depmod require the version string to start with three
6421 +diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
6422 +index 1663fb19343a..b95cf57782a3 100644
6423 +--- a/scripts/mod/modpost.c
6424 ++++ b/scripts/mod/modpost.c
6425 +@@ -672,7 +672,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
6426 + if (ELF_ST_TYPE(sym->st_info) == STT_SPARC_REGISTER)
6427 + break;
6428 + if (symname[0] == '.') {
6429 +- char *munged = strdup(symname);
6430 ++ char *munged = NOFAIL(strdup(symname));
6431 + munged[0] = '_';
6432 + munged[1] = toupper(munged[1]);
6433 + symname = munged;
6434 +@@ -1318,7 +1318,7 @@ static Elf_Sym *find_elf_symbol2(struct elf_info *elf, Elf_Addr addr,
6435 + static char *sec2annotation(const char *s)
6436 + {
6437 + if (match(s, init_exit_sections)) {
6438 +- char *p = malloc(20);
6439 ++ char *p = NOFAIL(malloc(20));
6440 + char *r = p;
6441 +
6442 + *p++ = '_';
6443 +@@ -1338,7 +1338,7 @@ static char *sec2annotation(const char *s)
6444 + strcat(p, " ");
6445 + return r;
6446 + } else {
6447 +- return strdup("");
6448 ++ return NOFAIL(strdup(""));
6449 + }
6450 + }
6451 +
6452 +@@ -2036,7 +2036,7 @@ void buf_write(struct buffer *buf, const char *s, int len)
6453 + {
6454 + if (buf->size - buf->pos < len) {
6455 + buf->size += len + SZ;
6456 +- buf->p = realloc(buf->p, buf->size);
6457 ++ buf->p = NOFAIL(realloc(buf->p, buf->size));
6458 + }
6459 + strncpy(buf->p + buf->pos, s, len);
6460 + buf->pos += len;
6461 +diff --git a/security/apparmor/policy_ns.c b/security/apparmor/policy_ns.c
6462 +index b0f9dc3f765a..1a7cec5d9cac 100644
6463 +--- a/security/apparmor/policy_ns.c
6464 ++++ b/security/apparmor/policy_ns.c
6465 +@@ -255,7 +255,7 @@ static struct aa_ns *__aa_create_ns(struct aa_ns *parent, const char *name,
6466 +
6467 + ns = alloc_ns(parent->base.hname, name);
6468 + if (!ns)
6469 +- return NULL;
6470 ++ return ERR_PTR(-ENOMEM);
6471 + ns->level = parent->level + 1;
6472 + mutex_lock_nested(&ns->lock, ns->level);
6473 + error = __aafs_ns_mkdir(ns, ns_subns_dir(parent), name, dir);
6474 +diff --git a/security/keys/dh.c b/security/keys/dh.c
6475 +index b203f7758f97..1a68d27e72b4 100644
6476 +--- a/security/keys/dh.c
6477 ++++ b/security/keys/dh.c
6478 +@@ -300,7 +300,7 @@ long __keyctl_dh_compute(struct keyctl_dh_params __user *params,
6479 + }
6480 + dh_inputs.g_size = dlen;
6481 +
6482 +- dlen = dh_data_from_key(pcopy.private, &dh_inputs.key);
6483 ++ dlen = dh_data_from_key(pcopy.dh_private, &dh_inputs.key);
6484 + if (dlen < 0) {
6485 + ret = dlen;
6486 + goto out2;
6487 +diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
6488 +index 79d3709b0671..0b66d7283b00 100644
6489 +--- a/security/selinux/selinuxfs.c
6490 ++++ b/security/selinux/selinuxfs.c
6491 +@@ -1365,13 +1365,18 @@ static int sel_make_bools(struct selinux_fs_info *fsi)
6492 +
6493 + ret = -ENOMEM;
6494 + inode = sel_make_inode(dir->d_sb, S_IFREG | S_IRUGO | S_IWUSR);
6495 +- if (!inode)
6496 ++ if (!inode) {
6497 ++ dput(dentry);
6498 + goto out;
6499 ++ }
6500 +
6501 + ret = -ENAMETOOLONG;
6502 + len = snprintf(page, PAGE_SIZE, "/%s/%s", BOOL_DIR_NAME, names[i]);
6503 +- if (len >= PAGE_SIZE)
6504 ++ if (len >= PAGE_SIZE) {
6505 ++ dput(dentry);
6506 ++ iput(inode);
6507 + goto out;
6508 ++ }
6509 +
6510 + isec = (struct inode_security_struct *)inode->i_security;
6511 + ret = security_genfs_sid(fsi->state, "selinuxfs", page,
6512 +@@ -1586,8 +1591,10 @@ static int sel_make_avc_files(struct dentry *dir)
6513 + return -ENOMEM;
6514 +
6515 + inode = sel_make_inode(dir->d_sb, S_IFREG|files[i].mode);
6516 +- if (!inode)
6517 ++ if (!inode) {
6518 ++ dput(dentry);
6519 + return -ENOMEM;
6520 ++ }
6521 +
6522 + inode->i_fop = files[i].ops;
6523 + inode->i_ino = ++fsi->last_ino;
6524 +@@ -1632,8 +1639,10 @@ static int sel_make_initcon_files(struct dentry *dir)
6525 + return -ENOMEM;
6526 +
6527 + inode = sel_make_inode(dir->d_sb, S_IFREG|S_IRUGO);
6528 +- if (!inode)
6529 ++ if (!inode) {
6530 ++ dput(dentry);
6531 + return -ENOMEM;
6532 ++ }
6533 +
6534 + inode->i_fop = &sel_initcon_ops;
6535 + inode->i_ino = i|SEL_INITCON_INO_OFFSET;
6536 +@@ -1733,8 +1742,10 @@ static int sel_make_perm_files(char *objclass, int classvalue,
6537 +
6538 + rc = -ENOMEM;
6539 + inode = sel_make_inode(dir->d_sb, S_IFREG|S_IRUGO);
6540 +- if (!inode)
6541 ++ if (!inode) {
6542 ++ dput(dentry);
6543 + goto out;
6544 ++ }
6545 +
6546 + inode->i_fop = &sel_perm_ops;
6547 + /* i+1 since perm values are 1-indexed */
6548 +@@ -1763,8 +1774,10 @@ static int sel_make_class_dir_entries(char *classname, int index,
6549 + return -ENOMEM;
6550 +
6551 + inode = sel_make_inode(dir->d_sb, S_IFREG|S_IRUGO);
6552 +- if (!inode)
6553 ++ if (!inode) {
6554 ++ dput(dentry);
6555 + return -ENOMEM;
6556 ++ }
6557 +
6558 + inode->i_fop = &sel_class_ops;
6559 + inode->i_ino = sel_class_to_ino(index);
6560 +@@ -1838,8 +1851,10 @@ static int sel_make_policycap(struct selinux_fs_info *fsi)
6561 + return -ENOMEM;
6562 +
6563 + inode = sel_make_inode(fsi->sb, S_IFREG | 0444);
6564 +- if (inode == NULL)
6565 ++ if (inode == NULL) {
6566 ++ dput(dentry);
6567 + return -ENOMEM;
6568 ++ }
6569 +
6570 + inode->i_fop = &sel_policycap_ops;
6571 + inode->i_ino = iter | SEL_POLICYCAP_INO_OFFSET;
6572 +@@ -1932,8 +1947,10 @@ static int sel_fill_super(struct super_block *sb, void *data, int silent)
6573 +
6574 + ret = -ENOMEM;
6575 + inode = sel_make_inode(sb, S_IFCHR | S_IRUGO | S_IWUGO);
6576 +- if (!inode)
6577 ++ if (!inode) {
6578 ++ dput(dentry);
6579 + goto err;
6580 ++ }
6581 +
6582 + inode->i_ino = ++fsi->last_ino;
6583 + isec = (struct inode_security_struct *)inode->i_security;
6584 +diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
6585 +index 8a0181a2db08..47feef30dadb 100644
6586 +--- a/sound/soc/codecs/rt5677.c
6587 ++++ b/sound/soc/codecs/rt5677.c
6588 +@@ -5007,7 +5007,7 @@ static const struct regmap_config rt5677_regmap = {
6589 + };
6590 +
6591 + static const struct of_device_id rt5677_of_match[] = {
6592 +- { .compatible = "realtek,rt5677", RT5677 },
6593 ++ { .compatible = "realtek,rt5677", .data = (const void *)RT5677 },
6594 + { }
6595 + };
6596 + MODULE_DEVICE_TABLE(of, rt5677_of_match);
6597 +diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
6598 +index 7fdfdf3f6e67..14f1b0c0d286 100644
6599 +--- a/sound/soc/codecs/wm8994.c
6600 ++++ b/sound/soc/codecs/wm8994.c
6601 +@@ -2432,6 +2432,7 @@ static int wm8994_set_dai_sysclk(struct snd_soc_dai *dai,
6602 + snd_soc_component_update_bits(component, WM8994_POWER_MANAGEMENT_2,
6603 + WM8994_OPCLK_ENA, 0);
6604 + }
6605 ++ break;
6606 +
6607 + default:
6608 + return -EINVAL;
6609 +diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c
6610 +index 1120e39c1b00..5ccfce87e693 100644
6611 +--- a/tools/perf/arch/arm64/util/arm-spe.c
6612 ++++ b/tools/perf/arch/arm64/util/arm-spe.c
6613 +@@ -194,6 +194,7 @@ struct auxtrace_record *arm_spe_recording_init(int *err,
6614 + sper->itr.read_finish = arm_spe_read_finish;
6615 + sper->itr.alignment = 0;
6616 +
6617 ++ *err = 0;
6618 + return &sper->itr;
6619 + }
6620 +
6621 +diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c
6622 +index 53d83d7e6a09..20e7d74d86cd 100644
6623 +--- a/tools/perf/arch/powerpc/util/sym-handling.c
6624 ++++ b/tools/perf/arch/powerpc/util/sym-handling.c
6625 +@@ -141,8 +141,10 @@ void arch__post_process_probe_trace_events(struct perf_probe_event *pev,
6626 + for (i = 0; i < ntevs; i++) {
6627 + tev = &pev->tevs[i];
6628 + map__for_each_symbol(map, sym, tmp) {
6629 +- if (map->unmap_ip(map, sym->start) == tev->point.address)
6630 ++ if (map->unmap_ip(map, sym->start) == tev->point.address) {
6631 + arch__fix_tev_from_maps(pev, tev, map, sym);
6632 ++ break;
6633 ++ }
6634 + }
6635 + }
6636 + }
6637 +diff --git a/tools/perf/util/namespaces.c b/tools/perf/util/namespaces.c
6638 +index 5be021701f34..cf8bd123cf73 100644
6639 +--- a/tools/perf/util/namespaces.c
6640 ++++ b/tools/perf/util/namespaces.c
6641 +@@ -139,6 +139,9 @@ struct nsinfo *nsinfo__copy(struct nsinfo *nsi)
6642 + {
6643 + struct nsinfo *nnsi;
6644 +
6645 ++ if (nsi == NULL)
6646 ++ return NULL;
6647 ++
6648 + nnsi = calloc(1, sizeof(*nnsi));
6649 + if (nnsi != NULL) {
6650 + nnsi->pid = nsi->pid;
6651 +diff --git a/tools/testing/selftests/powerpc/harness.c b/tools/testing/selftests/powerpc/harness.c
6652 +index 66d31de60b9a..9d7166dfad1e 100644
6653 +--- a/tools/testing/selftests/powerpc/harness.c
6654 ++++ b/tools/testing/selftests/powerpc/harness.c
6655 +@@ -85,13 +85,13 @@ wait:
6656 + return status;
6657 + }
6658 +
6659 +-static void alarm_handler(int signum)
6660 ++static void sig_handler(int signum)
6661 + {
6662 +- /* Jut wake us up from waitpid */
6663 ++ /* Just wake us up from waitpid */
6664 + }
6665 +
6666 +-static struct sigaction alarm_action = {
6667 +- .sa_handler = alarm_handler,
6668 ++static struct sigaction sig_action = {
6669 ++ .sa_handler = sig_handler,
6670 + };
6671 +
6672 + void test_harness_set_timeout(uint64_t time)
6673 +@@ -106,8 +106,14 @@ int test_harness(int (test_function)(void), char *name)
6674 + test_start(name);
6675 + test_set_git_version(GIT_VERSION);
6676 +
6677 +- if (sigaction(SIGALRM, &alarm_action, NULL)) {
6678 +- perror("sigaction");
6679 ++ if (sigaction(SIGINT, &sig_action, NULL)) {
6680 ++ perror("sigaction (sigint)");
6681 ++ test_error(name);
6682 ++ return 1;
6683 ++ }
6684 ++
6685 ++ if (sigaction(SIGALRM, &sig_action, NULL)) {
6686 ++ perror("sigaction (sigalrm)");
6687 + test_error(name);
6688 + return 1;
6689 + }